| text (string, 78 to 104k characters) | score (float64, 0 to 0.18) |
|---|---|
def _checkblk(name):
'''
Check if the blk exists and return its fstype if ok
'''
blk = __salt__['cmd.run']('blkid -o value -s TYPE {0}'.format(name),
ignore_retcode=True)
return '' if not blk else blk | 0.003984 |
def ignore(self, argument_dest, **kwargs):
""" Register an argument with type knack.arguments.ignore_type (hidden/ignored)
:param argument_dest: The destination argument to apply the ignore type to
:type argument_dest: str
"""
self._check_stale()
if not self._applicable():
return
dest_option = ['--__{}'.format(argument_dest.upper())]
self.argument(argument_dest, arg_type=ignore_type, options_list=dest_option, **kwargs) | 0.01 |
def read_archive(self,header,prepend=None):
""" Extract a copy of WCS keywords from an open file header,
if they have already been created and remember the prefix
used for those keywords. Otherwise, setup the current WCS
keywords as the archive values.
"""
        # Start by looking for any backup WCS keywords to
# determine whether archived values are present and to set
# the prefix used.
_prefix = None
_archive = False
if header is not None:
for kw in header.items():
if kw[0][1:] in self.wcstrans.keys():
_prefix = kw[0][0]
_archive = True
break
if not _archive:
self.archive(prepend=prepend)
return
# We have archive keywords and a defined prefix
# Go through and append them to self.backup
if _prefix is not None:
self.prepend = _prefix
else:
self.prepend = DEFAULT_PREFIX
for key in self.wcstrans.keys():
_archive_key = self._buildNewKeyname(key,_prefix)
            if key != 'pixel scale':
if _archive_key in header:
self.orig_wcs[_archive_key] = header[_archive_key]
else:
self.orig_wcs[_archive_key] = header[key]
self.backup[key] = _archive_key
self.revert[_archive_key] = key
# Establish plate scale value
_cd11str = self.prepend+'CD1_1'
_cd21str = self.prepend+'CD2_1'
pscale = self.compute_pscale(self.orig_wcs[_cd11str],self.orig_wcs[_cd21str])
_archive_key = self.prepend.lower()+'pscale'
self.orig_wcs[_archive_key] = pscale
self.backup['pixel scale'] = _archive_key
self.revert[_archive_key] = 'pixel scale'
# Setup keyword to record when these keywords were backed up.
if 'WCSCDATE' in header:
self.orig_wcs['WCSCDATE'] = header['WCSCDATE']
else:
self.orig_wcs['WCSCDATE'] = fileutil.getLTime()
self.backup['WCSCDATE'] = 'WCSCDATE'
self.revert['WCSCDATE'] = 'WCSCDATE' | 0.003605 |
def _ls_print_listing(dir_: str, recursive: bool, all_: bool, long: bool) -> List[Tuple[str, dict, TrainingTrace]]:
"""
Print names of the train dirs contained in the given dir.
:param dir_: dir to be listed
:param recursive: walk recursively in sub-directories, stop at train dirs (--recursive option)
:param all_: include train dirs with no epochs done (--all option)
:param long: list more details including model name, model and dataset classes,
age, duration and epochs done (--long option)
:return: list of found training tuples (train_dir, configuration dict, trace)
"""
all_trainings = []
for root_dir, train_dirs in walk_train_dirs(dir_):
if train_dirs:
if recursive:
print(root_dir + ':')
trainings = [(train_dir,
load_config(path.join(train_dir, CXF_CONFIG_FILE), []),
TrainingTrace.from_file(path.join(train_dir, CXF_TRACE_FILE)))
for train_dir
in [os.path.join(root_dir, train_dir) for train_dir in train_dirs]]
if not all_:
trainings = [train_dir for train_dir in trainings if train_dir[2][TrainingTraceKeys.EPOCHS_DONE]]
if long:
print('total {}'.format(len(trainings)))
_print_trainings_long(trainings)
else:
for train_dir, _, _ in trainings:
print(path.basename(train_dir))
all_trainings.extend(trainings)
if recursive:
print()
if not recursive:
break
return all_trainings | 0.005351 |
def uniq(args):
"""
%prog uniq fastqfile
Retain only first instance of duplicate reads. Duplicate is defined as
having the same read name.
"""
p = OptionParser(uniq.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastqfile, = args
fw = must_open(opts.outfile, "w")
nduplicates = nreads = 0
seen = set()
for rec in iter_fastq(fastqfile):
nreads += 1
if rec is None:
break
name = rec.name
if name in seen:
nduplicates += 1
continue
seen.add(name)
print(rec, file=fw)
logging.debug("Removed duplicate reads: {}".\
format(percentage(nduplicates, nreads))) | 0.002561 |
def inhull(self, xyz, pore, tol=1e-7):
r"""
Tests whether points lie within a convex hull or not.
        Computes a tessellation of the hull and works out the normals of the
        facets. Then tests whether dot(x, normals) < dot(a, normals), where a
        is the first vertex of the facets.
"""
xyz = np.around(xyz, 10)
# Work out range to span over for pore hull
xmin = xyz[:, 0].min()
xr = (np.ceil(xyz[:, 0].max())-np.floor(xmin)).astype(int)+1
ymin = xyz[:, 1].min()
yr = (np.ceil(xyz[:, 1].max())-np.floor(ymin)).astype(int)+1
zmin = xyz[:, 2].min()
zr = (np.ceil(xyz[:, 2].max())-np.floor(zmin)).astype(int)+1
origin = np.array([xmin, ymin, zmin])
# start index
si = np.floor(origin).astype(int)
xyz -= origin
dom = np.zeros([xr, yr, zr], dtype=np.uint8)
indx, indy, indz = np.indices((xr, yr, zr))
# Calculate the tesselation of the points
hull = sptl.ConvexHull(xyz)
# Assume 3d for now
# Calc normals from the vector cross product of the vectors defined
# by joining points in the simplices
vab = xyz[hull.simplices[:, 0]]-xyz[hull.simplices[:, 1]]
vac = xyz[hull.simplices[:, 0]]-xyz[hull.simplices[:, 2]]
nrmls = np.cross(vab, vac)
# Scale normal vectors to unit length
nrmlen = np.sum(nrmls**2, axis=-1)**(1./2)
nrmls = nrmls*np.tile((1/nrmlen), (3, 1)).T
# Center of Mass
center = np.mean(xyz, axis=0)
# Any point from each simplex
a = xyz[hull.simplices[:, 0]]
# Make sure all normals point inwards
dp = np.sum((np.tile(center, (len(a), 1))-a)*nrmls, axis=-1)
k = dp < 0
nrmls[k] = -nrmls[k]
# Now we want to test whether dot(x,N) >= dot(a,N)
aN = np.sum(nrmls*a, axis=-1)
for plane_index in range(len(a)):
eqx = nrmls[plane_index][0]*(indx)
eqy = nrmls[plane_index][1]*(indy)
eqz = nrmls[plane_index][2]*(indz)
xN = eqx + eqy + eqz
dom[xN - aN[plane_index] >= 0-tol] += 1
dom[dom < len(a)] = 0
dom[dom == len(a)] = 1
ds = np.shape(dom)
temp_arr = np.zeros_like(self._hull_image, dtype=bool)
temp_arr[si[0]:si[0]+ds[0], si[1]:si[1]+ds[1], si[2]:si[2]+ds[2]] = dom
self._hull_image[temp_arr] = pore
del temp_arr | 0.000815 |
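The voxel-based test above can also be cross-checked without building a grid: scipy's `ConvexHull` exposes the facet hyperplane equations directly. The sketch below is illustrative only (the `points_in_hull` helper is not part of the original module) and operates on a plain (n, 3) array of candidate points rather than the voxelised domain used by `inhull`.

```python
import numpy as np
import scipy.spatial as sptl

def points_in_hull(points, hull_points, tol=1e-7):
    """Boolean mask of which `points` lie inside the convex hull of `hull_points`.
    hull.equations rows are [normal, offset] with outward-pointing normals,
    so a point p is inside when normal . p + offset <= tol for every facet."""
    hull = sptl.ConvexHull(hull_points)
    normals = hull.equations[:, :-1]   # shape (n_facets, 3)
    offsets = hull.equations[:, -1]    # shape (n_facets,)
    return np.all(points @ normals.T + offsets <= tol, axis=1)
```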
def get_value(self, sid, dt, field):
"""
Parameters
----------
        sid : int
            The asset identifier.
        dt : datetime64-like
            Midnight of the day for which data is requested.
        field : string
            The price field. e.g. ('open', 'high', 'low', 'close', 'volume')
        Returns
        -------
        float
            The spot price for ``field`` of the given sid on the given day.
            Raises a NoDataOnDate exception if the given day and sid are
            before or after the date range of the equity.
            Returns NaN if the day is within the date range, but the price is
            0.
"""
ix = self.sid_day_index(sid, dt)
price = self._spot_col(field)[ix]
if field != 'volume':
if price == 0:
return nan
else:
return price * 0.001
else:
return price | 0.002112 |
def change_function_style(self, stripped_record, func_decl_style):
"""Converts a function definition syntax from the 'func_decl_style' to the one that has been
set in self.apply_function_style and returns the string with the converted syntax."""
if func_decl_style is None:
return stripped_record
if self.apply_function_style is None:
# user does not want to enforce any specific function style
return stripped_record
regex = FUNCTION_STYLE_REGEX[func_decl_style]
replacement = FUNCTION_STYLE_REPLACEMENT[self.apply_function_style]
changed_record = re.sub(regex, replacement, stripped_record)
return changed_record.strip() | 0.005517 |
def fire_lifecycle_event(self, new_state):
"""
Called when instance's state changes.
:param new_state: (Lifecycle State), the new state of the instance.
"""
if new_state == LIFECYCLE_STATE_SHUTTING_DOWN:
self.is_live = False
self.state = new_state
self.logger.info(self._git_info + "HazelcastClient is %s", new_state, extra=self._logger_extras)
for listener in list(self._listeners.values()):
try:
listener(new_state)
except:
self.logger.exception("Exception in lifecycle listener", extra=self._logger_extras) | 0.007776 |
def get_os_file_names(files):
"""
returns file names
    :param files: list of strings and/or :class:`file_configuration_t`
instances.
:type files: list
"""
fnames = []
for f in files:
if utils.is_str(f):
fnames.append(f)
elif isinstance(f, file_configuration_t):
if f.content_type in (
file_configuration_t.CONTENT_TYPE.STANDARD_SOURCE_FILE,
file_configuration_t.CONTENT_TYPE.CACHED_SOURCE_FILE):
fnames.append(f.data)
else:
pass
return fnames | 0.002933 |
def ParseFileObject(self, parser_mediator, file_object):
"""Parses an ASL file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
file_header_map = self._GetDataTypeMap('asl_file_header')
try:
file_header, _ = self._ReadStructureFromFileObject(
file_object, 0, file_header_map)
except (ValueError, errors.ParseError) as exception:
raise errors.UnableToParseFile(
'Unable to parse file header with error: {0!s}'.format(
exception))
if file_header.signature != self._FILE_SIGNATURE:
raise errors.UnableToParseFile('Invalid file signature.')
# TODO: generate event for creation time.
file_size = file_object.get_size()
if file_header.first_log_entry_offset > 0:
last_log_entry_offset = 0
file_offset = file_header.first_log_entry_offset
while file_offset < file_size:
last_log_entry_offset = file_offset
try:
file_offset = self._ParseRecord(
parser_mediator, file_object, file_offset)
except errors.ParseError as exception:
parser_mediator.ProduceExtractionWarning(
'unable to parse record with error: {0!s}'.format(exception))
return
if file_offset == 0:
break
if last_log_entry_offset != file_header.last_log_entry_offset:
parser_mediator.ProduceExtractionWarning(
'last log entry offset does not match value in file header.') | 0.007051 |
def send_media(self, path, chatid, caption):
"""
converts the file to base64 and sends it using the sendImage function of wapi.js
:param path: file path
:param chatid: chatId to be sent
        :param caption: caption text to send along with the media
        :return: the result of the wapi.js sendImage call
"""
imgBase64 = self.convert_to_base64(path)
filename = os.path.split(path)[-1]
return self.wapi_functions.sendImage(imgBase64, chatid, filename, caption) | 0.008909 |
def create_parser():
"""Creat a commandline parser for epubcheck
:return Argumentparser:
"""
parser = ArgumentParser(
prog='epubcheck',
description="EpubCheck v%s - Validate your ebooks" % __version__
)
# Arguments
parser.add_argument(
'path',
nargs='?',
default=getcwd(),
help="Path to EPUB-file or folder for batch validation. "
"The current directory will be processed if this argument "
"is not specified."
)
# Options
parser.add_argument(
'-x', '--xls', nargs='?', type=FileType(mode='wb'),
const='epubcheck_report.xls',
help='Create a detailed Excel report.'
)
parser.add_argument(
'-c', '--csv', nargs='?', type=FileType(mode='wb'),
const='epubcheck_report.csv',
help='Create a CSV report.'
)
parser.add_argument(
'-r', '--recursive', action='store_true',
help='Recurse into subfolders.'
)
return parser | 0.000981 |
def delete_external_account(resource_root, name):
"""
Delete an external account by name
@param resource_root: The root Resource object.
@param name: Account name
@return: The deleted ApiExternalAccount object
"""
return call(resource_root.delete,
EXTERNAL_ACCOUNT_FETCH_PATH % ("delete", name,),
ApiExternalAccount, False) | 0.014327 |
def convert_coco_stuff_mat(data_dir, out_dir):
"""Convert to png and save json with path. This currently only contains
the segmentation labels for objects+stuff in cocostuff - if we need to
combine with other labels from original COCO that will be a TODO."""
sets = ['train', 'val']
categories = []
json_name = 'coco_stuff_%s.json'
ann_dict = {}
for data_set in sets:
file_list = os.path.join(data_dir, '%s.txt')
images = []
with open(file_list % data_set) as f:
for img_id, img_name in enumerate(f):
img_name = img_name.replace('coco', 'COCO').strip('\n')
image = {}
mat_file = os.path.join(
data_dir, 'annotations/%s.mat' % img_name)
data = h5py.File(mat_file, 'r')
labelMap = data.get('S')
if len(categories) == 0:
labelNames = data.get('names')
for idx, n in enumerate(labelNames):
categories.append(
{"id": idx, "name": ''.join(chr(i) for i in data[
n[0]])})
ann_dict['categories'] = categories
scipy.misc.imsave(
os.path.join(data_dir, img_name + '.png'), labelMap)
image['width'] = labelMap.shape[0]
image['height'] = labelMap.shape[1]
image['file_name'] = img_name
image['seg_file_name'] = img_name
image['id'] = img_id
images.append(image)
ann_dict['images'] = images
print("Num images: %s" % len(images))
with open(os.path.join(out_dir, json_name % data_set), 'wb') as outfile:
outfile.write(json.dumps(ann_dict)) | 0.0011 |
def louvain(adjacency_matrix):
"""
Performs community embedding using the LOUVAIN method.
Introduced in: Blondel, V. D., Guillaume, J. L., Lambiotte, R., & Lefebvre, E. (2008).
Fast unfolding of communities in large networks.
Journal of Statistical Mechanics: Theory and Experiment, 2008(10), P10008.
Inputs: - A in R^(nxn): Adjacency matrix of an undirected network represented as a SciPy Sparse COOrdinate matrix.
Outputs: - X in R^(nxC_n): The latent space embedding represented as a SciPy Sparse COOrdinate matrix.
"""
# Convert to networkx undirected graph.
adjacency_matrix = nx.from_scipy_sparse_matrix(adjacency_matrix, create_using=nx.Graph())
# Call LOUVAIN algorithm to calculate a hierarchy of communities.
tree = community.generate_dendogram(adjacency_matrix, part_init=None)
# Embed communities
row = list()
col = list()
append_row = row.append
append_col = col.append
community_counter = 0
for i in range(len(tree)):
partition = community.partition_at_level(tree, i)
for n, c in partition.items():
append_row(n)
append_col(community_counter + c)
community_counter += max(partition.values()) + 1
row = np.array(row)
col = np.array(col)
data = np.ones(row.size, dtype=np.float64)
louvain_features = sparse.coo_matrix((data, (row, col)), shape=(len(partition.keys()), community_counter),
dtype=np.float64)
return louvain_features | 0.004467 |
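A minimal usage sketch for the function above, assuming the `networkx` and python-louvain `community` packages it imports are installed; the toy graph is made up purely for illustration.

```python
import numpy as np
import scipy.sparse as sparse

# Toy undirected graph: two triangles (0-1-2 and 3-4-5) joined by the edge 2-3.
edges = [(0, 1), (1, 2), (2, 0), (3, 4), (4, 5), (5, 3), (2, 3)]
row = np.array([i for i, j in edges] + [j for i, j in edges])
col = np.array([j for i, j in edges] + [i for i, j in edges])
data = np.ones(row.size)
adjacency = sparse.coo_matrix((data, (row, col)), shape=(6, 6))

features = louvain(adjacency)
# One row per node; columns index the communities found at every level of the dendrogram.
print(features.shape, features.nnz)
```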
def parse_shebang_from_file(path):
"""Parse the shebang given a file path."""
if not os.path.lexists(path):
raise ValueError('{} does not exist.'.format(path))
if not os.access(path, os.X_OK):
return ()
with open(path, 'rb') as f:
return parse_shebang(f) | 0.00339 |
def author_notes(soup):
"""
Find the fn tags included in author-notes
"""
author_notes = []
author_notes_section = raw_parser.author_notes(soup)
if author_notes_section:
fn_nodes = raw_parser.fn(author_notes_section)
for tag in fn_nodes:
if 'fn-type' in tag.attrs:
if(tag['fn-type'] != 'present-address'):
author_notes.append(node_text(tag))
return author_notes | 0.002198 |
def add_health_monitor(self, type, delay=10, timeout=10,
attemptsBeforeDeactivation=3, path="/", statusRegex=None,
bodyRegex=None, hostHeader=None):
"""
Adds a health monitor to the load balancer. If a monitor already
exists, it is updated with the supplied settings.
"""
abd = attemptsBeforeDeactivation
return self.manager.add_health_monitor(self, type=type, delay=delay,
timeout=timeout, attemptsBeforeDeactivation=abd,
path=path, statusRegex=statusRegex, bodyRegex=bodyRegex,
hostHeader=hostHeader) | 0.011254 |
def ExamineEvent(self, mediator, event):
"""Analyzes an event.
Args:
mediator (AnalysisMediator): mediates interactions between
analysis plugins and other components, such as storage and dfvfs.
event (EventObject): event to examine.
"""
# This event requires an URL attribute.
url = getattr(event, 'url', None)
if not url:
return
# TODO: refactor this the source should be used in formatting only.
# Check if we are dealing with a web history event.
source, _ = formatters_manager.FormattersManager.GetSourceStrings(event)
if source != 'WEBHIST':
return
for engine, url_expression, method_name in self._URL_FILTERS:
callback_method = getattr(self, method_name, None)
if not callback_method:
        logger.warning('Missing method: {0:s}'.format(method_name))
continue
match = url_expression.search(url)
if not match:
continue
search_query = callback_method(url)
if not search_query:
logger.warning('Missing search query for URL: {0:s}'.format(url))
continue
search_query = self._DecodeURL(search_query)
if not search_query:
continue
event_tag = self._CreateEventTag(
event, self._EVENT_TAG_COMMENT, self._EVENT_TAG_LABELS)
mediator.ProduceEventTag(event_tag)
self._counter['{0:s}:{1:s}'.format(engine, search_query)] += 1
# Add the timeline format for each search term.
timestamp = getattr(event, 'timestamp', 0)
source = getattr(event, 'parser', 'N/A')
source = getattr(event, 'plugin', source)
self._search_term_timeline.append(
SEARCH_OBJECT(timestamp, source, engine, search_query)) | 0.010957 |
def parse_config(self, config_file):
"""
Given a configuration file, read in and interpret the results
        :param config_file: path to a JSON configuration file
        :return: None
"""
with open(config_file, 'r') as f:
config = json.load(f)
self.params = config
if self.params['proxy']['proxy_type']:
self.params['proxy'] = {self.params['proxy']['proxy_type']:
self.params['proxy']['proxy_url']} | 0.004237 |
def to_dict(cls, network=None, phases=[], element=['pore', 'throat'],
interleave=True, flatten=True, categorize_by=[]):
r"""
Returns a single dictionary object containing data from the given
OpenPNM objects, with the keys organized differently depending on
optional arguments.
Parameters
----------
network : OpenPNM Network Object (optional)
The network containing the desired data
phases : list of OpenPNM Phase Objects (optional, default is none)
A list of phase objects whose data are to be included
element : string or list of strings
An indication of whether 'pore' and/or 'throat' data are desired.
The default is both.
interleave : boolean (default is ``True``)
When ``True`` (default) the data from all Geometry objects (and
Physics objects if ``phases`` are given) is interleaved into
a single array and stored as a network property (or Phase
property for Physics data). When ``False``, the data for each
object are stored under their own dictionary key, the structuring
of which depends on the value of the ``flatten`` argument.
flatten : boolean (default is ``True``)
When ``True``, all objects are accessible from the top level
of the dictionary. When ``False`` objects are nested under their
parent object. If ``interleave`` is ``True`` this argument is
ignored.
categorize_by : string or list of strings
Indicates how the dictionaries should be organized. The list can
contain any, all or none of the following strings:
**'object'** : If specified the dictionary keys will be stored
under a general level corresponding to their type (e.g.
            'network/net_01/pore.all'). If ``interleave`` is ``True`` then
            the only categories are *network* and *phase*, since
*geometry* and *physics* data get stored under their respective
*network* and *phase*.
**'data'** : If specified the data arrays are additionally
categorized by ``label`` and ``property`` to separate *boolean*
from *numeric* data.
**'element'** : If specified the data arrays are
additionally categorized by ``pore`` and ``throat``, meaning
that the propnames are no longer prepended by a 'pore.' or
'throat.'
Returns
-------
A dictionary with the data stored in a hierarchical data structure, the
actual format of which depends on the arguments to the function.
Notes
-----
There is a handy package called *flatdict* that can be used to
access this dictionary using a single key such that:
``d[level_1][level_2] == d[level_1/level_2]``
Importantly, converting to a *flatdict* allows it be converted to an
*HDF5* file directly, since the hierarchy is dictated by the placement
of '/' characters.
"""
project, network, phases = cls._parse_args(network=network,
phases=phases)
delim = ' | '
d = NestedDict(delimiter=delim)
def build_path(obj, key):
propname = delim + key
prefix = 'root'
datatype = ''
arr = obj[key]
if 'object' in categorize_by:
prefix = obj._isa()
if 'element' in categorize_by:
propname = delim + key.replace('.', delim)
if 'data' in categorize_by:
if arr.dtype == bool:
datatype = delim + 'labels'
else:
datatype = delim + 'properties'
path = prefix + delim + obj.name + datatype + propname
return path
for net in network:
for key in net.keys(element=element, mode='all'):
path = build_path(obj=net, key=key)
d[path] = net[key]
for geo in project.geometries().values():
for key in geo.keys(element=element, mode='all'):
if interleave:
path = build_path(obj=net, key=key)
d[path] = net[key]
else:
path = build_path(obj=geo, key=key)
if flatten:
d[path] = geo[key]
elif 'object' in categorize_by:
path = path.split(delim)
path.insert(0, 'network')
path.insert(1, net.name)
path = delim.join(path)
else:
path = path.split(delim)
path.insert(1, net.name)
path = delim.join(path)
d[path] = geo[key]
for phase in phases:
for key in phase.keys(element=element, mode='all'):
path = build_path(obj=phase, key=key)
d[path] = phase[key]
for phys in project.find_physics(phase=phase):
if phys:
for key in phys.keys(element=element, mode='all'):
if interleave:
path = build_path(obj=phase, key=key)
d[path] = phase[key]
else:
path = build_path(obj=phys, key=key)
if flatten:
d[path] = phys[key]
elif 'object' in categorize_by:
path = path.split(delim)
path.insert(0, 'phase')
path.insert(1, phase.name)
path = delim.join(path)
else:
path = path.split(delim)
path.insert(1, phase.name)
path = delim.join(path)
d[path] = phys[key]
if 'root' in d.keys():
d = d['root']
if 'project' in categorize_by:
new_d = NestedDict()
new_d[project.name] = d
d = new_d
return d | 0.000461 |
def _convert_to_namecheap(self, record):
""" converts from lexicon format record to namecheap format record,
suitable to sending through the api to namecheap"""
name = record['name']
if name.endswith('.'):
name = name[:-1]
short_name = name[:name.find(self.domain) - 1]
processed_record = {
'Type': record['type'],
'Name': short_name,
'TTL': record['ttl'],
'Address': record['content'],
'HostId': record['id']
}
return processed_record | 0.003484 |
def restart_kernel(self):
"""Restart kernel of current client."""
client = self.get_current_client()
if client is not None:
self.switch_to_plugin()
client.restart_kernel() | 0.008929 |
def _getStringStream(self, filename, prefer='unicode'):
"""Gets a string representation of the requested filename.
Checks for both ASCII and Unicode representations and returns
a value if possible. If there are both ASCII and Unicode
versions, then the parameter /prefer/ specifies which will be
returned.
"""
if isinstance(filename, list):
# Join with slashes to make it easier to append the type
filename = "/".join(filename)
asciiVersion = self._getStream(filename + '001E')
unicodeVersion = windowsUnicode(self._getStream(filename + '001F'))
if asciiVersion is None:
return unicodeVersion
elif unicodeVersion is None:
return asciiVersion.decode('ascii', 'ignore')
else:
if prefer == 'unicode':
return unicodeVersion
else:
return asciiVersion.decode('ascii', 'ignore') | 0.002053 |
def set_implementation(impl):
"""
Sets the implementation of this module
Parameters
----------
impl : str
One of ["python", "c"]
"""
global __impl__
if impl.lower() == 'python':
__impl__ = __IMPL_PYTHON__
elif impl.lower() == 'c':
__impl__ = __IMPL_C__
else:
import warnings
warnings.warn('Implementation '+impl+' is not known. Using the fallback python implementation.')
__impl__ = __IMPL_PYTHON__ | 0.004098 |
def generateSimpleSequences(nCoinc=10, seqLength=[5,6,7], nSeq=100):
"""
Generate a set of simple sequences. The elements of the sequences will be
integers from 0 to 'nCoinc'-1. The length of each sequence will be
randomly chosen from the 'seqLength' list.
Parameters:
-----------------------------------------------
nCoinc: the number of elements available to use in the sequences
seqLength: a list of possible sequence lengths. The length of each
sequence will be randomly chosen from here.
nSeq: The number of sequences to generate
retval: a list of sequences. Each sequence is itself a list
containing the coincidence indices for that sequence.
"""
coincList = range(nCoinc)
seqList = []
for i in xrange(nSeq):
if max(seqLength) <= nCoinc:
seqList.append(random.sample(coincList, random.choice(seqLength)))
else:
      seqLen = random.choice(seqLength)
      seq = []
      for x in xrange(seqLen):
seq.append(random.choice(coincList))
seqList.append(seq)
return seqList | 0.012951 |
def _are_nearby_parallel_boxes(self, b1, b2):
"Are two boxes nearby, parallel, and similar in width?"
if not self._are_aligned_angles(b1.angle, b2.angle):
return False
# Otherwise pick the smaller angle and see whether the two boxes are close according to the "up" direction wrt that angle
angle = min(b1.angle, b2.angle)
return abs(np.dot(b1.center - b2.center, [-np.sin(angle), np.cos(angle)])) < self.lineskip_tol * (
b1.height + b2.height) and (b1.width > 0) and (b2.width > 0) and (0.5 < b1.width / b2.width < 2.0) | 0.008576 |
def describe_pipelines(pipeline_ids, region=None, key=None, keyid=None, profile=None):
'''
Retrieve metadata about one or more pipelines.
CLI example:
.. code-block:: bash
salt myminion boto_datapipeline.describe_pipelines ['my_pipeline_id']
'''
client = _get_client(region, key, keyid, profile)
r = {}
try:
r['result'] = client.describe_pipelines(pipelineIds=pipeline_ids)
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
r['error'] = six.text_type(e)
return r | 0.005348 |
def load_client_ca(self, cafile):
"""
Load the trusted certificates that will be sent to the client. Does
not actually imply any of the certificates are trusted; that must be
configured separately.
:param bytes cafile: The path to a certificates file in PEM format.
:return: None
"""
ca_list = _lib.SSL_load_client_CA_file(
_text_to_bytes_and_warn("cafile", cafile)
)
_openssl_assert(ca_list != _ffi.NULL)
_lib.SSL_CTX_set_client_CA_list(self._context, ca_list) | 0.003552 |
def drop(self, format_p, action):
"""Informs the source that a drop event occurred for a pending
drag and drop operation.
in format_p of type str
The mime type the data must be in.
in action of type :class:`DnDAction`
The action to use.
return progress of type :class:`IProgress`
Progress object to track the operation completion.
raises :class:`VBoxErrorVmError`
VMM device is not available.
"""
if not isinstance(format_p, basestring):
raise TypeError("format_p can only be an instance of type basestring")
if not isinstance(action, DnDAction):
raise TypeError("action can only be an instance of type DnDAction")
progress = self._call("drop",
in_p=[format_p, action])
progress = IProgress(progress)
return progress | 0.005447 |
def restful(self, path, params):
"""
Allows you to make a direct REST call if you know the path
Arguments:
:param path: The path of the request. Example: sobjects/User/ABC123/password'
:param params: dict of parameters to pass to the path
"""
url = self._get_norm_base_url() + path
response = requests.get(url, headers=self._get_rest_headers(), params=params)
if response.status_code != 200:
raise Exception(response)
json_result = response.json(object_pairs_hook=OrderedDict)
if len(json_result) == 0:
return None
else:
return json_result | 0.005944 |
def post_db_dump(self):
"""
        Runs methods for services that have requested to be run after each
        database dump.
"""
for service in self.genv.services:
service = service.strip().upper()
funcs = common.service_post_db_dumpers.get(service)
if funcs:
print('Running post-database dump for service %s...' % (service,))
for func in funcs:
func() | 0.006494 |
def clean_old_jobs():
'''
Called in the master's event loop every loop_interval. Archives and/or
deletes the events and job details from the database.
:return:
'''
if __opts__.get('keep_jobs', False) and int(__opts__.get('keep_jobs', 0)) > 0:
try:
with _get_serv() as cur:
sql = 'select date_sub(now(), interval {0} hour) as stamp;'.format(__opts__['keep_jobs'])
cur.execute(sql)
rows = cur.fetchall()
stamp = rows[0][0]
if __opts__.get('archive_jobs', False):
_archive_jobs(stamp)
else:
_purge_jobs(stamp)
except MySQLdb.Error as e:
log.error('Mysql returner was unable to get timestamp for purge/archive of jobs')
log.error(six.text_type(e))
raise salt.exceptions.SaltRunnerError(six.text_type(e)) | 0.0044 |
def clear(self):
"""
Clears all the data in the object, keeping original data
"""
self.__modified_data__ = {}
self.__deleted_fields__ = [field for field in self.__original_data__.keys()] | 0.013274 |
def linewidth(self, linewidth=None):
"""Returns or sets (if a value is provided) the width of the series'
line.
:param Number linewidth: If given, the series' linewidth will be set to\
this.
:rtype: ``Number``"""
if linewidth is None:
return self._linewidth
else:
if not is_numeric(linewidth):
raise TypeError(
"linewidth must be number, not '%s'" % str(linewidth)
)
self._linewidth = linewidth | 0.005587 |
def delayed_close(self):
"""Delayed close - won't close immediately, but on next ioloop tick."""
self.state = CLOSING
self.server.io_loop.add_callback(self.close) | 0.010753 |
def parse(cls, fptr, offset, length):
"""Parse component definition box.
Parameters
----------
fptr : file
Open file object.
offset : int
Start position of box in bytes.
length : int
Length of the box in bytes.
Returns
-------
ComponentDefinitionBox
Instance of the current component definition box.
"""
num_bytes = offset + length - fptr.tell()
read_buffer = fptr.read(num_bytes)
# Read the number of components.
num_components, = struct.unpack_from('>H', read_buffer)
data = struct.unpack_from('>' + 'HHH' * num_components, read_buffer,
offset=2)
index = data[0:num_components * 6:3]
channel_type = data[1:num_components * 6:3]
association = data[2:num_components * 6:3]
return cls(index=tuple(index),
channel_type=tuple(channel_type),
association=tuple(association),
length=length, offset=offset) | 0.001826 |
def _operator_generator(index, conj):
"""
Internal method to generate the appropriate operator
"""
pterm = PauliTerm('I', 0, 1.0)
Zstring = PauliTerm('I', 0, 1.0)
for j in range(index):
Zstring = Zstring*PauliTerm('Z', j, 1.0)
pterm1 = Zstring*PauliTerm('X', index, 0.5)
scalar = 0.5 * conj * 1.0j
pterm2 = Zstring*PauliTerm('Y', index, scalar)
pterm = pterm * (pterm1 + pterm2)
pterm = pterm.simplify()
return pterm | 0.003802 |
def _validate_metadata(metadata_props):
'''
Validate metadata properties and possibly show warnings or throw exceptions.
:param metadata_props: A dictionary of metadata properties, with property names and values (see :func:`~onnxmltools.utils.metadata_props.add_metadata_props` for examples)
'''
if len(CaseInsensitiveDict(metadata_props)) != len(metadata_props):
raise RuntimeError('Duplicate metadata props found')
for key, value in metadata_props.items():
valid_values = KNOWN_METADATA_PROPS.get(key)
if valid_values and value.lower() not in valid_values:
warnings.warn('Key {} has invalid value {}. Valid values are {}'.format(key, value, valid_values)) | 0.005563 |
def _get_passwordkey(self):
"""This method just hashes self.password."""
sha = SHA256.new()
sha.update(self.password.encode('utf-8'))
return sha.digest() | 0.010753 |
def _int_size_to_type(size):
"""
Return the Catalyst datatype from the size of integers.
"""
if size <= 8:
return ByteType
if size <= 16:
return ShortType
if size <= 32:
return IntegerType
if size <= 64:
return LongType | 0.003584 |
def convert_gemm(params, w_name, scope_name, inputs, layers, weights, names):
"""
Convert Linear.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers
"""
print('Converting Linear ...')
if names == 'short':
tf_name = 'FC' + random_string(6)
elif names == 'keep':
tf_name = w_name
else:
tf_name = w_name + str(random.random())
bias_name = '{0}.bias'.format(w_name)
weights_name = '{0}.weight'.format(w_name)
W = weights[weights_name].numpy().transpose()
input_channels, output_channels = W.shape
keras_weights = [W]
has_bias = False
if bias_name in weights:
bias = weights[bias_name].numpy()
keras_weights = [W, bias]
has_bias = True
dense = keras.layers.Dense(
output_channels,
weights=keras_weights, use_bias=has_bias, name=tf_name, bias_initializer='zeros', kernel_initializer='zeros',
)
layers[scope_name] = dense(layers[inputs[0]]) | 0.00163 |
def getPostStates(self):
'''
Slightly extends the base version of this method by recalculating aLvlNow to account for the
consumer's (potential) misperception about their productivity level.
Parameters
----------
None
Returns
-------
None
'''
AggShockConsumerType.getPostStates(self)
self.cLvlNow = self.cNrmNow*self.pLvlNow # True consumption level
self.aLvlNow = self.mLvlTrueNow - self.cLvlNow # True asset level
self.aNrmNow = self.aLvlNow/self.pLvlNow | 0.008772 |
def getImageForExpression(self, retina_name, body, image_scalar=2, plot_shape="circle", image_encoding="base64/png", sparsity=1.0):
"""Get images for expressions
Args:
retina_name, str: The retina name (required)
body, ExpressionOperation: The JSON encoded expression to be evaluated (required)
image_scalar, int: The scale of the image (optional) (optional)
plot_shape, str: The image shape (optional) (optional)
image_encoding, str: The encoding of the returned image (optional)
sparsity, float: Sparsify the resulting expression to this percentage (optional)
Returns: java.io.ByteArrayInputStream
"""
resourcePath = '/image'
method = 'POST'
queryParams = {}
headerParams = {'Accept': 'image/png', 'Content-Type': 'application/json'}
postData = None
queryParams['retina_name'] = retina_name
queryParams['image_scalar'] = image_scalar
queryParams['plot_shape'] = plot_shape
queryParams['image_encoding'] = image_encoding
queryParams['sparsity'] = sparsity
postData = body
response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)
return response.content | 0.005364 |
def discoverLangs(self,domain="*"):
"""
Generates a list of languages based on files found on disk.
The optional ``domain`` argument may specify a domain to use when checking
for files. By default, all domains are checked.
This internally uses the :py:mod:`glob` built-in module and the
:confval:`i18n.lang.format` config option to find suitable filenames.
It then applies the regex in :confval:`i18n.discover_regex` to extract the
language code.
"""
rsrc = self.peng.cfg["i18n.lang.format"].format(domain=domain,lang="*")
pattern = self.peng.rsrcMgr.resourceNameToPath(rsrc,self.peng.cfg["i18n.lang.ext"])
files = glob.iglob(pattern)
langs = set()
r = re.compile(self.peng.cfg["i18n.discover_regex"])
for f in files:
m = r.fullmatch(f)
if m is not None:
langs.add(m.group("lang"))
return list(langs) | 0.012833 |
def getRootJobs(self):
"""
:return: The roots of the connected component of jobs that contains this job. \
A root is a job with no predecessors.
:rtype : set of toil.job.Job instances
"""
roots = set()
visited = set()
#Function to get the roots of a job
def getRoots(job):
if job not in visited:
visited.add(job)
if len(job._directPredecessors) > 0:
list(map(lambda p : getRoots(p), job._directPredecessors))
else:
roots.add(job)
#The following call ensures we explore all successor edges.
list(map(lambda c : getRoots(c), job._children +
job._followOns))
getRoots(self)
return roots | 0.010922 |
def _from_binary_attrlist_e(cls, binary_stream):
"""See base class."""
'''
Attribute type - 4
Length of a particular entry - 2
Length of the name - 1 (in characters)
Offset to name - 1
Starting VCN - 8
File reference - 8
Attribute ID - 1
Name (unicode) - variable
'''
attr_type, entry_len, name_len, name_off, s_vcn, f_tag, attr_id = cls._REPR.unpack(binary_stream[:cls._REPR.size])
if name_len:
name = binary_stream[name_off:name_off+(2*name_len)].tobytes().decode("utf_16_le")
else:
name = None
file_ref, file_seq = get_file_reference(f_tag)
nw_obj = cls((AttrTypes(attr_type), entry_len, name_off, s_vcn, file_ref, file_seq, attr_id, name))
_MOD_LOGGER.debug("Attempted to unpack ATTRIBUTE_LIST Entry from \"%s\"\nResult: %s", binary_stream.tobytes(), nw_obj)
return nw_obj | 0.005562 |
def buffer(self):
'''
Get a copy of the buffer that this is reading from. Returns a
buffer object
'''
return buffer(self._input, self._start_pos,
(self._end_pos - self._start_pos)) | 0.008264 |
def add_server(self, name, prefer=False):
"""Add or update an NTP server entry to the node config
Args:
name (string): The IP address or FQDN of the NTP server.
prefer (bool): Sets the NTP server entry as preferred if True.
Returns:
True if the operation succeeds, otherwise False.
"""
if not name or re.match(r'^[\s]+$', name):
raise ValueError('ntp server name must be specified')
if prefer:
name = '%s prefer' % name
cmd = self.command_builder('ntp server', value=name)
return self.configure(cmd) | 0.0032 |
def apply_noise(data, noise):
"""
Applies noise to a sparse matrix. Noise can be an integer between 0 and
100, indicating the percentage of ones in the original input to move, or
a float in [0, 1), indicating the same thing.
The input matrix is modified in-place, and nothing is returned.
This operation does not affect the sparsity of the matrix, or of any
individual datapoint.
"""
if noise >= 1:
noise = noise/100.
for i in range(data.nRows()):
ones = data.rowNonZeros(i)[0]
replace_indices = numpy.random.choice(ones,
size = int(len(ones)*noise), replace = False)
for index in replace_indices:
data[i, index] = 0
new_indices = numpy.random.choice(data.nCols(),
size = int(len(ones)*noise), replace = False)
for index in new_indices:
while data[i, index] == 1:
index = numpy.random.randint(0, data.nCols())
data[i, index] = 1 | 0.018519 |
def _get_config(**api_opts):
'''
Return configuration
    user-passed api_opts override salt config.get vars
'''
config = {
'api_sslverify': True,
'api_url': 'https://INFOBLOX/wapi/v1.2.1',
'api_user': '',
'api_key': '',
}
if '__salt__' in globals():
config_key = '{0}.config'.format(__virtualname__)
config.update(__salt__['config.get'](config_key, {}))
# pylint: disable=C0201
for k in set(config.keys()) & set(api_opts.keys()):
config[k] = api_opts[k]
return config | 0.001786 |
def hilbertrot(n, x, y, rx, ry):
"""Rotates and flips a quadrant appropriately for the Hilbert scan
generator. See https://en.wikipedia.org/wiki/Hilbert_curve.
"""
if ry == 0:
if rx == 1:
x = n - 1 - x
y = n - 1 - y
return y, x
return x, y | 0.003344 |
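For context, this is the `rot()` helper from the standard Hilbert-curve construction (see the Wikipedia page cited in the docstring). A hedged sketch of the companion distance-to-coordinates routine that would call it is shown below; the name `hilbert_d2xy` is illustrative and not part of the original module.

```python
def hilbert_d2xy(n, d):
    """Convert distance d along the Hilbert curve filling an n x n grid
    (n a power of two) into (x, y) coordinates."""
    x = y = 0
    t = d
    s = 1
    while s < n:
        rx = 1 & (t // 2)
        ry = 1 & (t ^ rx)
        x, y = hilbertrot(s, x, y, rx, ry)  # rotate/flip the quadrant
        x += s * rx
        y += s * ry
        t //= 4
        s *= 2
    return x, y
```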
def snip_string(string, max_len=20, snip_string='...', snip_point=1.0):
"""
Snips a string so that it is no longer than max_len, replacing deleted
characters with the snip_string.
The snip is done at snip_point, which is a fraction between 0 and 1,
indicating relatively where along the string to snip. snip_point of
0.5 would be the middle.
>>> snip_string('this is long', 8)
'this ...'
>>> snip_string('this is long', 8, snip_point=0.5)
'th...ong'
>>> snip_string('this is long', 12)
'this is long'
>>> snip_string('this is long', 8, '~')
'this is~'
>>> snip_string('this is long', 8, '~', 0.5)
'thi~long'
"""
if len(string) <= max_len:
new_string = string
else:
visible_len = (max_len - len(snip_string))
start_len = int(visible_len*snip_point)
end_len = visible_len-start_len
new_string = string[0:start_len]+ snip_string
if end_len > 0:
new_string += string[-end_len:]
return new_string | 0.004762 |
def staticmap(ctx, mapid, output, features, lat, lon, zoom, size):
"""
Generate static map images from existing Mapbox map ids.
Optionally overlay with geojson features.
$ mapbox staticmap --features features.geojson mapbox.satellite out.png
$ mapbox staticmap --lon -61.7 --lat 12.1 --zoom 12 mapbox.satellite out2.png
An access token is required, see `mapbox --help`.
"""
access_token = (ctx.obj and ctx.obj.get('access_token')) or None
if features:
features = list(
cligj.normalize_feature_inputs(None, 'features', [features]))
service = mapbox.Static(access_token=access_token)
try:
res = service.image(
mapid,
lon=lon, lat=lat, z=zoom,
width=size[0], height=size[1],
features=features, sort_keys=True)
except mapbox.errors.ValidationError as exc:
raise click.BadParameter(str(exc))
if res.status_code == 200:
output.write(res.content)
else:
raise MapboxCLIException(res.text.strip()) | 0.001899 |
def data(self, index, role = QtCore.Qt.DisplayRole):
"""Reimplemented from QtCore.QAbstractItemModel
The value gets validated and is red if validation fails
and green if it passes.
"""
if not index.isValid():
return None
if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:
if index.column() == 0:
p = index.internalPointer()
k = self.get_key(p, index.row())
return k
if index.column() == 1:
v = self.get_value(index)
if not isinstance(v, Section):
return self._val_to_str(v)
if index.column() == 2:
return self.get_configspec_str(index)
if role == QtCore.Qt.ForegroundRole:
if index.column() == 1:
v = self.get_value(index)
if not isinstance(v, Section):
spec = self.get_configspec_str(index)
if spec is None or isinstance(spec, Section):
return
try:
self._vld.check(spec, v)
except ValidateError:
return QtGui.QBrush(self._invalid_col)
else:
return QtGui.QBrush(self._valid_col) | 0.002959 |
def telnet_config(self, status):
"""
status:
false - Telnet is disabled
true - Telnet is enabled
"""
ret = self.command(
'configManager.cgi?action=setConfig&Telnet.Enable={0}'.format(
status)
)
return ret.content.decode('utf-8') | 0.006079 |
def get_list(self, key, default=UndefinedKey):
"""Return list representation of value found at key
:param key: key to use (dot separated). E.g., a.b.c
:type key: basestring
:param default: default value if key not found
:type default: list
:return: list value
:type return: list
"""
value = self.get(key, default)
if isinstance(value, list):
return value
elif isinstance(value, ConfigTree):
lst = []
for k, v in sorted(value.items(), key=lambda kv: kv[0]):
if re.match('^[1-9][0-9]*$|0', k):
lst.append(v)
else:
raise ConfigException(u"{key} does not translate to a list".format(key=key))
return lst
elif value is None:
return None
else:
raise ConfigException(
u"{key} has type '{type}' rather than 'list'".format(key=key, type=type(value).__name__)) | 0.003941 |
def getRow(self, key):
"""
Get a row by value of the indexing columns. If the index is not
specified, gets the only row of a dataframe with no indexing columns.
Args:
key: Tuple representing the index of the desired row.
Returns:
The row.
"""
return Row(self._impl.getRow(Tuple(key)._impl)) | 0.005376 |
def calculate_permute_output_shapes(operator):
'''
Allowed input/output patterns are
1. [N, C, H, W] ---> [N', C', H', W']
Note that here [N', C', H', W'] means all possible permutations of [N, C, H, W]
'''
check_input_and_output_numbers(operator, input_count_range=1, output_count_range=1)
check_input_and_output_types(operator, good_input_types=[FloatTensorType, Int64TensorType, StringTensorType],
good_output_types=[FloatTensorType, Int64TensorType, StringTensorType])
input = operator.inputs[0]
output = operator.outputs[0]
axes = [int(i) for i in operator.raw_operator.permute.axis]
input_shape = copy.deepcopy(input.type.shape)
output.type.shape = [input_shape[a] for a in axes] | 0.006468 |
def getAttachment(self, oid, attachment_id, out_folder=None):
"""
downloads a feature's attachment.
Inputs:
oid - object id of the feature
attachment_id - ID of the attachment. Should be an integer.
out_folder - save path of the file
Output:
string - full path of the file
"""
attachments = self.listAttachments(oid=oid)
if "attachmentInfos" in attachments:
for attachment in attachments['attachmentInfos']:
if "id" in attachment and \
attachment['id'] == attachment_id:
url = self._url + "/%s/attachments/%s" % (oid, attachment_id)
return self._get(url=url, param_dict={"f":'json'},
securityHandler=self._securityHandler,
out_folder=out_folder,
file_name=attachment['name'])
return None | 0.00402 |
def safe_nested_val(key_tuple, dict_obj, default_value=None):
"""Return a value from nested dicts by the order of the given keys tuple.
Parameters
----------
key_tuple : tuple
The keys to use for extraction, in order.
dict_obj : dict
The outer-most dict to extract from.
default_value : object, default None
The value to return when no matching nested value is found.
Returns
-------
value : object
The extracted value, if exists. Otherwise, the given default_value.
Example
-------
>>> dict_obj = {'a': {'b': 7}}
>>> safe_nested_val(('a', 'b'), dict_obj)
7
>>> safe_nested_val(('a', 'c'), dict_obj)
>>> safe_nested_val(('a', 'c'), dict_obj, 5)
5
>>> safe_nested_val(('d'), dict_obj, 5)
5
"""
try:
return get_nested_val(key_tuple, dict_obj)
except (KeyError, IndexError, TypeError):
return default_value | 0.00106 |
def powernodes_containing(self, name, directly=False) -> iter:
"""Yield all power nodes containing (power) node of given *name*.
If *directly* is True, will only yield the direct parent of given name.
"""
if directly:
yield from (node for node in self.all_in(name)
if name in self.inclusions[node])
else:
# This algorithm is very bad. Inverting the inclusion dict could
# be far better.
@functools.lru_cache(maxsize=self.node_number(count_pnode=True))
def contains_target(node, target):
succs = self.inclusions[node]
if target in succs:
return True
else:
return any(contains_target(succ, target) for succ in succs)
# populate the cache
for root in self.roots:
contains_target(root, name)
# output all that contains target at some level
yield from (node for node in self.inclusions.keys()
if contains_target(node, name)) | 0.001783 |
def has_reduction(expr):
"""Does `expr` contain a reduction?
Parameters
----------
expr : ibis.expr.types.Expr
An ibis expression
Returns
-------
truth_value : bool
Whether or not there's at least one reduction in `expr`
Notes
-----
The ``isinstance(op, ops.TableNode)`` check in this function implies
that we only examine every non-table expression that precedes the first
table expression.
"""
def fn(expr):
op = expr.op()
if isinstance(op, ops.TableNode): # don't go below any table nodes
return lin.halt, None
if isinstance(op, ops.Reduction):
return lin.halt, True
return lin.proceed, None
reduction_status = lin.traverse(fn, expr)
return any(reduction_status) | 0.001239 |
def to_root(df, path, key='my_ttree', mode='w', store_index=True, *args, **kwargs):
"""
Write DataFrame to a ROOT file.
Parameters
----------
path: string
File path to new ROOT file (will be overwritten)
key: string
Name of tree that the DataFrame will be saved as
mode: string, {'w', 'a'}
Mode that the file should be opened in (default: 'w')
store_index: bool (optional, default: True)
Whether the index of the DataFrame should be stored as
an __index__* branch in the tree
Notes
-----
Further *args and *kwargs are passed to root_numpy's array2root.
>>> df = DataFrame({'x': [1,2,3], 'y': [4,5,6]})
>>> df.to_root('test.root')
The DataFrame index will be saved as a branch called '__index__*',
where * is the name of the index in the original DataFrame
"""
if mode == 'a':
mode = 'update'
elif mode == 'w':
mode = 'recreate'
else:
raise ValueError('Unknown mode: {}. Must be "a" or "w".'.format(mode))
from root_numpy import array2tree
# We don't want to modify the user's DataFrame here, so we make a shallow copy
df_ = df.copy(deep=False)
if store_index:
name = df_.index.name
if name is None:
# Handle the case where the index has no name
name = ''
df_['__index__' + name] = df_.index
# Convert categorical columns into something root_numpy can serialise
for col in df_.select_dtypes(['category']).columns:
name_components = ['__rpCaT', col, str(df_[col].cat.ordered)]
name_components.extend(df_[col].cat.categories)
        if all('*' not in c for c in name_components):
sep = '*'
else:
raise ValueError('Unable to find suitable separator for columns')
df_[col] = df_[col].cat.codes
df_.rename(index=str, columns={col: sep.join(name_components)}, inplace=True)
arr = df_.to_records(index=False)
root_file = ROOT.TFile.Open(path, mode)
if not root_file:
raise IOError("cannot open file {0}".format(path))
if not root_file.IsWritable():
raise IOError("file {0} is not writable".format(path))
# Navigate to the requested directory
open_dirs = [root_file]
for dir_name in key.split('/')[:-1]:
current_dir = open_dirs[-1].Get(dir_name)
if not current_dir:
current_dir = open_dirs[-1].mkdir(dir_name)
current_dir.cd()
open_dirs.append(current_dir)
# The key is now just the top component
key = key.split('/')[-1]
# If a tree with that name exists, we want to update it
tree = open_dirs[-1].Get(key)
if not tree:
tree = None
tree = array2tree(arr, name=key, tree=tree)
tree.Write(key, ROOT.TFile.kOverwrite)
root_file.Close() | 0.001408 |
def rm_op(l, name, op):
"""Remove an opcode. This is used when basing a new Python release off
of another one, and there is an opcode that is in the old release
that was removed in the new release.
We are pretty aggressive about removing traces of the op.
"""
# opname is an array, so we need to keep the position in there.
l['opname'][op] = '<%s>' % op
if op in l['hasconst']:
l['hasconst'].remove(op)
if op in l['hascompare']:
l['hascompare'].remove(op)
if op in l['hascondition']:
l['hascondition'].remove(op)
if op in l['hasfree']:
l['hasfree'].remove(op)
if op in l['hasjabs']:
l['hasjabs'].remove(op)
if op in l['hasname']:
l['hasname'].remove(op)
if op in l['hasjrel']:
l['hasjrel'].remove(op)
if op in l['haslocal']:
l['haslocal'].remove(op)
if op in l['hasname']:
l['hasname'].remove(op)
if op in l['hasnargs']:
l['hasnargs'].remove(op)
if op in l['hasvargs']:
l['hasvargs'].remove(op)
if op in l['nofollow']:
l['nofollow'].remove(op)
assert l['opmap'][name] == op
del l['opmap'][name] | 0.012017 |
def group_add_user_action(model, request):
"""Add user to group.
"""
user_id = request.params.get('id')
if not user_id:
user_ids = request.params.getall('id[]')
else:
user_ids = [user_id]
try:
group = model.model
validate_add_users_to_groups(model, user_ids, [group.id])
for user_id in user_ids:
group.add(user_id)
group()
model.parent.invalidate(group.name)
localizer = get_localizer(request)
message = localizer.translate(_(
'added_user_to_group',
default="Added user '${uid}' to group '${gid}'.",
mapping={
'uid': ', '.join(user_ids),
'gid': group.id
}
))
return {
'success': True,
'message': message
}
except ManageMembershipError as e:
if e.reason is not LM_TARGET_UID_NOT_ALLOWED:
raise Exception(u"Unknown ManageMembershipError reason.")
localizer = get_localizer(request)
message = localizer.translate(_(
'lm_add_target_uid_not_allowed',
default=(
"Failed adding user '${uid}' to group '${gid}'. "
"Manage membership denied for user."
),
mapping={
'uid': e.data,
'gid': group.id
}
))
return {
'success': False,
'message': message
}
except Exception as e:
return {
'success': False,
'message': str(e)
} | 0.000625 |
def to_sql(self):
"""
This function build a sql condition string (those used in the 'WHERE' clause) based on given condition
Supported match pattern:
{a: 1} -> a == 1
{a: {$gt: 1}} -> a > 1
{a: {$gte: 1}} -> a >= 1
{a: {$lt: 1}} -> a < 1
{a: {$lte: 1}} -> a <= 1
{a: {$eq: 1}} -> a == 1
            {a: {$in: [1, 2]}} -> a in (1, 2)
{a: {$contains: '123'}} -> a like %123%
And complex combination
{$not: condition} -> NOT (condition)
{$and: [condition1, condition2]} -> condition1 and condition2
{$or: [condition1, condition2]} -> condition1 or condition2
"""
condition = self.condition
if condition:
# If the condition is not None nor empty
if len(condition.keys()) > 1:
# If in the form of {'a': 1, 'b': 2}, simplify to {'$and': [{'a': 1, 'b': 2}]}
split_conditions = []
for key in condition.keys():
split_conditions.append({key: condition[key]})
return QueryCondition({'$and': split_conditions}).to_sql()
else:
query_field, query_value = condition.items()[0]
if query_field in QueryCondition.COMPLEX_QUERY_INDICATOR:
# This is a composite query
if u'$not' == query_field:
not_condition = QueryCondition(query_value).to_sql()
if not_condition is not None:
return 'NOT (%s)' %(not_condition)
else:
return None
if query_field in (u'$or', u'$and', ):
conditions = query_value
if not isinstance(conditions, list) or len(conditions) < 2:
raise MonSQLException('QUERY VALUE FOR KEY %s MUST BE LIST WITH LENGTH BEING AT LEAST 2' %(query_field))
# compute sub conditions recursively
conditions = map(lambda c: QueryCondition(c).to_sql(), conditions)
conditions = filter(lambda c: c is not None, conditions)
# join them together
if len(conditions) > 0:
if query_field == u'$or':
return ' OR '.join(conditions)
elif query_field == u'$and':
return ' AND '.join(conditions)
else:
return None
else:
raise MonSQLException('Unsupport query_field')
else:
# This is a one-field query like {'id': ...}
if query_field in QueryCondition.MYSQL_RESERVE_WORDS:
query_field = "`%s`" %(query_field)
if not type(query_value) is types.DictType:
# transform {'id': 1} to {'id': {'$eq': 1}} for convenience
query_value = {'$eq': query_value}
if len(query_value.keys()) > 1:
# Deal with situation like a: {'$gt': 1, '$lt': 10}
# Split into {$and: [a: {'$gt': 1}, a: {'$lt': 10}]}
split_conditions = []
for key in query_value.keys():
split_conditions.append(QueryCondition({query_field: {key: query_value[key]}}))
return QueryCondition({'$and': split_conditions}).to_sql()
else:
# The simple case of {a: {$complex_operator: 1}}
complex_operator = query_value.keys()[0] # the complex operator
target_value = query_value[complex_operator]
query_str = None
if u"$contains" == complex_operator:
query_str = u"LIKE " + value_to_sql_str('%' + target_value + '%')
elif complex_operator in ('$eq', '$gte', '$gt', '$lt', '$lte'):
map_dic = {'$eq': '=', '$gte': '>=', '$gt': '>', '$lt': '<', '$lte': '<='}
query_str = map_dic[complex_operator] + value_to_sql_str(target_value)
elif u'$in' == complex_operator:
if len(target_value) == 0:
query_str = u"IN (null) "
else:
query_str = u"IN (" + u','.join([str(_v_) for _v_ in target_value]) + u") "
else:
raise MonSQLException(u"Unsupport complex query: %s" %(complex_operator))
return query_field + ' ' + query_str
else:
return None
# For testing
assert False | 0.004814 |
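A short, illustrative usage sketch of the query syntax documented above, assuming the surrounding MonSQL module (including its `value_to_sql_str` helper and `MonSQLException`) is importable; the field names are made up, and the module is written in Python 2 style (`dict.keys()` indexing, `types.DictType`).

```python
# Implicit equality, a range split into an internal $and, and a LIKE match.
cond = QueryCondition({
    'status': 'active',
    'age': {'$gte': 18, '$lt': 65},
    'name': {'$contains': 'foo'},
})
print('SELECT * FROM users WHERE %s' % cond.to_sql())

# Explicit OR of two sub-conditions, one using $in.
print(QueryCondition({'$or': [{'id': {'$in': [1, 2, 3]}},
                              {'id': {'$lt': 0}}]}).to_sql())
```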
def ces(subsystem, mechanisms=False, purviews=False, cause_purviews=False,
effect_purviews=False, parallel=False):
"""Return the conceptual structure of this subsystem, optionally restricted
to concepts with the mechanisms and purviews given in keyword arguments.
If you don't need the full |CauseEffectStructure|, restricting the possible
mechanisms and purviews can make this function much faster.
Args:
subsystem (Subsystem): The subsystem for which to determine the
|CauseEffectStructure|.
Keyword Args:
mechanisms (tuple[tuple[int]]): Restrict possible mechanisms to those
in this list.
purviews (tuple[tuple[int]]): Same as in |Subsystem.concept()|.
cause_purviews (tuple[tuple[int]]): Same as in |Subsystem.concept()|.
effect_purviews (tuple[tuple[int]]): Same as in |Subsystem.concept()|.
parallel (bool): Whether to compute concepts in parallel. If ``True``,
overrides :data:`config.PARALLEL_CONCEPT_EVALUATION`.
Returns:
CauseEffectStructure: A tuple of every |Concept| in the cause-effect
structure.
"""
if mechanisms is False:
mechanisms = utils.powerset(subsystem.node_indices, nonempty=True)
engine = ComputeCauseEffectStructure(mechanisms, subsystem, purviews,
cause_purviews, effect_purviews)
return CauseEffectStructure(engine.run(parallel or
config.PARALLEL_CONCEPT_EVALUATION),
subsystem=subsystem) | 0.000625 |
def extract_args(self, data):
"""
        Extracts IRC message arguments.
"""
args = []
data = data.strip(' ')
if ':' in data:
lhs, rhs = data.split(':', 1)
if lhs: args.extend(lhs.rstrip(' ').split(' '))
args.append(rhs)
else:
args.extend(data.split(' '))
return tuple(args) | 0.015831 |
def __validate_arguments(self):
"""!
        @brief Check input arguments of the CLIQUE algorithm; if one of them is not correct then an appropriate
        exception is thrown.
"""
if len(self.__data) == 0:
raise ValueError("Empty input data. Data should contain at least one point.")
if self.__amount_intervals <= 0:
raise ValueError("Incorrect amount of intervals '%d'. Amount of intervals value should be greater than 0." % self.__amount_intervals)
if self.__density_threshold < 0:
raise ValueError("Incorrect density threshold '%f'. Density threshold should not be negative." % self.__density_threshold) | 0.008499 |
def run(self):
"""
        Blocking method that runs the server.
"""
if self.tasks:
logger.info('Registered tasks: %s' % ', '.join(self.tasks))
else:
logger.info('No tasks registered')
logger.info('Listening on %s ...' % self.bind)
self.socket.bind(self.bind)
self.running = True
self.pre_run()
while self.running:
msg = self.socket.recv_pyobj()
action = msg.get('action')
if action == 'run_task':
if msg.get('name') not in self.tasks:
print(msg.get('name'))
print(self.tasks)
response = {
'success': False, 'exception': TaskNotFound(msg.get('name'))}
self.socket.send_pyobj(response)
else:
task = Task(str(uuid.uuid4()), msg.get('name'), [], {})
if msg.get('args'):
task.args = msg.get('args', [])
if msg.get('kwargs'):
task.kwargs = msg.get('kwargs', {})
self.task_result_backend.add_task(task)
self.queue.put(task.id)
self.socket.send_pyobj({'success': True, 'task': task})
elif action == 'get_result':
task_id = msg.get('id')
task = self.task_result_backend.get_task(task_id)
if task:
response = {'success': True, 'task': task}
else:
response = {
'success': False,
'id': task_id,
'exception': TaskNotFound(task_id)}
self.socket.send_pyobj(response)
elif action == 'wait':
task_id = msg.get('id')
task = self.task_result_backend.get_task(task_id)
if task:
while not task.terminated:
task = self.task_result_backend.get_task(task_id)
time.sleep(1)
response = {'success': True, 'task': task}
else:
response = {
'success': False,
'id': task_id,
'exception': TaskNotFound(task_id)}
self.socket.send_pyobj(response)
else:
response = {'success': False, 'exception': MessageMalformed()}
self.socket.send_pyobj(response) | 0.001165 |
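For illustration, these are the message shapes the loop above handles; the task name 'add' and the id are hypothetical placeholders, and a real client would send each dict over a ZeroMQ REQ socket with send_pyobj() and read the reply with recv_pyobj().
run_task_msg = {'action': 'run_task', 'name': 'add', 'args': [1, 2], 'kwargs': {}}
get_result_msg = {'action': 'get_result', 'id': 'hypothetical-task-uuid'}
wait_msg = {'action': 'wait', 'id': 'hypothetical-task-uuid'}

for msg in (run_task_msg, get_result_msg, wait_msg):
    print(msg)  # e.g. socket.send_pyobj(msg); reply = socket.recv_pyobj()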
def __fetch_crate_versions(self, crate_id):
"""Get crate versions data"""
raw_versions = self.client.crate_attribute(crate_id, "versions")
version_downloads = json.loads(raw_versions)
return version_downloads | 0.00823 |
def histograms(self, analytes=None, bins=25, logy=False,
filt=False, colourful=True):
"""
Plot histograms of analytes.
Parameters
----------
analytes : optional, array_like or str
The analyte(s) to plot. Defaults to all analytes.
bins : int
The number of bins in each histogram (default = 25)
logy : bool
If true, y axis is a log scale.
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
colourful : bool
If True, histograms are colourful :)
Returns
-------
(fig, axes)
"""
if analytes is None:
analytes = self.analytes
if self.focus_stage in ['ratio', 'calibrated']:
analytes = [a for a in analytes if self.internal_standard not in a]
if colourful:
cmap = self.cmaps
else:
cmap = None
self.get_focus(filt=filt)
fig, axes = plot.histograms(self.focus, keys=analytes,
bins=bins, logy=logy, cmap=cmap)
return fig, axes | 0.002297 |
def run(self):
'''
Gather currently connected minions and update the cache
'''
new_mins = list(salt.utils.minions.CkMinions(self.opts).connected_ids())
cc = cache_cli(self.opts)
cc.get_cached()
cc.put_cache([new_mins])
log.debug('ConCache CacheWorker update finished') | 0.009036 |
def get(
self: 'Option[Mapping[K,V]]',
key: K,
default=None
) -> 'Option[V]':
"""
Gets a mapping value by key in the contained value or returns
``default`` if the key doesn't exist.
Args:
key: The mapping key.
            default: The default value.
Returns:
* ``Some`` variant of the mapping value if the key exists
and the value is not None.
* ``Some(default)`` if ``default`` is not None.
* :py:data:`NONE` if ``default`` is None.
Examples:
>>> Some({'hi': 1}).get('hi')
Some(1)
>>> Some({}).get('hi', 12)
Some(12)
>>> NONE.get('hi', 12)
Some(12)
>>> NONE.get('hi')
NONE
"""
if self._is_some:
return self._type.maybe(self._val.get(key, default))
return self._type.maybe(default) | 0.003099 |
def validate(self):
"""Validate the parameters of the run. Raises self.Error if invalid parameters."""
errors = []
app = errors.append
if not self.hint_cores >= self.mpi_procs * self.omp_threads >= self.min_cores:
app("self.hint_cores >= mpi_procs * omp_threads >= self.min_cores not satisfied")
if self.omp_threads > self.hw.cores_per_node:
app("omp_threads > hw.cores_per_node")
if self.mem_per_proc > self.hw.mem_per_node:
app("mem_mb >= self.hw.mem_per_node")
if not self.max_mem_per_proc >= self.mem_per_proc >= self.min_mem_per_proc:
app("self.max_mem_per_proc >= mem_mb >= self.min_mem_per_proc not satisfied")
if self.priority <= 0:
app("priority must be > 0")
if not (1 <= self.min_cores <= self.hw.num_cores >= self.hint_cores):
app("1 <= min_cores <= hardware num_cores >= hint_cores not satisfied")
if errors:
            raise self.Error(str(self) + "\n" + "\n".join(errors))
def strip(self, text, *args, **kwargs):
"""
Try to maintain parity with what is extracted by extract since strip
will most likely be used in conjunction with extract
"""
if OEMBED_DEFAULT_PARSE_HTML:
extracted = self.extract_oembeds_html(text, *args, **kwargs)
else:
extracted = self.extract_oembeds(text, *args, **kwargs)
matches = [r['original_url'] for r in extracted]
        match_handler = lambda m: '' if m.group() in matches else m.group()
return re.sub(URL_RE, match_handler, text) | 0.008347 |
def matching(self):
"""Return found matching SBo packages
"""
for sbo in self.package_not_found:
for pkg in self.data:
if sbo in pkg and pkg not in self.blacklist:
self.package_found.append(pkg) | 0.007519 |
def append(self, clause, weight=None):
"""
Add one more clause to WCNF formula. This method additionally
updates the number of variables, i.e. variable ``self.nv``, used in
the formula.
The clause can be hard or soft depending on the ``weight``
argument. If no weight is set, the clause is considered to be hard.
:param clause: a new clause to add.
:param weight: integer weight of the clause.
:type clause: list(int)
:type weight: integer or None
.. code-block:: python
>>> from pysat.formula import WCNF
>>> cnf = WCNF()
>>> cnf.append([-1, 2])
>>> cnf.append([1], weight=10)
>>> cnf.append([-2], weight=20)
>>> print cnf.hard
[[-1, 2]]
>>> print cnf.soft
[[1], [-2]]
>>> print cnf.wght
[10, 20]
"""
self.nv = max([abs(l) for l in clause] + [self.nv])
if weight:
self.soft.append(clause)
self.wght.append(weight)
else:
self.hard.append(clause) | 0.002451 |
def approve(self, peer_jid):
"""
(Pre-)approve a subscription request from `peer_jid`.
:param peer_jid: The peer to (pre-)approve.
This sends a ``"subscribed"`` presence to the peer; if the peer has
previously asked for a subscription, this will seal the deal and create
the subscription.
If the peer has not requested a subscription (yet), it is marked as
pre-approved by the server. A future subscription request by the peer
will then be confirmed by the server automatically.
.. note::
Pre-approval is an OPTIONAL feature in :rfc:`6121`. It is announced
as a stream feature.
"""
self.client.enqueue(
stanza.Presence(type_=structs.PresenceType.SUBSCRIBED,
to=peer_jid)
) | 0.002367 |
def crop(self, cropping):
"""
Set `a:srcRect` child to crop according to *cropping* values.
"""
srcRect = self._add_srcRect()
srcRect.l, srcRect.t, srcRect.r, srcRect.b = cropping | 0.009132 |
def allowed_target_sdp_states(self):
"""Return a list of allowed target states for the current state."""
_current_state = self._sdp_state.current_state
_allowed_target_states = self._sdp_state.allowed_target_states[
_current_state]
        return json.dumps(
            dict(allowed_target_sdp_states=_allowed_target_states)) | 0.007874 |
def count_exceptions(self, c, broker):
"""
Count exceptions as processing proceeds
"""
if c in broker.exceptions:
self.counts['exception'] += len(broker.exceptions[c])
return self | 0.008658 |
def _get_accepted(self, graph):
"""
Find the accepted states
Args:
graph (DFA): The DFA states
Return:
list: Returns the list of the accepted states
"""
accepted = []
for state in graph.states:
if state.final != TropicalWeight(float('inf')):
accepted.append(state)
return accepted | 0.005051 |
def emit(self, record):
"""Send a LogRecord to the callback function, after preparing it
for serialization."""
try:
self._callback(self.prepare(record))
except Exception:
self.handleError(record) | 0.007968 |
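A minimal, self-contained sketch of how a callback-based handler like this is typically wired up, assuming prepare() simply returns the formatted message (an assumption; the real class may serialize the record differently).
import logging

class CallbackHandler(logging.Handler):
    def __init__(self, callback):
        super().__init__()
        self._callback = callback

    def prepare(self, record):
        # Assumed behaviour: hand the formatted message to the callback.
        return self.format(record)

    def emit(self, record):
        try:
            self._callback(self.prepare(record))
        except Exception:
            self.handleError(record)

logger = logging.getLogger("callback-demo")
logger.propagate = False
logger.addHandler(CallbackHandler(print))
logger.warning("hello")  # the callback receives "hello"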
def to_dict(self):
"""Return a dictionary representation of the SemI."""
make = lambda pair: (pair[0], pair[1].to_dict())
return dict(
variables=dict(make(v) for v in self.variables.items()),
properties=dict(make(p) for p in self.properties.items()),
roles=dict(make(r) for r in self.roles.items()),
predicates=dict(make(p) for p in self.predicates.items())
) | 0.006834 |
def get_page_tags_from_request(request, page_lookup, lang, site, title=False):
"""
Get the list of tags attached to a Page or a Title from a request from usual
`page_lookup` parameters.
:param request: request object
:param page_lookup: a valid page_lookup argument
:param lang: a language code
:param site: a site id
:param title: a boolean to extract the Page (if False) or Title instance
:return: list of tags
    :rtype: list
"""
from cms.templatetags.cms_tags import _get_page_by_untyped_arg
from cms.utils import get_language_from_request, get_site_id
from django.core.cache import cache
try:
from cms.utils import get_cms_setting
except ImportError:
from cms.utils.conf import get_cms_setting
site_id = get_site_id(site)
if lang is None:
lang = get_language_from_request(request)
cache_key = get_cache_key(request, page_lookup, lang, site, title)
tags_list = cache.get(cache_key)
if not tags_list:
page = _get_page_by_untyped_arg(page_lookup, request, site_id)
if page:
if title:
tags_list = get_title_tags(page, lang)
else:
tags_list = get_page_tags(page)
cache.set(cache_key, tags_list, timeout=get_cms_setting('CACHE_DURATIONS')['content'])
if not tags_list:
tags_list = ()
return tags_list | 0.002131 |
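A framework-free sketch of the same cache-aside pattern, using a plain dict in place of Django's cache backend and a hypothetical compute_tags() callable in place of the page lookup.
_tag_cache = {}

def get_tags_cached(cache_key, compute_tags):
    tags = _tag_cache.get(cache_key)
    if not tags:
        tags = compute_tags() or ()  # fall back to an empty tuple, as above
        _tag_cache[cache_key] = tags
    return tags

print(get_tags_cached('page-1:en', lambda: ['news', 'python']))  # computed
print(get_tags_cached('page-1:en', lambda: ['ignored']))         # served from the cache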
def _wait_for_transfer_threads(self, terminate):
# type: (SyncCopy, bool) -> None
"""Wait for download threads
:param SyncCopy self: this
:param bool terminate: terminate threads
"""
if terminate:
self._synccopy_terminate = terminate
for thr in self._transfer_threads:
blobxfer.util.join_thread(thr) | 0.007916 |
def djfrontend_jquery(version=None):
"""
Returns jQuery JavaScript file according to version number.
TEMPLATE_DEBUG returns full file, otherwise returns minified file from Google CDN with local fallback.
Included in HTML5 Boilerplate.
"""
if version is None:
version = getattr(settings, 'DJFRONTEND_JQUERY', DJFRONTEND_JQUERY_DEFAULT)
if getattr(settings, 'TEMPLATE_DEBUG', False):
template = '<script src="{static}djfrontend/js/jquery/{v}/jquery.js"></script>'
else:
template = (
'<script src="//ajax.googleapis.com/ajax/libs/jquery/{v}/jquery.min.js"></script>'
'<script>window.jQuery || document.write(\'<script src="{static}djfrontend/js/jquery/{v}/jquery.min.js"><\/script>\')</script>')
return format_html(template, static=_static_url, v=version) | 0.008363 |
def strand_unknown(db, transcript):
"""
for unstranded data with novel transcripts single exon genes
will have no strand information. single exon novel genes are also
a source of noise in the Cufflinks assembly so this removes them
"""
features = list(db.children(transcript))
strand = features[0].strand
if strand == ".":
return True
else:
return False | 0.002469 |
def open_file(path, grib_errors='warn', **kwargs):
"""Open a GRIB file as a ``cfgrib.Dataset``."""
if 'mode' in kwargs:
warnings.warn("the `mode` keyword argument is ignored and deprecated", FutureWarning)
kwargs.pop('mode')
stream = messages.FileStream(path, message_class=cfmessage.CfMessage, errors=grib_errors)
return Dataset(*build_dataset_components(stream, **kwargs)) | 0.007389 |
def parse(self,fileName,offset):
'''Parses synset from file <fileName>
from offset <offset>
'''
p = Parser()
p.file = open(fileName, 'rb')
a = p.parse_synset(offset=offset)
p.file.close()
self.__dict__.update(a.__dict__) | 0.014085 |
def generate_password(length=8, lower=True, upper=True, number=True):
"""
generates a simple password. We should not really use this in production.
:param length: the length of the password
    :param lower: True if lower case characters are allowed
:param upper: True if upper case characters are allowed
:param number: True if numbers are allowed
:return:
"""
lletters = "abcdefghijklmnopqrstuvwxyz"
uletters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# This doesn't guarantee both lower and upper cases will show up
alphabet = lletters + uletters
digit = "0123456789"
mypw = ""
def _random_character(texts):
return texts[random.randrange(len(texts))]
if not lower:
alphabet = uletters
elif not upper:
alphabet = lletters
for i in range(length):
# last half length will be filled with numbers
if number and i >= int(length / 2):
mypw = mypw + _random_character(digit)
else:
mypw = mypw + _random_character(alphabet)
return mypw | 0.001878 |
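Since the docstring itself warns against production use, here is a hedged alternative sketch built on the standard-library secrets module; it guarantees at least one lower-case letter, one upper-case letter and one digit for lengths of 3 or more.
import secrets
import string

def generate_password_secure(length=8):
    pools = [string.ascii_lowercase, string.ascii_uppercase, string.digits]
    # One character from each pool, then fill the rest from the combined alphabet.
    chars = [secrets.choice(pool) for pool in pools]
    chars += [secrets.choice(''.join(pools)) for _ in range(length - len(chars))]
    secrets.SystemRandom().shuffle(chars)
    return ''.join(chars)

print(generate_password_secure(12))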
def RawBytesToScriptHash(raw):
"""
Get a hash of the provided raw bytes using the ripemd160 algorithm.
Args:
raw (bytes): byte array of raw bytes. e.g. b'\xAA\xBB\xCC'
Returns:
UInt160:
"""
rawh = binascii.unhexlify(raw)
rawhashstr = binascii.unhexlify(bytes(Crypto.Hash160(rawh), encoding='utf-8'))
return UInt160(data=rawhashstr) | 0.007126 |
def overlaps(self, junc, tolerance=0):
"""see if junction overlaps with tolerance"""
if not self.left.overlaps(junc.left,padding=tolerance): return False
if not self.right.overlaps(junc.right,padding=tolerance): return False
return True | 0.028 |
def list_file_extensions(path: str, reportevery: int = 1) -> List[str]:
"""
Returns a sorted list of every file extension found in a directory
and its subdirectories.
Args:
path: path to scan
reportevery: report directory progress after every *n* steps
Returns:
sorted list of every file extension found
"""
extensions = set()
count = 0
for root, dirs, files in os.walk(path):
count += 1
if count % reportevery == 0:
log.debug("Walking directory {}: {!r}", count, root)
for file in files:
filename, ext = os.path.splitext(file)
extensions.add(ext)
return sorted(list(extensions)) | 0.001416 |
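A hypothetical usage sketch: the same scan expressed as a set comprehension over the current directory, equivalent to calling the function above without the progress logging.
import os

extensions = sorted({os.path.splitext(name)[1]
                     for _, _, files in os.walk('.')
                     for name in files})
print(extensions)  # e.g. ['', '.md', '.py', '.txt']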
def copy(string, **kwargs):
"""Copy given string into system clipboard."""
window = Tk()
window.withdraw()
window.clipboard_clear()
window.clipboard_append(string)
window.destroy()
return | 0.004651 |
def get_port_for_ip_address(context, ip_id, id, fields=None):
"""Retrieve a port.
: param context: neutron api request context
: param id: UUID representing the port to fetch.
: param fields: a list of strings that are valid keys in a
port dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
object in neutron/api/v2/attributes.py. Only these fields
will be returned.
"""
LOG.info("get_port %s for tenant %s fields %s" %
(id, context.tenant_id, fields))
addr = db_api.ip_address_find(context, id=ip_id, scope=db_api.ONE)
if not addr:
raise q_exc.IpAddressNotFound(addr_id=ip_id)
filters = {'ip_address_id': [ip_id]}
results = db_api.port_find(context, id=id, fields=fields,
scope=db_api.ONE, **filters)
if not results:
raise n_exc.PortNotFound(port_id=id)
return v._make_port_for_ip_dict(addr, results) | 0.001067 |
def next_channel_from_routes(
available_routes: List['RouteState'],
channelidentifiers_to_channels: Dict,
transfer_amount: PaymentWithFeeAmount,
lock_timeout: BlockTimeout,
) -> Optional[NettingChannelState]:
""" Returns the first route that may be used to mediated the transfer.
The routing service can race with local changes, so the recommended routes
must be validated.
Args:
available_routes: Current available routes that may be used, it's
assumed that the available_routes list is ordered from best to
worst.
channelidentifiers_to_channels: Mapping from channel identifier
to NettingChannelState.
transfer_amount: The amount of tokens that will be transferred
through the given route.
lock_timeout: Number of blocks until the lock expires, used to filter
out channels that have a smaller settlement window.
Returns:
The next route.
"""
for route in available_routes:
channel_state = channelidentifiers_to_channels.get(route.channel_identifier)
if not channel_state:
continue
if is_channel_usable(channel_state, transfer_amount, lock_timeout):
return channel_state
return None | 0.001542 |
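A simplified, framework-free illustration of the same "first usable route wins" selection, with a hypothetical usability predicate standing in for is_channel_usable().
from typing import Callable, Optional, Sequence

def first_usable_channel(channels: Sequence[dict],
                         usable: Callable[[dict], bool]) -> Optional[dict]:
    # channels are assumed to be ordered from best to worst, as in the docstring above.
    for channel in channels:
        if usable(channel):
            return channel
    return None

channels = [{'id': 1, 'capacity': 5}, {'id': 2, 'capacity': 50}]
print(first_usable_channel(channels, lambda c: c['capacity'] >= 10))  # {'id': 2, 'capacity': 50}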