| text (stringlengths 78-104k) | score (float64 0-0.18) |
|---|---|
def words_for_language(language_code):
"""
Return the math words for a language code.
The language_code should be an ISO 639-2 language code.
https://www.loc.gov/standards/iso639-2/php/code_list.php
"""
word_groups = word_groups_for_language(language_code)
words = []
for group in word_groups:
words.extend(word_groups[group].keys())
return words | 0.002551 |
def __init__(self, code):
"""Initialize a `MucStatus` element from a status code.
:Parameters:
- `code`: the status code.
:Types:
- `code`: `int`
"""
code=int(code)
if code<0 or code>999:
raise ValueError("Bad status code")
self.code=code | 0.021341 |
def _makepass(password, hasher='sha256'):
'''
Create a znc compatible hashed password
'''
# Setup the hasher
if hasher == 'sha256':
h = hashlib.sha256(password)
elif hasher == 'md5':
h = hashlib.md5(password)
else:
return NotImplemented
c = "abcdefghijklmnopqrstuvwxyz" \
"ABCDEFGHIJKLMNOPQRSTUVWXYZ" \
"0123456789!?.,:;/*-+_()"
r = {
'Method': h.name,
'Salt': ''.join(random.SystemRandom().choice(c) for x in range(20)),
}
# Salt the password hash
h.update(r['Salt'])
r['Hash'] = h.hexdigest()
return r | 0.001618 |
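# A minimal sketch (not from the source) of how the mapping returned by
# _makepass could be checked later. The helper name is hypothetical and it
# assumes `password` and the stored salt are byte strings, as hashlib
# requires on Python 3.
import hashlib

def _verify_znc_pass(password, stored):
    # Recompute hash(password || salt) with the stored method and compare.
    h = hashlib.new(stored['Method'], password)
    h.update(stored['Salt'])
    return h.hexdigest() == stored['Hash']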
def find_start_point(self):
"""
Find the first location in our array that is not empty
"""
for i, row in enumerate(self.data):
for j, _ in enumerate(row):
if self.data[i, j] != 0: # or not np.isfinite(self.data[i,j]):
return i, j | 0.006452 |
def value(self):
"""Read-only property containing data returned from function."""
if self.ready is True:
flag, load = self.__queue.get()
if flag:
return load
raise load | 0.008475 |
def write_section(self, name, features, info, f):
"""Provides formatting for one section (e.g. hydrogen bonds)"""
if not len(info) == 0:
f.write('\n\n### %s ###\n' % name)
f.write('%s\n' % '\t'.join(features))
for line in info:
f.write('%s\n' % '\t'.join(map(str, line))) | 0.0059 |
def login(self, username: str, password: str, course: int) -> requests.Response:
"""
η»ε
₯θͺ²η¨
"""
try:
# Assemble the relevant login information
payload = {
'name': username,
'passwd': password,
'rdoCourse': course
}
# Return the response of the login attempt
return self.__session.post(
self.__url + '/Login', data=payload, timeout=0.5, verify=False)
except requests.exceptions.Timeout:
return None | 0.005906 |
def keys_delete(cls, fqdn, key):
"""Delete a key for a domain."""
return cls.json_delete('%s/domains/%s/keys/%s' %
(cls.api_url, fqdn, key)) | 0.010695 |
def del_character(self, name):
"""Remove the Character from the database entirely.
This also deletes all its history. You'd better be sure.
"""
self.query.del_character(name)
self.del_graph(name)
del self.character[name] | 0.007407 |
def remove(self, *rules, _validate=True):
# type: (Iterable[Type[Rule]], bool) -> None
"""
Remove rules from the set.
:param rules: Rules to remove.
:param _validate: True if the rule should be validated before deleting.
This parameter is only for internal use.
:raise NotRuleException: If the parameter doesn't inherit from Rule.
:raise RuleException: If the syntax of the rule is invalid.
:raise KeyError: If the rule is not in the grammar.
"""
all_rules = set()
for rule in rules:
if _validate:
self._validate_rule(rule)
for r in self._split_rules(rule):
if not self.__contains__(rule, _validate=False):
raise KeyError('Rule ' + rule.__name__ + ' is not inside')
all_rules.add(r)
for rule in all_rules:
for side in rule.rule:
for s in side:
self._assign_map[s].discard(rule)
super().remove(rule) | 0.002846 |
def update_file(filename, items):
'''Edits the given file in place, replacing any instances of {key} with the
appropriate value from the provided items dict. If the given filename ends
with ".xml" values will be quoted and escaped for XML.
'''
# TODO: Implement something in the templates to denote whether the value
# being replaced is an XML attribute or a value. Perhaps move to dynamic
# XML tree building rather than string replacement.
should_escape = filename.endswith('addon.xml')
with open(filename, 'r') as inp:
text = inp.read()
for key, val in items.items():
if should_escape:
val = saxutils.quoteattr(val)
text = text.replace('{%s}' % key, val)
output = text
with open(filename, 'w') as out:
out.write(output) | 0.001224 |
def parse(source, handler):
'''
Convert XML 1.0 to MicroXML
source - XML 1.0 input
handler - MicroXML events handler
Returns uxml, extras
uxml - MicroXML element extracted from the source
extras - information to be preserved but not part of MicroXML, e.g. namespaces
'''
h = expat_callbacks(handler)
p = xml.parsers.expat.ParserCreate(namespace_separator=' ')
p.StartElementHandler = h.start_element
p.EndElementHandler = h.end_element
p.CharacterDataHandler = h.char_data
p.StartNamespaceDeclHandler = h.start_namespace
p.EndNamespaceDeclHandler = h.end_namespace
p.Parse(source)
return p | 0.003026 |
def recursive_xy_divide(elems, avg_font_size):
"""
Recursively group/divide the document by white stripes
by projecting elements onto alternating axes as intervals.
avg_font_size: the minimum gap size between elements below
which we consider interval continuous.
"""
log = logging.getLogger(__name__)
log.info(avg_font_size)
objects = list(elems.mentions)
objects.extend(elems.segments)
bboxes = []
# A tree that is a list of its children
# bboxes can be recursively reconstructed from
# the leaves
def divide(objs, bbox, h_split=True, is_single=False):
"""
Recursive wrapper for splitting a list of objects
with bounding boxes.
h_split: whether to split along y axis, otherwise
we split along x axis.
"""
if not objs:
return []
# range start/end indices
axis = 1 if h_split else 0
intervals, groups = project_onto(objs, axis, avg_font_size)
# base case where we can not actually divide
single_child = len(groups) == 1
# Can not divide in both X and Y, stop
if is_single and single_child:
bboxes.append(bbox)
return objs
else:
children = []
for interval, group in zip(intervals, groups):
# Create the bbox for the subgroup
sub_bbox = np.array(bbox)
sub_bbox[[axis, axis + 2]] = interval
# Append the sub-document tree
child = divide(group, sub_bbox, not h_split, single_child)
children.append(child)
return children
full_page_bbox = (0, 0, elems.layout.width, elems.layout.height)
# Filter out invalid objects
objects = [o for o in objects if inside(full_page_bbox, o.bbox)]
log.info("avg_font_size for dividing", avg_font_size)
tree = divide(objects, full_page_bbox) if objects else []
return bboxes, tree | 0.000504 |
def yaml_to_dict(yaml_str=None, str_or_buffer=None, ordered=False):
"""
Load YAML from a string, file, or buffer (an object with a .read method).
Parameters are mutually exclusive.
Parameters
----------
yaml_str : str, optional
A string of YAML.
str_or_buffer : str or file like, optional
File name or buffer from which to load YAML.
ordered: bool, optional, default False
If True, an OrderedDict is returned.
Returns
-------
dict
Conversion from YAML.
"""
if not yaml_str and not str_or_buffer:
raise ValueError('One of yaml_str or str_or_buffer is required.')
# determine which load method to use
if ordered:
loader = __ordered_load
else:
loader = yaml.load
if yaml_str:
d = loader(yaml_str)
elif isinstance(str_or_buffer, str):
with open(str_or_buffer) as f:
d = loader(f)
else:
d = loader(str_or_buffer)
return d | 0.001006 |
def decode(self, string, legacy=False):
"""
Decode a string according to the current alphabet into a UUID
Raises ValueError when encountering illegal characters
or a too-long string.
If string too short, fills leftmost (MSB) bits with 0.
Pass `legacy=True` if your UUID was encoded with a ShortUUID version
prior to 0.6.0.
"""
if legacy:
string = string[::-1]
return _uu.UUID(int=string_to_int(string, self._alphabet)) | 0.003906 |
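# `string_to_int` is defined elsewhere in the module; a plausible sketch
# (assumed, not the library's exact implementation) treats the string as a
# big-endian number in base len(alphabet):
def string_to_int(string, alphabet):
    # Most significant character first; raises ValueError on characters
    # outside the alphabet (via index()).
    number = 0
    for char in string:
        number = number * len(alphabet) + alphabet.index(char)
    return number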
def parse(readDataInstance):
"""
Returns a new L{TLSDirectory} object.
@type readDataInstance: L{ReadData}
@param readDataInstance: A L{ReadData} object containing data to create a new L{TLSDirectory} object.
@rtype: L{TLSDirectory}
@return: A new L{TLSDirectory} object.
"""
tlsDir = TLSDirectory()
tlsDir.startAddressOfRawData.value = readDataInstance.readDword()
tlsDir.endAddressOfRawData.value = readDataInstance.readDword()
tlsDir.addressOfIndex.value = readDataInstance.readDword()
tlsDir.addressOfCallbacks.value = readDataInstance.readDword()
tlsDir.sizeOfZeroFill.value = readDataInstance.readDword()
tlsDir.characteristics.value = readDataInstance.readDword()
return tlsDir | 0.007229 |
def perf_total(self, value):
"""The perf_total property.
Args:
value (string). the property value.
"""
if value == self._defaults['perfTotal'] and 'perfTotal' in self._values:
del self._values['perfTotal']
else:
self._values['perfTotal'] = value | 0.012121 |
def clear_git_lock(role, remote=None, **kwargs):
'''
.. versionadded:: 2015.8.2
Remove the update locks for Salt components (gitfs, git_pillar, winrepo)
which use gitfs backend code from salt.utils.gitfs.
.. note::
Running :py:func:`cache.clear_all <salt.runners.cache.clear_all>` will
not include this function as it does for pillar, grains, and mine.
Additionally, executing this function with a ``role`` of ``gitfs`` is
equivalent to running ``salt-run fileserver.clear_lock backend=git``.
role
Which type of lock to remove (``gitfs``, ``git_pillar``, or
``winrepo``)
remote
If specified, then any remotes which contain the passed string will
have their lock cleared. For example, a ``remote`` value of **github**
will remove the lock from all github.com remotes.
type : update,checkout,mountpoint
The types of lock to clear. Can be one or more of ``update``,
``checkout``, and ``mountpoint``, and can be passed either as a
comma-separated or Python list.
.. versionadded:: 2015.8.8
.. versionchanged:: 2018.3.0
``mountpoint`` lock type added
CLI Examples:
.. code-block:: bash
salt-run cache.clear_git_lock gitfs
salt-run cache.clear_git_lock git_pillar
salt-run cache.clear_git_lock git_pillar type=update
salt-run cache.clear_git_lock git_pillar type=update,checkout
salt-run cache.clear_git_lock git_pillar type='["update", "mountpoint"]'
'''
kwargs = salt.utils.args.clean_kwargs(**kwargs)
type_ = salt.utils.args.split_input(
kwargs.pop('type', ['update', 'checkout', 'mountpoint']))
if kwargs:
salt.utils.args.invalid_kwargs(kwargs)
if role == 'gitfs':
git_objects = [
salt.utils.gitfs.GitFS(
__opts__,
__opts__['gitfs_remotes'],
per_remote_overrides=salt.fileserver.gitfs.PER_REMOTE_OVERRIDES,
per_remote_only=salt.fileserver.gitfs.PER_REMOTE_ONLY
)
]
elif role == 'git_pillar':
git_objects = []
for ext_pillar in __opts__['ext_pillar']:
key = next(iter(ext_pillar))
if key == 'git':
if not isinstance(ext_pillar['git'], list):
continue
obj = salt.utils.gitfs.GitPillar(
__opts__,
ext_pillar['git'],
per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY,
global_only=salt.pillar.git_pillar.GLOBAL_ONLY)
git_objects.append(obj)
elif role == 'winrepo':
winrepo_dir = __opts__['winrepo_dir']
winrepo_remotes = __opts__['winrepo_remotes']
git_objects = []
for remotes, base_dir in (
(winrepo_remotes, winrepo_dir),
(__opts__['winrepo_remotes_ng'], __opts__['winrepo_dir_ng'])
):
obj = salt.utils.gitfs.WinRepo(
__opts__,
remotes,
per_remote_overrides=salt.runners.winrepo.PER_REMOTE_OVERRIDES,
per_remote_only=salt.runners.winrepo.PER_REMOTE_ONLY,
global_only=salt.runners.winrepo.GLOBAL_ONLY,
cache_root=base_dir)
git_objects.append(obj)
else:
raise SaltInvocationError('Invalid role \'{0}\''.format(role))
ret = {}
for obj in git_objects:
for lock_type in type_:
cleared, errors = _clear_lock(obj.clear_lock,
role,
remote=remote,
lock_type=lock_type)
if cleared:
ret.setdefault('cleared', []).extend(cleared)
if errors:
ret.setdefault('errors', []).extend(errors)
if not ret:
return 'No locks were removed'
return ret | 0.000978 |
def get_labels(self, request_data, project=None, top=None, skip=None):
"""GetLabels.
Get a collection of shallow label references.
:param :class:`<TfvcLabelRequestData> <azure.devops.v5_0.tfvc.models.TfvcLabelRequestData>` request_data: labelScope, name, owner, and itemLabelFilter
:param str project: Project ID or project name
:param int top: Max number of labels to return
:param int skip: Number of labels to skip
:rtype: [TfvcLabelRef]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if request_data is not None:
if request_data.label_scope is not None:
query_parameters['requestData.labelScope'] = request_data.label_scope
if request_data.name is not None:
query_parameters['requestData.name'] = request_data.name
if request_data.owner is not None:
query_parameters['requestData.owner'] = request_data.owner
if request_data.item_label_filter is not None:
query_parameters['requestData.itemLabelFilter'] = request_data.item_label_filter
if request_data.max_item_count is not None:
query_parameters['requestData.maxItemCount'] = request_data.max_item_count
if request_data.include_links is not None:
query_parameters['requestData.includeLinks'] = request_data.include_links
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
if skip is not None:
query_parameters['$skip'] = self._serialize.query('skip', skip, 'int')
response = self._send(http_method='GET',
location_id='a5d9bd7f-b661-4d0e-b9be-d9c16affae54',
version='5.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[TfvcLabelRef]', self._unwrap_collection(response)) | 0.005116 |
def dependent_on_composite_state(self, job_record):
""" :return instance of <NodesCompositeState> """
assert isinstance(job_record, Job)
tree = self.get_tree(job_record.process_name)
node = tree.get_node(job_record.process_name, job_record.timeperiod)
return node.dependent_on_composite_state() | 0.005988 |
def load_config(cls):
""" Load global and local configuration files and update if needed."""
config_file = os.path.expanduser(cls.home_config)
global_conf = cls.load(config_file, 'global')
cls.load(cls.local_config, 'local')
# update global configuration if needed
cls.update_config(config_file, global_conf) | 0.005618 |
def _handle_one(self):
"""
Handle one read/write cycle.
"""
ready_to_read, ready_to_write, in_error = select.select(
[self.request], [self.request], [self.request], 0.1)
if in_error:
raise self.Disconnect()
# Write any commands to the client
while self.send_queue and ready_to_write:
msg = self.send_queue.pop(0)
self._send(msg)
# See if the client has any commands for us.
if ready_to_read:
self._handle_incoming() | 0.003636 |
def create_date(past=False, max_years_future=10, max_years_past=10):
"""
Create a random valid date
If past, then dates can be in the past
If into the future, then no more than max_years into the future
If it's not, then it can't be any older than max_years_past
"""
if past:
start = datetime.datetime.today() - datetime.timedelta(days=max_years_past * 365)
# Anywhere between max_years_past years ago and max_years_future years after that point
num_days = (max_years_future * 365) + start.day
else:
start = datetime.datetime.today()
num_days = max_years_future * 365
random_days = random.randint(1, num_days)
random_date = start + datetime.timedelta(days=random_days)
return(random_date) | 0.004098 |
def asarray(self, key=None, series=None):
"""Return image data of multiple TIFF pages as numpy array.
By default the first image series is returned.
Parameters
----------
key : int, slice, or sequence of page indices
Defines which pages to return as array.
series : int
Defines which series of pages to return as array.
"""
if key is None and series is None:
series = 0
if series is not None:
pages = self.series[series].pages
else:
pages = self.pages
if key is None:
pass
elif isinstance(key, int):
pages = [pages[key]]
elif isinstance(key, slice):
pages = pages[key]
elif isinstance(key, collections.Iterable):
pages = [pages[k] for k in key]
else:
raise TypeError('key must be an int, slice, or sequence')
if len(pages) == 1:
return pages[0].asarray()
elif self.is_nih:
result = numpy.vstack(p.asarray(colormapped=False,
squeeze=False) for p in pages)
if pages[0].is_palette:
result = numpy.take(pages[0].color_map, result, axis=1)
result = numpy.swapaxes(result, 0, 1)
else:
if self.is_ome and any(p is None for p in pages):
firstpage = next(p for p in pages if p)
nopage = numpy.zeros_like(
firstpage.asarray())
result = numpy.vstack((p.asarray() if p else nopage)
for p in pages)
if key is None:
try:
result.shape = self.series[series].shape
except ValueError:
warnings.warn("failed to reshape %s to %s" % (
result.shape, self.series[series].shape))
result.shape = (-1,) + pages[0].shape
else:
result.shape = (-1,) + pages[0].shape
return result | 0.000962 |
def validate(self, size):
"""
Ensure that the size of the dimension matches the number of bands in the
scale
Raises:
ValueError: when the dimension size and number of bands don't match
"""
msg = 'scale and array size must match, ' \
'but were scale: {self.scale.n_bands}, array size: {size}'
if size != len(self.scale):
raise ValueError(msg.format(**locals())) | 0.008734 |
def match_keyword(self, keyword, string_match_type=DEFAULT_STRING_MATCH_TYPE, match=True):
"""Adds a keyword to match.
Multiple keywords can be added to perform a boolean ``OR`` among
them. A keyword may be applied to any of the elements defined in
this object such as the display name, description or any method
defined in an interface implemented by this object.
arg: keyword (string): keyword to match
arg: string_match_type (osid.type.Type): the string match
type
arg: match (boolean): ``true`` for a positive match,
``false`` for a negative match
raise: InvalidArgument - ``keyword`` is not of
``string_match_type``
raise: NullArgument - ``keyword`` or ``string_match_type`` is
``null``
raise: Unsupported -
``supports_string_match_type(string_match_type)`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
# Note: this currently ignores match argument
match_value = self._get_string_match_value(keyword, string_match_type)
for field_name in self._keyword_fields:
if field_name not in self._keyword_terms:
self._keyword_terms[field_name] = {'$in': list()}
self._keyword_terms[field_name]['$in'].append(match_value) | 0.002104 |
def infer_domain(terms):
"""
Infer the domain from a collection of terms.
The algorithm for inferring domains is as follows:
- If all input terms have a domain of GENERIC, the result is GENERIC.
- If there is exactly one non-generic domain in the input terms, the result
is that domain.
- Otherwise, an AmbiguousDomain error is raised.
Parameters
----------
terms : iterable[zipline.pipeline.term.Term]
Returns
-------
inferred : Domain or NotSpecified
Raises
------
AmbiguousDomain
Raised if more than one concrete domain is present in the input terms.
"""
domains = {t.domain for t in terms}
num_domains = len(domains)
if num_domains == 0:
return GENERIC
elif num_domains == 1:
return domains.pop()
elif num_domains == 2 and GENERIC in domains:
domains.remove(GENERIC)
return domains.pop()
else:
# Remove GENERIC if it's present before raising. Showing it to the user
# is confusing because it doesn't contribute to the error.
domains.discard(GENERIC)
raise AmbiguousDomain(sorted(domains, key=repr)) | 0.00085 |
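# A brief illustration of the three rules above, using a hypothetical stub in
# place of real pipeline terms; GENERIC, US_EQUITIES, CA_EQUITIES and
# AmbiguousDomain are assumed to come from zipline.pipeline.domain.
class _StubTerm:
    def __init__(self, domain):
        self.domain = domain

# All generic          -> GENERIC
#   infer_domain([_StubTerm(GENERIC), _StubTerm(GENERIC)])
# One concrete domain  -> that domain
#   infer_domain([_StubTerm(GENERIC), _StubTerm(US_EQUITIES)])
# Two concrete domains -> raises AmbiguousDomain
#   infer_domain([_StubTerm(US_EQUITIES), _StubTerm(CA_EQUITIES)])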
def rsa_decrypt_base64_encoded_key(rsaprivatekey, enckey):
# type: (cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey,
# str) -> bytes
"""Decrypt an RSA encrypted key encoded as base64
:param rsaprivatekey: RSA private key
:type rsaprivatekey:
cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey
:param str enckey: base64-encoded key
:rtype: bytes
:return: decrypted key
"""
return rsaprivatekey.decrypt(
base64.b64decode(enckey),
cryptography.hazmat.primitives.asymmetric.padding.OAEP(
mgf=cryptography.hazmat.primitives.asymmetric.padding.MGF1(
algorithm=cryptography.hazmat.primitives.hashes.SHA1()
),
algorithm=cryptography.hazmat.primitives.hashes.SHA1(),
label=None,
)
) | 0.001189 |
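# For context, a sketch of the matching encryption side using the same
# `cryptography` OAEP/SHA1 parameters; the helper name is not from the source.
import base64
import cryptography.hazmat.primitives.asymmetric.padding
import cryptography.hazmat.primitives.hashes

def rsa_encrypt_key_base64_encoded(rsapublickey, key):
    # type: (cryptography.hazmat.primitives.asymmetric.rsa.RSAPublicKey, bytes) -> str
    """Encrypt a key with RSA-OAEP(SHA1) and return it base64-encoded."""
    enckey = rsapublickey.encrypt(
        key,
        cryptography.hazmat.primitives.asymmetric.padding.OAEP(
            mgf=cryptography.hazmat.primitives.asymmetric.padding.MGF1(
                algorithm=cryptography.hazmat.primitives.hashes.SHA1()
            ),
            algorithm=cryptography.hazmat.primitives.hashes.SHA1(),
            label=None,
        )
    )
    return base64.b64encode(enckey).decode('ascii')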
def _dens(self,R,z,phi=0.,t=0.):
"""
NAME:
_dens
PURPOSE:
evaluate the density for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the density
HISTORY:
2015-06-15 - Written - Bovy (IAS)
"""
return 3./4./nu.pi*self._b2*(R**2.+z**2.+self._b2)**-2.5 | 0.013015 |
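# For reference (not part of the original docstring): the expression above
# matches the Plummer density profile with unit total mass and scale length b,
#     rho(R, z) = (3*b^2 / (4*pi)) * (R^2 + z^2 + b^2)^(-5/2)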
def get_data(self, simulation_step=None, error_category=None):
"""
Parameters
----------
simulation_step: if not given, returns a raw report
error_category: if only one argument is specified, swaps dataframe report
"""
if simulation_step is None and error_category is None:
return self._df.dropna(axis="rows", how="all")
if simulation_step is not None:
if simulation_step not in self._simulation_step_list:
raise RuntimeError("The simulation_step '%s' is not referred in the error file." % simulation_step)
if error_category is not None:
if error_category not in self.CATEGORIES:
raise RuntimeError("The error_cat '%s' is wrong." % error_category)
iterables = [simulation_step, error_category]
columns = pd.MultiIndex.from_product(iterables)
series = self._df[simulation_step][error_category].dropna(axis="rows", how="all")
df = pd.DataFrame(index=series.index, columns=columns)
df[simulation_step] = series
return df
return self._df[simulation_step].dropna(axis="rows", how="all")
if error_category is not None:
if error_category not in self.CATEGORIES:
raise RuntimeError("The error_category '%s' is wrong." % error_category)
df = self._df.copy()
df.columns = df.columns.swaplevel(0, 1)
return df[error_category].dropna(axis="rows", how="all") | 0.004425 |
def upper2_for_ramp_wall(self) -> Set[Point2]:
""" Returns the 2 upper ramp points of the main base ramp required for the supply depot and barracks placement properties used in this file. """
if len(self.upper) > 5:
# NOTE: this was way too slow on large ramps
return set() # HACK: makes this work for now
# FIXME: please do
upper2 = sorted(list(self.upper), key=lambda x: x.distance_to(self.bottom_center), reverse=True)
while len(upper2) > 2:
upper2.pop()
return set(upper2) | 0.007067 |
def get_file_row_generator(file_path, separator, encoding=None):
"""
Reads an separated value file row by row.
Inputs: - file_path: The path of the separated value format file.
- separator: The delimiter among values (e.g. ",", "\t", " ")
- encoding: The encoding used in the stored text.
Yields: - words: A list of strings corresponding to each of the file's rows.
"""
with open(file_path, encoding=encoding) as file_object:
for line in file_object:
words = line.strip().split(separator)
yield words | 0.003436 |
def pretty_print (obj, indent=False):
"""
pretty print a JSON object
"""
if indent:
return json.dumps(obj, sort_keys=True, indent=2, separators=(',', ': '))
else:
return json.dumps(obj, sort_keys=True) | 0.012605 |
def _handle_auth_success(self, stream, success):
"""Handle successful authentication.
Send <success/> and mark the stream peer authenticated.
[receiver only]
"""
if not self._check_authorization(success.properties, stream):
element = ElementTree.Element(FAILURE_TAG)
ElementTree.SubElement(element, SASL_QNP + "invalid-authzid")
return True
authzid = success.properties.get("authzid")
if authzid:
peer = JID(authzid)
elif "username" in success.properties:
peer = JID(success.properties["username"], stream.me.domain)
else:
# anonymous
peer = None
stream.set_peer_authenticated(peer, True) | 0.002625 |
def _updateHiddenStateTrajectories(self):
"""Sample a new set of state trajectories from the conditional distribution P(S | T, E, O)
"""
self.model.hidden_state_trajectories = list()
for trajectory_index in range(self.nobs):
hidden_state_trajectory = self._sampleHiddenStateTrajectory(self.observations[trajectory_index])
self.model.hidden_state_trajectories.append(hidden_state_trajectory)
return | 0.010823 |
def _event(self, event):
"""Converts a TraceEvent proto into a catapult trace event python value."""
result = dict(
pid=event.device_id,
tid=event.resource_id,
name=event.name,
ts=event.timestamp_ps / 1000000.0)
if event.duration_ps:
result['ph'] = _TYPE_COMPLETE
result['dur'] = event.duration_ps / 1000000.0
else:
result['ph'] = _TYPE_INSTANT
result['s'] = _SCOPE_THREAD
for key in dict(event.args):
if 'args' not in result:
result['args'] = {}
result['args'][key] = event.args[key]
return result | 0.011706 |
def hashsum(filename):
"""Return a hash of the file From <http://stackoverflow.com/a/7829658>"""
with open(filename, mode='rb') as f:
d = hashlib.sha1()
for buf in iter(partial(f.read, 2**20), b''):
d.update(buf)
return d.hexdigest() | 0.00365 |
def track_name_event(self, name):
"""Return the bytes for a track name meta event."""
l = self.int_to_varbyte(len(name))
return '\x00' + META_EVENT + TRACK_NAME + l + name | 0.015385 |
def add_columns(tree_view, df_py_dtypes, list_store):
'''
Add columns to a `gtk.TreeView` for the types listed in `df_py_dtypes`.
Args:
tree_view (gtk.TreeView) : Tree view to append columns to.
df_py_dtypes (pandas.DataFrame) : Data frame containing type
information for one or more columns in `list_store`.
list_store (gtk.ListStore) : Model data.
Returns:
None
'''
tree_view.set_model(list_store)
for column_i, (i, dtype_i) in df_py_dtypes[['i', 'dtype']].iterrows():
tree_column_i = gtk.TreeViewColumn(column_i)
tree_column_i.set_name(column_i)
if dtype_i in (int, long):
property_name = 'text'
cell_renderer_i = gtk.CellRendererSpin()
elif dtype_i == float:
property_name = 'text'
cell_renderer_i = gtk.CellRendererSpin()
elif dtype_i in (bool, ):
property_name = 'active'
cell_renderer_i = gtk.CellRendererToggle()
elif dtype_i in (str, ):
property_name = 'text'
cell_renderer_i = gtk.CellRendererText()
else:
raise ValueError('No cell renderer for dtype: %s' % dtype_i)
cell_renderer_i.set_data('column_i', i)
cell_renderer_i.set_data('column', tree_column_i)
tree_column_i.pack_start(cell_renderer_i, True)
tree_column_i.add_attribute(cell_renderer_i, property_name, i)
tree_view.append_column(tree_column_i) | 0.000668 |
def tr(text, kword, color):
""" tr(text, keyword, color)
"""
return re.sub(kword, colorize(BgColor.Null, Base.Null, color, kword), text) | 0.006757 |
def to_png_file(self, fname: str):
"""
write a '.png' file.
"""
cmd = pipes.Template()
cmd.append('dot -Tpng > %s' % fname, '-.')
with cmd.open('pipefile', 'w') as f:
f.write(self.to_dot()) | 0.008032 |
def processor(ctx, processor_cls, process_time_limit, enable_stdout_capture=True, get_object=False):
"""
Run Processor.
"""
g = ctx.obj
Processor = load_cls(None, None, processor_cls)
processor = Processor(projectdb=g.projectdb,
inqueue=g.fetcher2processor, status_queue=g.status_queue,
newtask_queue=g.newtask_queue, result_queue=g.processor2result,
enable_stdout_capture=enable_stdout_capture,
process_time_limit=process_time_limit)
g.instances.append(processor)
if g.get('testing_mode') or get_object:
return processor
processor.run() | 0.005814 |
def getStreamNetworkAsGeoJson(self, session, withNodes=True):
"""
Retrieve the stream network geometry in GeoJSON format.
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database
withNodes (bool, optional): Include nodes. Defaults to False.
Returns:
str: GeoJSON string.
"""
features_list = []
# Assemble link features
for link in self.streamLinks:
link_geoJson = link.getAsGeoJson(session)
if link_geoJson:
link_geometry = json.loads(link.getAsGeoJson(session))
link_properties = {"link_number": link.linkNumber,
"type": link.type,
"num_elements": link.numElements,
"dx": link.dx,
"erode": link.erode,
"subsurface": link.subsurface}
link_feature = {"type": "Feature",
"geometry": link_geometry,
"properties": link_properties,
"id": link.id}
features_list.append(link_feature)
# Assemble node features
if withNodes:
for node in link.nodes:
node_geoJson = node.getAsGeoJson(session)
if node_geoJson:
node_geometry = json.loads(node_geoJson)
node_properties = {"link_number": link.linkNumber,
"node_number": node.nodeNumber,
"elevation": node.elevation}
node_feature = {"type": "Feature",
"geometry": node_geometry,
"properties": node_properties,
"id": node.id}
features_list.append(node_feature)
feature_collection = {"type": "FeatureCollection",
"features": features_list}
return json.dumps(feature_collection) | 0.001341 |
def _get_section(self, section, count):
"""Read the next I{count} records from the wire data and add them to
the specified section.
@param section: the section of the message to which to add records
@type section: list of dns.rrset.RRset objects
@param count: the number of records to read
@type count: int"""
if self.updating or self.one_rr_per_rrset:
force_unique = True
else:
force_unique = False
seen_opt = False
for i in xrange(0, count):
rr_start = self.current
(name, used) = dns.name.from_wire(self.wire, self.current)
absolute_name = name
if not self.message.origin is None:
name = name.relativize(self.message.origin)
self.current = self.current + used
(rdtype, rdclass, ttl, rdlen) = \
struct.unpack('!HHIH',
self.wire[self.current:self.current + 10])
self.current = self.current + 10
if rdtype == dns.rdatatype.OPT:
if not section is self.message.additional or seen_opt:
raise BadEDNS
self.message.payload = rdclass
self.message.ednsflags = ttl
self.message.edns = (ttl & 0xff0000) >> 16
self.message.options = []
current = self.current
optslen = rdlen
while optslen > 0:
(otype, olen) = \
struct.unpack('!HH',
self.wire[current:current + 4])
current = current + 4
opt = dns.edns.option_from_wire(otype, self.wire, current, olen)
self.message.options.append(opt)
current = current + olen
optslen = optslen - 4 - olen
seen_opt = True
elif rdtype == dns.rdatatype.TSIG:
if not (section is self.message.additional and
i == (count - 1)):
raise BadTSIG
if self.message.keyring is None:
raise UnknownTSIGKey('got signed message without keyring')
secret = self.message.keyring.get(absolute_name)
if secret is None:
raise UnknownTSIGKey("key '%s' unknown" % name)
self.message.tsig_ctx = \
dns.tsig.validate(self.wire,
absolute_name,
secret,
int(time.time()),
self.message.request_mac,
rr_start,
self.current,
rdlen,
self.message.tsig_ctx,
self.message.multi,
self.message.first)
self.message.had_tsig = True
else:
if ttl < 0:
ttl = 0
if self.updating and \
(rdclass == dns.rdataclass.ANY or
rdclass == dns.rdataclass.NONE):
deleting = rdclass
rdclass = self.zone_rdclass
else:
deleting = None
if deleting == dns.rdataclass.ANY or \
(deleting == dns.rdataclass.NONE and \
section is self.message.answer):
covers = dns.rdatatype.NONE
rd = None
else:
rd = dns.rdata.from_wire(rdclass, rdtype, self.wire,
self.current, rdlen,
self.message.origin)
covers = rd.covers()
if self.message.xfr and rdtype == dns.rdatatype.SOA:
force_unique = True
rrset = self.message.find_rrset(section, name,
rdclass, rdtype, covers,
deleting, True, force_unique)
if not rd is None:
rrset.add(rd, ttl)
self.current = self.current + rdlen | 0.003104 |
def get_relevant_versions(self, package_name: str):
"""Return a tuple: (latest release, latest stable)
If there are different, it means the latest is not a stable
"""
versions = self.get_ordered_versions(package_name)
pre_releases = [version for version in versions if not version.is_prerelease]
return (
versions[-1],
pre_releases[-1]
) | 0.007194 |
def define_frequencies(Ne, explicitly_antisymmetric=False):
u"""Define all frequencies omega_level, omega, gamma.
>>> from sympy import pprint
>>> pprint(define_frequencies(2), use_unicode=True)
⎛          ⎡ 0   ω₁₂⎤  ⎡ 0   γ₁₂⎤⎞
⎜[ω₁, ω₂], ⎢        ⎥, ⎢        ⎥⎟
⎝          ⎣ω₂₁   0 ⎦  ⎣γ₂₁   0 ⎦⎠
We can make these matrices explicitly antisymmetric.
>>> pprint(define_frequencies(2, explicitly_antisymmetric=True),
... use_unicode=True)
⎛          ⎡ 0   -ω₁₂⎤  ⎡ 0   -γ₁₂⎤⎞
⎜[ω₁, ω₂], ⎢         ⎥, ⎢         ⎥⎟
⎝          ⎣ω₂₁   0  ⎦  ⎣γ₂₁   0  ⎦⎠
"""
omega_level = [Symbol('omega_'+str(i+1), real=True) for i in range(Ne)]
if Ne > 9:
opening = "\\"
comma = ","
open_brace = "{"
close_brace = "}"
else:
opening = r""
comma = ""
open_brace = ""
close_brace = ""
omega = []; gamma = []
for i in range(Ne):
row_omega = []; row_gamma = []
for j in range(Ne):
if i == j:
om = 0; ga = 0
elif i > j:
om = Symbol(opening+r"omega_" +
open_brace+str(i+1)+comma+str(j+1) +
close_brace, real=True)
ga = Symbol(opening+r"gamma_" +
open_brace+str(i+1)+comma+str(j+1) +
close_brace, real=True)
elif explicitly_antisymmetric:
om = -Symbol(opening+r"omega_" +
open_brace+str(j+1)+comma+str(i+1) +
close_brace, real=True)
ga = -Symbol(opening+r"gamma_" +
open_brace+str(j+1)+comma+str(i+1) +
close_brace, real=True)
else:
om = Symbol(opening+r"omega_" +
open_brace+str(i+1)+comma+str(j+1) +
close_brace, real=True)
ga = Symbol(opening+r"gamma_" +
open_brace+str(i+1)+comma+str(j+1) +
close_brace, real=True)
row_omega += [om]
row_gamma += [ga]
omega += [row_omega]
gamma += [row_gamma]
omega = Matrix(omega)
gamma = Matrix(gamma)
return omega_level, omega, gamma | 0.001676 |
def _get_key_alias_from_cache(self, key_arn):
'''
Find a key's alias by looking up its key_arn in the KEY_METADATA
cache. This function will only work after a key has been lookedup by
its alias and is meant as a convenience function for turning an ARN
that's already been looked up back into its alias.
'''
for alias in self.KEY_METADATA:
if self.KEY_METADATA[alias]['KeyMetadata']['Arn'] == key_arn:
return alias
return None | 0.003868 |
def is_validated(self):
"""
Returns True if this instance is validated.
Note that resolving this property requires a DB query, so if you've a
very large amount of receipts you should prefetch (see django's
``select_related``) the ``validation`` field. Even so, a DB query *may*
be triggered.
If you need a large list of validated receipts, you should actually
filter them via a QuerySet::
Receipt.objects.filter(validation__result==RESULT_APPROVED)
:rtype: bool
"""
# Avoid the DB lookup if possible:
if not self.receipt_number:
return False
try:
return self.validation.result == ReceiptValidation.RESULT_APPROVED
except ReceiptValidation.DoesNotExist:
return False | 0.00241 |
def broadcast(self, gossip_message, message_type, exclude=None):
"""Broadcast gossip messages.
Broadcast the message to all peers unless they are in the excluded
list.
Args:
gossip_message: The message to be broadcast.
message_type: Type of the message.
exclude: A list of connection_ids that should be excluded from this
broadcast.
"""
with self._lock:
if exclude is None:
exclude = []
for connection_id in self._peers.copy():
if connection_id not in exclude and \
self._network.is_connection_handshake_complete(
connection_id):
self.send(
message_type,
gossip_message.SerializeToString(),
connection_id,
one_way=True) | 0.002116 |
def imageToColor(url: str, scale=200, mode='rgb'):
"""ε° url ζεηεΎηζηΊ―δΈΊδΈδΈͺι’θ²"""
from PIL import Image
import colorsys
if url:
response = urllib.request.urlopen(url)
img_buffer = io.BytesIO(response.read())
img = Image.open(img_buffer)
img = img.convert('RGBA')
img.thumbnail((scale, scale))
statistics = {'r': 0, 'g': 0, 'b': 0, 'coef': 0}
for cnt, (r, g, b, a) in img.getcolors(img.size[0] * img.size[1]):
hsv = colorsys.rgb_to_hsv(r / 255, g / 255, b / 255)
saturation = hsv[1] * 255
coefficient = (saturation * cnt * a) + 0.01 # avoid 0
statistics['r'] += coefficient * r
statistics['g'] += coefficient * g
statistics['b'] += coefficient * b
statistics['coef'] += coefficient
color = (
int(statistics['r'] / statistics['coef']),
int(statistics['g'] / statistics['coef']),
int(statistics['b'] / statistics['coef'])
)
if mode.lower() == 'rgb':
return color
elif mode.lower() == 'hex':
return "#%0.2X%0.2X%0.2X" % color
else:
return color
else:
return False | 0.001451 |
def index(ref_file, out_dir, data):
"""Create a STAR index in the defined reference directory.
"""
(ref_dir, local_file) = os.path.split(ref_file)
gtf_file = dd.get_gtf_file(data)
if not utils.file_exists(gtf_file):
raise ValueError("%s not found, could not create a star index." % (gtf_file))
if not utils.file_exists(out_dir):
with tx_tmpdir(data, os.path.dirname(out_dir)) as tx_out_dir:
num_cores = dd.get_cores(data)
cmd = ("STAR --genomeDir {tx_out_dir} --genomeFastaFiles {ref_file} "
"--runThreadN {num_cores} "
"--runMode genomeGenerate --sjdbOverhang 99 --sjdbGTFfile {gtf_file}")
do.run(cmd.format(**locals()), "Index STAR")
if os.path.exists(out_dir):
shutil.rmtree(out_dir)
shutil.move(tx_out_dir, out_dir)
return out_dir | 0.004484 |
def export(string, template=None, **extra):
"""
Decorator for registering view functions and adding
templates to it.
"""
def wrapped(f):
endpoint = (f.__module__ + "." + f.__name__)[16:]
if template is not None:
old_f = f
def f(**kwargs):
rv = old_f(**kwargs)
if not isinstance(rv, Response):
rv = TemplateResponse(template, **(rv or {}))
return rv
f.__name__ = old_f.__name__
f.__doc__ = old_f.__doc__
exported_views[endpoint] = (f, string, extra)
return f
return wrapped | 0.001543 |
def det_lognl(self, det):
"""Returns the log likelihood of the noise in the given detector.
Parameters
----------
det : str
The name of the detector.
Returns
-------
float :
The log likelihood of the noise in the requested detector.
"""
try:
return self.__det_lognls[det]
except AttributeError:
# hasn't been calculated yet, call lognl to calculate & store
self._lognl()
# now try returning
return self.__det_lognls[det] | 0.003413 |
def show(self, text):
"""
Write the text to the stream and flush immediately.
"""
self.stream.write(text)
self.stream.flush() | 0.011976 |
def context_exists(self, name):
"""Check if a given context exists."""
contexts = self.data['contexts']
for context in contexts:
if context['name'] == name:
return True
return False | 0.008299 |
def get_access_flags_string(self):
"""
Return the access flags string of the field
:rtype: string
"""
if self.access_flags_string == None:
self.access_flags_string = get_access_flags_string(
self.get_access_flags())
if self.access_flags_string == "":
self.access_flags_string = "0x%x" % self.get_access_flags()
return self.access_flags_string | 0.006579 |
def zones(self):
"""
:class:`list` of :class:`stravalib.model.ActivityZone` objects for this activity.
"""
if self._zones is None:
self.assert_bind_client()
self._zones = self.bind_client.get_activity_zones(self.id)
return self._zones | 0.010067 |
def _call(self, x, out=None):
"""Calculate the spatial gradient of ``x``."""
if out is None:
out = self.range.element()
x_arr = x.asarray()
ndim = self.domain.ndim
dx = self.domain.cell_sides
for axis in range(ndim):
with writable_array(out[axis]) as out_arr:
finite_diff(x_arr, axis=axis, dx=dx[axis], method=self.method,
pad_mode=self.pad_mode,
pad_const=self.pad_const,
out=out_arr)
return out | 0.00346 |
def bitwise_xor(self, t):
"""
Operation xor
:param t: The other operand.
"""
# Using same variables as in paper
s = self
new_interval = (s.bitwise_not().bitwise_or(t)).bitwise_not().bitwise_or(s.bitwise_or(t.bitwise_not()).bitwise_not())
return new_interval.normalize() | 0.008902 |
def get_xy_point_from_rgb(self, red_i, green_i, blue_i):
"""Returns an XYPoint object containing the closest available CIE 1931 x, y coordinates
based on the RGB input values."""
red = red_i / 255.0
green = green_i / 255.0
blue = blue_i / 255.0
r = ((red + 0.055) / (1.0 + 0.055))**2.4 if (red > 0.04045) else (red / 12.92)
g = ((green + 0.055) / (1.0 + 0.055))**2.4 if (green > 0.04045) else (green / 12.92)
b = ((blue + 0.055) / (1.0 + 0.055))**2.4 if (blue > 0.04045) else (blue / 12.92)
X = r * 0.664511 + g * 0.154324 + b * 0.162028
Y = r * 0.283881 + g * 0.668433 + b * 0.047685
Z = r * 0.000088 + g * 0.072310 + b * 0.986039
cx = X / (X + Y + Z)
cy = Y / (X + Y + Z)
# Check if the given XY value is within the colour reach of our lamps.
xy_point = XYPoint(cx, cy)
in_reach = self.check_point_in_lamps_reach(xy_point)
if not in_reach:
xy_point = self.get_closest_point_to_point(xy_point)
return xy_point | 0.005597 |
def diversity(layer):
"""Encourage diversity between each batch element.
A neural net feature often responds to multiple things, but naive feature
visualization often only shows us one. If you optimize a batch of images,
this objective will encourage them all to be different.
In particular, it calculates the correlation matrix of activations at layer
for each image, and then penalizes cosine similarity between them. This is
very similar to ideas in style transfer, except we're *penalizing* style
similarity instead of encouraging it.
Args:
layer: layer to evaluate activation correlations on.
Returns:
Objective.
"""
def inner(T):
layer_t = T(layer)
batch_n, _, _, channels = layer_t.get_shape().as_list()
flattened = tf.reshape(layer_t, [batch_n, -1, channels])
grams = tf.matmul(flattened, flattened, transpose_a=True)
grams = tf.nn.l2_normalize(grams, axis=[1,2], epsilon=1e-10)
return sum([ sum([ tf.reduce_sum(grams[i]*grams[j])
for j in range(batch_n) if j != i])
for i in range(batch_n)]) / batch_n
return inner | 0.007993 |
def get_single_int_pk_colname(table_: Table) -> Optional[str]:
"""
If a table has a single-field (non-composite) integer PK, this will
return its database column name; otherwise, None.
Note that it is legitimate for a database table to have both a composite
primary key and a separate ``IDENTITY`` (``AUTOINCREMENT``) integer field.
This function won't find such columns.
"""
n_pks = 0
int_pk_names = []
for col in table_.columns:
if col.primary_key:
n_pks += 1
if is_sqlatype_integer(col.type):
int_pk_names.append(col.name)
if n_pks == 1 and len(int_pk_names) == 1:
return int_pk_names[0]
return None | 0.001414 |
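# A hedged SQLAlchemy usage sketch (illustrative only; assumes
# is_sqlatype_integer recognizes sa.Integer):
import sqlalchemy as sa

metadata = sa.MetaData()
users = sa.Table(
    'users', metadata,
    sa.Column('id', sa.Integer, primary_key=True),
    sa.Column('name', sa.String(64)),
)
# get_single_int_pk_colname(users) -> 'id'
# A composite or non-integer primary key would yield None instead.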
def check_solver(self, kwargs_lens, kwargs_ps, kwargs_cosmo={}):
"""
test whether the image positions map back to the same source position
:param kwargs_lens:
:param kwargs_ps:
:return: Euclidean distance between the rayshooting of the image positions
"""
if self._solver is True:
image_x, image_y = kwargs_ps[0]['ra_image'], kwargs_ps[0]['dec_image']
image_x, image_y = self.real_image_positions(image_x, image_y, kwargs_cosmo)
dist = self._solver_module.check_solver(image_x, image_y, kwargs_lens)
return np.max(dist)
else:
return 0 | 0.009119 |
def get_sitetree():
"""Returns SiteTree (thread-singleton) object, implementing utility methods.
:rtype: SiteTree
"""
sitetree = getattr(_THREAD_LOCAL, _THREAD_SITETREE, None)
if sitetree is None:
sitetree = SiteTree()
setattr(_THREAD_LOCAL, _THREAD_SITETREE, sitetree)
return sitetree | 0.006098 |
async def status_by_zip(self, zip_code: str) -> dict:
"""Get symptom data for the provided ZIP code."""
try:
location = next((
d for d in await self.user_reports()
if d['zip'] == zip_code))
except StopIteration:
return {}
return await self.status_by_coordinates(
float(location['latitude']), float(location['longitude'])) | 0.004739 |
def is_country(self, text):
"""Check if a piece of text is in the list of countries"""
ct_list = self._just_cts.keys()
if text in ct_list:
return True
else:
return False | 0.008889 |
def _GetStat(self):
"""Retrieves information about the file entry.
Returns:
VFSStat: a stat object.
"""
stat_object = super(APFSFileEntry, self)._GetStat()
# File data stat information.
stat_object.size = self._fsapfs_file_entry.size
# Ownership and permissions stat information.
stat_object.mode = self._fsapfs_file_entry.file_mode & 0x0fff
stat_object.uid = self._fsapfs_file_entry.owner_identifier
stat_object.gid = self._fsapfs_file_entry.group_identifier
# File entry type stat information.
stat_object.type = self.entry_type
# Other stat information.
stat_object.ino = self._fsapfs_file_entry.identifier
stat_object.fs_type = 'APFS'
stat_object.is_allocated = True
return stat_object | 0.001299 |
def drop_pathlist(self, pathlist):
"""Drop path list"""
if pathlist:
files = ["r'%s'" % path for path in pathlist]
if len(files) == 1:
text = files[0]
else:
text = "[" + ", ".join(files) + "]"
if self.new_input_line:
self.on_new_line()
self.insert_text(text)
self.setFocus() | 0.004751 |
def query_params(*frb_fred_params):
"""
Decorator that pops all accepted parameters from method's kwargs and puts
them in the params argument. Modeled after elasticsearch-py client utils strategy.
See https://github.com/elastic/elasticsearch-py/blob/3400179153cc13b6ae2c26734337202569bdfd80/elasticsearch/client/utils.py
"""
def _wrapper(func):
@wraps(func)
def _wrapped(*args, **kwargs):
params = kwargs.pop('params', {})
for p in frb_fred_params:
if p in kwargs:
params[p] = kwargs.pop(p)
return func(*args,params=params,**kwargs)
return _wrapped
return _wrapper | 0.007267 |
def handle_request(self, environ, start_response):
"""Retrieves the route handler and calls the handler returning its the response
:param dict environ: The WSGI environment dictionary for the request
:param start_response:
:return: The WbResponse for the request
:rtype: WbResponse
"""
urls = self.url_map.bind_to_environ(environ)
try:
endpoint, args = urls.match()
# store original script_name (original prefix) before modifications are made
environ['pywb.app_prefix'] = environ.get('SCRIPT_NAME')
response = endpoint(environ, **args)
return response(environ, start_response)
except HTTPException as e:
redir = self._check_refer_redirect(environ)
if redir:
return redir(environ, start_response)
return e(environ, start_response)
except Exception as e:
if self.debug:
traceback.print_exc()
response = self.rewriterapp._error_response(environ, 'Internal Error: ' + str(e), '500 Server Error')
return response(environ, start_response) | 0.00423 |
def parse_for_simple_stems(output, skip_empty=False, skip_same_stems=True):
"""
Parses the output stem lines to produce a list with possible stems
for each word in the output.
:param skip_empty: set True to skip lines without stems (default is False)
:returns: a list of tuples, each containing an original text word and
a list of stems for the given word
"""
lines_with_stems = _get_lines_with_stems(output)
stems = list()
last_word = None
for line in lines_with_stems:
word, stem, _ = line.split("\t")
stem = stem if stem != '-' else None
if skip_empty and (stem is None):
continue
if last_word != word:
stems.append((word, []))
## append new stem only if not on list already
stem = None if skip_same_stems and stem in stems[-1][1] else stem
if stem is not None:
stems[-1][1].append(stem)
last_word = word
return stems | 0.00203 |
def MeshViewers(
shape=(1, 1), titlebar="Mesh Viewers", keepalive=False,
window_width=1280, window_height=960
):
"""Allows subplot-style inspection of primitives in multiple subwindows.
Args:
shape: a tuple indicating the number of vertical and horizontal windows requested
Returns: a list of lists of MeshViewer objects: one per window requested.
"""
if not test_for_opengl():
return Dummy()
mv = MeshViewerLocal(
shape=shape, titlebar=titlebar, uid=None, keepalive=keepalive,
window_width=window_width, window_height=window_height
)
return mv.get_subwindows() | 0.00311 |
def delta_encode(data, axis=-1, out=None):
"""Encode Delta."""
if isinstance(data, (bytes, bytearray)):
data = numpy.frombuffer(data, dtype='u1')
diff = numpy.diff(data, axis=0)
return numpy.insert(diff, 0, data[0]).tobytes()
dtype = data.dtype
if dtype.kind == 'f':
data = data.view('u%i' % dtype.itemsize)
diff = numpy.diff(data, axis=axis)
key = [slice(None)] * data.ndim
key[axis] = 0
diff = numpy.insert(diff, 0, data[tuple(key)], axis=axis)
if dtype.kind == 'f':
return diff.view(dtype)
return diff | 0.001704 |
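# A hedged sketch (not from the source) of the inverse operation: a cumulative
# sum along the same axis undoes the first differences, mirroring the
# float-to-unsigned view trick used in delta_encode above.
import numpy

def delta_decode(data, axis=-1, out=None):
    """Decode Delta (sketch of the inverse of delta_encode)."""
    if isinstance(data, (bytes, bytearray)):
        data = numpy.frombuffer(data, dtype='u1')
        return numpy.cumsum(data, axis=0, dtype='u1').tobytes()
    dtype = data.dtype
    if dtype.kind == 'f':
        data = data.view('u%i' % dtype.itemsize)
    result = numpy.cumsum(data, axis=axis, dtype=data.dtype)
    if dtype.kind == 'f':
        return result.view(dtype)
    return result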
def list_indexes(self):
"""Get a cursor over the index documents for this collection.
>>> for index in db.test.list_indexes():
... print(index)
...
SON([(u'v', 1), (u'key', SON([(u'_id', 1)])),
(u'name', u'_id_'), (u'ns', u'test.test')])
:Returns:
An instance of :class:`~pymongo.command_cursor.CommandCursor`.
.. versionadded:: 3.0
"""
codec_options = CodecOptions(SON)
coll = self.with_options(codec_options)
with self._socket_for_primary_reads() as (sock_info, slave_ok):
cmd = SON([("listIndexes", self.__name), ("cursor", {})])
if sock_info.max_wire_version > 2:
try:
cursor = self._command(sock_info, cmd, slave_ok,
ReadPreference.PRIMARY,
codec_options)["cursor"]
except OperationFailure as exc:
# Ignore NamespaceNotFound errors to match the behavior
# of reading from *.system.indexes.
if exc.code != 26:
raise
cursor = {'id': 0, 'firstBatch': []}
return CommandCursor(coll, cursor, sock_info.address)
else:
namespace = _UJOIN % (self.__database.name, "system.indexes")
res = helpers._first_batch(
sock_info, self.__database.name, "system.indexes",
{"ns": self.__full_name}, 0, slave_ok, codec_options,
ReadPreference.PRIMARY, cmd,
self.database.client._event_listeners)
data = res["data"]
cursor = {
"id": res["cursor_id"],
"firstBatch": data,
"ns": namespace,
}
# Note that a collection can only have 64 indexes, so we don't
# technically have to pass len(data) here. There will never be
# an OP_GET_MORE call.
return CommandCursor(
coll, cursor, sock_info.address, len(data)) | 0.000904 |
def r_dts_collection(self, objectId=None):
""" DTS Collection Metadata reply for given objectId
:param objectId: Collection Identifier
:return: JSON Format of DTS Collection
"""
try:
j = self.resolver.getMetadata(objectId=objectId).export(Mimetypes.JSON.DTS.Std)
j = jsonify(j)
j.status_code = 200
except NautilusError as E:
return self.dts_error(error_name=E.__class__.__name__, message=E.__doc__)
return j | 0.007813 |
def convert_table(shell_output, delimiter='\t|\s{2,}', output='dict'):
'''
a method to convert a STDOUT shell table into a python data structure
:param shell_output: string from STDOUT with headers
:param delimiter: string with regex pattern delimiting headers
:param output: string with type of structure to output (dict, list or csv)
:return: list of dictionaries or list of lists or string with csv format
'''
# retrieve header columns
import re
gap_pattern = re.compile(delimiter)
output_lines = shell_output.splitlines()
column_headers = gap_pattern.split(output_lines[0])
if '' in column_headers:
    column_headers.remove('')
# generate indices tuples
indices = []
for i in range(len(column_headers)):
if i + 1 < len(column_headers):
indices.append((
output_lines[0].find(column_headers[i]),
output_lines[0].find(column_headers[i + 1])
))
else:
indices.append((
output_lines[0].find(column_headers[i]),
-1
))
# add headers to output
python_list = []
csv_string = ''
if output == 'dict':
pass
elif output == 'list':
python_list.append(column_headers)
elif output == 'csv':
for i in range(len(column_headers)):
if i:
csv_string += ','
csv_string += column_headers[i]
else:
raise ValueError('output argument must be one of dict, list or csv values.')
# add rows to output
for i in range(1, len(output_lines)):
if output == 'dict':
row_details = {}
for j in range(len(column_headers)):
row_details[column_headers[j]] = output_lines[i][indices[j][0]:indices[j][1]].rstrip()
python_list.append(row_details)
elif output == 'list':
row_list = []
for j in range(len(column_headers)):
row_list.append(output_lines[i][indices[j][0]:indices[j][1]].rstrip())
python_list.append(row_list)
elif output == 'csv':
csv_string += '\n'
for j in range(len(column_headers)):
if j:
csv_string += ','
csv_string += output_lines[i][indices[j][0]:indices[j][1]].rstrip()
# return output
if csv_string:
return csv_string
return python_list | 0.005192 |
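# A hedged usage sketch with a fabricated two-column shell table; the exact
# row splitting depends on the delimiter regex and header positions above.
sample_output = (
    'NAME    STATUS\n'
    'web-1   Running\n'
    'db-1    Pending'
)
rows = convert_table(sample_output, output='dict')
# rows is a list of dicts keyed by the header names, one per data row.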
def insert(self, i, tab_index):
"""Insert the widget (at tab index) in the position i (index)."""
_id = id(self.editor.tabs.widget(tab_index))
self.history.insert(i, _id) | 0.010152 |
def shutdown(self):
'''Call the dbus proxy to start the shutdown.'''
if self._proxy:
os.sync()
self._proxy(*self._args) | 0.012579 |
def split_func(string):
"""
Take a string like 'requiredIf("arg_name")'
return the function name and the argument:
(requiredIf, arg_name)
"""
ind = string.index("(")
return string[:ind], string[ind+1:-1].strip('"') | 0.004132 |
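# A quick check of the behaviour described in the docstring:
name, arg = split_func('requiredIf("arg_name")')
assert (name, arg) == ('requiredIf', 'arg_name')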
def _makeResult(self):
"""Return a Result that doesn't print dots.
Nose's ResultProxy will wrap it, and other plugins can still print
stuff---but without smashing into our progress bar, care of
ProgressivePlugin's stderr/out wrapping.
"""
return ProgressiveResult(self._cwd,
self._totalTests,
self.stream,
config=self.config) | 0.004219 |
def subscribe(request):
"""
Takes POST data (``email`` and optional ``next`` fields), submitting the ``email`` field to
the newsletter provider for subscription to a mailing list, and redirecting the user to the value
of ``next`` (this can also be provided in the querystring), or the homepage if no follow-on URL is
supplied, with a message in the ``django.contrib.messages`` queue to let them know it was successful.
If the email address is invalid or the subscription process was unsuccessful, the user is redirected
to the follow-on URL and a message placed in the ``django.contrib.messages`` queue letting them know
what the issue was.
"""
email = request.POST.get('email')
next = request.POST.get('next', request.GET.get('next', '/'))
valid = False
if not email:
messages.error(request, u'Please enter your email address')
else:
try:
validate_email(email)
valid = True
except ValidationError:
messages.error(request, u'Please enter a valid email address')
if valid:
shortcuts.subscribe(email, list_id = 'newsletter')
messages.success(request, u'Thanks for subscribing to our newsletter.')
return HttpResponseRedirect(next) | 0.010828 |
def solvent_per_layer(self):
"""Determine the number of solvent molecules per single layer. """
if self._solvent_per_layer:
return self._solvent_per_layer
assert not (self.solvent_per_lipid is None and self.n_solvent is None)
if self.solvent_per_lipid is not None:
assert self.n_solvent is None
self._solvent_per_layer = self.n_lipids_per_layer * self.solvent_per_lipid
elif self.n_solvent is not None:
assert self.solvent_per_lipid is None
self._solvent_per_layer = self.n_solvent / 2
return self._solvent_per_layer | 0.004792 |
def evaluate(self, node, filename=None):
"""
Evaluate a source string or node, using ``filename`` when
displaying errors.
"""
if isinstance(node, string_types):
self.source = node
kwargs = {'mode': 'eval'}
if filename:
kwargs['filename'] = filename
try:
node = ast.parse(node, **kwargs)
except SyntaxError as e:
s = self.get_fragment(e.offset)
raise SyntaxError('syntax error %s' % s)
node_type = node.__class__.__name__.lower()
handler = self.get_handler(node_type)
if handler is None:
if self.source is None:
s = '(source not available)'
else:
s = self.get_fragment(node.col_offset)
raise SyntaxError("don't know how to evaluate %r %s" % (
node_type, s))
return handler(node) | 0.00209 |
def destroy(self):
"""
destroy a client.
"""
logger.info("destroying snap7 client")
if self.library:
return self.library.Cli_Destroy(byref(self.pointer)) | 0.009756 |
def exp(vector):
"""
Computes a per-element exponent of the passed-in vector.
Args:
vector (TYPE): Description
"""
weld_type = None
if isinstance(vector, LazyOpResult):
weld_type = vector.weld_type
vector = vector.expr
elif isinstance(vector, np.ndarray):
weld_type = numpy_weld_impl.numpy_to_weld_type_mapping[
str(vector.dtype)]
return NumpyArrayWeld(numpy_weld_impl.exp(vector, weld_type), WeldDouble()) | 0.00207 |
def _disks_equal(disk1, disk2):
'''
Test if two disk elements should be considered like the same device
'''
target1 = disk1.find('target')
target2 = disk2.find('target')
source1 = ElementTree.tostring(disk1.find('source')) if disk1.find('source') is not None else None
source2 = ElementTree.tostring(disk2.find('source')) if disk2.find('source') is not None else None
return source1 == source2 and \
target1 is not None and target2 is not None and \
target1.get('bus') == target2.get('bus') and \
disk1.get('device', 'disk') == disk2.get('device', 'disk') and \
target1.get('dev') == target2.get('dev') | 0.004498 |
def map2set(data, relation):
"""
EXPECTING A is_data(relation) THAT MAPS VALUES TO lists
THE LISTS ARE EXPECTED TO POINT TO MEMBERS OF A SET
A set() IS RETURNED
"""
if data == None:
return Null
if isinstance(relation, Data):
Log.error("Does not accept a Data")
if is_data(relation):
try:
# relation[d] is expected to be a list
# return set(cod for d in data for cod in relation[d])
output = set()
for d in data:
for cod in relation.get(d, []):
output.add(cod)
return output
except Exception as e:
Log.error("Expecting a dict with lists in codomain", e)
else:
try:
# relation[d] is expected to be a list
# return set(cod for d in data for cod in relation[d])
output = set()
for d in data:
cod = relation(d)
if cod == None:
continue
output.add(cod)
return output
except Exception as e:
Log.error("Expecting a dict with lists in codomain", e)
return Null | 0.002525 |
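# Illustrative calls for the two branches above (sample data only):
# dict relation -- values are lists whose members land in the output set
codomain = map2set(['a', 'b'], {'a': [1, 2], 'b': [2, 3]})   # {1, 2, 3}
# callable relation -- each datum maps to one codomain member (None skipped)
odd_flags = map2set([1, 2, 3], lambda d: d % 2 or None)      # {1}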
def get_queryset(self):
"""Only display unpublished content to authenticated users, filter by
query parameter if present."""
# Get base queryset from DispatchPublishableMixin
queryset = self.get_publishable_queryset()
queryset = queryset.order_by('-updated_at')
# Optionally filter by a query parameter
q = self.request.query_params.get('q')
if q:
queryset = queryset.filter(title__icontains=q)
return queryset | 0.004008 |
def local_attention_1d(q, k, v, block_length=128, filter_width=100, name=None):
"""Strided block local self-attention.
The sequence is divided into blocks of length block_length. Attention for a
given query position can see all memory positions in the corresponding block
and filter_width many positions to the left and right of the block.
Args:
q: a Tensor with shape [batch, heads, length, depth_k]
k: a Tensor with shape [batch, heads, length, depth_k]
v: a Tensor with shape [batch, heads, length, depth_v]
block_length: an integer
filter_width: an integer indicating how much to look left and right of the
block.
name: an optional string
Returns:
a Tensor of shape [batch, heads, length, depth_v]
"""
with tf.variable_scope(
name, default_name="local_self_attention_1d", values=[q, k, v]):
# Check that q, k, v have the same shape except in their depth dimension.
q.get_shape()[:-1].assert_is_compatible_with(k.get_shape()[:-1])
q.get_shape()[:-1].assert_is_compatible_with(v.get_shape()[:-1])
batch_size, num_heads, original_length, _ = common_layers.shape_list(q)
# Pad query, key, value to ensure multiple of corresponding lengths.
def pad_to_multiple(x, pad_length):
x_length = common_layers.shape_list(x)[2]
return tf.pad(x, [[0, 0], [0, 0], [0, -x_length % pad_length], [0, 0]])
def pad_l_and_r(x, pad_length):
return tf.pad(x, [[0, 0], [0, 0], [pad_length, pad_length], [0, 0]])
# Set up query blocks.
# [batch, heads, blocks_q, block_length, depth_k]
q = pad_to_multiple(q, block_length)
q = reshape_by_blocks(q, common_layers.shape_list(q), block_length)
total_query_blocks = common_layers.shape_list(q)[2]
# Set up key and value blocks.
# [batch, heads, blocks_k, block_length, depth_k]
blocks_per_filter_width = filter_width // block_length
remaining_items = filter_width % block_length
k = pad_to_multiple(k, block_length)
v = pad_to_multiple(v, block_length)
k = pad_l_and_r(k, filter_width + block_length - remaining_items)
v = pad_l_and_r(v, filter_width + block_length - remaining_items)
k = reshape_by_blocks(k, common_layers.shape_list(k), block_length)
v = reshape_by_blocks(v, common_layers.shape_list(v), block_length)
total_kv_blocks = common_layers.shape_list(k)[2]
slices = []
# prepare the left-most and right-most partial blocks if needed
if remaining_items:
first_partial_block_k = tf.slice(
k, [0, 0, 0, block_length - remaining_items, 0],
[-1, -1, total_query_blocks, -1, -1])
first_partial_block_v = tf.slice(
v, [0, 0, 0, block_length - remaining_items, 0],
[-1, -1, total_query_blocks, -1, -1])
last_partial_block_k = tf.slice(
k, [0, 0, total_kv_blocks - total_query_blocks, 0, 0],
[-1, -1, -1, remaining_items, -1])
last_partial_block_v = tf.slice(
v, [0, 0, total_kv_blocks - total_query_blocks, 0, 0],
[-1, -1, -1, remaining_items, -1])
slices.append((first_partial_block_k, first_partial_block_v))
slices.append((last_partial_block_k, last_partial_block_v))
# Prepare the rest of the blocks
first_block_index = 1 if remaining_items else 0
attention_blocks = 2 * blocks_per_filter_width + 1
for i in range(first_block_index, attention_blocks + first_block_index):
block_k = tf.slice(k, [0, 0, i, 0, 0],
[-1, -1, total_query_blocks, -1, -1])
block_v = tf.slice(v, [0, 0, i, 0, 0],
[-1, -1, total_query_blocks, -1, -1])
slices.append((block_k, block_v))
# [batch, heads, blocks_q, block_length + 2 * filter_width, depth_k]
k = tf.concat([s[0] for s in slices], axis=3)
v = tf.concat([s[1] for s in slices], axis=3)
attention_bias = tf.expand_dims(embedding_to_padding(k) * -1e9, axis=-2)
depth_v = common_layers.shape_list(v)[-1]
output = dot_product_attention(
q,
k,
v,
attention_bias,
dropout_rate=0.,
name="local_1d",
make_image_summary=False)
output = tf.reshape(output, [batch_size, num_heads, -1, depth_v])
# Remove the padding if introduced.
output = tf.slice(output, [0, 0, 0, 0], [-1, -1, original_length, -1])
output.set_shape([None if isinstance(dim, tf.Tensor) else dim for dim in
(batch_size, num_heads, original_length, depth_v)])
return output | 0.003547 |
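The key bookkeeping above is padding the length dimension up to a multiple of block_length and reshaping it into [blocks, block_length]; a small NumPy sketch of just that step (the attention math itself is left out) might be:

import numpy as np

block_length = 4
x = np.arange(10)                     # a length-10 sequence
pad = -len(x) % block_length          # 2 positions needed to reach a multiple of 4
blocks = np.pad(x, (0, pad)).reshape(-1, block_length)
print(blocks)
# [[0 1 2 3]
#  [4 5 6 7]
#  [8 9 0 0]]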
def proposals(ctx, account):
""" List proposals
"""
proposals = Proposals(account)
t = PrettyTable(
[
"id",
"expiration",
"proposer",
"required approvals",
"available approvals",
"review period time",
"proposal",
]
)
t.align = "l"
for proposal in proposals:
if proposal.proposer:
proposer = Account(proposal.proposer, peerplays_instance=ctx.peerplays)[
"name"
]
else:
proposer = "n/a"
t.add_row(
[
proposal["id"],
proposal["expiration_time"],
proposer,
[
Account(x)["name"]
for x in (
proposal["required_active_approvals"]
+ proposal["required_owner_approvals"]
)
],
json.dumps(
[Account(x)["name"] for x in proposal["available_active_approvals"]]
+ proposal["available_key_approvals"]
+ proposal["available_owner_approvals"],
indent=1,
),
proposal.get("review_period_time", None),
json.dumps(proposal["proposed_transaction"], indent=4),
]
)
click.echo(str(t)) | 0.002092 |
def hkdf_extract(salt, input_key_material, hash=hashlib.sha512):
'''
Extract a pseudorandom key suitable for use with hkdf_expand
from the input_key_material and a salt using HMAC with the
provided hash (default SHA-512).
salt should be a random, application-specific byte string. If
    salt is None or the empty string, an all-zeros string of the same
    length as the hash's digest size will be used instead, per the RFC.
See the HKDF draft RFC and paper for usage notes.
'''
hash_len = hash().digest_size
if salt == None or len(salt) == 0:
salt = bytearray((0,) * hash_len)
return hmac.new(bytes(salt), buffer(input_key_material), hash).digest() | 0.025875 |
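For reference, the extract step is just PRK = HMAC-Hash(salt, IKM) per RFC 5869; since the snippet above is Python 2 (note the buffer() call), a hedged Python 3 equivalent might be:

import hashlib
import hmac
import os

def hkdf_extract_py3(salt, input_key_material, hash=hashlib.sha512):
    if not salt:
        salt = bytes(hash().digest_size)   # all-zeros salt, one byte per digest byte
    return hmac.new(salt, input_key_material, hash).digest()

prk = hkdf_extract_py3(os.urandom(16), b"initial keying material")
print(len(prk))   # 64 for SHA-512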
def setKeySequenceCounter(self, iKeySequenceValue):
""" set the Key sequence counter corresponding to Thread Network master key
Args:
iKeySequenceValue: key sequence value
Returns:
True: successful to set the key sequence
False: fail to set the key sequence
"""
print '%s call setKeySequenceCounter' % self.port
print iKeySequenceValue
try:
cmd = WPANCTL_CMD + 'setprop Network:KeyIndex %s' % str(iKeySequenceValue)
if self.__sendCommand(cmd)[0] != 'Fail':
time.sleep(1)
return True
else:
return False
except Exception, e:
ModuleHelper.WriteIntoDebugLogger('setKeySequenceCounter() Error: ' + str(e)) | 0.006266 |
def get_obj_values(obj, translated_field_names):
"""
    Get the translated field values from the translatable fields of an object.
    :param obj: object to read the field values from
    :param translated_field_names: iterable of translatable field names
    :return: dict mapping each field name to its value on the object
    """
    # convert the set of translated field names to a list
fields = list(translated_field_names)
values = {field: getattr(obj, field) for field in fields}
return values | 0.00495 |
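A quick hypothetical usage with a stand-in object in place of a Django model instance:

class Article:
    title = "Hello"
    summary = "World"

print(get_obj_values(Article(), {"title", "summary"}))
# {'title': 'Hello', 'summary': 'World'} (key order follows set iteration)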
def exampleRand(S, A):
"""WARNING: This will delete a database with the same name as 'db'."""
db = "MDP-%sx%s.db" % (S, A)
if os.path.exists(db):
os.remove(db)
conn = sqlite3.connect(db)
with conn:
c = conn.cursor()
cmd = '''
CREATE TABLE info (name TEXT, value INTEGER);
INSERT INTO info VALUES('states', %s);
INSERT INTO info VALUES('actions', %s);''' % (S, A)
c.executescript(cmd)
for a in range(1, A+1):
cmd = '''
CREATE TABLE transition%s (row INTEGER, col INTEGER, prob REAL);
CREATE TABLE reward%s (state INTEGER PRIMARY KEY ASC, val REAL);
''' % (a, a)
c.executescript(cmd)
cmd = "INSERT INTO reward%s(val) VALUES(?)" % a
c.executemany(cmd, zip(random(S).tolist()))
for s in xrange(1, S+1):
# to be usefully represented as a sparse matrix, the number of
            # nonzero entries should be less than 1/3 of the dimension of the
# matrix, so S/3
n = randint(1, S//3)
# timeit [90894] * 20330
# ==> 10000 loops, best of 3: 141 us per loop
# timeit (90894*np.ones(20330, dtype=int)).tolist()
# ==> 1000 loops, best of 3: 548 us per loop
col = (permutation(arange(1,S+1))[0:n]).tolist()
val = random(n)
val = (val / val.sum()).tolist()
cmd = "INSERT INTO transition%s VALUES(?, ?, ?)" % a
c.executemany(cmd, zip([s] * n, col, val))
cmd = "CREATE UNIQUE INDEX Pidx%s ON transition%s (row, col);" % (a, a)
c.execute(cmd)
    # return the name of the database
return db | 0.002782 |
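Assuming the function is importable (it targets Python 2, note xrange), the generated database can be read back with plain sqlite3, e.g. to rebuild one sparse transition row:

import sqlite3

db = exampleRand(10, 3)                    # 10 states, 3 actions
conn = sqlite3.connect(db)
c = conn.cursor()
c.execute("SELECT col, prob FROM transition1 WHERE row = 1")
row = dict(c.fetchall())                   # sparse row as {column: probability}
print(sum(row.values()))                   # ~1.0, each row is normalised
conn.close()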
def training_loop(hparams, output_dir, report_fn=None, report_metric=None):
"""Run the main training loop."""
if report_fn:
assert report_metric is not None
# Directories
subdirectories = [
"data", "tmp", "world_model", ("world_model", "debug_videos"),
"policy", "eval_metrics"
]
directories = setup_directories(output_dir, subdirectories)
epoch = -1
data_dir = directories["data"]
env = rl_utils.setup_env(
hparams, batch_size=hparams.real_batch_size,
max_num_noops=hparams.max_num_noops,
rl_env_max_episode_steps=hparams.rl_env_max_episode_steps
)
env.start_new_epoch(epoch, data_dir)
if hparams.wm_policy_param_sharing:
policy_model_dir = directories["world_model"]
else:
policy_model_dir = directories["policy"]
learner = rl_utils.LEARNERS[hparams.base_algo](
hparams.frame_stack_size, policy_model_dir,
policy_model_dir, hparams.epochs
)
# Timing log function
log_relative_time = make_relative_timing_fn()
# Per-epoch state
epoch_metrics = []
metrics = {}
# Collect data from the real environment.
policy_model_dir = directories["policy"]
tf.logging.info("Initial training of the policy in real environment.")
train_agent_real_env(env, learner, hparams, epoch)
metrics["mean_reward/train/clipped"] = rl_utils.compute_mean_reward(
env.current_epoch_rollouts(), clipped=True
)
tf.logging.info("Mean training reward (initial): {}".format(
metrics["mean_reward/train/clipped"]
))
env.generate_data(data_dir)
eval_metrics_writer = tf.summary.FileWriter(
directories["eval_metrics"]
)
world_model_steps_num = 0
for epoch in range(hparams.epochs):
log = make_log_fn(epoch, log_relative_time)
# Train world model
log("Training world model")
world_model_steps_num = train_world_model(
env, data_dir, directories["world_model"], hparams,
world_model_steps_num, epoch
)
# Train agent
log("Training policy in simulated environment.")
train_agent(env, learner, directories["world_model"], hparams, epoch)
env.start_new_epoch(epoch, data_dir)
# Train agent on real env (short)
log("Training policy in real environment.")
train_agent_real_env(env, learner, hparams, epoch)
if hparams.stop_loop_early:
return 0.0
env.generate_data(data_dir)
metrics = load_metrics(directories["eval_metrics"], epoch)
if metrics:
# Skip eval if metrics have already been written for this epoch. Otherwise
# we'd overwrite them with wrong data.
log("Metrics found for this epoch, skipping evaluation.")
else:
metrics["mean_reward/train/clipped"] = rl_utils.compute_mean_reward(
env.current_epoch_rollouts(), clipped=True
)
log("Mean training reward: {}".format(
metrics["mean_reward/train/clipped"]
))
eval_metrics = rl_utils.evaluate_all_configs(hparams, policy_model_dir)
log("Agent eval metrics:\n{}".format(pprint.pformat(eval_metrics)))
metrics.update(eval_metrics)
if hparams.eval_world_model:
debug_video_path = os.path.join(
directories["world_model", "debug_videos"],
"{}.avi".format(env.current_epoch)
)
wm_metrics = rl_utils.evaluate_world_model(
env, hparams, directories["world_model"], debug_video_path
)
log("World model eval metrics:\n{}".format(pprint.pformat(wm_metrics)))
metrics.update(wm_metrics)
rl_utils.summarize_metrics(eval_metrics_writer, metrics, epoch)
# Report metrics
if report_fn:
if report_metric == "mean_reward":
metric_name = rl_utils.get_metric_name(
sampling_temp=hparams.eval_sampling_temps[0],
max_num_noops=hparams.eval_max_num_noops,
clipped=False
)
report_fn(eval_metrics[metric_name], epoch)
else:
report_fn(eval_metrics[report_metric], epoch)
epoch_metrics.append(metrics)
# Return the evaluation metrics from the final epoch
return epoch_metrics[-1] | 0.011438 |
def run(self):
"""Continuously read data from the source and attempt to parse a valid
message from the buffer of bytes. When a message is parsed, passes it
off to the callback if one is set.
"""
message_buffer = b""
while self.running:
try:
message_buffer += self.source.read_logs()
except DataSourceError as e:
if self.running:
LOG.warn("Can't read logs from data source -- stopping: %s", e)
break
except NotImplementedError as e:
LOG.info("%s doesn't support logging" % self)
break
while True:
if "\x00" not in message_buffer:
break
record, _, remainder = message_buffer.partition(b"\x00")
self.record(record)
message_buffer = remainder | 0.003264 |
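The framing logic amounts to splitting a growing byte buffer on NUL delimiters; a self-contained sketch of that inner loop:

message_buffer = b""
for chunk in (b'{"a": 1}\x00{"b"', b': 2}\x00'):   # simulated source.read_logs() output
    message_buffer += chunk
    while b"\x00" in message_buffer:
        record, _, message_buffer = message_buffer.partition(b"\x00")
        print(record)   # b'{"a": 1}' then b'{"b": 2}'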
def redirect_to_terms_accept(current_path='/', slug='default'):
"""Redirect the user to the terms and conditions accept page."""
redirect_url_parts = list(urlparse(ACCEPT_TERMS_PATH))
if slug != 'default':
redirect_url_parts[2] += slug
querystring = QueryDict(redirect_url_parts[4], mutable=True)
querystring[TERMS_RETURNTO_PARAM] = current_path
redirect_url_parts[4] = querystring.urlencode(safe='/')
return HttpResponseRedirect(urlunparse(redirect_url_parts)) | 0.002012 |
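The URL construction itself is plain urllib machinery; a Django-free sketch of the same idea (the ACCEPT_TERMS_PATH value and the returnto parameter name are assumptions here, not the app's actual settings):

from urllib.parse import urlencode, urlparse, urlunparse

ACCEPT_TERMS_PATH = "/terms/accept/"        # assumed path

def terms_accept_url(current_path="/", slug="default"):
    parts = list(urlparse(ACCEPT_TERMS_PATH))
    if slug != "default":
        parts[2] += slug                    # append the slug to the path
    parts[4] = urlencode({"returnto": current_path})
    return urlunparse(parts)

print(terms_accept_url("/dashboard/", slug="privacy"))
# /terms/accept/privacy?returnto=%2Fdashboard%2F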
def getcombovalue(self, window_name, object_name):
"""
        Get the currently selected combobox value.
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@return: selected item on success, else LdtpExecutionError on failure.
@rtype: string
"""
object_handle = self._get_object_handle(window_name, object_name)
if not object_handle.AXEnabled:
raise LdtpServerException(u"Object %s state disabled" % object_name)
return self._get_title(object_handle) | 0.006443 |
def dev_from_index(self, if_index):
"""Return interface name from interface index"""
try:
if_index = int(if_index) # Backward compatibility
return next(iface for iface in six.itervalues(self)
if iface.win_index == if_index)
except (StopIteration, RuntimeError):
if str(if_index) == "1":
# Test if the loopback interface is set up
if isinstance(scapy.consts.LOOPBACK_INTERFACE, NetworkInterface): # noqa: E501
return scapy.consts.LOOPBACK_INTERFACE
raise ValueError("Unknown network interface index %r" % if_index) | 0.003017 |
def save(self, filename=None):
""" Saves a constructed Morse-Smale Complex in json file
@ In, filename, a filename for storing the hierarchical
merging of features and the base level partitions of the
data
"""
if filename is None:
filename = "morse_smale_complex.json"
with open(filename, "w") as fp:
fp.write(self.to_json()) | 0.004796 |