def construct_entry_with_release(focus, issues, manager, log, releases, rest):
"""
Releases 'eat' the entries in their line's list and get added to the
final data structure. They also inform new release-line 'buffers'.
Release lines, once the release obj is removed, should be empty or a
comma-separated list of issue numbers.
"""
log("release for line %r" % focus.minor)
# Check for explicitly listed issues first
explicit = None
if rest[0].children:
explicit = [x.strip() for x in rest[0][0].split(',')]
# Do those by themselves since they override all other logic
if explicit:
log("Explicit issues requested: %r" % (explicit,))
# First scan global issue dict, dying if not found
missing = [i for i in explicit if i not in issues]
if missing:
raise ValueError(
"Couldn't find issue(s) #{} in the changelog!".format(
', '.join(missing)))
# Obtain the explicitly named issues from global list
entries = []
for i in explicit:
for flattened_issue_item in itertools.chain(issues[i]):
entries.append(flattened_issue_item)
# Create release
log("entries in this release: %r" % (entries,))
releases.append({
'obj': focus,
'entries': entries,
})
# Introspect these entries to determine which buckets they should get
# removed from (it's not "all of them"!)
for obj in entries:
if obj.type == 'bug':
# Major bugfix: remove from unreleased_feature
if obj.major:
log("Removing #%s from unreleased" % obj.number)
# TODO: consider making a LineManager method somehow
manager[focus.family]['unreleased_feature'].remove(obj)
# Regular bugfix: remove from bucket for this release's
# line + unreleased_bugfix
else:
if obj in manager[focus.family]['unreleased_bugfix']:
log("Removing #%s from unreleased" % obj.number)
manager[focus.family]['unreleased_bugfix'].remove(obj)
if obj in manager[focus.family][focus.minor]:
log("Removing #%s from %s" % (obj.number, focus.minor))
manager[focus.family][focus.minor].remove(obj)
# Regular feature/support: remove from unreleased_feature
# Backported feature/support: remove from bucket for this
# release's line (if applicable) + unreleased_feature
else:
log("Removing #%s from unreleased" % obj.number)
manager[focus.family]['unreleased_feature'].remove(obj)
if obj in manager[focus.family].get(focus.minor, []):
manager[focus.family][focus.minor].remove(obj)
# Implicit behavior otherwise
else:
# Unstable prehistory -> just dump 'unreleased' and continue
if manager.unstable_prehistory:
# TODO: need to continue making LineManager actually OO, i.e. do
# away with the subdicts + keys, move to sub-objects with methods
# answering questions like "what should I give you for a release"
# or whatever
log("in unstable prehistory, dumping 'unreleased'")
releases.append({
'obj': focus,
# NOTE: explicitly dumping 0, not focus.family, since this
# might be the last pre-historical release and thus not 0.x
'entries': manager[0]['unreleased'][:],
})
manager[0]['unreleased'] = []
# If this isn't a 0.x release, it signals end of prehistory, make a
# new release bucket (as is also done below in regular behavior).
# Also acts like a sentinel that prehistory is over.
if focus.family != 0:
manager[focus.family][focus.minor] = []
# Regular behavior from here
else:
# New release line/branch detected. Create it & dump unreleased
# features.
if focus.minor not in manager[focus.family]:
log("not seen prior, making feature release & bugfix bucket")
manager[focus.family][focus.minor] = []
# TODO: this used to explicitly say "go over everything in
# unreleased_feature and dump if it's feature, support or major
# bug". But what the hell else would BE in unreleased_feature?
# Why not just dump the whole thing??
#
# Dump only the items in the bucket whose family this release
# object belongs to, i.e. 1.5.0 should only nab the 1.0
# family's unreleased feature items.
releases.append({
'obj': focus,
'entries': manager[focus.family]['unreleased_feature'][:],
})
manager[focus.family]['unreleased_feature'] = []
# Existing line -> empty out its bucket into new release.
# Skip 'major' bugs as those "belong" to the next release (and will
# also be in 'unreleased_feature' - so safe to nuke the entire
# line)
else:
log("pre-existing, making bugfix release")
# TODO: as in other branch, I don't get why this wasn't just
# dumping the whole thing - why would major bugs be in the
# regular bugfix buckets?
entries = manager[focus.family][focus.minor][:]
releases.append({'obj': focus, 'entries': entries})
manager[focus.family][focus.minor] = []
# Clean out the items we just released from
# 'unreleased_bugfix'. (Can't nuke it because there might
# be some unreleased bugs for other release lines.)
for x in entries:
if x in manager[focus.family]['unreleased_bugfix']:
                    manager[focus.family]['unreleased_bugfix'].remove(x)
def extract_cosponsors(bill):
"""
    Return a list of lists relating cosponsors to legislation.
"""
logger.debug("Extracting Cosponsors")
cosponsor_map = []
cosponsors = bill.get('cosponsors', [])
bill_id = bill.get('bill_id', None)
for co in cosponsors:
co_list = []
co_list.append(co.get('thomas_id'))
co_list.append(bill_id)
co_list.append(co.get('district'))
co_list.append(co.get('state'))
cosponsor_map.append(co_list)
    logger.debug("End Extracting Cosponsors")
    return cosponsor_map
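# Usage sketch with a hypothetical bill record; the field names simply mirror
# what the function above reads and are not tied to any particular API schema
# (the module-level `logger` is assumed to be configured elsewhere):
example_bill = {
    'bill_id': 'hr1234-115',
    'cosponsors': [{'thomas_id': '01234', 'district': '5', 'state': 'NY'}],
}
assert extract_cosponsors(example_bill) == [['01234', 'hr1234-115', '5', 'NY']]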
def bvlpdu_contents(self, use_dict=None, as_class=dict):
"""Return the contents of an object as a dict."""
return key_value_contents(use_dict=use_dict, as_class=as_class,
key_values=(
('function', 'RegisterForeignDevice'),
('ttl', self.bvlciTimeToLive),
                              ))
def uid_gid(user, group, fd=None, path=None):
'''Get uid and gid from either uid/gid, user name/group name, or from the
environment of the calling process, or optionally from an fd, or
optionally from a path'''
type_msg = u'{0} must be a string or integer, not: {1}'
nosuch_msg = u'no such {0}: {1}'
if fd is not None and path is not None:
raise ValueError(u'received path and fd arguments, need one or'\
u' neither, not both.')
# -1 is don't change for chown/fchown
user = -1 if user is None else user
group = -1 if group is None else group
try:
user = int(user)
except (TypeError, ValueError):
try:
user = pwd.getpwnam(user).pw_uid
except TypeError:
raise TypeError(type_msg.format(u'user', user.__class__.__name__))
except KeyError:
raise FSQEnvError(errno.EINVAL, nosuch_msg.format(u'user', user))
except TypeError:
raise TypeError(type_msg.format(u'group', group.__class__.__name__))
try:
group = int(group)
except ValueError:
try:
group = grp.getgrnam(group).gr_gid
except TypeError:
raise TypeError(type_msg.format(u'group',
group.__class__.__name__))
except KeyError:
raise FSQEnvError(errno.EINVAL, nosuch_msg.format(u'group',
group))
except TypeError:
raise TypeError(type_msg.format(u'group', group.__class__.__name__))
    return user, group
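# Usage sketch for the resolver above. Leaving both arguments as None returns
# the (-1, -1) sentinel that os.chown() treats as "leave ownership unchanged";
# name lookups such as uid_gid('root', 'root') -> (0, 0) additionally assume a
# POSIX system where those pwd/grp entries exist.
assert uid_gid(None, None) == (-1, -1)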
def handle_async_gen(gen: Any, gen_obj: Any) -> Any:
"""
    Handle an async generator.
"""
if gen is None:
return None
if asyncio.iscoroutine(gen):
try:
temp = yield from gen
gen_obj.send(temp)
return
except Exception as error:
try:
gen = gen_obj.throw(error)
return (yield from handle_async_gen(gen, gen_obj))
except StopIteration:
return None
else:
        return gen
def _build_arguments(self):
"""
build arguments for command.
"""
self._parser.add_argument(
'image_name',
metavar='IMAGE_NAME',
type=six.text_type,
help='Name of the image example: \"namespace/repository\"'
        )
def parse_zone(zonefile=None, zone=None):
'''
Parses a zone file. Can be passed raw zone data on the API level.
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.parse_zone /var/lib/named/example.com.zone
'''
if zonefile:
try:
with salt.utils.files.fopen(zonefile, 'r') as fp_:
zone = salt.utils.stringutils.to_unicode(fp_.read())
except Exception:
pass
if not zone:
return 'Error: Zone data was not found'
zonedict = {}
mode = 'single'
for line in zone.splitlines():
comps = line.split(';')
line = comps[0].strip()
if not line:
continue
comps = line.split()
if line.startswith('$'):
zonedict[comps[0].replace('$', '')] = comps[1]
continue
if '(' in line and ')' not in line:
mode = 'multi'
multi = ''
if mode == 'multi':
multi += ' {0}'.format(line)
if ')' in line:
mode = 'single'
line = multi.replace('(', '').replace(')', '')
else:
continue
if 'ORIGIN' in zonedict:
comps = line.replace('@', zonedict['ORIGIN']).split()
else:
comps = line.split()
if 'SOA' in line:
if comps[1] != 'IN':
comps.pop(1)
zonedict['ORIGIN'] = comps[0]
zonedict['NETWORK'] = comps[1]
zonedict['SOURCE'] = comps[3]
zonedict['CONTACT'] = comps[4].replace('.', '@', 1)
zonedict['SERIAL'] = comps[5]
zonedict['REFRESH'] = _to_seconds(comps[6])
zonedict['RETRY'] = _to_seconds(comps[7])
zonedict['EXPIRE'] = _to_seconds(comps[8])
zonedict['MINTTL'] = _to_seconds(comps[9])
continue
if comps[0] == 'IN':
comps.insert(0, zonedict['ORIGIN'])
if not comps[0].endswith('.') and 'NS' not in line:
comps[0] = '{0}.{1}'.format(comps[0], zonedict['ORIGIN'])
if comps[2] == 'NS':
zonedict.setdefault('NS', []).append(comps[3])
elif comps[2] == 'MX':
if 'MX' not in zonedict:
zonedict.setdefault('MX', []).append({'priority': comps[3],
'host': comps[4]})
elif comps[3] in ('A', 'AAAA'):
zonedict.setdefault(comps[3], {})[comps[0]] = {
'TARGET': comps[4],
'TTL': comps[1],
}
else:
zonedict.setdefault(comps[2], {})[comps[0]] = comps[3]
    return zonedict
def harmonize_ocean(ocean, elevation, ocean_level):
"""
The goal of this function is to make the ocean floor less noisy.
The underwater erosion should cause the ocean floor to be more uniform
"""
shallow_sea = ocean_level * 0.85
midpoint = shallow_sea / 2.0
ocean_points = numpy.logical_and(elevation < shallow_sea, ocean)
shallow_ocean = numpy.logical_and(elevation < midpoint, ocean_points)
elevation[shallow_ocean] = midpoint - ((midpoint - elevation[shallow_ocean]) / 5.0)
deep_ocean = numpy.logical_and(elevation > midpoint, ocean_points)
    elevation[deep_ocean] = midpoint + ((elevation[deep_ocean] - midpoint) / 5.0)
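# Small worked example with made-up values: for ocean_level = 1.0 the shallow
# sea threshold is 0.85 and the midpoint 0.425, so a shallow ocean cell at
# 0.125 is compressed until only a fifth of its distance to the midpoint
# remains (0.425 - 0.3 / 5 = 0.365), while land cells are left untouched.
import numpy
elevation = numpy.array([0.125, 0.6])
ocean = numpy.array([True, False])
harmonize_ocean(ocean, elevation, ocean_level=1.0)
assert abs(elevation[0] - 0.365) < 1e-9
assert elevation[1] == 0.6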
def lyap_e_len(**kwargs):
"""
Helper function that calculates the minimum number of data points required
to use lyap_e.
Note that none of the required parameters may be set to None.
Kwargs:
kwargs(dict):
arguments used for lyap_e (required: emb_dim, matrix_dim, min_nb
and min_tsep)
Returns:
minimum number of data points required to call lyap_e with the given
parameters
"""
m = (kwargs['emb_dim'] - 1) // (kwargs['matrix_dim'] - 1)
# minimum length required to find single orbit vector
min_len = kwargs['emb_dim']
# we need to follow each starting point of an orbit vector for m more steps
min_len += m
# we need min_tsep * 2 + 1 orbit vectors to find neighbors for each
min_len += kwargs['min_tsep'] * 2
# we need at least min_nb neighbors for each orbit vector
min_len += kwargs['min_nb']
    return min_len
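# Worked example of the count above with illustrative parameters (not the
# defaults of any particular implementation):
#   m = (10 - 1) // (4 - 1) = 3, so the minimum length is
#   10 (emb_dim) + 3 (m) + 2 * 2 (min_tsep) + 5 (min_nb) = 22.
assert lyap_e_len(emb_dim=10, matrix_dim=4, min_tsep=2, min_nb=5) == 22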
def protege_data(datas_str, sens):
"""
    Used to encrypt/decrypt data before saving locally.
    Override if security is needed.
bytes -> str when decrypting
str -> bytes when crypting
:param datas_str: When crypting, str. when decrypting bytes
:param sens: True to crypt, False to decrypt
"""
    return bytes(datas_str, encoding="utf8") if sens else str(datas_str, encoding="utf8")
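# Round-trip sketch of the default (non-)protection above: encoding with
# sens=True and decoding the result with sens=False returns the original string.
assert protege_data(protege_data("données", True), False) == "données"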
def flick(self, element, x, y, speed):
"""Deprecated use touch('drag', { fromX, fromY, toX, toY, duration(s) }) instead.
Flick on the touch screen using finger motion events.
        This flick command starts at a particular screen location.
Support:
iOS
Args:
element(WebElement): WebElement Object where the flick starts.
            x(float): The x offset in pixels to flick by.
y(float): The y offset in pixels to flick by.
            speed(float): The speed in pixels per second.
Returns:
WebDriver object.
"""
self._execute(Command.FLICK, {
'element': element.element_id,
'x': x,
'y': y,
'speed': speed
        })
def devid(self):
"""
Two-tuple containing device's vendor ID and model ID (hex).
"""
d = self.device
vend_id = d.get('ID_VENDOR_ID')
model_id = d.get('ID_MODEL_ID')
        return (vend_id, model_id)
def plot_input(ace_model, fname='ace_input.png'):
"""Plot the transforms."""
if not plt:
raise ImportError('Cannot plot without the matplotlib package')
plt.rcParams.update({'font.size': 8})
plt.figure()
    num_cols = len(ace_model.x) // 2 + 1  # integer division so plt.subplot gets an int
for i in range(len(ace_model.x)):
plt.subplot(num_cols, 2, i + 1)
plt.plot(ace_model.x[i], ace_model.y, '.')
plt.xlabel('x{0}'.format(i))
plt.ylabel('y')
plt.tight_layout()
if fname:
plt.savefig(fname)
else:
        plt.show()
def pop_loop_instrs(setup_loop_instr, queue):
"""
Determine whether setup_loop_instr is setting up a for-loop or a
while-loop. Then pop the loop instructions from queue.
The easiest way to tell the difference is to look at the target of the
JUMP_ABSOLUTE instruction at the end of the loop. If it jumps to a
FOR_ITER, then this is a for-loop. Otherwise it's a while-loop.
The jump we want to inspect is the first JUMP_ABSOLUTE instruction prior to
the jump target of `setup_loop_instr`.
Parameters
----------
setup_loop_instr : instructions.SETUP_LOOP
First instruction of the loop being parsed.
queue : collections.deque
Queue of unprocessed instructions.
Returns
-------
loop_type : str, {'for', 'while'}
The kind of loop being constructed.
loop_instrs : deque
The instructions forming body of the loop.
else_instrs : deque
The instructions forming the else-block of the loop.
Side Effects
------------
Pops all returned instructions from `queue`.
"""
# Grab everything from left side of the queue until the jump target of
# SETUP_LOOP.
body = popwhile(op.is_not(setup_loop_instr.arg), queue, side='left')
# Anything after the last POP_BLOCK instruction is the else-block.
else_body = popwhile(not_a(instrs.POP_BLOCK), body, side='right')
jump_to_top, pop_block = body[-2], body[-1]
if not isinstance(jump_to_top, instrs.JUMP_ABSOLUTE):
raise DecompilationError(
"Penultimate instruction of loop body is "
"%s, not JUMP_ABSOLUTE." % jump_to_top,
)
if not isinstance(pop_block, instrs.POP_BLOCK):
raise DecompilationError(
"Last instruction of loop body is "
"%s, not pop_block." % pop_block,
)
loop_expr = jump_to_top.arg
if isinstance(loop_expr, instrs.FOR_ITER):
return 'for', body, else_body
    return 'while', body, else_body
def items(self):
"""Yield the async reuslts for the context."""
for key, task in self._tasks:
if not (task and task.result):
yield key, None
else:
                yield key, json.loads(task.result)["payload"]
def on_hazard_exposure_bookmark_toggled(self, enabled):
"""Update the UI when the user toggles the bookmarks radiobutton.
:param enabled: The status of the radiobutton.
:type enabled: bool
"""
if enabled:
self.bookmarks_index_changed()
else:
self.ok_button.setEnabled(True)
            self._populate_coordinates()
def is_node_highlighted(graph: BELGraph, node: BaseEntity) -> bool:
"""Returns if the given node is highlighted.
:param graph: A BEL graph
:param node: A BEL node
:type node: tuple
:return: Does the node contain highlight information?
:rtype: bool
"""
    return NODE_HIGHLIGHT in graph.node[node]
def has_logs(self):
"""
Check if log files are available and return file names if they exist.
:return: list
"""
found_files = []
if self.logpath is None:
return found_files
if os.path.exists(self.logpath):
for root, _, files in os.walk(os.path.abspath(self.logpath)):
for fil in files:
found_files.append(os.path.join(root, fil))
        return found_files
def _validate_hands(hands, missing):
'''
Validates hands, based on values that
are supposed to be missing from them.
:param list hands: list of Hand objects to validate
:param list missing: list of sets that indicate the values
that are supposed to be missing from
the respective Hand objects
:return: True if no Hand objects contain values that they
are supposed to be missing; False otherwise
'''
for h, m in zip(hands, missing):
for value in m:
if dominoes.hand.contains_value(h, value):
return False
    return True
def set_row_heights(self, pcts=None, amts=None, maxs=None, mins=None):
"""
:param pcts: the percent of available height to use or ratio is also ok
:param amts: (Array or scalar) the fixed height of the rows
:param maxs: (Array or scalar) the maximum height of the rows (only use when pcts is used)
:param mins: (Array or scalar) the minimum height of the rows (only used when pcts is used)
:return:
"""
for arr, attr in zip([pcts, amts, maxs, mins], ['weight', 'value', 'max', 'min']):
if arr is not None:
if not np.isscalar(arr):
if len(arr) != len(self.formatted_values.index):
raise ValueError(
'%s: expected %s rows but got %s' % (attr, len(arr), len(self.formatted_values.index)))
self.rowattrs.ix[:, attr] = arr
        return self
async def get_resources(self, **kwargs) -> dict:
"""Get a list of resources.
:raises PvApiError when an error occurs.
"""
resources = await self.request.get(self._base_path, **kwargs)
self._sanitize_resources(resources)
        return resources
def preview(self, **query_params):
"""Returns a streaming handle to this job's preview search results.
Unlike :class:`splunklib.results.ResultsReader`, which requires a job to
be finished to
return any results, the ``preview`` method returns any results that have
been generated so far, whether the job is running or not. The
returned search results are the raw data from the server. Pass
the handle returned to :class:`splunklib.results.ResultsReader` to get a
nice, Pythonic iterator over objects, as in::
import splunklib.client as client
import splunklib.results as results
service = client.connect(...)
job = service.jobs.create("search * | head 5")
rr = results.ResultsReader(job.preview())
for result in rr:
if isinstance(result, results.Message):
# Diagnostic messages may be returned in the results
print '%s: %s' % (result.type, result.message)
elif isinstance(result, dict):
# Normal events are returned as dicts
print result
if rr.is_preview:
print "Preview of a running search job."
else:
print "Job is finished. Results are final."
This method makes one roundtrip to the server, plus at most
two more if
the ``autologin`` field of :func:`connect` is set to ``True``.
:param query_params: Additional parameters (optional). For a list of valid
parameters, see `GET search/jobs/{search_id}/results_preview
<http://docs.splunk.com/Documentation/Splunk/latest/RESTAPI/RESTsearch#GET_search.2Fjobs.2F.7Bsearch_id.7D.2Fresults_preview>`_
in the REST API documentation.
:type query_params: ``dict``
:return: The ``InputStream`` IO handle to this job's preview results.
"""
query_params['segmentation'] = query_params.get('segmentation', 'none')
        return self.get("results_preview", **query_params).body
def message(self):
        'the standard message which can be transferred'
return {
'source':
'account',
'frequence':
self.frequence,
'account_cookie':
self.account_cookie,
'portfolio_cookie':
self.portfolio_cookie,
'user_cookie':
self.user_cookie,
'broker':
self.broker,
'market_type':
self.market_type,
'strategy_name':
self.strategy_name,
'current_time':
str(self._currenttime),
'allow_sellopen':
self.allow_sellopen,
'allow_margin':
self.allow_margin,
'allow_t0':
self.allow_t0,
'margin_level':
self.margin_level,
'init_assets':
self.init_assets,
'init_cash':
self.init_cash,
'init_hold':
self.init_hold.to_dict(),
'commission_coeff':
self.commission_coeff,
'tax_coeff':
self.tax_coeff,
'cash':
self.cash,
'history':
self.history,
'trade_index':
self.time_index_max,
'running_time':
str(datetime.datetime.now())
if self.running_time is None else str(self.running_time),
'quantaxis_version':
self.quantaxis_version,
'running_environment':
self.running_environment,
'start_date':
self.start_date,
'end_date':
self.end_date,
'frozen':
self.frozen,
'finished_id':
self.finishedOrderid
        }
def rsem_stats_table(self):
""" Take the parsed stats from the rsem report and add them to the
basic stats table at the top of the report """
headers = OrderedDict()
headers['alignable_percent'] = {
'title': '% Alignable'.format(config.read_count_prefix),
'description': '% Alignable reads'.format(config.read_count_desc),
'max': 100,
'min': 0,
'suffix': '%',
'scale': 'YlGn'
}
        self.general_stats_addcols(self.rsem_mapped_data, headers)
def enable_autozoom(self, option):
"""Set ``autozoom`` behavior.
Parameters
----------
option : {'on', 'override', 'once', 'off'}
Option for zoom behavior. A list of acceptable options can
also be obtained by :meth:`get_autozoom_options`.
Raises
------
ginga.ImageView.ImageViewError
Invalid option.
"""
option = option.lower()
assert(option in self.autozoom_options), \
ImageViewError("Bad autozoom option '%s': must be one of %s" % (
                option, str(self.autozoom_options)))
        self.t_.set(autozoom=option)
def classical(group, src_filter, gsims, param, monitor=Monitor()):
"""
Compute the hazard curves for a set of sources belonging to the same
tectonic region type for all the GSIMs associated to that TRT.
The arguments are the same as in :func:`calc_hazard_curves`, except
for ``gsims``, which is a list of GSIM instances.
:returns:
a dictionary {grp_id: pmap} with attributes .grp_ids, .calc_times,
.eff_ruptures
"""
if not hasattr(src_filter, 'sitecol'): # a sitecol was passed
src_filter = SourceFilter(src_filter, {})
# Get the parameters assigned to the group
src_mutex = getattr(group, 'src_interdep', None) == 'mutex'
rup_mutex = getattr(group, 'rup_interdep', None) == 'mutex'
cluster = getattr(group, 'cluster', None)
# Compute the number of ruptures
grp_ids = set()
for src in group:
if not src.num_ruptures:
# src.num_ruptures is set when parsing the XML, but not when
# the source is instantiated manually, so it is set here
src.num_ruptures = src.count_ruptures()
# This sets the proper TOM in case of a cluster
if cluster:
src.temporal_occurrence_model = FatedTOM(time_span=1)
# Updating IDs
grp_ids.update(src.src_group_ids)
# Now preparing context
maxdist = src_filter.integration_distance
imtls = param['imtls']
trunclevel = param.get('truncation_level')
cmaker = ContextMaker(
src.tectonic_region_type, gsims, maxdist, param, monitor)
# Prepare the accumulator for the probability maps
pmap = AccumDict({grp_id: ProbabilityMap(len(imtls.array), len(gsims))
for grp_id in grp_ids})
rupdata = {grp_id: [] for grp_id in grp_ids}
# AccumDict of arrays with 3 elements weight, nsites, calc_time
calc_times = AccumDict(accum=numpy.zeros(3, numpy.float32))
eff_ruptures = AccumDict(accum=0) # grp_id -> num_ruptures
# Computing hazard
for src, s_sites in src_filter(group): # filter now
t0 = time.time()
try:
poemap = cmaker.poe_map(src, s_sites, imtls, trunclevel,
rup_indep=not rup_mutex)
except Exception as err:
etype, err, tb = sys.exc_info()
msg = '%s (source id=%s)' % (str(err), src.source_id)
raise etype(msg).with_traceback(tb)
if src_mutex: # mutex sources, there is a single group
for sid in poemap:
pcurve = pmap[src.src_group_id].setdefault(sid, 0)
pcurve += poemap[sid] * src.mutex_weight
elif poemap:
for gid in src.src_group_ids:
pmap[gid] |= poemap
if len(cmaker.rupdata):
for gid in src.src_group_ids:
rupdata[gid].append(cmaker.rupdata)
calc_times[src.id] += numpy.array(
[src.weight, len(s_sites), time.time() - t0])
# storing the number of contributing ruptures too
eff_ruptures += {gid: getattr(poemap, 'eff_ruptures', 0)
for gid in src.src_group_ids}
# Updating the probability map in the case of mutually exclusive
# sources
group_probability = getattr(group, 'grp_probability', None)
if src_mutex and group_probability:
pmap[src.src_group_id] *= group_probability
# Processing cluster
if cluster:
tom = getattr(group, 'temporal_occurrence_model')
pmap = _cluster(param, tom, imtls, gsims, grp_ids, pmap)
# Return results
for gid, data in rupdata.items():
if len(data):
rupdata[gid] = numpy.concatenate(data)
return dict(pmap=pmap, calc_times=calc_times, eff_ruptures=eff_ruptures,
                rup_data=rupdata)
def make_parser():
"""
Create a parser which is suitably configured for parsing an XMPP XML
stream. It comes equipped with :class:`XMPPLexicalHandler`.
"""
p = xml.sax.make_parser()
p.setFeature(xml.sax.handler.feature_namespaces, True)
p.setFeature(xml.sax.handler.feature_external_ges, False)
p.setProperty(xml.sax.handler.property_lexical_handler,
XMPPLexicalHandler)
    return p
def tail(self, path, tail_length=1024, append=False):
# Note: append is currently not implemented.
''' Show the end of the file - default 1KB, supports up to the Hadoop block size.
:param path: Path to read
:type path: string
:param tail_length: The length to read from the end of the file - default 1KB, up to block size.
:type tail_length: int
:param append: Currently not implemented
:type append: bool
:returns: a generator that yields strings
'''
#TODO: Make tail support multiple files at a time, like most other methods do
if not path:
raise InvalidInputException("tail: no path given")
block_size = self.serverdefaults()['blockSize']
if tail_length > block_size:
raise InvalidInputException("tail: currently supports length up to the block size (%d)" % (block_size,))
if tail_length <= 0:
raise InvalidInputException("tail: tail_length cannot be less than or equal to zero")
processor = lambda path, node: self._handle_tail(path, node, tail_length, append)
for item in self._find_items([path], processor, include_toplevel=True,
include_children=False, recurse=False):
if item:
                yield item
def create_eager_metrics(metric_names, weights_fn=common_layers.weights_all):
"""Create metrics accumulators and averager for Eager mode.
Args:
metric_names: list<str> from Metrics enum
weights_fn: function that takes labels and returns a weights mask. Defaults
to weights of all 1, i.e. common_layers.weights_all. Use
common_layers.weights_nonzero if labels have 0-padding.
Returns:
(accum_fn(predictions, targets) => None,
     result_fn() => dict<str metric_name, float avg_val>)
"""
metric_fns = dict(
[(name, METRICS_FNS[name]) for name in metric_names])
  return create_eager_metrics_internal(metric_fns, weights_fn)
def to_array(self):
"""
Convert the RiakLinkPhase to a format that can be output into
JSON. Used internally.
"""
stepdef = {'bucket': self._bucket,
'tag': self._tag,
'keep': self._keep}
        return {'link': stepdef}
def androlyze_main(session, filename):
"""
Start an interactive shell
:param session: Session file to load
:param filename: File to analyze, can be APK or DEX (or ODEX)
"""
    from androguard.core import androconf
    from androguard.core.androconf import ANDROGUARD_VERSION, CONF
from IPython.terminal.embed import InteractiveShellEmbed
from traitlets.config import Config
from androguard.misc import init_print_colors
from androguard.session import Session, Load
from colorama import Fore
import colorama
import atexit
# Import commonly used classes, for further usage...
from androguard.core.bytecodes.apk import APK
from androguard.core.bytecodes.dvm import DalvikVMFormat
from androguard.core.analysis.analysis import Analysis
colorama.init()
if session:
print("Restoring session '{}'...".format(session))
s = CONF['SESSION'] = Load(session)
print("Successfully restored {}".format(s))
# TODO Restore a, d, dx etc...
else:
s = CONF["SESSION"] = Session(export_ipython=True)
if filename:
("Loading apk {}...".format(os.path.basename(filename)))
print("Please be patient, this might take a while.")
filetype = androconf.is_android(filename)
print("Found the provided file is of type '{}'".format(filetype))
if filetype not in ['DEX', 'DEY', 'APK']:
print(Fore.RED + "This file type is not supported by androlyze for auto loading right now!" + Fore.RESET, file=sys.stderr)
print("But your file is still available:")
print(">>> filename")
print(repr(filename))
print()
else:
with open(filename, "rb") as fp:
raw = fp.read()
            h = s.add(filename, raw)
print("Added file to session: SHA256::{}".format(h))
if filetype == 'APK':
print("Loaded APK file...")
a, d, dx = s.get_objects_apk(digest=h)
print(">>> a")
print(a)
print(">>> d")
print(d)
print(">>> dx")
print(dx)
print()
elif filetype in ['DEX', 'DEY']:
print("Loaded DEX file...")
for h_, d, dx in s.get_objects_dex():
if h == h_:
break
print(">>> d")
print(d)
print(">>> dx")
print(dx)
print()
def shutdown_hook():
"""Save the session on exit, if wanted"""
if not s.isOpen():
return
try:
res = input("Do you want to save the session? (y/[n])?").lower()
except (EOFError, KeyboardInterrupt):
pass
else:
if res == "y":
# TODO: if we already started from a session, probably we want to save it under the same name...
# TODO: be able to take any filename you want
fname = s.save()
print("Saved Session to file: '{}'".format(fname))
cfg = Config()
_version_string = "Androguard version {}".format(ANDROGUARD_VERSION)
ipshell = InteractiveShellEmbed(config=cfg, banner1="{} started"
.format(_version_string))
atexit.register(shutdown_hook)
init_print_colors()
    ipshell()
def _select_theory(theories):
"""Return the most likely spacing convention given different options.
Given a dictionary of convention options as keys and their occurrence
as values, return the convention that occurs the most, or ``None`` if
there is no clear preferred style.
"""
if theories:
values = tuple(theories.values())
best = max(values)
confidence = float(best) / sum(values)
if confidence > 0.5:
            return tuple(theories.keys())[values.index(best)]
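# Worked example with made-up counts: three out of four samples favour spaces,
# so confidence is 0.75 and 'space' wins; a 50/50 split gives no clear
# preference and the function falls through to returning None.
assert _select_theory({'space': 3, 'tab': 1}) == 'space'
assert _select_theory({'space': 2, 'tab': 2}) is None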
def _cluster_by(end_iter, attr1, attr2, cluster_distance):
"""Cluster breakends by specified attributes.
"""
ClusterInfo = namedtuple("ClusterInfo", ["chroms", "clusters", "lookup"])
chr_clusters = {}
chroms = []
brends_by_id = {}
for brend in end_iter:
        if brend.chrom1 not in chr_clusters:
chroms.append(brend.chrom1)
chr_clusters[brend.chrom1] = ClusterTree(cluster_distance, 1)
brends_by_id[int(brend.name)] = brend
chr_clusters[brend.chrom1].insert(getattr(brend, attr1),
getattr(brend, attr2),
int(brend.name))
    return ClusterInfo(chroms, chr_clusters, brends_by_id)
def add_to_item_list_by_name(self, item_urls, item_list_name):
""" Instruct the server to add the given items to the specified
Item List (which will be created if it does not already exist)
:type item_urls: List or ItemGroup
:param item_urls: List of URLs for the items to add,
or an ItemGroup object
:type item_list_name: String
:param item_list_name: name of the item list to retrieve
:rtype: String
:returns: the server success message, if successful
:raises: APIError if the request was not successful
"""
url_name = urlencode((('name', item_list_name),))
request_url = '/item_lists?' + url_name
data = json.dumps({'items': list(item_urls)})
resp = self.api_request(request_url, method='POST', data=data)
        return self.__check_success(resp)
def fit_select_best(X, y):
"""
Selects the best fit of the estimators already implemented by choosing the
model with the smallest mean square error metric for the trained values.
"""
models = [fit(X,y) for fit in [fit_linear, fit_quadratic]]
errors = map(lambda model: mse(y, model.predict(X)), models)
    return min(zip(models, errors), key=itemgetter(1))[0]
def user_absent(name):
'''
Ensure a user is not present
name
username to remove if it exists
Examples:
.. code-block:: yaml
delete:
onyx.user_absent:
- name: daniel
'''
ret = {'name': name,
'result': False,
'changes': {},
'comment': ''}
old_user = __salt__['onyx.cmd']('get_user', username=name)
if not old_user:
ret['result'] = True
ret['comment'] = 'User does not exist'
return ret
if __opts__['test'] is True and old_user:
ret['result'] = None
ret['comment'] = 'User will be removed'
ret['changes']['old'] = old_user
ret['changes']['new'] = ''
return ret
__salt__['onyx.cmd']('remove_user', username=name)
if __salt__['onyx.cmd']('get_user', username=name):
ret['comment'] = 'Failed to remove user'
else:
ret['result'] = True
ret['comment'] = 'User removed'
ret['changes']['old'] = old_user
ret['changes']['new'] = ''
    return ret
def add_step(self, step):
"""
Adds a new step to the waterfall.
:param step: Step to add
:return: Waterfall dialog for fluent calls to `add_step()`.
"""
if not step:
raise TypeError('WaterfallDialog.add_step(): step cannot be None.')
self._steps.append(step)
        return self
def maybe_call_fn_and_grads(fn,
fn_arg_list,
result=None,
grads=None,
check_non_none_grads=True,
name=None):
"""Calls `fn` and computes the gradient of the result wrt `args_list`."""
with tf.compat.v1.name_scope(name, 'maybe_call_fn_and_grads',
[fn_arg_list, result, grads]):
fn_arg_list = (list(fn_arg_list) if is_list_like(fn_arg_list)
else [fn_arg_list])
result, grads = _value_and_gradients(fn, fn_arg_list, result, grads)
if not all(r.dtype.is_floating
for r in (result if is_list_like(result) else [result])): # pylint: disable=superfluous-parens
raise TypeError('Function result must be a `Tensor` with `float` '
'`dtype`.')
if len(fn_arg_list) != len(grads):
raise ValueError('Function args must be in one-to-one correspondence '
'with grads.')
if check_non_none_grads and any(g is None for g in grads):
raise ValueError('Encountered `None` gradient.\n'
' fn_arg_list: {}\n'
' grads: {}'.format(fn_arg_list, grads))
    return result, grads
def _estimate_bkg_rms(self, xmin, xmax, ymin, ymax):
"""
Estimate the background noise mean and RMS.
The mean is estimated as the median of data.
The RMS is estimated as the IQR of data / 1.34896.
Parameters
----------
xmin, xmax, ymin, ymax : int
The bounding region over which the bkg/rms will be calculated.
Returns
-------
ymin, ymax, xmin, xmax : int
A copy of the input parameters
bkg, rms : float
The calculated background and noise.
"""
data = self.global_data.data_pix[ymin:ymax, xmin:xmax]
pixels = np.extract(np.isfinite(data), data).ravel()
if len(pixels) < 4:
bkg, rms = np.NaN, np.NaN
else:
pixels.sort()
p25 = pixels[int(pixels.size / 4)]
p50 = pixels[int(pixels.size / 2)]
p75 = pixels[int(pixels.size / 4 * 3)]
iqr = p75 - p25
bkg, rms = p50, iqr / 1.34896
# return the input and output data so we know what we are doing
# when compiling the results of multiple processes
        return ymin, ymax, xmin, xmax, bkg, rms
def logout_allowed(service):
"""Check if a given service identifier should be sent a logout request."""
if hasattr(settings, 'MAMA_CAS_SERVICES'):
return _is_allowed('logout_allowed', service)
if hasattr(settings, 'MAMA_CAS_ENABLE_SINGLE_SIGN_OUT'):
warnings.warn(
'The MAMA_CAS_ENABLE_SINGLE_SIGN_OUT setting is deprecated. SLO '
'should be configured using MAMA_CAS_SERVICES.', DeprecationWarning)
    return getattr(settings, 'MAMA_CAS_ENABLE_SINGLE_SIGN_OUT', False)
def _get_trusted_comma(self, trusted, value):
"""Get the real value from a comma-separated header based on the
configured number of trusted proxies.
:param trusted: Number of values to trust in the header.
:param value: Header value to parse.
:return: The real value, or ``None`` if there are fewer values
than the number of trusted proxies.
.. versionadded:: 0.15
"""
if not (trusted and value):
return
values = [x.strip() for x in value.split(",")]
if len(values) >= trusted:
            return values[-trusted]
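# Worked example of the same parsing on an illustrative header value, without
# needing a request object: with trusted = 2 the second-to-last entry is what
# the method above would return, and a header with fewer entries than trusted
# proxies would yield None instead.
value, trusted = "client, proxy1, proxy2", 2
values = [x.strip() for x in value.split(",")]
assert values[-trusted] == "proxy1"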
def clean():
"""Cleans up temporary resources
Tries to clean up:
1. The temporary update branch used during ``temple update``
2. The primary update branch used during ``temple update``
"""
temple.check.in_git_repo()
current_branch = _get_current_branch()
update_branch = temple.constants.UPDATE_BRANCH_NAME
temp_update_branch = temple.constants.TEMP_UPDATE_BRANCH_NAME
if current_branch in (update_branch, temp_update_branch):
err_msg = (
'You must change from the "{}" branch since it will be deleted during cleanup'
).format(current_branch)
raise temple.exceptions.InvalidCurrentBranchError(err_msg)
if temple.check._has_branch(update_branch):
temple.utils.shell('git branch -D {}'.format(update_branch))
if temple.check._has_branch(temp_update_branch):
        temple.utils.shell('git branch -D {}'.format(temp_update_branch))
def get_condition(self, condition_id):
"""Retrieve the condition for a condition_id.
:param condition_id: id of the condition, str
:return:
"""
condition = self.contract_concise.getCondition(condition_id)
if condition and len(condition) == 7:
return ConditionValues(*condition)
        return None
def end_grouping(self):
"""
Raises IndexError when no group is open.
"""
close = self._open.pop()
if not close:
return
if self._open:
self._open[-1].extend(close)
elif self._undoing:
self._redo.append(close)
else:
self._undo.append(close)
        self.notify()
def legislator_vote_value(self):
'''If this vote was accessed through the legislator.votes_manager,
return the value of this legislator's vote.
'''
if not hasattr(self, 'legislator'):
msg = ('legislator_vote_value can only be called '
'from a vote accessed by legislator.votes_manager.')
raise ValueError(msg)
leg_id = self.legislator.id
for k in ('yes', 'no', 'other'):
for leg in self[k + '_votes']:
if leg['leg_id'] == leg_id:
                    return k
def restore(self):
"""Restore the saved value for the attribute of the object."""
if self.proxy_object is None:
if self.getter:
setattr(self.getter_class, self.attr_name, self.getter)
elif self.is_local:
setattr(self.orig_object, self.attr_name, self.orig_value)
else:
# Was not a local, safe to delete:
delattr(self.orig_object, self.attr_name)
else:
setattr(sys.modules[self.orig_object.__module__],
self.orig_object.__name__,
                    self.orig_object)
def globals(self):
"""Iterates over the defined Globals."""
defglobal = lib.EnvGetNextDefglobal(self._env, ffi.NULL)
while defglobal != ffi.NULL:
yield Global(self._env, defglobal)
            defglobal = lib.EnvGetNextDefglobal(self._env, defglobal)
def get_file_by_id(self, file_id):
"""
Get folder details for a file id.
:param file_id: str: uuid of the file
:return: File
"""
return self._create_item_response(
self.data_service.get_file(file_id),
File
        )
def ReqConnect(self, pAddress: str):
"""连接行情前置
:param pAddress:
"""
self.q.CreateApi()
spi = self.q.CreateSpi()
self.q.RegisterSpi(spi)
self.q.OnFrontConnected = self._OnFrontConnected
self.q.OnFrontDisconnected = self._OnFrontDisConnected
self.q.OnRspUserLogin = self._OnRspUserLogin
self.q.OnRtnDepthMarketData = self._OnRtnDepthMarketData
self.q.OnRspSubMarketData = self._OnRspSubMarketData
self.q.RegCB()
self.q.RegisterFront(pAddress)
        self.q.Init()
def dns_encode(x, check_built=False):
"""Encodes a bytes string into the DNS format
:param x: the string
:param check_built: detect already-built strings and ignore them
:returns: the encoded bytes string
"""
if not x or x == b".":
return b"\x00"
if check_built and b"." not in x and (
orb(x[-1]) == 0 or (orb(x[-2]) & 0xc0) == 0xc0
):
# The value has already been processed. Do not process it again
return x
# Truncate chunks that cannot be encoded (more than 63 bytes..)
x = b"".join(chb(len(y)) + y for y in (k[:63] for k in x.split(b".")))
if x[-1:] != b"\x00":
x += b"\x00"
    return x
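# Usage sketch (assumes the chb/orb byte helpers used by the function above
# are available, as in its original module): each label is length-prefixed and
# the name is terminated with a zero byte.
assert dns_encode(b"www.example.com") == b"\x03www\x07example\x03com\x00"
assert dns_encode(b".") == b"\x00"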
def sign(self, data: bytes, v: int = 27) -> Signature:
""" Sign data hash with local private key """
assert v in (0, 27), 'Raiden is only signing messages with v in (0, 27)'
_hash = eth_sign_sha3(data)
signature = self.private_key.sign_msg_hash(message_hash=_hash)
sig_bytes = signature.to_bytes()
# adjust last byte to v
        return sig_bytes[:-1] + bytes([sig_bytes[-1] + v])
def add_client(self, client_identifier):
"""Add a client."""
if client_identifier in self.clients:
_LOGGER.error('%s already in group %s', client_identifier, self.identifier)
return
new_clients = self.clients
new_clients.append(client_identifier)
yield from self._server.group_clients(self.identifier, new_clients)
_LOGGER.info('added %s to %s', client_identifier, self.identifier)
self._server.client(client_identifier).callback()
        self.callback()
def determine_apache_port(public_port, singlenode_mode=False):
'''
Description: Determine correct apache listening port based on public IP +
state of the cluster.
public_port: int: standard public port for given service
singlenode_mode: boolean: Shuffle ports when only a single unit is present
returns: int: the correct listening port for the HAProxy service
'''
i = 0
if singlenode_mode:
i += 1
elif len(peer_units()) > 0 or is_clustered():
i += 1
    return public_port - (i * 10)
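# Worked example with an illustrative public port: in single-node mode Apache
# steps down one 10-port slot so HAProxy can own the public port itself (the
# clustered branch behaves the same but also queries peer_units() and
# is_clustered(), which need a charm environment, so only the single-node case
# is shown here).
assert determine_apache_port(443, singlenode_mode=True) == 433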
def is_scipy_sparse(arr):
"""
Check whether an array-like is a scipy.sparse.spmatrix instance.
Parameters
----------
arr : array-like
The array-like to check.
Returns
-------
boolean
Whether or not the array-like is a scipy.sparse.spmatrix instance.
Notes
-----
If scipy is not installed, this function will always return False.
Examples
--------
>>> from scipy.sparse import bsr_matrix
>>> is_scipy_sparse(bsr_matrix([1, 2, 3]))
True
>>> is_scipy_sparse(pd.SparseArray([1, 2, 3]))
False
>>> is_scipy_sparse(pd.SparseSeries([1, 2, 3]))
False
"""
global _is_scipy_sparse
if _is_scipy_sparse is None:
try:
from scipy.sparse import issparse as _is_scipy_sparse
except ImportError:
_is_scipy_sparse = lambda _: False
    return _is_scipy_sparse(arr)
def set_log_level(log_level):
"""
Set logging level of this module. Using
`logbook <https://logbook.readthedocs.io/en/stable/>`__ module for logging.
:param int log_level:
One of the log level of
`logbook <https://logbook.readthedocs.io/en/stable/api/base.html>`__.
Disabled logging if ``log_level`` is ``logbook.NOTSET``.
:raises LookupError: If ``log_level`` is an invalid value.
"""
if not LOGBOOK_INSTALLED:
return
# validate log level
logbook.get_level_name(log_level)
if log_level == logger.level:
return
if log_level == logbook.NOTSET:
set_logger(is_enable=False)
else:
set_logger(is_enable=True)
logger.level = log_level
tabledata.set_log_level(log_level)
sqliteschema.set_log_level(log_level)
try:
import pytablereader
pytablereader.set_log_level(log_level)
except ImportError:
        pass
def crs(self, crs):
"""Setter for extent_crs property.
:param crs: The coordinate reference system for the analysis boundary.
:type crs: QgsCoordinateReferenceSystem
"""
if isinstance(crs, QgsCoordinateReferenceSystem):
self._crs = crs
self._is_ready = False
else:
            raise InvalidExtentError('%s is not a valid CRS object.' % crs)
def retrieve_info(self):
"""Query Bugzilla API to retrieve the needed infos."""
scheme = urlparse(self.url).scheme
netloc = urlparse(self.url).netloc
query = urlparse(self.url).query
if scheme not in ('http', 'https'):
return
for item in query.split('&'):
if 'id=' in item:
ticket_id = item.split('=')[1]
break
else:
return
bugzilla_url = '%s://%s/%s%s' % (scheme, netloc, _URI_BASE, ticket_id)
result = requests.get(bugzilla_url)
self.status_code = result.status_code
if result.status_code == 200:
tree = ElementTree.fromstring(result.content)
self.title = tree.findall("./bug/short_desc").pop().text
self.issue_id = tree.findall("./bug/bug_id").pop().text
self.reporter = tree.findall("./bug/reporter").pop().text
self.assignee = tree.findall("./bug/assigned_to").pop().text
self.status = tree.findall("./bug/bug_status").pop().text
self.product = tree.findall("./bug/product").pop().text
self.component = tree.findall("./bug/component").pop().text
self.created_at = tree.findall("./bug/creation_ts").pop().text
self.updated_at = tree.findall("./bug/delta_ts").pop().text
try:
self.closed_at = (
tree.findall("./bug/cf_last_closed").pop().text
)
except IndexError:
# cf_last_closed is present only if the issue has been closed
# if not present it raises an IndexError, meaning the issue
# isn't closed yet, which is a valid use case.
                pass
def annotation_spec_path(cls, project, location, dataset, annotation_spec):
"""Return a fully-qualified annotation_spec string."""
return google.api_core.path_template.expand(
"projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}",
project=project,
location=location,
dataset=dataset,
annotation_spec=annotation_spec,
        )
def du(path):
'''
Put it all together!
'''
size, err = calc(path)
if err:
return err
else:
hr, unit = convert(size)
hr = str(hr)
result = hr + " " + unit
        return result
def load_token(data):
"""Load the oauth2server token from data dump."""
from invenio_oauth2server.models import Token
data['expires'] = iso2dt_or_none(data['expires'])
    load_common(Token, data)
def _do_magic_import(self, rule, p_selectors, p_parents, p_children, scope, media, c_lineno, c_property, c_codestr, code, name):
"""
Implements @import for sprite-maps
Imports magic sprite map directories
"""
if callable(STATIC_ROOT):
files = sorted(STATIC_ROOT(name))
else:
glob_path = os.path.join(STATIC_ROOT, name)
files = glob.glob(glob_path)
files = sorted((file[len(STATIC_ROOT):], None) for file in files)
if files:
# Build magic context
map_name = os.path.normpath(
os.path.dirname(name)).replace('\\', '_').replace('/', '_')
kwargs = {}
def setdefault(var, val):
_var = '$' + map_name + '-' + var
if _var in rule[CONTEXT]:
kwargs[var] = interpolate(rule[CONTEXT][_var], rule)
else:
rule[CONTEXT][_var] = val
kwargs[var] = interpolate(val, rule)
return rule[CONTEXT][_var]
setdefault(
'sprite-base-class', StringValue('.' + map_name + '-sprite'))
setdefault('sprite-dimensions', BooleanValue(False))
position = setdefault('position', NumberValue(0, '%'))
spacing = setdefault('spacing', NumberValue(0))
repeat = setdefault('repeat', StringValue('no-repeat'))
names = tuple(os.path.splitext(
os.path.basename(file))[0] for file, storage in files)
for n in names:
setdefault(n + '-position', position)
setdefault(n + '-spacing', spacing)
setdefault(n + '-repeat', repeat)
sprite_map = _sprite_map(name, **kwargs)
rule[CONTEXT]['$' + map_name + '-' + 'sprites'] = sprite_map
ret = '''
@import "compass/utilities/sprites/base";
// All sprites should extend this class
// The %(map_name)s-sprite mixin will do so for you.
#{$%(map_name)s-sprite-base-class} {
background: $%(map_name)s-sprites;
}
// Use this to set the dimensions of an element
// based on the size of the original image.
@mixin %(map_name)s-sprite-dimensions($name) {
@include sprite-dimensions($%(map_name)s-sprites, $name);
}
// Move the background position to display the sprite.
@mixin %(map_name)s-sprite-position($name, $offset-x: 0, $offset-y: 0) {
@include sprite-position($%(map_name)s-sprites, $name, $offset-x, $offset-y);
}
// Extends the sprite base class and set the background position for the desired sprite.
// It will also apply the image dimensions if $dimensions is true.
@mixin %(map_name)s-sprite($name, $dimensions: $%(map_name)s-sprite-dimensions, $offset-x: 0, $offset-y: 0) {
@extend #{$%(map_name)s-sprite-base-class};
@include sprite($%(map_name)s-sprites, $name, $dimensions, $offset-x, $offset-y);
}
@mixin %(map_name)s-sprites($sprite-names, $dimensions: $%(map_name)s-sprite-dimensions) {
@include sprites($%(map_name)s-sprites, $sprite-names, $%(map_name)s-sprite-base-class, $dimensions);
}
// Generates a class for each sprited image.
@mixin all-%(map_name)s-sprites($dimensions: $%(map_name)s-sprite-dimensions) {
@include %(map_name)s-sprites(%(sprites)s, $dimensions);
}
''' % {'map_name': map_name, 'sprites': ' '.join(names)}
            return ret
def __authorize(self, client_id, client_secret, credit_card_id, **kwargs):
"""Call documentation: `/credit_card/authorize
<https://www.wepay.com/developer/reference/credit_card#authorize>`_,
plus extra keyword parameter:
:keyword bool batch_mode: turn on/off the batch_mode, see
:class:`wepay.api.WePay`
:keyword str batch_reference_id: `reference_id` param for batch call,
see :class:`wepay.api.WePay`
:keyword str api_version: WePay API version, see
:class:`wepay.api.WePay`
"""
params = {
'client_id': client_id,
'client_secret': client_secret,
'credit_card_id': credit_card_id
}
        return self.make_call(self.__authorize, params, kwargs)
def parametrized_bottleneck(x, hparams):
"""Meta-function calling all the above bottlenecks with hparams."""
if hparams.bottleneck_kind == "tanh_discrete":
d, _ = tanh_discrete_bottleneck(
x, hparams.bottleneck_bits, hparams.bottleneck_noise * 0.5,
hparams.discretize_warmup_steps, hparams.mode)
return d, 0.0
if hparams.bottleneck_kind == "isemhash":
return isemhash_bottleneck(
x, hparams.bottleneck_bits, hparams.bottleneck_noise * 0.5,
hparams.discretize_warmup_steps, hparams.mode,
hparams.isemhash_noise_dev, hparams.isemhash_mix_prob)
if hparams.bottleneck_kind == "vq":
return vq_discrete_bottleneck(x, hparams.bottleneck_bits, hparams.vq_beta,
hparams.vq_decay, hparams.vq_epsilon)
if hparams.bottleneck_kind == "em":
return vq_discrete_bottleneck(
x,
hparams.bottleneck_bits,
hparams.vq_beta,
hparams.vq_decay,
hparams.vq_epsilon,
soft_em=True,
num_samples=hparams.vq_num_samples)
if hparams.bottleneck_kind == "gumbel_softmax":
return gumbel_softmax_discrete_bottleneck(
x,
hparams.bottleneck_bits,
hparams.vq_beta,
hparams.vq_decay,
hparams.vq_epsilon,
hparams.temperature_warmup_steps,
hard=False,
summary=True)
raise ValueError(
"Unsupported hparams.bottleneck_kind %s" % hparams.bottleneck_kind) | 0.005521 |
def start_rest_api(host, port, connection, timeout, registry,
client_max_size=None):
"""Builds the web app, adds route handlers, and finally starts the app.
"""
loop = asyncio.get_event_loop()
connection.open()
app = web.Application(loop=loop, client_max_size=client_max_size)
app.on_cleanup.append(lambda app: connection.close())
# Add routes to the web app
LOGGER.info('Creating handlers for validator at %s', connection.url)
handler = RouteHandler(loop, connection, timeout, registry)
app.router.add_post('/batches', handler.submit_batches)
app.router.add_get('/batch_statuses', handler.list_statuses)
app.router.add_post('/batch_statuses', handler.list_statuses)
app.router.add_get('/state', handler.list_state)
app.router.add_get('/state/{address}', handler.fetch_state)
app.router.add_get('/blocks', handler.list_blocks)
app.router.add_get('/blocks/{block_id}', handler.fetch_block)
app.router.add_get('/batches', handler.list_batches)
app.router.add_get('/batches/{batch_id}', handler.fetch_batch)
app.router.add_get('/transactions', handler.list_transactions)
app.router.add_get(
'/transactions/{transaction_id}',
handler.fetch_transaction)
app.router.add_get('/receipts', handler.list_receipts)
app.router.add_post('/receipts', handler.list_receipts)
app.router.add_get('/peers', handler.fetch_peers)
app.router.add_get('/status', handler.fetch_status)
subscriber_handler = StateDeltaSubscriberHandler(connection)
app.router.add_get('/subscriptions', subscriber_handler.subscriptions)
app.on_shutdown.append(lambda app: subscriber_handler.on_shutdown())
# Start app
LOGGER.info('Starting REST API on %s:%s', host, port)
web.run_app(
app,
host=host,
port=port,
access_log=LOGGER,
        access_log_format='%r: %s status, %b size, in %Tf s')
def find_hosted_zone(Id=None, Name=None, PrivateZone=None,
region=None, key=None, keyid=None, profile=None):
'''
Find a hosted zone with the given characteristics.
Id
The unique Zone Identifier for the Hosted Zone. Exclusive with Name.
Name
The domain name associated with the Hosted Zone. Exclusive with Id.
Note this has the potential to match more then one hosted zone (e.g. a public and a private
if both exist) which will raise an error unless PrivateZone has also been passed in order
split the different.
PrivateZone
Boolean - Set to True if searching for a private hosted zone.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
Dict, or pillar key pointing to a dict, containing AWS region/key/keyid.
CLI Example:
.. code-block:: bash
salt myminion boto3_route53.find_hosted_zone Name=salt.org. \
profile='{"region": "us-east-1", "keyid": "A12345678AB", "key": "xblahblahblah"}'
'''
if not _exactly_one((Id, Name)):
raise SaltInvocationError('Exactly one of either Id or Name is required.')
if PrivateZone is not None and not isinstance(PrivateZone, bool):
raise SaltInvocationError('If set, PrivateZone must be a bool (e.g. True / False).')
if Id:
ret = get_hosted_zone(Id, region=region, key=key, keyid=keyid, profile=profile)
else:
ret = get_hosted_zones_by_domain(Name, region=region, key=key, keyid=keyid, profile=profile)
if PrivateZone is not None:
ret = [m for m in ret if m['HostedZone']['Config']['PrivateZone'] is PrivateZone]
if len(ret) > 1:
log.error(
'Request matched more than one Hosted Zone (%s). Refine your '
'criteria and try again.', [z['HostedZone']['Id'] for z in ret]
)
ret = []
    return ret
def delete(self):
"""Delete this job."""
self.conn.delete(self.jid)
        self.reserved = False
def setup_server_users(server):
"""
Seeds all users returned by get_seed_users() IF there are no users seed yet
i.e. system.users collection is empty
"""
"""if not should_seed_users(server):
log_verbose("Not seeding users for server '%s'" % server.id)
return"""
log_info("Checking if there are any users that need to be added for "
"server '%s'..." % server.id)
seed_users = server.get_seed_users()
count_new_users = 0
# Note: If server member of a replica then don't setup admin
# users because primary server will do that at replinit
# Now create admin ones
if not server.is_slave():
count_new_users += setup_server_admin_users(server)
for dbname, db_seed_users in seed_users.items():
# create the admin ones last so we won't have an auth issue
if dbname in ["admin", "local"]:
continue
count_new_users += setup_server_db_users(server, dbname, db_seed_users)
if count_new_users > 0:
log_info("Added %s users." % count_new_users)
else:
        log_verbose("Did not add any new users.")
def setup_global_logging():
"""
Initializes capture of stdout/stderr, Python warnings, and exceptions;
redirecting them to the loggers for the modules from which they originated.
"""
global global_logging_started
if not PY3K:
sys.exc_clear()
if global_logging_started:
return
orig_logger_class = logging.getLoggerClass()
logging.setLoggerClass(StreamTeeLogger)
try:
stdout_logger = logging.getLogger(__name__ + '.stdout')
stderr_logger = logging.getLogger(__name__ + '.stderr')
finally:
logging.setLoggerClass(orig_logger_class)
stdout_logger.setLevel(logging.INFO)
stderr_logger.setLevel(logging.ERROR)
stdout_logger.set_stream(sys.stdout)
stderr_logger.set_stream(sys.stderr)
sys.stdout = stdout_logger
sys.stderr = stderr_logger
exception_logger = logging.getLogger(__name__ + '.exc')
sys.excepthook = LoggingExceptionHook(exception_logger)
logging.captureWarnings(True)
rawinput = 'input' if PY3K else 'raw_input'
builtins._original_raw_input = getattr(builtins, rawinput)
setattr(builtins, rawinput, global_logging_raw_input)
    global_logging_started = True
def get_algorithm(self, name):
"""
Gets a single algorithm by its unique name.
:param str name: Either a fully-qualified XTCE name or an alias in the
format ``NAMESPACE/NAME``.
:rtype: .Algorithm
"""
name = adapt_name_for_rest(name)
url = '/mdb/{}/algorithms{}'.format(self._instance, name)
response = self._client.get_proto(url)
message = mdb_pb2.AlgorithmInfo()
message.ParseFromString(response.content)
        return Algorithm(message)
def _dump_multipoint(obj, decimals):
"""
Dump a GeoJSON-like MultiPoint object to WKT.
Input parameters and return value are the MULTIPOINT equivalent to
:func:`_dump_point`.
"""
coords = obj['coordinates']
mp = 'MULTIPOINT (%s)'
points = (' '.join(_round_and_pad(c, decimals)
for c in pt) for pt in coords)
# Add parens around each point.
points = ('(%s)' % pt for pt in points)
mp %= ', '.join(points)
    return mp
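# Usage sketch (illustrative; relies on the module's _round_and_pad helper to
# format each coordinate to the requested number of decimals):
#   _dump_multipoint({'type': 'MultiPoint',
#                     'coordinates': [[10.0, 20.0], [30.0, 40.0]]}, 1)
# would yield a string of the form 'MULTIPOINT ((10.0 20.0), (30.0 40.0))'.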
async def close(self):
"""
Terminate the ICE agent, ending ICE processing and streams.
"""
if self.__isClosed:
return
self.__isClosed = True
self.__setSignalingState('closed')
# stop senders / receivers
for transceiver in self.__transceivers:
await transceiver.stop()
if self.__sctp:
await self.__sctp.stop()
# stop transports
for transceiver in self.__transceivers:
await transceiver._transport.stop()
await transceiver._transport.transport.stop()
if self.__sctp:
await self.__sctp.transport.stop()
await self.__sctp.transport.transport.stop()
self.__updateIceConnectionState()
# no more events will be emitted, so remove all event listeners
# to facilitate garbage collection.
        self.remove_all_listeners()
def get_from_config_setting_cascade(self, sec_param_list, default=None, warn_on_none_level=logging.WARN):
"""return the first non-None setting from a series where each
element in `sec_param_list` is a section, param pair suitable for
a get_config_setting call.
Note that non-None values for overrides for this ConfigWrapper instance will cause
this call to only evaluate the first element in the cascade.
"""
for section, param in sec_param_list:
r = self.get_config_setting(section, param, default=None, warn_on_none_level=None)
if r is not None:
return r
section, param = sec_param_list[-1]
if default is None:
_warn_missing_setting(section, param, self._config_filename, warn_on_none_level)
        return default
def body(self):
    """ String from `wsgi.input`.
    """
    if self._body is None:
        if self._fieldstorage is not None:
            raise ReadBodyTwiceError()
        clength = int(self.environ('CONTENT_LENGTH') or 0)
        self._body = self._environ['wsgi.input'].read(clength)
        if isinstance(self._body, bytes):
            self._body = self._body.decode('utf8')
    return self._body | 0.004494 |
def clean(cls, cpf):
    u"""
    Returns only the digits of the CPF.

    >>> CPF.clean('581.194.436-59')
    '58119443659'
    """
    if isinstance(cpf, six.string_types):
        cpf = int(re.sub('[^0-9]', '', cpf))
    return '{0:011d}'.format(cpf) | 0.007042 |
def try_render(filepath=None, content=None, **options):
    """
    Compile and render template and return the result as a string.

    :param filepath: Absolute or relative path to the template file
    :param content: Template content (str)
    :param options: Keyword options passed to :func:`render` defined above.

    :return: Compiled result (str) or None
    """
    if filepath is None and content is None:
        raise ValueError("Either 'filepath' or 'content' must be some value!")

    tmpl_s = filepath or content[:10] + " ..."
    LOGGER.debug("Compiling: %s", tmpl_s)
    try:
        if content is None:
            render_opts = anyconfig.utils.filter_options(RENDER_OPTS, options)
            return render(filepath, **render_opts)
        render_s_opts = anyconfig.utils.filter_options(RENDER_S_OPTS, options)
        return render_s(content, **render_s_opts)
    except Exception as exc:
        LOGGER.warning("Failed to compile '%s'. It may not be a template.%s"
                       "exc=%r", tmpl_s, os.linesep, exc)
        return None | 0.000944 |
def cli(context, mongodb, username, password, authdb, host, port, loglevel, config, demo):
    """scout: manage interactions with a scout instance."""
    # log_format = "%(message)s" if sys.stdout.isatty() else None
    log_format = None
    coloredlogs.install(level=loglevel, fmt=log_format)
    LOG.info("Running scout version %s", __version__)
    LOG.debug("Debug logging enabled.")

    mongo_config = {}
    cli_config = {}
    if config:
        LOG.debug("Use config file %s", config)
        with open(config, 'r') as in_handle:
            cli_config = yaml.load(in_handle)

    mongo_config['mongodb'] = (mongodb or cli_config.get('mongodb') or 'scout')
    if demo:
        mongo_config['mongodb'] = 'scout-demo'

    mongo_config['host'] = (host or cli_config.get('host') or 'localhost')
    mongo_config['port'] = (port or cli_config.get('port') or 27017)
    mongo_config['username'] = username or cli_config.get('username')
    mongo_config['password'] = password or cli_config.get('password')
    mongo_config['authdb'] = authdb or cli_config.get('authdb') or mongo_config['mongodb']
    mongo_config['omim_api_key'] = cli_config.get('omim_api_key')

    if context.invoked_subcommand in ('setup', 'serve'):
        mongo_config['adapter'] = None
    else:
        LOG.info("Setting database name to %s", mongo_config['mongodb'])
        LOG.debug("Setting host to %s", mongo_config['host'])
        LOG.debug("Setting port to %s", mongo_config['port'])
        try:
            client = get_connection(**mongo_config)
        except ConnectionFailure:
            context.abort()
        database = client[mongo_config['mongodb']]
        LOG.info("Setting up a mongo adapter")
        mongo_config['client'] = client
        adapter = MongoAdapter(database)
        mongo_config['adapter'] = adapter
        LOG.info("Check if authenticated...")
        try:
            for ins_obj in adapter.institutes():
                pass
        except OperationFailure as err:
            LOG.info("User not authenticated")
            context.abort()

    context.obj = mongo_config | 0.002834 |
def error(self, error):
    """
    set the error
    """
    # TODO: check length with value?
    # TODO: type checks (similar to value)
    if self.direction not in ['x', 'y', 'z'] and error is not None:
        raise ValueError("error only accepted for x, y, z dimensions")
    if isinstance(error, u.Quantity):
        error = error.to(self.unit).value
    self._error = error | 0.004739 |
def pil_save(self, filename, fformat=None, fill_value=None,
             compute=True, **format_kwargs):
    """Save the image to the given *filename* using PIL.

    For now, the compression level [0-9] is ignored, due to PIL's lack of
    support. See also :meth:`save`.
    """
    fformat = fformat or os.path.splitext(filename)[1][1:4]
    fformat = check_image_format(fformat)

    if fformat == 'png':
        # Take care of GeoImage.tags (if any).
        format_kwargs['pnginfo'] = self._pngmeta()

    img = self.pil_image(fill_value, compute=False)
    delay = img.save(filename, fformat, **format_kwargs)
    if compute:
        return delay.compute()
    return delay | 0.004054 |
def console_progress():
    """ Return a progress indicator for consoles if
        stdout is a tty.
    """
    def progress(totalhashed, totalsize):
        "Helper"
        msg = " " * 30
        if totalhashed < totalsize:
            msg = "%5.1f%% complete" % (totalhashed * 100.0 / totalsize)
        sys.stdout.write(msg + " \r")
        sys.stdout.flush()

    try:
        return progress if sys.stdout.isatty() else None
    except AttributeError:
        return None | 0.002092 |
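
A short usage sketch for the helper above; the callback is only returned when stdout is a real terminal, so callers have to handle None.

import sys

progress_cb = console_progress()
total = 1 << 20
if progress_cb is not None:
    for done in range(0, total + 1, total // 4):
        progress_cb(done, total)   # writes e.g. " 25.0% complete\r"
    sys.stdout.write("\n")
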
def hash_str(data, hasher=None):
    """Checksum hash a string."""
    hasher = hasher or hashlib.sha1()
    hasher.update(data)
    return hasher | 0.006849 |
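
A short usage sketch; note that on Python 3 hasher.update expects bytes, so text must be encoded before calling this helper.

import hashlib

digest = hash_str(b"hello world").hexdigest()   # sha1 by default
digest256 = hash_str(b"hello world", hasher=hashlib.sha256()).hexdigest()
print(digest, digest256)
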
def dict_factory(cursor, row):
    """
    Converts the cursor information from a SQLite query to a dictionary.

    :param      cursor | <sqlite3.Cursor>
                row    | <sqlite3.Row>

    :return     {<str> column: <variant> value, ..}
    """
    out = {}
    for i, col in enumerate(cursor.description):
        out[col[0]] = row[i]
    return out | 0.002778 |
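
A self-contained usage sketch with the standard library's sqlite3 module, wiring the factory in through row_factory so every fetched row comes back as a dict.

import sqlite3

conn = sqlite3.connect(':memory:')
conn.row_factory = dict_factory
conn.execute('CREATE TABLE users (id INTEGER, name TEXT)')
conn.execute("INSERT INTO users VALUES (1, 'ada')")
for row in conn.execute('SELECT id, name FROM users'):
    print(row)  # {'id': 1, 'name': 'ada'}
conn.close()
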
def cli(env, account_id):
    """List origin pull mappings."""
    manager = SoftLayer.CDNManager(env.client)
    origins = manager.get_origins(account_id)

    table = formatting.Table(['id', 'media_type', 'cname', 'origin_url'])
    for origin in origins:
        table.add_row([origin['id'],
                       origin['mediaType'],
                       origin.get('cname', formatting.blank()),
                       origin['originUrl']])
    env.fout(table) | 0.002128 |
def relative_filename(self, filename):
    """Return the relative form of `filename`.

    The filename will be relative to the current directory when the
    `FileLocator` was constructed.
    """
    fnorm = os.path.normcase(filename)
    if fnorm.startswith(self.relative_dir):
        filename = filename[len(self.relative_dir):]
    return filename | 0.005181 |
def startup_config_content(self):
    """
    Returns the content of the current startup-config file.
    """
    config_file = self.startup_config_file
    if config_file is None:
        return None

    try:
        with open(config_file, "rb") as f:
            return f.read().decode("utf-8", errors="replace")
    except OSError as e:
        raise IOUError("Can't read startup-config file '{}': {}".format(config_file, e)) | 0.006329 |
def update_unique(self, table_name, fields, data, cond=None, unique_fields=None,
                  *, raise_if_not_found=False):
    """Update the unique matching element to have a given set of fields.

    Parameters
    ----------
    table_name: str
    fields: dict or function[dict -> None]
        new data/values to insert into the unique element
        or a method that will update the elements.
    data: dict
        Sample data for query
    cond: tinydb.Query
        which elements to update
    unique_fields: list of str
    raise_if_not_found: bool
        Will raise an exception if the element is not found for update.

    Returns
    -------
    eid: int
        The eid of the updated element if found, None otherwise.
    """
    eid = find_unique(self.table(table_name), data, unique_fields)
    if eid is None:
        if raise_if_not_found:
            msg = 'Could not find {} with {}'.format(table_name, data)
            if cond is not None:
                msg += ' where {}.'.format(cond)
            raise IndexError(msg)
    else:
        self.table(table_name).update(_to_string(fields), cond=cond, eids=[eid])

    return eid | 0.004658 |
def undeflate(data):
    """Decompresses data for Content-Encoding: deflate.
    (the zlib compression is used.)
    """
    import zlib
    decompressobj = zlib.decompressobj(-zlib.MAX_WBITS)
    return decompressobj.decompress(data) + decompressobj.flush() | 0.003906 |
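
A round-trip sketch: the negative window-bits value above means raw DEFLATE data (no zlib header), so a matching compressor has to be built the same way.

import zlib

payload = b"hello deflate" * 10
compressor = zlib.compressobj(9, zlib.DEFLATED, -zlib.MAX_WBITS)
raw = compressor.compress(payload) + compressor.flush()

assert undeflate(raw) == payload
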
def get_endpoint(cls):
    """
    Accessor method that allows the endpoint name to be omitted.

    Unless an explicit ``endpoint`` is set on the class, the class name is
    translated into the endpoint name (CamelCase -> snake_case, pluralized).
    """
    if cls.endpoint is not None:
        return cls.endpoint
    s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', cls.__name__)
    return cls.sanitize_ep(
        re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower(),
        plural=True
    ) | 0.00369 |
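
A self-contained sketch of just the two-step regex conversion used above, which is the common CamelCase-to-snake_case idiom; the pluralizing sanitize_ep step is class-specific and omitted here.

import re

def camel_to_snake(name):
    # First split on Upper+lower runs, then on lower/digit followed by Upper.
    s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()

print(camel_to_snake('UserProfile'))   # user_profile
print(camel_to_snake('HTTPResponse'))  # http_response
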
def get_right_geo_fhs(self, dsid, fhs):
    """Find the right geographical file handlers for given dataset ID *dsid*."""
    ds_info = self.ids[dsid]
    req_geo, rem_geo = self._get_req_rem_geo(ds_info)
    desired, other = split_desired_other(fhs, req_geo, rem_geo)
    if desired:
        try:
            ds_info['dataset_groups'].remove(rem_geo)
        except ValueError:
            pass
        return desired
    else:
        return other | 0.006048 |
def _create_save_scenario_action(self):
    """Create action for save scenario dialog."""
    icon = resources_path('img', 'icons', 'save-as-scenario.svg')
    self.action_save_scenario = QAction(
        QIcon(icon),
        self.tr('Save Current Scenario'), self.iface.mainWindow())
    message = self.tr('Save current scenario to text file')
    self.action_save_scenario.setStatusTip(message)
    self.action_save_scenario.setWhatsThis(message)
    # noinspection PyUnresolvedReferences
    self.action_save_scenario.triggered.connect(self.save_scenario)
    self.add_action(
        self.action_save_scenario, add_to_toolbar=self.full_toolbar) | 0.002874 |
def putmask(self, mask, value):
    """
    Return a new Index of the values set with the mask.

    See Also
    --------
    numpy.ndarray.putmask
    """
    values = self.values.copy()
    try:
        np.putmask(values, mask, self._convert_for_op(value))
        return self._shallow_copy(values)
    except (ValueError, TypeError) as err:
        if is_object_dtype(self):
            raise err
        # coerces to object
        return self.astype(object).putmask(mask, value) | 0.00367 |
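
This reads like the pandas Index.putmask implementation; assuming that is the case, a short usage sketch (the exact repr of the result depends on the pandas version).

import pandas as pd

idx = pd.Index([1, 2, 3, 4])
print(idx.putmask([True, False, True, False], 0))
# Index([0, 2, 0, 4], dtype='int64')
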
def copy(self, *args, **kwargs):
    '''
    Returns a copy of the current data object.

    :param flag: if an argument is provided, this returns an updated copy of
        the current object (i.e. equivalent to ``obj.copy(); obj.update(flag)``),
        optimising memory use.
    :keyword bool deep: deep copies the object (object data will be copied as
        well). Defaults to True.
    '''
    deep = kwargs.get('deep', True)
    if len(args) > 0:
        return self.updated_copy(*args)
    else:
        return copy.deepcopy(self) if deep else copy.copy(self) | 0.0189 |
def charge_credit_card(self, credit_card_psp_object: Model, amount: Money, client_ref: str) -> Tuple[bool, Model]:
    """
    :param credit_card_psp_object: an instance representing the credit card in the PSP
    :param amount: the amount to charge
    :param client_ref: a reference that will appear on the customer's credit card statement
    :return: a tuple (success, payment_psp_object)
    """
    pass | 0.011521 |
def put_member(self, name: InstanceName, value: Value,
               raw: bool = False) -> "InstanceNode":
    """Return receiver's member with a new value.

    If the member is permitted by the schema but doesn't exist, it
    is created.

    Args:
        name: Instance name of the member.
        value: New value of the member.
        raw: Flag to be set if `value` is raw.

    Raises:
        NonexistentSchemaNode: If member `name` is not permitted by the
            schema.
        InstanceValueError: If the receiver's value is not an object.
    """
    if not isinstance(self.value, ObjectValue):
        raise InstanceValueError(self.json_pointer(), "member of non-object")
    csn = self._member_schema_node(name)
    newval = self.value.copy()
    newval[name] = csn.from_raw(value, self.json_pointer()) if raw else value
    return self._copy(newval)._member(name) | 0.005203 |
def onThemeColor(self, color, item):
    """pass theme colors to bottom panel"""
    bconf = self.panel_bot.conf
    if item == 'grid':
        bconf.set_gridcolor(color)
    elif item == 'bg':
        bconf.set_bgcolor(color)
    elif item == 'frame':
        bconf.set_framecolor(color)
    elif item == 'text':
        bconf.set_textcolor(color)
    bconf.canvas.draw() | 0.004808 |
def difference(cls, first, second):
    """Tells the numerical difference between two ranks."""
    # so we always get a Rank instance even if strings were passed in
    first, second = cls(first), cls(second)
    rank_list = list(cls)
    return abs(rank_list.index(first) - rank_list.index(second)) | 0.00625 |
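
This looks like a classmethod on an ordered enum of card ranks; a minimal self-contained sketch with a hypothetical, abbreviated Rank enum.

from enum import Enum

class Rank(Enum):
    # Hypothetical ordered rank enum, lowest to highest.
    TWO = '2'
    THREE = '3'
    FOUR = '4'
    FIVE = '5'

    @classmethod
    def difference(cls, first, second):
        first, second = cls(first), cls(second)
        rank_list = list(cls)
        return abs(rank_list.index(first) - rank_list.index(second))

print(Rank.difference('2', '5'))  # 3
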
def histogram(data, bins=None, *args, **kwargs):
    """Facade function to create 1D histograms.

    This proceeds in three steps:
    1) Based on magical parameter bins, construct bins for the histogram
    2) Calculate frequencies for the bins
    3) Construct the histogram object itself

    *Guiding principle:* parameters understood by numpy.histogram should also
    be understood by physt.histogram and should result in a Histogram1D
    object with (h.numpy_bins, h.frequencies) same as the numpy.histogram
    output. Additional functionality is a bonus.

    This function is also aliased as "h1".

    Parameters
    ----------
    data : array_like, optional
        Container of all the values (tuple, list, np.ndarray, pd.Series)
    bins: int or sequence of scalars or callable or str, optional
        If iterable => the bins themselves
        If int => number of bins for default binning
        If callable => use binning method (+ args, kwargs)
        If string => use named binning method (+ args, kwargs)
    weights: array_like, optional
        (as numpy.histogram)
    keep_missed: Optional[bool]
        store statistics about how many values were lower than limits
        and how many higher than limits (default: True)
    dropna: bool
        whether to clear data from nan's before histogramming
    name: str
        name of the histogram
    axis_name: str
        name of the variable on x axis
    adaptive: bool
        whether we want the bins to be modifiable
        (useful for continuous filling of a priori unknown data)
    dtype: type
        customize underlying data type: default int64 (without weights) or float (with weights)

    Other numpy.histogram parameters are excluded, see the methods of the Histogram1D class itself.

    Returns
    -------
    physt.histogram1d.Histogram1D

    See Also
    --------
    numpy.histogram
    """
    import numpy as np
    from .histogram1d import Histogram1D, calculate_frequencies
    from .binnings import calculate_bins

    adaptive = kwargs.pop("adaptive", False)
    dtype = kwargs.pop("dtype", None)

    if isinstance(data, tuple) and isinstance(data[0], str):   # Works for groupby DataSeries
        return histogram(data[1], bins, *args, name=data[0], **kwargs)
    elif type(data).__name__ == "DataFrame":
        raise RuntimeError("Cannot create histogram from a pandas DataFrame. Use Series.")

    # Collect arguments (not to send them to binning algorithms)
    dropna = kwargs.pop("dropna", True)
    weights = kwargs.pop("weights", None)
    keep_missed = kwargs.pop("keep_missed", True)
    name = kwargs.pop("name", None)
    axis_name = kwargs.pop("axis_name", None)
    title = kwargs.pop("title", None)

    # Convert to array
    if data is not None:
        array = np.asarray(data)  # .flatten()
        if dropna:
            array = array[~np.isnan(array)]
    else:
        array = None

    # Get binning
    binning = calculate_bins(array, bins, *args,
                             check_nan=not dropna and array is not None,
                             adaptive=adaptive, **kwargs)
    # bins = binning.bins

    # Get frequencies
    if array is not None:
        (frequencies, errors2, underflow, overflow, stats) = \
            calculate_frequencies(array, binning=binning,
                                  weights=weights, dtype=dtype)
    else:
        frequencies = None
        errors2 = None
        underflow = 0
        overflow = 0
        stats = {"sum": 0.0, "sum2": 0.0}

    # Construct the object
    if not keep_missed:
        underflow = 0
        overflow = 0
    if not axis_name:
        if hasattr(data, "name"):
            axis_name = data.name
        elif hasattr(data, "fields") and len(data.fields) == 1 and isinstance(data.fields[0], str):
            # Case of dask fields (examples)
            axis_name = data.fields[0]

    return Histogram1D(binning=binning, frequencies=frequencies,
                       errors2=errors2, overflow=overflow,
                       underflow=underflow, stats=stats, dtype=dtype,
                       keep_missed=keep_missed, name=name, axis_name=axis_name,
                       title=title) | 0.002142 |
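
A short usage sketch, assuming the physt package (whose facade this appears to be) is installed; it exercises the int-valued bins path described in the docstring above.

import numpy as np
import physt

values = np.random.normal(0, 1, 1000)
h = physt.h1(values, 20, name="gauss")   # 20 bins with the default binning
print(h.frequencies)   # counts per bin
print(h.numpy_bins)    # bin edges, numpy.histogram-compatible
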
def get_stp_mst_detail_output_msti_port_interface_type(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    get_stp_mst_detail = ET.Element("get_stp_mst_detail")
    config = get_stp_mst_detail
    output = ET.SubElement(get_stp_mst_detail, "output")
    msti = ET.SubElement(output, "msti")
    instance_id_key = ET.SubElement(msti, "instance-id")
    instance_id_key.text = kwargs.pop('instance_id')
    port = ET.SubElement(msti, "port")
    interface_type = ET.SubElement(port, "interface-type")
    interface_type.text = kwargs.pop('interface_type')
    callback = kwargs.pop('callback', self._callback)
    return callback(config) | 0.00274 |
def validate_public_key(value):
    """
    Check that the given value is a valid RSA public key in either PEM or OpenSSH format.
    If it is invalid, raises ``django.core.exceptions.ValidationError``.
    """
    is_valid = False
    exc = None
    for load in (load_pem_public_key, load_ssh_public_key):
        if not is_valid:
            try:
                load(value.encode('utf-8'), default_backend())
                is_valid = True
            except Exception as e:
                exc = e
    if not is_valid:
        raise ValidationError('Public key is invalid: %s' % exc) | 0.00339 |
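
A usage sketch, assuming a reasonably recent release of the cryptography package is available to generate a key to validate (the load_* helpers above come from cryptography.hazmat.primitives.serialization).

from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa

key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
pem = key.public_key().public_bytes(
    encoding=serialization.Encoding.PEM,
    format=serialization.PublicFormat.SubjectPublicKeyInfo,
).decode('utf-8')

validate_public_key(pem)            # passes silently for a valid key
# validate_public_key('not a key')  # would raise ValidationError
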
def curve_to(self, x, y, x2, y2, x3, y3):
    """draw a curve. (x2, y2) is the middle point of the curve"""
    self._add_instruction("curve_to", x, y, x2, y2, x3, y3) | 0.011429 |
def _get_link(self, peer):
    """
    Returns a link to the given peer

    :return: A Link object
    :raise ValueError: Unknown peer
    """
    assert isinstance(peer, beans.Peer)

    # Look for a link to the peer, using routers
    for router in self._routers:
        link = router.get_link(peer)
        if link:
            return link

    # Not found
    raise ValueError("No link to peer {0}".format(peer)) | 0.004255 |