text (stringlengths 78–104k) | score (float64, 0–0.18)
---|---|
def select_coins(target, fee, output_size, min_change, *, absolute_fee=False,
consolidate=False, unspents):
'''
Implementation of Branch-and-Bound coin selection defined in Erhardt's
Master's thesis "An Evaluation of Coin Selection Strategies" here:
http://murch.one/wp-content/uploads/2016/11/erhardt2016coinselection.pdf
:param target: The total amount of the outputs in a transaction for which
we try to select the inputs to spend.
:type target: ``int``
:param fee: The number of satoshi per byte for the fee of the transaction.
:type fee: ``int``
:param output_size: A list containing as int the sizes of each output.
:type output_size: ``list`` of ``int``
:param min_change: The minimum amount of satoshis allowed for the
return/change address if there is no perfect match.
:type min_change: ``int``
:param absolute_fee: Whether or not the parameter ``fee`` should be
repurposed to denote the exact fee amount.
:type absolute_fee: ``bool``
:param consolidate: Whether or not the Branch-and-Bound process for finding
a perfect match should be skipped and all unspents
used directly.
:type consolidate: ``bool``
:param unspents: The UTXOs to use as inputs.
:type unspents: ``list`` of :class:`~bit.network.meta.Unspent`
:raises InsufficientFunds: If ``unspents`` does not contain enough balance
to allow spending matching the target.
'''
# The maximum number of tries for Branch-and-Bound:
BNB_TRIES = 1000000
# COST_OF_OVERHEAD excludes the return address of output_size (last element).
COST_OF_OVERHEAD = (8 + sum(output_size[:-1]) + 1) * fee
def branch_and_bound(d, selected_coins, effective_value, target, fee,
sorted_unspents): # pragma: no cover
nonlocal COST_OF_OVERHEAD, BNB_TRIES
BNB_TRIES -= 1
COST_PER_INPUT = 148 * fee # Just typical estimate values
COST_PER_OUTPUT = 34 * fee
# The target we want to match includes cost of overhead for transaction
target_to_match = target + COST_OF_OVERHEAD
# Allowing to pay the fee for a whole input and output is rationally
# correct, but increases the fee-rate dramatically for only a few inputs.
match_range = COST_PER_INPUT + COST_PER_OUTPUT
# We could allow to spend up to X% more on the fees if we can find a
# perfect match:
# match_range += int(0.1 * fee * sum(u.vsize for u in selected_coins))
# Check for solution and cut criteria:
if effective_value > target_to_match + match_range:
return []
elif effective_value >= target_to_match:
return selected_coins
elif BNB_TRIES <= 0:
return []
elif d >= len(sorted_unspents):
return []
else:
# Randomly explore next branch:
binary_random = randint(0, 1)
if binary_random:
# Explore inclusion branch first, else omission branch:
effective_value_new = effective_value + \
sorted_unspents[d].amount - fee * sorted_unspents[d].vsize
with_this = branch_and_bound(
d + 1,
selected_coins + [sorted_unspents[d]],
effective_value_new,
target,
fee,
sorted_unspents
)
if with_this != []:
return with_this
else:
without_this = branch_and_bound(
d + 1,
selected_coins,
effective_value,
target,
fee,
sorted_unspents
)
return without_this
else:
# As above but explore omission branch first:
without_this = branch_and_bound(
d + 1,
selected_coins,
effective_value,
target,
fee,
sorted_unspents
)
if without_this != []:
return without_this
else:
effective_value_new = effective_value + \
sorted_unspents[d].amount - fee * sorted_unspents[d].vsize
with_this = branch_and_bound(
d + 1,
selected_coins + [sorted_unspents[d]],
effective_value_new,
target,
fee,
sorted_unspents
)
return with_this
sorted_unspents = sorted(unspents, key=lambda u: u.amount, reverse=True)
selected_coins = []
if not consolidate:
# Trying to find a perfect match using Branch-and-Bound:
selected_coins = branch_and_bound(
d=0,
selected_coins=[],
effective_value=0,
target=target,
fee=fee,
sorted_unspents=sorted_unspents
)
remaining = 0
# Fallback: If no match, Single Random Draw with return address:
if selected_coins == []:
unspents = unspents.copy()
# Since we have no information on the user's spending habit it is
# best practice to randomly select UTXOs until we have enough.
if not consolidate:
# To have a deterministic way of inserting inputs when
# consolidating, we only shuffle the unspents otherwise.
shuffle(unspents)
while unspents:
selected_coins.append(unspents.pop(0))
estimated_fee = estimate_tx_fee(
sum(u.vsize for u in selected_coins),
len(selected_coins),
sum(output_size),
len(output_size),
fee
)
estimated_fee = fee if absolute_fee else estimated_fee
remaining = sum(u.amount for u in selected_coins) - target - estimated_fee
if remaining >= min_change and (not consolidate or len(unspents) == 0):
break
else:
raise InsufficientFunds('Balance {} is less than {} (including '
'fee).'.format(sum(
u.amount for u in selected_coins),
target + min_change + estimated_fee))
return selected_coins, remaining | 0.000739 |
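A minimal usage sketch (not part of the source): it assumes the module's helpers
(estimate_tx_fee, shuffle, randint, InsufficientFunds) are available as above, and it
substitutes a hypothetical stand-in for bit.network.meta.Unspent that carries only the
two attributes select_coins reads, amount and vsize, with illustrative values.
from collections import namedtuple

FakeUnspent = namedtuple('FakeUnspent', 'amount vsize')  # hypothetical stand-in for Unspent

utxos = [FakeUnspent(amount=50000, vsize=148),
         FakeUnspent(amount=120000, vsize=148)]
selected, change = select_coins(
    target=60000,          # satoshis to send to the recipients
    fee=2,                 # satoshis per byte
    output_size=[34, 34],  # one recipient output plus the change output (last element)
    min_change=5000,
    unspents=utxos,
)
print(len(selected), change)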
def times(self, func, *args):
""" Run a function **n** times.
"""
n = self.obj
i = 0
while n != 0:
n -= 1
func(i)
i += 1
return self._wrap(func) | 0.008584 |
def get_web_alert(self, web, header="", log=False):
"""Return the alert status relative to the web/url scan return value."""
ret = 'OK'
if web['status'] is None:
ret = 'CAREFUL'
elif web['status'] not in [200, 301, 302]:
ret = 'CRITICAL'
elif web['rtt_warning'] is not None and web['elapsed'] > web['rtt_warning']:
ret = 'WARNING'
# Get stat name
stat_name = self.get_stat_name(header=header)
# Manage threshold
self.manage_threshold(stat_name, ret)
# Manage action
self.manage_action(stat_name,
ret.lower(),
header,
web[self.get_key()])
return ret | 0.005229 |
def from_input(cls, input, workdir=None, manager=None):
"""
Create an instance of `AbinitTask` from an ABINIT input.
Args:
input: `AbinitInput` object.
workdir: Path to the working directory.
manager: :class:`TaskManager` object.
"""
return cls(input, workdir=workdir, manager=manager) | 0.00551 |
def handler(self, data):
'''
Function to handle notification data as part of Callback URL handler.
:param str data: data posted to Callback URL by connector.
:return: nothing
'''
if isinstance(data, r.models.Response):
self.log.debug("data is request object = %s", str(data.content))
data = data.content
elif isinstance(data, str):
self.log.info("data is json string with len %d", len(data))
if len(data) == 0:
self.log.warn("Handler received data of 0 length, exiting handler.")
return
else:
self.log.error("Input is not valid request object or json string : %s" %str(data))
return False
try:
data = json.loads(data)
if 'async-responses' in data.keys():
self.async_responses_callback(data)
if 'notifications' in data.keys():
self.notifications_callback(data)
if 'registrations' in data.keys():
self.registrations_callback(data)
if 'reg-updates' in data.keys():
self.reg_updates_callback(data)
if 'de-registrations' in data.keys():
self.de_registrations_callback(data)
if 'registrations-expired' in data.keys():
self.registrations_expired_callback(data)
except:
self.log.error("handle router had an issue and threw an exception")
ex_type, ex, tb = sys.exc_info()
traceback.print_tb(tb)
self.log.error(sys.exc_info())
del tb | 0.03609 |
def _ctypes_variables(parameter):
"""Returns the local parameter definition for implementing a Fortran wrapper subroutine
for this parameter's parent executable.
"""
if parameter.dimension is not None and ":" in parameter.dimension:
#For arrays that provide input (including 'inout'), we pass the output separately
#through a c-pointer. They will always be input arrays. For pure (out) parameters
#we use *only* a c-ptr, but it needs intent (inout) since it is created by ctypes.
if "in" in parameter.direction or parameter.direction == "":
#We have to get the ctypes-compatible data type for the parameters.
#stype = _ctypes_dtype(parameter)
splice = _ctypes_splice(parameter)
name = parameter.ctypes_parameter()[0]
if parameter.direction == "(inout)" and not ("allocatable" in parameter.modifiers or
"pointer" in parameter.modifiers):
return (parameter.definition(optionals=False, customdim=splice), False)
else:
return ("{}, intent(in) :: {}({})".format(parameter.strtype, name, splice), False)
else:
if parameter.dtype == "logical":
stype = _ctypes_dtype(parameter)
suffix = "_c"
modifiers = None
elif hasattr(parameter, "parameters"):
stype = None
suffix = "_f"
modifiers = ["intent(out)"]
else:
stype = None
suffix = ""
modifiers = None
return (parameter.definition(ctype=stype, optionals=False, suffix=suffix,
modifiers=modifiers), False) | 0.009153 |
def main():
"""Main function for :command:`fabulous-image`."""
import optparse
parser = optparse.OptionParser()
parser.add_option(
"-w", "--width", dest="width", type="int", default=None,
help=("Width of printed image in characters. Default: %default"))
(options, args) = parser.parse_args(args=sys.argv[1:])
for imgpath in args:
for line in Image(imgpath, options.width):
printy(line) | 0.002242 |
def _add_complex(self, members, is_association=False):
"""Assemble a Complex statement."""
params = {'color': '#0000ff',
'arrowhead': 'dot',
'arrowtail': 'dot',
'dir': 'both'}
for m1, m2 in itertools.combinations(members, 2):
if self._has_complex_node(m1, m2):
continue
if is_association:
m1_key = _get_node_key(m1.concept)
m2_key = _get_node_key(m2.concept)
else:
m1_key = _get_node_key(m1)
m2_key = _get_node_key(m2)
edge_key = (set([m1_key, m2_key]), 'complex')
if edge_key in self.existing_edges:
return
self.existing_edges.append(edge_key)
self._add_edge(m1_key, m2_key, **params) | 0.002372 |
def create_object(self, data, view_kwargs):
"""Create an object through sqlalchemy
:param dict data: the data validated by marshmallow
:param dict view_kwargs: kwargs from the resource view
:return DeclarativeMeta: an object from sqlalchemy
"""
self.before_create_object(data, view_kwargs)
relationship_fields = get_relationships(self.resource.schema, model_field=True)
nested_fields = get_nested_fields(self.resource.schema, model_field=True)
join_fields = relationship_fields + nested_fields
obj = self.model(**{key: value
for (key, value) in data.items() if key not in join_fields})
self.apply_relationships(data, obj)
self.apply_nested_fields(data, obj)
self.session.add(obj)
try:
self.session.commit()
except JsonApiException as e:
self.session.rollback()
raise e
except Exception as e:
self.session.rollback()
raise JsonApiException("Object creation error: " + str(e), source={'pointer': '/data'})
self.after_create_object(obj, data, view_kwargs)
return obj | 0.004992 |
def get_overridden_calculated_entry(self):
"""Gets the calculated entry this entry overrides.
return: (osid.grading.GradeEntry) - the calculated entry
raise: IllegalState - ``overrides_calculated_entry()`` is
``false``
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.Resource.get_avatar_template
if not bool(self._my_map['overriddenCalculatedEntryId']):
raise errors.IllegalState('this GradeEntry has no overridden_calculated_entry')
mgr = self._get_provider_manager('GRADING')
if not mgr.supports_grade_entry_lookup():
raise errors.OperationFailed('Grading does not support GradeEntry lookup')
lookup_session = mgr.get_grade_entry_lookup_session(proxy=getattr(self, "_proxy", None))
lookup_session.use_federated_gradebook_view()
osid_object = lookup_session.get_grade_entry(self.get_overridden_calculated_entry_id())
return osid_object | 0.006329 |
def distance_to_contact(D, alpha=1):
"""Compute contact matrix from input distance matrix. Distance values of
zeroes are given the largest contact count otherwise inferred non-zero
distance values.
"""
if callable(alpha):
distance_function = alpha
else:
try:
a = np.float64(alpha)
def distance_function(x):
return 1 / (x ** (1 / a))
except TypeError:
print("Alpha parameter must be callable or an array-like")
raise
except ZeroDivisionError:
raise ValueError("Alpha parameter must be non-zero")
m = np.max(distance_function(D[D != 0]))
M = np.zeros(D.shape)
M[D != 0] = distance_function(D[D != 0])
M[D == 0] = m
return M | 0.001289 |
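A quick self-contained check with made-up values: with the default alpha=1 the
transformation is simply 1/distance, and the zero diagonal receives the largest
contact value inferred from the non-zero distances (here 1/2 = 0.5).
import numpy as np

D = np.array([[0.0, 2.0, 4.0],
              [2.0, 0.0, 8.0],
              [4.0, 8.0, 0.0]])
M = distance_to_contact(D)
# Off-diagonal entries become 0.5, 0.25 and 0.125; the zero diagonal becomes 0.5.
print(M)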
def get_kafka_brokers():
"""
Parses the KAFKA_URL and returns a list of hostname:port pairs in the format
that kafka-python expects.
"""
# NOTE: The Kafka environment variables need to be present. If using
# Apache Kafka on Heroku, they will be available in your app configuration.
if not os.environ.get('KAFKA_URL'):
raise RuntimeError('The KAFKA_URL config variable is not set.')
return ['{}:{}'.format(parsedUrl.hostname, parsedUrl.port) for parsedUrl in
[urlparse(url) for url in os.environ.get('KAFKA_URL').split(',')]] | 0.003466 |
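Illustrative only (made-up broker URLs): with a comma-separated KAFKA_URL in the
style used by hosted Kafka add-ons, the function returns plain hostname:port strings.
import os

os.environ['KAFKA_URL'] = 'kafka+ssl://broker1.example.com:9096,kafka+ssl://broker2.example.com:9096'
print(get_kafka_brokers())
# ['broker1.example.com:9096', 'broker2.example.com:9096']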
def _update(self):
""" update num_inst and sum_metric """
aps = []
for k, v in self.records.items():
recall, prec = self._recall_prec(v, self.counts[k])
ap = self._average_precision(recall, prec)
aps.append(ap)
if self.num is not None and k < (self.num - 1):
self.sum_metric[k] = ap
self.num_inst[k] = 1
if self.num is None:
self.num_inst = 1
self.sum_metric = np.mean(aps)
else:
self.num_inst[-1] = 1
self.sum_metric[-1] = np.mean(aps) | 0.003311 |
def can_lookup_assets(self):
"""Tests if this user can perform ``Asset`` lookups.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known all methods in this
session will result in a ``PermissionDenied``. This is intended
as a hint to an application that may opt not to offer lookup
operations.
:return: ``false`` if lookup methods are not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
url_path = construct_url('authorization',
bank_id=self._catalog_idstr)
return self._get_request(url_path)['assetHints']['canLookup'] | 0.003886 |
def get_application_configurations(self, name=None):
"""Retrieves application configurations for this instance.
Args:
name (str, optional): Only return application configurations containing property **name** that matches `name`. `name` can be a
regular expression. If `name` is not supplied, then all application configurations are returned.
Returns:
list(ApplicationConfiguration): A list of application configurations matching the given `name`.
.. versionadded:: 1.12
"""
if hasattr(self, 'applicationConfigurations'):
return self._get_elements(self.applicationConfigurations, 'applicationConfigurations', ApplicationConfiguration, None, name) | 0.010624 |
def content_present(self, x: int, y: int) -> bool:
"""Determines if a line or printed text is at the given location."""
# Text?
if (x, y) in self.entries:
return True
# Vertical line?
if any(v.x == x and v.y1 < y < v.y2 for v in self.vertical_lines):
return True
# Horizontal line?
if any(line_y == y and x1 < x < x2
for line_y, x1, x2, _ in self.horizontal_lines):
return True
return False | 0.003929 |
def locate(self, path):
"""
Find a config item along a path; leading slash is optional and ignored.
"""
return Zconfig(lib.zconfig_locate(self._as_parameter_, path), False) | 0.009804 |
def calc_targetedrelease_v1(self):
"""Calculate the targeted water release for reducing drought events,
taking into account both the required water release and the actual
inflow into the dam.
Some dams are supposed to maintain a certain degree of low flow
variability downstream. In case parameter |RestrictTargetedRelease|
is set to `True`, method |calc_targetedrelease_v1| simulates
this by (approximately) passing inflow as outflow whenever inflow
is below the value of the threshold parameter
|NearDischargeMinimumThreshold|. If parameter |RestrictTargetedRelease|
is set to `False`, the method does nothing except assigning the value of sequence
|RequiredRelease| to sequence |TargetedRelease|.
Required control parameters:
|RestrictTargetedRelease|
|NearDischargeMinimumThreshold|
Required derived parameters:
|NearDischargeMinimumSmoothPar1|
|dam_derived.TOY|
Required flux sequence:
|RequiredRelease|
Calculated flux sequence:
|TargetedRelease|
Used auxiliary method:
|smooth_logistic1|
Basic equation:
:math:`TargetedRelease =
w \\cdot RequiredRelease + (1-w) \\cdot Inflow`
:math:`w = smooth_{logistic1}(
Inflow-NearDischargeMinimumThreshold, NearDischargeMinimumSmoothPar1)`
Examples:
As in the examples above, define a short simulation time period first:
>>> from hydpy import pub
>>> pub.timegrids = '2001.03.30', '2001.04.03', '1d'
Prepare the dam model:
>>> from hydpy.models.dam import *
>>> parameterstep()
>>> derived.toy.update()
We start with enabling |RestrictTargetedRelease|:
>>> restricttargetedrelease(True)
Define a minimum discharge value for a cross section immediately
downstream of 6 m³/s for the winter months and of 4 m³/s for the
summer months:
>>> neardischargeminimumthreshold(_11_1_12=6.0, _03_31_12=6.0,
... _04_1_12=4.0, _10_31_12=4.0)
Also define related tolerance values that are 2 m³/s in summer and
0 m³/s in winter:
>>> neardischargeminimumtolerance(_11_1_12=0.0, _03_31_12=0.0,
... _04_1_12=2.0, _10_31_12=2.0)
>>> derived.neardischargeminimumsmoothpar1.update()
Prepare a test function that calculates the targeted water release
based on the parameter values defined above and for inflows into
the dam ranging from 0 m³/s to 10 m³/s:
>>> from hydpy import UnitTest
>>> test = UnitTest(model, model.calc_targetedrelease_v1,
... last_example=21,
... parseqs=(fluxes.inflow,
... fluxes.targetedrelease))
>>> test.nexts.inflow = numpy.arange(0.0, 10.5, .5)
Firstly, assume the required release of water for reducing droughts
has already been determined to be 10 m³/s:
>>> fluxes.requiredrelease = 10.
On March 31, the tolerance value is 0 m³/s. Hence the targeted
release jumps from the inflow value to the required release
when exceeding the threshold value of 6 m³/s:
>>> model.idx_sim = pub.timegrids.init['2001.03.31']
>>> test()
| ex. | inflow | targetedrelease |
----------------------------------
| 1 | 0.0 | 0.0 |
| 2 | 0.5 | 0.5 |
| 3 | 1.0 | 1.0 |
| 4 | 1.5 | 1.5 |
| 5 | 2.0 | 2.0 |
| 6 | 2.5 | 2.5 |
| 7 | 3.0 | 3.0 |
| 8 | 3.5 | 3.5 |
| 9 | 4.0 | 4.0 |
| 10 | 4.5 | 4.5 |
| 11 | 5.0 | 5.0 |
| 12 | 5.5 | 5.5 |
| 13 | 6.0 | 8.0 |
| 14 | 6.5 | 10.0 |
| 15 | 7.0 | 10.0 |
| 16 | 7.5 | 10.0 |
| 17 | 8.0 | 10.0 |
| 18 | 8.5 | 10.0 |
| 19 | 9.0 | 10.0 |
| 20 | 9.5 | 10.0 |
| 21 | 10.0 | 10.0 |
On April 1, the threshold value is 4 m³/s and the tolerance value
is 2 m³/s. Hence there is a smooth transition for inflows ranging
between 2 m³/s and 6 m³/s:
>>> model.idx_sim = pub.timegrids.init['2001.04.01']
>>> test()
| ex. | inflow | targetedrelease |
----------------------------------
| 1 | 0.0 | 0.00102 |
| 2 | 0.5 | 0.503056 |
| 3 | 1.0 | 1.009127 |
| 4 | 1.5 | 1.527132 |
| 5 | 2.0 | 2.08 |
| 6 | 2.5 | 2.731586 |
| 7 | 3.0 | 3.639277 |
| 8 | 3.5 | 5.064628 |
| 9 | 4.0 | 7.0 |
| 10 | 4.5 | 8.676084 |
| 11 | 5.0 | 9.543374 |
| 12 | 5.5 | 9.861048 |
| 13 | 6.0 | 9.96 |
| 14 | 6.5 | 9.988828 |
| 15 | 7.0 | 9.996958 |
| 16 | 7.5 | 9.999196 |
| 17 | 8.0 | 9.999796 |
| 18 | 8.5 | 9.999951 |
| 19 | 9.0 | 9.99999 |
| 20 | 9.5 | 9.999998 |
| 21 | 10.0 | 10.0 |
A required release substantially below the threshold value is
a rather unlikely scenario, but is at least instructive regarding
the functioning of the method (when plotting the results
graphically...):
>>> fluxes.requiredrelease = 2.
On March 31, the relationship between targeted release and inflow
is again highly discontinuous:
>>> model.idx_sim = pub.timegrids.init['2001.03.31']
>>> test()
| ex. | inflow | targetedrelease |
----------------------------------
| 1 | 0.0 | 0.0 |
| 2 | 0.5 | 0.5 |
| 3 | 1.0 | 1.0 |
| 4 | 1.5 | 1.5 |
| 5 | 2.0 | 2.0 |
| 6 | 2.5 | 2.5 |
| 7 | 3.0 | 3.0 |
| 8 | 3.5 | 3.5 |
| 9 | 4.0 | 4.0 |
| 10 | 4.5 | 4.5 |
| 11 | 5.0 | 5.0 |
| 12 | 5.5 | 5.5 |
| 13 | 6.0 | 4.0 |
| 14 | 6.5 | 2.0 |
| 15 | 7.0 | 2.0 |
| 16 | 7.5 | 2.0 |
| 17 | 8.0 | 2.0 |
| 18 | 8.5 | 2.0 |
| 19 | 9.0 | 2.0 |
| 20 | 9.5 | 2.0 |
| 21 | 10.0 | 2.0 |
And on April 1, it is again absolutely smooth:
>>> model.idx_sim = pub.timegrids.init['2001.04.01']
>>> test()
| ex. | inflow | targetedrelease |
----------------------------------
| 1 | 0.0 | 0.000204 |
| 2 | 0.5 | 0.500483 |
| 3 | 1.0 | 1.001014 |
| 4 | 1.5 | 1.501596 |
| 5 | 2.0 | 2.0 |
| 6 | 2.5 | 2.484561 |
| 7 | 3.0 | 2.908675 |
| 8 | 3.5 | 3.138932 |
| 9 | 4.0 | 3.0 |
| 10 | 4.5 | 2.60178 |
| 11 | 5.0 | 2.273976 |
| 12 | 5.5 | 2.108074 |
| 13 | 6.0 | 2.04 |
| 14 | 6.5 | 2.014364 |
| 15 | 7.0 | 2.005071 |
| 16 | 7.5 | 2.00177 |
| 17 | 8.0 | 2.000612 |
| 18 | 8.5 | 2.00021 |
| 19 | 9.0 | 2.000072 |
| 20 | 9.5 | 2.000024 |
| 21 | 10.0 | 2.000008 |
For required releases equal to the threshold value, there is
generally no jump in the relationship. But on March 31, there
remains a discontinuity in the first derivative:
>>> fluxes.requiredrelease = 6.
>>> model.idx_sim = pub.timegrids.init['2001.03.31']
>>> test()
| ex. | inflow | targetedrelease |
----------------------------------
| 1 | 0.0 | 0.0 |
| 2 | 0.5 | 0.5 |
| 3 | 1.0 | 1.0 |
| 4 | 1.5 | 1.5 |
| 5 | 2.0 | 2.0 |
| 6 | 2.5 | 2.5 |
| 7 | 3.0 | 3.0 |
| 8 | 3.5 | 3.5 |
| 9 | 4.0 | 4.0 |
| 10 | 4.5 | 4.5 |
| 11 | 5.0 | 5.0 |
| 12 | 5.5 | 5.5 |
| 13 | 6.0 | 6.0 |
| 14 | 6.5 | 6.0 |
| 15 | 7.0 | 6.0 |
| 16 | 7.5 | 6.0 |
| 17 | 8.0 | 6.0 |
| 18 | 8.5 | 6.0 |
| 19 | 9.0 | 6.0 |
| 20 | 9.5 | 6.0 |
| 21 | 10.0 | 6.0 |
On April 1, this second order discontinuity is smoothed with
the help of a little hump around the threshold:
>>> fluxes.requiredrelease = 4.
>>> model.idx_sim = pub.timegrids.init['2001.04.01']
>>> test()
| ex. | inflow | targetedrelease |
----------------------------------
| 1 | 0.0 | 0.000408 |
| 2 | 0.5 | 0.501126 |
| 3 | 1.0 | 1.003042 |
| 4 | 1.5 | 1.50798 |
| 5 | 2.0 | 2.02 |
| 6 | 2.5 | 2.546317 |
| 7 | 3.0 | 3.091325 |
| 8 | 3.5 | 3.620356 |
| 9 | 4.0 | 4.0 |
| 10 | 4.5 | 4.120356 |
| 11 | 5.0 | 4.091325 |
| 12 | 5.5 | 4.046317 |
| 13 | 6.0 | 4.02 |
| 14 | 6.5 | 4.00798 |
| 15 | 7.0 | 4.003042 |
| 16 | 7.5 | 4.001126 |
| 17 | 8.0 | 4.000408 |
| 18 | 8.5 | 4.000146 |
| 19 | 9.0 | 4.000051 |
| 20 | 9.5 | 4.000018 |
| 21 | 10.0 | 4.000006 |
Repeating the above example with the |RestrictTargetedRelease| flag
disabled results in identical values for sequences |RequiredRelease|
and |TargetedRelease|:
>>> restricttargetedrelease(False)
>>> test()
| ex. | inflow | targetedrelease |
----------------------------------
| 1 | 0.0 | 4.0 |
| 2 | 0.5 | 4.0 |
| 3 | 1.0 | 4.0 |
| 4 | 1.5 | 4.0 |
| 5 | 2.0 | 4.0 |
| 6 | 2.5 | 4.0 |
| 7 | 3.0 | 4.0 |
| 8 | 3.5 | 4.0 |
| 9 | 4.0 | 4.0 |
| 10 | 4.5 | 4.0 |
| 11 | 5.0 | 4.0 |
| 12 | 5.5 | 4.0 |
| 13 | 6.0 | 4.0 |
| 14 | 6.5 | 4.0 |
| 15 | 7.0 | 4.0 |
| 16 | 7.5 | 4.0 |
| 17 | 8.0 | 4.0 |
| 18 | 8.5 | 4.0 |
| 19 | 9.0 | 4.0 |
| 20 | 9.5 | 4.0 |
| 21 | 10.0 | 4.0 |
"""
con = self.parameters.control.fastaccess
der = self.parameters.derived.fastaccess
flu = self.sequences.fluxes.fastaccess
if con.restricttargetedrelease:
flu.targetedrelease = smoothutils.smooth_logistic1(
flu.inflow-con.neardischargeminimumthreshold[
der.toy[self.idx_sim]],
der.neardischargeminimumsmoothpar1[der.toy[self.idx_sim]])
flu.targetedrelease = (flu.targetedrelease * flu.requiredrelease +
(1.-flu.targetedrelease) * flu.inflow)
else:
flu.targetedrelease = flu.requiredrelease | 0.00008 |
def _get_interfaces(self):
"""Get a list of interfaces on this hosting device.
:return: List of the interfaces
"""
ios_cfg = self._get_running_config()
parse = HTParser(ios_cfg)
itfcs_raw = parse.find_lines("^interface GigabitEthernet")
itfcs = [raw_if.strip().split(' ')[1] for raw_if in itfcs_raw]
LOG.debug("Interfaces on hosting device: %s", itfcs)
return itfcs | 0.004566 |
def _folder_item_instrument(self, analysis_brain, item):
"""Fills the analysis' instrument to the item passed in.
:param analysis_brain: Brain that represents an analysis
:param item: analysis' dictionary counterpart that represents a row
"""
item['Instrument'] = ''
if not analysis_brain.getInstrumentEntryOfResults:
# Manual entry of results, instrument is not allowed
item['Instrument'] = _('Manual')
item['replace']['Instrument'] = \
'<a href="#">{}</a>'.format(t(_('Manual')))
return
# Instrument can be assigned to this analysis
is_editable = self.is_analysis_edition_allowed(analysis_brain)
self.show_methodinstr_columns = True
instrument = self.get_instrument(analysis_brain)
if is_editable:
# Edition allowed
voc = self.get_instruments_vocabulary(analysis_brain)
if voc:
# The service has at least one instrument available
item['Instrument'] = instrument.UID() if instrument else ''
item['choices']['Instrument'] = voc
item['allow_edit'].append('Instrument')
return
if instrument:
# Edition not allowed
instrument_title = instrument and instrument.Title() or ''
instrument_link = get_link(instrument.absolute_url(),
instrument_title)
item['Instrument'] = instrument_title
item['replace']['Instrument'] = instrument_link
return | 0.001231 |
def write_ioc_string(root, force=False):
"""
Serialize an IOC, as defined by a set of etree Elements, to a String.
:param root: etree Element to serialize. Should have the tag 'OpenIOC'
:param force: Skip the root node tag check.
:return: the serialized IOC string.
"""
root_tag = 'OpenIOC'
if not force and root.tag != root_tag:
raise ValueError('Root tag is not "{}".'.format(root_tag))
default_encoding = 'utf-8'
tree = root.getroottree()
# noinspection PyBroadException
try:
encoding = tree.docinfo.encoding
except:
log.debug('Failed to get encoding from docinfo')
encoding = default_encoding
return et.tostring(tree, encoding=encoding, xml_declaration=True, pretty_print=True) | 0.004038 |
def network_io_counters():
"""Return network I/O statistics for every network interface
installed on the system as a dict of raw tuples.
"""
f = open("/proc/net/dev", "r")
try:
lines = f.readlines()
finally:
f.close()
retdict = {}
for line in lines[2:]:
colon = line.find(':')
assert colon > 0, line
name = line[:colon].strip()
fields = line[colon+1:].strip().split()
bytes_recv = int(fields[0])
packets_recv = int(fields[1])
errin = int(fields[2])
dropin = int(fields[3])
bytes_sent = int(fields[8])
packets_sent = int(fields[9])
errout = int(fields[10])
dropout = int(fields[11])
retdict[name] = (bytes_sent, bytes_recv, packets_sent, packets_recv,
errin, errout, dropin, dropout)
return retdict | 0.001138 |
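A short usage sketch (Linux only, since the data comes from /proc/net/dev): unpack
the raw per-interface tuples into the counters of interest.
for name, fields in network_io_counters().items():
    bytes_sent, bytes_recv = fields[0], fields[1]
    print(name, 'sent:', bytes_sent, 'received:', bytes_recv)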
def get_proof(self):
"""
Get a proof produced when deciding the formula.
"""
if self.lingeling and self.prfile:
self.prfile.seek(0)
return [line.rstrip() for line in self.prfile.readlines()] | 0.007968 |
def walk_snmp_values(sess, helper, oid, check):
""" return a snmp value or exits the plugin with unknown"""
try:
snmp_walk = sess.walk_oid(oid)
result_list = []
for x in range(len(snmp_walk)):
result_list.append(snmp_walk[x].val)
if result_list != []:
return result_list
else:
raise SnmpException("No content")
except SnmpException:
helper.exit(summary="No response from device for {} ({})".format(check, oid),
exit_code=unknown, perfdata='') | 0.004894 |
def _parse_pypi_json_package_info(self, package_name, current_version, response):
"""
:type package_name: str
:type current_version: version.Version
:type response: requests.models.Response
"""
data = response.json()
all_versions = [version.parse(vers) for vers in data['releases'].keys()]
filtered_versions = [vers for vers in all_versions if not vers.is_prerelease and not vers.is_postrelease]
if not filtered_versions: # pragma: nocover
return False, 'error while parsing version'
latest_version = max(filtered_versions)
# even if user did not choose prerelease, if the package from requirements is pre/post release, use it
if self._prerelease or current_version.is_postrelease or current_version.is_prerelease:
prerelease_versions = [vers for vers in all_versions if vers.is_prerelease or vers.is_postrelease]
if prerelease_versions:
latest_version = max(prerelease_versions)
try:
try:
latest_version_info = data['releases'][str(latest_version)][0]
except KeyError: # pragma: nocover
# non-RFC versions, get the latest from pypi response
latest_version = version.parse(data['info']['version'])
latest_version_info = data['releases'][str(latest_version)][0]
except Exception: # pragma: nocover
return False, 'error while parsing version'
upload_time = latest_version_info['upload_time'].replace('T', ' ')
return {
'name': package_name,
'current_version': current_version,
'latest_version': latest_version,
'upgrade_available': current_version < latest_version,
'upload_time': upload_time
}, 'success' | 0.004301 |
def get_order_matchresults(self, order_id, _async=False):
"""
Query the trade details (match results) of a specific order.
:param order_id:
:return:
"""
params = {}
path = f'/v1/order/orders/{order_id}/matchresults'
return api_key_get(params, path, _async=_async) | 0.007194 |
def key_to_str(modifiers, key, mods_table = mods, key_table = wx, key_prefix = 'WXK_'):
"""
Returns a human-readable version of numerical modifiers and key.
To make the key suitable for global hotkey usage, supply:
mods_table = global_mods, key_table = win32con, key_prefix = 'VK_'
"""
logger.debug('Converting (%s, %s) to string.', modifiers, key)
if not key:
key_str = 'NONE'
else:
key_str = None
res = ''
for value, name in mods_table.items():
if (modifiers & value):
res += name + '+'
for x in dir(key_table):
if x.startswith(key_prefix):
if getattr(key_table, x) == key:
key_str = converts.get(x, x[len(key_prefix):])
if not key_str:
key_str = chr(key)
res += key_str
logger.debug('Final result: %s.', res)
return res | 0.035573 |
def keep_path(self, path):
"""
Given a path, returns True if the path should be kept, False if it should be cut.
"""
if len(path.addr_trace) < 2:
return True
return self.should_take_exit(path.addr_trace[-2], path.addr_trace[-1]) | 0.010676 |
def __store(self, stored_object, overwrite=False):
"""
Store a variable into the storage.
:param StoredObject stored_object: The descriptor describing start address and the variable.
:param bool overwrite: Whether existing objects should be overwritten or not. True to make a strong update,
False to make a weak update.
:return: None
"""
start = stored_object.start
object_size = stored_object.size
end = start + object_size
# region items in the middle
overlapping_items = list(self._storage.irange(start, end-1))
# is there a region item that begins before the start and overlaps with this variable?
floor_key, floor_item = self._get_container(start)
if floor_item is not None and floor_key not in overlapping_items:
# insert it into the beginning
overlapping_items.insert(0, floor_key)
# scan through the entire list of region items, split existing regions and insert new regions as needed
to_update = {start: RegionObject(start, object_size, {stored_object})}
last_end = start
for floor_key in overlapping_items:
item = self._storage[floor_key]
if item.start < start:
# we need to break this item into two
a, b = item.split(start)
if overwrite:
b.set_object(stored_object)
else:
self._add_object_with_check(b, stored_object)
to_update[a.start] = a
to_update[b.start] = b
last_end = b.end
elif item.start > last_end:
# there is a gap between the last item and the current item
# fill in the gap
new_item = RegionObject(last_end, item.start - last_end, {stored_object})
to_update[new_item.start] = new_item
last_end = new_item.end
elif item.end > end:
# we need to split this item into two
a, b = item.split(end)
if overwrite:
a.set_object(stored_object)
else:
self._add_object_with_check(a, stored_object)
to_update[a.start] = a
to_update[b.start] = b
last_end = b.end
else:
if overwrite:
item.set_object(stored_object)
else:
self._add_object_with_check(item, stored_object)
to_update[item.start] = item
self._storage.update(to_update) | 0.002606 |
def make_python_patterns(additional_keywords=[], additional_builtins=[]):
"Strongly inspired from idlelib.ColorDelegator.make_pat"
kwlist = keyword.kwlist + additional_keywords
builtinlist = [str(name) for name in dir(builtins)
if not name.startswith('_')] + additional_builtins
repeated = set(kwlist) & set(builtinlist)
for repeated_element in repeated:
kwlist.remove(repeated_element)
kw = r"\b" + any("keyword", kwlist) + r"\b"
builtin = r"([^.'\"\\#]\b|^)" + any("builtin", builtinlist) + r"\b"
comment = any("comment", [r"#[^\n]*"])
instance = any("instance", [r"\bself\b",
r"\bcls\b",
(r"^\s*@([a-zA-Z_][a-zA-Z0-9_]*)"
r"(\.[a-zA-Z_][a-zA-Z0-9_]*)*")])
number_regex = [r"\b[+-]?[0-9]+[lLjJ]?\b",
r"\b[+-]?0[xX][0-9A-Fa-f]+[lL]?\b",
r"\b[+-]?0[oO][0-7]+[lL]?\b",
r"\b[+-]?0[bB][01]+[lL]?\b",
r"\b[+-]?[0-9]+(?:\.[0-9]+)?(?:[eE][+-]?[0-9]+)?[jJ]?\b"]
if PY3:
prefix = "r|u|R|U|f|F|fr|Fr|fR|FR|rf|rF|Rf|RF|b|B|br|Br|bR|BR|rb|rB|Rb|RB"
else:
prefix = "r|u|ur|R|U|UR|Ur|uR|b|B|br|Br|bR|BR"
sqstring = r"(\b(%s))?'[^'\\\n]*(\\.[^'\\\n]*)*'?" % prefix
dqstring = r'(\b(%s))?"[^"\\\n]*(\\.[^"\\\n]*)*"?' % prefix
uf_sqstring = r"(\b(%s))?'[^'\\\n]*(\\.[^'\\\n]*)*(\\)$(?!')$" % prefix
uf_dqstring = r'(\b(%s))?"[^"\\\n]*(\\.[^"\\\n]*)*(\\)$(?!")$' % prefix
sq3string = r"(\b(%s))?'''[^'\\]*((\\.|'(?!''))[^'\\]*)*(''')?" % prefix
dq3string = r'(\b(%s))?"""[^"\\]*((\\.|"(?!""))[^"\\]*)*(""")?' % prefix
uf_sq3string = r"(\b(%s))?'''[^'\\]*((\\.|'(?!''))[^'\\]*)*(\\)?(?!''')$" \
% prefix
uf_dq3string = r'(\b(%s))?"""[^"\\]*((\\.|"(?!""))[^"\\]*)*(\\)?(?!""")$' \
% prefix
# Needed to achieve correct highlighting in Python 3.6+
# See issue 7324
if PY36_OR_MORE:
# Based on
# https://github.com/python/cpython/blob/
# 81950495ba2c36056e0ce48fd37d514816c26747/Lib/tokenize.py#L117
# In order: Hexnumber, Binnumber, Octnumber, Decnumber,
# Pointfloat + Exponent, Expfloat, Imagnumber
number_regex = [
r"\b[+-]?0[xX](?:_?[0-9A-Fa-f])+[lL]?\b",
r"\b[+-]?0[bB](?:_?[01])+[lL]?\b",
r"\b[+-]?0[oO](?:_?[0-7])+[lL]?\b",
r"\b[+-]?(?:0(?:_?0)*|[1-9](?:_?[0-9])*)[lL]?\b",
r"\b((\.[0-9](?:_?[0-9])*')|\.[0-9](?:_?[0-9])*)"
"([eE][+-]?[0-9](?:_?[0-9])*)?[jJ]?\b",
r"\b[0-9](?:_?[0-9])*([eE][+-]?[0-9](?:_?[0-9])*)?[jJ]?\b",
r"\b[0-9](?:_?[0-9])*[jJ]\b"]
number = any("number", number_regex)
string = any("string", [sq3string, dq3string, sqstring, dqstring])
ufstring1 = any("uf_sqstring", [uf_sqstring])
ufstring2 = any("uf_dqstring", [uf_dqstring])
ufstring3 = any("uf_sq3string", [uf_sq3string])
ufstring4 = any("uf_dq3string", [uf_dq3string])
return "|".join([instance, kw, builtin, comment,
ufstring1, ufstring2, ufstring3, ufstring4, string,
number, any("SYNC", [r"\n"])]) | 0.002695 |
def p_expr_XOR_expr(p):
""" expr : expr XOR expr
"""
p[0] = make_binary(p.lineno(2), 'XOR', p[1], p[3], lambda x, y: (x and not y) or (not x and y)) | 0.0125 |
def getDbNames(self):
"""This function returns the list of open databases"""
request = []
request.append(uu({'-dbnames': '' }))
result = self._doRequest(request)
result = FMResultset.FMResultset(result)
dbNames = []
for dbName in result.resultset:
dbNames.append(string.lower(dbName['DATABASE_NAME']))
return dbNames | 0.035398 |
def format_cftime_datetime(date):
"""Converts a cftime.datetime object to a string with the format:
YYYY-MM-DD HH:MM:SS.UUUUUU
"""
return '{:04d}-{:02d}-{:02d} {:02d}:{:02d}:{:02d}.{:06d}'.format(
date.year, date.month, date.day, date.hour, date.minute, date.second,
date.microsecond) | 0.003165 |
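A quick check (not from the source): the formatter only reads the year through
microsecond attributes, so a plain datetime object is enough to illustrate the
output format.
import datetime

print(format_cftime_datetime(datetime.datetime(2001, 3, 30, 12, 0, 5, 250)))
# 2001-03-30 12:00:05.000250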
def add_release(self, release):
""" Add a release object if it does not already exist """
for r in self.releases:
if r.version == release.version:
return
self.releases.append(release) | 0.008511 |
def write(path, content, encoding="UTF-8", append=False, raw=False):
"""Write *content* to file *path*"""
mode = 'wb' if not append else 'ab'
with OPEN_FUNC(path, mode) as _file:
if raw:
import shutil
shutil.copyfileobj(content, _file)
else:
_file.write(content.encode(encoding)) | 0.002915 |
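Illustrative usage, assuming the module-level OPEN_FUNC behaves like the built-in
open (hypothetical file path): write a file, then append to it.
write('/tmp/example.txt', 'hello\n')
write('/tmp/example.txt', 'world\n', append=True)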
def types(self):
''' Returns an iterator over the types of the neurites in the object.
If the object is a tree, then one value is returned.
'''
neurites = self._obj.neurites if hasattr(self._obj, 'neurites') else (self._obj,)
return (neu.type for neu in neurites) | 0.009772 |
def unassign_assessment_part_from_bank(self, assessment_part_id, bank_id):
"""Removes an ``AssessmentPart`` from an ``Bank``.
arg: assessment_part_id (osid.id.Id): the ``Id`` of the
``AssessmentPart``
arg: bank_id (osid.id.Id): the ``Id`` of the ``Bank``
raise: NotFound - ``assessment_part_id`` or ``bank_id`` not
found or ``assessment_part_id`` not assigned to
``bank_id``
raise: NullArgument - ``assessment_part_id`` or ``bank_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
mgr = self._get_provider_manager('ASSESSMENT', local=True)
lookup_session = mgr.get_bank_lookup_session(proxy=self._proxy)
lookup_session.get_bank(bank_id) # to raise NotFound
self._unassign_object_from_catalog(assessment_part_id, bank_id) | 0.00194 |
def unregister(callback, event=None):
"""
Inverse operation of `register` (though not a decorator). Client-less
`remove_event_handler
<telethon.client.updates.UpdateMethods.remove_event_handler>`
variant. **Note that this won't remove handlers from the client**,
because it simply can't, so you would generally use this before
adding the handlers to the client.
This method is here for symmetry. You will rarely need to
unregister events, since you can simply just not add them
to any client.
If no event is given, all events for this callback are removed.
Returns how many callbacks were removed.
"""
found = 0
if event and not isinstance(event, type):
event = type(event)
handlers = getattr(callback, _HANDLERS_ATTRIBUTE, [])
i = len(handlers)
while i:
i -= 1
ev = handlers[i]
if not event or isinstance(ev, event):
del handlers[i]
found += 1
return found | 0.000969 |
def ADOSC(frame, fast=3, slow=10, high_col='high', low_col='low', close_col='close', vol_col='Volume'):
"""Chaikin A/D oscillator"""
return _frame_to_series(frame, [high_col, low_col, close_col, vol_col], talib.ADOSC, fast, slow) | 0.012658 |
def get_version():
"""Get version from package resources."""
requirement = pkg_resources.Requirement.parse("yoda")
provider = pkg_resources.get_provider(requirement)
return provider.version | 0.004878 |
def probe(self, hosts):
'''
.. seealso:: :attr:`probe`
'''
def __send_probe(host):
ping = self.m(
'',
cmdd=dict(
cmd=' '.join([
self.__ping_cmd,
self.__num,
self.__net_if,
self.__packetsize,
host
])
),
critical=False,
verbose=False
)
up = True if ping.get('returncode') == 0 else False
self.__probe_results[host] = {'up': up}
if up:
p = ping.get('out')
loss = _search(rxlss, p)
ms = _findall(rxmst, p)
rtt = _search(rxrtt, p)
if loss:
loss = loss.group('loss')
self.__probe_results[host].update(dict(
ms=ms,
loss=loss,
rtt=rtt.groupdict()
))
hosts = to_list(hosts)
pool_size = (
len(hosts)
if len(hosts) <= self.__max_pool_size else
self.__max_pool_size
)
pool = _Pool(pool_size)
pool.map(__send_probe, hosts)
pool.close()
pool.join() | 0.001474 |
def get_closest(self, lon, lat, depth=0):
"""
Get the closest object to the given longitude and latitude
and its distance.
:param lon: longitude in degrees
:param lat: latitude in degrees
:param depth: depth in km (default 0)
:returns: (object, distance)
"""
xyz = spherical_to_cartesian(lon, lat, depth)
min_dist, idx = self.kdtree.query(xyz)
return self.objects[idx], min_dist | 0.004283 |
def _hl_as_string(self, highlight):
"""
Given a solr string of highlighted text, returns the
str representations
For example:
"Foo <em>Muscle</em> bar <em>atrophy</em>, generalized"
Returns:
"Foo Muscle bar atrophy, generalized"
:return: str
"""
# dummy tags to make it valid xml
dummy_xml = "<p>" + highlight + "</p>"
try:
element_tree = ET.fromstring(dummy_xml)
except ET.ParseError:
raise ET.ParseError
return "".join(list(element_tree.itertext())) | 0.003407 |
def classify_intersection8(s, curve1, surface1, curve2, surface2):
"""Image for :func:`._surface_helpers.classify_intersection` docstring."""
if NO_IMAGES:
return
ax = classify_help(s, curve1, surface1, curve2, surface2, None)
ax.set_xlim(-1.125, 1.125)
ax.set_ylim(-0.125, 1.125)
save_image(ax.figure, "classify_intersection8.png") | 0.00274 |
def validate_regexp(pattern, flags=0):
"""
Validate the field matches the given regular expression.
Should work with anything that supports '==' operator.
:param pattern: Regular expression to match. String or regular expression instance.
:param flags: Flags for the regular expression.
:raises: ``ValidationError('regexp')``
"""
regex = re.compile(pattern, flags) if isinstance(pattern, str) else pattern
def regexp_validator(field, data):
if field.value is None:
return
if regex.match(str(field.value)) is None:
raise ValidationError('regexp', pattern=pattern)
return regexp_validator | 0.002994 |
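A minimal sketch with a hypothetical field stand-in: the returned validator expects
an object exposing a value attribute and raises ValidationError('regexp') when the
pattern does not match.
from types import SimpleNamespace

zip_validator = validate_regexp(r'^\d{5}$')
zip_validator(SimpleNamespace(value='90210'), data=None)    # passes silently
# zip_validator(SimpleNamespace(value='abc'), data=None)    # would raise ValidationError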
def validate(style):
"""Check `style` against pyout.styling.schema.
Parameters
----------
style : dict
Style object to validate.
Raises
------
StyleValidationError if `style` is not valid.
"""
try:
import jsonschema
except ImportError:
return
try:
jsonschema.validate(style, schema)
except jsonschema.ValidationError as exc:
new_exc = StyleValidationError(exc)
# Don't dump the original jsonschema exception because it is already
# included in the StyleValidationError's message.
new_exc.__cause__ = None
raise new_exc | 0.00156 |
def ContrastNormalization(alpha=1.0, per_channel=False, name=None, deterministic=False, random_state=None):
"""
Augmenter that changes the contrast of images.
dtype support:
See ``imgaug.augmenters.contrast.LinearContrast``.
Parameters
----------
alpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Strength of the contrast normalization. Higher values than 1.0
lead to higher contrast, lower values decrease the contrast.
* If a number, then that value will be used for all images.
* If a tuple ``(a, b)``, then a value will be sampled per image from
the range ``a <= x <= b`` and be used as the alpha value.
* If a list, then a random value will be sampled per image from
that list.
* If a StochasticParameter, then this parameter will be used to
sample the alpha value per image.
per_channel : bool or float, optional
Whether to use the same value for all channels (False)
or to sample a new value for each channel (True).
If this value is a float ``p``, then for ``p`` percent of all images
`per_channel` will be treated as True, otherwise as False.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> iaa.ContrastNormalization((0.5, 1.5))
Decreases or improves contrast per image by a random factor between
0.5 and 1.5. The factor 0.5 means that any difference from the center value
(i.e. 128) will be halved, leading to less contrast.
>>> iaa.ContrastNormalization((0.5, 1.5), per_channel=0.5)
Same as before, but for 50 percent of all images the normalization is done
independently per channel (i.e. factors can vary per channel for the same
image). In the other 50 percent of all images, the factor is the same for
all channels.
"""
# placed here to avoid cyclic dependency
from . import contrast as contrast_lib
return contrast_lib.LinearContrast(alpha=alpha, per_channel=per_channel, name=name, deterministic=deterministic,
random_state=random_state) | 0.002004 |
def addIndividual(self, individual):
"""
Adds the specified individual to this dataset.
"""
id_ = individual.getId()
self._individualIdMap[id_] = individual
self._individualIds.append(id_)
self._individualNameMap[individual.getName()] = individual | 0.006601 |
def open_display(self):
"""Establishes connection with X server and prepares objects
necessary to retrieve and send data.
"""
self.close_display() # Properly finish previous open_display()
XkbIgnoreExtension(False)
display_name = None
major = c_int(XkbMajorVersion)
minor = c_int(XkbMinorVersion)
reason = c_int()
self._display = XkbOpenDisplay(
display_name,
None, None, byref(major), byref(minor), byref(reason))
if not self._display:
if reason.value in OPEN_DISPLAY_ERRORS:
# Assume POSIX conformance
display_name = os.getenv("DISPLAY") or "default"
raise X11Error(OPEN_DISPLAY_ERRORS[reason.value].format(
libname="xkbgroup",
used_major=XkbMajorVersion,
used_minor=XkbMinorVersion,
found_major=major.value,
found_minor=minor.value,
display_name=display_name)
+ ".")
else:
raise X11Error("Unknown error {} from XkbOpenDisplay.".format(reason.value))
self._keyboard_description = XkbGetMap(self._display, 0, XkbUseCoreKbd)
if not self._keyboard_description:
self.close_display()
raise X11Error("Failed to get keyboard description.")
# Controls mask doesn't affect the availability of xkb->ctrls->num_groups anyway
# Just use a valid value, and xkb->ctrls->num_groups will be definitely set
status = XkbGetControls(self._display, XkbAllControlsMask, self._keyboard_description)
if status != Success:
self.close_display()
raise X11Error(GET_CONTROLS_ERRORS[status] + ".")
names_mask = XkbSymbolsNameMask | XkbGroupNamesMask
status = XkbGetNames(self._display, names_mask, self._keyboard_description)
if status != Success:
self.close_display()
raise X11Error(GET_NAMES_ERRORS[status] + ".") | 0.003354 |
def emit(self, record):
"""Actually log the specified logging record.
Overrides the default emit behavior of ``StreamHandler``.
See https://docs.python.org/2/library/logging.html#handler-objects
:type record: :class:`logging.LogRecord`
:param record: The record to be logged.
"""
message = super(CloudLoggingHandler, self).format(record)
self.transport.send(record, message, resource=self.resource, labels=self.labels) | 0.006186 |
def init_key_jar(public_path='', private_path='', key_defs='', owner='',
read_only=True):
"""
A number of cases here:
1. A private path is given
a. The file exists and a JWKS is found there.
From that JWKS a KeyJar instance is built.
b. If the private path file doesn't exist, the key definitions are
used to build a KeyJar instance. A JWKS with the private keys is
written to the file named in private_path.
If a public path is also provided, a JWKS with the public keys is
written to that file.
2. A public path is given but no private path.
a. If the public path file exists then the JWKS in that file is used to
construct a KeyJar.
b. If no such file exists then a KeyJar will be built
based on the key_defs specification and a JWKS with the public keys
will be written to the public path file.
3. If neither a public path nor a private path is given then a KeyJar is
built based on the key_defs specification and no JWKS will be written
to file.
In all cases a KeyJar instance is returned
The keys stored in the KeyJar will be stored under the '' identifier.
:param public_path: A file path to a file that contains a JWKS with public
keys
:param private_path: A file path to a file that contains a JWKS with
private keys.
:param key_defs: A definition of what keys should be created if they are
not already available
:param owner: The owner of the keys
:param read_only: This function should not attempt to write anything
to a file system.
:return: An instantiated :py:class;`oidcmsg.key_jar.KeyJar` instance
"""
if private_path:
if os.path.isfile(private_path):
_jwks = open(private_path, 'r').read()
_kj = KeyJar()
_kj.import_jwks(json.loads(_jwks), owner)
if key_defs:
_kb = _kj.issuer_keys[owner][0]
_diff = key_diff(_kb, key_defs)
if _diff:
if read_only:
logger.error('Not allowed to write to disc!')
else:
update_key_bundle(_kb, _diff)
_kj.issuer_keys[owner] = [_kb]
jwks = _kj.export_jwks(private=True, issuer=owner)
fp = open(private_path, 'w')
fp.write(json.dumps(jwks))
fp.close()
else:
_kj = build_keyjar(key_defs, owner=owner)
if not read_only:
jwks = _kj.export_jwks(private=True, issuer=owner)
head, tail = os.path.split(private_path)
if head and not os.path.isdir(head):
os.makedirs(head)
fp = open(private_path, 'w')
fp.write(json.dumps(jwks))
fp.close()
if public_path and not read_only:
jwks = _kj.export_jwks(issuer=owner) # public part
head, tail = os.path.split(public_path)
if head and not os.path.isdir(head):
os.makedirs(head)
fp = open(public_path, 'w')
fp.write(json.dumps(jwks))
fp.close()
elif public_path:
if os.path.isfile(public_path):
_jwks = open(public_path, 'r').read()
_kj = KeyJar()
_kj.import_jwks(json.loads(_jwks), owner)
if key_defs:
_kb = _kj.issuer_keys[owner][0]
_diff = key_diff(_kb, key_defs)
if _diff:
if read_only:
logger.error('Not allowed to write to disc!')
else:
update_key_bundle(_kb, _diff)
_kj.issuer_keys[owner] = [_kb]
jwks = _kj.export_jwks(issuer=owner)
fp = open(public_path, 'w')
fp.write(json.dumps(jwks))
fp.close()
else:
_kj = build_keyjar(key_defs, owner=owner)
if not read_only:
_jwks = _kj.export_jwks(issuer=owner)
head, tail = os.path.split(public_path)
if head and not os.path.isdir(head):
os.makedirs(head)
fp = open(public_path, 'w')
fp.write(json.dumps(_jwks))
fp.close()
else:
_kj = build_keyjar(key_defs, owner=owner)
return _kj | 0.000218 |
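A minimal sketch of case 3 (no paths given, nothing written to disk); the key_defs
shown are an assumed specification in the style of the surrounding key-handling code,
not taken from the source.
key_defs = [
    {"type": "RSA", "use": ["sig"]},
    {"type": "EC", "crv": "P-256", "use": ["sig"]},
]
kj = init_key_jar(key_defs=key_defs, owner='https://op.example.org')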
def has_child_bins(self, bin_id):
"""Tests if a bin has any children.
arg: bin_id (osid.id.Id): the ``Id`` of a bin
return: (boolean) - ``true`` if the ``bin_id`` has children,
``false`` otherwise
raise: NotFound - ``bin_id`` not found
raise: NullArgument - ``bin_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.has_child_bins
if self._catalog_session is not None:
return self._catalog_session.has_child_catalogs(catalog_id=bin_id)
return self._hierarchy_session.has_children(id_=bin_id) | 0.002413 |
def install(path, capture_error=False): # type: (str, bool) -> None
"""Install a Python module in the executing Python environment.
Args:
path (str): Real path location of the Python module.
capture_error (bool): Default false. If True, the running process captures the
stderr, and appends it to the returned Exception message in case of errors.
"""
cmd = '%s -m pip install -U . ' % _process.python_executable()
if has_requirements(path):
cmd += '-r requirements.txt'
logger.info('Installing module with the following command:\n%s', cmd)
_process.check_error(shlex.split(cmd), _errors.InstallModuleError, cwd=path, capture_error=capture_error) | 0.00561 |
def store_new(self, coll, path, mtime):
"""Load a collections metadata file and store it
:param str coll: The name of the collection the metadata is for
:param str path: The path to the collections metadata file
:param float mtime: The current mtime of the collections metadata file
:return: The collections metadata
:rtype: dict
"""
obj = load_yaml_config(path)
self.cache[coll] = (mtime, obj)
return obj | 0.004115 |
def _set_sla_data(self, test_id, metrics):
"""
Get sla data from each metric and set it in the _Analysis object specified by test_id to make it available
for retrieval
:return: currently always returns CONSTANTS.OK. Maybe enhanced in future to return additional status
"""
for metric in metrics:
self._analyses[test_id].sla_data[metric.label] = metric.sla_map
return CONSTANTS.OK | 0.009685 |
def get_template_as_json(template_id, **kwargs):
"""
Get a template (including attribute and dataset definitions) as a JSON
string. This is just a wrapper around the get_template_as_dict function.
"""
user_id = kwargs['user_id']
return json.dumps(get_template_as_dict(template_id, user_id=user_id)) | 0.006061 |
def findObjects(path):
"""Finds objects in pairtree.
Given a path that corresponds to a pairtree, walk it and look for
non-shorty (it's ya birthday) directories.
"""
objects = []
if not os.path.isdir(path):
return []
contents = os.listdir(path)
for item in contents:
fullPath = os.path.join(path, item)
if not os.path.isdir(fullPath):
# deal with a split end at this point
# we might want to consider a normalize option
return [path]
else:
if isShorty(item):
objects = objects + findObjects(fullPath)
else:
objects.append(fullPath)
return objects | 0.001414 |
def get_handler(query_result_type, return_type):
""" Find the appropriate return type handler to convert the query result to the desired return type
:param query_result_type: type, actual type of the query result
:param return_type: type, desired return type
:return: callable, function that will handle the conversion
"""
try:
return FormatterRegistry.get_by_take_and_return_type(query_result_type, return_type)
except (IndexError, AttributeError, KeyError):
raise IndexError(
'Could not find function in conversion list for input type %s and return type %s' % (
query_result_type, return_type)) | 0.008621 |
def business_date(self, business_date):
"""
Force the business_date to always be a date
:param business_date:
:return:
"""
if business_date is not None:
if isinstance(business_date, type_check):
self._business_date = parse(business_date).date()
else:
self._business_date = business_date
def get_biased_correlations(data, threshold=10):
"""
Gets the highest few correlations for each bit, across the entirety of the
data. Meant to provide a comparison point for the pairwise correlations
reported in the literature, which are typically between neighboring neurons
tuned to the same inputs. We would expect these neurons to be among the most
correlated in any region, so pairwise correlations between most likely do not
provide an unbiased estimator of correlations between arbitrary neurons.
"""
data = data.toDense()
correlations = numpy.corrcoef(data, rowvar = False)
highest_correlations = []
for row in correlations:
highest_correlations += sorted(row, reverse = True)[1:threshold+1]
return numpy.mean(highest_correlations) | 0.015544 |
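Illustrative only: the function expects an object with a toDense() method; a tiny
hypothetical wrapper around a random binary activity matrix shows the call.
import numpy

class DenseWrapper(object):
    """Hypothetical stand-in exposing the toDense() interface used above."""
    def __init__(self, array):
        self._array = array
    def toDense(self):
        return self._array

activity = numpy.random.randint(0, 2, size=(500, 64)).astype(float)
print(get_biased_correlations(DenseWrapper(activity), threshold=5))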
def check_content(self):
"""Check content of URL.
@return: True if content can be parsed, else False
"""
if self.do_check_content and self.valid:
# check content and recursion
try:
if self.can_get_content():
self.aggregate.plugin_manager.run_content_plugins(self)
if self.allows_recursion():
return True
except tuple(ExcList):
value = self.handle_exception()
self.add_warning(_("could not get content: %(msg)s") %
{"msg": str(value)}, tag=WARN_URL_ERROR_GETTING_CONTENT)
return False | 0.004386 |
def list_groups(self, filtr, url_prefix, auth, session, send_opts):
"""Get the groups the logged in user is a member of.
Optionally filter by 'member' or 'maintainer'.
Args:
filtr (string|None): ['member'|'maintainer'] or defaults to None.
url_prefix (string): Protocol + host such as https://api.theboss.io
auth (string): Token to send in the request header.
session (requests.Session): HTTP session to use for request.
send_opts (dictionary): Additional arguments to pass to session.send().
Returns:
(list[string]): List of group names.
Raises:
requests.HTTPError on failure.
"""
req = self.get_group_request(
'GET', 'application/json', url_prefix, auth)
if filtr is not None:
if not filtr == 'member' and not filtr == 'maintainer':
raise RuntimeError(
'filtr must be either "member", "maintainer", or None.')
req.params = {'filter': filtr}
prep = session.prepare_request(req)
resp = session.send(prep, **send_opts)
if resp.status_code == 200:
resp_json = resp.json()
return resp_json['groups']
msg = ('List groups failed, got HTTP response: ({}) - {}'.format(
resp.status_code, resp.text))
raise HTTPError(msg, request = req, response = resp) | 0.004834 |
def no_type_check_decorator(decorator):
"""Decorator to give another decorator the @no_type_check effect.
This wraps the decorator with something that wraps the decorated
function in @no_type_check.
"""
@functools.wraps(decorator)
def wrapped_decorator(*args, **kwds):
func = decorator(*args, **kwds)
func = no_type_check(func)
return func
return wrapped_decorator | 0.002387 |
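Illustrative usage: wrapping an ordinary logging decorator so that every function it
decorates is also excluded from type checking via no_type_check.
@no_type_check_decorator
def logged(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        print('calling', func.__name__)
        return func(*args, **kwargs)
    return wrapper

@logged
def add(x: int, y: int) -> int:
    return x + y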
def community_post_comments(self, post_id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/help_center/post_comments#list-comments"
api_path = "/api/v2/community/posts/{post_id}/comments.json"
api_path = api_path.format(post_id=post_id)
return self.call(api_path, **kwargs) | 0.009585 |
def playlist_subscribe(self, playlist):
"""Subscribe to a public playlist.
Parameters:
playlist (dict): A public playlist dict.
Returns:
dict: Playlist information.
"""
mutation = mc_calls.PlaylistBatch.create(
playlist['name'],
playlist['description'],
'SHARED',
owner_name=playlist.get('ownerName', ''),
share_token=playlist['shareToken']
)
response_body = self._call(
mc_calls.PlaylistBatch,
mutation
).body
playlist_id = response_body['mutate_response'][0]['id']
return self.playlist(playlist_id) | 0.037975 |
def search(self, fields=None, query=None, filters=None):
"""Search for entities.
:param fields: A set naming which fields should be used when generating
a search query. If ``None``, all values on the entity are used. If
an empty set, no values are used.
:param query: A dict containing a raw search query. This is melded in
to the generated search query like so: ``{generated:
query}.update({manual: query})``.
:param filters: A dict. Used to filter search results locally.
:return: A list of entities, all of type ``type(self)``.
"""
results = self.search_json(fields, query)['results']
results = self.search_normalize(results)
entities = []
for result in results:
content_view_components = result.get('content_view_component')
if content_view_components is not None:
del result['content_view_component']
entity = type(self)(self._server_config, **result)
if content_view_components:
entity.content_view_component = [
ContentViewComponent(
self._server_config,
composite_content_view=result['id'],
id=cvc_id,
)
for cvc_id in content_view_components
]
entities.append(entity)
if filters is not None:
entities = self.search_filter(entities, filters)
return entities | 0.00128 |
def create(dataset, target, features=None,
penalty=1.0, solver='auto',
feature_rescaling=True,
convergence_threshold = _DEFAULT_SOLVER_OPTIONS['convergence_threshold'],
lbfgs_memory_level = _DEFAULT_SOLVER_OPTIONS['lbfgs_memory_level'],
max_iterations = _DEFAULT_SOLVER_OPTIONS['max_iterations'],
class_weights = None,
validation_set = 'auto',
verbose=True):
"""
Create a :class:`~turicreate.svm_classifier.SVMClassifier` to predict the class of a binary
target variable based on a model of which side of a hyperplane the example
falls on. In addition to standard numeric and categorical types, features
can also be extracted automatically from list- or dictionary-type SFrame
columns.
    The loss function for the SVM model is the sum of an L1 mis-classification
    loss (multiplied by the 'penalty' term) and an l2-norm on the weight vectors.
Parameters
----------
dataset : SFrame
Dataset for training the model.
target : string
Name of the column containing the target variable. The values in this
column must be of string or integer type. String target variables are
automatically mapped to integers in alphabetical order of the variable
values. For example, a target variable with 'cat' and 'dog' as possible
values is mapped to 0 and 1 respectively with 0 being the base class
and 1 being the reference class.
features : list[string], optional
Names of the columns containing features. 'None' (the default) indicates
that all columns except the target variable should be used as features.
The features are columns in the input SFrame that can be of the
following types:
- *Numeric*: values of numeric type integer or float.
- *Categorical*: values of type string.
- *Array*: list of numeric (integer or float) values. Each list element
is treated as a separate feature in the model.
    - *Dictionary*: key-value pairs with numeric (integer or float) values.
Each key of a dictionary is treated as a separate feature and the
value in the dictionary corresponds to the value of the feature.
Dictionaries are ideal for representing sparse data.
Columns of type *list* are not supported. Convert them to array in
case all entries in the list are of numeric types and separate them
out into different columns if they are of mixed type.
penalty : float, optional
Penalty term on the mis-classification loss of the model. The larger
this weight, the more the model coefficients shrink toward 0. The
larger the penalty, the lower is the emphasis placed on misclassified
examples, and the classifier would spend more time maximizing the
margin for correctly classified examples. The default value is 1.0;
this parameter must be set to a value of at least 1e-10.
solver : string, optional
Name of the solver to be used to solve the problem. See the
references for more detail on each solver. Available solvers are:
- *auto (default)*: automatically chooses the best solver (from the ones
listed below) for the data and model parameters.
    - *lbfgs*: Limited-memory BFGS (``lbfgs``) is a robust solver for wide
      datasets (i.e., datasets with many coefficients).
The solvers are all automatically tuned and the default options should
function well. See the solver options guide for setting additional
parameters for each of the solvers.
feature_rescaling : bool, default = true
Feature rescaling is an important pre-processing step that ensures
that all features are on the same scale. An l2-norm rescaling is
performed to make sure that all features are of the same norm. Categorical
features are also rescaled by rescaling the dummy variables that
are used to represent them. The coefficients are returned in original
scale of the problem.
    convergence_threshold : float, optional
Convergence is tested using variation in the training objective. The
variation in the training objective is calculated using the difference
between the objective values between two steps. Consider reducing this
below the default value (0.01) for a more accurately trained model.
    Beware of overfitting (i.e., a model that works well only on the training
data) if this parameter is set to a very low value.
max_iterations : int, optional
The maximum number of allowed passes through the data. More passes over
the data can result in a more accurately trained model. Consider
increasing this (the default value is 10) if the training accuracy is
low and the *Grad-Norm* in the display is large.
lbfgs_memory_level : int, optional
The L-BFGS algorithm keeps track of gradient information from the
previous ``lbfgs_memory_level`` iterations. The storage requirement for
each of these gradients is the ``num_coefficients`` in the problem.
Increasing the ``lbfgs_memory_level`` can help improve the quality of
the model trained. Setting this to more than ``max_iterations`` has the
same effect as setting it to ``max_iterations``.
class_weights : {dict, `auto`}, optional
Weights the examples in the training data according to the given class
    weights. If set to `None`, all classes are taken to have weight one. The
    `auto` mode sets the class weight to be inversely proportional to the number
    of examples in the training data for the given class.
validation_set : SFrame, optional
A dataset for monitoring the model's generalization performance.
For each row of the progress table, the chosen metrics are computed
for both the provided training dataset and the validation_set. The
format of this SFrame must be the same as the training set.
By default this argument is set to 'auto' and a validation set is
automatically sampled and used for progress printing. If
validation_set is set to None, then no additional metrics
are computed. The default value is 'auto'.
verbose : bool, optional
If True, print progress updates.
Returns
-------
out : SVMClassifier
A trained model of type
:class:`~turicreate.svm_classifier.SVMClassifier`.
See Also
--------
SVMClassifier
Notes
-----
- Categorical variables are encoded by creating dummy variables. For
a variable with :math:`K` categories, the encoding creates :math:`K-1`
dummy variables, while the first category encountered in the data is used
as the baseline.
- For prediction and evaluation of SVM models with sparse dictionary
inputs, new keys/columns that were not seen during training are silently
ignored.
- The penalty parameter is analogous to the 'C' term in the C-SVM. See the
reference on training SVMs for more details.
- Any 'None' values in the data will result in an error being thrown.
- A constant term of '1' is automatically added for the model intercept to
model the bias term.
- Note that the hinge loss is approximated by the scaled logistic loss
function. (See user guide for details)
References
----------
- `Wikipedia - Support Vector Machines
<http://en.wikipedia.org/wiki/svm>`_
- Zhang et al. - Modified Logistic Regression: An Approximation to
SVM and its Applications in Large-Scale Text Categorization (ICML 2003)
Examples
--------
Given an :class:`~turicreate.SFrame` ``sf``, a list of feature columns
[``feature_1`` ... ``feature_K``], and a target column ``target`` with 0 and
1 values, create a
:class:`~turicreate.svm.SVMClassifier` as follows:
>>> data = turicreate.SFrame('https://static.turi.com/datasets/regression/houses.csv')
>>> data['is_expensive'] = data['price'] > 30000
>>> model = turicreate.svm_classifier.create(data, 'is_expensive')
"""
# Regression model names.
model_name = "classifier_svm"
solver = solver.lower()
model = _sl.create(dataset, target, model_name, features=features,
validation_set = validation_set, verbose = verbose,
penalty = penalty,
feature_rescaling = feature_rescaling,
convergence_threshold = convergence_threshold,
lbfgs_memory_level = lbfgs_memory_level,
max_iterations = max_iterations,
class_weights = class_weights)
return SVMClassifier(model.__proxy__) | 0.005876 |
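A hedged follow-up to the docstring example above, assuming the trained model exposes the usual turicreate classifier methods (predict/evaluate) and reusing the data and model names from that example:

predictions = model.predict(data)   # per-row class predictions (0/1)
results = model.evaluate(data)      # assumed to return accuracy, confusion matrix, ...
print(results['accuracy'])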
def preprocess_plain_text_file(self, filename, pmid, extra_annotations):
"""Preprocess a plain text file for use with ISI reder.
Preprocessing results in a new text file with one sentence
per line.
Parameters
----------
filename : str
The name of the plain text file
pmid : str
The PMID from which it comes, or None if not specified
extra_annotations : dict
Extra annotations to be added to each statement, possibly including
metadata about the source (annotations with the key "interaction"
will be overridden)
"""
with codecs.open(filename, 'r', encoding='utf-8') as f:
content = f.read()
self.preprocess_plain_text_string(content, pmid,
extra_annotations) | 0.002299 |
def load_facts(self, facts):
"""Load a set of facts into the CLIPS data base.
The C equivalent of the CLIPS load-facts command.
Facts can be loaded from a string or from a text file.
"""
facts = facts.encode()
if os.path.exists(facts):
ret = lib.EnvLoadFacts(self._env, facts)
if ret == -1:
raise CLIPSError(self._env)
else:
ret = lib.EnvLoadFactsFromString(self._env, facts, -1)
if ret == -1:
raise CLIPSError(self._env)
return ret | 0.003442 |
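A small usage sketch, assuming this method is exposed on a CLIPS environment wrapper as shown; the env object, the ordered facts and the file name are illustrative only:

env.load_facts("(colour red) (colour green)")   # load facts from a string
env.load_facts("facts.clp")                     # or from a hypothetical file on disk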
def share_column_widths(self, tables, shared_limit=None):
"""
        Sync this table's column widths with the columns in tables.
Note, this will need to be called on the other tables to be fully
synced.
:param tables: list of SeabornTables to share column widths
:param shared_limit: int if diff is greater than this than ignore it.
:return: None
"""
for table in tables:
record = (table, shared_limit)
            if record not in self.shared_tables and table is not self:
self.shared_tables.append(record) | 0.004942 |
def update_settings(self, index, newvalues):
"""
Update Settings of an index.
(See :ref:`es-guide-reference-api-admin-indices-update-settings`)
"""
path = make_path(index, "_settings")
return self.conn._send_request('PUT', path, newvalues) | 0.00692 |
def communicate(self, input=None):
"""Interact with process: Send data to stdin. Read data from
stdout and stderr, until end-of-file is reached. Wait for
process to terminate. The optional input argument should be a
string to be sent to the child process, or None, if no data
should be sent to the child.
communicate() returns a tuple (stdout, stderr)."""
# Optimization: If we are only using one pipe, or no pipe at
# all, using select() or threads is unnecessary.
if [self.stdin, self.stdout, self.stderr].count(None) >= 2:
stdout = None
stderr = None
if self.stdin:
if input:
self.stdin.write(input)
self.stdin.close()
elif self.stdout:
stdout = self.stdout.read()
self.stdout.close()
elif self.stderr:
stderr = self.stderr.read()
self.stderr.close()
self.wait()
return (stdout, stderr)
return self._communicate(input) | 0.001807 |
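The method mirrors the standard library's subprocess.Popen.communicate, so the calling pattern can be illustrated with a runnable stdlib example (assuming a POSIX cat binary is available):

import subprocess

p = subprocess.Popen(['cat'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
out, err = p.communicate(input=b'hello world\n')  # write stdin, read stdout, wait
print(out)   # b'hello world\n'
print(err)   # None -- stderr was not piped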
def from_raw(self, raw: RawScalar) -> Optional[bytes]:
"""Override superclass method."""
try:
return base64.b64decode(raw, validate=True)
        except (TypeError, ValueError):  # invalid base64 raises binascii.Error, a ValueError subclass
return None | 0.009302 |
def preprocess(img):
"""Preprocess 210x160x3 uint8 frame into 6400 (80x80) 1D float vector."""
# Crop the image.
img = img[35:195]
# Downsample by factor of 2.
img = img[::2, ::2, 0]
# Erase background (background type 1).
img[img == 144] = 0
# Erase background (background type 2).
img[img == 109] = 0
# Set everything else (paddles, ball) to 1.
img[img != 0] = 1
    return img.astype(float).ravel() | 0.002232
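A quick runnable check of the preprocessing above on a synthetic Atari-sized frame (the pixel values are made up; only the frame shape and the background codes 144/109 matter):

import numpy as np

frame = np.zeros((210, 160, 3), dtype=np.uint8)   # synthetic 210x160x3 frame
frame[101, 50, 0] = 236                           # pretend the ball is here
vec = preprocess(frame)
print(vec.shape)   # (6400,) -- 80 * 80 flattened
print(vec.sum())   # 1.0 -- only the "ball" pixel survived the background erase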
def parse_event_xml(self, event_data) -> dict:
"""Parse metadata xml."""
event = {}
event_xml = event_data.decode()
message = MESSAGE.search(event_xml)
if not message:
return {}
event[EVENT_OPERATION] = message.group(EVENT_OPERATION)
topic = TOPIC.search(event_xml)
if topic:
event[EVENT_TOPIC] = topic.group(EVENT_TOPIC)
source = SOURCE.search(event_xml)
if source:
event[EVENT_SOURCE] = source.group(EVENT_SOURCE)
event[EVENT_SOURCE_IDX] = source.group(EVENT_SOURCE_IDX)
data = DATA.search(event_xml)
if data:
event[EVENT_TYPE] = data.group(EVENT_TYPE)
event[EVENT_VALUE] = data.group(EVENT_VALUE)
_LOGGER.debug(event)
return event | 0.002427 |
def send_miniprogrampage_message(
self, user_id, title, appid, pagepath, thumb_media_id, kf_account=None
):
"""
        Send a mini program card message (the mini program must already be linked
        to this official account).
        :param user_id: user ID, i.e. the source of the `Message` you received
        :param title: title of the mini program card
        :param appid: appid of the mini program; it must be associated with the official account
        :param pagepath: page path of the mini program, consistent with app.json; parameters are supported, e.g. pages/index/index?foo=bar
        :param thumb_media_id: media ID of the mini program card image; the recommended image size is 520*416
        :param kf_account: customer service account to send the message as, if a specific one is required
        :return: the JSON data returned by the API
"""
data = {
"touser": user_id,
"msgtype": "miniprogrampage",
"miniprogrampage": {
"title": title,
"appid": appid,
"pagepath": pagepath,
"thumb_media_id": thumb_media_id
}
}
if kf_account is not None:
data["customservice"] = {"kf_account": kf_account}
return self.post(
url="https://api.weixin.qq.com/cgi-bin/message/custom/send",
data=data
) | 0.003721 |
def process_rules(self, path: Path, system: System):
"""writes the templates read from the rules document"""
self.context.update({
'system': system,
})
document = FileSystem.load_yaml(path, required=True)
for module, rules in document.items():
click.secho('process: {0}'.format(module), fg='green')
self._process_rules(rules, system) | 0.00489 |
def from_view(cls, view, *methods, name=None):
"""Create a handler class from function or coroutine."""
docs = getattr(view, '__doc__', None)
view = to_coroutine(view)
methods = methods or ['GET']
if METH_ANY in methods:
methods = METH_ALL
def proxy(self, *args, **kwargs):
return view(*args, **kwargs)
params = {m.lower(): proxy for m in methods}
params['methods'] = methods
if docs:
params['__doc__'] = docs
return type(name or view.__name__, (cls,), params) | 0.003448 |
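A usage sketch, assuming a Handler base class that exposes this classmethod (as in muffin-style handlers); the Handler name and the hello view are illustrative:

async def hello(request):
    """Say hello."""
    return 'Hello, world!'

# Build a handler class that serves the view for GET and POST.
HelloHandler = Handler.from_view(hello, 'GET', 'POST', name='HelloHandler')
print(HelloHandler.methods)   # ('GET', 'POST')
print(HelloHandler.__doc__)   # 'Say hello.' -- copied from the view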
def disconnect_entry_signals():
"""
Disconnect all the signals on Entry model.
"""
post_save.disconnect(
sender=Entry,
dispatch_uid=ENTRY_PS_PING_DIRECTORIES)
post_save.disconnect(
sender=Entry,
dispatch_uid=ENTRY_PS_PING_EXTERNAL_URLS)
post_save.disconnect(
sender=Entry,
dispatch_uid=ENTRY_PS_FLUSH_SIMILAR_CACHE)
post_delete.disconnect(
sender=Entry,
dispatch_uid=ENTRY_PD_FLUSH_SIMILAR_CACHE) | 0.002049 |
def disconnect(self, frame):
"""
Handles the DISCONNECT command: Unbinds the connection.
Clients are supposed to send this command, but in practice it should not be
relied upon.
"""
self.engine.log.debug("Disconnect")
self.engine.unbind() | 0.010169 |
def get_quizzes(self, course_id):
"""
List quizzes for a given course
https://canvas.instructure.com/doc/api/quizzes.html#method.quizzes_api.index
"""
url = QUIZZES_API.format(course_id)
data = self._get_resource(url)
quizzes = []
for datum in data:
quizzes.append(Quiz(data=datum))
return quizzes | 0.005236 |
def ToScriptHash(self, address):
"""
        Retrieve the script hash from an address.
Args:
address (str): a base58 encoded address.
Raises:
            ValueError: if an invalid address is supplied or the coin version is incorrect
Exception: if the address string does not start with 'A' or the checksum fails
Returns:
UInt160: script hash.
"""
if len(address) == 34:
if address[0] == 'A':
data = b58decode(address)
if data[0] != self.AddressVersion:
raise ValueError('Not correct Coin Version')
checksum = Crypto.Default().Hash256(data[:21])[:4]
if checksum != data[21:]:
raise Exception('Address format error')
return UInt160(data=data[1:21])
else:
raise Exception('Address format error')
else:
raise ValueError('Not correct Address, wrong length.') | 0.003891 |
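The address check above is Base58 decoding plus a double-SHA256 checksum over the first 21 bytes; a self-contained sketch of that underlying logic follows (the alphabet, the fixed 25-byte payload and the 0x17 version byte are assumptions for illustration, not taken from the class above):

import hashlib

B58_ALPHABET = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'

def sha256d(data):
    return hashlib.sha256(hashlib.sha256(data).digest()).digest()

def b58encode(data):
    num = int.from_bytes(data, 'big')
    out = ''
    while num:
        num, rem = divmod(num, 58)
        out = B58_ALPHABET[rem] + out
    pad = len(data) - len(data.lstrip(b'\x00'))   # keep leading zero bytes as '1'
    return '1' * pad + out

def address_to_script_hash(address, version_byte):
    num = 0
    for ch in address:
        num = num * 58 + B58_ALPHABET.index(ch)
    data = num.to_bytes(25, 'big')     # version(1) + hash160(20) + checksum(4)
    if data[0] != version_byte:
        raise ValueError('Not correct Coin Version')
    if sha256d(data[:21])[:4] != data[21:]:
        raise Exception('Address format error')
    return data[1:21]                  # the 20-byte script hash

# Round trip with an illustrative version byte (0x17 is NEO's AddressVersion).
payload = bytes([0x17]) + bytes(20)              # version + dummy 20-byte script hash
address = b58encode(payload + sha256d(payload)[:4])
print(address_to_script_hash(address, 0x17) == bytes(20))   # True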
def visit_children_decor(func):
"See Interpreter"
@wraps(func)
def inner(cls, tree):
values = cls.visit_children(tree)
return func(cls, values)
return inner | 0.005319 |
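A toy, self-contained illustration of the decorator above; FakeInterpreter stands in for the interpreter class it is meant for (an assumption, since only the decorator is shown), providing the visit_children method the wrapper calls:

from functools import wraps   # also needed by the decorator above

class FakeTree:
    def __init__(self, children):
        self.children = children

class FakeInterpreter:
    # Minimal stand-in: "visiting" a child just returns it unchanged.
    def visit_children(self, tree):
        return list(tree.children)

    @visit_children_decor
    def add(self, values):
        # `values` is already the list produced by visit_children().
        return sum(values)

interp = FakeInterpreter()
print(interp.add(FakeTree([1, 2, 3])))   # 6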
def rename_file(self, relativePath, newRelativePath,
force=False, raiseError=True, ntrials=3):
"""
Rename a file in the repository. It insures renaming the file in the system.
:Parameters:
#. relativePath (string): The relative to the repository path of
               the file that needs to be renamed.
#. newRelativePath (string): The new relative to the repository path
of where to move and rename the file.
#. force (boolean): Whether to force renaming even when another
repository file exists. In this case old repository file
will be removed from the repository and the system as well.
#. raiseError (boolean): Whether to raise encountered error instead
of returning failure.
            #. ntrials (int): After acquiring all locks, ntrials is the maximum
number of trials allowed before failing.
In rare cases, when multiple processes
are accessing the same repository components, different processes
can alter repository components between successive lock releases
of some other process. Bigger number of trials lowers the
               likelihood of failure due to simultaneous alteration by
               multiple processes.
:Returns:
#. success (boolean): Whether renaming the file was successful.
#. message (None, string): Some explanatory message or error reason
               why the file was not renamed.
"""
assert isinstance(raiseError, bool), "raiseError must be boolean"
assert isinstance(force, bool), "force must be boolean"
assert isinstance(ntrials, int), "ntrials must be integer"
assert ntrials>0, "ntrials must be >0"
# check old name and path
relativePath = self.to_repo_relative_path(path=relativePath, split=False)
realPath = os.path.join(self.__path,relativePath)
fPath, fName = os.path.split(realPath)
# check new name and path
newRelativePath = self.to_repo_relative_path(path=newRelativePath, split=False)
newRealPath = os.path.join(self.__path,newRelativePath)
nfPath, nfName = os.path.split(newRealPath)
# lock old file
LO = Locker(filePath=None, lockPass=str(uuid.uuid1()), lockPath=os.path.join(fPath,self.__fileLock%fName))
acquired, code = LO.acquire_lock()
if not acquired:
error = "Code %s. Unable to aquire the lock for old file '%s'"%(code,relativePath)
assert not raiseError, error
return False, error
# add directory
try:
success, reason = self.add_directory(nfPath, raiseError=False, ntrials=ntrials)
except Exception as err:
reason = "Unable to add directory (%s)"%(str(err))
success = False
if not success:
LO.release_lock()
assert not raiseError, reason
return False, reason
# create new file lock
LN = Locker(filePath=None, lockPass=str(uuid.uuid1()), lockPath=os.path.join(nfPath,self.__fileLock%nfName))
acquired, code = LN.acquire_lock()
if not acquired:
LO.release_lock()
error = "Code %s. Unable to aquire the lock for new file path '%s'"%(code,newRelativePath)
assert not raiseError, error
return False, error
# rename file
for _trial in range(ntrials):
renamed = False
error = None
try:
# check whether it's a repository file
isRepoFile,fileOnDisk, infoOnDisk, classOnDisk = self.is_repository_file(relativePath)
assert isRepoFile, "file '%s' is not a repository file"%(relativePath,)
                assert fileOnDisk, "file '%s' is not found on disk"%(relativePath,)
                assert infoOnDisk, "%s is not found on disk"%self.__fileInfo%fName
                assert classOnDisk, "%s is not found on disk"%self.__fileClass%fName
# get new file path
nisRepoFile,nfileOnDisk,ninfoOnDisk,nclassOnDisk = self.is_repository_file(newRelativePath)
assert not nisRepoFile or force, "New file path is a registered repository file, set force to True to proceed regardless"
# get parent directories list
oDirList = self.__get_repository_directory(fPath)
nDirList = self.__get_repository_directory(nfPath)
# remove new file and all repository files from disk
if os.path.isfile(newRealPath):
os.remove(newRealPath)
if os.path.isfile(os.path.join(nfPath,self.__fileInfo%nfName)):
os.remove(os.path.join(nfPath,self.__fileInfo%nfName))
if os.path.isfile(os.path.join(nfPath,self.__fileClass%nfName)):
os.remove(os.path.join(nfPath,self.__fileClass%nfName))
# move old file to new path
os.rename(realPath, newRealPath)
os.rename(os.path.join(fPath,self.__fileInfo%fName), os.path.join(nfPath,self.__fileInfo%nfName))
os.rename(os.path.join(fPath,self.__fileClass%fName), os.path.join(nfPath,self.__fileClass%nfName))
# update list
findex = oDirList.index(fName)
oDirList.pop(findex)
# update new list
if nfName not in nDirList:
nDirList.append(nfName)
except Exception as err:
renamed = False
error = str(err)
if self.DEBUG_PRINT_FAILED_TRIALS: print("Trial %i failed in Repository.%s (%s). Set Repository.DEBUG_PRINT_FAILED_TRIALS to False to mute"%(_trial, inspect.stack()[1][3], str(error)))
else:
renamed = True
break
# release locks
LO.release_lock()
LN.release_lock()
# always clean old file lock
try:
if os.path.isfile(os.path.join(fPath,self.__fileLock%fName)):
os.remove(os.path.join(fPath,self.__fileLock%fName))
except:
pass
# return
assert renamed or not raiseError, "Unable to rename file '%s' to '%s' after %i trials (%s)"%(relativePath, newRelativePath, ntrials, error,)
#assert renamed or not raiseError, '\n'.join(message)
return renamed, error | 0.011917 |
def boolValue(self):
"""
returns : (boolean) Value
"""
if self.lastValue == 1 or self.lastValue == "active":
self._key = 1
self._boolKey = True
else:
self._key = 0
self._boolKey = False
return self._boolKey | 0.006623 |
def get_possible_initializer_keys(cls, use_peepholes=False,
use_projection=False):
"""Returns the keys the dictionary of variable initializers may contain.
The set of all possible initializer keys are:
w_gates: weight for gates
b_gates: bias of gates
w_f_diag: weight for prev_cell -> forget gate peephole
w_i_diag: weight for prev_cell -> input gate peephole
w_o_diag: weight for prev_cell -> output gate peephole
Args:
      cls: The class.
use_peepholes: Boolean that indicates whether peephole connections are
used.
use_projection: Boolean that indicates whether a recurrent projection
layer is used.
Returns:
Set with strings corresponding to the strings that may be passed to the
constructor.
"""
possible_keys = cls.POSSIBLE_INITIALIZER_KEYS.copy()
if not use_peepholes:
possible_keys.difference_update(
{cls.W_F_DIAG, cls.W_I_DIAG, cls.W_O_DIAG})
if not use_projection:
possible_keys.difference_update({cls.W_H_PROJECTION})
return possible_keys | 0.003562 |
def get_share_url_with_dirname(uk, shareid, dirname):
    '''Get the link for a shared directory.'''
return ''.join([
const.PAN_URL, 'wap/link',
'?shareid=', shareid,
'&uk=', uk,
'&dir=', encoder.encode_uri_component(dirname),
'&third=0',
]) | 0.003559 |
def from_blob(self, blob, store=current_store,
extra_args=None, extra_kwargs=None):
"""Stores the ``blob`` (byte string) for the image
into the ``store``.
:param blob: the byte string for the image
:type blob: :class:`str`
:param store: the storage to store the image data.
:data:`~sqlalchemy_imageattach.context.current_store`
by default
:type store: :class:`~sqlalchemy_imageattach.store.Store`
:param extra_args: additional arguments to pass to the model's
constructor.
:type extra_args: :class:`collections.abc.Sequence`
:param extra_kwargs: additional keyword arguments to pass to the
model's constructor.
:type extra_kwargs: :class:`typing.Mapping`\ [:class:`str`,
:class:`object`]
:returns: the created image instance
:rtype: :class:`Image`
.. versionadded:: 1.0.0
The ``extra_args`` and ``extra_kwargs`` options.
"""
data = io.BytesIO(blob)
return self.from_raw_file(data, store, original=True,
extra_args=extra_args,
extra_kwargs=extra_kwargs) | 0.002999 |
def remove(self, key):
"""
Removes the mapping for a key from this map if it is present. The map will not contain a mapping for the
specified key once the call returns.
**Warning: This method uses __hash__ and __eq__ methods of binary form of the key, not the actual implementations
of __hash__ and __eq__ defined in key's class.**
:param key: (object), key of the mapping to be deleted.
:return: (object), the previous value associated with key, or None if there was no mapping for key.
"""
check_not_none(key, "key can't be None")
key_data = self._to_data(key)
return self._encode_invoke_on_key(replicated_map_remove_codec, key_data, key=key_data) | 0.008119 |
def _TerminateProcess(self, process):
"""Terminate a process.
Args:
process (MultiProcessBaseProcess): process to terminate.
"""
pid = process.pid
logger.warning('Terminating process: (PID: {0:d}).'.format(pid))
process.terminate()
# Wait for the process to exit.
process.join(timeout=self._PROCESS_JOIN_TIMEOUT)
if process.is_alive():
logger.warning('Killing process: (PID: {0:d}).'.format(pid))
self._KillProcess(pid) | 0.006303 |
def maybe_infer_to_datetimelike(value, convert_dates=False):
"""
we might have a array (or single object) that is datetime like,
and no dtype is passed don't change the value unless we find a
datetime/timedelta set
this is pretty strict in that a datetime/timedelta is REQUIRED
in addition to possible nulls/string likes
Parameters
----------
value : np.array / Series / Index / list-like
convert_dates : boolean, default False
        if True, try really hard to convert dates (such as datetime.date);
        otherwise leave the inferred dtype 'date' alone
"""
# TODO: why not timedelta?
if isinstance(value, (ABCDatetimeIndex, ABCPeriodIndex,
ABCDatetimeArray, ABCPeriodArray)):
return value
elif isinstance(value, ABCSeries):
if isinstance(value._values, ABCDatetimeIndex):
return value._values
v = value
if not is_list_like(v):
v = [v]
v = np.array(v, copy=False)
# we only care about object dtypes
if not is_object_dtype(v):
return value
shape = v.shape
if not v.ndim == 1:
v = v.ravel()
if not len(v):
return value
def try_datetime(v):
# safe coerce to datetime64
try:
# GH19671
v = tslib.array_to_datetime(v,
require_iso8601=True,
errors='raise')[0]
except ValueError:
# we might have a sequence of the same-datetimes with tz's
# if so coerce to a DatetimeIndex; if they are not the same,
# then these stay as object dtype, xref GH19671
try:
from pandas._libs.tslibs import conversion
from pandas import DatetimeIndex
values, tz = conversion.datetime_to_datetime64(v)
return DatetimeIndex(values).tz_localize(
'UTC').tz_convert(tz=tz)
except (ValueError, TypeError):
pass
except Exception:
pass
return v.reshape(shape)
def try_timedelta(v):
# safe coerce to timedelta64
# will try first with a string & object conversion
from pandas import to_timedelta
try:
return to_timedelta(v)._ndarray_values.reshape(shape)
except Exception:
return v.reshape(shape)
inferred_type = lib.infer_datetimelike_array(ensure_object(v))
if inferred_type == 'date' and convert_dates:
value = try_datetime(v)
elif inferred_type == 'datetime':
value = try_datetime(v)
elif inferred_type == 'timedelta':
value = try_timedelta(v)
elif inferred_type == 'nat':
# if all NaT, return as datetime
if isna(v).all():
value = try_datetime(v)
else:
# We have at least a NaT and a string
# try timedelta first to avoid spurious datetime conversions
# e.g. '00:00:01' is a timedelta but technically is also a datetime
value = try_timedelta(v)
if lib.infer_dtype(value, skipna=False) in ['mixed']:
# cannot skip missing values, as NaT implies that the string
# is actually a datetime
value = try_datetime(v)
return value | 0.000298 |
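A usage sketch against the helper above; the import path is assumed from the pandas internals this appears to come from (pandas.core.dtypes.cast), and the function is private and version-dependent, so treat this as illustrative rather than a supported API:

import numpy as np
from datetime import datetime
from pandas.core.dtypes.cast import maybe_infer_to_datetimelike  # assumed internal path

values = np.array([datetime(2020, 1, 1), datetime(2020, 1, 2)], dtype=object)
out = maybe_infer_to_datetimelike(values)
print(out.dtype)   # datetime64[ns] -- the object array was recognised as datetimes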
def update(self, body):
"""
Update the MessageInstance
:param unicode body: The text of the message you want to send
:returns: Updated MessageInstance
:rtype: twilio.rest.api.v2010.account.message.MessageInstance
"""
data = values.of({'Body': body, })
payload = self._version.update(
'POST',
self._uri,
data=data,
)
return MessageInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
sid=self._solution['sid'],
) | 0.003273 |
def merge(left, right):
"""
    Deep merge the dictionary on the left with the one
    on the right.
    Fill in the left dictionary with values from the right one
    wherever the key is missing from the left one or its value
    is None.
"""
if isinstance(left, dict) and isinstance(right, dict):
for key, value in right.items():
if key not in left:
left[key] = value
elif left[key] is None:
left[key] = value
else:
left[key] = merge(left[key], value)
return left | 0.001773 |
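A runnable illustration of the deep merge above; the configuration dictionaries are made up for the example:

defaults = {'db': {'host': 'localhost', 'port': 5432}, 'debug': False}
overrides = {'db': {'host': None, 'user': 'admin'}, 'debug': True}

# Fill `overrides` (left) with values from `defaults` (right) where missing or None.
print(merge(overrides, defaults))
# {'db': {'host': 'localhost', 'user': 'admin', 'port': 5432}, 'debug': True}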
def _validate_many(args, specs, defaults,passed_conditions,value_conditions,
allow_unknowns,unknowns_spec):
'''
Similar to validate but validates multiple objects at once, each with their own specification.
Fill objects that were specified but not provided with NotPassed or default values
Apply `value_condition` to object dictionary as a whole
'''
validated_args = builtins.dict()
passed_but_not_specified = set(args.keys()) - set(specs.keys())
if passed_but_not_specified:
if not allow_unknowns:
            raise ValueError(('Arguments {} were passed but not specified (use '
                '`allow_unknowns=True` to avoid this error)').format(passed_but_not_specified))
else:
for arg in passed_but_not_specified:
if unknowns_spec is not None:
specs[arg] = unknowns_spec
if passed_conditions:
validate(args, Dict(passed_conditions=passed_conditions))
for arg in specs:
if (not arg in args) or NotPassed(args[arg]):
if arg in defaults:
if isinstance(defaults[arg],DefaultGenerator):
validated_args[arg] = defaults[arg]()
else:
validated_args[arg] = defaults[arg]
else:
validated_args[arg] = NotPassed
else:#Default values and NotPassed values are not validated. Former has advantage that default values need to be `correct` without validation and thus encourage the user to pass stuff that doesn't need validation, and is therefore faster
validated_args[arg] = validate(args[arg], specs[arg])
if value_conditions:
validated_args = validate(validated_args, value_conditions)
return validated_args | 0.01168 |
def close (self, force=True): # File-like object.
"""This closes the connection with the child application. Note that
calling close() more than once is valid. This emulates standard Python
behavior with files. Set force to True if you want to make sure that
the child is terminated (SIGKILL is sent if the child ignores SIGHUP
and SIGINT). """
if not self.closed:
self.flush()
os.close (self.child_fd)
time.sleep(self.delayafterclose) # Give kernel time to update process status.
if self.isalive():
if not self.terminate(force):
raise ExceptionPexpect ('close() could not terminate the child using terminate()')
self.child_fd = -1
self.closed = True | 0.009889 |
def stop_app(self, package_name, clear=False):
'''
Stop application
Args:
package_name: string like com.example.app1
clear: bool, remove user data
Returns:
None
'''
if clear:
self.adb_shell(['pm', 'clear', package_name])
else:
self.adb_shell(['am', 'force-stop', package_name])
return self | 0.004819 |
def timetree(params):
"""
    Implement the treetime timetree analysis.
"""
if params.relax is None:
relaxed_clock_params = None
elif params.relax==[]:
relaxed_clock_params=True
elif len(params.relax)==2:
relaxed_clock_params={'slack':params.relax[0], 'coupling':params.relax[1]}
dates = utils.parse_dates(params.dates)
if len(dates)==0:
print("No valid dates -- exiting.")
return 1
if assure_tree(params, tmp_dir='timetree_tmp'):
print("No tree -- exiting.")
return 1
outdir = get_outdir(params, '_treetime')
gtr = create_gtr(params)
infer_gtr = params.gtr=='infer'
###########################################################################
### READ IN VCF
###########################################################################
#sets ref and fixed_pi to None if not VCF
aln, ref, fixed_pi = read_if_vcf(params)
is_vcf = True if ref is not None else False
branch_length_mode = params.branch_length_mode
#variable-site-only trees can have big branch lengths, the auto setting won't work.
if is_vcf or (params.aln and params.sequence_length):
if branch_length_mode == 'auto':
branch_length_mode = 'joint'
###########################################################################
### SET-UP and RUN
###########################################################################
if params.aln is None and params.sequence_length is None:
print("one of arguments '--aln' and '--sequence-length' is required.", file=sys.stderr)
return 1
myTree = TreeTime(dates=dates, tree=params.tree, ref=ref,
aln=aln, gtr=gtr, seq_len=params.sequence_length,
verbose=params.verbose)
myTree.tip_slack=params.tip_slack
if not myTree.one_mutation:
print("TreeTime setup failed, exiting")
return 1
# coalescent model options
try:
coalescent = float(params.coalescent)
if coalescent<10*myTree.one_mutation:
coalescent = None
except:
if params.coalescent in ['opt', 'const', 'skyline']:
coalescent = params.coalescent
else:
print("unknown coalescent model specification, has to be either "
"a float, 'opt', 'const' or 'skyline' -- exiting")
return 1
# determine whether confidence intervals are to be computed and how the
# uncertainty in the rate estimate should be treated
calc_confidence = params.confidence
if params.clock_std_dev:
vary_rate = params.clock_std_dev if calc_confidence else False
elif params.confidence and params.covariation:
vary_rate = True
elif params.confidence:
print("\nOutside of covariance aware mode TreeTime cannot estimate confidence intervals "
"without specified standard deviation of the clock rate Please specify '--clock-std-dev' "
"or rerun with '--covariance'. Will proceed without confidence estimation")
vary_rate = False
calc_confidence = False
else:
vary_rate = False
# RUN
root = None if params.keep_root else params.reroot
success = myTree.run(root=root, relaxed_clock=relaxed_clock_params,
resolve_polytomies=(not params.keep_polytomies),
Tc=coalescent, max_iter=params.max_iter,
fixed_clock_rate=params.clock_rate,
n_iqd=params.clock_filter,
time_marginal="assign" if calc_confidence else False,
vary_rate = vary_rate,
branch_length_mode = branch_length_mode,
fixed_pi=fixed_pi,
use_covariation = params.covariation)
if success==ttconf.ERROR: # if TreeTime.run failed, exit
print("\nTreeTime run FAILED: please check above for errors and/or rerun with --verbose 4.\n")
return 1
###########################################################################
### OUTPUT and saving of results
###########################################################################
if infer_gtr:
print('\nInferred GTR model:')
print(myTree.gtr)
print(myTree.date2dist)
basename = get_basename(params, outdir)
if coalescent in ['skyline', 'opt', 'const']:
print("Inferred coalescent model")
if coalescent=='skyline':
print_save_plot_skyline(myTree, plot=basename+'skyline.pdf', save=basename+'skyline.tsv', screen=True)
else:
Tc = myTree.merger_model.Tc.y[0]
print(" --T_c: \t %1.2e \toptimized inverse merger rate in units of substitutions"%Tc)
print(" --T_c: \t %1.2e \toptimized inverse merger rate in years"%(Tc/myTree.date2dist.clock_rate))
print(" --N_e: \t %1.2e \tcorresponding 'effective population size' assuming 50 gen/year\n"%(Tc/myTree.date2dist.clock_rate*50))
# plot
import matplotlib.pyplot as plt
from .treetime import plot_vs_years
leaf_count = myTree.tree.count_terminals()
label_func = lambda x: (x.name if x.is_terminal() and ((leaf_count<30
and (not params.no_tip_labels))
or params.tip_labels) else '')
plot_vs_years(myTree, show_confidence=False, label_func=label_func,
confidence=0.9 if params.confidence else None)
tree_fname = (outdir + params.plot_tree)
plt.savefig(tree_fname)
print("--- saved tree as \n\t %s\n"%tree_fname)
plot_rtt(myTree, outdir + params.plot_rtt)
if params.relax:
fname = outdir+'substitution_rates.tsv'
print("--- wrote branch specific rates to\n\t %s\n"%fname)
with open(fname, 'w') as fh:
fh.write("#node\tclock_length\tmutation_length\trate\tfold_change\n")
for n in myTree.tree.find_clades(order="preorder"):
if n==myTree.tree.root:
continue
g = n.branch_length_interpolator.gamma
fh.write("%s\t%1.3e\t%1.3e\t%1.3e\t%1.2f\n"%(n.name, n.clock_length, n.mutation_length, myTree.date2dist.clock_rate*g, g))
export_sequences_and_tree(myTree, basename, is_vcf, params.zero_based,
timetree=True, confidence=calc_confidence)
return 0 | 0.009594 |
def _Backward3_T_Ph(P, h):
"""Backward equation for region 3, T=f(P,h)
Parameters
----------
P : float
Pressure, [MPa]
h : float
Specific enthalpy, [kJ/kg]
Returns
-------
T : float
Temperature, [K]
"""
hf = _h_3ab(P)
if h <= hf:
T = _Backward3a_T_Ph(P, h)
else:
T = _Backward3b_T_Ph(P, h)
return T | 0.002551 |
def _process_datum(self, data, input_reader, ctx, transient_shard_state):
"""Process a single data piece.
Call mapper handler on the data.
Args:
data: a datum to process.
input_reader: input reader.
ctx: mapreduce context
transient_shard_state: transient shard state.
Returns:
True if scan should be continued, False if scan should be stopped.
"""
if data is not input_readers.ALLOW_CHECKPOINT:
self.slice_context.incr(context.COUNTER_MAPPER_CALLS)
handler = transient_shard_state.handler
if isinstance(handler, map_job.Mapper):
handler(self.slice_context, data)
else:
if input_reader.expand_parameters:
result = handler(*data)
else:
result = handler(data)
if util.is_generator(result):
for output in result:
if isinstance(output, operation.Operation):
output(ctx)
else:
output_writer = transient_shard_state.output_writer
if not output_writer:
logging.warning(
"Handler yielded %s, but no output writer is set.", output)
else:
output_writer.write(output)
if self._time() - self._start_time >= parameters.config._SLICE_DURATION_SEC:
return False
return True | 0.01037 |
def download_file(self, remote_path, local_path, progress=None):
"""Downloads file from WebDAV server and save it locally.
More information you can find by link http://webdav.org/specs/rfc4918.html#rfc.section.9.4
:param remote_path: the path to remote file for downloading.
:param local_path: the path to save file locally.
:param progress: progress function. Not supported now.
"""
urn = Urn(remote_path)
if self.is_dir(urn.path()):
raise OptionNotValid(name='remote_path', value=remote_path)
if os.path.isdir(local_path):
raise OptionNotValid(name='local_path', value=local_path)
if os.path.sep in local_path:
os.makedirs(local_path.rsplit(os.path.sep, 1)[0], exist_ok=True)
if not self.check(urn.path()):
raise RemoteResourceNotFound(urn.path())
with open(local_path, 'wb') as local_file:
response = self.execute_request('download', urn.quote())
for block in response.iter_content(1024):
local_file.write(block) | 0.002717 |
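A brief usage sketch; the client construction below follows the webdavclient3 package layout, which is an assumption here, and the hostname, credentials and paths are placeholders:

from webdav3.client import Client   # assumed: pip install webdavclient3

options = {
    'webdav_hostname': 'https://example.com/webdav',
    'webdav_login': 'user',
    'webdav_password': 'secret',
}
client = Client(options)
client.download_file(remote_path='reports/2020.pdf', local_path='/tmp/2020.pdf')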