text (string, lengths 78 to 104k) | score (float64, 0 to 0.18)
---|---|
def get_params(self):
"""Gets current parameters.
Returns
-------
(arg_params, aux_params)
A pair of dictionaries each mapping parameter names to NDArray values. This
is a merged dictionary of all the parameters in the modules.
"""
assert self.binded and self.params_initialized
arg_params = dict()
aux_params = dict()
for module in self._modules:
arg, aux = module.get_params()
arg_params.update(arg)
aux_params.update(aux)
return (arg_params, aux_params) | 0.005 |
def get_regular_expressions(taxonomy_name, rebuild=False, no_cache=False):
"""Return a list of patterns compiled from the RDF/SKOS ontology.
Uses cache if it exists and if the taxonomy hasn't changed.
"""
# Translate the ontology name into a local path. Check if the name
# relates to an existing ontology.
onto_name, onto_path, onto_url = _get_ontology(taxonomy_name)
if not onto_path:
raise TaxonomyError("Unable to locate the taxonomy: '%s'."
% taxonomy_name)
cache_path = _get_cache_path(onto_name)
current_app.logger.debug(
'Taxonomy discovered, now we load it '
'(from cache: %s, onto_path: %s, cache_path: %s)'
% (not no_cache, onto_path, cache_path)
)
if os.access(cache_path, os.R_OK):
if os.access(onto_path, os.R_OK):
if rebuild or no_cache:
current_app.logger.debug(
"Cache generation was manually forced.")
return _build_cache(onto_path, skip_cache=no_cache)
else:
# ontology file not found. Use the cache instead.
current_app.logger.warning(
"The ontology couldn't be located. However "
"a cached version of it is available. Using it as a "
"reference."
)
return _get_cache(cache_path, source_file=onto_path)
if (os.path.getmtime(cache_path) >
os.path.getmtime(onto_path)):
# Cache is more recent than the ontology: use cache.
current_app.logger.debug(
"Normal situation, cache is older than ontology,"
" so we load it from cache"
)
return _get_cache(cache_path, source_file=onto_path)
else:
# Ontology is more recent than the cache: rebuild cache.
current_app.logger.warning(
"Cache '%s' is older than '%s'. "
"We will rebuild the cache" %
(cache_path, onto_path)
)
return _build_cache(onto_path, skip_cache=no_cache)
elif os.access(onto_path, os.R_OK):
if not no_cache and\
os.path.exists(cache_path) and\
not os.access(cache_path, os.W_OK):
raise TaxonomyError('We cannot read/write into: %s. '
'Aborting!' % cache_path)
elif not no_cache and os.path.exists(cache_path):
current_app.logger.warning(
'Cache %s exists, but is not readable!' % cache_path)
current_app.logger.info(
"Cache not available. Building it now: %s" % onto_path)
return _build_cache(onto_path, skip_cache=no_cache)
else:
raise TaxonomyError("We miss both source and cache"
" of the taxonomy: %s" % taxonomy_name) | 0.000347 |
def parse(text):
"""Parse the given text into metadata and strip it for a Markdown parser.
:param text: text to be parsed
"""
rv = {}
m = META.match(text)
while m:
key = m.group(1)
value = m.group(2)
value = INDENTATION.sub('\n', value.strip())
rv[key] = value
text = text[len(m.group(0)):]
m = META.match(text)
return rv, text | 0.002463 |
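The META and INDENTATION patterns are referenced but not shown above. A minimal sketch of how parse() could be exercised, assuming META matches "key: value" header lines (with indented continuation lines) and INDENTATION matches the continuation indent; the pattern definitions below are hypothetical:

import re

# Hypothetical patterns -- the real META/INDENTATION definitions are not shown above.
META = re.compile(r'(\w+):[ \t]*(.*(?:\n[ \t]+.*)*)\n')
INDENTATION = re.compile(r'\n[ \t]+')

meta, body = parse("title: Hello\nauthor: Jane Doe\n\nSome body text.")
# meta == {'title': 'Hello', 'author': 'Jane Doe'}
# body == '\nSome body text.'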
def template(self, name):
"""
Set an active template to use with our Postman.
This changes the call signature of send.
Arguments:
- `name`: str
Return: None
Exceptions: None
"""
self.plain, self.html = self._find_tpls(name)
if not self.plain:
self.plain = self._find_tpl(name)
try:
self.send = self._sendtpl
yield
finally:
self.plain, self.html = None, None
self.send = self._send | 0.003711 |
async def SetTools(self, agent_tools):
'''
agent_tools : typing.Sequence[~EntityVersion]
Returns -> typing.Sequence[~ErrorResult]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Upgrader',
request='SetTools',
version=1,
params=_params)
_params['agent-tools'] = agent_tools
reply = await self.rpc(msg)
return reply | 0.004264 |
def choice(choices=[], message='Pick something.', default=None, title=''):
"""
Present the user with a list of choices.
Return the choice that the user selects.
Return None if the user cancels the selection.
:ref:`screenshots<choice>`
:param choices: a list of the choices to be displayed
:param message: message to be displayed.
:param title: window title
:param default: default string of choice
:rtype: None or string
"""
return backend_api.opendialog("choice", dict(choices=choices, message=message, default=default, title=title)) | 0.003436 |
def raise_(tp, value=None, tb=None):
"""
A function that matches the Python 2.x ``raise`` statement. This
allows re-raising exceptions with the cls value and traceback on
Python 2 and 3.
"""
if value is not None and isinstance(tp, Exception):
raise TypeError("instance exception may not have a separate value")
if value is not None:
exc = tp(value)
else:
exc = tp
if exc.__traceback__ is not tb:
raise exc.with_traceback(tb)
raise exc | 0.001976 |
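A small usage sketch (assuming the raise_() helper above is in scope): re-raise a caught error as a different exception type while keeping the original traceback.

import sys

def lookup(d, key):
    try:
        return d[key]
    except KeyError:
        tb = sys.exc_info()[2]
        # re-raise as RuntimeError, but attach the traceback of the original KeyError
        raise_(RuntimeError, 'lookup failed: %r' % key, tb)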
def exception_handler(exc, context):
"""
Returns the response that should be used for any given exception.
By default we handle the REST framework `APIException`, and also
Django's built-in `Http404` and `PermissionDenied` exceptions.
Any unhandled exceptions may return `None`, which will cause a 500 error
to be raised.
"""
if isinstance(exc, exceptions.APIException):
headers = {}
if getattr(exc, 'auth_header', None):
headers['WWW-Authenticate'] = exc.auth_header
if getattr(exc, 'wait', None):
headers['Retry-After'] = '%d' % exc.wait
if isinstance(exc.detail, (list, dict)):
data = exc.detail
else:
data = {'message': exc.detail}
set_rollback()
return Response(data, status=exc.status_code, headers=headers)
elif isinstance(exc, Http404):
msg = _('Not found.')
data = {'message': six.text_type(msg)}
set_rollback()
return Response(data, status=status.HTTP_404_NOT_FOUND)
elif isinstance(exc, PermissionDenied):
msg = _('Permission denied.')
data = {'message': six.text_type(msg)}
set_rollback()
return Response(data, status=status.HTTP_403_FORBIDDEN)
# Note: Unhandled exceptions will raise a 500 error.
return None | 0.000743 |
def validate(self, value):
"""
Accepts: str, unicode, bool
Returns: bool
"""
if isinstance(value, bool):
return value
if isinstance(value, (str, unicode)):
if value.lower() == "true":
value = True
elif value.lower() == "false":
value = False
else:
raise ValueError("Not a boolean: %r" % (value, ))
value = super(Boolean, self).validate(value)
if not isinstance(value, bool):
raise ValueError("Not a boolean: %r" % (value, ))
return value | 0.003241 |
def load_from_file(filename):
"""
Load a list of filenames from an external text file.
"""
if os.path.isdir(filename):
logger.error("Err: File '%s' is a directory", filename)
return None
if not os.path.isfile(filename):
logger.error("Err: File '%s' does not exist", filename)
return None
try:
with open(filename, 'r') as sourcefile:
songs = [line.strip() for line in sourcefile]
except IOError as error:
logger.exception(error)
return None
songs = set(Song.from_filename(song) for song in songs)
return songs.difference({None}) | 0.001582 |
def fix_pin(self, line):
"""
Fix dependency by removing post-releases from versions
and loosening constraints on internal packages.
Drop packages that are in the ignore set.
Also populate the packages set.
"""
dep = Dependency(line)
if dep.valid:
if dep.package in self.ignore:
ignored_version = self.ignore[dep.package]
if ignored_version is not None:
# ignored_version can be None to disable conflict detection
if dep.version and dep.version != ignored_version:
logger.error(
"Package %s was resolved to different "
"versions in different environments: %s and %s",
dep.package, dep.version, ignored_version,
)
raise RuntimeError(
"Please add constraints for the package "
"version listed above"
)
return None
self.packages[dep.package] = dep.version
if self.forbid_post or dep.is_compatible:
# Always drop post for internal packages
dep.drop_post()
return dep.serialize()
return line.strip() | 0.00148 |
def summarize_mutation_io(name, type, required=False):
"""
This function returns the standard summary for mutations inputs
and outputs
"""
return dict(
name=name,
type=type,
required=required
) | 0.004016 |
def setRequest(self, endPointReference, action):
'''Call For Request
'''
self._action = action
self.header_pyobjs = None
pyobjs = []
namespaceURI = self.wsAddressURI
addressTo = self._addressTo
messageID = self._messageID = "uuid:%s" %time.time()
# Set Message Information Headers
# MessageID
typecode = GED(namespaceURI, "MessageID")
pyobjs.append(typecode.pyclass(messageID))
# Action
typecode = GED(namespaceURI, "Action")
pyobjs.append(typecode.pyclass(action))
# To
typecode = GED(namespaceURI, "To")
pyobjs.append(typecode.pyclass(addressTo))
# From
typecode = GED(namespaceURI, "From")
mihFrom = typecode.pyclass()
mihFrom._Address = self.anonymousURI
pyobjs.append(mihFrom)
if endPointReference:
if hasattr(endPointReference, 'typecode') is False:
raise EvaluateException, 'endPointReference must have a typecode attribute'
if isinstance(endPointReference.typecode, \
GTD(namespaceURI ,'EndpointReferenceType')) is False:
raise EvaluateException, 'endPointReference must be of type %s' \
%GTD(namespaceURI ,'EndpointReferenceType')
ReferenceProperties = getattr(endPointReference, '_ReferenceProperties', None)
if ReferenceProperties is not None:
for v in getattr(ReferenceProperties, '_any', ()):
if not hasattr(v,'typecode'):
raise EvaluateException, '<any> element, instance missing typecode attribute'
pyobjs.append(v)
self.header_pyobjs = tuple(pyobjs) | 0.011306 |
def secondYAxis(requestContext, seriesList):
"""
Graph the series on the secondary Y axis.
"""
for series in seriesList:
series.options['secondYAxis'] = True
series.name = 'secondYAxis(%s)' % series.name
return seriesList | 0.003891 |
def create_query_future(self, key):
"""
Create and return a :class:`asyncio.Future` for the given `key`.
The future is referenced internally and used
by any calls to :meth:`lookup` which are made while the future is
pending. The future is removed from the internal storage automatically
when a result or exception is set for it.
This allows for deduplication of queries for the same hash.
"""
fut = asyncio.Future()
fut.add_done_callback(
functools.partial(self._erase_future, key)
)
self._lookup_cache[key] = fut
return fut | 0.002999 |
def start(self, timeout=None):
"""
Startup of the node.
:param timeout: optional timeout (in seconds) to wait for the node to start
:return: the node name
"""
assert super(PyrosBase, self).start(timeout=timeout)
# Because we currently use this to setup connection
return self.name | 0.006079 |
def rewire_inputs(data_list):
"""Rewire inputs of provided data objects.
Input parameter is a list of original and copied data object model
instances: ``[{'original': original, 'copy': copy}]``. This
function finds which objects reference other objects (in the list)
on the input and replaces original objects with the copies (mutates
copies' inputs).
"""
if len(data_list) < 2:
return data_list
mapped_ids = {bundle['original'].id: bundle['copy'].id for bundle in data_list}
for bundle in data_list:
updated = False
copy = bundle['copy']
for field_schema, fields in iterate_fields(copy.input, copy.process.input_schema):
name = field_schema['name']
value = fields[name]
if field_schema['type'].startswith('data:') and value in mapped_ids:
fields[name] = mapped_ids[value]
updated = True
elif field_schema['type'].startswith('list:data:') and any([id_ in mapped_ids for id_ in value]):
fields[name] = [mapped_ids[id_] if id_ in mapped_ids else id_ for id_ in value]
updated = True
if updated:
copy.save()
return data_list | 0.004847 |
def _extract_clublog_header(self, cty_xml_filename):
"""
Extract the header of the Clublog XML File
"""
cty_header = {}
try:
with open(cty_xml_filename, "r") as cty:
raw_header = cty.readline()
cty_date = re.search("date='.+'", raw_header)
if cty_date:
cty_date = cty_date.group(0).replace("date=", "").replace("'", "")
cty_date = datetime.strptime(cty_date[:19], '%Y-%m-%dT%H:%M:%S')
cty_date = cty_date.replace(tzinfo=UTC)
cty_header["Date"] = cty_date
cty_ns = re.search("xmlns='.+[']", raw_header)
if cty_ns:
cty_ns = cty_ns.group(0).replace("xmlns=", "").replace("'", "")
cty_header['NameSpace'] = cty_ns
if len(cty_header) == 2:
self._logger.debug("Header successfully retrieved from CTY File")
elif len(cty_header) < 2:
self._logger.warning("Header could only be partically retrieved from CTY File")
self._logger.warning("Content of Header: ")
for key in cty_header:
self._logger.warning(str(key)+": "+str(cty_header[key]))
return cty_header
except Exception as e:
self._logger.error("Clublog CTY File could not be opened / modified")
self._logger.error("Error Message: " + str(e))
return | 0.004775 |
def guess_depth(packages):
"""
Guess the optimal depth to use for the given list of packages.
Args:
packages (list of str): list of packages.
Returns:
int: guessed depth to use.
"""
if len(packages) == 1:
return packages[0].count('.') + 2
return min(p.count('.') for p in packages) + 1 | 0.002941 |
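Two worked values (assuming guess_depth() above is in scope) make the branches concrete:

guess_depth(['pkg.sub'])        # one package: 'pkg.sub'.count('.') + 2 == 3
guess_depth(['a.b.c', 'a.b'])   # several packages: min(2, 1) + 1 == 2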
def events(self, since=None, until=None, filters=None, decode=None):
"""
Get real-time events from the server. Similar to the ``docker events``
command.
Args:
since (UTC datetime or int): Get events from this point
until (UTC datetime or int): Get events until this point
filters (dict): Filter the events by event time, container or image
decode (bool): If set to true, stream will be decoded into dicts on
the fly. False by default.
Returns:
A :py:class:`docker.types.daemon.CancellableStream` generator
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> for event in client.events(decode=True):
... print(event)
{u'from': u'image/with:tag',
u'id': u'container-id',
u'status': u'start',
u'time': 1423339459}
...
or
>>> events = client.events()
>>> for event in events:
... print(event)
>>> # and cancel from another thread
>>> events.close()
"""
if isinstance(since, datetime):
since = utils.datetime_to_timestamp(since)
if isinstance(until, datetime):
until = utils.datetime_to_timestamp(until)
if filters:
filters = utils.convert_filters(filters)
params = {
'since': since,
'until': until,
'filters': filters
}
url = self._url('/events')
response = self._get(url, params=params, stream=True, timeout=None)
stream = self._stream_helper(response, decode=decode)
return types.CancellableStream(stream, response) | 0.001089 |
def variance_inflation_factor(regressors, hasconst=False):
"""Calculate variance inflation factor (VIF) for each all `regressors`.
A wrapper/modification of statsmodels:
statsmodels.stats.outliers_influence.variance_inflation_factor
One recommendation is that if VIF is greater than 5, then the explanatory
variable `x` is highly collinear with the other explanatory
variables, and the parameter estimates will have large standard errors
because of this. [source: StatsModels]
Parameters
----------
regressors: DataFrame
DataFrame containing the entire set of regressors
hasconst : bool, default False
If False, a column vector will be added to `regressors` for use in
OLS
Example
-------
# Generate some data
from datetime import date
from pandas_datareader.data import DataReader as dr
syms = {'TWEXBMTH' : 'usd',
'T10Y2YM' : 'term_spread',
'PCOPPUSDM' : 'copper'
}
start = date(2000, 1, 1)
data = (dr(syms.keys(), 'fred', start)
.pct_change()
.dropna())
data = data.rename(columns = syms)
print(variance_inflation_factor(data))
usd 1.31609
term_spread 1.03793
copper 1.37055
dtype: float64
"""
if not hasconst:
regressors = add_constant(regressors, prepend=False)
k = regressors.shape[1]
def vif_sub(x, regressors):
x_i = regressors.iloc[:, x]
mask = np.arange(k) != x
x_not_i = regressors.iloc[:, mask]
rsq = linear_model.OLS(x_i, x_not_i, missing="drop").fit().rsquared_adj
vif = 1.0 / (1.0 - rsq)
return vif
vifs = pd.Series(np.arange(k), index=regressors.columns)
vifs = vifs.apply(vif_sub, args=(regressors,))
# Find the constant column (probably called 'const', but not necessarily)
# and drop it. `is_nonzero_const` borrowed from statsmodels.add_constant
is_nonzero_const = np.ptp(regressors.values, axis=0) == 0
is_nonzero_const &= np.all(regressors != 0.0, axis=0)
vifs.drop(vifs.index[is_nonzero_const], inplace=True)
return vifs | 0.00045 |
def decompose(self):
"""Recursively destroys the contents of this tree."""
self.extract()
if len(self.contents) == 0:
return
current = self.contents[0]
while current is not None:
next = current.next
if isinstance(current, Tag):
del current.contents[:]
current.parent = None
current.previous = None
current.previousSibling = None
current.next = None
current.nextSibling = None
current = next | 0.00361 |
def get_interface(self, component):
"""
Gets given Component interface.
Usage::
>>> manager = Manager()
>>> manager.register_component("tests_component_a.rc")
True
>>> manager.get_interface("core.tests_component_a")
<tests_component_a.TestsComponentA object at 0x17b0d70>
:param component: Component to get the interface.
:type component: unicode
:return: Component interface.
:rtype: object
"""
profile = self.get_profile(component)
if profile:
return profile.interface | 0.003195 |
def delete(self, fnames=None):
"""Delete files"""
if fnames is None:
fnames = self.get_selected_filenames()
multiple = len(fnames) > 1
yes_to_all = None
for fname in fnames:
if fname == self.proxymodel.path_list[0]:
self.sig_delete_project.emit()
else:
yes_to_all = self.delete_file(fname, multiple, yes_to_all)
if yes_to_all is not None and not yes_to_all:
# Canceled
break | 0.00363 |
def _validate_profile(self, bag):
"""
Validate against OCRD BagIt profile (bag-info fields, algos etc)
"""
if not self.profile_validator.validate(bag):
raise Exception(str(self.profile_validator.report)) | 0.008097 |
def wait_init(self):
"""
Block until init_done flag is set or until init_wait_timeout happens.
:return: value of init_done
"""
init_done = self.init_done.wait(timeout=self.init_wait_timeout)
if not init_done:
if hasattr(self, "peek"):
app = self.config.get("application")
if app:
bef_init_cmds = app.get("cli_ready_trigger")
if bef_init_cmds in self.peek(): # pylint: disable=no-member
init_done = True
return init_done | 0.005128 |
def wrap(self, values):
"""Pack an iterable of dict into a Value
>>> T=NTTable([('A', 'ai'), ('B', 'as')])
>>> V = T.wrap([
{'A':42, 'B':'one'},
{'A':43, 'B':'two'},
])
"""
if isinstance(values, Value):
return values
cols = dict([(L, []) for L in self.labels])
try:
# unzip list of dict
for V in values:
for L in self.labels:
try:
cols[L].append(V[L])
except (IndexError, KeyError):
pass
# allow omit empty columns
for L in self.labels:
V = cols[L]
if len(V) == 0:
del cols[L]
try:
return self.Value(self.type, {
'labels': self.labels,
'value': cols,
})
except:
_log.error("Failed to encode '%s' with %s", cols, self.labels)
raise
except:
_log.exception("Failed to wrap: %s", values)
raise | 0.003463 |
def make_DID(name_type, address, index):
"""
Standard way of making a DID.
name_type is "name" or "subdomain"
"""
if name_type not in ['name', 'subdomain']:
raise ValueError("Require 'name' or 'subdomain' for name_type")
if name_type == 'name':
address = virtualchain.address_reencode(address)
else:
# what's the current version byte?
vb = keylib.b58check.b58check_version_byte(address)
if vb == bitcoin_blockchain.version_byte:
# singlesig
vb = SUBDOMAIN_ADDRESS_VERSION_BYTE
else:
vb = SUBDOMAIN_ADDRESS_MULTISIG_VERSION_BYTE
address = virtualchain.address_reencode(address, version_byte=vb)
return 'did:stack:v0:{}-{}'.format(address, index) | 0.001297 |
def read(database, table, key):
"""Does a single read operation."""
with database.snapshot() as snapshot:
result = snapshot.execute_sql('SELECT u.* FROM %s u WHERE u.id="%s"' %
(table, key))
for row in result:
key = row[0]
for i in range(NUM_FIELD):
field = row[i + 1] | 0.002762 |
def main(argv=None):
"""Validate text parsed with FSM or validate an FSM via command line."""
if argv is None:
argv = sys.argv
try:
opts, args = getopt.getopt(argv[1:], 'h', ['help'])
except getopt.error as msg:
raise Usage(msg)
for opt, _ in opts:
if opt in ('-h', '--help'):
print(__doc__)
print(help_msg)
return 0
if not args or len(args) > 4:
raise Usage('Invalid arguments.')
# If we have an argument, parse content of file and display as a template.
# Template displayed will match input template, minus any comment lines.
with open(args[0], 'r') as template:
fsm = TextFSM(template)
print('FSM Template:\n%s\n' % fsm)
if len(args) > 1:
# Second argument is file with example cli input.
# Prints parsed tabular result.
with open(args[1], 'r') as f:
cli_input = f.read()
table = fsm.ParseText(cli_input)
print('FSM Table:')
result = str(fsm.header) + '\n'
for line in table:
result += str(line) + '\n'
print(result, end='')
if len(args) > 2:
# Compare tabular result with data in third file argument.
# Exit value indicates if processed data matched expected result.
with open(args[2], 'r') as f:
ref_table = f.read()
if ref_table != result:
print('Data mis-match!')
return 1
else:
print('Data match!') | 0.018651 |
def weave(
target, advices, pointcut=None, ctx=None, depth=1, public=False,
pointcut_application=None, ttl=None
):
"""Weave advices on target with input pointcut.
:param callable target: target from where checking pointcut and
weaving advices.
:param advices: advices to weave on target.
:param ctx: target ctx (class or instance).
:param pointcut: condition for weaving advices on joinpointe.
The condition depends on its type.
:type pointcut:
- NoneType: advices are weaved on target.
- str: target name is compared to pointcut regex.
- function: called with target in parameter, if True, advices will
be weaved on target.
:param int depth: class weaving depth.
:param bool public: (default False) if True, weave only on public members.
:param routine pointcut_application: routine which applies a pointcut when
required. _Joinpoint().apply_pointcut by default. Such routine has
to take in parameters a routine called target and its related
function called function. Its result is the interception function.
:param float ttl: time to live for weaved advices.
:return: the intercepted functions created from input target or a tuple
with intercepted functions and ttl timer.
:rtype: list
:raises: AdviceError if pointcut is not None, not callable neither a str.
"""
result = []
# initialize advices
if isroutine(advices):
advices = [advices]
if advices:
# initialize pointcut
# do nothing if pointcut is None or is callable
if pointcut is None or callable(pointcut):
pass
# in case of str, use a name matcher
elif isinstance(pointcut, string_types):
pointcut = _namematcher(pointcut)
else:
error_msg = "Wrong pointcut to check weaving on {0}."
error_msg = error_msg.format(target)
advice_msg = "Must be None, or be a str or a function/method."
right_msg = "Not {0}".format(type(pointcut))
raise AdviceError(
"{0} {1} {2}".format(error_msg, advice_msg, right_msg)
)
if ctx is None:
ctx = find_ctx(elt=target)
_weave(
target=target, advices=advices, pointcut=pointcut, depth=depth,
depth_predicate=_publiccallable if public else callable, ctx=ctx,
intercepted=result, pointcut_application=pointcut_application
)
if ttl is not None:
kwargs = {
'target': target,
'advices': advices,
'pointcut': pointcut,
'depth': depth,
'public': public,
'ctx': ctx
}
timer = Timer(ttl, unweave, kwargs=kwargs)
timer.start()
result = result, timer
return result | 0.000342 |
def transition_issue(self, issue, transition, fields=None, comment=None, worklog=None, **fieldargs):
"""Perform a transition on an issue.
Each keyword argument (other than the predefined ones) is treated as a field name and the argument's value
is treated as the intended value for that field -- if the fields argument is used, all other keyword arguments
will be ignored. Field values will be set on the issue as part of the transition process.
:param issue: ID or key of the issue to perform the transition on
:param transition: ID or name of the transition to perform
:param comment: *Optional* String to add as comment to the issue when
performing the transition.
:param fields: a dict containing field names and the values to use.
If present, all other keyword arguments will be ignored
"""
transitionId = None
try:
transitionId = int(transition)
except Exception:
# cannot cast to int, so try to find transitionId by name
transitionId = self.find_transitionid_by_name(issue, transition)
if transitionId is None:
raise JIRAError("Invalid transition name. %s" % transition)
data = {
'transition': {
'id': transitionId}}
if comment:
data['update'] = {'comment': [{'add': {'body': comment}}]}
if worklog:
data['update'] = {'worklog': [{'add': {'timeSpent': worklog}}]}
if fields is not None:
data['fields'] = fields
else:
fields_dict = {}
for field in fieldargs:
fields_dict[field] = fieldargs[field]
data['fields'] = fields_dict
url = self._get_url('issue/' + str(issue) + '/transitions')
r = self._session.post(
url, data=json.dumps(data))
try:
r_json = json_loads(r)
except ValueError as e:
logging.error("%s\n%s" % (e, r.text))
raise e
return r_json | 0.002874 |
def feature_names(self):
"""Get feature names (column labels).
Returns
-------
feature_names : list or None
"""
if self._feature_names is None:
self._feature_names = ['f{0}'.format(i) for i in range(self.num_col())]
return self._feature_names | 0.009646 |
def main(argv):
"""Train on examples and export the updated model weights."""
tf_records = argv[1:]
logging.info("Training on %s records: %s to %s",
len(tf_records), tf_records[0], tf_records[-1])
with utils.logged_timer("Training"):
train(*tf_records)
if FLAGS.export_path:
dual_net.export_model(FLAGS.export_path)
if FLAGS.freeze:
if FLAGS.use_tpu:
dual_net.freeze_graph_tpu(FLAGS.export_path)
else:
dual_net.freeze_graph(FLAGS.export_path) | 0.001855 |
def with_name(self, name):
"""Sets the name scope for future operations."""
self._head = self._head.with_name(name)
return self | 0.007194 |
def add_retout_site(self, node):
"""
Add a custom retout site.
Retout (returning to outside of the function) sites are very rare. It mostly occurs during CFG recovery when we
incorrectly identify the beginning of a function in the first iteration, and then correctly identify that
function later in the same iteration (function alignments can lead to this bizarre case). We will mark all edges
going out of the header of that function as outside edges, because all successors now belong to the
incorrectly-identified function. This identification error will be fixed in the second iteration of CFG
recovery. However, we still want to keep track of jumpouts/retouts during the first iteration so other logic in
CFG recovery still works.
:param node: The address of the basic block that control flow leaves the current function after a call.
:return: None
"""
self._register_nodes(True, node)
self._retout_sites.add(node)
self._add_endpoint(node, 'return') | 0.008326 |
def get_help_data(filepath):
"""
Get the json data from a help file
Args:
filepath (str): The file path for the help file
Returns:
data: The json data from a help file
"""
try:
with open(filepath, 'r') as file:
return _json.load(file, object_pairs_hook=OrderedDict)
except Exception as e:
logger.error("Could not load file {}".format(filepath))
logger.exception(e)
return {} | 0.002151 |
def extract_lightcurve(
log,
spectrumFiles,
userExplosionDay,
extendLightCurveTail,
obsmode):
"""
*Extract the requested lightcurve from list of spectrum files*
**Key Arguments:**
- ``log`` -- logger
- ``spectrumFiles`` -- list of the spectrum files
- ``userExplosionDay`` -- explosion day for transient as set by the user in the settings file
- ``extendLightCurveTail`` -- extend the tail of the lightcurve by extrapolating last two data points
- ``obsmode`` -- the observation mode (generally a filter system and filter type, e.g. "sdss,g")
**Return:**
- ``magnitudes`` -- numpy array of the magnitudes
- ``times`` -- numpy array of the corresponding times
"""
################ > IMPORTS ################
## STANDARD LIB ##
import re
## THIRD PARTY ##
import numpy as np
## LOCAL APPLICATION ##
################ > VARIABLE SETTINGS ######
reTime = re.compile(r't((\+|\-)\d{3}\.\d{2})')
magnitudes = []
times = []
fileNameTimes = []
# log.info('spectrumFiles[-1] %s' % (spectrumFiles[-1],))
################ >ACTION(S) ################
for thisFile in spectrumFiles:
# log.warning('here is the file %s' % (thisFile,))
# mul = 1.
# if reTime.search(thisFile).group(2) == "-":
# mul = -1.
thisTime = float(reTime.search(thisFile).group(1))
fileNameTimes.append(thisTime)
log.debug('time in %s is: %s' % (thisFile, thisTime))
wavelengthArray, fluxArray = extract_spectra_from_file(log, thisFile)
try:
log.debug("attempting to find the magnitude from spectrum")
thisMag = calcphot(
log,
wavelengthArray=wavelengthArray,
fluxArray=fluxArray,
obsmode=obsmode
)
magnitudes.append(thisMag)
times.append(thisTime)
except Exception as e:
log.warning(
"could not find the magnitude from spectrum %s using the filter %s - failed with this error: %s " %
(thisFile, obsmode, str(e),))
pass
# APPEND AN EXPLOSION DAY AND MAG
if len(magnitudes) > 3:
finalTime = max(times)
firstTime = min(times)
firstMag = magnitudes[times.index(firstTime)]
lastMag = magnitudes[times.index(finalTime)]
log.debug('times: %s' % (times,))
log.debug('magnitudes: %s' % (magnitudes,))
sortedTime = sorted(times[:])
log.debug('sortedTime: %s' % (sortedTime,))
secondLastTime = sortedTime[-2]
secondLastMag = magnitudes[times.index(secondLastTime)]
iterations = 2.
magDrop = 6.
if (userExplosionDay is not None and userExplosionDay is not False):
x2 = firstTime
x1 = userExplosionDay
y2 = firstMag
y1 = firstMag + magDrop
m = (y1 - y2) / (x1 - x2)
c = y1 - m * x1
upperTimeLimit = (firstMag - c) / m
thisRange = upperTimeLimit - userExplosionDay
delta = 0
magDropNow = magDrop
# log.debug('firstTime, userExplosionDay, increment: %s, %s, %s' % (firstTime, userExplosionDay))
log.debug('range: %s' %
(np.arange(userExplosionDay, upperTimeLimit, thisRange / iterations + 1),))
for t in np.arange(userExplosionDay, upperTimeLimit, thisRange / iterations):
log.debug('new time: %s' % (t,))
magDropNow -= delta
delta = magDrop / iterations
log.debug('magDrop, delta : %s, %s' % (magDropNow, delta,))
times.append(t)
magnitudes.append(firstMag + magDropNow)
log.debug('new mag: %s' % (firstMag + magDropNow,))
if extendLightCurveTail:
x2 = secondLastTime
x1 = finalTime
y2 = secondLastMag
y1 = lastMag
log.debug('finalTime, lastMag: %s, %s' % (finalTime, lastMag,))
log.debug('secondLastTime, secondLastMag: %s, %s' %
(secondLastTime, secondLastMag))
m = (y1 - y2) / (x1 - x2)
c = y1 - m * x1
upperTimeLimit = (lastMag + 4 - c) / m
thisRange = upperTimeLimit - finalTime
delta = 0
magDrop = 4
magDropNow = 0
for t in np.arange(finalTime, upperTimeLimit, thisRange / iterations):
magDropNow += delta
delta = magDrop / iterations
times.append(t)
magnitudes.append(lastMag + magDropNow)
log.debug('finding magnitudes and times from spectrum : %s' % (thisFile,))
log.debug('magnitudes, times: %s, %s' % (magnitudes, times))
return magnitudes, times | 0.003044 |
def _set_interface_vlan_ospf_conf(self, v, load=False):
"""
Setter method for interface_vlan_ospf_conf, mapped from YANG variable /routing_system/interface/ve/ip/interface_vlan_ospf_conf (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface_vlan_ospf_conf is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface_vlan_ospf_conf() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=interface_vlan_ospf_conf.interface_vlan_ospf_conf, is_container='container', presence=False, yang_name="interface-vlan-ospf-conf", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'OSPFVlanInterfaceCallPoint'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """interface_vlan_ospf_conf must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=interface_vlan_ospf_conf.interface_vlan_ospf_conf, is_container='container', presence=False, yang_name="interface-vlan-ospf-conf", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'OSPFVlanInterfaceCallPoint'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)""",
})
self.__interface_vlan_ospf_conf = t
if hasattr(self, '_set'):
self._set() | 0.005914 |
def post(param_map, url=URL):
"""Posts a `param_map` created with `config` to
the FlashAir config.cgi entrypoint"""
prepped_request = _prep_post(url=url, **param_map)
return cgi.send(prepped_request) | 0.004651 |
def with_headers(self, headers):
"""Create a new request with added headers
Parameters
----------
headers: Mapping
the headers to add
"""
return self.replace(headers=_merge_maps(self.headers, headers)) | 0.007634 |
def normalize(data):
"""
Function to normalize data to have mean 0 and unity standard deviation
(also called z-transform)
Parameters
----------
data : numpy.ndarray
Returns
-------
numpy.ndarray
z-transform of input array
"""
data = data.astype(float)
data -= data.mean()
return data / data.std() | 0.018135 |
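A quick numerical check of the z-transform (assuming normalize() above is in scope):

import numpy as np

data = np.array([1, 2, 3, 4, 5])
z = normalize(data)
print(round(z.mean(), 6), round(z.std(), 6))  # 0.0 1.0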
def put_tagging(Bucket,
region=None, key=None, keyid=None, profile=None, **kwargs):
'''
Given a valid config, update the tags for a bucket.
Returns {updated: true} if tags were updated and returns
{updated: False} if tags were not updated.
CLI Example:
.. code-block:: bash
salt myminion boto_s3_bucket.put_tagging my_bucket my_role [...]
'''
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
tagslist = []
for k, v in six.iteritems(kwargs):
if six.text_type(k).startswith('__'):
continue
tagslist.append({'Key': six.text_type(k), 'Value': six.text_type(v)})
conn.put_bucket_tagging(Bucket=Bucket, Tagging={
'TagSet': tagslist,
})
return {'updated': True, 'name': Bucket}
except ClientError as e:
return {'updated': False, 'error': __utils__['boto3.get_error'](e)} | 0.003115 |
def true_events(network, previous_state, current_state, next_state,
indices=None, major_complex=None):
"""Return all mechanisms that have true causes and true effects within the
complex.
Args:
network (Network): The network to analyze.
previous_state (tuple[int]): The state of the network at ``t - 1``.
current_state (tuple[int]): The state of the network at ``t``.
next_state (tuple[int]): The state of the network at ``t + 1``.
Keyword Args:
indices (tuple[int]): The indices of the major complex.
major_complex (AcSystemIrreducibilityAnalysis): The major complex. If
``major_complex`` is given then ``indices`` is ignored.
Returns:
tuple[Event]: List of true events in the major complex.
"""
# TODO: validate triplet of states
if major_complex:
nodes = major_complex.subsystem.node_indices
elif indices:
nodes = indices
else:
major_complex = compute.major_complex(network, current_state)
nodes = major_complex.subsystem.node_indices
return events(network, previous_state, current_state, next_state, nodes) | 0.000852 |
def get_authinfo(request):
"""Get authentication info from the encrypted message."""
if (("files_iv" not in request.session) or ("files_text" not in request.session) or ("files_key" not in request.COOKIES)):
return False
"""
Decrypt the password given the SERVER-side IV, SERVER-side
ciphertext, and CLIENT-side key.
See note above on why this is done.
"""
iv = base64.b64decode(request.session["files_iv"])
text = base64.b64decode(request.session["files_text"])
key = base64.b64decode(request.COOKIES["files_key"])
obj = AES.new(key, AES.MODE_CFB, iv)
password = obj.decrypt(text)
username = request.session["filecenter_username"] if "filecenter_username" in request.session else request.user.username
return {"username": username, "password": password} | 0.003593 |
def path_helper(self, operations, view, **kwargs):
"""Path helper that allows passing a bottle view function."""
operations.update(yaml_utils.load_operations_from_docstring(view.__doc__))
app = kwargs.get('app', _default_app)
route = self._route_for_view(app, view)
return self.bottle_path_to_openapi(route.rule) | 0.008523 |
def relabel_map(label_image, mapping, key=lambda x, y: x[y]):
r"""
Relabel an image using the supplied mapping.
The ``mapping`` can be any kind of subscriptable object. The respective region id is used
to access the new value from the ``mapping``. The ``key`` keyword parameter can be used to
supply another access function. The ``key`` function must have the signature
key(mapping, region-id) and return the new region-id to assign.
Parameters
----------
label_image : array_like
A nD label map.
mapping : dictionary or subscriptable object
A mapping object.
key : function
Can be used to define the key-access to the ``mapping`` object.
Returns
-------
relabel_map : ndarray
A label map with new region ids.
Raises
------
ArgumentError
If a region id is missing in the supplied mapping
"""
label_image = scipy.array(label_image)
def _map(x):
try:
return key(mapping, x)
except Exception as e:
raise ArgumentError('No conversion for region id {} found in the supplied mapping. Error: {}'.format(x, e))
vmap = scipy.vectorize(_map, otypes=[label_image.dtype])
return vmap(label_image) | 0.010023 |
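A minimal usage sketch (assuming relabel_map() above is in scope, and an older SciPy where scipy.array/scipy.vectorize still alias their NumPy counterparts):

import numpy as np

label_image = np.array([[1, 1, 2],
                        [2, 3, 3]])
mapping = {1: 10, 2: 20, 3: 30}
print(relabel_map(label_image, mapping))
# [[10 10 20]
#  [20 30 30]]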
def _set_subject_alt(self, name, values):
"""
Replaces all existing asn1crypto.x509.GeneralName objects of the choice
represented by the name parameter with the values
:param name:
A unicode string of the choice name of the x509.GeneralName object
:param values:
A list of unicode strings to use as the values for the new
x509.GeneralName objects
"""
if self._subject_alt_name is not None:
filtered_general_names = []
for general_name in self._subject_alt_name:
if general_name.name != name:
filtered_general_names.append(general_name)
self._subject_alt_name = x509.GeneralNames(filtered_general_names)
else:
self._subject_alt_name = x509.GeneralNames()
if values is not None:
for value in values:
new_general_name = x509.GeneralName(name=name, value=value)
self._subject_alt_name.append(new_general_name)
if len(self._subject_alt_name) == 0:
self._subject_alt_name = None | 0.001765 |
def applyEdits(self,
addFeatures=[],
updateFeatures=[],
deleteFeatures=None,
gdbVersion=None,
rollbackOnFailure=True):
"""
This operation adds, updates, and deletes features to the
associated feature layer or table in a single call.
Inputs:
addFeatures - The array of features to be added. These
features should be common.general.Feature
objects, or they should be a
common.general.FeatureSet object.
updateFeatures - The array of features to be updated.
These features should be common.Feature
objects
deleteFeatures - string of OIDs to remove from service
gdbVersion - Geodatabase version to apply the edits.
rollbackOnFailure - Optional parameter to specify if the
edits should be applied only if all
submitted edits succeed. If false, the
server will apply the edits that succeed
even if some of the submitted edits fail.
If true, the server will apply the edits
only if all edits succeed. The default
value is true.
Output:
dictionary of messages
"""
editURL = self._url + "/applyEdits"
params = {"f": "json",
'rollbackOnFailure' : rollbackOnFailure
}
if not gdbVersion is None:
params['gdbVersion'] = gdbVersion
if len(addFeatures) > 0 and \
isinstance(addFeatures[0], Feature):
params['adds'] = json.dumps([f.asDictionary for f in addFeatures],
default=_date_handler)
elif isinstance(addFeatures, FeatureSet):
params['adds'] = json.dumps([f.asDictionary for f in addFeatures],
default=_date_handler)
if len(updateFeatures) > 0 and \
isinstance(updateFeatures[0], Feature):
params['updates'] = json.dumps([f.asDictionary for f in updateFeatures],
default=_date_handler)
if deleteFeatures is not None and \
isinstance(deleteFeatures, str):
params['deletes'] = deleteFeatures
return self._post(url=editURL, param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url) | 0.004557 |
def logical_not_expr(self):
"""
logical_not_expr: 'not' logical_not_expr
| comparison
"""
if self.token.nature == Nature.NOT:
token = self.token
self._process(Nature.NOT)
return UnaryOperation(op=token, right=self.logical_not_expr())
else:
return self.comparison() | 0.005319 |
def make_video_cache(self, days=None):
"""Save videos on _cache_videos to avoid dups."""
if days is None:
days = self._min_days_vdo_cache
self._cached_videos = self.videos(days) | 0.00939 |
def unknown_command(self, args):
'''handle mode switch by mode name as command'''
mode_mapping = self.master.mode_mapping()
mode = args[0].upper()
if mode in mode_mapping:
self.master.set_mode(mode_mapping[mode])
return True
return False | 0.006645 |
def shutdown_notebook(request, username):
"""Stop any running notebook for a user."""
manager = get_notebook_manager(request)
if manager.is_running(username):
manager.stop_notebook(username) | 0.004739 |
def weekofyear(self, first_day_of_week=SATURDAY):
"""weekofyear(first_day_of_week=SATURDAY)
:param first_day_of_week: One of the
:py:data:`khayyam.SATURDAY`,
:py:data:`khayyam.SUNDAY`,
:py:data:`khayyam.MONDAY`,
:py:data:`khayyam.TUESDAY`,
:py:data:`khayyam.WEDNESDAY`,
:py:data:`khayyam.THURSDAY` or
:py:data:`khayyam.FRIDAY`
:return: The week number of the year.
:rtype: int
"""
first_day_of_year = self.firstdayofyear()
days = (self - first_day_of_year).days
offset = first_day_of_week - first_day_of_year.weekday()
if offset < 0:
offset += 7
if days < offset:
return 0
return int((days - offset) / 7 + 1) | 0.002395 |
def discard(self, element, multiplicity=None):
"""Removes the `element` from the multiset.
If multiplicity is ``None``, all occurrences of the element are removed:
>>> ms = Multiset('aab')
>>> ms.discard('a')
2
>>> sorted(ms)
['b']
Otherwise, the multiplicity is subtracted from the one in the multiset and the
old multiplicity is removed:
>>> ms = Multiset('aab')
>>> ms.discard('a', 1)
2
>>> sorted(ms)
['a', 'b']
In contrast to :meth:`remove`, this does not raise an error if the
element is not in the multiset:
>>> ms = Multiset('a')
>>> ms.discard('b')
0
>>> sorted(ms)
['a']
It is also not an error to remove more elements than are in the set:
>>> ms.remove('a', 2)
1
>>> sorted(ms)
[]
Args:
element:
The element to remove from the multiset.
multiplicity:
An optional multiplicity i.e. count of elements to remove.
Returns:
The multiplicity of the element in the multiset before
the removal.
"""
_elements = self._elements
if element in _elements:
old_multiplicity = _elements[element]
if multiplicity is None or multiplicity >= old_multiplicity:
del _elements[element]
self._total -= old_multiplicity
elif multiplicity < 0:
raise ValueError("Multiplicity must not be negative")
elif multiplicity > 0:
_elements[element] -= multiplicity
self._total -= multiplicity
return old_multiplicity
else:
return 0 | 0.002214 |
def append(self, item):
""" Appends the *item* to the end of the `Sequence`.
:param item: any :class:`Structure`, :class:`Sequence`, :class:`Array`
or :class:`Field` instance.
"""
if not is_any(item):
raise MemberTypeError(self, item, member=len(self))
self._data.append(item) | 0.005865 |
def write_pad_codewords(buff, version, capacity, length):
"""\
Writes the pad codewords iff the data does not fill the capacity of the
symbol.
:param buff: The byte buffer.
:param int version: The (Micro) QR Code version.
:param int capacity: The total capacity of the symbol (incl. error correction)
:param int length: Length of the data bit stream.
"""
# ISO/IEC 18004:2015(E) -- 7.4.10 Bit stream to codeword conversion (page 32)
# The message bit stream shall then be extended to fill the data capacity
# of the symbol corresponding to the Version and Error Correction Level, as
# defined in Table 8, by adding the Pad Codewords 11101100 and 00010001
# alternately. For Micro QR Code versions M1 and M3 symbols, the final data
# codeword is 4 bits long. The Pad Codeword used in the final data symbol
# character position in Micro QR Code versions M1 and M3 symbols shall be
# represented as 0000.
write = buff.extend
if version in (consts.VERSION_M1, consts.VERSION_M3):
write([0] * (capacity - length))
else:
pad_codewords = ((1, 1, 1, 0, 1, 1, 0, 0), (0, 0, 0, 1, 0, 0, 0, 1))
for i in range(capacity // 8 - length // 8):
write(pad_codewords[i % 2]) | 0.002364 |
def encryptPassword(self, login, passwd):
"""Encrypt credentials using the google publickey, with the
RSA algorithm"""
# structure of the binary key:
#
# *-------------------------------------------------------*
# | modulus_length | modulus | exponent_length | exponent |
# *-------------------------------------------------------*
#
# modulus_length and exponent_length are uint32
binaryKey = b64decode(config.GOOGLE_PUBKEY)
# modulus
i = utils.readInt(binaryKey, 0)
modulus = utils.toBigInt(binaryKey[4:][0:i])
# exponent
j = utils.readInt(binaryKey, i + 4)
exponent = utils.toBigInt(binaryKey[i + 8:][0:j])
# calculate SHA1 of the pub key
digest = hashes.Hash(hashes.SHA1(), backend=default_backend())
digest.update(binaryKey)
h = b'\x00' + digest.finalize()[0:4]
# generate a public key
der_data = encode_dss_signature(modulus, exponent)
publicKey = load_der_public_key(der_data, backend=default_backend())
# encrypt email and password using pubkey
to_be_encrypted = login.encode() + b'\x00' + passwd.encode()
ciphertext = publicKey.encrypt(
to_be_encrypted,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA1(),
label=None
)
)
return urlsafe_b64encode(h + ciphertext) | 0.001322 |
def _set_master(self, v, load=False):
"""
Setter method for master, mapped from YANG variable /ntp/master (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_master is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_master() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'2 .. 15']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(8), is_leaf=True, yang_name="master", rest_name="master", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'NTP Master', u'cli-full-command': None, u'callpoint': u'ntp_master_cp', u'sort-priority': u'34'}}, namespace='urn:brocade.com:mgmt:brocade-ntp', defining_module='brocade-ntp', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """master must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'2 .. 15']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(8), is_leaf=True, yang_name="master", rest_name="master", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'NTP Master', u'cli-full-command': None, u'callpoint': u'ntp_master_cp', u'sort-priority': u'34'}}, namespace='urn:brocade.com:mgmt:brocade-ntp', defining_module='brocade-ntp', yang_type='uint32', is_config=True)""",
})
self.__master = t
if hasattr(self, '_set'):
self._set() | 0.004225 |
def _departmentsVoc(self):
"""Vocabulary of available departments
"""
query = {
"portal_type": "Department",
"is_active": True
}
results = api.search(query, "bika_setup_catalog")
items = map(lambda dept: (api.get_uid(dept), api.get_title(dept)),
results)
dept_uids = map(api.get_uid, results)
# Currently assigned departments
depts = self.getDepartments()
# If one department assigned to the Lab Contact is disabled, it will
# be shown in the list until the department has been unassigned.
for dept in depts:
uid = api.get_uid(dept)
if uid in dept_uids:
continue
items.append((uid, api.get_title(dept)))
return api.to_display_list(items, sort_by="value", allow_empty=False) | 0.002288 |
def human_filesize(i):
"""
'human-readable' file size (e.g. 13 KB, 4.1 MB, 102 bytes, etc).
"""
bytes = float(i)
if bytes < 1024:
return u"%d Byte%s" % (bytes, bytes != 1 and u"s" or u"")
if bytes < 1024 * 1024:
return u"%.1f KB" % (bytes / 1024)
if bytes < 1024 * 1024 * 1024:
return u"%.1f MB" % (bytes / (1024 * 1024))
return u"%.1f GB" % (bytes / (1024 * 1024 * 1024)) | 0.002336 |
def parse_negation_operation(operation: str) -> Tuple[bool, str]:
"""Parse the negation modifier in an operation."""
_operation = operation.strip()
if not _operation:
raise QueryParserException('Operation is not valid: {}'.format(operation))
negation = False
if _operation[0] == '~':
negation = True
_operation = _operation[1:]
return negation, _operation.strip() | 0.004854 |
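Quick examples (assuming parse_negation_operation() above is in scope):

parse_negation_operation('~running')    # (True, 'running')
parse_negation_operation('  running ')  # (False, 'running')
parse_negation_operation('')            # raises QueryParserException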
def retry(exception_to_check, tries=5, delay=5, multiplier=2):
'''Tries to call the wrapped function again, after an incremental delay
:param exception_to_check: Exception(s) to check for, before retrying.
:type exception_to_check: Exception
:param tries: Number of times to retry before failing.
:type tries: int
:param delay: time in second to sleep before retrying.
:type delay: int
:param multiplier: multiply the delay each time the exception_to_check
occurs.
:type multiplier: int
'''
def deco_retry(func):
'''Creates the retry decorator'''
@wraps(func)
def func_retry(*args, **kwargs):
'''Actual wrapped function'''
if multiplier < 1:
raise ValueError(
'multiplier = {}. It has to be at least 1.'.format(
multiplier
)
)
mtries, mdelay = tries, delay
while mtries > 1:
try:
return func(*args, **kwargs)
except exception_to_check as err:
message = "%s, retrying in %d seconds..." % (
str(err), mdelay)
print(message)
sleep(mdelay)
mtries -= 1
mdelay *= multiplier
return func(*args, **kwargs)
return func_retry
return deco_retry | 0.000679 |
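A usage sketch of the decorator (assuming retry() above is in scope); fetch_status below is a hypothetical stand-in for any flaky call:

@retry(ConnectionError, tries=3, delay=1, multiplier=2)
def fetch_status(host):
    # hypothetical flaky operation; anything that may raise ConnectionError
    raise ConnectionError('%s unreachable' % host)

# Calling fetch_status('example.org') would print two retry messages
# (sleeping 1s, then 2s) before the final ConnectionError propagates.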
def get_cropping_offset(crop, epsilon):
"""
Calculates the cropping offset for the cropped image. This only calculates
the offset for one dimension (X or Y). This should be called twice to get
the offsets for the X and Y dimensions.
:param str crop: A percentage cropping value for the plane. This is in the
form of something like '50%'.
:param float epsilon: The difference between the original image's dimension
(X or Y) and the desired crop window.
:rtype: int
:returns: The cropping offset for the given dimension.
"""
m = _CROP_PERCENT_PATTERN.match(crop)
if not m:
raise ThumbnailParseError('Unrecognized crop option: %s' % crop)
value = int(m.group('value')) # we only take ints in the regexp
unit = m.group('unit')
if unit == '%':
value = epsilon * value / 100.0
# return ∈ [0, epsilon]
return int(max(0, min(value, epsilon))) | 0.002139 |
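_CROP_PERCENT_PATTERN is referenced but not shown above; a minimal sketch, assuming it captures an integer 'value' group and a '%' 'unit' group (the pattern below is hypothetical):

import re

# Hypothetical pattern -- the real _CROP_PERCENT_PATTERN is not shown above.
_CROP_PERCENT_PATTERN = re.compile(r'^(?P<value>\d+)(?P<unit>%)$')

get_cropping_offset('25%', 200.0)   # 200.0 * 25 / 100.0 == 50.0 -> 50
get_cropping_offset('100%', 80.0)   # clamped to epsilon -> 80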
def first_run(self, known_block_number):
""" Blocking call to update the local state, if necessary. """
assert self.callbacks, 'callbacks not set'
latest_block = self.chain.get_block(block_identifier='latest')
log.debug(
'Alarm task first run',
known_block_number=known_block_number,
latest_block_number=latest_block['number'],
latest_gas_limit=latest_block['gasLimit'],
latest_block_hash=to_hex(latest_block['hash']),
)
self.known_block_number = known_block_number
self.chain_id = self.chain.network_id
self._maybe_run_callbacks(latest_block) | 0.00299 |
def actnorm_scale(name, x, logscale_factor=3., reverse=False, init=False):
"""Per-channel scaling of x."""
x_shape = common_layers.shape_list(x)
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
# Variance initialization logic.
assert len(x_shape) == 2 or len(x_shape) == 4
if len(x_shape) == 2:
x_var = tf.reduce_mean(x**2, [0], keepdims=True)
logdet_factor = 1
var_shape = (1, x_shape[1])
elif len(x_shape) == 4:
x_var = tf.reduce_mean(x**2, [0, 1, 2], keepdims=True)
logdet_factor = x_shape[1]*x_shape[2]
var_shape = (1, 1, 1, x_shape[3])
init_value = tf.log(1.0 / (tf.sqrt(x_var) + 1e-6)) / logscale_factor
logs = get_variable_ddi("logs", var_shape, initial_value=init_value,
init=init)
logs = logs * logscale_factor
# Function and reverse function.
if not reverse:
x = x * tf.exp(logs)
else:
x = x * tf.exp(-logs)
# Objective calculation, h * w * sum(log|s|)
dlogdet = tf.reduce_sum(logs) * logdet_factor
if reverse:
dlogdet *= -1
return x, dlogdet | 0.011797 |
def _get_struct_format(self, size):
"""
Gets the format specifier for use with struct. This is only designed
for 1, 2, 4, or 8 byte values and will throw an exception if it is
anything else.
:param size: The size as an int
:return: The struct format specifier for the size specified
"""
if isinstance(size, types.LambdaType):
size = size(self.structure)
struct_format = {
1: 'B',
2: 'H',
4: 'L',
8: 'Q'
}
if size not in struct_format.keys():
raise InvalidFieldDefinition("Cannot struct format of size %s"
% size)
return struct_format[size] | 0.00267 |
def _connect_attempt(self, mode='default_reset', esp32r0_delay=False):
""" A single connection attempt, with esp32r0 workaround options """
# esp32r0_delay is a workaround for bugs with the most common auto reset
# circuit and Windows, if the EN pin on the dev board does not have
# enough capacitance.
#
# Newer dev boards shouldn't have this problem (higher value capacitor
# on the EN pin), and ESP32 revision 1 can't use this workaround as it
# relies on a silicon bug.
#
# Details: https://github.com/espressif/esptool/issues/136
last_error = None
# If we're doing no_sync, we're likely communicating as a pass through
# with an intermediate device to the ESP32
if mode == "no_reset_no_sync":
return last_error
# issue reset-to-bootloader:
# RTS = either CH_PD/EN or nRESET (both active low = chip in reset
# DTR = GPIO0 (active low = boot to flasher)
#
# DTR & RTS are active low signals,
# ie True = pin @ 0V, False = pin @ VCC.
if mode != 'no_reset':
self._setDTR(False) # IO0=HIGH
self._setRTS(True) # EN=LOW, chip in reset
time.sleep(0.1)
if esp32r0_delay:
# Some chips are more likely to trigger the esp32r0
# watchdog reset silicon bug if they're held with EN=LOW
# for a longer period
time.sleep(1.2)
self._setDTR(True) # IO0=LOW
self._setRTS(False) # EN=HIGH, chip out of reset
if esp32r0_delay:
# Sleep longer after reset.
# This workaround only works on revision 0 ESP32 chips,
# it exploits a silicon bug: a spurious watchdog reset.
time.sleep(0.4) # allow watchdog reset to occur
time.sleep(0.05)
self._setDTR(False) # IO0=HIGH, done
for _ in range(5):
try:
self.flush_input()
self._port.flushOutput()
self.sync()
return None
except FatalError as e:
if esp32r0_delay:
print('_', end='')
else:
print('.', end='')
sys.stdout.flush()
time.sleep(0.05)
last_error = e
return last_error | 0.001226 |
def sample_given_context(self, c, c_dims):
'''
Sample the region with max progress among regions that have the same context
c: context value on c_dims dimensions
c_dims: w.r.t sensory space dimensions
'''
index = self.discrete_progress.sample_given_context(c, c_dims, self.space)
return self.space.rand_value(index).flatten()[list(set(range(len(self.space.cardinalities))) - set(c_dims))] | 0.011062 |
def get_options(self, gradebook_id):
"""Get options for gradebook.
Get options dictionary for a gradebook. Options include gradebook
attributes.
Args:
gradebook_id (str): unique identifier for gradebook, i.e. ``2314``
Returns:
An example return value is:
.. code-block:: python
{
u'data':
{
u'accessLevel': u'class',
u'archived': False,
u'calc_on_approved_only': False,
u'configured': None,
u'courseName': u'',
u'courseNumber': u'mitxdemosite',
u'deriveOverallGrades': False,
u'gradebookEwsEnabled': False,
u'gradebookId': 1293808,
u'gradebookName': u'Gradebook for mitxdemosite',
u'gradebookReadOnly': False,
u'gradebookVisibleToAdvisors': False,
u'graders_change_approved': False,
u'hideExcuseButtonInUI': False,
u'homeworkBetaEnabled': False,
u'membershipQualifier': u'/project/mitxdemosite',
u'membershipSource': u'stellar',
u'student_sees_actual_grades': True,
u'student_sees_category_info': True,
u'student_sees_comments': True,
u'student_sees_cumulative_score': True,
u'student_sees_histograms': True,
u'student_sees_submissions': False,
u'ta_approves': False,
u'ta_change_approved': False,
u'ta_configures': False,
u'ta_edits': False,
u'use_grade_weighting': False,
u'usingAttendance': False,
u'versionCompatible': 4,
u'versionCompatibleString': u'General Availability'
},
}
"""
end_point = 'gradebook/options/{gradebookId}'.format(
gradebookId=gradebook_id or self.gradebook_id)
options = self.get(end_point)
return options['data'] | 0.000832 |
def command(db, channel, command, *args):
"""
Utility function to issue a command to all Turnstile instances.
:param db: The database handle.
:param channel: The control channel all Turnstile instances are
listening on.
:param command: The command, as plain text. Currently, only
'reload' and 'ping' are recognized.
All remaining arguments are treated as arguments for the command;
they will be stringified and sent along with the command to the
control channel. Note that ':' is an illegal character in
arguments, but no warnings will be issued if it is used.
"""
# Build the command we're sending
cmd = [command]
cmd.extend(str(a) for a in args)
# Send it out
db.publish(channel, ':'.join(cmd)) | 0.001247 |
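A minimal usage sketch for the helper above, assuming the `db` handle is a redis client exposing `publish`; the channel name and arguments below are illustrative only.
import redis

db = redis.StrictRedis(host="localhost", port=6379)
# Ask every Turnstile instance listening on "control" to reload immediately;
# this publishes the string "reload:immediate" to that channel.
command(db, "control", "reload", "immediate")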
def from_dict(document):
"""Create attribute definition form Json-like object represenation.
Parameters
----------
document : dict
            JSON-like object representation
Returns
-------
AttributeDefinition
"""
if 'default' in document:
default = document['default']
else:
default = None
return AttributeDefinition(
document['id'],
document['name'],
document['description'],
AttributeType.from_dict(document['type']),
default=default
) | 0.003226 |
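A small illustrative call of the factory above; AttributeDefinition and AttributeType are assumed to come from the surrounding module, and both the field values and the shape of the nested 'type' document are made up.
document = {
    'id': 'max_size',
    'name': 'Maximum size',
    'description': 'Upper bound on the object size',
    'type': {'name': 'int'},   # assumed shape for AttributeType.from_dict
    'default': 100,
}
attr_def = AttributeDefinition.from_dict(document)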
def __calculate_centers(self):
"""!
@brief Calculate center using membership of each cluster.
@return (list) Updated clusters as list of clusters. Each cluster contains indexes of objects from data.
@return (numpy.array) Updated centers.
"""
dimension = self.__data.shape[1]
centers = numpy.zeros((len(self.__centers), dimension))
for i in range(len(self.__centers)):
# multiplication '@' requires python version 3.5
centers[i] = numpy.divide(self.__membership[:, i] @ self.__data, numpy.sum(self.__membership[:, i]))
return centers | 0.006163 |
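The update above is the weighted-mean step of fuzzy c-means: each center is the membership-weighted average of the data points. A self-contained sketch of the same computation with made-up data:
import numpy as np

data = np.array([[0.0, 0.0], [1.0, 0.0], [9.0, 9.0], [10.0, 9.0]])
membership = np.array([[0.9, 0.1],
                       [0.8, 0.2],
                       [0.1, 0.9],
                       [0.2, 0.8]])   # rows are points, columns are clusters
centers = np.zeros((membership.shape[1], data.shape[1]))
for i in range(membership.shape[1]):
    centers[i] = (membership[:, i] @ data) / np.sum(membership[:, i])
# centers[0] lands near the first two points, centers[1] near the last two.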
def prune_dupes(self):
"""Remove all but the last entry for a given resource URI.
Returns the number of entries removed. Also removes all entries for a
given URI where the first entry is a create and the last entry is a
delete.
"""
n = 0
pruned1 = []
seen = set()
deletes = {}
for r in reversed(self.resources):
if (r.uri in seen):
n += 1
if (r.uri in deletes):
deletes[r.uri] = r.change
else:
pruned1.append(r)
seen.add(r.uri)
if (r.change == 'deleted'):
deletes[r.uri] = r.change
# go through all deletes and prune if first was create
pruned2 = []
for r in reversed(pruned1):
if (r.uri in deletes and deletes[r.uri] == 'created'):
n += 1
else:
pruned2.append(r)
self.resources = pruned2
return(n) | 0.001963 |
async def volume(gc: GroupControl, volume):
"""Adjust volume [-100, 100]"""
click.echo("Setting volume to %s" % volume)
click.echo(await gc.set_group_volume(volume)) | 0.00565 |
def _insert_or_update(self, resourcetype, source, mode='insert', hhclass='Service'):
"""
Insert or update a record in the repository
"""
keywords = []
if self.filter is not None:
catalog = Catalog.objects.get(id=int(self.filter.split()[-1]))
try:
if hhclass == 'Layer':
# TODO: better way of figuring out duplicates
match = Layer.objects.filter(name=source.name,
title=source.title,
abstract=source.abstract,
is_monitored=False)
matches = match.all()
if matches:
if mode == 'insert':
raise RuntimeError('HHypermap error: Layer %d \'%s\' already exists' % (
matches[0].id, source.title))
elif mode == 'update':
match.update(
name=source.name,
title=source.title,
abstract=source.abstract,
is_monitored=False,
xml=source.xml,
wkt_geometry=source.wkt_geometry,
anytext=util.get_anytext([source.title, source.abstract, source.keywords_csv])
)
service = get_service(source.xml)
res, keywords = create_layer_from_metadata_xml(resourcetype, source.xml,
monitor=False, service=service,
catalog=catalog)
res.save()
LOGGER.debug('Indexing layer with id %s on search engine' % res.uuid)
index_layer(res.id, use_cache=True)
else:
if resourcetype == 'http://www.opengis.net/cat/csw/2.0.2':
res = Endpoint(url=source, catalog=catalog)
else:
res = Service(type=HYPERMAP_SERVICE_TYPES[resourcetype], url=source, catalog=catalog)
res.save()
if keywords:
for kw in keywords:
res.keywords.add(kw)
except Exception as err:
raise RuntimeError('HHypermap error: %s' % err)
# return a list of ids that were inserted or updated
ids = []
if hhclass == 'Layer':
ids.append({'identifier': res.uuid, 'title': res.title})
else:
if resourcetype == 'http://www.opengis.net/cat/csw/2.0.2':
for res in Endpoint.objects.filter(url=source).all():
ids.append({'identifier': res.uuid, 'title': res.url})
else:
for res in Service.objects.filter(url=source).all():
ids.append({'identifier': res.uuid, 'title': res.title})
return ids | 0.002952 |
def query(self, req, timeout=None, metadata=None, credentials=None):
"""Runs query operation."""
return self.stub.Query(req, timeout=timeout, metadata=metadata,
credentials=credentials) | 0.008621 |
def _digest_auth_in_stage2(self, username, _unused, stanza):
"""Handle the second stage (<iq type='set'/>) of legacy "digest"
authentication.
[server only]"""
digest=stanza.xpath_eval("a:query/a:digest",{"a":"jabber:iq:auth"})
if digest:
digest=digest[0].getContent()
if not digest:
self.__logger.debug("No digest found in digest auth request")
iq=stanza.make_error_response("bad-request")
self.send(iq)
return
password,pwformat=self.get_password(username)
if not password or pwformat!="plain":
iq=stanza.make_error_response("bad-request")
e=iq.get_error()
e.add_custom_condition('jabber:iq:auth:error',"user-unauthorized")
self.send(iq)
return
mydigest = hashlib.sha1(to_utf8(self.stream_id)+to_utf8(password)).hexdigest()
if mydigest==digest:
iq=stanza.make_result_response()
self.send(iq)
self.peer_authenticated=True
self.auth_method_used="digest"
self.state_change("authorized",self.peer)
self._post_auth()
else:
self.__logger.debug("Digest auth failed: %r != %r" % (digest,mydigest))
iq=stanza.make_error_response("bad-request")
e=iq.get_error()
e.add_custom_condition('jabber:iq:auth:error',"user-unauthorized")
self.send(iq) | 0.01626 |
def streamitem_to_key_data(si):
'''
extract the parts of a StreamItem that go into a kvlayer key,
convert StreamItem to blob for storage.
return (kvlayer key tuple), data blob
'''
key = key_for_stream_item(si)
data = streamcorpus.serialize(si)
errors, data = streamcorpus.compress_and_encrypt(data)
assert not errors, errors
return key, data | 0.002618 |
def logfile(targetfile="ros.log"):
"""
Set the file for Quilt to log to
targetfile:
Change the file to log to.
"""
    # basicConfig is a module-level function; Logger objects have no basicConfig method
    logging.basicConfig(filename=str(targetfile)) | 0.004587 |
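For context, logging.basicConfig only takes effect the first time it is called against an unconfigured root logger; a short sketch of the intended behaviour (file name illustrative):
import logging

logging.basicConfig(filename="ros.log", level=logging.INFO)
logging.getLogger(__name__).info("messages now go to ros.log")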
def rnumlistwithreplacement(howmany, max, min=0):
"""Returns a list of howmany integers with a maximum value = max.
The minimum value defaults to zero."""
if checkquota() < 1:
raise Exception("Your www.random.org quota has already run out.")
requestparam = build_request_parameterWR(howmany, min, max)
request = urllib.request.Request(requestparam)
request.add_header('User-Agent', 'randomwrapy/0.1 very alpha')
opener = urllib.request.build_opener()
numlist = opener.open(request).read()
return numlist.split() | 0.001802 |
def normprob(d, snrs, inds=None, version=2):
""" Uses observed SNR distribution to calculate normal probability SNR
Uses state dict to calculate number of trials.
snrs is list of all snrs in distribution.
version used to toggle for tests. version 2 is fastest and returns zeros for filtered snr values.
Returns list of expected snr given each input value's frequency of occurrence via the normal probability assumption
"""
if not inds: inds = range(len(snrs))
# define norm quantile functions
Z = lambda quan: np.sqrt(2)*erfinv( 2*quan - 1)
quan = lambda ntrials, i: (ntrials + 1/2. - i)/ntrials
# calc number of trials
npix = d['npixx']*d['npixy']
    if 'goodintcount' in d:  # dict.has_key was removed in Python 3
nints = d['goodintcount']
else:
nints = d['nints']
ndms = len(d['dmarr'])
dtfactor = np.sum([1./i for i in d['dtarr']]) # assumes dedisperse-all algorithm
ntrials = npix*nints*ndms*dtfactor
logger.info('Calculating normal probability distribution for npix*nints*ndms*dtfactor = %d' % (ntrials))
# calc normal quantile
if version == 2:
# purely sort and numpy-based
sortinds = np.argsort(snrs[inds])
lenpos = len(np.where(snrs[inds] >= 0)[0])
lenneg = len(np.where(snrs[inds] < 0)[0])
unsortinds = np.zeros(len(sortinds), dtype=int)
unsortinds[sortinds] = np.arange(len(sortinds))
rank = np.concatenate( (np.arange(1, lenneg+1), np.arange(1, lenpos+1)[::-1]) )
logger.debug('{} {}'.format(rank, sortinds))
zval = Z(quan(ntrials, rank[unsortinds]))
if inds != range(len(snrs)): # add zeros for filtered data to match length to original snr array
zval = np.array([zval[inds.index(i)] if i in inds else 0 for i in range(len(snrs))])
elif version == 1:
# numpy array based
snrpos = snrs[inds][np.where(snrs[inds] > 0)]
snrneg = snrs[inds][np.where(snrs[inds] < 0)]
snrsortpos = np.sort(snrpos)[::-1]
snrsortneg = np.sort(snrneg)
logger.debug('Sorted pos/neg SNRs')
zval = []
for i,snr in enumerate(snrs):
if i in inds:
if snr in snrsortpos:
zval.append(Z(quan(ntrials, np.where(snr == snrsortpos)[0][0]+1)))
elif snr in snrsortneg:
zval.append(Z(quan(ntrials, np.where(snr == snrsortneg)[0][0]+1)))
elif version == 0:
# list based
snrsortpos = []
snrsortneg = []
for i in inds:
if snrs[i] > 0:
snrsortpos.append(snrs[i])
elif snrs[i] < 0:
snrsortneg.append(abs(snrs[i]))
snrsortpos = sorted(snrsortpos, reverse=True)
snrsortneg = sorted(snrsortneg, reverse=True)
logger.debug('Sorted pos/neg SNRs')
zval = []
for (i, snr) in enumerate(snrs):
if snr >= 0 and i in inds:
zval.append(Z(quan(ntrials, snrsortpos.index(snr)+1)))
elif snr < 0 and i in inds:
zval.append(Z(quan(ntrials, snrsortneg.index(abs(snr))+1)))
else:
zval.append(0)
return zval | 0.006244 |
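The normal-quantile mapping used above can be exercised on its own; this sketch (trial count and ranks are made up) shows how a candidate's rank among ntrials is converted to an expected SNR under the normal assumption:
import numpy as np
from scipy.special import erfinv

Z = lambda quan: np.sqrt(2) * erfinv(2 * quan - 1)
quan = lambda ntrials, i: (ntrials + 1 / 2. - i) / ntrials

ntrials = 1000000             # e.g. npix * nints * ndms * dtfactor
print(Z(quan(ntrials, 1)))    # rank 1 (strongest candidate) -> roughly 4.9
print(Z(quan(ntrials, 100)))  # rank 100 -> a lower expected SNR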
def relation_get(attribute=None, unit=None, rid=None):
"""Attempt to use leader-get if supported in the current version of Juju,
otherwise falls back on relation-get.
Note that we only attempt to use leader-get if the provided rid is a peer
relation id or no relation id is provided (in which case we assume we are
within the peer relation context).
"""
try:
if rid in relation_ids('cluster'):
return leader_get(attribute, rid)
else:
raise NotImplementedError
except NotImplementedError:
return _relation_get(attribute=attribute, rid=rid, unit=unit) | 0.001587 |
def _get_account_policy(name):
'''
Get the entire accountPolicy and return it as a dictionary. For use by this
module only
:param str name: The user name
:return: a dictionary containing all values for the accountPolicy
:rtype: dict
:raises: CommandExecutionError on user not found or any other unknown error
'''
cmd = 'pwpolicy -u {0} -getpolicy'.format(name)
try:
ret = salt.utils.mac_utils.execute_return_result(cmd)
except CommandExecutionError as exc:
if 'Error: user <{0}> not found'.format(name) in exc.strerror:
raise CommandExecutionError('User not found: {0}'.format(name))
raise CommandExecutionError('Unknown error: {0}'.format(exc.strerror))
try:
policy_list = ret.split('\n')[1].split(' ')
policy_dict = {}
for policy in policy_list:
if '=' in policy:
key, value = policy.split('=')
policy_dict[key] = value
return policy_dict
except IndexError:
return {} | 0.000956 |
def repr_setattr(self, class_data):
"""Create code like this::
person = Person(name='Jack', person_id=1)
self.name____Jack = person
self.person_id____1 = person
person = Person(name='Paul', person_id=2)
self.name____Paul = person
self.person_id____2 = person
"""
def get_indexable_attributes(class_data):
def isvalid(text):
for char in r"""~`!#%^&*()+=[]{}|\:;"'/.,<> """:
if char in text:
return False
return True
indexable_attributes = list()
for key, value in class_data.get("metadata", dict()).items():
if isinstance(value, _int_type):
indexable_attributes.append(key)
elif isinstance(value, _str_type):
if isvalid(value):
indexable_attributes.append(key)
return indexable_attributes
if "subclass" in class_data:
for subclass_data in class_data["subclass"]:
instancename = self.formatted_instancename(subclass_data["classname"])
self.lines.append(self.Tab2 + self.repr_new_instance(subclass_data))
indexable_attributes = get_indexable_attributes(subclass_data)
for key, value in self.sorted_dict(subclass_data.get("metadata", dict())):
if key in indexable_attributes:
if isinstance(value, _int_type):
if value < 0:
self.lines.append(self.Tab2 + "self.%s____neg%s = %s" % (
key, -value, instancename))
else:
self.lines.append(self.Tab2 + "self.%s____%s = %s" % (
key, value, instancename))
else:
self.lines.append(self.Tab2 + "self.%s____%s = %s" % (
key, value, instancename))
self.lines.append(self.Tab2) | 0.006687 |
def _http_request(url,
method='GET',
headers=None,
data=None):
'''
Make the HTTP request and return the body as python object.
'''
req = requests.request(method,
url,
headers=headers,
data=data)
ret = _default_ret()
ok_status = METHOD_OK_STATUS.get(method, 200)
if req.status_code != ok_status:
ret.update({
'comment': req.json().get('error', '')
})
return ret
ret.update({
'result': True,
'out': req.json() if method != 'DELETE' else None # no body when DELETE
})
return ret | 0.002845 |
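The helper above relies on two module-level names that are not shown here; a plausible shape for them (an assumption, not the actual module) would be:
METHOD_OK_STATUS = {
    'GET': 200,
    'POST': 201,
    'DELETE': 204,
}

def _default_ret():
    return {
        'result': False,
        'comment': '',
        'out': None,
    }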
def escape_regex_special_chars(api_path):
"""
    Turns the non-parametrized path components into strings suitable for use
as a regex pattern. This primarily involves escaping special characters so
that the actual character is matched in the regex.
"""
def substitute(string, replacements):
pattern, repl = replacements
return re.sub(pattern, repl, string)
return functools.reduce(substitute, REGEX_REPLACEMENTS, api_path) | 0.002151 |
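REGEX_REPLACEMENTS is defined elsewhere in the module; a minimal, hypothetical value that makes the reduce above concrete:
import re
import functools

# Hypothetical pattern/replacement pairs; the real list may differ.
REGEX_REPLACEMENTS = [
    (re.escape('.'), r'\.'),
    (re.escape('('), r'\('),
    (re.escape(')'), r'\)'),
]

def substitute(string, replacements):
    pattern, repl = replacements
    return re.sub(pattern, repl, string)

print(functools.reduce(substitute, REGEX_REPLACEMENTS, '/pets.json'))  # /pets\.json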
def sa(simulated_array, observed_array, replace_nan=None, replace_inf=None,
remove_neg=False, remove_zero=False):
"""Compute the Spectral Angle (SA).
.. image:: /pictures/SA.png
**Range:** -π/2 ≤ SA < π/2, closer to 0 is better.
**Notes:** The spectral angle metric measures the angle between the two vectors in hyperspace.
It indicates how well the shape of the two series match – not magnitude.
Parameters
----------
simulated_array: one dimensional ndarray
An array of simulated data from the time series.
observed_array: one dimensional ndarray
An array of observed data from the time series.
replace_nan: float, optional
If given, indicates which value to replace NaN values with in the two arrays. If None, when
a NaN value is found at the i-th position in the observed OR simulated array, the i-th value
of the observed and simulated array are removed before the computation.
replace_inf: float, optional
If given, indicates which value to replace Inf values with in the two arrays. If None, when
an inf value is found at the i-th position in the observed OR simulated array, the i-th
value of the observed and simulated array are removed before the computation.
remove_neg: boolean, optional
If True, when a negative value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
remove_zero: boolean, optional
If true, when a zero value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
Returns
-------
float
The Spectral Angle value.
Examples
--------
>>> import HydroErr as he
>>> import numpy as np
>>> sim = np.array([5, 7, 9, 2, 4.5, 6.7])
>>> obs = np.array([4.7, 6, 10, 2.5, 4, 7])
>>> he.sa(sim, obs)
0.10816831366492945
References
----------
- Robila, S.A., Gershman, A., 2005. Spectral matching accuracy in processing hyperspectral
data, Signals, Circuits and Systems, 2005. ISSCS 2005. International Symposium on. IEEE,
pp. 163-166.
"""
# Treats data
simulated_array, observed_array = treat_values(
simulated_array,
observed_array,
replace_nan=replace_nan,
replace_inf=replace_inf,
remove_neg=remove_neg,
remove_zero=remove_zero
)
a = np.dot(simulated_array, observed_array)
b = np.linalg.norm(simulated_array) * np.linalg.norm(observed_array)
return np.arccos(a / b) | 0.004771 |
def bfd(self, **kwargs):
"""Configure BFD for BGP globally.
Args:
rbridge_id (str): Rbridge to configure. (1, 225, etc)
tx (str): BFD transmit interval in milliseconds (300, 500, etc)
rx (str): BFD receive interval in milliseconds (300, 500, etc)
multiplier (str): BFD multiplier. (3, 7, 5, etc)
delete (bool): True if BFD configuration should be deleted.
Default value will be False if not specified.
get (bool): Get config instead of editing config. (True, False)
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if `tx`, `rx`, or `multiplier` is not passed.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.230']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.bgp.bfd(rx='300', tx='300', multiplier='3',
... rbridge_id='230')
... output = dev.bgp.bfd(rx='300', tx='300', multiplier='3',
... rbridge_id='230', get=True)
... output = dev.bgp.bfd(rx='300', tx='300', multiplier='3',
... rbridge_id='230', delete=True)
"""
kwargs['min_tx'] = kwargs.pop('tx')
kwargs['min_rx'] = kwargs.pop('rx')
kwargs['delete'] = kwargs.pop('delete', False)
callback = kwargs.pop('callback', self._callback)
bfd_tx = self._bfd_tx(**kwargs)
bfd_rx = self._bfd_rx(**kwargs)
bfd_multiplier = self._bfd_multiplier(**kwargs)
if kwargs.pop('get', False):
return self._get_bfd(bfd_tx, bfd_rx, bfd_multiplier)
config = pynos.utilities.merge_xml(bfd_tx, bfd_rx)
config = pynos.utilities.merge_xml(config, bfd_multiplier)
return callback(config) | 0.000907 |
def from_spec(spec, kwargs):
"""
Creates an agent from a specification dict.
"""
agent = util.get_object(
obj=spec,
predefined_objects=tensorforce.agents.agents,
kwargs=kwargs
)
assert isinstance(agent, Agent)
return agent | 0.006369 |
def channels_rename(self, room_id, name, **kwargs):
"""Changes the name of the channel."""
return self.__call_api_post('channels.rename', roomId=room_id, name=name, kwargs=kwargs) | 0.015385 |
def add_semantic_data(self, path_as_list, value, key):
""" Adds a semantic data entry.
:param list path_as_list: The path in the vividict to enter the value
:param value: The value of the new entry.
:param key: The key of the new entry.
        :return: The path to the new entry, i.e. path_as_list + [key]
"""
assert isinstance(key, string_types)
target_dict = self.get_semantic_data(path_as_list)
target_dict[key] = value
return path_as_list + [key] | 0.004246 |
def add_elasticache_node(self, node, cluster, region):
''' Adds an ElastiCache node to the inventory and index, as long as
it is addressable '''
# Only want available nodes unless all_elasticache_nodes is True
if not self.all_elasticache_nodes and node['CacheNodeStatus'] != 'available':
return
# Select the best destination address
dest = node['Endpoint']['Address']
if not dest:
# Skip nodes we cannot address (e.g. private VPC subnet)
return
node_id = self.to_safe(cluster['CacheClusterId'] + '_' + node['CacheNodeId'])
# Add to index
self.index[dest] = [region, node_id]
# Inventory: Group by node ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[node_id] = [dest]
if self.nested_groups:
self.push_group(self.inventory, 'instances', node_id)
# Inventory: Group by region
if self.group_by_region:
self.push(self.inventory, region, dest)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone
if self.group_by_availability_zone:
self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
if self.nested_groups:
if self.group_by_region:
self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])
# Inventory: Group by node type
if self.group_by_instance_type:
type_name = self.to_safe('type_' + cluster['CacheNodeType'])
self.push(self.inventory, type_name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'types', type_name)
# Inventory: Group by VPC (information not available in the current
# AWS API version for ElastiCache)
# Inventory: Group by security group
if self.group_by_security_group:
# Check for the existence of the 'SecurityGroups' key and also if
# this key has some value. When the cluster is not placed in a SG
# the query can return None here and cause an error.
if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
for security_group in cluster['SecurityGroups']:
key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
self.push(self.inventory, key, dest)
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
# Inventory: Group by engine
if self.group_by_elasticache_engine:
self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_engines', self.to_safe("elasticache_" + cluster['Engine']))
# Inventory: Group by parameter group (done at cluster level)
# Inventory: Group by replication group (done at cluster level)
# Inventory: Group by ElastiCache Cluster
if self.group_by_elasticache_cluster:
self.push(self.inventory, self.to_safe("elasticache_cluster_" + cluster['CacheClusterId']), dest)
# Global Tag: all ElastiCache nodes
self.push(self.inventory, 'elasticache_nodes', dest)
host_info = self.get_host_info_dict_from_describe_dict(node)
if dest in self.inventory["_meta"]["hostvars"]:
self.inventory["_meta"]["hostvars"][dest].update(host_info)
else:
self.inventory["_meta"]["hostvars"][dest] = host_info | 0.003119 |
def get_setup_attribute(attribute, setup_path):
"""
Runs the project's setup.py script in a process with an arg that
will print out the value for a particular attribute such as author
or version, and returns the value.
"""
args = ["python", setup_path, "--%s" % attribute]
return Popen(args, stdout=PIPE).communicate()[0].decode('utf-8').strip() | 0.002681 |
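A quick illustration of the helper above; the setup.py path is hypothetical, and the attribute must be one that setup.py understands (e.g. version, author):
version = get_setup_attribute("version", "/path/to/project/setup.py")
author = get_setup_attribute("author", "/path/to/project/setup.py")
print(version, author)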
def check_task_status_and_id(task_json):
"""
Read status of import json and parse
:param task_json: status json to parse
:return: (stillRunning, imageId)
"""
if task_json.get('ImportImageTasks') is not None:
task = task_json['ImportImageTasks'][0]
else:
task = task_json
current_status = task['Status']
image_id = task['ImportTaskId']
if current_status == 'completed':
print "The import has completed succesfully as ID: {}".format(image_id)
return False, image_id
elif current_status == 'deleting':
print "The import job has been cancelled for some reason"
return False, None
elif current_status == 'deleted':
print "The import job was cancelled"
return False, None
else:
print "The current import job for id {} status is: {}".format(image_id, current_status)
print "sleeping for 30 seconds"
time.sleep(30)
return True, image_id | 0.003714 |
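A minimal polling loop around the helper above, assuming a boto3 EC2 client and an existing import task (both illustrative):
import boto3

ec2 = boto3.client("ec2")
task_id = "import-ami-0123456789abcdef0"   # hypothetical task id
still_running, image_id = True, None
while still_running:
    status = ec2.describe_import_image_tasks(ImportTaskIds=[task_id])
    still_running, image_id = check_task_status_and_id(status)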
def create_component(self, name, description=None):
"""
Create a sub component in the business component.
:param name: The new component's name.
:param description: The new component's description.
:returns: The created component.
"""
new_comp = Component(name, self.gl, description=description)
new_comp.set_parent_path(self.path)
self.components.append(new_comp)
return new_comp | 0.00432 |
def pull_byte(self, stack_pointer):
""" pulled a byte from stack """
addr = stack_pointer.value
byte = self.memory.read_byte(addr)
# log.info(
# log.error(
# "%x|\tpull $%x from %s stack at $%x\t|%s",
# self.last_op_address, byte, stack_pointer.name, addr,
# self.cfg.mem_info.get_shortest(self.last_op_address)
# )
# FIXME: self.system_stack_pointer += 1
stack_pointer.increment(1)
return byte | 0.004008 |
def deepcopy(x, memo=None, _nil=[]):
"""Deep copy operation on arbitrary Python objects.
See the module's __doc__ string for more info.
"""
if memo is None:
memo = {}
d = id(x)
y = memo.get(d, _nil)
if y is not _nil:
return y
cls = type(x)
copier = _deepcopy_dispatch.get(cls)
if copier:
y = copier(x, memo)
else:
try:
issc = issubclass(cls, type)
except TypeError: # cls is not a class (old Boost; see SF #502085)
issc = 0
if issc:
y = _deepcopy_atomic(x, memo)
else:
copier = getattr(x, "__deepcopy__", None)
if copier:
y = copier(memo)
else:
reductor = dispatch_table.get(cls)
if reductor:
rv = reductor(x)
else:
reductor = getattr(x, "__reduce_ex__", None)
if reductor:
rv = reductor(2)
else:
reductor = getattr(x, "__reduce__", None)
if reductor:
rv = reductor()
else:
raise Error(
"un(deep)copyable object of type %s" % cls)
y = _reconstruct(x, rv, 1, memo)
memo[d] = y
_keep_alive(x, memo) # Make sure x lives at least as long as d
return y | 0.002026 |
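A small demonstration of why the memo dictionary above matters: it preserves shared references and handles cycles (the same holds for the standard copy.deepcopy):
shared = [1, 2, 3]
original = {"a": shared, "b": shared}
original["self"] = original                # introduce a cycle

copied = deepcopy(original)
assert copied["a"] is copied["b"]          # shared structure preserved
assert copied["self"] is copied            # cycle reproduced, no infinite recursion
assert copied["a"] is not shared           # still a deep copy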
def choice(self, queues, connection):
"""
Chooses a random queue for messages to specified connection.
@param queues: A C{dict} mapping queue name to queues (sets of frames) to which
specified connection is subscribed.
@type queues: C{dict} of C{str} to C{set} of L{stompclient.frame.Frame}
@param connection: The connection that is going to be delivered the frame(s).
@type connection: L{coilmq.server.StompConnection}
@return: A random queue destination or None if list is empty.
@rtype: C{str}
"""
if not queues:
return None
return random.choice(list(queues.keys())) | 0.008596 |
def create_geometry(self, input_geometry, upper_depth, lower_depth):
'''
        If the geometry is defined as a numpy array, create an instance of the
        nhlib.geo.point.Point class; otherwise, if it is already an instance of
        that class, accept it as-is.
:param input_geometry:
Input geometry (point) as either
i) instance of nhlib.geo.point.Point class
ii) numpy.ndarray [Longitude, Latitude]
:param float upper_depth:
Upper seismogenic depth (km)
:param float lower_depth:
Lower seismogenic depth (km)
'''
self._check_seismogenic_depths(upper_depth, lower_depth)
# Check/create the geometry class
if not isinstance(input_geometry, Point):
if not isinstance(input_geometry, np.ndarray):
raise ValueError('Unrecognised or unsupported geometry '
'definition')
self.geometry = Point(input_geometry[0], input_geometry[1])
else:
self.geometry = input_geometry | 0.001881 |