| text (string, lengths 78 to 104k) | score (float64, 0 to 0.18) |
|---|---|
def squash_unicode(obj):
"""coerce unicode back to bytestrings."""
if isinstance(obj,dict):
for key in obj.keys():
obj[key] = squash_unicode(obj[key])
if isinstance(key, unicode):
obj[squash_unicode(key)] = obj.pop(key)
elif isinstance(obj, list):
for i,v in enumerate(obj):
obj[i] = squash_unicode(v)
elif isinstance(obj, unicode):
obj = obj.encode('utf8')
return obj | 0.006466 |
def evaluate(inputs_stream, predict_fun, metric_funs, rng):
"""Evaluate.
Args:
inputs_stream: iterable of inputs to evaluate on.
predict_fun: function from inputs to predictions. params should already be
partially applied.
metric_funs: dict from metric name to metric function, which takes inputs
and predictions and returns a scalar metric value.
rng: random number generator.
Returns:
metrics: dict from metric name to metric value averaged over the number of
inputs.
"""
metrics = collections.defaultdict(float)
count = 0
for inp in inputs_stream:
count += 1
rng, subrng = jax_random.split(rng)
preds = predict_fun(inp[0], rng=subrng)
for m, f in six.iteritems(metric_funs):
metrics[m] += f(inp, preds)
return {m: v / count for (m, v) in six.iteritems(metrics)} | 0.008314 |
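A minimal, dependency-free sketch of the running-average pattern used above, with the JAX RNG handling and six stripped out; the metric functions and inputs here are made up for illustration.

import collections

metric_funs = {'sum': lambda inp, preds: inp + preds,
               'diff': lambda inp, preds: inp - preds}
metrics = collections.defaultdict(float)
count = 0
for inp, preds in [(1, 2), (3, 4)]:
    count += 1
    for m, f in metric_funs.items():
        metrics[m] += f(inp, preds)
print({m: v / count for m, v in metrics.items()})  # {'sum': 5.0, 'diff': -1.0}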
def setup_graph(self):
""" Will setup the assign operator for that variable. """
all_vars = tfv1.global_variables() + tfv1.local_variables()
for v in all_vars:
if v.name == self.var_name:
self.var = v
break
else:
raise ValueError("{} is not a variable in the graph!".format(self.var_name)) | 0.007958 |
def get_file_clusters(self, date):
"""Retrieves file similarity clusters for a given time frame.
Args:
date: the specific date for which we want the clustering details.
Example: 'date': '2013-09-10'
Returns:
A dict with the VT report.
"""
api_name = 'virustotal-file-clusters'
(all_responses, resources) = self._bulk_cache_lookup(api_name, date)
response = self._request_reports("date", date, 'file/clusters')
self._extract_response_chunks(all_responses, response, api_name)
return all_responses | 0.0033 |
def _worst_case_load(self, worst_case_scale_factors,
peakload_consumption_ratio, modes):
"""
Define worst case load time series for each sector.
Parameters
----------
worst_case_scale_factors : dict
Scale factors defined in config file 'config_timeseries.cfg'.
Scale factors describe actual power to nominal power ratio of in
worst-case scenarios.
peakload_consumption_ratio : dict
Ratios of peak load to annual consumption per sector, defined in
config file 'config_timeseries.cfg'
modes : list
List with worst-cases to generate time series for. Can be
'feedin_case', 'load_case' or both.
"""
sectors = ['residential', 'retail', 'industrial', 'agricultural']
lv_power_scaling = np.array(
[worst_case_scale_factors['lv_{}_load'.format(mode)]
for mode in modes])
mv_power_scaling = np.array(
[worst_case_scale_factors['mv_{}_load'.format(mode)]
for mode in modes])
lv = {(sector, 'lv'): peakload_consumption_ratio[sector] *
lv_power_scaling
for sector in sectors}
mv = {(sector, 'mv'): peakload_consumption_ratio[sector] *
mv_power_scaling
for sector in sectors}
self.timeseries.load = pd.DataFrame({**lv, **mv},
index=self.timeseries.timeindex) | 0.003224 |
def reset(self, data, size):
"""
Set new contents for frame
"""
return lib.zframe_reset(self._as_parameter_, data, size) | 0.013158 |
def p_statements_statement(p):
""" statements : statement
| statements_co statement
"""
if len(p) == 2:
p[0] = make_block(p[1])
else:
p[0] = make_block(p[1], p[2]) | 0.004673 |
def as_dict(self):
"""
Json-serializable dict representation of Molecule
"""
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"charge": self._charge,
"spin_multiplicity": self._spin_multiplicity,
"sites": []}
for site in self:
site_dict = site.as_dict()
del site_dict["@module"]
del site_dict["@class"]
d["sites"].append(site_dict)
return d | 0.003868 |
def update_validators():
"""
Call this to update the global nrml.validators
"""
validators.update({
'fragilityFunction.id': valid.utf8, # taxonomy
'vulnerabilityFunction.id': valid.utf8, # taxonomy
'consequenceFunction.id': valid.utf8, # taxonomy
'asset.id': valid.asset_id,
'costType.name': valid.cost_type,
'costType.type': valid.cost_type_type,
'cost.type': valid.cost_type,
'area.type': valid.name,
'isAbsolute': valid.boolean,
'insuranceLimit': valid.positivefloat,
'deductible': valid.positivefloat,
'occupants': valid.positivefloat,
'value': valid.positivefloat,
'retrofitted': valid.positivefloat,
'number': valid.compose(valid.positivefloat, valid.nonzero),
'vulnerabilitySetID': str, # any ASCII string is fine
'vulnerabilityFunctionID': str, # any ASCII string is fine
'lossCategory': valid.utf8, # a description field
'lr': valid.probability,
'lossRatio': valid.positivefloats,
'coefficientsVariation': valid.positivefloats,
'probabilisticDistribution': valid.Choice('LN', 'BT'),
'dist': valid.Choice('LN', 'BT', 'PM'),
'meanLRs': valid.positivefloats,
'covLRs': valid.positivefloats,
'format': valid.ChoiceCI('discrete', 'continuous'),
'mean': valid.positivefloat,
'stddev': valid.positivefloat,
'minIML': valid.positivefloat,
'maxIML': valid.positivefloat,
'limitStates': valid.namelist,
'noDamageLimit': valid.NoneOr(valid.positivefloat),
'loss_type': valid_loss_types,
'losses': valid.positivefloats,
'averageLoss': valid.positivefloat,
'stdDevLoss': valid.positivefloat,
'ffs.type': valid.ChoiceCI('lognormal'),
'assetLifeExpectancy': valid.positivefloat,
'interestRate': valid.positivefloat,
'lossType': valid_loss_types,
'aalOrig': valid.positivefloat,
'aalRetr': valid.positivefloat,
'ratio': valid.positivefloat,
'cf': asset_mean_stddev,
'damage': damage_triple,
'damageStates': valid.namelist,
'taxonomy': taxonomy,
'tagNames': valid.namelist,
}) | 0.000439 |
def get(self, section, option):
"""Gets an option value for a given section.
Args:
section (str): section name
option (str): option name
Returns:
:class:`Option`: Option object holding key/value pair
"""
if not self.has_section(section):
raise NoSectionError(section) from None
section = self.__getitem__(section)
option = self.optionxform(option)
try:
value = section[option]
except KeyError:
raise NoOptionError(option, section)
return value | 0.003339 |
def fastaAlignmentRead(fasta, mapFn=(lambda x : x), l=None):
"""
reads in columns of multiple alignment and returns them iteratively
"""
if l is None:
l = _getMultiFastaOffsets(fasta)
else:
l = l[:]
seqNo = len(l)
for i in xrange(0, seqNo):
j = open(fasta, 'r')
j.seek(l[i])
l[i] = j
column = [sys.maxint]*seqNo
if seqNo != 0:
while True:
for j in xrange(0, seqNo):
i = l[j].read(1)
while i == '\n':
i = l[j].read(1)
column[j] = i
if column[0] == '>' or column[0] == '':
for j in xrange(1, seqNo):
assert column[j] == '>' or column[j] == ''
break
for j in xrange(1, seqNo):
assert column[j] != '>' and column[j] != ''
column[j] = mapFn(column[j])
yield column[:]
for i in l:
i.close() | 0.008147 |
def get_python_version(path):
# type: (str) -> str
"""Get python version string using subprocess from a given path."""
version_cmd = [path, "-c", "import sys; print(sys.version.split()[0])"]
try:
c = vistir.misc.run(
version_cmd,
block=True,
nospin=True,
return_object=True,
combine_stderr=False,
write_to_stdout=False,
)
except OSError:
raise InvalidPythonVersion("%s is not a valid python path" % path)
if not c.out:
raise InvalidPythonVersion("%s is not a valid python path" % path)
return c.out.strip() | 0.001567 |
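A rough stdlib-only equivalent of the same idea; this is not the vistir-based implementation above, just a sketch using subprocess, and the exception type is a placeholder.

import subprocess

def get_python_version_stdlib(path):
    cmd = [path, "-c", "import sys; print(sys.version.split()[0])"]
    try:
        out = subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
    except (OSError, subprocess.CalledProcessError):
        raise ValueError("%s is not a valid python path" % path)
    if not out:
        raise ValueError("%s is not a valid python path" % path)
    return out.decode().strip()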
def update_req(req):
"""Updates a given req object with the latest version."""
if not req.name:
return req, None
info = get_package_info(req.name)
if info['info'].get('_pypi_hidden'):
print('{} is hidden on PyPI and will not be updated.'.format(req))
return req, None
if _is_pinned(req) and _is_version_range(req):
print('{} is pinned to a range and will not be updated.'.format(req))
return req, None
newest_version = _get_newest_version(info)
current_spec = next(iter(req.specifier)) if req.specifier else None
current_version = current_spec.version if current_spec else None
new_spec = Specifier(u'=={}'.format(newest_version))
if not current_spec or current_spec._spec != new_spec._spec:
req.specifier = new_spec
update_info = (
req.name,
current_version,
newest_version)
return req, update_info
return req, None | 0.001036 |
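For context, a small sketch of how the specifier comparison above behaves, assuming `Specifier` is `packaging.specifiers.Specifier`; the version numbers are made up, and `_spec` (used by the snippet) is a private attribute.

from packaging.specifiers import Specifier

current_spec = Specifier("==1.2.0")
new_spec = Specifier("==1.3.1")
print(current_spec.version)                  # '1.2.0'
print(current_spec._spec != new_spec._spec)  # True -> the requirement would be updated
print("1.3.1" in new_spec)                   # True -> the new pin matches the new version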
def set_default_init_cli_human_cmds(self): # pylint: disable=no-self-use
"""
Default commands to restore cli to human readable state are echo on, set --vt100 on,
set --retcode false.
:return: List of default commands to restore cli to human readable format
"""
post_cli_cmds = []
post_cli_cmds.append("echo on")
post_cli_cmds.append("set --vt100 on")
post_cli_cmds.append(["set --retcode false", False, False]) # last True is wait=<Boolean>
return post_cli_cmds | 0.009191 |
def rand_imancon(X, rho):
"""Iman-Conover Method to generate random ordinal variables
(Implementation adopted from Ekstrom, 2005)
X : ndarray
<obs x cols> matrix with "cols" ordinal variables
that are uncorrelated.
rho : ndarray
Spearman Rank Correlation Matrix
Links
* Iman, R.L., Conover, W.J., 1982. A distribution-free approach to
inducing rank correlation among input variables. Communications
in Statistics - Simulation and Computation 11, 311–334.
https://doi.org/10.1080/03610918208812265
* Ekstrom, P.-A., n.d. A Simulation Toolbox for Sensitivity Analysis 57.
http://ecolego.facilia.se/ecolego/files/Eikos_thesis.pdf
"""
import numpy as np
import scipy.stats as sstat
import scipy.special as sspec
# data prep
n, d = X.shape
# Ordering
T = np.corrcoef(X, rowvar=0) # T
Q = np.linalg.cholesky(T) # Q
invQ = np.linalg.inv(Q) # inv(Q)
P = np.linalg.cholesky(rho) # P
S = np.dot(invQ, P) # S=P*inv(Q)
# get ranks of 'X'
rnks = np.nan * np.empty((n, d))
for k in range(0, d):
rnks[:, k] = sstat.rankdata(X[:, k], method='average')
# create Rank Scores
rnkscore = -np.sqrt(2.0) * sspec.erfcinv(2.0 * rnks / (n + 1))
# the 'Y' variables have the same correlation matrix
Y = np.dot(rnkscore, S.T)
# get ranks of 'Y'
rnks = np.nan * np.empty((n, d))
for k in range(0, d):
rnks[:, k] = sstat.rankdata(Y[:, k], method='average')
rnks = rnks.astype(int)
# Sort X what will decorrelated X
X = np.sort(X, axis=0)
# Rerank X
Z = np.nan * np.empty((n, d))
for k in range(0, d):
Z[:, k] = X[rnks[:, k] - 1, k]
# done
return Z | 0.000567 |
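A hedged usage sketch, assuming rand_imancon is importable from this module: start from two uncorrelated columns, request a Spearman correlation of 0.7, and check what was induced.

import numpy as np
import scipy.stats as sstat

np.random.seed(42)
X = np.random.rand(5000, 2)                  # two independent uniform columns
rho = np.array([[1.0, 0.7],
                [0.7, 1.0]])                 # target Spearman correlation
Z = rand_imancon(X, rho)
achieved, _ = sstat.spearmanr(Z[:, 0], Z[:, 1])
print(achieved)                              # close to the requested 0.7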
def add_handlers(self, host_pattern: str, host_handlers: _RuleList) -> None:
"""Appends the given handlers to our handler list.
Host patterns are processed sequentially in the order they were
added. All matching patterns will be considered.
"""
host_matcher = HostMatches(host_pattern)
rule = Rule(host_matcher, _ApplicationRouter(self, host_handlers))
self.default_router.rules.insert(-1, rule)
if self.default_host is not None:
self.wildcard_router.add_rules(
[(DefaultHostMatches(self, host_matcher.host_pattern), host_handlers)]
) | 0.00468 |
def ModifyInstance(self, ModifiedInstance, IncludeQualifiers=None,
PropertyList=None, **extra):
# pylint: disable=invalid-name,line-too-long
"""
Modify the property values of an instance.
This method performs the ModifyInstance operation
(see :term:`DSP0200`). See :ref:`WBEM operations` for a list of all
methods performing such operations.
The `PropertyList` parameter determines the set of properties that are
designated to be modified (see its description for details).
The properties provided in the `ModifiedInstance` parameter specify
the new property values for the properties that are designated to be
modified.
Pywbem sends the property values provided in the `ModifiedInstance`
parameter to the WBEM server as provided; it does not add any default
values for properties not provided but designated to be modified, nor
does it reduce the properties by those not designated to be modified.
The properties that are actually modified by the WBEM server as a result
of this operation depend on a number of things:
* The WBEM server will reject modification requests for key properties
and for properties that are not exposed by the creation class of the
target instance.
* The WBEM server may consider some properties as read-only, as a
result of requirements at the CIM modeling level (schema or
management profiles), or as a result of an implementation decision.
Note that the WRITE qualifier on a property is not a safe indicator
as to whether the property can actually be modified. It is an
expression at the level of the CIM schema that may or may not be
considered in DMTF management profiles or in implementations.
Specifically, a qualifier value of True on a property does not
guarantee modifiability of the property, and a value of False does
not prevent modifiability.
* The WBEM server may detect invalid new values or conflicts resulting
from the new property values and may reject modification of a property
for such reasons.
If the WBEM server rejects modification of a property for any reason,
it will cause this operation to fail and will not modify any property
on the target instance. If this operation succeeds, all properties
designated to be modified have their new values (see the description
of the `ModifiedInstance` parameter for details on how the new values
are determined).
Note that properties (including properties not designated to be
modified) may change their values as an indirect result of this
operation. For example, a property that was not designated to be
modified may be derived from another property that was modified, and
may show a changed value due to that.
If the operation succeeds, this method returns.
Otherwise, this method raises an exception.
Parameters:
ModifiedInstance (:class:`~pywbem.CIMInstance`):
A representation of the modified instance, also indicating its
instance path.
The `path` attribute of this object identifies the instance to be
modified. Its `keybindings` attribute is required. If its
`namespace` attribute is `None`, the default namespace of the
connection will be used. Its `host` attribute will be ignored.
The `classname` attribute of the instance path and the `classname`
attribute of the instance must specify the same class name.
The properties defined in this object specify the new property
values (including `None` for NULL). If a property is designated to
be modified but is not specified in this object, the WBEM server
will use the default value of the property declaration if specified
(including `None`), and otherwise may update the property to any
value (including `None`).
Typically, this object has been retrieved by other operations,
such as :meth:`~pywbem.WBEMConnection.GetInstance`.
IncludeQualifiers (:class:`py:bool`):
Indicates that qualifiers are to be modified as specified in the
`ModifiedInstance` parameter, as follows:
* If `False`, qualifiers not modified.
* If `True`, qualifiers are modified if the WBEM server implements
support for this parameter.
* If `None`, this parameter is not passed to the WBEM server, and
causes the server-implemented default to be used. :term:`DSP0200`
defines that the server-implemented default is `True`.
This parameter has been deprecated in :term:`DSP0200`. Clients
cannot rely on qualifiers to be modified.
PropertyList (:term:`string` or :term:`py:iterable` of :term:`string`):
This parameter defines which properties are designated to be
modified.
This parameter is an iterable specifying the names of the
properties, or a string that specifies a single property name. In
all cases, the property names are matched case insensitively.
The specified properties are designated to be modified. Properties
not specified are not designated to be modified.
An empty iterable indicates that no properties are designated to be
modified.
If `None`, DSP0200 states that the properties with values different
from the current values in the instance are designated to be
modified, but for all practical purposes this is equivalent to
stating that all properties exposed by the instance are designated
to be modified.
**extra :
Additional keyword arguments are passed as additional operation
parameters to the WBEM server.
Note that :term:`DSP0200` does not define any additional parameters
for this operation.
Raises:
Exceptions described in :class:`~pywbem.WBEMConnection`.
""" # noqa: E501
exc = None
method_name = 'ModifyInstance'
if self._operation_recorders:
self.operation_recorder_reset()
self.operation_recorder_stage_pywbem_args(
method=method_name,
ModifiedInstance=ModifiedInstance,
IncludeQualifiers=IncludeQualifiers,
PropertyList=PropertyList,
**extra)
try:
stats = self.statistics.start_timer('ModifyInstance')
# Must pass a named CIMInstance here (i.e path attribute set)
if ModifiedInstance.path is None:
raise ValueError(
'ModifiedInstance parameter must have path attribute set')
if ModifiedInstance.path.classname is None:
raise ValueError(
'ModifiedInstance parameter must have classname set in '
' path')
if ModifiedInstance.classname is None:
raise ValueError(
'ModifiedInstance parameter must have classname set in '
'instance')
namespace = self._iparam_namespace_from_objectname(
ModifiedInstance.path, 'ModifiedInstance.path')
PropertyList = _iparam_propertylist(PropertyList)
# Strip off host and namespace to avoid producing an INSTANCEPATH or
# LOCALINSTANCEPATH element instead of the desired INSTANCENAME
# element.
instance = ModifiedInstance.copy()
instance.path.namespace = None
instance.path.host = None
self._imethodcall(
method_name,
namespace,
ModifiedInstance=instance,
IncludeQualifiers=IncludeQualifiers,
PropertyList=PropertyList,
has_return_value=False,
**extra)
return
except (CIMXMLParseError, XMLParseError) as exce:
exce.request_data = self.last_raw_request
exce.response_data = self.last_raw_reply
exc = exce
raise
except Exception as exce:
exc = exce
raise
finally:
self._last_operation_time = stats.stop_timer(
self.last_request_len, self.last_reply_len,
self.last_server_response_time, exc)
if self._operation_recorders:
self.operation_recorder_stage_result(None, exc) | 0.000563 |
def plotSet(imgDir, posExTime, outDir, show_legend,
show_plots, save_to_file, ftype):
'''
creates plots showing the fitted GAUSSIAN peaks, the histogram, and a smoothed histogram
from all images within [imgDir]
posExTime - position range of the exposure time in the image name e.g.: img_30s.jpg -> (4,5)
outDir - dirname to save the output images
show_legend - True/False
show_plots - display the result on screen
save_to_file - save the result to file
ftype - file type of the output images
'''
xvals = []
hist = []
peaks = []
exTimes = []
max_border = 0
if not imgDir.exists():
raise Exception("image dir doesn't exist")
for n, f in enumerate(imgDir):
print(f)
try:
# if imgDir.join(f).isfile():
img = imgDir.join(f)
s = FitHistogramPeaks(img)
xvals.append(s.xvals)
hist.append(s.yvals)
# smoothedHist.append(s.yvals2)
peaks.append(s.fitValues())
if s.border() > max_border:
max_border = s.plotBorder()
exTimes.append(float(f[posExTime[0]:posExTime[1] + 1]))
except:
pass
nx = 2
ny = int(len(hist) // nx) + len(hist) % nx
fig, ax = plt.subplots(ny, nx)
# flatten 2d-ax list:
if nx > 1:
ax = [list(i) for i in zip(*ax)] # transpose 2d-list
axx = []
for xa in ax:
for ya in xa:
axx.append(ya)
ax = axx
for x, h, p, e, a in zip(xvals, hist, peaks, exTimes, ax):
a.plot(x, h, label='histogram', linewidth=3)
# l1 = a.plot(x, s, label='smoothed')
for n, pi in enumerate(p):
l2 = a.plot(x, pi, label='peak %s' % n, linewidth=6)
a.set_xlim(xmin=0, xmax=max_border)
a.set_title('%s s' % e)
# plt.setp([l1,l2], linewidth=2)#, linestyle='--', color='r') # set
# both to dashed
l1 = ax[0].legend() # loc='upper center', bbox_to_anchor=(0.7, 1.05),
l1.draw_frame(False)
plt.xlabel('pixel value')
plt.ylabel('number of pixels')
fig = plt.gcf()
fig.set_size_inches(7 * nx, 3 * ny)
if save_to_file:
p = PathStr(outDir).join('result').setFiletype(ftype)
plt.savefig(p, bbox_inches='tight')
if show_plots:
plt.show() | 0.00164 |
def create(self):
"""
Create an instance of the Time Series Service with the typical
starting settings.
"""
self.service.create()
predix.config.set_env_value(self.use_class, 'ingest_uri',
self.get_ingest_uri())
predix.config.set_env_value(self.use_class, 'ingest_zone_id',
self.get_ingest_zone_id())
predix.config.set_env_value(self.use_class, 'query_uri',
self.get_query_uri())
predix.config.set_env_value(self.use_class, 'query_zone_id',
self.get_query_zone_id()) | 0.00995 |
def unlock_connection(cls, conf, dsn, key=None):
"""
A class method to unlock a connection (given by :code:`dsn`) in the specified
configuration file. Automatically opens the file and writes to it before
closing.
:param str conf: The configuration file to modify
:param str dsn: The name of the connection to unlock
:raises `giraffez.errors.ConfigurationError`: if the connection does not exist
"""
with Config(conf, "w", key) as c:
connection = c.connections.get(dsn, None)
if not connection:
raise ConfigurationError("Unable to unlock connection")
if dsn is None:
dsn = c.settings["connections"]["default"]
if connection.get("lock", None) is None:
raise GiraffeError("Connection '{}' is not locked.".format(dsn))
c.unset_value("connections.{}.lock".format(dsn))
c.write() | 0.006211 |
def schedule_host_svc_downtime(self, host, start_time, end_time, fixed,
trigger_id, duration, author, comment):
"""Schedule a service downtime for each service of an host
Format of the line that triggers function call::
SCHEDULE_HOST_SVC_DOWNTIME;<host_name>;<start_time>;<end_time>;
<fixed>;<trigger_id>;<duration>;<author>;<comment>
:param host: host to schedule downtime
:type host: alignak.object.host.Host
:param start_time: downtime start time
:type start_time:
:param end_time: downtime end time
:type end_time:
:param fixed: is downtime fixed
:type fixed: bool
:param trigger_id: downtime id that triggered this one
:type trigger_id: str
:param duration: downtime duration
:type duration: int
:param author: downtime author
:type author: str
:param comment: downtime comment
:type comment: str
:return: None
"""
for serv in host.services:
self.schedule_svc_downtime(serv, start_time, end_time, fixed,
trigger_id, duration, author, comment) | 0.002461 |
def register_foreign_device(self, addr, ttl):
"""Add a foreign device to the FDT."""
if _debug: BIPBBMD._debug("register_foreign_device %r %r", addr, ttl)
# see if it is an address or make it one
if isinstance(addr, Address):
pass
elif isinstance(addr, str):
addr = Address(addr)
else:
raise TypeError("addr must be a string or an Address")
for fdte in self.bbmdFDT:
if addr == fdte.fdAddress:
break
else:
fdte = FDTEntry()
fdte.fdAddress = addr
self.bbmdFDT.append( fdte )
fdte.fdTTL = ttl
fdte.fdRemain = ttl + 5
# return success
return 0 | 0.006757 |
def _load_output_data_port_models(self):
"""Reloads the output data port models directly from the the state"""
if not self.state_copy_initialized:
return
self.output_data_ports = []
for output_data_port_m in self.state_copy.output_data_ports:
new_op_m = deepcopy(output_data_port_m)
new_op_m.parent = self
new_op_m.data_port = output_data_port_m.data_port
self.output_data_ports.append(new_op_m) | 0.004107 |
def sorted_timeseries(self, ascending=True):
"""Returns a sorted copy of the TimeSeries, preserving the original one.
As an assumption this new TimeSeries is not ordered anymore if a new value is added.
:param boolean ascending: Determines if the TimeSeries will be ordered ascending
or descending.
:return: Returns a new TimeSeries instance sorted in the requested order.
:rtype: TimeSeries
"""
sortorder = 1
if not ascending:
sortorder = -1
data = sorted(self._timeseriesData, key=lambda i: sortorder * i[0])
newTS = TimeSeries(self._normalized)
for entry in data:
newTS.add_entry(*entry)
newTS._sorted = ascending
return newTS | 0.007634 |
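The sortorder trick above (flipping the key's sign instead of passing reverse=True) in isolation, with made-up data:

data = [(3, 'c'), (1, 'a'), (2, 'b')]
ascending = False
sortorder = 1 if ascending else -1
print(sorted(data, key=lambda i: sortorder * i[0]))  # [(3, 'c'), (2, 'b'), (1, 'a')]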
def table(self, rows, col_width=2):
'''table will print a table of entries. If the rows is
a dictionary, the keys are interpreted as column names. if
not, a numbered list is used.
'''
labels = [str(x) for x in range(1,len(rows)+1)]
if isinstance(rows, dict):
labels = list(rows.keys())
rows = list(rows.values())
for row in rows:
label = labels.pop(0)
label = label.ljust(col_width)
message = "\t".join(row)
self.custom(prefix=label,
message=message) | 0.008251 |
def get_linenumbertable(self):
"""
a sequence of (code_offset, line_number) pairs.
reference: http://docs.oracle.com/javase/specs/jvms/se7/html/jvms-4.html#jvms-4.7.12
""" # noqa
lnt = self._lnt
if lnt is None:
buff = self.get_attribute("LineNumberTable")
if buff is None:
lnt = tuple()
else:
with unpack(buff) as up:
lnt = tuple(up.unpack_struct_array(_HH))
self._lnt = lnt
return lnt | 0.003676 |
def _zfs_image_create(vm_name,
pool,
disk_name,
hostname_property_name,
sparse_volume,
disk_size,
disk_image_name):
'''
Clones an existing image, or creates a new one.
When cloning an image, disk_image_name refers to the source
of the clone. If not specified, disk_size is used for creating
a new zvol, and sparse_volume determines whether to create
a thin provisioned volume.
The cloned or new volume can have a ZFS property set containing
the vm_name. Use hostname_property_name for specifying the key
of this ZFS property.
'''
if not disk_image_name and not disk_size:
raise CommandExecutionError(
'Unable to create new disk {0}, please specify'
' the disk image name or disk size argument'
.format(disk_name)
)
if not pool:
raise CommandExecutionError(
'Unable to create new disk {0}, please specify'
' the disk pool name'.format(disk_name))
destination_fs = os.path.join(pool,
'{0}.{1}'.format(vm_name, disk_name))
log.debug('Image destination will be %s', destination_fs)
existing_disk = __salt__['zfs.list'](name=pool)
if 'error' in existing_disk:
raise CommandExecutionError(
'Unable to create new disk {0}. {1}'
.format(destination_fs, existing_disk['error'])
)
elif destination_fs in existing_disk:
log.info('ZFS filesystem %s already exists. Skipping creation', destination_fs)
blockdevice_path = os.path.join('/dev/zvol', pool, vm_name)
return blockdevice_path
properties = {}
if hostname_property_name:
properties[hostname_property_name] = vm_name
if disk_image_name:
__salt__['zfs.clone'](
name_a=disk_image_name,
name_b=destination_fs,
properties=properties)
elif disk_size:
__salt__['zfs.create'](
name=destination_fs,
properties=properties,
volume_size=disk_size,
sparse=sparse_volume)
blockdevice_path = os.path.join('/dev/zvol', pool, '{0}.{1}'
.format(vm_name, disk_name))
log.debug('Image path will be %s', blockdevice_path)
return blockdevice_path | 0.000811 |
def get_json(self):
"""Create JSON data for iSCSI initiator.
:returns: JSON data for iSCSI initiator as follows:
{
"DHCPUsage":{
},
"Name":{
},
"IPv4Address":{
},
"SubnetMask":{
},
"GatewayIPv4Address":{
},
"VLANId":{
}
}
"""
if self.dhcp_usage:
return {'DHCPUsage': self.dhcp_usage,
'Name': self.iqn}
else:
return self.get_basic_json() | 0.003135 |
def all_functions_called(self):
'''
list(Function): List of functions reachable from the contract (include super)
'''
all_calls = [f.all_internal_calls() for f in self.functions + self.modifiers] + [self.functions + self.modifiers]
all_calls = [item for sublist in all_calls for item in sublist] + self.functions
all_calls = list(set(all_calls))
all_constructors = [c.constructor for c in self.inheritance]
all_constructors = list(set([c for c in all_constructors if c]))
all_calls = set(all_calls+all_constructors)
return [c for c in all_calls if isinstance(c, Function)] | 0.007587 |
def _create_request_record(self, identifier, rtype, name, content, ttl, priority): # pylint: disable=too-many-arguments
"""Creates record for Subreg API calls"""
record = collections.OrderedDict()
# Mandatory content
# Just for update - not for creation
if identifier is not None:
record['id'] = identifier
record['type'] = rtype
# Just for creation - not for update
if name is not None:
record['name'] = self._relative_name(name)
# Optional content
if content is not None:
record['content'] = content
if ttl is not None:
record['ttl'] = ttl
if priority is not None:
record['prio'] = priority
return record | 0.003866 |
def propmerge(into, data_from):
""" Merge JSON schema requirements into a dictionary """
newprops = copy.deepcopy(into)
for prop, propval in six.iteritems(data_from):
if prop not in newprops:
newprops[prop] = propval
continue
new_sp = newprops[prop]
for subprop, spval in six.iteritems(propval):
if subprop not in new_sp:
new_sp[subprop] = spval
elif subprop == 'enum':
new_sp[subprop] = set(spval) & set(new_sp[subprop])
elif subprop == 'type':
if spval != new_sp[subprop]:
raise TypeError("Type cannot conflict in allOf'")
elif subprop in ('minLength', 'minimum'):
new_sp[subprop] = (new_sp[subprop] if
new_sp[subprop] > spval else spval)
elif subprop in ('maxLength', 'maximum'):
new_sp[subprop] = (new_sp[subprop] if
new_sp[subprop] < spval else spval)
elif subprop == 'multipleOf':
if new_sp[subprop] % spval == 0:
new_sp[subprop] = spval
else:
raise AttributeError(
"Cannot set conflicting multipleOf values")
else:
new_sp[subprop] = spval
newprops[prop] = new_sp
return newprops | 0.000699 |
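A hedged example of the merge rules above, assuming propmerge and its six/copy imports are in scope; the schemas are made up:

into = {'size': {'type': 'integer', 'minimum': 1, 'enum': [1, 2, 3]}}
data_from = {'size': {'minimum': 5, 'enum': [2, 3, 4]},
             'name': {'type': 'string'}}
merged = propmerge(into, data_from)
print(sorted(merged['size']['enum']))  # [2, 3]  (intersection of the enums)
print(merged['size']['minimum'])       # 5       (the stricter minimum wins)
print(merged['name'])                  # {'type': 'string'}  (copied as-is)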
def get_interface_detail_output_interface_ifindex(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_interface_detail = ET.Element("get_interface_detail")
config = get_interface_detail
output = ET.SubElement(get_interface_detail, "output")
interface = ET.SubElement(output, "interface")
interface_type_key = ET.SubElement(interface, "interface-type")
interface_type_key.text = kwargs.pop('interface_type')
interface_name_key = ET.SubElement(interface, "interface-name")
interface_name_key.text = kwargs.pop('interface_name')
ifindex = ET.SubElement(interface, "ifindex")
ifindex.text = kwargs.pop('ifindex')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.002413 |
def load_terminfo(terminal_name=None, fallback='vt100'):
"""
If `terminal_name` is not given, the TERM environment variable is used; if that is also unset, fall back to `fallback` if it is not empty.
vt100 is a popular terminal supporting ANSI X3.64.
"""
terminal_name = terminal_name or os.getenv('TERM')
if not terminal_name:
if not fallback:
raise TerminfoError('Environment variable TERM is unset and no fallback was requested')
else:
terminal_name = fallback
if os.getenv('TERMINFO'):
# from man terminfo(5):
# if the environment variable TERMINFO is set,
# only that directory is searched
terminfo_locations = [os.getenv('TERMINFO')]
else:
terminfo_locations = [] # from most to least important
if os.getenv('TERMINFO_DIRS'):
for i in os.getenv('TERMINFO_DIRS').split(':'):
# from man terminfo(5)
# An empty directory name is interpreted as /usr/share/terminfo.
terminfo_locations.append(i or '/usr/share/terminfo')
terminfo_locations += [
os.path.expanduser('~/.terminfo'),
'/etc/terminfo',
'/usr/local/ncurses/share/terminfo',
'/lib/terminfo',
'/usr/share/terminfo'
]
# remove duplicates preserving order
terminfo_locations = list(OrderedDict.fromkeys(terminfo_locations))
terminfo_path = None
for dirpath in terminfo_locations:
path = os.path.join(dirpath, terminal_name[0], terminal_name)
if os.path.exists(path):
terminfo_path = path
break
if not terminfo_path:
raise TerminfoError("Couldn't find a terminfo file for terminal '%s'" % terminal_name)
from terminfo_index import BOOLEAN_CAPABILITIES, NUMBER_CAPABILITIES, STRING_CAPABILITIES
data = open(terminfo_path, 'rb').read()
# header (see man term(5), STORAGE FORMAT)
header = struct.unpack('<hhhhhh', data[:12]) # 2 bytes == 1 short integer
magic_number = header[0] # the magic number (octal 0432)
size_names = header[1] # the size, in bytes, of the names section
size_booleans = header[2] # the number of bytes in the boolean section
num_numbers = header[3] # the number of short integers in the numbers section
num_offsets = header[4] # the number of offsets (short integers) in the strings section
size_strings = header[5] # the size, in bytes, of the string table
if magic_number != 0o432:
raise TerminfoError('Bad magic number')
# sections indexes
idx_section_names = 12
idx_section_booleans = idx_section_names + size_names
idx_section_numbers = idx_section_booleans + size_booleans
if idx_section_numbers % 2 != 0:
idx_section_numbers += 1 # must start on an even byte
idx_section_strings = idx_section_numbers + 2 * num_numbers
idx_section_string_table = idx_section_strings + 2 * num_offsets
# terminal names
terminal_names = data[idx_section_names:idx_section_booleans].decode('ascii')
terminal_names = terminal_names[:-1].split('|') # remove ASCII NUL and split
terminfo = Terminfo(terminal_names[0], terminal_names[1:])
# booleans
for i, idx in enumerate(range(idx_section_booleans, idx_section_booleans + size_booleans)):
cap = BooleanCapability(*BOOLEAN_CAPABILITIES[i], value=data[idx:idx+1] != b'\x00')
terminfo.booleans[cap.variable] = cap
# numbers
numbers = struct.unpack('<'+'h' * num_numbers, data[idx_section_numbers:idx_section_strings])
for i,strnum in enumerate(numbers):
cap = NumberCapability(*NUMBER_CAPABILITIES[i], value=strnum)
terminfo.numbers[cap.variable] = cap
# strings
offsets = struct.unpack('<'+'h' * num_offsets, data[idx_section_strings:idx_section_string_table])
idx = 0
for offset in offsets:
k = 0
string = []
while offset != -1:
char = data[idx_section_string_table + offset + k:idx_section_string_table + offset + k + 1]
if char == b'\x00':
break
string.append(char.decode('iso-8859-1'))
k += 1
string = u''.join(string)
cap = StringCapability(*STRING_CAPABILITIES[idx], value=string)
terminfo.strings[cap.variable] = cap
idx += 1
terminfo._reset_index()
return terminfo | 0.008664 |
def clean(self, value):
"""
When cleaning field, store original value to SourceText model and return rendered field.
@raise ValidationError when something went wrong with transformation.
"""
super_value = super(RichTextField, self).clean(value)
if super_value in fields.EMPTY_VALUES:
if self.instance:
obj_id = self.get_instance_id(self.instance)
if not obj_id:
SourceText.objects.filter(content_type=self.ct, object_id=obj_id, field=self.field_name, processor=self.processor).delete()
else:
SourceText.objects.filter(content_type=self.ct, object_id=obj_id, field=self.field_name).delete()
self.validate_rendered('')
return ''
text = smart_unicode(value)
if self.instance:
obj_id = self.get_instance_id(self.instance)
try:
if not obj_id:
src_text = SourceText(content_type=self.ct, object_id=obj_id, field=self.field_name, processor=self.processor)
else:
src_text = SourceText.objects.get(content_type=self.ct, object_id=obj_id, field=self.field_name)
assert src_text.processor == self.processor
except SourceText.DoesNotExist:
src_text = SourceText(content_type=self.ct, object_id=obj_id, field=self.field_name, processor=self.processor)
src_text.content = text
try:
rendered = src_text.render()
except ProcessorError, e:
raise ValidationError(self.error_messages['syntax_error'])
else:
# in case of adding new model, instance is not set
self.instance = src_text = SourceText(
content_type=self.ct,
field=self.field_name,
content=text,
processor=self.processor
)
try:
rendered = src_text.render()
except Exception, err:
raise ValidationError(self.error_messages['syntax_error'])
self.validate_rendered(rendered)
if not hasattr(self.model, RICH_FIELDS_SET):
setattr(self.model, RICH_FIELDS_SET, set())
getattr(self.model, RICH_FIELDS_SET).add(self.field_name)
# register the listener that saves the SourceText
#listener = self.post_save_listener(src_text)
signals.post_save.connect(receiver=self.post_save_listener, sender=self.model)
# wrap the text so that we can store the src_text on it
rendered = UnicodeWrapper(rendered)
setattr(rendered, self.src_text_attr, src_text)
return rendered | 0.003638 |
def _sasl_authenticate(self, stream, username, authzid):
"""Start SASL authentication process.
[initiating entity only]
:Parameters:
- `username`: user name.
- `authzid`: authorization ID.
- `mechanism`: SASL mechanism to use."""
if not stream.initiator:
raise SASLAuthenticationFailed("Only initiating entity start"
" SASL authentication")
if stream.features is None or not self.peer_sasl_mechanisms:
raise SASLNotAvailable("Peer doesn't support SASL")
props = dict(stream.auth_properties)
if not props.get("service-domain") and (
stream.peer and stream.peer.domain):
props["service-domain"] = stream.peer.domain
if username is not None:
props["username"] = username
if authzid is not None:
props["authzid"] = authzid
if "password" in self.settings:
props["password"] = self.settings["password"]
props["available_mechanisms"] = self.peer_sasl_mechanisms
enabled = sasl.filter_mechanism_list(
self.settings['sasl_mechanisms'], props,
self.settings['insecure_auth'])
if not enabled:
raise SASLNotAvailable(
"None of SASL mechanism selected can be used")
props["enabled_mechanisms"] = enabled
mechanism = None
for mech in enabled:
if mech in self.peer_sasl_mechanisms:
mechanism = mech
break
if not mechanism:
raise SASLMechanismNotAvailable("Peer doesn't support any of"
" our SASL mechanisms")
logger.debug("Our mechanism: {0!r}".format(mechanism))
stream.auth_method_used = mechanism
self.authenticator = sasl.client_authenticator_factory(mechanism)
initial_response = self.authenticator.start(props)
if not isinstance(initial_response, sasl.Response):
raise SASLAuthenticationFailed("SASL initiation failed")
element = ElementTree.Element(AUTH_TAG)
element.set("mechanism", mechanism)
if initial_response.data:
if initial_response.encode:
element.text = initial_response.encode()
else:
element.text = initial_response.data
stream.write_element(element) | 0.00195 |
def get_dependency_graph(component):
"""
Generate a component's graph of dependencies, which can be passed to
:func:`run` or :func:`run_incremental`.
"""
if component not in DEPENDENCIES:
raise Exception("%s is not a registered component." % get_name(component))
if not DEPENDENCIES[component]:
return {component: set()}
graph = defaultdict(set)
def visitor(c, parent):
if parent is not None:
graph[parent].add(c)
walk_dependencies(component, visitor)
graph = dict(graph)
# Find all items that don't depend on anything.
extra_items_in_deps = _reduce(set.union, graph.values(), set()) - set(graph.keys())
# Add empty dependencies where needed.
graph.update(dict((item, set()) for item in extra_items_in_deps))
return graph | 0.003636 |
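The last two steps (finding components that nothing lists as a dependency and giving them empty entries) can be seen in isolation; `_reduce` above is assumed to be `functools.reduce`:

from functools import reduce

graph = {'a': {'b', 'c'}, 'b': {'c'}}
extra = reduce(set.union, graph.values(), set()) - set(graph.keys())
graph.update({item: set() for item in extra})
print(graph)  # 'c' gains an empty dependency set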
def get_book(self):
"""Gets the ``Book`` at this node.
return: (osid.commenting.Book) - the book represented by this
node
*compliance: mandatory -- This method must be implemented.*
"""
if self._lookup_session is None:
mgr = get_provider_manager('COMMENTING', runtime=self._runtime, proxy=self._proxy)
self._lookup_session = mgr.get_book_lookup_session(proxy=getattr(self, "_proxy", None))
return self._lookup_session.get_book(Id(self._my_map['id'])) | 0.007407 |
def remove_service(self, zeroconf, srv_type, srv_name):
"""Remove the server from the list."""
self.servers.remove_server(srv_name)
logger.info(
"Glances server %s removed from the autodetect list" % srv_name) | 0.008163 |
def tokenize_paragraphs(self):
"""Apply paragraph tokenization to this Text instance. Creates ``paragraphs`` layer."""
tok = self.__paragraph_tokenizer
spans = tok.span_tokenize(self.text)
dicts = []
for start, end in spans:
dicts.append({'start': start, 'end': end})
self[PARAGRAPHS] = dicts
return self | 0.008065 |
def _fromScopeXpathToRefsDecl(self, scope, xpath):
""" Update xpath and scope property when refsDecl is updated
"""
if scope is not None and xpath is not None:
_xpath = scope + xpath
i = _xpath.find("?")
ii = 1
while i >= 0:
_xpath = _xpath[:i] + "$" + str(ii) + _xpath[i+1:]
i = _xpath.find("?")
ii += 1
self.refsDecl = _xpath | 0.004348 |
def upload_check(self, filename=None, folder_key=None, filedrop_key=None,
size=None, hash_=None, path=None, resumable=None):
"""upload/check
http://www.mediafire.com/developers/core_api/1.3/upload/#check
"""
return self.request('upload/check', QueryParams({
'filename': filename,
'folder_key': folder_key,
'filedrop_key': filedrop_key,
'size': size,
'hash': hash_,
'path': path,
'resumable': resumable
})) | 0.005445 |
def busy_display():
"""Display animation to show activity."""
sys.stdout.write("\033[?25l") # cursor off
sys.stdout.flush()
for x in range(1800):
symb = ['\\', '|', '/', '-']
sys.stdout.write("\033[D{}".format(symb[x % 4]))
sys.stdout.flush()
gevent.sleep(0.1) | 0.003236 |
def create_free_space_request_content():
"""Creates an XML for requesting of free space on remote WebDAV server.
:return: the XML string of request content.
"""
root = etree.Element('propfind', xmlns='DAV:')
prop = etree.SubElement(root, 'prop')
etree.SubElement(prop, 'quota-available-bytes')
etree.SubElement(prop, 'quota-used-bytes')
tree = etree.ElementTree(root)
return WebDavXmlUtils.etree_to_string(tree) | 0.004132 |
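For reference, the request body this builds, reproduced standalone (assuming `etree` here is `lxml.etree`, as in the client library, so the xmlns keyword becomes a plain attribute):

from lxml import etree

root = etree.Element('propfind', xmlns='DAV:')
prop = etree.SubElement(root, 'prop')
etree.SubElement(prop, 'quota-available-bytes')
etree.SubElement(prop, 'quota-used-bytes')
print(etree.tostring(root, pretty_print=True).decode())
# <propfind xmlns="DAV:">
#   <prop>
#     <quota-available-bytes/>
#     <quota-used-bytes/>
#   </prop>
# </propfind>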
def evaluate_model(recording, model_folder, verbose=False):
"""Evaluate model for a single recording."""
from . import preprocess_dataset
from . import features
for target_folder in get_recognizer_folders(model_folder):
# The source is later than the target. That means we need to
# refresh the target
if "preprocessed" in target_folder:
logging.info("Start applying preprocessing methods...")
t = target_folder
_, _, preprocessing_queue = preprocess_dataset.get_parameters(t)
handwriting = handwritten_data.HandwrittenData(recording)
if verbose:
handwriting.show()
handwriting.preprocessing(preprocessing_queue)
if verbose:
logging.debug("After preprocessing: %s",
handwriting.get_sorted_pointlist())
handwriting.show()
elif "feature-files" in target_folder:
logging.info("Create feature file...")
infofile_path = os.path.join(target_folder, "info.yml")
with open(infofile_path, 'r') as ymlfile:
feature_description = yaml.load(ymlfile)
feature_str_list = feature_description['features']
feature_list = features.get_features(feature_str_list)
feature_count = sum(map(lambda n: n.get_dimension(),
feature_list))
x = handwriting.feature_extraction(feature_list)
# Create hdf5
_, output_filename = tempfile.mkstemp(suffix='.hdf5', text=True)
create_hdf5(output_filename, feature_count, [(x, 0)])
elif "model" in target_folder:
logfile, model_use = _evaluate_model_single_file(target_folder,
output_filename)
return logfile
else:
logging.info("'%s' not found", target_folder)
os.remove(output_filename)
os.remove(model_use) | 0.000494 |
def to_n_ref(self, fill=0, dtype='i1'):
"""Transform each genotype call into the number of
reference alleles.
Parameters
----------
fill : int, optional
Use this value to represent missing calls.
dtype : dtype, optional
Output dtype.
Returns
-------
out : ndarray, int8, shape (n_variants, n_samples)
Array of ref alleles per genotype call.
Notes
-----
By default this function returns 0 for missing genotype calls
**and** for homozygous non-reference genotype calls. Use the
`fill` argument to change how missing calls are represented.
Examples
--------
>>> import allel
>>> g = allel.GenotypeArray([[[0, 0], [0, 1]],
... [[0, 2], [1, 1]],
... [[2, 2], [-1, -1]]])
>>> g.to_n_ref()
array([[2, 1],
[1, 0],
[0, 0]], dtype=int8)
>>> g.to_n_ref(fill=-1)
array([[ 2, 1],
[ 1, 0],
[ 0, -1]], dtype=int8)
>>> v = g[:, 0]
>>> v
<GenotypeVector shape=(3, 2) dtype=int64>
0/0 0/2 2/2
>>> v.to_n_ref()
array([2, 1, 0], dtype=int8)
"""
# count number of reference alleles (allele index 0)
out = np.empty(self.shape[:-1], dtype=dtype)
np.sum(self.values == 0, axis=-1, out=out)
# fill missing calls
if fill != 0:
m = self.is_missing()
out[m] = fill
# handle mask
if self.mask is not None:
out[self.mask] = fill
return out | 0.001178 |
def add_files_via_url(self, dataset_key, files={}):
"""Add or update dataset files linked to source URLs
:param dataset_key: Dataset identifier, in the form of owner/id
:type dataset_key: str
:param files: Dict containing the name of files and metadata
Uses file name as a dict containing File description, labels and
source URLs to add or update (Default value = {})
*description and labels are optional.*
:type files: dict
:raises RestApiException: If a server error occurs
Examples
--------
>>> import datadotworld as dw
>>> url = 'http://www.acme.inc/example.csv'
>>> api_client = dw.api_client()
>>> api_client.add_files_via_url(
... 'username/test-dataset',
... {'example.csv': {
... 'url': url,
... 'labels': ['raw data'],
... 'description': 'file description'}}) # doctest: +SKIP
"""
file_requests = [_swagger.FileCreateOrUpdateRequest(
name=file_name,
source=_swagger.FileSourceCreateOrUpdateRequest(
url=file_info['url'],
expand_archive=file_info.get('expand_archive',
False)),
description=file_info.get('description'),
labels=file_info.get('labels'),
) for file_name, file_info in files.items()]
owner_id, dataset_id = parse_dataset_key(dataset_key)
try:
self._datasets_api.add_files_by_source(
owner_id, dataset_id,
_swagger.FileBatchUpdateRequest(files=file_requests))
except _swagger.rest.ApiException as e:
raise RestApiError(cause=e) | 0.00112 |
def remove_variable(self, name):
"""Remove a variable from the problem."""
index = self._get_var_index(name)
# Remove from matrix
self._A = np.delete(self.A, index, 1)
# Remove from bounds
del self.bounds[name]
# Remove from var list
del self._variables[name]
self._update_variable_indices()
self._reset_solution() | 0.005076 |
def transform(self, X=None, y=None):
"""
Transform an image using an Affine transform with
rotation parameters randomly generated from the user-specified
range. Return the transform if X=None.
Arguments
---------
X : ANTsImage
Image to transform
y : ANTsImage (optional)
Another image to transform
Returns
-------
ANTsImage if y is None, else a tuple of ANTsImage types
Examples
--------
>>> import ants
>>> img = ants.image_read(ants.get_data('ch2'))
>>> tx = ants.contrib.RandomRotate3D(rotation_range=(-10,10))
>>> img2 = tx.transform(img)
"""
# random draw: the rotation_range values are used as (mean, std) of a Gaussian
rotation_x = random.gauss(self.rotation_range[0], self.rotation_range[1])
rotation_y = random.gauss(self.rotation_range[0], self.rotation_range[1])
rotation_z = random.gauss(self.rotation_range[0], self.rotation_range[1])
self.params = (rotation_x, rotation_y, rotation_z)
tx = Rotate3D((rotation_x, rotation_y, rotation_z),
reference=self.reference,
lazy=self.lazy)
return tx.transform(X,y) | 0.006446 |
def forward(self, inputs, label, begin_state, sampled_values): # pylint: disable=arguments-differ
"""Defines the forward computation.
Parameters
-----------
inputs : NDArray
input tensor with shape `(sequence_length, batch_size)`
when `layout` is "TNC".
begin_state : list
initial recurrent state tensor with length equals to num_layers*2.
For each layer the two initial states have shape `(batch_size, num_hidden)`
and `(batch_size, num_projection)`
sampled_values : list
a list of three tensors for `sampled_classes` with shape `(num_samples,)`,
`expected_count_sampled` with shape `(num_samples,)`, and
`expected_count_true` with shape `(sequence_length, batch_size)`.
Returns
--------
out : NDArray
output tensor with shape `(sequence_length, batch_size, 1+num_samples)`
when `layout` is "TNC".
out_states : list
output recurrent state tensor with length equals to num_layers*2.
For each layer the two initial states have shape `(batch_size, num_hidden)`
and `(batch_size, num_projection)`
new_target : NDArray
output tensor with shape `(sequence_length, batch_size)`
when `layout` is "TNC".
"""
encoded = self.embedding(inputs)
length = inputs.shape[0]
batch_size = inputs.shape[1]
encoded, out_states = self.encoder.unroll(length, encoded, begin_state,
layout='TNC', merge_outputs=True)
out, new_target = self.decoder(encoded, sampled_values, label)
out = out.reshape((length, batch_size, -1))
new_target = new_target.reshape((length, batch_size))
return out, out_states, new_target | 0.004787 |
def implicify_hydrogens(self):
"""
remove explicit hydrogen if possible
:return: number of removed hydrogens
"""
explicit = defaultdict(list)
c = 0
for n, atom in self.atoms():
if atom.element == 'H':
for m in self.neighbors(n):
if self._node[m].element != 'H':
explicit[m].append(n)
for n, h in explicit.items():
atom = self._node[n]
len_h = len(h)
for i in range(len_h, 0, -1):
hi = h[:i]
if atom.get_implicit_h([y.order for x, y in self._adj[n].items() if x not in hi]) == i:
for x in hi:
self.remove_node(x)
c += 1
break
self.flush_cache()
return c | 0.003476 |
def get_version():
"""
Return package version as listed in `__version__` in `__init__.py`.
"""
with open(os.path.join(os.path.dirname(__file__), 'argparsetree', '__init__.py')) as init_py:
return re.search('__version__ = [\'"]([^\'"]+)[\'"]', init_py.read()).group(1) | 0.010453 |
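The regex used above, checked against a made-up __init__.py string:

import re

init_py = '__version__ = "0.4.2"\n'
print(re.search('__version__ = [\'"]([^\'"]+)[\'"]', init_py).group(1))  # 0.4.2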
def add_require(self, require):
""" Add a require object if it does not already exist """
for p in self.requires:
if p.value == require.value:
return
self.requires.append(require) | 0.008658 |
def propagate(self, date):
"""Propagate the orbit to a new date
Args:
date (Date)
Return:
Orbit
"""
if self.propagator.orbit is not self:
self.propagator.orbit = self
return self.propagator.propagate(date) | 0.006849 |
def send_cmd_recv_rsp(self, target, data, timeout):
"""Exchange data with a remote Target
Sends command *data* to the remote *target* discovered in the
most recent call to one of the sense_xxx() methods. Note that
*target* becomes invalid with any call to mute(), sense_xxx()
or listen_xxx()
Arguments:
target (nfc.clf.RemoteTarget): The target returned by the
last successful call of a sense_xxx() method.
data (bytearray): The binary data to send to the remote
device.
timeout (float): The maximum number of seconds to wait for
response data from the remote device.
Returns:
bytearray: Response data received from the remote device.
Raises:
nfc.clf.CommunicationError: When no data was received.
"""
fname = "send_cmd_recv_rsp"
cname = self.__class__.__module__ + '.' + self.__class__.__name__
raise NotImplementedError("%s.%s() is required" % (cname, fname)) | 0.001899 |
def init_driver(client_id):
"""Initialises a new driver via webwhatsapi module
@param client_id: ID of user client
@return webwhatsapi object
"""
# Create profile directory if it does not exist
profile_path = CHROME_CACHE_PATH + str(client_id)
if not os.path.exists(profile_path):
os.makedirs(profile_path)
# Options to customize chrome window
chrome_options = [
'window-size=' + CHROME_WINDOW_SIZE,
'--user-agent=Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/60.0.3112.78 Chrome/60.0.3112.78 Safari/537.36'
]
if CHROME_IS_HEADLESS:
chrome_options.append('--headless')
if CHROME_DISABLE_GPU:
chrome_options.append('--disable-gpu')
# Create a whatsapidriver object
d = WhatsAPIDriver(
username=client_id,
profile=profile_path,
client='chrome',
chrome_options=chrome_options
)
return d | 0.008172 |
def get_server_model(snmp_client):
"""Get server model of the node.
:param snmp_client: an SNMP client object.
:raises: SNMPFailure if SNMP operation failed.
:returns: a string of server model.
"""
try:
server_model = snmp_client.get(SERVER_MODEL_OID)
return six.text_type(server_model)
except SNMPFailure as e:
raise SNMPServerModelFailure(
SNMP_FAILURE_MSG % ("GET SERVER MODEL", e)) | 0.002217 |
def get_span_offsets(docgraph, node_id):
"""
returns the character start and end position of the span of text that
the given node spans or dominates.
Returns
-------
offsets : tuple(int, int)
character onset and offset of the span
"""
try:
span = get_span(docgraph, node_id)
# workaround for issue #138
# TODO: when #138 is fixed, just take the first onset / last offset
onsets, offsets = zip(*[docgraph.get_offsets(tok_node)
for tok_node in span])
return (min(onsets), max(offsets))
except KeyError as _:
raise KeyError("Node '{}' doesn't span any tokens.".format(node_id)) | 0.001431 |
def lookups(self, request, model_admin):
"""
Returns a list of tuples. The first element in each
tuple is the coded value for the option that will
appear in the URL query. The second element is the
human-readable name for the option that will appear
in the right sidebar.
"""
qs = model_admin.get_queryset(request)
qs = qs.filter(id__range=(1, 99))
for item in qs:
dp = DeviceProtocol.objects.filter(pk=item.id).first()
if dp:
yield (dp.pk, dp.app_name) | 0.003521 |
def supported_languages(self, task=None):
"""Languages that are covered by a specific task.
Args:
task (string): Task name.
"""
if task:
collection = self.get_collection(task=task)
return [isoLangs[x.id.split('.')[1]]["name"]
for x in collection.packages]
else:
return [x.name.split()[0] for x in self.collections()
if Downloader.LANG_PREFIX in x.id] | 0.012579 |
def update_member(self, member_id, peer_urls):
"""
Update the configuration of an existing member in the cluster.
:param member_id: ID of the member to update
:param peer_urls: new list of peer urls the member will use to
communicate with the cluster
"""
member_update_request = etcdrpc.MemberUpdateRequest(ID=member_id,
peerURLs=peer_urls)
self.clusterstub.MemberUpdate(
member_update_request,
self.timeout,
credentials=self.call_credentials,
metadata=self.metadata
) | 0.002999 |
def export(self, class_name=None, method_name=None,
num_format=lambda x: str(x), details=False, **kwargs):
# pylint: disable=unused-argument
"""
Transpile a trained model to the syntax of a
chosen programming language.
Parameters
----------
:param class_name : string, default: None
The name for the ported class.
:param method_name : string, default: None
The name for the ported method.
:param num_format : lambda x, default: lambda x: str(x)
The representation of the floating-point values.
:param details : bool, default False
Return additional data for the compilation and execution.
Returns
-------
model : {mix}
The ported model as string or a dictionary
with further information.
"""
if class_name is None or class_name == '':
class_name = self.estimator_name
if method_name is None or method_name == '':
method_name = self.target_method
if isinstance(num_format, types.LambdaType):
self.template._num_format = num_format
output = self.template.export(class_name=class_name,
method_name=method_name, **kwargs)
if not details:
return output
language = self.target_language
filename = Porter._get_filename(class_name, language)
comp_cmd, exec_cmd = Porter._get_commands(filename, class_name,
language)
output = {
'estimator': str(output),
'filename': filename,
'class_name': class_name,
'method_name': method_name,
'cmd': {
'compilation': comp_cmd,
'execution': exec_cmd
},
'algorithm': {
'type': self.estimator_type,
'name': self.estimator_name
}
}
return output | 0.00194 |
def get_token(self, token):
'''
Request a token from the master
'''
load = {}
load['token'] = token
load['cmd'] = 'get_token'
tdata = self._send_token_request(load)
return tdata | 0.008299 |
def from_config(cls, cp, variable_params):
"""Gets sampling transforms specified in a config file.
Sampling parameters and the parameters they replace are read from the
``sampling_params`` section, if it exists. Sampling transforms are
read from the ``sampling_transforms`` section(s), using
``transforms.read_transforms_from_config``.
An ``AssertionError`` is raised if no ``sampling_params`` section
exists in the config file.
Parameters
----------
cp : WorkflowConfigParser
Config file parser to read.
variable_params : list
List of parameter names of the original variable params.
Returns
-------
SamplingTransforms
A sampling transforms class.
"""
if not cp.has_section('sampling_params'):
raise ValueError("no sampling_params section found in config file")
# get sampling transformations
sampling_params, replace_parameters = \
read_sampling_params_from_config(cp)
sampling_transforms = transforms.read_transforms_from_config(
cp, 'sampling_transforms')
logging.info("Sampling in {} in place of {}".format(
', '.join(sampling_params), ', '.join(replace_parameters)))
return cls(variable_params, sampling_params,
replace_parameters, sampling_transforms) | 0.001396 |
def dict_to_nvlist(dict):
'''Convert a dictionary into a CORBA namevalue list.'''
result = []
for item in list(dict.keys()):
result.append(SDOPackage.NameValue(item, omniORB.any.to_any(dict[item])))
return result | 0.008299 |
def change_forms(self, *args, **keywords):
"""
Checks which form is currently displayed and toggles to the other one
"""
# Returns to previous Form in history if there is a previous Form
try:
self.parentApp.switchFormPrevious()
except Exception as e: # pragma: no cover
self.parentApp.switchForm('MAIN') | 0.005319 |
def parse_args(args=None):
"""Parses arguments, returns (options, args)."""
from argparse import ArgumentParser
if args is None:
args = sys.argv
parser = ArgumentParser(description='Rename template project with '
'hyphen-separated <new name> (path names and in '
'files).')
parser.add_argument('new_name', help='New project name (e.g., '
' `my-new-project`)')
args = parser.parse_args()
return args | 0.001938 |
def ontologyShapeTree(self):
"""
Returns a dict representing the ontology tree
Top level = {0:[top properties]}
Multi inheritance is represented explicitly
"""
treedict = {}
if self.all_shapes:
treedict[0] = self.toplayer_shapes
for element in self.all_shapes:
if element.children():
treedict[element] = element.children()
return treedict
return treedict | 0.004082 |
def walk_files_info(self, relativePath="", fullPath=False, recursive=False):
"""
        Walk the repository relative path and yield tuples of two items, where
        the first item is the file relative/full path and the second item is the file info.
        If the file info is not found on disk, the second item will be None.
        :parameters:
            #. relativePath (string): The relative path from which to start the walk.
            #. fullPath (boolean): Whether to return full or relative path.
            #. recursive (boolean): Whether to walk all directory files recursively.
"""
assert isinstance(fullPath, bool), "fullPath must be boolean"
assert isinstance(recursive, bool), "recursive must be boolean"
relativePath = self.to_repo_relative_path(path=relativePath, split=False)
for relaPath in self.walk_files_path(relativePath=relativePath, fullPath=False, recursive=recursive):
fpath, fname = os.path.split(relaPath)
fileInfoPath = os.path.join(self.__path,fpath,self.__fileInfo%fname)
if os.path.isfile(fileInfoPath):
with open(fileInfoPath, 'rb') as fd:
info = pickle.load(fd)
else:
info = None
if fullPath:
yield (os.path.join(self.__path, relaPath), info)
else:
yield (relaPath, info) | 0.00721 |
def mask_binary(self, binary_im):
"""Create a new image by zeroing out data at locations
where binary_im == 0.0.
Parameters
----------
binary_im : :obj:`BinaryImage`
A BinaryImage of the same size as this image, with pixel values of either
zero or one. Wherever this image has zero pixels, we'll zero out the
pixels of the new image.
Returns
-------
:obj:`Image`
A new Image of the same type, masked by the given binary image.
"""
data = np.copy(self._data)
ind = np.where(binary_im.data == 0)
data[ind[0], ind[1], :] = 0
return SegmentationImage(data, self._frame) | 0.005563 |
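The core of the masking above is plain NumPy fancy indexing; the following is a minimal, self-contained sketch of the same zero-out step on raw arrays (the toy 4x4 image and mask are illustrative stand-ins for the library's Image/BinaryImage wrappers):
import numpy as np

# Hypothetical stand-ins for the image data; the real method works on
# Image/BinaryImage wrapper objects, not raw arrays.
color_data = np.random.randint(0, 255, size=(4, 4, 3), dtype=np.uint8)
binary_data = np.zeros((4, 4), dtype=np.uint8)
binary_data[1:3, 1:3] = 1  # keep only the central 2x2 block

masked = np.copy(color_data)
ind = np.where(binary_data == 0)
masked[ind[0], ind[1], :] = 0  # zero out pixels where the mask is zero

assert (masked[0, 0] == 0).all() and (masked[1, 1] == color_data[1, 1]).all()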
def compileFeatures(
ufo,
ttFont=None,
glyphSet=None,
featureWriters=None,
featureCompilerClass=None,
):
""" Compile OpenType Layout features from `ufo` into FontTools OTL tables.
If `ttFont` is None, a new TTFont object is created containing the new
tables, else the provided `ttFont` is updated with the new tables.
If no explicit `featureCompilerClass` is provided, the one used will
depend on whether the ufo contains any MTI feature files in its 'data'
    directory (thus the `MtiFeatureCompiler` is used) or not (then the
default FeatureCompiler for Adobe FDK features is used).
If skipExportGlyphs is provided (see description in the ``compile*``
functions), the feature compiler will prune groups (removing them if empty)
and kerning of the UFO of these glyphs. The feature file is left untouched.
"""
if featureCompilerClass is None:
if any(
fn.startswith(MTI_FEATURES_PREFIX) and fn.endswith(".mti")
for fn in ufo.data.fileNames
):
featureCompilerClass = MtiFeatureCompiler
else:
featureCompilerClass = FeatureCompiler
featureCompiler = featureCompilerClass(
ufo, ttFont, glyphSet=glyphSet, featureWriters=featureWriters
)
return featureCompiler.compile() | 0.000755 |
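A hedged usage sketch for the function above; the ``ufoLib2`` import and the ``MyFont.ufo`` path are assumptions for illustration, and any UFO object with the expected attributes should work:
import ufoLib2  # assumed UFO reader; not prescribed by the source

ufo = ufoLib2.Font.open("MyFont.ufo")  # hypothetical font path
ttFont = compileFeatures(ufo)          # builds a new TTFont with the compiled OTL tables
print(sorted(ttFont.keys()))           # e.g. GDEF/GPOS/GSUB, depending on the features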
def stop(self, ends=None, forced=False):
"""
        Stops a ``NuMap`` instance. If the list of end tasks is specified *via*
        the "ends" argument, a call to ``NuMap.stop`` will block the calling
        thread and retrieve (and discard) a maximum of 2 * stride results. This
        will stop the worker pool and the threads which manage its input and
        output queues, respectively.
        If the "ends" argument is not specified but the "forced" argument is,
        the method does not block and ``NuMap._stop`` has to be called after
        **all** pending results have been retrieved. Calling ``NuMap._stop`` with
pending results **will** dead-lock.
Either "ends" or "forced" has to be ``True``.
Arguments:
- ends (``list``) [default: ``None``] A list of task ids which are not
consumed within the ``NuMap`` instance.
- forced (``bool``) [default: ``False``] If "ends" is not ``None``
this argument is ignored. If "ends" is ``None`` and "forced" is
``True`` the ``NuMap`` instance will trigger *stopping mode*.
"""
if self._started.isSet():
if ends:
self._stopping.set()
# if _stopping is set the pool putter will notify the weave
# generator that no more new results are needed. The weave
# generator will stop _before_ getting the first result from
# task 0 in the next stride.
log.debug('%s begins stopping routine' % self)
to_do = ends[:] # if ends else ends
# We continue this loop until all end tasks
# have raised StopIteration this stops the pool
while to_do:
for task in to_do:
try:
#for i in xrange(self.stride):
self.next(task=task)
except StopIteration:
to_do.remove(task)
log.debug('%s stopped task %s' % (self, task))
continue
except Exception, excp:
log.debug('%s task %s raised exception %s' % \
(self, task, excp))
# stop threads remove queues
self._stop()
log.debug('%s finished stopping routine' % self)
elif forced:
self._stopping.set()
log.debug('%s begins triggers stopping' % self)
# someone has to retrieve results and call the _stop_managers
else:
# this is the default
msg = '%s is started, but neither ends nor forced was set.' % \
self
log.error(msg)
raise RuntimeError(msg) | 0.008089 |
def current_sleep_breakdown(self):
"""Return durations of sleep stages for in-progress session."""
try:
stages = self.intervals[0]['stages']
breakdown = {'awake': 0, 'light': 0, 'deep': 0, 'rem': 0}
for stage in stages:
if stage['stage'] == 'awake':
breakdown['awake'] += stage['duration']
elif stage['stage'] == 'light':
breakdown['light'] += stage['duration']
elif stage['stage'] == 'deep':
breakdown['deep'] += stage['duration']
elif stage['stage'] == 'rem':
breakdown['rem'] += stage['duration']
except KeyError:
breakdown = None
return breakdown | 0.002584 |
def export(self, nidm_version, export_dir):
"""
Create prov entities and activities.
"""
if nidm_version['major'] < 1 or \
(nidm_version['major'] == 1 and nidm_version['minor'] < 3):
self.type = NIDM_DATA_SCALING
# Create "Data" entity
# FIXME: grand mean scaling?
# FIXME: medianIntensity
self.add_attributes((
(PROV['type'], self.type),
(PROV['type'], PROV['Collection']),
(PROV['label'], self.label),
(NIDM_GRAND_MEAN_SCALING, self.grand_mean_sc),
(NIDM_TARGET_INTENSITY, self.target_intensity)))
if nidm_version['major'] > 1 or \
(nidm_version['major'] == 1 and nidm_version['minor'] > 2):
if self.mri_protocol is not None:
self.add_attributes(
[(NIDM_HAS_MRI_PROTOCOL, self.mri_protocol)]) | 0.002186 |
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: SyncMapItemContext for this SyncMapItemInstance
:rtype: twilio.rest.preview.sync.service.sync_map.sync_map_item.SyncMapItemContext
"""
if self._context is None:
self._context = SyncMapItemContext(
self._version,
service_sid=self._solution['service_sid'],
map_sid=self._solution['map_sid'],
key=self._solution['key'],
)
return self._context | 0.007418 |
def get_attached_container_host_config_kwargs(self, action, container_name, kwargs=None):
"""
Generates keyword arguments for the Docker client to set up the HostConfig or start an attached container.
:param action: Action configuration.
:type action: ActionConfig
:param container_name: Container name or id. Set ``None`` when included in kwargs for ``create_container``.
:type container_name: unicode | str | NoneType
:param kwargs: Additional keyword arguments to complement or override the configuration-based values.
:type kwargs: dict | NoneType
:return: Resulting keyword arguments.
:rtype: dict
"""
if container_name:
c_kwargs = {'container': container_name}
else:
c_kwargs = {}
update_kwargs(c_kwargs, kwargs)
return c_kwargs | 0.006826 |
def set_index(self, field, value):
"""
set_index(field, value)
Works like :meth:`add_index`, but ensures that there is only
one index on given field. If other found, then removes it
first.
:param field: The index field.
:type field: string
:param value: The index value.
:type value: string or integer
:rtype: :class:`RiakObject <riak.riak_object.RiakObject>`
"""
to_rem = set((x for x in self.indexes if x[0] == field))
self.indexes.difference_update(to_rem)
return self.add_index(field, value) | 0.003284 |
def _get_list_widget(
self,
filters,
actions=None,
order_column="",
order_direction="",
page=None,
page_size=None,
widgets=None,
**args
):
""" get joined base filter and current active filter for query """
widgets = widgets or {}
actions = actions or self.actions
page_size = page_size or self.page_size
if not order_column and self.base_order:
order_column, order_direction = self.base_order
joined_filters = filters.get_joined_filters(self._base_filters)
count, lst = self.datamodel.query(
joined_filters,
order_column,
order_direction,
page=page,
page_size=page_size,
)
pks = self.datamodel.get_keys(lst)
# serialize composite pks
pks = [self._serialize_pk_if_composite(pk) for pk in pks]
widgets["list"] = self.list_widget(
label_columns=self.label_columns,
include_columns=self.list_columns,
value_columns=self.datamodel.get_values(lst, self.list_columns),
order_columns=self.order_columns,
formatters_columns=self.formatters_columns,
page=page,
page_size=page_size,
count=count,
pks=pks,
actions=actions,
filters=filters,
modelview_name=self.__class__.__name__,
)
return widgets | 0.002012 |
def authorization_header(self):
"""
Returns a string containing the authorization header used to authenticate
with GenePattern. This string is included in the header of subsequent
requests sent to GenePattern.
"""
return 'Basic %s' % base64.b64encode(bytes(self.username + ':' + self.password, 'ascii')).decode('ascii') | 0.010899 |
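The value is just the Basic scheme applied to ``username:password``; a standalone sketch with made-up credentials shows the same construction:
import base64

username, password = "alice", "s3cret"  # illustrative credentials only
header = 'Basic %s' % base64.b64encode(
    bytes(username + ':' + password, 'ascii')).decode('ascii')
print(header)  # Basic YWxpY2U6czNjcmV0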
def satisfy_custom_matcher(self, args, kwargs):
"""Returns a boolean indicating whether or not the mock will accept the provided arguments.
:param tuple args: A tuple of position args
:param dict kwargs: A dictionary of keyword args
:return: Whether or not the mock accepts the provided arguments.
:rtype: bool
"""
is_match = super(Expectation, self).satisfy_custom_matcher(args, kwargs)
if is_match:
self._satisfy()
return is_match | 0.007692 |
def execute(self, eopatch):
""" Computation of NDVI slope using finite central differences
        This implementation loops through every spatial location, considers the valid NDVI values and approximates their
        first-order derivative using central differences. The argmin and argmax of the NDVI slope are added to the eopatch.
        The NDVI slope at date t is computed as $(NDVI_{t+1}-NDVI_{t-1})/(date_{t+1}-date_{t-1})$.
:param eopatch: Input eopatch
:return: eopatch with NDVI slope argmin/argmax features
"""
# pylint: disable=invalid-name
if self.mask_data:
valid_data_mask = eopatch.mask['VALID_DATA']
else:
valid_data_mask = eopatch.mask['IS_DATA']
ndvi = np.ma.array(eopatch.data[self.data_feature],
dtype=np.float32,
mask=~valid_data_mask.astype(np.bool))
all_dates = np.asarray([x.toordinal() for x in eopatch.timestamp])
if ndvi.ndim == 4:
h, w = ndvi.shape[1: 3]
else:
raise ValueError('{} feature has incorrect number of dimensions'.format(self.data_feature))
argmax_ndvi_slope, argmin_ndvi_slope = np.zeros((h, w, 1), dtype=np.uint8), np.zeros((h, w, 1), dtype=np.uint8)
for ih, iw in it.product(range(h), range(w)):
ndvi_curve = ndvi[:, ih, iw, :]
valid_idx = np.where(~ndvi.mask[:, ih, iw])[0]
ndvi_curve = ndvi_curve[valid_idx]
valid_dates = all_dates[valid_idx]
ndvi_slope = np.convolve(ndvi_curve.squeeze(), [1, 0, -1], 'valid') / np.convolve(valid_dates, [1, 0, -1],
'valid')
# +1 to compensate for the 'valid' convolution which eliminates first and last
argmax_ndvi_slope[ih, iw] = valid_idx[np.argmax(ndvi_slope) + 1]
argmin_ndvi_slope[ih, iw] = valid_idx[np.argmin(ndvi_slope) + 1]
del ndvi_curve, valid_idx, valid_dates, ndvi_slope
eopatch.data_timeless[self.argmax_feature] = argmax_ndvi_slope
eopatch.data_timeless[self.argmin_feature] = argmin_ndvi_slope
return eopatch | 0.004462 |
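The convolution trick used for the central differences can be reproduced on a toy one-dimensional NDVI curve, independent of the EOPatch machinery; the values below are illustrative:
import numpy as np

ndvi_curve = np.array([0.2, 0.35, 0.6, 0.55, 0.3])   # toy NDVI values
dates = np.array([0, 10, 20, 35, 45])                # acquisition dates (ordinal days)

# (NDVI_{t+1} - NDVI_{t-1}) / (date_{t+1} - date_{t-1}); 'valid' drops the first/last sample.
slope = np.convolve(ndvi_curve, [1, 0, -1], 'valid') / np.convolve(dates, [1, 0, -1], 'valid')
argmax_slope = np.argmax(slope) + 1  # +1 compensates for the sample dropped by 'valid'
argmin_slope = np.argmin(slope) + 1
print(slope, argmax_slope, argmin_slope)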
def log_fault (exc, message = "", level = logging.CRITICAL,
traceback = False):
"""Print the usual traceback information, followed by a listing of all
the local variables in each frame.
"""
tb = sys.exc_info ()[2]
stack = _get_stack (tb)
LOG.log (level,
"FAULT: %s%s(%s): %s", ("%s -- " % message) if message else "",
tb.tb_frame.f_code.co_filename,
tb.tb_lineno,
repr (exc))
if traceback or LOG.isEnabledFor (logging.DEBUG):
for line in _generate_stackdump (stack):
LOG.debug (line) | 0.037102 |
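The function is meant to be called from inside an ``except`` block, while ``sys.exc_info()`` still points at the active exception; a brief sketch (``risky_operation`` is a hypothetical callable):
try:
    risky_operation()
except Exception as exc:
    # Logs the traceback and, at DEBUG level, the locals of every frame.
    log_fault(exc, message="risky_operation failed", traceback=True)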
def install_anaconda_python(args):
"""Provide isolated installation of Anaconda python for running bcbio-nextgen.
http://docs.continuum.io/anaconda/index.html
"""
anaconda_dir = os.path.join(args.datadir, "anaconda")
bindir = os.path.join(anaconda_dir, "bin")
conda = os.path.join(bindir, "conda")
if not os.path.exists(anaconda_dir) or not os.path.exists(conda):
if os.path.exists(anaconda_dir):
shutil.rmtree(anaconda_dir)
dist = args.distribution if args.distribution else _guess_distribution()
url = REMOTES["anaconda"] % ("MacOSX" if dist.lower() == "macosx" else "Linux")
if not os.path.exists(os.path.basename(url)):
subprocess.check_call(["wget", "--progress=dot:mega", "--no-check-certificate", url])
subprocess.check_call("bash %s -b -p %s" %
(os.path.basename(url), anaconda_dir), shell=True)
return {"conda": conda,
"pip": os.path.join(bindir, "pip"),
"dir": anaconda_dir} | 0.005803 |
def data_iterator_csv_dataset(uri,
batch_size,
shuffle=False,
rng=None,
normalize=True,
with_memory_cache=True,
with_file_cache=True,
cache_dir=None,
epoch_begin_callbacks=[],
epoch_end_callbacks=[]):
'''data_iterator_csv_dataset
Get data directly from a dataset provided as a CSV file.
You can read files located on the local file system, http(s) servers or Amazon AWS S3 storage.
For example,
.. code-block:: python
batch = data_iterator_csv_dataset('CSV_FILE.csv', batch_size, shuffle=True)
Args:
uri (str): Location of dataset CSV file.
batch_size (int): Size of data unit.
shuffle (bool):
Indicates whether the dataset is shuffled or not.
Default value is False.
rng (None or :obj:`numpy.random.RandomState`): Numpy random number
generator.
normalize (bool): If True, each sample in the data gets normalized by a factor of 255.
Default is True.
with_memory_cache (bool):
If ``True``, use :py:class:`.data_source.DataSourceWithMemoryCache`
to wrap ``data_source``. It is a good idea to set this as true unless
data_source provides on-memory data.
Default value is True.
with_file_cache (bool):
If ``True``, use :py:class:`.data_source.DataSourceWithFileCache`
to wrap ``data_source``.
            If ``data_source`` is slow, enabling this option is a good idea.
            Default value is True.
cache_dir (str):
Location of file_cache.
If this value is None, :py:class:`.data_source.DataSourceWithFileCache`
creates file caches implicitly on temporary directory and erases them all
when data_iterator is finished.
Otherwise, :py:class:`.data_source.DataSourceWithFileCache` keeps created cache.
Default is None.
epoch_begin_callbacks (list of functions): An item is a function
which takes an epoch index as an argument. These are called
at the beginning of an epoch.
epoch_end_callbacks (list of functions): An item is a function
which takes an epoch index as an argument. These are called
at the end of an epoch.
Returns:
:py:class:`DataIterator <nnabla.utils.data_iterator.DataIterator>`:
Instance of DataIterator
'''
ds = CsvDataSource(uri,
shuffle=shuffle,
rng=rng,
normalize=normalize)
return data_iterator(ds,
batch_size=batch_size,
with_memory_cache=with_memory_cache,
with_file_cache=with_file_cache,
cache_dir=cache_dir,
epoch_begin_callbacks=epoch_begin_callbacks,
epoch_end_callbacks=epoch_end_callbacks) | 0.003114 |
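A minimal usage sketch for the iterator above; ``train.csv`` is an assumed dataset file, and the ``size``/``next()`` members follow NNabla's DataIterator API (the two-variable unpacking assumes a two-column CSV):
di = data_iterator_csv_dataset('train.csv', batch_size=64, shuffle=True)
for _ in range(di.size // 64):
    x, t = di.next()   # one batch per call; tuple layout follows the CSV columns
    # ... feed x and t into the training graph here ...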
def _process_response(self, response, object_mapping=None):
"""
Attempt to find a ResponseHandler that knows how to process this response.
If no handler can be found, raise an Exception.
"""
try:
pretty_response = response.json()
except ValueError:
pretty_response = response
for handler in self._response_handlers:
if handler.applies_to(self, response):
log.debug("{} matched: {}".format(handler.__name__, pretty_response))
r = handler(self, object_mapping).build(response)
self._clean_dirty_objects()
return r
raise ZenpyException("Could not handle response: {}".format(pretty_response)) | 0.00664 |
def capture_guest(userid):
"""Caputre a virtual machine image.
Input parameters:
:userid: USERID of the guest, last 8 if length > 8
Output parameters:
    :image_name: Name of the captured image
"""
# check power state, if down, start it
ret = sdk_client.send_request('guest_get_power_state', userid)
power_status = ret['output']
if power_status == 'off':
sdk_client.send_request('guest_start', userid)
# TODO: how much time?
time.sleep(1)
# do capture
image_name = 'image_captured_%03d' % (time.time() % 1000)
sdk_client.send_request('guest_capture', userid, image_name,
capture_type='rootonly', compress_level=6)
return image_name | 0.001344 |
def setColumns(self, columns):
"""
Sets the column count and list of columns to the inputed column list.
:param columns | [<str>, ..]
"""
self.setColumnCount(len(columns))
self.setHeaderLabels(columns) | 0.011111 |
def stop(self):
"""Permanently stop sending heartbeats."""
if not self.stopped:
self.stopped = True
if self.pendingHeartbeat is not None:
self.pendingHeartbeat.cancel()
self.pendingHeartbeat = None | 0.007435 |
def execute_once(self, swap=None,
spell_changes=None, spell_destructions=None,
random_fill=False):
"""Execute the board only one time. Do not execute chain reactions.
Arguments:
swap - pair of adjacent positions
spell_changes - sequence of (position, tile) changes
spell_destructions - sequence of positions to be destroyed
Return: (copy of the board, destroyed tile groups)
"""
bcopy = self.copy() # work with a copy, not self
total_destroyed_tile_groups = list()
# swap if any
bcopy._swap(swap)
# spell changes if any
bcopy._change(spell_changes)
# spell destructions and record if any
# first convert simple positions to groups
spell_destructions = spell_destructions or tuple()
destruction_groups = [[p] for p in spell_destructions]
destroyed_tile_groups = bcopy._destroy(destruction_groups)
total_destroyed_tile_groups.extend(destroyed_tile_groups)
# execute one time only
# look for matched groups
matched_position_groups = bcopy._match()
# destroy and record matched groups
destroyed_tile_groups = bcopy._destroy(matched_position_groups)
total_destroyed_tile_groups.extend(destroyed_tile_groups)
bcopy._fall()
if random_fill:
bcopy._random_fill()
return bcopy, total_destroyed_tile_groups | 0.00271 |
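Usage is one call per simulated turn; the sketch below assumes an existing board instance of this class and two adjacent positions to swap (both illustrative):
swap_pair = ((0, 0), (0, 1))        # hypothetical adjacent positions
new_board, destroyed = board.execute_once(swap=swap_pair, random_fill=True)
for group in destroyed:
    print('destroyed tile group:', group)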
def ticket_show(self, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/tickets#show-ticket"
api_path = "/api/v2/tickets/{id}.json"
api_path = api_path.format(id=id)
return self.call(api_path, **kwargs) | 0.008032 |
def render_as_xml(func):
"""
Decorator to render as XML
    :param func: view function or class to decorate
    :return: the decorated view function, or the class with its renderer set
"""
if inspect.isclass(func):
setattr(func, "_renderer", xml_renderer)
return func
else:
@functools.wraps(func)
def decorated_view(*args, **kwargs):
data = func(*args, **kwargs)
return _build_response(data, dicttoxml)
return decorated_view | 0.002445 |
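Applied as a decorator, it either tags a class with the XML renderer or wraps a plain view function; a hedged sketch of the function case (the dict payload is illustrative):
@render_as_xml
def user_view():
    # The returned dict is serialized to XML by the decorator via dicttoxml.
    return {"user": {"id": 1, "name": "Ada"}}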
def _delete_nodes(self, features):
""" Removes the node corresponding to each item in 'features'.
"""
graph = self._graph
if graph is not None:
for feature in features:
graph.delete_node( id(feature) )
graph.arrange_all() | 0.013746 |
def uniquify(value, seen_values):
""" Adds value to seen_values set and ensures it is unique """
id = 1
new_value = value
while new_value in seen_values:
new_value = "%s%s" % (value, id)
id += 1
seen_values.add(new_value)
return new_value | 0.003597 |
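A quick illustration of the suffixing behaviour, calling the function above directly:
seen = set()
print(uniquify("task", seen))  # -> task
print(uniquify("task", seen))  # -> task1
print(uniquify("task", seen))  # -> task2
print(sorted(seen))            # -> ['task', 'task1', 'task2']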
def tasks(self):
"""
Returns a list of all tasks known to the engine.
:return: A list of task names.
"""
task_input = {'taskName': 'QueryTaskCatalog'}
output = taskengine.execute(task_input, self._engine_name, cwd=self._cwd)
return output['outputParameters']['TASKS'] | 0.009259 |
def auto_load_configs(self):
"""Auto load all configs from app configs"""
for app in apps.get_app_configs():
for model in app.get_models():
config = ModelConfig(model, getattr(app, model.__name__, None))
self.configs[self.get_model_name(model)] = config | 0.00639 |
def call(cmd, input=None, assert_zero_exit_status=True, warn_on_non_zero_exist_status=False, **kwargs):
"""
:rtype: SubprocessResult
Raises OSError if command was not found
    Returns non-zero result in result.ret if subprocess terminated with non-zero exit status.
"""
if (not kwargs.get('shell')) and isinstance(cmd, basestring):
raise ValueError('cmd should be list or tuple, not a string: %r' % cmd)
result = SubprocessResult.call(cmd, input=input, **kwargs)
if assert_zero_exit_status and result.ret != 0:
raise SubprocessError(result)
if warn_on_non_zero_exist_status and result.ret != 0:
logger.warn('subprocess failed %r' % result)
return result | 0.004172 |
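A hedged usage sketch of the wrapper above; it relies only on the ``ret`` attribute that the docstring attributes to ``SubprocessResult``:
result = call(['echo', 'hello'], assert_zero_exit_status=False)  # cmd must be a list/tuple
if result.ret != 0:
    print('command failed with exit status', result.ret)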
def is_fully_defined(self):
"""Returns True iff `self` is fully defined in every dimension."""
return self._dims is not None and all(
dim.value is not None for dim in self._dims
) | 0.009302 |
def _fetch_from_archive(self, method, args):
"""Fetch data from the archive
:param method: the name of the command to execute
:param args: the arguments required by the command
"""
if not self.archive:
raise ArchiveError(cause="Archive not provided")
data = self.archive.retrieve(method, args, None)
if isinstance(data, nntplib.NNTPTemporaryError):
raise data
return data | 0.00432 |
def _realPath(self, newPathName: str = None) -> str:
""" Private Real Path
Get path name.
        @param newPathName: new path name to use, if provided.
@type newPathName: String
@return: Path Name as string.
"""
directory = self._directory()
assert directory
return os.path.join(directory.path,
newPathName if newPathName else self._pathName) | 0.004444 |
def _read(self, fd, mask):
"""Read waiting data and terminate Tk mainloop if done"""
try:
# if EOF was encountered on a tty, avoid reading again because
# it actually requests more data
if select.select([fd],[],[],0)[0]:
snew = os.read(fd, self.nbytes) # returns bytes in PY3K
if PY3K: snew = snew.decode('ascii','replace')
self.value.append(snew)
self.nbytes -= len(snew)
else:
snew = ''
if (self.nbytes <= 0 or len(snew) == 0) and self.widget:
# stop the mainloop
self.widget.quit()
except OSError:
raise IOError("Error reading from %s" % (fd,)) | 0.010596 |
def masked_rec_array_to_mgr(data, index, columns, dtype, copy):
"""
Extract from a masked rec array and create the manager.
"""
# essentially process a record array then fill it
fill_value = data.fill_value
fdata = ma.getdata(data)
if index is None:
index = get_names_from_index(fdata)
if index is None:
index = ibase.default_index(len(data))
index = ensure_index(index)
if columns is not None:
columns = ensure_index(columns)
arrays, arr_columns = to_arrays(fdata, columns)
# fill if needed
new_arrays = []
for fv, arr, col in zip(fill_value, arrays, arr_columns):
mask = ma.getmaskarray(data[col])
if mask.any():
arr, fv = maybe_upcast(arr, fill_value=fv, copy=True)
arr[mask] = fv
new_arrays.append(arr)
# create the manager
arrays, arr_columns = reorder_arrays(new_arrays, arr_columns, columns)
if columns is None:
columns = arr_columns
mgr = arrays_to_mgr(arrays, arr_columns, index, columns, dtype)
if copy:
mgr = mgr.copy()
return mgr | 0.00089 |