text (string, lengths 78 to 104k) | score (float64, 0 to 0.18) |
---|---|
def _build_instance_count_and_type_args(self, master_instance_type,
slave_instance_type, num_instances):
"""
Takes a master instance type (string), a slave instance type
(string), and a number of instances. Returns a dict of request
parameters for use in making a RunJobFlow request.
"""
params = {
'Instances.MasterInstanceType' : master_instance_type,
'Instances.SlaveInstanceType' : slave_instance_type,
'Instances.InstanceCount' : num_instances,
}
return params | 0.011628 |
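For illustration, a hypothetical call (the connection object and instance types below are made up) produces the flat parameter mapping expected by a RunJobFlow request:

```python
# Hypothetical usage; `emr_conn` stands in for the connection object that owns this helper.
params = emr_conn._build_instance_count_and_type_args('m1.small', 'm1.small', 3)
# params == {'Instances.MasterInstanceType': 'm1.small',
#            'Instances.SlaveInstanceType': 'm1.small',
#            'Instances.InstanceCount': 3}
```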
def bytes_iter(obj):
"""Turn a complex object into an iterator of byte strings.
The resulting iterator can be used for caching.
"""
if obj is None:
return
elif isinstance(obj, six.binary_type):
yield obj
elif isinstance(obj, six.string_types):
yield obj
elif isinstance(obj, (date, datetime)):
yield obj.isoformat()
elif is_mapping(obj):
for key in sorted(obj.keys()):
for out in chain(bytes_iter(key), bytes_iter(obj[key])):
yield out
elif is_sequence(obj):
if isinstance(obj, (list, set)):
try:
obj = sorted(obj)
except Exception:
pass
for item in obj:
for out in bytes_iter(item):
yield out
elif isinstance(obj, (types.FunctionType, types.BuiltinFunctionType,
types.MethodType, types.BuiltinMethodType)):
yield getattr(obj, 'func_name', '')
else:
yield six.text_type(obj) | 0.000973 |
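As a usage sketch (the `cache_key` helper and the choice of SHA-1 are assumptions, not part of the original module), the chunks can be folded into a stable cache key by encoding text before hashing:

```python
import hashlib

def cache_key(obj):
    # Hash the chunks produced by bytes_iter into a hex digest; text chunks
    # are UTF-8 encoded so byte strings and text can be mixed freely.
    digest = hashlib.sha1()
    for chunk in bytes_iter(obj):
        if not isinstance(chunk, bytes):
            chunk = chunk.encode('utf-8')
        digest.update(chunk)
    return digest.hexdigest()

cache_key({'user': 42, 'tags': {'a', 'b'}})  # deterministic hex string
```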
def FromStream(cls, stream):
"""Create a DataStreamSelector from a DataStream.
Args:
stream (DataStream): The data stream that we want to convert.
"""
if stream.system:
specifier = DataStreamSelector.MatchSystemOnly
else:
specifier = DataStreamSelector.MatchUserOnly
return DataStreamSelector(stream.stream_type, stream.stream_id, specifier) | 0.007009 |
def publish_ap(self, apid, args):
"""绑定自定义域名
绑定用户自定义的域名,仅对公网域名模式接入点生效。
Args:
- apid: 接入点ID
- args: 请求参数(json),参考 http://kirk-docs.qiniu.com/apidocs/
Returns:
返回一个tuple对象,其格式为(<result>, <ResponseInfo>)
- result 成功返回空dict{},失败返回{"error": "<errMsg string>"}
- ResponseInfo 请求的Response信息
"""
url = '{0}/v3/aps/{1}/publish'.format(self.host, apid)
return self.__post(url, args) | 0.003976 |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'collection_id') and self.collection_id is not None:
_dict['collection_id'] = self.collection_id
if hasattr(self, 'name') and self.name is not None:
_dict['name'] = self.name
if hasattr(self, 'description') and self.description is not None:
_dict['description'] = self.description
if hasattr(self, 'created') and self.created is not None:
_dict['created'] = datetime_to_string(self.created)
if hasattr(self, 'updated') and self.updated is not None:
_dict['updated'] = datetime_to_string(self.updated)
if hasattr(self, 'status') and self.status is not None:
_dict['status'] = self.status
if hasattr(self,
'configuration_id') and self.configuration_id is not None:
_dict['configuration_id'] = self.configuration_id
if hasattr(self, 'language') and self.language is not None:
_dict['language'] = self.language
if hasattr(self,
'document_counts') and self.document_counts is not None:
_dict['document_counts'] = self.document_counts._to_dict()
if hasattr(self, 'disk_usage') and self.disk_usage is not None:
_dict['disk_usage'] = self.disk_usage._to_dict()
if hasattr(self,
'training_status') and self.training_status is not None:
_dict['training_status'] = self.training_status._to_dict()
if hasattr(self, 'source_crawl') and self.source_crawl is not None:
_dict['source_crawl'] = self.source_crawl._to_dict()
return _dict | 0.001147 |
def new_knitting_pattern_set_loader(specification=DefaultSpecification()):
"""Create a loader for a knitting pattern set.
:param specification: a :class:`specification
<knittingpattern.ParsingSpecification.ParsingSpecification>`
for the knitting pattern set, default
:class:`DefaultSpecification`
"""
parser = specification.new_parser(specification)
loader = specification.new_loader(parser.knitting_pattern_set)
return loader | 0.002132 |
def coordination_geometry_symmetry_measures_standard(self,
coordination_geometry,
algo,
points_perfect=None,
optimization=None):
"""
Returns the symmetry measures for a set of permutations (whose setup depends on the coordination geometry)
for the coordination geometry "coordination_geometry". Standard implementation looking for the symmetry
measures of each permutation
:param coordination_geometry: The coordination geometry to be investigated
:return: The symmetry measures for the given coordination geometry for each permutation investigated
"""
# permutations_symmetry_measures = np.zeros(len(algo.permutations),
# np.float)
if optimization == 2:
permutations_symmetry_measures = [None] * len(algo.permutations)
permutations = list()
algos = list()
local2perfect_maps = list()
perfect2local_maps = list()
for iperm, perm in enumerate(algo.permutations):
local2perfect_map = {}
perfect2local_map = {}
permutations.append(perm)
for iperfect, ii in enumerate(perm):
perfect2local_map[iperfect] = ii
local2perfect_map[ii] = iperfect
local2perfect_maps.append(local2perfect_map)
perfect2local_maps.append(perfect2local_map)
points_distorted = self.local_geometry.points_wcs_ctwcc(
permutation=perm)
sm_info = symmetry_measure(points_distorted=points_distorted,
points_perfect=points_perfect)
sm_info['translation_vector'] = self.local_geometry.centroid_with_centre
permutations_symmetry_measures[iperm] = sm_info
algos.append(str(algo))
return permutations_symmetry_measures, permutations, algos, local2perfect_maps, perfect2local_maps
else:
permutations_symmetry_measures = [None] * len(algo.permutations)
permutations = list()
algos = list()
local2perfect_maps = list()
perfect2local_maps = list()
for iperm, perm in enumerate(algo.permutations):
local2perfect_map = {}
perfect2local_map = {}
permutations.append(perm)
for iperfect, ii in enumerate(perm):
perfect2local_map[iperfect] = ii
local2perfect_map[ii] = iperfect
local2perfect_maps.append(local2perfect_map)
perfect2local_maps.append(perfect2local_map)
points_distorted = self.local_geometry.points_wcs_ctwcc(
permutation=perm)
sm_info = symmetry_measure(points_distorted=points_distorted,
points_perfect=points_perfect)
sm_info['translation_vector'] = self.local_geometry.centroid_with_centre
permutations_symmetry_measures[iperm] = sm_info
algos.append(str(algo))
return permutations_symmetry_measures, permutations, algos, local2perfect_maps, perfect2local_maps | 0.004257 |
def _set_fcoe_map_cee_map(self, v, load=False):
"""
Setter method for fcoe_map_cee_map, mapped from YANG variable /fcoe/fcoe_map/fcoe_map_cee_map (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_fcoe_map_cee_map is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_fcoe_map_cee_map() directly.
YANG Description: This provides the grouping for FCoE CEE map
configuration elements.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=fcoe_map_cee_map.fcoe_map_cee_map, is_container='container', presence=False, yang_name="fcoe-map-cee-map", rest_name="cee-map", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure the CEE-map in the FCoE Map', u'alt-name': u'cee-map'}}, namespace='urn:brocade.com:mgmt:brocade-fcoe', defining_module='brocade-fcoe', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """fcoe_map_cee_map must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=fcoe_map_cee_map.fcoe_map_cee_map, is_container='container', presence=False, yang_name="fcoe-map-cee-map", rest_name="cee-map", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure the CEE-map in the FCoE Map', u'alt-name': u'cee-map'}}, namespace='urn:brocade.com:mgmt:brocade-fcoe', defining_module='brocade-fcoe', yang_type='container', is_config=True)""",
})
self.__fcoe_map_cee_map = t
if hasattr(self, '_set'):
self._set() | 0.005411 |
def require_flush(fun):
"""Decorator for methods that need to query security.
It ensures all security related operations are flushed to DB, but
avoids unneeded flushes.
"""
@wraps(fun)
def ensure_flushed(service, *args, **kwargs):
if service.app_state.needs_db_flush:
session = db.session()
if not session._flushing and any(
isinstance(m, (RoleAssignment, SecurityAudit))
for models in (session.new, session.dirty, session.deleted)
for m in models
):
session.flush()
service.app_state.needs_db_flush = False
return fun(service, *args, **kwargs)
return ensure_flushed | 0.001377 |
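A minimal usage sketch, assuming a service class that exposes the `app_state` flag checked above; the class and the query-style model access are illustrative, not from the original codebase:

```python
class SecurityService(object):
    def __init__(self, app_state):
        self.app_state = app_state

    @require_flush
    def users_with_role(self, role):
        # Pending RoleAssignment / SecurityAudit rows are flushed before this
        # query runs, so results reflect in-flight permission changes.
        return RoleAssignment.query.filter_by(role=role).all()
```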
def get_statement_queries(stmts, **params):
"""Get queries used to search based on a statement.
In addition to the stmts, you can enter any parameters standard to the
query. See https://github.com/indralab/indra_db/rest_api for a full list.
Parameters
----------
stmts : list[Statement]
A list of INDRA statements.
"""
def pick_ns(ag):
for ns in ['HGNC', 'FPLX', 'CHEMBL', 'CHEBI', 'GO', 'MESH']:
if ns in ag.db_refs.keys():
dbid = ag.db_refs[ns]
break
else:
ns = 'TEXT'
dbid = ag.name
return '%s@%s' % (dbid, ns)
queries = []
url_base = get_url_base('statements/from_agents')
non_binary_statements = [Complex, SelfModification, ActiveForm]
for stmt in stmts:
kwargs = {}
if type(stmt) not in non_binary_statements:
for pos, ag in zip(['subject', 'object'], stmt.agent_list()):
if ag is not None:
kwargs[pos] = pick_ns(ag)
else:
for i, ag in enumerate(stmt.agent_list()):
if ag is not None:
kwargs['agent%d' % i] = pick_ns(ag)
kwargs['type'] = stmt.__class__.__name__
kwargs.update(params)
query_str = '?' + '&'.join(['%s=%s' % (k, v) for k, v in kwargs.items()
if v is not None])
queries.append(url_base + query_str)
return queries | 0.000678 |
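A usage sketch, assuming INDRA is installed and the REST endpoint is configured; the groundings and the extra `max_stmts` parameter are illustrative:

```python
from indra.statements import Agent, Phosphorylation

# MAP2K1 phosphorylates MAPK1, grounded via HGNC identifiers.
stmt = Phosphorylation(Agent('MAP2K1', db_refs={'HGNC': '6840'}),
                       Agent('MAPK1', db_refs={'HGNC': '6871'}))
urls = get_statement_queries([stmt], max_stmts=100)
# urls[0] ends with something like
# 'statements/from_agents?subject=6840@HGNC&object=6871@HGNC&type=Phosphorylation&max_stmts=100'
```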
def generate_read_batches(
self,
table,
columns,
keyset,
index="",
partition_size_bytes=None,
max_partitions=None,
):
"""Start a partitioned batch read operation.
Uses the ``PartitionRead`` API request to initiate the partitioned
read. Returns a list of batch information needed to perform the
actual reads.
:type table: str
:param table: name of the table from which to fetch data
:type columns: list of str
:param columns: names of columns to be retrieved
:type keyset: :class:`~google.cloud.spanner_v1.keyset.KeySet`
:param keyset: keys / ranges identifying rows to be retrieved
:type index: str
:param index: (Optional) name of index to use, rather than the
table's primary key
:type partition_size_bytes: int
:param partition_size_bytes:
(Optional) desired size for each partition generated. The service
uses this as a hint, the actual partition size may differ.
:type max_partitions: int
:param max_partitions:
(Optional) desired maximum number of partitions generated. The
service uses this as a hint, the actual number of partitions may
differ.
:rtype: iterable of dict
:returns:
mappings of information used to perform actual partitioned reads via
:meth:`process_read_batch`.
"""
partitions = self._get_snapshot().partition_read(
table=table,
columns=columns,
keyset=keyset,
index=index,
partition_size_bytes=partition_size_bytes,
max_partitions=max_partitions,
)
read_info = {
"table": table,
"columns": columns,
"keyset": keyset._to_dict(),
"index": index,
}
for partition in partitions:
yield {"partition": partition, "read": read_info.copy()} | 0.001462 |
def to_json_object(self):
"""Returns a dict representation that can be serialized to JSON."""
obj_dict = dict(namespace_start=self.namespace_start,
namespace_end=self.namespace_end)
if self.app is not None:
obj_dict['app'] = self.app
return obj_dict | 0.006873 |
def verbose_option(default=False):
""" Attaches the option ``verbose`` with its *default* value to the
keyword arguments when the option does not exist. All positional
arguments and keyword arguments are forwarded unchanged.
"""
def decorator(method):
@wraps(method)
def wrapper(*args, **kwargs):
option = Option.verbose.value
kwargs[option] = kwargs.get(option, bool(default))
return method(*args, **kwargs)
return wrapper
return decorator | 0.001898 |
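A usage sketch, assuming `Option.verbose.value` resolves to the string 'verbose'; the decorated function below is made up:

```python
@verbose_option(default=True)
def export_report(path, **kwargs):
    if kwargs['verbose']:  # always present thanks to the decorator
        print('exporting report to', path)

export_report('/tmp/report.csv')                 # verbose, via the default
export_report('/tmp/report.csv', verbose=False)  # caller's explicit value wins
```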
def _init_edges(self, dst_srcs_list):
"""Create all GO edges given a list of (dst, srcs)."""
from goatools.gosubdag.go_paths import get_paths_goobjs, paths2edges
edges_all = set()
goid_all = set()
go2obj = self.go2obj
for dst, srcs in dst_srcs_list:
go2obj_srcs = {}
for goid in srcs:
go2obj_srcs[goid] = go2obj[goid]
go_paths, go_all = get_paths_goobjs(go2obj_srcs.values(), go_top=dst, go2obj=go2obj)
edges_all |= paths2edges(go_paths)
goid_all |= go_all
self.edges = [(a.id, b.id) for a, b in edges_all]
self.goid_all = goid_all | 0.004471 |
def set_start_timestamp(self, start_timestamp=None):
"""stub"""
if start_timestamp is None:
raise NullArgument()
if self.get_start_timestamp_metadata().is_read_only():
raise NoAccess()
if not self.my_osid_object_form._is_valid_integer(
start_timestamp,
self.get_start_timestamp_metadata()):
raise InvalidArgument()
self.my_osid_object_form._my_map['startTimestamp'] = start_timestamp | 0.004073 |
def parse_docstring(self):
"""Parse a single docstring and return its value."""
self.log.debug(
"parsing docstring, token is %r (%s)", self.current.kind, self.current.value
)
while self.current.kind in (tk.COMMENT, tk.NEWLINE, tk.NL):
self.stream.move()
self.log.debug(
"parsing docstring, token is %r (%s)",
self.current.kind,
self.current.value,
)
if self.current.kind == tk.STRING:
docstring = self.current.value
self.stream.move()
return docstring
return None | 0.004666 |
def _repo_packages(self, args, search=False):
'''
List packages for one or more configured repos
'''
packages = []
repo_metadata = self._get_repo_metadata()
for repo in repo_metadata:
for pkg in repo_metadata[repo]['packages']:
if args[1] in pkg:
version = repo_metadata[repo]['packages'][pkg]['info']['version']
release = repo_metadata[repo]['packages'][pkg]['info']['release']
packages.append((pkg, version, release, repo))
for pkg in sorted(packages):
self.ui.status(
'{0}\t{1}-{2}\t{3}'.format(pkg[0], pkg[1], pkg[2], pkg[3])
)
return packages | 0.005413 |
def ensure_time_avg_has_cf_metadata(ds):
"""Add time interval length and bounds coordinates for time avg data.
If the Dataset or DataArray contains time average data, enforce
that there are coordinates that track the lower and upper bounds of
the time intervals, and that there is a coordinate that tracks the
amount of time per time average interval.
CF conventions require that a quantity stored as time averages
over time intervals must have time and time_bounds coordinates [1]_.
aospy further requires AVERAGE_DT for time average data, for accurate
time-weighted averages, which can be inferred from the CF-required
time_bounds coordinate if needed. This step should be done
prior to decoding CF metadata with xarray to ensure proper
computed timedeltas for different calendar types.
.. [1] http://cfconventions.org/cf-conventions/v1.6.0/cf-conventions.html#_data_representative_of_cells
Parameters
----------
ds : Dataset or DataArray
Input data
Returns
-------
Dataset or DataArray
Time average metadata attributes added if needed.
""" # noqa: E501
if TIME_WEIGHTS_STR not in ds:
time_weights = ds[TIME_BOUNDS_STR].diff(BOUNDS_STR)
time_weights = time_weights.rename(TIME_WEIGHTS_STR).squeeze()
if BOUNDS_STR in time_weights.coords:
time_weights = time_weights.drop(BOUNDS_STR)
ds[TIME_WEIGHTS_STR] = time_weights
raw_start_date = ds[TIME_BOUNDS_STR].isel(**{TIME_STR: 0, BOUNDS_STR: 0})
ds[RAW_START_DATE_STR] = raw_start_date.reset_coords(drop=True)
raw_end_date = ds[TIME_BOUNDS_STR].isel(**{TIME_STR: -1, BOUNDS_STR: 1})
ds[RAW_END_DATE_STR] = raw_end_date.reset_coords(drop=True)
for coord in [TIME_BOUNDS_STR, RAW_START_DATE_STR, RAW_END_DATE_STR]:
ds[coord].attrs['units'] = ds[TIME_STR].attrs['units']
if 'calendar' in ds[TIME_STR].attrs:
ds[coord].attrs['calendar'] = ds[TIME_STR].attrs['calendar']
unit_interval = ds[TIME_STR].attrs['units'].split('since')[0].strip()
ds[TIME_WEIGHTS_STR].attrs['units'] = unit_interval
return ds | 0.000463 |
def create_aql_text(*args):
"""
Create an AQL query from string, list, or dict arguments
"""
aql_query_text = ""
for arg in args:
if isinstance(arg, dict):
arg = "({})".format(json.dumps(arg))
elif isinstance(arg, list):
arg = "({})".format(json.dumps(arg)).replace("[", "").replace("]", "")
aql_query_text += arg
return aql_query_text | 0.006667 |
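For example, mixing the three accepted argument types yields a single AQL string; the repository name and fields below are illustrative:

```python
aql = create_aql_text('items.find', {"repo": "libs-release"},
                      '.include', ["name", "path"])
# aql == 'items.find({"repo": "libs-release"}).include("name", "path")'
```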
def createDevice(self, deviceCfg):
"""
Creates a measurement deviceCfg from the input configuration.
:param: deviceCfg: the deviceCfg cfg.
:param: handlers: the loaded handlers.
:return: the constructed deviceCfg.
"""
ioCfg = deviceCfg['io']
type = deviceCfg['type']
if type == 'mpu6050':
fs = deviceCfg.get('fs')
name = deviceCfg.get('name')
if ioCfg['type'] == 'mock':
provider = ioCfg.get('provider')
if provider is not None and provider == 'white noise':
dataProvider = WhiteNoiseProvider()
else:
raise ValueError(provider + " is not a supported mock io data provider")
self.logger.warning("Loading mock data provider for mpu6050")
io = mock_io(dataProvider=dataProvider.provide)
elif ioCfg['type'] == 'smbus':
busId = ioCfg['busId']
self.logger.warning("Loading smbus %d", busId)
io = smbus_io(busId)
else:
raise ValueError(ioCfg['type'] + " is not a supported io provider")
self.logger.warning("Loading mpu6050 " + name + "/" + str(fs))
return mpu6050(io, name=name, fs=fs) if name is not None else mpu6050(io, fs=fs)
else:
raise ValueError(type + " is not a supported device") | 0.003465 |
def _check_update_fw(self, tenant_id, drvr_name):
"""Update the Firewall config by calling the driver.
This function calls the device manager routine to update the device
with modified FW cfg.
"""
if self.fwid_attr[tenant_id].is_fw_complete():
fw_dict = self.fwid_attr[tenant_id].get_fw_dict()
self.modify_fw_device(tenant_id, fw_dict.get('fw_id'), fw_dict) | 0.004739 |
def TRCCp_integral(T, a0, a1, a2, a3, a4, a5, a6, a7, I=0):
r'''Integrates ideal gas heat capacity using the model developed in [1]_.
Best used as a delta only.
The difference in enthalpy with respect to 0 K is given by:
.. math::
\frac{H(T) - H^{ref}}{RT} = a_0 + a_1x(a_2)/(a_2T) + I/T + h(T)/T
h(T) = (a_5 + a_7)\left[(2a_3 + 8a_4)\ln(1-y)+ \left\{a_3\left(1 +
\frac{1}{1-y}\right) + a_4\left(7 + \frac{1}{1-y}\right)\right\}y
+ a_4\left\{3y^2 + (5/3)y^3 + y^4 + (3/5)y^5 + (1/3)y^6\right\}
+ (1/7)\left\{a_4 - \frac{a_5}{(a_6+a_7)^2}\right\}y^7\right]
h(T) = 0 \text{ for } T \le a_7
y = \frac{T-a_7}{T+a_6} \text{ for } T > a_7 \text{ otherwise } 0
Parameters
----------
T : float
Temperature [K]
a1-a7 : float
Coefficients
I : float, optional
Integral offset
Returns
-------
H-H(0) : float
Difference in enthalpy from 0 K , [J/mol]
Notes
-----
Analytical integral as provided in [1]_ and verified with numerical
integration.
Examples
--------
>>> TRCCp_integral(298.15, 4.0, 7.65E5, 720., 3.565, -0.052, -1.55E6, 52.,
... 201., 1.2)
10802.532600592816
References
----------
.. [1] Kabo, G. J., and G. N. Roganov. Thermodynamics of Organic Compounds
in the Gas State, Volume II: V. 2. College Station, Tex: CRC Press, 1994.
'''
if T <= a7:
y = 0.
else:
y = (T - a7)/(T + a6)
y2 = y*y
y4 = y2*y2
if T <= a7:
h = 0.0
else:
first = a6 + a7
second = (2.*a3 + 8.*a4)*log(1. - y)
third = (a3*(1. + 1./(1. - y)) + a4*(7. + 1./(1. - y)))*y
fourth = a4*(3.*y2 + 5./3.*y*y2 + y4 + 0.6*y4*y + 1/3.*y4*y2)
fifth = 1/7.*(a4 - a5/((a6 + a7)**2))*y4*y2*y
h = first*(second + third + fourth + fifth)
return (a0 + a1*exp(-a2/T)/(a2*T) + I/T + h/T)*R*T | 0.005081 |
def updateGroupResponse(self, group, vendorSpecific=None):
"""CNIdentity.addGroupMembers(session, groupName, members) → boolean
https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNIdentity.addGroupMembers.
Args:
group:
vendorSpecific:
Returns:
"""
mmp_dict = {'group': ('group.xml', group.toxml('utf-8'))}
return self.PUT('groups', fields=mmp_dict, headers=vendorSpecific) | 0.00409 |
def update_beliefs(self, corpus_id):
"""Return updated belief scores for a given corpus.
Parameters
----------
corpus_id : str
The ID of the corpus for which beliefs are to be updated.
Returns
-------
dict
A dictionary of belief scores with keys corresponding to Statement
UUIDs and values to new belief scores.
"""
corpus = self.get_corpus(corpus_id)
be = BeliefEngine(self.scorer)
stmts = list(corpus.statements.values())
be.set_prior_probs(stmts)
# Here we set beliefs based on actual curation
for uuid, correct in corpus.curations.items():
stmt = corpus.statements.get(uuid)
if stmt is None:
logger.warning('%s is not in the corpus.' % uuid)
continue
stmt.belief = correct
belief_dict = {st.uuid: st.belief for st in stmts}
return belief_dict | 0.002041 |
def _check_for_misplaced(xs, subkey, other_keys):
"""Ensure configuration keys are not incorrectly nested under other keys.
"""
problems = []
for x in xs:
check_dict = x.get(subkey, {})
for to_check in other_keys:
if to_check in check_dict:
problems.append((x["description"], to_check, subkey))
if len(problems) > 0:
raise ValueError("\n".join(["Incorrectly nested keys found in sample YAML. These should be top level:",
" sample | key name | nested under ",
"----------------+-----------------+----------------"] +
["% 15s | % 15s | % 15s" % (a, b, c) for (a, b, c) in problems])) | 0.006435 |
def filter(self, filter_func):
"""Return a new SampleCollection containing only samples meeting the filter criteria.
Will pass any kwargs (e.g., field or skip_missing) used when instantiating the current class
on to the new SampleCollection that is returned.
Parameters
----------
filter_func : `callable`
A function that will be evaluated on every object in the collection. The function must
return a `bool`. If True, the object will be kept. If False, it will be removed from the
SampleCollection that is returned.
Returns
-------
`onecodex.models.SampleCollection` containing only objects `filter_func` returned True on.
Examples
--------
Generate a new collection of Samples that have a specific filename extension:
new_collection = samples.filter(lambda s: s.filename.endswith('.fastq.gz'))
"""
if callable(filter_func):
return self.__class__([obj for obj in self if filter_func(obj) is True], **self._kwargs)
else:
raise OneCodexException(
"Expected callable for filter, got: {}".format(type(filter_func).__name__)
) | 0.008821 |
def _eval_density(means, variances, observed_values, distribution):
"""
Calculates the gamma/lognormal/normal pdf given means and variances at x,
where x is the experimental species number measured at a particular timepoint. Returns ln(pdf)
:param means: means
:param variances: variances
:param observed_values: experimental species number measured at a particular timepoint
:param distribution: distribution to consider. Either 'gamma', 'normal' or 'lognormal'
:return: normal log of the pdf
"""
means = np.array(means, dtype=NP_FLOATING_POINT_PRECISION)
variances = np.array(variances, dtype=NP_FLOATING_POINT_PRECISION)
observed_values = np.array(observed_values, dtype=NP_FLOATING_POINT_PRECISION)
# Remove data about unobserved datapoints
means = means[~np.isnan(observed_values)]
variances = variances[~np.isnan(observed_values)]
observed_values = observed_values[~np.isnan(observed_values)]
# Remove data for when variance is zero as we cannot estimate distributions that way
non_zero_variances = ~(variances == 0)
means = means[non_zero_variances]
variances = variances[non_zero_variances]
observed_values = observed_values[non_zero_variances]
if distribution == 'gamma':
b = variances / means
a = means / b
log_observed_values = np.log(observed_values)
log_density = (a - 1.0) * log_observed_values - (observed_values / b) - a * np.log(b) - gammaln(a)
elif distribution == 'normal':
log_density = -(observed_values - means) ** 2 / (2 * variances) - np.log(np.sqrt(2 * np.pi * variances))
elif distribution == 'lognormal':
log_density = -(np.log(observed_values) - means) ** 2 / (2 * variances) - np.log(observed_values * np.sqrt(2 * np.pi * variances))
else:
raise ValueError('Unsupported distribution {0!r}'.format(distribution))
total_log_density = np.sum(log_density)
return total_log_density | 0.005133 |
def do_help(self, line):
"""Displays help information."""
print ""
print "Perfdump CLI provides a handful of simple ways to query your"
print "performance data."
print ""
print "The simplest queries are of the form:"
print ""
print "\t[slowest|fastest] [tests|setups]"
print ""
print "For example:"
print ""
print "\tperfdump > slowest tests"
print ""
print "Prints the slowest 10 tests"
print ""
print "Additional grouping of results can be request."
print ""
print "\tperfdump > slowest tests groupby file"
print ""
print "Grouping options include:"
print ""
print "\tfile | module | class | function"
print "" | 0.002513 |
def run_lda(abstracts, n_topics=50, n_words=31, n_iters=1000, alpha=None,
beta=0.001):
""" Perform topic modeling using Latent Dirichlet Allocation with the
Java toolbox MALLET.
Args:
abstracts: A pandas DataFrame with two columns ('pmid' and 'abstract')
containing article abstracts.
n_topics: Number of topics to generate. Default=50.
n_words: Number of top words to return for each topic. Default=31,
based on Poldrack et al. (2012).
n_iters: Number of iterations to run in training topic model.
Default=1000.
alpha: The Dirichlet prior on the per-document topic
distributions.
Default: 50 / n_topics, based on Poldrack et al. (2012).
beta: The Dirichlet prior on the per-topic word distribution.
Default: 0.001, based on Poldrack et al. (2012).
Returns:
weights_df: A pandas DataFrame derived from the MALLET
output-doc-topics output file. Contains the weight assigned
to each article for each topic, which can be used to select
articles for topic-based meta-analyses (accepted threshold
from Poldrack article is 0.001). [n_topics]+1 columns:
'pmid' is the first column and the following columns are
the topic names. The names of the topics match the names
in df (e.g., topic_000).
keys_df: A pandas DataFrame derived from the MALLET
output-topic-keys output file. Contains the top [n_words]
words for each topic, which can act as a summary of the
topic's content. Two columns: 'topic' and 'terms'. The
names of the topics match the names in weights (e.g.,
topic_000).
"""
if abstracts.index.name != 'pmid':
abstracts.index = abstracts['pmid']
resdir = os.path.abspath(get_resource_path())
tempdir = os.path.join(resdir, 'topic_models')
absdir = os.path.join(tempdir, 'abstracts')
if not os.path.isdir(tempdir):
os.mkdir(tempdir)
if alpha is None:
alpha = 50. / n_topics
# Check for presence of abstract files and convert if necessary
if not os.path.isdir(absdir):
print('Abstracts folder not found. Creating abstract files...')
os.mkdir(absdir)
for pmid in abstracts.index.values:
abstract = abstracts.loc[pmid]['abstract']
with open(os.path.join(absdir, str(pmid) + '.txt'), 'w') as fo:
fo.write(abstract)
# Run MALLET topic modeling
print('Generating topics...')
mallet_bin = join(dirname(dirname(__file__)),
'resources/mallet/bin/mallet')
import_str = ('{mallet} import-dir '
'--input {absdir} '
'--output {outdir}/topic-input.mallet '
'--keep-sequence '
'--remove-stopwords').format(mallet=mallet_bin,
absdir=absdir,
outdir=tempdir)
train_str = ('{mallet} train-topics '
'--input {out}/topic-input.mallet '
'--num-topics {n_topics} '
'--num-top-words {n_words} '
'--output-topic-keys {out}/topic_keys.txt '
'--output-doc-topics {out}/doc_topics.txt '
'--num-iterations {n_iters} '
'--output-model {out}/saved_model.mallet '
'--random-seed 1 '
'--alpha {alpha} '
'--beta {beta}').format(mallet=mallet_bin, out=tempdir,
n_topics=n_topics, n_words=n_words,
n_iters=n_iters,
alpha=alpha, beta=beta)
subprocess.call(import_str, shell=True)
subprocess.call(train_str, shell=True)
# Read in and convert doc_topics and topic_keys.
def clean_str(string):
return os.path.basename(os.path.splitext(string)[0])
def get_sort(lst):
return [i[0] for i in sorted(enumerate(lst), key=lambda x:x[1])]
topic_names = ['topic_{0:03d}'.format(i) for i in range(n_topics)]
# doc_topics: Topic weights for each paper.
# The conversion here is pretty ugly at the moment.
# First row should be dropped. First column is row number and can be used
# as the index.
# Second column is 'file: /full/path/to/pmid.txt' <-- Parse to get pmid.
# After that, odd columns are topic numbers and even columns are the
# weights for the topics in the preceding column. These columns are sorted
# on an individual pmid basis by the weights.
n_cols = (2 * n_topics) + 1
dt_df = pd.read_csv(os.path.join(tempdir, 'doc_topics.txt'),
delimiter='\t', skiprows=1, header=None, index_col=0)
dt_df = dt_df[dt_df.columns[:n_cols]]
# Get pmids from filenames
dt_df[1] = dt_df[1].apply(clean_str)
# Put weights (even cols) and topics (odd cols) into separate dfs.
weights_df = dt_df[dt_df.columns[2::2]]
weights_df.index = dt_df[1]
weights_df.columns = range(n_topics)
topics_df = dt_df[dt_df.columns[1::2]]
topics_df.index = dt_df[1]
topics_df.columns = range(n_topics)
# Sort columns in weights_df separately for each row using topics_df.
sorters_df = topics_df.apply(get_sort, axis=1)
weights = weights_df.as_matrix()
sorters = sorters_df.as_matrix()
# there has to be a better way to do this.
for i in range(sorters.shape[0]):
weights[i, :] = weights[i, sorters[i, :]]
# Define topic names (e.g., topic_000)
index = dt_df[1]
weights_df = pd.DataFrame(columns=topic_names, data=weights, index=index)
weights_df.index.name = 'pmid'
# topic_keys: Top [n_words] words for each topic.
keys_df = pd.read_csv(os.path.join(tempdir, 'topic_keys.txt'),
delimiter='\t', header=None, index_col=0)
# Second column is a list of the terms.
keys_df = keys_df[[2]]
keys_df.rename(columns={2: 'terms'}, inplace=True)
keys_df.index = topic_names
keys_df.index.name = 'topic'
# Remove all temporary files (abstract files, model, and outputs).
shutil.rmtree(tempdir)
# Return article topic weights and topic keys.
return weights_df, keys_df | 0.000152 |
def get_staff_url(self):
"""
Return the Admin URL for the current view.
By default, it uses the :func:`get_staff_object` function to base the URL on.
"""
object = self.get_staff_object()
if object is not None:
# View is likely using SingleObjectMixin
return reverse(admin_urlname(object._meta, 'change'), args=(object.pk,))
model = _get_view_model(self)
if model is not None:
# View is likely using MultipleObjectMixin (e.g. ListView)
return reverse(admin_urlname(model._meta, 'changelist'))
return None | 0.00639 |
def get_keybindings(self, mode):
"""look up keybindings from `MODE-maps` sections
:param mode: mode identifier
:type mode: str
:returns: dictionaries of key-cmd for global and specific mode
:rtype: 2-tuple of dicts
"""
globalmaps, modemaps = {}, {}
bindings = self._bindings
# get bindings for mode `mode`
# retain empty assignations to silence corresponding global mappings
if mode in bindings.sections:
for key in bindings[mode].scalars:
value = bindings[mode][key]
if isinstance(value, list):
value = ','.join(value)
modemaps[key] = value
# get global bindings
# ignore the ones already mapped in mode bindings
for key in bindings.scalars:
if key not in modemaps:
value = bindings[key]
if isinstance(value, list):
value = ','.join(value)
if value and value != '':
globalmaps[key] = value
# get rid of empty commands left in mode bindings
for k, v in list(modemaps.items()):
if not v:
del modemaps[k]
return globalmaps, modemaps | 0.001567 |
def move_datetime_year(dt, direction, num_shifts):
"""
Move datetime 1 year in the chosen direction.
unit is a no-op, to keep the API the same as the day case
"""
delta = relativedelta(years=+num_shifts)
return _move_datetime(dt, direction, delta) | 0.00369 |
def isJournal(self, dbname = abrevDBname, manualDB = manualDBname, returnDict ='both', checkIfExcluded = False):
"""Returns `True` if the `Citation`'s `journal` field is a journal abbreviation from the WOS listing found at [http://images.webofknowledge.com/WOK46/help/WOS/A_abrvjt.html](http://images.webofknowledge.com/WOK46/help/WOS/A_abrvjt.html), i.e. checks if the citation is citing a journal.
**Note**: Requires the [j9Abbreviations](../modules/journalAbbreviations.html#metaknowledge.journalAbbreviations.backend.getj9dict) database file and will raise an error if it cannot be found.
**Note**: All parameters are used for getting the data base with [getj9dict](../modules/journalAbbreviations.html#metaknowledge.journalAbbreviations.backend.getj9dict).
# Parameters
_dbname_ : `optional [str]`
> The name of the downloaded database file, the default is determined at run time. It is recommended that this remain untouched.
_manualDB_ : `optional [str]`
> The name of the manually created database file, the default is determined at run time. It is recommended that this remain untouched.
_returnDict_ : `optional [str]`
> default `'both'`, can be used to get both databases or only one with `'WOS'` or `'manual'`.
# Returns
`bool`
> `True` if the `Citation` is for a journal
"""
global abbrevDict
if abbrevDict is None:
abbrevDict = getj9dict(dbname = dbname, manualDB = manualDB, returnDict = returnDict)
if not hasattr(self, 'journal'):
return False
elif checkIfExcluded and self.journal:
try:
if abbrevDict.get(self.journal, [True])[0]:
return False
else:
return True
except IndexError:
return False
else:
if self.journal:
dictVal = abbrevDict.get(self.journal, [b''])[0]
if dictVal:
return dictVal
else:
return False
else:
return False | 0.010565 |
def result(self, timeout=None):
"""
Waits up to timeout for the result of the threaded job.
Returns the result immediately if the job has already been done.
:param timeout: The maximum time to wait for a result (in seconds)
:raise OSError: The timeout raised before the job finished
:raise Exception: The exception encountered during the call, if any
"""
if self._done_event.wait(timeout):
return self._done_event.data
else:
raise OSError("Timeout raised") | 0.003636 |
def _parse_environment(env_str):
'''
Parsing template
'''
try:
env = salt.utils.yaml.safe_load(env_str)
except salt.utils.yaml.YAMLError as exc:
raise ValueError(six.text_type(exc))
else:
if env is None:
env = {}
elif not isinstance(env, dict):
raise ValueError(
'The environment is not a valid YAML mapping data type.'
)
for param in env:
if param not in SECTIONS:
raise ValueError('environment has wrong section "{0}"'.format(param))
return env | 0.003425 |
def plugins(flags=None):
"""
Returns the plugins registered for the exporter. If the optional
flags are set then only plugins with the inputed flags will be
returned.
:param flags | <XExporter.Flags>
"""
XExporter.init()
plugs = XExporter._plugins[:]
if flags is not None:
return filter(lambda x: x.testFlag(flags), plugs)
return plugs | 0.008621 |
def touch(self, filepath):
"""Touches the specified file so that its modified time changes."""
if self.is_ssh(filepath):
self._check_ssh()
remotepath = self._get_remote(filepath)
stdin, stdout, stderr = self.ssh.exec_command("touch {}".format(remotepath))
stdin.close()
else:
os.system("touch {}".format(filepath)) | 0.007538 |
def remove_tenant_from_flavor(request, flavor, tenant):
"""Remove a tenant from the given flavor access list."""
return _nova.novaclient(request).flavor_access.remove_tenant_access(
flavor=flavor, tenant=tenant) | 0.004405 |
def print_pretty(text, **kwargs):
'''
Prints using pycolorterm formatting
:param text: Text with formatting
:type text: string
:param kwargs: Keyword args that will be passed to the print function
:type kwargs: dict
Example::
print_pretty('Hello {BG_RED}WORLD{END}')
'''
text = _prepare(text)
print('{}{}'.format(text.format(**styles).replace(styles['END'], styles['ALL_OFF']), styles['ALL_OFF'])) | 0.004454 |
def contains(self, key):
"""Checks if the specified item exists.
Args:
key: the key of the item to lookup.
Returns:
True if the item exists; False otherwise.
Raises:
Exception if there was an error requesting information about the item.
"""
try:
self._api.objects_get(self._bucket, key)
except datalab.utils.RequestException as e:
if e.status == 404:
return False
raise e
except Exception as e:
raise e
return True | 0.00998 |
def unitary_function(mappings: Dict[str, str]) -> np.ndarray:
"""
Creates a unitary transformation that maps each state to the values specified in mappings.
Some (but not all) of these transformations involve a scratch qubit, so room for one is
always provided. That is, if given the mapping of n qubits, the calculated transformation
will be on n + 1 qubits, where the 0th is the scratch bit and the return value
of the function is left in the 1st.
:param mappings: Dictionary of the mappings of f(x) on all length n bitstrings, e.g.
>>> {'00': '0', '01': '1', '10': '1', '11': '0'}
:return: ndarray representing specified unitary transformation.
"""
num_qubits = int(np.log2(len(mappings)))
bitsum = sum([int(bit) for bit in mappings.values()])
# Only zeros were entered
if bitsum == 0:
return np.kron(SWAP_MATRIX, np.identity(2 ** (num_qubits - 1)))
# Half of the entries were 0, half 1
elif bitsum == 2 ** (num_qubits - 1):
unitary_funct = np.zeros(shape=(2 ** num_qubits, 2 ** num_qubits))
index_lists = [list(range(2 ** (num_qubits - 1))),
list(range(2 ** (num_qubits - 1), 2 ** num_qubits))]
for j in range(2 ** num_qubits):
bitstring = np.binary_repr(j, num_qubits)
value = int(mappings[bitstring])
mappings.pop(bitstring)
i = index_lists[value].pop()
unitary_funct[i, j] = 1
return np.kron(np.identity(2), unitary_funct)
# Only ones were entered
elif bitsum == 2 ** num_qubits:
x_gate = np.array([[0, 1], [1, 0]])
return np.kron(SWAP_MATRIX, np.identity(2 ** (num_qubits - 1))).dot(
np.kron(x_gate, np.identity(2 ** num_qubits)))
else:
raise ValueError("f(x) must be constant or balanced") | 0.00404 |
def format_docstring(template_="{__doc__}", *args, **kwargs):
r"""
Parametrized decorator for adding/changing a function docstring.
For changing a already available docstring in the function, the
``"{__doc__}"`` in the template is replaced by the original function
docstring.
Parameters
----------
template_ :
A format-style template.
*args, **kwargs :
Positional and keyword arguments passed to the formatter.
Examples
--------
Closure docstring personalization:
>>> def add(n):
... @format_docstring(number=n)
... def func(m):
... '''Adds {number} to the given value.'''
... return n + m
... return func
>>> add(3).__doc__
'Adds 3 to the given value.'
>>> add("__").__doc__
'Adds __ to the given value.'
Same but using a lambda (you can also try with ``**locals()``):
>>> def add_with_lambda(n):
... return format_docstring("Adds {0}.", n)(lambda m: n + m)
>>> add_with_lambda(15).__doc__
'Adds 15.'
>>> add_with_lambda("something").__doc__
'Adds something.'
Mixing both template styles with ``{__doc__}``:
>>> templ = "{0}, {1} is my {name} docstring:{__doc__}->\nEND!"
>>> @format_docstring(templ, "zero", "one", "two", name="testing", k=[1, 2])
... def test():
... '''
... Not empty!
... {2} != {k[0]} but {2} == {k[1]}
... '''
>>> print(test.__doc__)
zero, one is my testing docstring:
Not empty!
two != 1 but two == 2
->
END!
"""
def decorator(func):
if func.__doc__:
kwargs["__doc__"] = func.__doc__.format(*args, **kwargs)
func.__doc__ = template_.format(*args, **kwargs)
return func
return decorator | 0.003003 |
def shutdown(self):
"""Shuts down the scheduler and immediately end all pending callbacks.
"""
# Drop all pending item from the executor. Without this, the executor
# will block until all pending items are complete, which is
# undesirable.
try:
while True:
self._executor._work_queue.get(block=False)
except queue.Empty:
pass
self._executor.shutdown() | 0.004396 |
def olympic_sprints(data_set='rogers_girolami_data'):
"""All olympics sprint winning times for multiple output prediction."""
X = np.zeros((0, 2))
Y = np.zeros((0, 1))
cats = {}
for i, dataset in enumerate([olympic_100m_men,
olympic_100m_women,
olympic_200m_men,
olympic_200m_women,
olympic_400m_men,
olympic_400m_women]):
data = dataset()
year = data['X']
time = data['Y']
X = np.vstack((X, np.hstack((year, np.ones_like(year)*i))))
Y = np.vstack((Y, time))
cats[dataset.__name__] = i
data['X'] = X
data['Y'] = Y
data['info'] = "Olympics sprint event winning for men and women to 2008. Data is from Rogers and Girolami's First Course in Machine Learning."
return data_details_return({
'X': X,
'Y': Y,
'covariates' : [decimalyear('year', '%Y'), discrete(cats, 'event')],
'response' : ['time'],
'info': "Olympics sprint event winning for men and women to 2008. Data is from Rogers and Girolami's First Course in Machine Learning.",
'output_info': {
0:'100m Men',
1:'100m Women',
2:'200m Men',
3:'200m Women',
4:'400m Men',
5:'400m Women'}
}, data_set) | 0.011429 |
def membership_vector(clusterer, points_to_predict):
"""Predict soft cluster membership. The result produces a vector
for each point in ``points_to_predict`` that gives a probability that
the given point is a member of a cluster for each of the selected clusters
of the ``clusterer``.
Parameters
----------
clusterer : HDBSCAN
A clustering object that has been fit to the data and
either had ``prediction_data=True`` set, or called the
``generate_prediction_data`` method after the fact.
points_to_predict : array, or array-like (n_samples, n_features)
The new data points to predict cluster labels for. They should
have the same dimensionality as the original dataset over which
clusterer was fit.
Returns
-------
membership_vectors : array (n_samples, n_clusters)
The probability that point ``i`` is a member of cluster ``j`` is
in ``membership_vectors[i, j]``.
See Also
--------
:py:func:`hdbscan.predict.predict`
:py:func:`hdbscan.predict.all_points_membership_vectors`
"""
points_to_predict = points_to_predict.astype(np.float64)
clusters = np.array(
sorted(list(clusterer.condensed_tree_._select_clusters()))).astype(np.intp)
result = np.empty((points_to_predict.shape[0], clusters.shape[0]),
dtype=np.float64)
min_samples = clusterer.min_samples or clusterer.min_cluster_size
neighbor_distances, neighbor_indices = \
clusterer.prediction_data_.tree.query(points_to_predict,
k=2 * min_samples)
for i in range(points_to_predict.shape[0]):
# We need to find where in the tree the new point would go
# for the purposes of outlier membership approximation
nearest_neighbor, lambda_ = \
_find_neighbor_and_lambda(
neighbor_indices[i],
neighbor_distances[i],
clusterer.prediction_data_.core_distances,
min_samples)
neighbor_tree_row = get_tree_row_with_child(
clusterer.condensed_tree_._raw_tree, nearest_neighbor)
if neighbor_tree_row['lambda_val'] <= lambda_:
lambda_ = neighbor_tree_row['lambda_val']
distance_vec = dist_membership_vector(
points_to_predict[i],
clusterer.prediction_data_.exemplars,
clusterer.prediction_data_.dist_metric)
outlier_vec = outlier_membership_vector(
nearest_neighbor,
lambda_,
clusters,
clusterer.condensed_tree_._raw_tree,
clusterer.prediction_data_.leaf_max_lambdas,
clusterer.prediction_data_.cluster_tree)
result[i] = distance_vec ** 0.5 * outlier_vec ** 2.0
result[i] /= result[i].sum()
result[i] *= prob_in_some_cluster(
nearest_neighbor,
lambda_,
clusters,
clusterer.condensed_tree_._raw_tree,
clusterer.prediction_data_.leaf_max_lambdas,
clusterer.prediction_data_.cluster_tree)
return result | 0.000636 |
def to_yellow(self, on: bool=False):
"""
Change the LED to yellow (on or off)
:param on: True or False
:return: None
"""
self._on = on
if on:
self._load_new(led_yellow_on)
if self._toggle_on_click:
self._canvas.bind('<Button-1>',
lambda x: self.to_yellow(False))
else:
self._load_new(led_yellow)
if self._toggle_on_click:
self._canvas.bind('<Button-1>',
lambda x: self.to_yellow(True)) | 0.006678 |
def copy(src_uri, dest_base_uri, config_path=None, progressbar=None):
"""Copy a dataset to another location.
:param src_uri: URI of dataset to be copied
:param dest_base_uri: base of URI for copy target
:param config_path: path to dtool configuration file
:param progressbar: optional progress bar to report copy progress
:returns: URI of new dataset
"""
dataset = DataSet.from_uri(src_uri)
proto_dataset = _copy_create_proto_dataset(
dataset,
dest_base_uri,
config_path,
progressbar
)
_copy_content(dataset, proto_dataset, progressbar)
proto_dataset.freeze(progressbar=progressbar)
return proto_dataset.uri | 0.001603 |
def _annotate_with_past_uses(cls, queryset, user):
''' Annotates the queryset with a usage count for that discount clause
by the given user. '''
if queryset.model == conditions.DiscountForCategory:
matches = (
Q(category=F('discount__discountitem__product__category'))
)
elif queryset.model == conditions.DiscountForProduct:
matches = (
Q(product=F('discount__discountitem__product'))
)
in_carts = (
Q(discount__discountitem__cart__user=user) &
Q(discount__discountitem__cart__status=commerce.Cart.STATUS_PAID)
)
past_use_quantity = When(
in_carts & matches,
then="discount__discountitem__quantity",
)
past_use_quantity_or_zero = Case(
past_use_quantity,
default=Value(0),
)
queryset = queryset.annotate(
past_use_count=Sum(past_use_quantity_or_zero)
)
return queryset | 0.001923 |
def fetch_entries(self):
"""Fetch data and parse it to build a list of broadcast entries."""
current_time = ''
data = []
for row in self.get_rows():
# Stop fetching data if limit has been met
if exceeded_limit(self.limit, len(data)):
break
entry = row.find_all('td')
entry_dict = {}
show_time = entry[0].string
if show_time and show_time != current_time:
current_time = show_time
if not show_time:
show_time = current_time
entry_dict['time'] = show_time
show_string = entry[1].string.split('(')
show = show_string[0][:-1]
net = self._get_net(show_string)
if not self._match_query(show, net):
continue
entry_dict['show'] = show
entry_dict['net'] = net
entry_dict['viewers'] = entry[3].string.strip('*')
entry_dict['rating'], entry_dict['share'] = self._get_rating(entry)
# Add data to initialize broadcast entry
data.append(Entry(**entry_dict))
return data | 0.001692 |
def superReadText(filepath, **kwargs):
"""
A wrapper to superReadCSV which wraps pandas.read_csv().
The benefit of using this function is that it automatically identifies the
column separator.
.tsv files are assumed to have a \t (tab) separation
.csv files are assumed to have a comma separation.
.txt (or any other type) get the first line of the file opened
and get tested for various separators as defined in the identify_sep
function.
"""
if isinstance(filepath, pd.DataFrame):
return filepath
sep = kwargs.get('sep', None)
ext = os.path.splitext(filepath)[1].lower()
if sep is None:
if ext == '.tsv':
kwargs['sep'] = '\t'
elif ext == '.csv':
kwargs['sep'] = ','
else:
found_sep = identify_sep(filepath)
print(found_sep)
kwargs['sep'] = found_sep
return superReadCSV(filepath, **kwargs) | 0.001056 |
def fast_sync_sign_snapshot( snapshot_path, private_key, first=False ):
"""
Append a signature to the end of a snapshot path
with the given private key.
If first is True, then don't expect the signature trailer.
Return True on success
Return False on error
"""
if not os.path.exists(snapshot_path):
log.error("No such file or directory: {}".format(snapshot_path))
return False
file_size = 0
payload_size = 0
write_offset = 0
try:
sb = os.stat(snapshot_path)
file_size = sb.st_size
assert file_size > 8
except Exception as e:
log.exception(e)
return False
num_sigs = 0
snapshot_hash = None
with open(snapshot_path, 'r+') as f:
if not first:
info = fast_sync_inspect(f)
if 'error' in info:
log.error("Failed to inspect {}: {}".format(snapshot_path, info['error']))
return False
num_sigs = len(info['signatures'])
write_offset = info['sig_append_offset']
payload_size = info['payload_size']
else:
# no one has signed yet.
write_offset = file_size
num_sigs = 0
payload_size = file_size
# hash the file and sign the (bin-encoded) hash
privkey_hex = keylib.ECPrivateKey(private_key).to_hex()
hash_hex = get_file_hash( f, hashlib.sha256, fd_len=payload_size )
sigb64 = sign_digest( hash_hex, privkey_hex, hashfunc=hashlib.sha256 )
if BLOCKSTACK_TEST:
log.debug("Signed {} with {} to make {}".format(hash_hex, keylib.ECPrivateKey(private_key).public_key().to_hex(), sigb64))
# append
f.seek(write_offset, os.SEEK_SET)
f.write(sigb64)
f.write('{:08x}'.format(len(sigb64)))
# append number of signatures
num_sigs += 1
f.write('{:08x}'.format(num_sigs))
f.flush()
os.fsync(f.fileno())
return True | 0.006927 |
def find_child(sexpr: Sexpr, *tags: str) -> Optional[Sexpr]:
"""Search for a tag among direct children of the s-expression."""
_assert_valid_sexpr(sexpr)
for child in sexpr[1:]:
if _is_sexpr(child) and child[0] in tags:
return child
return None | 0.003546 |
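A usage sketch with a hand-built s-expression; the tags mimic a KiCad-style file and the example assumes the module's validity helpers accept plain nested lists:

```python
sexpr = ['footprint', ['layer', 'F.Cu'], ['descr', 'SMD resistor']]
find_child(sexpr, 'layer')             # -> ['layer', 'F.Cu']
find_child(sexpr, 'model', 'model3d')  # -> None, no matching child
```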
def _process_uniprot_ids(self, limit=None):
"""
This method processes the mappings from ZFIN gene IDs to UniProtKB IDs.
Triples created:
<zfin_gene_id> a class
<zfin_gene_id> rdfs:label gene_symbol
<uniprot_id> is an Individual
<uniprot_id> has type <polypeptide>
<zfin_gene_id> has_gene_product <uniprot_id>
:param limit:
:return:
"""
LOG.info("Processing UniProt IDs")
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
line_counter = 0
model = Model(graph)
geno = Genotype(graph)
raw = '/'.join((self.rawdir, self.files['uniprot']['file']))
with open(raw, 'r', encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
line_counter += 1
(gene_id, gene_so_id, gene_symbol, uniprot_id
# , empty
) = row
if self.test_mode and gene_id not in self.test_ids['gene']:
continue
gene_id = 'ZFIN:' + gene_id.strip()
uniprot_id = 'UniProtKB:' + uniprot_id.strip()
geno.addGene(gene_id, gene_symbol)
# TODO: Abstract to one of the model utilities
model.addIndividualToGraph(
uniprot_id, None, self.globaltt['polypeptide'])
graph.addTriple(
gene_id, self.globaltt['has gene product'], uniprot_id)
if not self.test_mode and limit is not None and line_counter > limit:
break
LOG.info("Done with UniProt IDs")
return | 0.002242 |
def from_fp(self, file_pointer, comment_lead=['c']):
"""
Read a WCNF+ formula from a file pointer. A file pointer should be
specified as an argument. The only default argument is
``comment_lead``, which can be used for parsing specific comment
lines.
:param file_pointer: a file pointer to read the formula from.
:param comment_lead: a list of characters leading comment lines
:type file_pointer: file pointer
:type comment_lead: list(str)
Usage example:
.. code-block:: python
>>> with open('some-file.wcnf+', 'r') as fp:
... cnf1 = WCNFPlus()
... cnf1.from_fp(fp)
>>>
>>> with open('another-file.wcnf+', 'r') as fp:
... cnf2 = WCNFPlus(from_fp=fp)
"""
self.nv = 0
self.hard = []
self.atms = []
self.soft = []
self.wght = []
self.topw = 0
self.comments = []
comment_lead = tuple('p') + tuple(comment_lead)
for line in file_pointer:
line = line.strip()
if line:
if line[0] not in comment_lead:
if line[-1] == '0': # normal clause
cl = [int(l) for l in line.split()[:-1]]
w = cl.pop(0)
self.nv = max([abs(l) for l in cl] + [self.nv])
if w >= self.topw:
self.hard.append(cl)
else:
self.soft.append(cl)
self.wght.append(w)
else: # atmost/atleast constraint
items = [i for i in line.split()]
lits = [int(l) for l in items[1:-2]]
rhs = int(items[-1])
self.nv = max([abs(l) for l in lits] + [self.nv])
if items[-2][0] == '>':
lits = list(map(lambda l: -l, lits))
rhs = len(lits) - rhs
self.atms.append([lits, rhs])
elif not line.startswith('p wcnf+ '):
self.comments.append(line)
else: # expecting the preamble
self.topw = int(line.rsplit(' ', 1)[1]) | 0.00247 |
def from_deformation(cls, deformation):
"""
Factory method that returns a Strain object from a deformation
gradient
Args:
deformation (3x3 array-like):
"""
dfm = Deformation(deformation)
return cls(0.5 * (np.dot(dfm.trans, dfm) - np.eye(3))) | 0.006452 |
def add_highlights_docs(docs):
"""
"highlight": {
"knowledge_graph.title.value": [
"Before 1 January 2018, will <em>South</em> <em>Korea</em> file a World Trade Organization dispute against the United States related to solar panels?"
]
}
"""
if not isinstance(docs, list):
docs = [docs]
for doc in docs:
if 'matched_sentence' in doc['_source']:
matched_sentences = doc['_source']['matched_sentence']
for sentence in matched_sentences:
# also add matched sentence to knowledge graph
doc['_source']['knowledge_graph']['matched_sentence'] = [{'key': sentence, 'value': sentence}]
paragraph = SimilarityScoreRerank.get_description(doc)
if paragraph:
high_para = SimilarityScoreRerank.create_highlighted_sentences(matched_sentences, paragraph)
if high_para:
if 'highlight' not in doc:
doc['highlight'] = dict()
doc['highlight']['knowledge_graph.description.value'] = [high_para]
return docs | 0.004922 |
def json_decode(s: str) -> Any:
"""
Decodes an object from JSON using our custom decoder.
"""
try:
return json.JSONDecoder(object_hook=json_class_decoder_hook).decode(s)
except json.JSONDecodeError:
log.warning("Failed to decode JSON (returning None): {!r}", s)
return None | 0.003155 |
def get_scheduling_block_event():
"""Return the latest Scheduling Block event"""
event = DB.rpoplpush('scheduling_block_events',
'scheduling_block_event_history')
if event:
event = json.loads(event.decode('utf-8'))
return event | 0.003623 |
def connect(self, action=None, method=None, **kwargs):
"""
Create a <Connect> element
:param action: Action URL
:param method: Action URL method
:param kwargs: additional attributes
:returns: <Connect> element
"""
return self.nest(Connect(action=action, method=method, **kwargs)) | 0.005797 |
def get_kmgraph_meta(mapper_summary):
""" Extract info from mapper summary to be displayed below the graph plot
"""
d = mapper_summary["custom_meta"]
meta = (
"<b>N_cubes:</b> "
+ str(d["n_cubes"])
+ " <b>Perc_overlap:</b> "
+ str(d["perc_overlap"])
)
meta += (
"<br><b>Nodes:</b> "
+ str(mapper_summary["n_nodes"])
+ " <b>Edges:</b> "
+ str(mapper_summary["n_edges"])
+ " <b>Total samples:</b> "
+ str(mapper_summary["n_total"])
+ " <b>Unique_samples:</b> "
+ str(mapper_summary["n_unique"])
)
return meta | 0.001524 |
def neg_loglik(self, beta):
""" Creates negative loglikelihood of the model
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
Returns
----------
- Negative loglikelihood
"""
Z = np.zeros(2)
Z[0] = 1
states = np.zeros([self.state_no, self.data.shape[0]])
states[0,:] = beta[self.z_no:self.z_no+self.data.shape[0]]
states[1,:] = beta[self.z_no+self.data.shape[0]:]
parm = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(self.z_no)]) # transformed distribution parameters
scale, shape, skewness = self._get_scale_and_shape(parm)
return self.state_likelihood(beta, states) + self.family.neg_loglikelihood(self.data, self.link(np.dot(Z, states)), scale, shape, skewness) | 0.012141 |
def make_exttrig_file(cp, ifos, sci_seg, out_dir):
'''
Make an ExtTrig xml file containing information on the external trigger
Parameters
----------
cp : pycbc.workflow.configuration.WorkflowConfigParser object
The parsed configuration options of a pycbc.workflow.core.Workflow.
ifos : str
String containing the analysis interferometer IDs.
sci_seg : ligo.segments.segment
The science segment for the analysis run.
out_dir : str
The output directory, destination for xml file.
Returns
-------
xml_file : pycbc.workflow.File object
The xml file with external trigger information.
'''
# Initialise objects
xmldoc = ligolw.Document()
xmldoc.appendChild(ligolw.LIGO_LW())
tbl = lsctables.New(lsctables.ExtTriggersTable)
cols = tbl.validcolumns
xmldoc.childNodes[-1].appendChild(tbl)
row = tbl.appendRow()
# Add known attributes for this GRB
setattr(row, "event_ra", float(cp.get("workflow", "ra")))
setattr(row, "event_dec", float(cp.get("workflow", "dec")))
setattr(row, "start_time", int(cp.get("workflow", "trigger-time")))
setattr(row, "event_number_grb", str(cp.get("workflow", "trigger-name")))
# Fill in all empty rows
for entry in cols.keys():
if not hasattr(row, entry):
if cols[entry] in ['real_4','real_8']:
setattr(row,entry,0.)
elif cols[entry] == 'int_4s':
setattr(row,entry,0)
elif cols[entry] == 'lstring':
setattr(row,entry,'')
elif entry == 'process_id':
row.process_id = ilwd.ilwdchar("external_trigger:process_id:0")
elif entry == 'event_id':
row.event_id = ilwd.ilwdchar("external_trigger:event_id:0")
else:
print("Column %s not recognized" %(entry), file=sys.stderr)
raise ValueError
# Save file
xml_file_name = "triggerGRB%s.xml" % str(cp.get("workflow",
"trigger-name"))
xml_file_path = os.path.join(out_dir, xml_file_name)
utils.write_filename(xmldoc, xml_file_path)
xml_file_url = urlparse.urljoin("file:", urllib.pathname2url(xml_file_path))
xml_file = File(ifos, xml_file_name, sci_seg, file_url=xml_file_url)
xml_file.PFN(xml_file_url, site="local")
return xml_file | 0.004167 |
def resample(self, sampling_rate=None, variables=None, force_dense=False,
in_place=False, kind='linear'):
''' Resample all dense variables (and optionally, sparse ones) to the
specified sampling rate.
Args:
sampling_rate (int, float): Target sampling rate (in Hz). If None,
uses the instance sampling rate.
variables (list): Optional list of Variables to resample. If None,
all variables are resampled.
force_dense (bool): if True, all sparse variables will be forced to
dense.
in_place (bool): When True, all variables are overwritten in-place.
When False, returns resampled versions of all variables.
kind (str): Argument to pass to scipy's interp1d; indicates the
kind of interpolation approach to use. See interp1d docs for
valid values.
'''
# Store old sampling rate-based variables
sampling_rate = sampling_rate or self.sampling_rate
_variables = {}
for name, var in self.variables.items():
if variables is not None and name not in variables:
continue
if isinstance(var, SparseRunVariable):
if force_dense and is_numeric_dtype(var.values):
_variables[name] = var.to_dense(sampling_rate)
else:
# None if in_place; no update needed
_var = var.resample(sampling_rate,
inplace=in_place,
kind=kind)
if not in_place:
_variables[name] = _var
if in_place:
for k, v in _variables.items():
self.variables[k] = v
self.sampling_rate = sampling_rate
else:
return _variables | 0.001576 |
def _update_persistent_boot(self, device_type=[], persistent=False):
"""Changes the persistent boot device order in BIOS boot mode for host
Note: It uses first boot device from the device_type and ignores rest.
:param device_type: ordered list of boot devices
:param persistent: Boolean flag to indicate if the device to be set as
a persistent boot device
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server.
"""
tenure = 'Once'
new_device = device_type[0]
# If it is a standard device, we need to convert in RIS convention
if device_type[0].upper() in DEVICE_COMMON_TO_RIS:
new_device = DEVICE_COMMON_TO_RIS[device_type[0].upper()]
if persistent:
tenure = 'Continuous'
systems_uri = "/rest/v1/Systems/1"
# Need to set this option first if device is 'UefiTarget'
        if new_device == 'UefiTarget':
system = self._get_host_details()
uefi_devices = (
system['Boot']['UefiTargetBootSourceOverrideSupported'])
iscsi_device = None
for device in uefi_devices:
if device is not None and 'iSCSI' in device:
iscsi_device = device
break
if iscsi_device is None:
msg = 'No UEFI iSCSI bootable device found'
raise exception.IloError(msg)
new_boot_settings = {}
new_boot_settings['Boot'] = {'UefiTargetBootSourceOverride':
iscsi_device}
status, headers, response = self._rest_patch(systems_uri, None,
new_boot_settings)
if status >= 300:
msg = self._get_extended_error(response)
raise exception.IloError(msg)
new_boot_settings = {}
new_boot_settings['Boot'] = {'BootSourceOverrideEnabled': tenure,
'BootSourceOverrideTarget': new_device}
status, headers, response = self._rest_patch(systems_uri, None,
new_boot_settings)
if status >= 300:
msg = self._get_extended_error(response)
raise exception.IloError(msg) | 0.000817 |
def _post(self, *args, **kwargs):
"""
Make a POST request.
"""
data = self._default_data()
data.update(kwargs.get('data') or {})
kwargs['data'] = data
return self._request(requests.post, *args, **kwargs) | 0.007722 |
def pickByDistribution(distribution, r=None):
"""
Pick a value according to the provided distribution.
Example:
::
pickByDistribution([.2, .1])
Returns 0 two thirds of the time and 1 one third of the time.
:param distribution: Probability distribution. Need not be normalized.
:param r: Instance of random.Random. Uses the system instance if one is
not provided.
"""
if r is None:
r = random
x = r.uniform(0, sum(distribution))
for i, d in enumerate(distribution):
if x <= d:
return i
x -= d | 0.010889 |
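A quick empirical check of the behaviour described in the docstring (sketch only; the seed and sample count are arbitrary):
import random
from collections import Counter

rng = random.Random(42)
counts = Counter(pickByDistribution([.2, .1], rng) for _ in range(30000))
print(counts)   # index 0 roughly twice as often as index 1, e.g. Counter({0: ~20000, 1: ~10000})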
def cmd_list(self, argv, help):
"""Return a list of various things"""
parser = argparse.ArgumentParser(
prog="%s list" % self.progname,
description=help,
)
parser.add_argument("list", nargs=1,
metavar="listname",
help="Name of list to show.",
choices=sorted(self.list_cmds))
parser.add_argument("listopts",
metavar="...",
nargs=argparse.REMAINDER,
help="list command options")
args = parser.parse_args(argv)
for name, func in sorted(self.list_cmds[args.list[0]]):
func(args.listopts, func.__doc__) | 0.002642 |
def set_ref(self, ref_key, ref_id):
"""
Using a ref key and ref id set the
reference to the appropriate resource type.
"""
if ref_key == 'NETWORK':
self.network_id = ref_id
elif ref_key == 'NODE':
self.node_id = ref_id
elif ref_key == 'LINK':
self.link_id = ref_id
elif ref_key == 'GROUP':
self.group_id = ref_id
elif ref_key == 'SCENARIO':
self.scenario_id = ref_id
elif ref_key == 'PROJECT':
self.project_id = ref_id
else:
raise HydraError("Ref Key %s not recognised."%ref_key) | 0.004545 |
def augment(self, session, parent, addendum):
'''
Augments a DATASET with some calcs
NB: this is the PUBLIC method
@returns error
'''
parent_calc = session.query(model.Calculation).get(parent)
if not parent_calc or not parent_calc.siblings_count:
return 'Dataset is erroneously selected!'
existing_children, filtered_addendum = [child.checksum for child in parent_calc.children], []
for child in addendum:
            if child not in existing_children:
filtered_addendum.append(child)
if not filtered_addendum:
return 'All these data are already present in this dataset.'
if parent_calc.checksum in filtered_addendum:
return 'A dataset cannot be added into itself.'
higher_lookup = {}
more = parent_calc.parent
distance = 0
while True:
distance += 1
higher, more = more, []
if not higher:
break
for item in higher:
try:
higher_lookup[distance].add(item)
except KeyError:
higher_lookup[distance] = set([item])
if item.parent:
more += item.parent
for members in list(higher_lookup.values()):
for member in members:
if member.checksum in filtered_addendum:
return 'A parent dataset cannot be added to its children dataset.'
parent_meta = session.query(model.Metadata).get(parent)
parent_grid = session.query(model.Grid).get(parent)
info_obj = json.loads(parent_grid.info)
for nested_depth, grid_item, download_size in session.query(model.Calculation.nested_depth, model.Grid.info, model.Metadata.download_size).filter(model.Calculation.checksum == model.Grid.checksum, model.Grid.checksum == model.Metadata.checksum, model.Calculation.checksum.in_(filtered_addendum)).all():
if nested_depth >= parent_calc.nested_depth:
parent_calc.nested_depth = nested_depth + 1
grid_item = json.loads(grid_item)
for entity in self.hierarchy:
topic = grid_item.get(entity['source'])
if not topic:
continue
if entity['source'] == 'standard':
topic = []
if not isinstance(topic, list):
topic = [ topic ]
existing_term = info_obj.get(entity['source'], [])
if not isinstance(existing_term, list):
existing_term = [ existing_term ] # TODO
info_obj[ entity['source'] ] = list(set( existing_term + topic ))
parent_meta.download_size += download_size
info_obj['standard'] = info_obj['standard'][0] # TODO
parent_grid.info = json.dumps(info_obj)
# tags ORM
for entity in self.hierarchy:
if not entity['creates_topic']:
continue
for item in info_obj.get( entity['source'], [] ):
parent_calc.uitopics.append( model.Topic.as_unique(session, cid=entity['cid'], topic="%s" % item) )
for child in session.query(model.Calculation).filter(model.Calculation.checksum.in_(filtered_addendum)).all():
parent_calc.children.append(child)
parent_calc.siblings_count = len(parent_calc.children)
for distance, members in higher_lookup.items():
for member in members:
d = parent_calc.nested_depth - member.nested_depth + distance
if d > 0:
member.nested_depth += d
member.meta_data.download_size += parent_meta.download_size # FIXME
session.add(member)
session.add_all([parent_calc, parent_meta, parent_grid])
session.commit()
return False | 0.006328 |
def send_file_to_host(src_filename, dst_file, filesize):
"""Function which runs on the pyboard. Matches up with recv_file_from_remote."""
import sys
import ubinascii
try:
with open(src_filename, 'rb') as src_file:
bytes_remaining = filesize
if HAS_BUFFER:
buf_size = BUFFER_SIZE
else:
buf_size = BUFFER_SIZE // 2
while bytes_remaining > 0:
read_size = min(bytes_remaining, buf_size)
buf = src_file.read(read_size)
if HAS_BUFFER:
sys.stdout.buffer.write(buf)
else:
sys.stdout.write(ubinascii.hexlify(buf))
bytes_remaining -= read_size
# Wait for an ack so we don't get ahead of the remote
while True:
char = sys.stdin.read(1)
if char:
if char == '\x06':
break
# This should only happen if an error occurs
sys.stdout.write(char)
return True
except:
return False | 0.002553 |
def _iterate_namespace_models(self, **kwargs) -> Iterable:
"""Return an iterator over the models to be converted to the namespace."""
return tqdm(
self._get_query(self.namespace_model),
total=self._count_model(self.namespace_model),
**kwargs
) | 0.009901 |
def put(self, thing_id='0', property_name=None):
"""
Handle a PUT request.
thing_id -- ID of the thing this request is for
property_name -- the name of the property from the URL path
"""
thing = self.get_thing(thing_id)
if thing is None:
self.set_status(404)
return
try:
args = json.loads(self.request.body.decode())
except ValueError:
self.set_status(400)
return
if property_name not in args:
self.set_status(400)
return
if thing.has_property(property_name):
try:
thing.set_property(property_name, args[property_name])
except PropertyError:
self.set_status(400)
return
self.set_header('Content-Type', 'application/json')
self.write(json.dumps({
property_name: thing.get_property(property_name),
}))
else:
self.set_status(404) | 0.00191 |
def format (self, record):
"""Returns the given LogRecord as formatted text."""
record.hostname = self.hostname
return logging.Formatter.format(self, record) | 0.016575 |
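The snippet assumes `self.hostname` was set elsewhere (not shown). A self-contained sketch of how such a formatter is typically wired up, with the hostname captured in `__init__` as an assumption:
import logging
import socket

class HostnameFormatter(logging.Formatter):
    """Formatter that exposes %(hostname)s in format strings."""
    def __init__(self, fmt=None, datefmt=None):
        logging.Formatter.__init__(self, fmt, datefmt)
        self.hostname = socket.gethostname()   # assumption: set here, not in the original snippet

    def format(self, record):
        record.hostname = self.hostname
        return logging.Formatter.format(self, record)

handler = logging.StreamHandler()
handler.setFormatter(HostnameFormatter("%(hostname)s %(levelname)s %(message)s"))
log = logging.getLogger("demo")
log.addHandler(handler)
log.warning("disk almost full")   # -> "<hostname> WARNING disk almost full"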
def cast_to_number_or_bool(inputstr):
"""Cast a string to int, float or bool. Return original string if it can't be
converted.
Scientific expression is converted into float.
"""
if inputstr.strip().lower() == "true":
return True
elif inputstr.strip().lower() == "false":
return False
try:
return int(inputstr)
except ValueError:
try:
return float(inputstr)
except ValueError:
return inputstr | 0.004098 |
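A few illustrative calls covering each branch above:
assert cast_to_number_or_bool(" True ") is True
assert cast_to_number_or_bool("false") is False
assert cast_to_number_or_bool("42") == 42
assert cast_to_number_or_bool("1e-3") == 0.001   # scientific notation becomes float
assert cast_to_number_or_bool("n/a") == "n/a"    # unconvertible strings pass through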
def p_save_data(p):
""" statement : SAVE expr DATA
| SAVE expr DATA ID
| SAVE expr DATA ID LP RP
"""
if p[2].type_ != TYPE.string:
api.errmsg.syntax_error_expected_string(p.lineno(1), p[2].type_)
if len(p) != 4:
entry = SYMBOL_TABLE.access_id(p[4], p.lineno(4))
if entry is None:
p[0] = None
return
entry.accessed = True
access = entry
start = make_unary(p.lineno(4), 'ADDRESS', access, type_=TYPE.uinteger)
if entry.class_ == CLASS.array:
length = make_number(entry.memsize, lineno=p.lineno(4))
else:
length = make_number(entry.type_.size, lineno=p.lineno(4))
else:
access = SYMBOL_TABLE.access_id('.ZXBASIC_USER_DATA', p.lineno(3))
start = make_unary(p.lineno(3), 'ADDRESS', access, type_=TYPE.uinteger)
access = SYMBOL_TABLE.access_id('.ZXBASIC_USER_DATA_LEN', p.lineno(3))
length = make_unary(p.lineno(3), 'ADDRESS', access, type_=TYPE.uinteger)
p[0] = make_sentence(p[1], p[2], start, length) | 0.001805 |
def convert_sub(sub):
"""Convert BEL1 sub() to BEL2 var()"""
args = sub.args
(ref_aa, pos, new_aa) = args
parent_fn_name = sub.parent_function.name_short
prefix_list = {"p": "p.", "r": "r.", "g": "c."}
prefix = prefix_list[parent_fn_name]
new_var_arg = f'"{prefix}{spec["namespaces"]["AminoAcid"]["to_short"][ref_aa.value]}{pos.value}{spec["namespaces"]["AminoAcid"]["to_short"][new_aa.value]}"'
new_var = bel.lang.ast.Function("var", bo.spec)
new_var.add_argument(StrArg(new_var_arg, new_var))
return new_var | 0.003604 |
def collect(self):
"""
Collect and publish metrics
"""
stats = self.parse_stats_file(self.config["status_path"])
if len(stats) == 0:
return {}
elif "info" not in stats.keys():
return {}
elif "programstatus" not in stats.keys():
return {}
metrics = self.get_icinga_stats(stats["programstatus"])
if "hoststatus" in stats.keys():
metrics = dict(
metrics.items() + self.get_host_stats(
stats["hoststatus"]).items())
if "servicestatus" in stats.keys():
metrics = dict(
metrics.items() + self.get_svc_stats(
stats["servicestatus"]).items())
for metric in metrics.keys():
self.log.debug("Publishing '%s %s'.", metric, metrics[metric])
self.publish(metric, metrics[metric]) | 0.002193 |
def _set_compression(self, value):
""" May be used to compress PDF files. Code is more readable
for testing and inspection if not compressed. Requires a boolean. """
if isinstance(value, bool):
self.compression = value
else:
raise Exception(
TypeError, "%s is not a valid option for compression" % value) | 0.007752 |
def join(self, *colrs, **colorkwargs):
""" Like str.join, except it returns a Colr.
Arguments:
colrs : One or more Colrs. If a list or tuple is passed as an
argument it will be flattened.
Keyword Arguments:
fore, back, style...
see color().
"""
flat = []
for clr in colrs:
if isinstance(clr, (list, tuple, GeneratorType)):
# Flatten any lists, at least once.
flat.extend(str(c) for c in clr)
else:
flat.append(str(clr))
if colorkwargs:
fore = colorkwargs.get('fore', None)
back = colorkwargs.get('back', None)
style = colorkwargs.get('style', None)
flat = (
self.color(s, fore=fore, back=back, style=style)
for s in flat
)
return self.__class__(self.data.join(flat)) | 0.002047 |
def validate(self, messages):
"""Returns True if the fields are valid according to the SPDX standard.
Appends user friendly messages to the messages parameter.
"""
messages = self.validate_creators(messages)
messages = self.validate_created(messages)
return messages | 0.006349 |
def get_lib_ffi_shared(libpath, c_hdr):
'''
libpath-->str: shared library filename with optional path
c_hdr-->str: C-style header definitions for functions to wrap
Returns-->(ffi, lib)
'''
lib = SharedLibWrapper(libpath, c_hdr)
ffi = lib.ffi
return (ffi, lib) | 0.003436 |
def kpoints(self):
"""
k_product, default to 50, is kpoint number * length for a & b
directions, also for c direction in bulk calculations
Automatic mesh & Gamma is the default setting.
"""
# To get input sets, the input structure has to has the same number
# of required parameters as a Structure object (ie. 4). Slab
# attributes aren't going to affect the VASP inputs anyways so
# converting the slab into a structure should not matter
kpt = super().kpoints
kpt.comment = "Automatic mesh"
kpt.style = 'Gamma'
# use k_product to calculate kpoints, k_product = kpts[0][0] * a
abc = self.structure.lattice.abc
kpt_calc = [int(self.k_product / abc[0] + 0.5),
int(self.k_product / abc[1] + 0.5), 1]
self.kpt_calc = kpt_calc
# calculate kpts (c direction) for bulk. (for slab, set to 1)
if self.bulk:
kpt_calc[2] = int(self.k_product / abc[2] + 0.5)
kpt.kpts[0] = kpt_calc
return kpt | 0.00185 |
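A worked check of the k_product rule described in the docstring; the lattice lengths are made up:
# For an in-plane cell of 4.0 Å x 4.0 Å and the default k_product of 50:
k_product, a, b = 50, 4.0, 4.0
kpt_calc = [int(k_product / a + 0.5), int(k_product / b + 0.5), 1]
print(kpt_calc)   # [13, 13, 1] -- the c direction stays at 1 for a slab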
def delete(cls, cert_id, background=False):
""" Delete a certificate."""
result = cls.call('cert.delete', cert_id)
if background:
return result
cls.echo("Deleting your certificate.")
cls.display_progress(result)
cls.echo('Your certificate %s has been deleted.' % cert_id)
return result | 0.005618 |
def resample(sig, old=1, new=1, order=3, zero=0.):
"""
Generic resampler based on Waring-Lagrange interpolators.
Parameters
----------
sig :
Input signal (any iterable).
old :
Time duration reference (defaults to 1, allowing percentages to the ``new``
keyword argument). This can be float number, or perhaps a Stream instance.
new :
Time duration that the reference will have after resampling.
For example, if ``old = 1, new = 2``, then
there will be 2 samples yielded for each sample from input.
This can be a float number, or perhaps a Stream instance.
order :
Lagrange interpolator order. The amount of neighboring samples to be used by
the interpolator is ``order + 1``.
zero :
The input should be thought as zero-padded from the left with this value.
Returns
-------
The first value will be the first sample from ``sig``, and then the
interpolator will find the next samples towards the end of the ``sig``.
The actual sampling interval (or time step) for this interpolator obeys to
the ``old / new`` relationship.
Hint
----
The time step can also be time-varying, although that's certainly difficult
to synchonize (one sample is needed for each output sample). Perhaps the
best approach for this case would be a ControlStream keeping the desired
value at any time.
Note
----
The input isn't zero-padded at right. It means that the last output will be
one with interpolated with known data. For endless inputs that's ok, this
makes no difference, but for finite inputs that may be undesirable.
"""
sig = Stream(sig)
threshold = .5 * (order + 1)
step = old / new
data = deque([zero] * (order + 1), maxlen=order + 1)
data.extend(sig.take(rint(threshold)))
idx = int(threshold)
isig = iter(sig)
if isinstance(step, Iterable):
step = iter(step)
while True:
yield lagrange(enumerate(data))(idx)
idx += next(step)
while idx > threshold:
data.append(next(isig))
idx -= 1
else:
while True:
yield lagrange(enumerate(data))(idx)
idx += step
while idx > threshold:
data.append(next(isig))
idx -= 1 | 0.00823 |
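A hedged usage sketch. The generator relies on helpers from its host library (`Stream`, `lagrange`, `rint`, e.g. from AudioLazy), so the import below is an assumption:
# from audiolazy import Stream, lagrange      # assumed host-library imports
sig = list(range(16))                          # a short ramp signal
out = resample(sig, old=8000, new=12000, order=3)   # 8 kHz -> 12 kHz grid
upsampled = [next(out) for _ in range(20)]     # ~1.5 samples out per sample in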
def isexec(self, mode=stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH):
"""
isexec:
@rtype: bool
@return: Indicates whether the path points to a file or folder
which has the executable flag set.
Note that on systems which do not support executables flags the
result may be unpredicatable. On Windows the value is determined
by the file extension, a *.exe file is executable, a *.sh not.
"""
return self.connection.isexec(self, mode) | 0.003922 |
def _dinic_step(graph, capacity, lev, flow, u, target, limit):
""" tenter de pousser le plus de flot de u à target, sans dépasser limit
"""
if limit <= 0:
return 0
if u == target:
return limit
val = 0
for v in graph[u]:
residual = capacity[u][v] - flow[u][v]
if lev[v] == lev[u] + 1 and residual > 0:
z = min(limit, residual)
aug = _dinic_step(graph, capacity, lev, flow, v, target, z)
flow[u][v] += aug
flow[v][u] -= aug
val += aug
limit -= aug
if val == 0:
lev[u] = None # remove unreachable node
return val | 0.001513 |
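For context, a sketch of the outer loop that typically drives such a step function (Dinic's algorithm); the adjacency-list/matrix representation and the driver name are assumptions, not the host module's actual API:
from collections import deque

def dinic_maxflow(graph, capacity, source, target):
    """Hypothetical driver: build a BFS level graph, then push blocking flow."""
    n = len(graph)
    flow = [[0] * n for _ in range(n)]
    total = 0
    while True:
        lev = [None] * n              # level graph over residual edges
        lev[source] = 0
        queue = deque([source])
        while queue:
            u = queue.popleft()
            for v in graph[u]:
                if lev[v] is None and capacity[u][v] > flow[u][v]:
                    lev[v] = lev[u] + 1
                    queue.append(v)
        if lev[target] is None:       # target unreachable: flow is maximal
            return flow, total
        total += _dinic_step(graph, capacity, lev, flow, source, target,
                             float('inf'))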
def _incr_exceptions(self):
"""Increment the number of exceptions for the current connection."""
self._pool_manager.get_connection(self.pid, self._conn).exceptions += 1 | 0.01087 |
def trace(self, data, callback=None):
"""Queue data for tracing
Args:
data (bytearray, string): Unstructured data to trace to any
connected client.
callback (callable): An optional callback that will be called with
a bool value of True when this data actually gets traced.
If the client disconnects and the data is dropped instead,
callback will be called with False.
"""
conn_id = self._find_connection(self.conn_string)
if conn_id is not None:
self.adapter.notify_event_nowait(self.conn_string, 'trace', data)
if callback is not None:
callback(conn_id is not None) | 0.00274 |
def describe_event_source_mapping(UUID=None, EventSourceArn=None,
FunctionName=None,
region=None, key=None, keyid=None, profile=None):
'''
Given an event source mapping ID or an event source ARN and FunctionName,
obtain the current settings of that mapping.
Returns a dictionary of interesting properties.
CLI Example:
.. code-block:: bash
salt myminion boto_lambda.describe_event_source_mapping uuid
'''
ids = _get_ids(UUID, EventSourceArn=EventSourceArn,
FunctionName=FunctionName)
if not ids:
return {'event_source_mapping': None}
UUID = ids[0]
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
desc = conn.get_event_source_mapping(UUID=UUID)
if desc:
keys = ('UUID', 'BatchSize', 'EventSourceArn',
'FunctionArn', 'LastModified', 'LastProcessingResult',
'State', 'StateTransitionReason')
return {'event_source_mapping': dict([(k, desc.get(k)) for k in keys])}
else:
return {'event_source_mapping': None}
except ClientError as e:
return {'error': __utils__['boto3.get_error'](e)} | 0.002342 |
def lz4f_decode(payload):
"""Decode payload using interoperable LZ4 framing. Requires Kafka >= 0.10"""
# pylint: disable-msg=no-member
ctx = lz4f.createDecompContext()
data = lz4f.decompressFrame(payload, ctx)
lz4f.freeDecompContext(ctx)
# lz4f python module does not expose how much of the payload was
# actually read if the decompression was only partial.
if data['next'] != 0:
raise RuntimeError('lz4f unable to decompress full payload')
return data['decomp'] | 0.003945 |
def status(self, value):
"""
Property for getting or setting the bug status
>>> bug.status = "REOPENED"
"""
if self._bug.get('id', None):
if value in VALID_STATUS:
self._bug['status'] = value
else:
raise BugException("Invalid status type was used")
else:
raise BugException("Can not set status unless there is a bug id."
" Please call Update() before setting") | 0.003876 |
def upload_entities(namespace, workspace, entity_data):
"""Upload entities from tab-delimited string.
Args:
namespace (str): project to which workspace belongs
workspace (str): Workspace name
entity_data (str): TSV string describing entites
Swagger:
https://api.firecloud.org/#!/Entities/importEntities
"""
body = urlencode({"entities" : entity_data})
headers = _fiss_agent_header({
'Content-type': "application/x-www-form-urlencoded"
})
uri = "workspaces/{0}/{1}/importEntities".format(namespace, workspace)
return __post(uri, headers=headers, data=body) | 0.003155 |
def get_params(self):
"Parameters used to initialize the class"
import inspect
a = inspect.getargspec(self.__init__)[0]
out = dict()
for key in a[1:]:
value = getattr(self, "_%s" % key, None)
out[key] = value
return out | 0.006873 |
def wait_for_device_ready(self, timeout=None, wait_polling_interval=None, after_first=None):
"""Wait for the device to become ready for reliable interaction via adb.
NOTE: if the device is *already* ready this method will timeout.
:param timeout: Maximum time to wait for the device to become ready
:param wait_polling_interval: Interval at which to poll for device readiness.
:param after_first: A function to run after first polling for device
readiness. This allows use cases such as stopping b2g
setting the unready state, and then restarting b2g.
"""
if timeout is None:
timeout = self._timeout
if wait_polling_interval is None:
wait_polling_interval = self._wait_polling_interval
self._logger.info("Waiting for device to become ready")
profiles = self.get_profiles()
assert len(profiles) == 1
profile_dir = profiles.itervalues().next()
prefs_file = posixpath.normpath(profile_dir + "/prefs.js")
current_date = int(self.shell_output('date +\"%s\"'))
set_date = current_date - (365 * 24 * 3600 + 24 * 3600 + 3600 + 60 + 1)
try:
self.shell_output("touch -t %i %s" % (set_date, prefs_file))
except adb.ADBError:
# See Bug 1092383, the format for the touch command
# has changed for flame-kk builds.
set_date = datetime.datetime.fromtimestamp(set_date)
self.shell_output("touch -t %s %s" %
(set_date.strftime('%Y%m%d.%H%M%S'),
prefs_file))
def prefs_modified():
times = [None, None]
def inner():
try:
listing = self.shell_output("ls -l %s" % (prefs_file))
mode, user, group, size, date, time, name = listing.split(None, 6)
mtime = "%s %s" % (date, time)
except:
return False
if times[0] is None:
times[0] = mtime
else:
times[1] = mtime
if times[1] != times[0]:
return True
return False
return inner
poll_wait(prefs_modified(), timeout=timeout,
polling_interval=wait_polling_interval, after_first=after_first) | 0.004049 |
def get_DRAT(delta_x_prime, delta_y_prime, max_ptrm_check):
"""
Input: TRM length of best fit line (delta_x_prime),
NRM length of best fit line,
max_ptrm_check
Output: DRAT (maximum difference produced by a ptrm check normed by best fit line),
length best fit line
"""
L = numpy.sqrt(delta_x_prime**2 + delta_y_prime**2)
DRAT = (old_div(max_ptrm_check, L)) * 100
return DRAT, L | 0.004662 |
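A worked example with made-up values:
# A best-fit line with delta_x_prime = 0.3 and delta_y_prime = 0.4 has
# length L = sqrt(0.09 + 0.16) = 0.5, so a maximum pTRM-check difference
# of 0.05 gives DRAT = (0.05 / 0.5) * 100 = 10%.
drat, line_length = get_DRAT(0.3, 0.4, 0.05)
print(drat, line_length)   # 10.0 0.5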
def xinfo_help(self):
"""Retrieve help regarding the ``XINFO`` sub-commands"""
fut = self.execute(b'XINFO', b'HELP')
return wait_convert(fut, lambda l: b'\n'.join(l)) | 0.010526 |
def _write(self, ret):
"""
This function needs to correspond to this:
https://github.com/saltstack/salt/blob/develop/salt/returners/redis_return.py#L88
"""
self.redis.set('{0}:{1}'.format(ret['id'], ret['jid']), json.dumps(ret))
self.redis.lpush('{0}:{1}'.format(ret['id'], ret['fun']), ret['jid'])
self.redis.sadd('minions', ret['id'])
self.redis.sadd('jids', ret['jid']) | 0.006881 |
def _gaps_from(intervals):
"""
    From a list of (start, end) intervals extract
    the list of gap sizes between consecutive
    intervals (b_start - a_end for each consecutive pair).
"""
sliding_window = zip(intervals, intervals[1:])
gaps = [b[0] - a[1] for a, b in sliding_window]
return gaps | 0.003484 |
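For example, three non-overlapping intervals:
print(_gaps_from([(0, 2), (5, 7), (9, 12)]))   # [3, 2]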
def fetch(self, start=False, full_data=True):
""" Get the current job data and possibly flag it as started. """
if self.id is None:
return self
if full_data is True:
fields = None
elif isinstance(full_data, dict):
fields = full_data
else:
fields = {
"_id": 0,
"path": 1,
"params": 1,
"status": 1,
"retry_count": 1,
}
if start:
self.datestarted = datetime.datetime.utcnow()
self.set_data(self.collection.find_and_modify(
{
"_id": self.id,
"status": {"$nin": ["cancel", "abort", "maxretries"]}
},
{"$set": {
"status": "started",
"datestarted": self.datestarted,
"worker": self.worker.id
},
"$unset": {
"dateexpires": 1 # we don't want started jobs to expire unexpectedly
}},
projection=fields)
)
context.metric("jobs.status.started")
else:
self.set_data(self.collection.find_one({
"_id": self.id
}, projection=fields))
if self.data is None:
context.log.info(
"Job %s not found in MongoDB or status was cancelled!" %
self.id)
self.stored = True
return self | 0.003236 |
def _find_batch_containing_event(self, uuid):
"""Find the batch number that contains a certain event.
Parameters:
uuid -- the event uuid to search for.
returns -- a batch number, or None if not found.
"""
if self.estore.key_exists(uuid):
# Reusing already opened DB if possible
return self.batchno
else:
for batchno in range(self.batchno - 1, -1, -1):
# Iterating backwards here because we are more likely to find
# the event in an later archive, than earlier.
db = self._open_event_store(batchno)
with contextlib.closing(db):
if db.key_exists(uuid):
return batchno
return None | 0.002528 |