text (string, 78 to 104k characters) | score (float64, 0 to 0.18)
---|---
def insert_with_id(obj):
"""
Generates a unique ID for the supplied legislator/committee/bill
and inserts it into the appropriate collection.
"""
if '_id' in obj:
raise ValueError("object already has '_id' field")
# add created_at/updated_at on insert
obj['created_at'] = datetime.datetime.utcnow()
obj['updated_at'] = obj['created_at']
if obj['_type'] == 'person' or obj['_type'] == 'legislator':
collection = db.legislators
id_type = 'L'
elif obj['_type'] == 'committee':
collection = db.committees
id_type = 'C'
elif obj['_type'] == 'bill':
collection = db.bills
id_type = 'B'
else:
raise ValueError("unknown _type for object")
# get abbr
abbr = obj[settings.LEVEL_FIELD].upper()
id_reg = re.compile('^%s%s' % (abbr, id_type))
# Find the next available _id and insert
id_prefix = '%s%s' % (abbr, id_type)
cursor = collection.find({'_id': id_reg}).sort('_id', -1).limit(1)
try:
new_id = int(next(cursor)['_id'][len(abbr) + 1:]) + 1
except StopIteration:
new_id = 1
while True:
if obj['_type'] == 'bill':
obj['_id'] = '%s%08d' % (id_prefix, new_id)
else:
obj['_id'] = '%s%06d' % (id_prefix, new_id)
obj['_all_ids'] = [obj['_id']]
if obj['_type'] in ['person', 'legislator']:
obj['leg_id'] = obj['_id']
try:
return collection.insert(obj, safe=True)
except pymongo.errors.DuplicateKeyError:
new_id += 1 | 0.000632 |
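A quick illustration of the id scheme assembled above; only the padding widths come from the code, the abbreviation and counter are made up:
abbr, new_id = 'NC', 1
bill_id = '%sB%08d' % (abbr, new_id)   # bills get an 8-digit counter
leg_id = '%sL%06d' % (abbr, new_id)    # legislators/committees get 6 digits
print(bill_id, leg_id)                 # NCB00000001 NCL000001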
def do_switch(self, subcmd, opts, *args):
"""Update the working copy to a different URL.
usage:
1. switch URL [PATH]
2. switch --relocate FROM TO [PATH...]
1. Update the working copy to mirror a new URL within the repository.
This behaviour is similar to 'svn update', and is the way to
move a working copy to a branch or tag within the same repository.
2. Rewrite working copy URL metadata to reflect a syntactic change only.
This is used when the repository's root URL changes (such as a scheme
or hostname change) but your working copy still reflects the same
directory within the same repository.
${cmd_option_list}
"""
print "'svn %s' opts: %s" % (subcmd, opts)
print "'svn %s' args: %s" % (subcmd, args) | 0.00576 |
def _rainbow_rgb_chars(self, s, freq=0.1, spread=3.0, offset=0):
""" Iterate over characters in a string to build data needed for a
rainbow effect.
Yields tuples of (char, (r, g, b)).
Arguments:
s : String to colorize.
freq : Frequency/"tightness" of colors in the rainbow.
Best results when in the range 0.0-1.0.
Default: 0.1
spread : Spread/width of colors.
Default: 3.0
offset : Offset for start of rainbow.
Default: 0
"""
return (
(c, self._rainbow_rgb(freq, offset + i / spread))
for i, c in enumerate(s)
) | 0.002584 |
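A hedged sketch of how a caller might consume the (char, (r, g, b)) tuples yielded above; the truecolor ANSI escape sequences are an assumption of the sketch, not part of the original class:
def rainbow_text(pairs):
    """Join (char, (r, g, b)) pairs into a single ANSI truecolor string."""
    parts = ['\x1b[38;2;%d;%d;%dm%s' % (r, g, b, ch) for ch, (r, g, b) in pairs]
    return ''.join(parts) + '\x1b[0m'   # reset color at the end

print(rainbow_text([('h', (255, 0, 0)), ('i', (0, 255, 0))]))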
def detect(self, filename, offset, standalone=False):
"""Verifies NTFS filesystem signature.
Returns:
bool: True if filesystem signature at offset 0x03 \
matches 'NTFS ', False otherwise.
"""
r = RawStruct(
filename=filename,
offset=offset + SIG_OFFSET,
length=SIG_SIZE)
oem_id = r.data
if oem_id == b"NTFS ":
return True
return False | 0.004237 |
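The same check as a minimal standalone sketch, assuming SIG_OFFSET is 3 and SIG_SIZE is 8 (the 8-byte OEM ID field of an NTFS boot sector); RawStruct is replaced by plain seek/read:
import io

def looks_like_ntfs(stream, offset=0):
    """Read the 8-byte OEM ID at offset + 3 and compare it to b'NTFS    '."""
    stream.seek(offset + 3)
    return stream.read(8) == b"NTFS    "

fake_boot_sector = b"\xeb\x52\x90" + b"NTFS    " + b"\x00" * 501
print(looks_like_ntfs(io.BytesIO(fake_boot_sector)))   # True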
def read_config_files(files):
"""Read and merge a list of config files."""
config = ConfigObj()
for _file in files:
_config = read_config_file(_file)
if _config:
config.merge(_config)
config.filename = _config.filename
return config | 0.003279 |
def aggregate(self, zeroValue, seqOp, combOp):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given combine functions and a neutral "zero
value."
The function C{op(t1, t2)} is allowed to modify C{t1} and return it
as its result value to avoid object allocation; however, it should not
modify C{t2}.
The first function (seqOp) can return a different result type, U, than
the type of this RDD. Thus, we need one operation for merging a T into
a U and one operation for merging two U's.
>>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1))
>>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1]))
>>> sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp)
(10, 4)
>>> sc.parallelize([]).aggregate((0, 0), seqOp, combOp)
(0, 0)
"""
seqOp = fail_on_stopiteration(seqOp)
combOp = fail_on_stopiteration(combOp)
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
# collecting result of mapPartitions here ensures that the copy of
# zeroValue provided to each partition is unique from the one provided
# to the final reduce call
vals = self.mapPartitions(func).collect()
return reduce(combOp, vals, zeroValue) | 0.001384 |
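A plain-Python sketch of the same semantics without Spark: each partition is folded with seqOp starting from its own copy of zeroValue, then the partial results are merged with combOp (function names mirror the method above):
import copy
from functools import reduce

def aggregate_local(partitions, zeroValue, seqOp, combOp):
    # one fold per partition, each starting from a fresh copy of zeroValue
    vals = [reduce(seqOp, part, copy.deepcopy(zeroValue)) for part in partitions]
    # final merge of the per-partition results
    return reduce(combOp, vals, zeroValue)

seqOp = lambda x, y: (x[0] + y, x[1] + 1)
combOp = lambda x, y: (x[0] + y[0], x[1] + y[1])
print(aggregate_local([[1, 2], [3, 4]], (0, 0), seqOp, combOp))   # (10, 4)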
def t_TEXT(self, token):
ur'(?P<text>[^<#\n ].+?[^ ])(?=\n)'
text = token.lexer.lexmatch.group("text").decode("utf8")
token.value = text
return token | 0.01105 |
def calling(data):
"""Main function to parallelize peak calling."""
chip_bam = data.get("work_bam")
input_bam = data.get("work_bam_input", None)
caller_fn = get_callers()[data["peak_fn"]]
name = dd.get_sample_name(data)
out_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), data["peak_fn"], name))
out_files = caller_fn(name, chip_bam, input_bam, dd.get_genome_build(data), out_dir,
dd.get_chip_method(data), data["resources"], data)
greylistdir = greylisting(data)
data.update({"peaks_files": out_files})
# data["input_bam_filter"] = input_bam
if greylistdir:
data["greylist"] = greylistdir
return [[data]] | 0.004286 |
def fix_variables(bqm, sampling_mode=True):
"""Determine assignments for some variables of a binary quadratic model.
Roof duality finds a lower bound for the minimum of a quadratic polynomial. It
can also find minimizing assignments for some of the polynomial's variables;
these fixed variables take the same values in all optimal solutions [BHT]_ [BH]_.
A quadratic pseudo-Boolean function can be represented as a network to find
the lower bound through network-flow computations. `fix_variables` uses maximum
flow in the implication network to correctly fix variables. Consequently, you can
find an assignment for the remaining variables that attains the optimal value.
Args:
bqm (:obj:`.BinaryQuadraticModel`)
A binary quadratic model.
sampling_mode (bool, optional, default=True):
In sampling mode, only roof-duality is used. When `sampling_mode` is false, strongly
connected components are used to fix more variables, but in some optimal solutions
these variables may take different values.
Returns:
dict: Variable assignments for some variables of the specified binary quadratic model.
Examples:
This example creates a binary quadratic model with a single ground state
and fixes the model's single variable to the minimizing assignment.
>>> bqm = dimod.BinaryQuadraticModel.empty(dimod.SPIN)
>>> bqm.add_variable('a', 1.0)
>>> dimod.fix_variables(bqm)
{'a': -1}
This example has two ground states, :math:`a=b=-1` and :math:`a=b=1`, with
no variable having a single value for all ground states, so neither variable
is fixed.
>>> bqm = dimod.BinaryQuadraticModel.empty(dimod.SPIN)
>>> bqm.add_interaction('a', 'b', -1.0)
>>> dimod.fix_variables(bqm) # doctest: +SKIP
{}
This example turns sampling mode off, so variables are fixed to an assignment
that attains the ground state.
>>> bqm = dimod.BinaryQuadraticModel.empty(dimod.SPIN)
>>> bqm.add_interaction('a', 'b', -1.0)
>>> dimod.fix_variables(bqm, sampling_mode=False) # doctest: +SKIP
{'a': 1, 'b': 1}
.. [BHT] Boros, E., P.L. Hammer, G. Tavares. Preprocessing of Unconstrained Quadratic Binary
Optimization. Rutcor Research Report 10-2006, April, 2006.
.. [BH] Boros, E., P.L. Hammer. Pseudo-Boolean optimization. Discrete Applied Mathematics 123,
(2002), pp. 155-225
"""
try:
from dimod.roof_duality._fix_variables import fix_variables_wrapper
except ImportError:
raise ImportError("c++ extension roof_duality is not built")
if sampling_mode:
method = 2 # roof-duality only
else:
method = 1 # roof-duality and strongly connected components
linear = bqm.linear
if all(v in linear for v in range(len(bqm))):
# we can work with the binary form of the bqm directly
fixed = fix_variables_wrapper(bqm.binary, method)
else:
try:
inverse_mapping = dict(enumerate(sorted(linear)))
except TypeError:
# in python3 unlike types cannot be sorted
inverse_mapping = dict(enumerate(linear))
mapping = {v: i for i, v in inverse_mapping.items()}
fixed = fix_variables_wrapper(bqm.relabel_variables(mapping, inplace=False).binary, method)
fixed = {inverse_mapping[v]: val for v, val in fixed.items()}
if bqm.vartype is Vartype.SPIN:
return {v: 2*val - 1 for v, val in fixed.items()}
else:
return fixed | 0.004677 |
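The final vartype branch maps binary assignments {0, 1} back to spin assignments {-1, +1}; a one-line check of the 2*val - 1 formula:
print({val: 2 * val - 1 for val in (0, 1)})   # {0: -1, 1: 1}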
def elapsed_time(self):
"""To know the duration of the function.
This property might return None if the function is still running.
"""
if self._end_time:
elapsed_time = round(self._end_time - self._start_time, 3)
return elapsed_time
else:
return None | 0.006116 |
def getExtraIncludes(self):
''' Some components must export whole directories full of headers into
the search path. This is really really bad, and they shouldn't do
it, but support is provided as a concession to compatibility.
'''
if 'extraIncludes' in self.description:
return [os.path.normpath(x) for x in self.description['extraIncludes']]
else:
return [] | 0.006849 |
def convert(ids, from_type):
'''Uses the NCBI ID Converter API to convert a list of publication IDs in one format, e.g. DOI identifiers, to
another format, e.g. PubMed identifiers.
ids is a list of IDs of the type from_type e.g. a from_type of 'doi' specifies DOI identifiers.
The function returns a Python dict with the mappings from the input IDs to IDs of all other types.
'''
if from_type not in converter_types:
raise PubMedConverterTypeException(from_type)
# Avoid multiple requests of the same ID
mapping = {}
ids = list(set(ids))
# Request the mapping from the server
query_string = "?ids=%s&idtype=%s" % (urllib2.quote(",".join(ids), ''), from_type)
xml = get_resource("www.ncbi.nlm.nih.gov", '/pmc/utils/idconv/v1.0/%s' % query_string).strip()
# Parse the response
try:
_dom = parseString(xml)
main_tag = _dom.getElementsByTagName("pmcids")
assert(len(main_tag) == 1)
main_tag = main_tag[0]
request_status = main_tag.getAttribute('status')
except Exception as e:
raise PubMedIDRetrievalException('An error occurred retrieving the XML from the PubMed ID Converter API: %s.' % str(e))
if request_status == 'ok':
for record_tag in main_tag.getElementsByTagName("record"):
attributes = record_tag.attributes
record_keys = attributes.keys()
assert('requested-id' in record_keys)
from_key = attributes['requested-id'].value
assert(from_key not in mapping)
mapping[from_key] = {}
for k in record_keys:
if k != 'requested-id':
mapping[from_key][k] = attributes[k].value
else:
# todo: parse the error tag here to print more details
raise PubMedIDRetrievalException('The request to the PubMed ID Converter API failed. Please check that the IDs are of the correct types.')
return mapping | 0.004055 |
def rshift_logical(self, shift_amount):
"""
Logical shift right.
:param StridedInterval shift_amount: The amount of shifting
:return: The shifted StridedInterval
:rtype: StridedInterval
"""
lower, upper = self._pre_shift(shift_amount)
# Shift the lower_bound and upper_bound by all possible amounts, and union all possible results
ret = None
for amount in xrange(lower, upper + 1):
si_ = self._rshift_logical(amount)
ret = si_ if ret is None else ret.union(si_)
ret.normalize()
ret.uninitialized = self.uninitialized
return ret | 0.004532 |
def conv_layer(x,
hidden_size,
kernel_size,
stride,
pooling_window,
dropout_rate,
dilation_rate,
name="conv"):
"""Single conv layer with relu, optional pooling, and dropout."""
with tf.variable_scope(name):
out = x
out = common_layers.conv1d_block(
out,
hidden_size, [(dilation_rate, kernel_size)],
strides=stride,
first_relu=False,
padding="same")
out = tf.nn.relu(out)
if pooling_window:
out = tf.layers.max_pooling1d(
out, pooling_window, pooling_window, padding="same")
out = tf.layers.dropout(out, dropout_rate)
return out | 0.005587 |
def _set_secondary_path(self, v, load=False):
"""
Setter method for secondary_path, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/lsp/secondary_path (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_secondary_path is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_secondary_path() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("secpath_name",secondary_path.secondary_path, yang_name="secondary-path", rest_name="secondary-path", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='secpath-name', extensions={u'tailf-common': {u'info': u'Define Secondary Path for the LSP', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'callpoint': u'MplsLspSecPath', u'cli-mode-name': u'config-router-mpls-lsp-$(../lsp-name)-secpath-$(secpath-name)'}}), is_container='list', yang_name="secondary-path", rest_name="secondary-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Define Secondary Path for the LSP', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'callpoint': u'MplsLspSecPath', u'cli-mode-name': u'config-router-mpls-lsp-$(../lsp-name)-secpath-$(secpath-name)'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """secondary_path must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("secpath_name",secondary_path.secondary_path, yang_name="secondary-path", rest_name="secondary-path", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='secpath-name', extensions={u'tailf-common': {u'info': u'Define Secondary Path for the LSP', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'callpoint': u'MplsLspSecPath', u'cli-mode-name': u'config-router-mpls-lsp-$(../lsp-name)-secpath-$(secpath-name)'}}), is_container='list', yang_name="secondary-path", rest_name="secondary-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Define Secondary Path for the LSP', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'callpoint': u'MplsLspSecPath', u'cli-mode-name': u'config-router-mpls-lsp-$(../lsp-name)-secpath-$(secpath-name)'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)""",
})
self.__secondary_path = t
if hasattr(self, '_set'):
self._set() | 0.003766 |
def _splitGenoGeneWindow(self,annotation_file=None,cis=1e4,funct='protein_coding',minSnps=1.,maxSnps=SP.inf):
"""
split into windows based on genes
"""
#1. load annotation
assert annotation_file is not None, 'Splitter:: specify annotation file'
try:
f = h5py.File(annotation_file,'r')
geneID = f['geneID'][:]
gene_chrom = f['chrom'][:]
gene_start = f['start'][:]
gene_end = f['end'][:]
gene_strand = f['strand'][:]
gene_function = f['function'][:]
f.close()
except Exception:
print('Splitter:: format annotation file not valid')
# if funct is not None, it has to be a list
if funct is not None and not isinstance(funct, list): funct = [funct]
windows = []
nSnps = []
Igene = []
#2. calculates windows
for gene_i in range(geneID.shape[0]):
if funct is not None:
if gene_function[gene_i] not in funct:
Igene.append(False)
continue
wnd = [gene_chrom[gene_i],gene_start[gene_i]-cis,gene_end[gene_i]+cis]
Ir = (self.chrom==wnd[0])*(self.pos>=wnd[1])*(self.pos<=wnd[2])
_nSnps = Ir.sum()
if _nSnps>=minSnps and _nSnps<=maxSnps:
windows.append(wnd)
nSnps.append(_nSnps)
Igene.append(True)
else:
Igene.append(False)
Igene = SP.array(Igene)
self.info['nSnps'] = SP.array(nSnps)
self.info['geneID'] = geneID[Igene]
self.info['gene_start'] = gene_start[Igene]
self.info['gene_end'] = gene_end[Igene]
self.info['gene_strand'] = gene_strand[Igene]
self.info['gene_function'] = gene_function[Igene]
return SP.array(windows) | 0.017516 |
def _model(self, data, beta):
""" Creates the structure of the model
Parameters
----------
data : np.array
Contains the time series
beta : np.array
Contains untransformed starting values for latent variables
Returns
----------
a,P,K,F,v : np.array
Filtered states, filtered variances, Kalman gains, F matrix, residuals
"""
T, Z, R, Q, H = self._ss_matrices(beta)
return univariate_kalman(data,Z,H,T,Q,R,0.0) | 0.018553 |
def _do_functions(self, rule, p_selectors, p_parents, p_children, scope, media, c_lineno, c_property, c_codestr, code, name):
"""
Implements @mixin and @function
"""
if name:
funct, params, _ = name.partition('(')
funct = funct.strip()
params = split_params(depar(params + _))
defaults = {}
new_params = []
for param in params:
param, _, default = param.partition(':')
param = param.strip()
default = default.strip()
if param:
new_params.append(param)
if default:
default = self.apply_vars(
default, rule[CONTEXT], None, rule)
defaults[param] = default
context = rule[CONTEXT].copy()
for p in new_params:
context.pop(p, None)
mixin = [list(new_params), defaults, self.
apply_vars(c_codestr, context, None, rule)]
if code == '@function':
def _call(mixin):
def __call(R, *args, **kwargs):
m_params = mixin[0]
m_vars = rule[CONTEXT].copy()
m_vars.update(mixin[1])
m_codestr = mixin[2]
for i, a in enumerate(args):
m_vars[m_params[i]] = a
m_vars.update(kwargs)
_options = rule[OPTIONS].copy()
_rule = spawn_rule(R, codestr=m_codestr, context=m_vars, options=_options, deps=set(), properties=[], final=False, lineno=c_lineno)
self.manage_children(_rule, p_selectors, p_parents,
p_children, (scope or '') + '', R[MEDIA])
ret = _rule[OPTIONS].pop('@return', '')
return ret
return __call
_mixin = _call(mixin)
_mixin.mixin = mixin
mixin = _mixin
# Insert as many @mixin options as the default parameters:
while len(new_params):
rule[OPTIONS]['%s %s:%d' % (code, funct,
len(new_params))] = mixin
param = new_params.pop()
if param not in defaults:
break
if not new_params:
rule[OPTIONS][code + ' ' + funct + ':0'] = mixin | 0.001934 |
def curtailment(network, carrier='solar', filename=None):
"""
Plot curtailment of selected carrier
Parameters
----------
network : PyPSA network container
Holds topology of grid including results from powerflow analysis
carrier: str
Plot curtailment of this carrier
filename: str or None
Save the figure under this file name, or show it if None
Returns
-------
Plot
"""
p_by_carrier = network.generators_t.p.groupby\
(network.generators.carrier, axis=1).sum()
capacity = network.generators.groupby("carrier").sum().at[carrier, "p_nom"]
p_available = network.generators_t.p_max_pu.multiply(
network.generators["p_nom"])
p_available_by_carrier = p_available.groupby(
network.generators.carrier, axis=1).sum()
p_curtailed_by_carrier = p_available_by_carrier - p_by_carrier
print(p_curtailed_by_carrier.sum())
p_df = pd.DataFrame({carrier +
" available": p_available_by_carrier[carrier],
carrier +
" dispatched": p_by_carrier[carrier], carrier +
" curtailed": p_curtailed_by_carrier[carrier]})
p_df[carrier + " capacity"] = capacity
p_df[carrier + " curtailed"][p_df[carrier + " curtailed"] < 0.] = 0.
fig, ax = plt.subplots(1, 1)
fig.set_size_inches(12, 6)
p_df[[carrier + " dispatched", carrier + " curtailed"]
].plot(kind="area", ax=ax, linewidth=3)
p_df[[carrier + " available", carrier + " capacity"]
].plot(ax=ax, linewidth=3)
ax.set_xlabel("")
ax.set_ylabel("Power [MW]")
ax.set_ylim([0, capacity * 1.1])
ax.legend()
if filename is None:
plt.show()
else:
plt.savefig(filename)
plt.close() | 0.001678 |
def euclidean_distances(a, b, squared=False, to_numpy=True):
"""
Compute the pairwise euclidean distance between matrices a and b.
If the input matrices are in numpy format, they will be uploaded to the
GPU first, which can incur significant time overhead.
Parameters
----------
a : np.ndarray (n, f)
first matrix
b : np.ndarray (m, f)
second matrix
to_numpy : boolean, optional (default True)
If true convert back the GPU array result to numpy format.
squared : boolean, optional (default False)
if True, return squared euclidean distance matrix
Returns
-------
c : (n x m) np.ndarray or cupy.ndarray
pairwise euclidean distance distance matrix
"""
a, b = to_gpu(a, b)
a2 = np.sum(np.square(a), 1)
b2 = np.sum(np.square(b), 1)
c = -2 * np.dot(a, b.T)
c += a2[:, None]
c += b2[None, :]
if not squared:
np.sqrt(c, out=c)
if to_numpy:
return to_np(c)
else:
return c | 0.000978 |
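A quick CPU-only NumPy check of the ||a - b||^2 = |a|^2 + |b|^2 - 2 a.b expansion used above (no GPU transfer, random toy matrices):
import numpy as np

a = np.random.rand(4, 3)
b = np.random.rand(5, 3)
# direct pairwise distances via broadcasting
direct = np.sqrt(((a[:, None, :] - b[None, :, :]) ** 2).sum(-1))
# expanded form, clipped at zero to absorb rounding error
expanded = np.sqrt(np.maximum(
    (a ** 2).sum(1)[:, None] + (b ** 2).sum(1)[None, :] - 2 * a @ b.T, 0.0))
print(np.allclose(direct, expanded))   # True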
def editContactItem(self, contactType, contactItem, contactInfo):
"""
Edit the given contact item with the given contact type. Broadcast
the edit to all L{IOrganizerPlugin} powerups.
@type contactType: L{IContactType}
@param contactType: The contact type which will be used to edit the
contact item.
@param contactItem: The contact item to edit.
@type contactInfo: C{dict}
@param contactInfo: The contact information to use to edit the
contact item.
@return: C{None}
"""
contactType.editContactItem(
contactItem, **_stringifyKeys(contactInfo))
self._callOnOrganizerPlugins('contactItemEdited', contactItem) | 0.002692 |
def find_components_without_annotation(model, components):
"""
Find model components with empty annotation attributes.
Parameters
----------
model : cobra.Model
A cobrapy metabolic model.
components : {"metabolites", "reactions", "genes"}
A string denoting `cobra.Model` components.
Returns
-------
list
The components without any annotation.
"""
return [elem for elem in getattr(model, components) if
elem.annotation is None or len(elem.annotation) == 0] | 0.001859 |
def save_gradebook(self, gradebook_form, *args, **kwargs):
"""Pass through to provider GradebookAdminSession.update_gradebook"""
# Implemented from kitosid template for -
# osid.resource.BinAdminSession.update_bin
if gradebook_form.is_for_update():
return self.update_gradebook(gradebook_form, *args, **kwargs)
else:
return self.create_gradebook(gradebook_form, *args, **kwargs) | 0.004525 |
def as_graph(self, depth=0):
"""
Create a graph with self as node, cache it, return it.
Args:
depth (int): depth of the graph.
Returns:
Graph: an instance of Graph.
"""
if depth in self._graph_cache:
return self._graph_cache[depth]
self._graph_cache[depth] = graph = Graph(self, depth=depth)
return graph | 0.004926 |
def connect_combo_text(instance, prop, widget):
"""
Connect a callback property with a QComboBox widget based on the text.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QComboBox
The combo box to connect.
See Also
--------
connect_combo_data: connect a callback property with a QComboBox widget based on the userData.
"""
def update_widget(value):
try:
idx = _find_combo_text(widget, value)
except ValueError:
if value is None:
idx = -1
else:
raise
widget.setCurrentIndex(idx)
def update_prop(idx):
if idx == -1:
setattr(instance, prop, None)
else:
setattr(instance, prop, widget.itemText(idx))
add_callback(instance, prop, update_widget)
widget.currentIndexChanged.connect(update_prop)
update_widget(getattr(instance, prop)) | 0.001887 |
def iteritems_sorted(dict_):
""" change to iteritems ordered """
if isinstance(dict_, OrderedDict):
return six.iteritems(dict_)
else:
return iter(sorted(six.iteritems(dict_))) | 0.004926 |
def _get_remote_node_url(self, remote_node):
"""Get grid-extras url of a node
:param remote_node: remote node name
:returns: grid-extras url
"""
logging.getLogger("requests").setLevel(logging.WARNING)
gridextras_port = 3000
return 'http://{}:{}'.format(remote_node, gridextras_port) | 0.0059 |
def main(feature_folder, create_learning_curve=False):
"""main function of create_ffiles.py"""
# Read the feature description file
with open(os.path.join(feature_folder, "info.yml"), 'r') as ymlfile:
feature_description = yaml.load(ymlfile)
# Get preprocessed .pickle file from model description file
path_to_data = os.path.join(utils.get_project_root(),
feature_description['data-source'])
if os.path.isdir(path_to_data):
path_to_data = os.path.join(path_to_data, "data.pickle")
target_paths = {'traindata': os.path.join(feature_folder,
"traindata.hdf5"),
'validdata': os.path.join(feature_folder,
"validdata.hdf5"),
'testdata': os.path.join(feature_folder,
"testdata.hdf5")}
feature_list = features.get_features(feature_description['features'])
mult_queue = data_multiplication.get_data_multiplication_queue(
feature_description['data-multiplication'])
# Set everything up for the creation of the 3 hdf5 (test, validation,
# training).
os.chdir(feature_folder)
logging.info("Start creation of hdf5-files...")
logging.info("Get sets from '%s' ...", path_to_data)
(training_set, validation_set, test_set, formula_id2index,
preprocessing_queue, index2latex) = get_sets(path_to_data)
training_set = training_set_multiplication(training_set, mult_queue)
_create_index_formula_lookup(formula_id2index, feature_folder, index2latex)
# Output data for documentation
print("Classes (nr of symbols): %i" % len(formula_id2index))
preprocessing.print_preprocessing_list(preprocessing_queue)
features.print_featurelist(feature_list)
logging.info("Start creating hdf5 files")
# Get the dimension of the feature vector
input_features = sum(map(lambda n: n.get_dimension(), feature_list))
# Traindata has to come first because of feature normalization
for dataset_name, dataset, is_traindata in \
[("traindata", training_set, True),
("testdata", test_set, False),
("validdata", validation_set, False)]:
t0 = time.time()
logging.info("Start preparing '%s' ...", dataset_name)
prepared, translation = prepare_dataset(dataset,
formula_id2index,
feature_list,
is_traindata)
logging.info("%s length: %i", dataset_name, len(prepared))
logging.info("start 'make_hdf5'x ...")
make_hdf5(dataset_name,
input_features,
prepared,
os.path.join(feature_folder, target_paths[dataset_name]),
create_learning_curve)
_create_translation_file(feature_folder,
dataset_name,
translation,
formula_id2index)
t1 = time.time() - t0
logging.info("%s was written. Needed %0.2f seconds", dataset_name, t1)
gc.collect()
utils.create_run_logfile(feature_folder) | 0.000303 |
def get_newcommand_macros(tex_source):
r"""Get all ``\newcommand`` macro definition from TeX source.
Parameters
----------
tex_source : `str`
TeX source content.
Returns
-------
macros : `dict`
Keys are macro names (including leading ``\``) and values are the
content (as `str`) of the macros.
Notes
-----
``\newcommand`` macros with arguments are not supported.
"""
macros = {}
command = LatexCommand(
'newcommand',
{'name': 'name', 'required': True, 'bracket': '{'},
{'name': 'content', 'required': True, 'bracket': '{'})
for macro in command.parse(tex_source):
macros[macro['name']] = macro['content']
return macros | 0.001353 |
def getYamlDocument(filePath):
"""
Return a yaml file's contents as a dictionary
"""
with open(filePath) as stream:
doc = yaml.load(stream)
return doc | 0.005495 |
def update_dcnm_partition_static_route(self, tenant_id, arg_dict):
"""Add static route in DCNM's partition.
This gets pushed to the relevant leaf switches.
"""
ip_list = self.os_helper.get_subnet_nwk_excl(tenant_id,
arg_dict.get('excl_list'))
srvc_node_ip = self.get_out_srvc_node_ip_addr(tenant_id)
ret = self.dcnm_obj.update_partition_static_route(
arg_dict.get('tenant_name'), fw_const.SERV_PART_NAME, ip_list,
vrf_prof=self.cfg.firewall.fw_service_part_vrf_profile,
service_node_ip=srvc_node_ip)
if not ret:
LOG.error("Unable to update DCNM ext profile with static "
"route %s", arg_dict.get('router_id'))
self.delete_intf_router(tenant_id, arg_dict.get('tenant_name'),
arg_dict.get('router_id'))
return False
return True | 0.002055 |
async def message_handler(self, event):
"""Callback method for received events.NewMessage"""
# Note that message_handler is called when a Telegram update occurs
# and an event is created. Telegram may not always send information
# about the ``.sender`` or the ``.chat``, so if you *really* want it
# you should use ``get_chat()`` and ``get_sender()`` while working
# with events. Since they are methods, you know they may make an API
# call, which can be expensive.
chat = await event.get_chat()
if event.is_group:
if event.out:
sprint('>> sent "{}" to chat {}'.format(
event.text, get_display_name(chat)
))
else:
sprint('<< {} @ {} sent "{}"'.format(
get_display_name(await event.get_sender()),
get_display_name(chat),
event.text
))
else:
if event.out:
sprint('>> "{}" to user {}'.format(
event.text, get_display_name(chat)
))
else:
sprint('<< {} sent "{}"'.format(
get_display_name(chat), event.text
)) | 0.001561 |
def open_shapefile(shapefile_path, file_geodatabase=None):
"""Opens a shapefile using either a shapefile path
or a file geodatabase
"""
if file_geodatabase:
gdb_driver = ogr.GetDriverByName("OpenFileGDB")
ogr_shapefile = gdb_driver.Open(file_geodatabase)
ogr_shapefile_lyr = ogr_shapefile.GetLayer(shapefile_path)
else:
ogr_shapefile = ogr.Open(shapefile_path)
ogr_shapefile_lyr = ogr_shapefile.GetLayer()
return ogr_shapefile_lyr, ogr_shapefile | 0.001949 |
def set_cache_implementation(self, cache_name, impl_name, maxsize, **kwargs):
"""
Changes the cache implementation for the named cache
"""
self._get_cache(cache_name).set_cache_impl(impl_name, maxsize, **kwargs) | 0.012346 |
def request(self, method, path, query=None, data=None, redirects=True):
"""
Sends HTTP request to LendingClub.
Parameters
----------
method : {GET, POST, HEAD, DELETE}
The HTTP method to use: GET, POST, HEAD or DELETE
path : string
The path that will be appended to the domain defined in :attr:`base_url`.
query : dict
A dictionary of query string parameters
data : dict
A dictionary of POST data values
redirects : boolean
True to follow redirects, False to return the original response from the server.
Returns
-------
requests.Response
A `requests.Response <http://docs.python-requests.org/en/latest/api/#requests.Response>`_ object
"""
# Check session time
self.__continue_session()
try:
url = self.build_url(path)
method = method.upper()
self.__log('{0} request to: {1}'.format(method, url))
if method == 'POST':
request = self.__session.post(url, params=query, data=data, allow_redirects=redirects)
elif method == 'GET':
request = self.__session.get(url, params=query, data=data, allow_redirects=redirects)
elif method == 'HEAD':
request = self.__session.head(url, params=query, data=data, allow_redirects=redirects)
elif method == 'DELETE':
request = self.__session.delete(url, params=query, data=data, allow_redirects=redirects)
else:
raise SessionError('{0} is not a supported HTTP method'.format(method))
self.last_response = request
self.__log('Status code: {0}'.format(request.status_code))
# Update session time
self.last_request_time = time.time()
except (RequestException, ConnectionError, TooManyRedirects, HTTPError) as e:
raise NetworkError('{0} failed to: {1}'.format(method, url), e)
except Timeout as e:
raise NetworkError('{0} request timed out: {1}'.format(method, url), e)
return request | 0.005482 |
def match_filtered_identities(self, fa, fb):
"""Determine if two filtered identities are the same.
This method compares the username and the source of each
identity to check if the given unique identities are the
same. Identity sources have to start with the 'github' keyword
(uppercase or lowercase). When the given filtered identities
are the same object or share the same UUID, this will also
produce a positive match.
Identities whose usernames are in the blacklist will be
ignored and the result of the comparison will be false.
:param fa: filtered identity to match
:param fb: filtered identity to match
:returns: True when both filtered identities are likely to be the same.
Otherwise, returns False.
:raises ValueError: when any of the given filtered identities is not
an instance of the GitHubUsernameIdentity class.
"""
if not isinstance(fa, GitHubUsernameIdentity):
raise ValueError("<fa> is not an instance of GitHubUsernameIdentity")
if not isinstance(fb, GitHubUsernameIdentity):
raise ValueError("<fb> is not an instance of GitHubUsernameIdentity")
if fa.uuid and fb.uuid and fa.uuid == fb.uuid:
return True
if self._check_blacklist(fa):
return False
# Compare username
return fa.username and (fa.username == fb.username) | 0.002719 |
def expand_to_chunk_size(self, chunk_size, offset=Vec(0,0,0, dtype=int)):
"""
Align a potentially non-axis aligned bbox to the grid by growing it
to the nearest grid lines.
Required:
chunk_size: arraylike (x,y,z), the size of chunks in the
dataset e.g. (64,64,64)
Optional:
offset: arraylike (x,y,z), the starting coordinate of the dataset
"""
chunk_size = np.array(chunk_size, dtype=np.float32)
result = self.clone()
result = result - offset
result.minpt = np.floor(result.minpt / chunk_size) * chunk_size
result.maxpt = np.ceil(result.maxpt / chunk_size) * chunk_size
return (result + offset).astype(self.dtype) | 0.007184 |
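A numeric check of the floor/ceil grid arithmetic used above, with plain NumPy standing in for the Vec/Bbox helpers; chunk size and coordinates are made up:
import numpy as np

chunk_size = np.array([64.0, 64.0, 64.0])
minpt = np.array([10.0, 3.0, 130.0])
maxpt = np.array([70.0, 60.0, 190.0])
print(np.floor(minpt / chunk_size) * chunk_size)   # [  0.   0. 128.]
print(np.ceil(maxpt / chunk_size) * chunk_size)    # [128.  64. 192.]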
def add_comment_to_issue(self, issue, comment, visibility=None):
"""
Adds a comment to a specified issue from the current user.
Arguments:
| issue (string) | A JIRA Issue that a comment will be added to, can be an issue ID or Key |
| comment (string) | A body of text to add as a comment to an issue |
| visibility (string) | (Optional) |
Example:
| *Keyword* | *Parameters* | | |
| connect to jira | asimmons | options= {'http://devjira01'} | |
| ${issue} | create issue | ${issue_field_dict} | True |
| add comment to issue | ${issue} | Starting work on this issue | |
"""
self.jira.add_comment(issue=issue, body=comment) | 0.007839 |
def post(self, endpoint, params=None, version='1.1', json_encoded=False):
"""Shortcut for POST requests via :class:`request`"""
return self.request(endpoint, 'POST', params=params, version=version, json_encoded=json_encoded) | 0.0125 |
def wrap_json(cls, json, viewers=None, channels=None):
"""Create a Game instance for the given json
:param json: the dict with the information of the game
:type json: :class:`dict`
:param viewers: The viewer count
:type viewers: :class:`int`
:param channels: The channel count
:type channels: :class:`int`
:returns: the new game instance
:rtype: :class:`Game`
:raises: None
"""
g = Game(name=json.get('name'),
box=json.get('box'),
logo=json.get('logo'),
twitchid=json.get('_id'),
viewers=viewers,
channels=channels)
return g | 0.002805 |
def copy_file(file_name):
"""
Copy a given file from the cache storage
"""
remote_file_path = join(join(expanduser('~'), OCTOGRID_DIRECTORY), file_name)
current_path = join(getcwd(), file_name)
try:
copyfile(remote_file_path, current_path)
except Exception as e:
raise e | 0.035587 |
def _Liquid(T, P=0.1):
"""Supplementary release on properties of liquid water at 0.1 MPa
Parameters
----------
T : float
Temperature, [K]
P : float
Pressure, [MPa]
Although this relation is for P=0.1 MPa, it can be extrapolated to
pressures up to 0.3 MPa
Returns
-------
prop : dict
Dict with calculated properties of water. The available properties are:
* h: Specific enthalpy, [kJ/kg]
* u: Specific internal energy, [kJ/kg]
* a: Specific Helmholtz energy, [kJ/kg]
* g: Specific Gibbs energy, [kJ/kg]
* s: Specific entropy, [kJ/kgK]
* cp: Specific isobaric heat capacity, [kJ/kgK]
* cv: Specific isochoric heat capacity, [kJ/kgK]
* w: Speed of sound, [m/s]
* rho: Density, [kg/m³]
* v: Specific volume, [m³/kg]
* vt: [∂v/∂T]P, [m³/kgK]
* vtt: [∂²v/∂T²]P, [m³/kgK²]
* vp: [∂v/∂P]T, [m³/kg/MPa]
* vtp: [∂²v/∂T∂P], [m³/kg/MPa]
* alfav: Cubic expansion coefficient, [1/K]
* xkappa : Isothermal compressibility, [1/MPa]
* ks: Isentropic compressibility, [1/MPa]
* mu: Viscosity, [mPas]
* k: Thermal conductivity, [W/mK]
* epsilon: Dielectric constant, [-]
Notes
------
Raise :class:`NotImplementedError` if input isn't in limit:
* 253.15 ≤ T ≤ 383.15
* 0.1 ≤ P ≤ 0.3
Examples
--------
>>> st1 = _Liquid(260)
>>> st1["rho"], st1["h"], st1["s"]
997.0683602710492 -55.86223174460868 -0.20998554842619535
References
----------
IAPWS, Revised Supplementary Release on Properties of Liquid Water at 0.1
MPa, http://www.iapws.org/relguide/LiquidWater.html
"""
# Check input in range of validity
if T <= 253.15 or T >= 383.15 or P < 0.1 or P > 0.3:
raise NotImplementedError("Incoming out of bound")
elif P != 0.1:
# Raise a warning if the P value is extrapolated
warnings.warn("Using extrapolated values")
R = 0.46151805 # kJ/kgK
Po = 0.1
Tr = 10
tau = T/Tr
alfa = Tr/(593-T)
beta = Tr/(T-232)
a = [None, -1.661470539e5, 2.708781640e6, -1.557191544e8, None,
1.93763157e-2, 6.74458446e3, -2.22521604e5, 1.00231247e8,
-1.63552118e9, 8.32299658e9, -7.5245878e-6, -1.3767418e-2,
1.0627293e1, -2.0457795e2, 1.2037414e3]
b = [None, -8.237426256e-1, 1.908956353, -2.017597384, 8.546361348e-1,
5.78545292e-3, -1.53195665E-2, 3.11337859e-2, -4.23546241e-2,
3.38713507e-2, -1.19946761e-2, -3.1091470e-6, 2.8964919e-5,
-1.3112763e-4, 3.0410453e-4, -3.9034594e-4, 2.3403117e-4,
-4.8510101e-5]
c = [None, -2.452093414e2, 3.869269598e1, -8.983025854]
n = [None, 4, 5, 7, None, None, 4, 5, 7, 8, 9, 1, 3, 5, 6, 7]
m = [None, 2, 3, 4, 5, 1, 2, 3, 4, 5, 6, 1, 3, 4, 5, 6, 7, 9]
suma1 = sum([a[i]*alfa**n[i] for i in range(1, 4)])
suma2 = sum([b[i]*beta**m[i] for i in range(1, 5)])
go = R*Tr*(c[1]+c[2]*tau+c[3]*tau*log(tau)+suma1+suma2)
suma1 = sum([a[i]*alfa**n[i] for i in range(6, 11)])
suma2 = sum([b[i]*beta**m[i] for i in range(5, 11)])
vo = R*Tr/Po/1000*(a[5]+suma1+suma2)
suma1 = sum([a[i]*alfa**n[i] for i in range(11, 16)])
suma2 = sum([b[i]*beta**m[i] for i in range(11, 18)])
vpo = R*Tr/Po**2/1000*(suma1+suma2)
suma1 = sum([n[i]*a[i]*alfa**(n[i]+1) for i in range(1, 4)])
suma2 = sum([m[i]*b[i]*beta**(m[i]+1) for i in range(1, 5)])
so = -R*(c[2]+c[3]*(1+log(tau))+suma1-suma2)
suma1 = sum([n[i]*(n[i]+1)*a[i]*alfa**(n[i]+2) for i in range(1, 4)])
suma2 = sum([m[i]*(m[i]+1)*b[i]*beta**(m[i]+2) for i in range(1, 5)])
cpo = -R*(c[3]+tau*suma1+tau*suma2)
suma1 = sum([n[i]*a[i]*alfa**(n[i]+1) for i in range(6, 11)])
suma2 = sum([m[i]*b[i]*beta**(m[i]+1) for i in range(5, 11)])
vto = R/Po/1000*(suma1-suma2)
# These properties are only necessary for computing thermodynamic
# properties at pressures different from 0.1 MPa
suma1 = sum([n[i]*(n[i]+1)*a[i]*alfa**(n[i]+2) for i in range(6, 11)])
suma2 = sum([m[i]*(m[i]+1)*b[i]*beta**(m[i]+2) for i in range(5, 11)])
vtto = R/Tr/Po/1000*(suma1+suma2)
suma1 = sum([n[i]*a[i]*alfa**(n[i]+1) for i in range(11, 16)])
suma2 = sum([m[i]*b[i]*beta**(m[i]+1) for i in range(11, 18)])
vpto = R/Po**2/1000*(suma1-suma2)
if P != 0.1:
go += vo*(P-0.1)
so -= vto*(P-0.1)
cpo -= T*vtto*(P-0.1)
vo -= vpo*(P-0.1)
vto += vpto*(P-0.1)
vppo = 3.24e-10*R*Tr/0.1**3
vpo += vppo*(P-0.1)
h = go+T*so
u = h-P*vo
a = go-P*vo
cv = cpo+T*vto**2/vpo
xkappa = -vpo/vo
alfa = vto/vo
ks = -(T*vto**2/cpo+vpo)/vo
w = (-vo**2*1e9/(vpo*1e3+T*vto**2*1e6/cpo))**0.5
propiedades = {}
propiedades["g"] = go
propiedades["T"] = T
propiedades["P"] = P
propiedades["v"] = vo
propiedades["vt"] = vto
propiedades["vp"] = vpo
propiedades["vpt"] = vpto
propiedades["vtt"] = vtto
propiedades["rho"] = 1/vo
propiedades["h"] = h
propiedades["s"] = so
propiedades["cp"] = cpo
propiedades["cv"] = cv
propiedades["u"] = u
propiedades["a"] = a
propiedades["xkappa"] = xkappa
propiedades["alfav"] = vto/vo
propiedades["ks"] = ks
propiedades["w"] = w
# Viscosity correlation, Eq 7
a = [None, 280.68, 511.45, 61.131, 0.45903]
b = [None, -1.9, -7.7, -19.6, -40]
T_ = T/300
mu = sum([a[i]*T_**b[i] for i in range(1, 5)])/1e6
propiedades["mu"] = mu
# Thermal conductivity correlation, Eq 8
c = [None, 1.6630, -1.7781, 1.1567, -0.432115]
d = [None, -1.15, -3.4, -6.0, -7.6]
k = sum([c[i]*T_**d[i] for i in range(1, 5)])
propiedades["k"] = k
# Dielectric constant correlation, Eq 9
e = [None, -43.7527, 299.504, -399.364, 221.327]
f = [None, -0.05, -1.47, -2.11, -2.31]
epsilon = sum([e[i]*T_**f[i] for i in range(1, 5)])
propiedades["epsilon"] = epsilon
return propiedades | 0.000163 |
def checklink (form=None, env=os.environ):
"""Validates the CGI form and checks the given links."""
if form is None:
form = {}
try:
checkform(form, env)
except LCFormError as errmsg:
log(env, errmsg)
yield encode(format_error(errmsg))
return
out = ThreadsafeIO()
config = get_configuration(form, out)
url = strformat.stripurl(formvalue(form, "url"))
aggregate = director.get_aggregate(config)
url_data = checker.get_url_from(url, 0, aggregate, extern=(0, 0))
aggregate.urlqueue.put(url_data)
for html_str in start_check(aggregate, out):
yield encode(html_str)
out.close() | 0.002999 |
def init(self):
"""init `todo` file
if file exists, then initialization self.todos
and record current max index of todos
: when add a new todo, the `idx` via only `self.current_max_idx + 1`
"""
if os.path.isdir(self.path):
raise InvalidTodoFile
if os.path.exists(self.path):
with open(self.path, 'r') as f:
tls = [tl.strip() for tl in f if tl]
todos = list(map(_todo_from_file, tls))
self.todos = todos
for todo in todos:
if self.current_max_idx < todo['idx']:
self.current_max_idx = todo['idx']
else:
logger.warning('No todo file found, initializing an empty todo file')
with open(self.path, 'w') as f:
f.flush() | 0.00355 |
def setLevel(self, lvl):
"""
Trim or extend scope
lvl = 1 -> only one scope (global)
"""
while len(self) != lvl:
if len(self) > lvl:
self.pop()
else:
self.append(NameScopeItem(len(self))) | 0.007067 |
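The same trim-or-extend behaviour on a plain list, with each NameScopeItem replaced by its index for illustration:
def set_level(scope, lvl):
    while len(scope) != lvl:
        if len(scope) > lvl:
            scope.pop()
        else:
            scope.append(len(scope))   # stand-in for NameScopeItem(len(self))
    return scope

print(set_level([0, 1, 2], 1))   # [0]
print(set_level([0], 3))         # [0, 1, 2]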
def W_min_HS_ratio(self):
"""Calculate the minimum flocculator channel width, given the minimum
ratio between expansion height (H) and baffle spacing (S).
:returns: Minimum channel width given H_e/S
:rtype: float * centimeter
"""
return ((self.HS_RATIO_MIN * self.Q / self.downstream_H) *
(self.BAFFLE_K /
(2 * self.downstream_H * pc.viscosity_kinematic(self.temp) * self.vel_grad_avg ** 2)) ** (1/3)
).to(u.cm) | 0.009901 |
def _interleaved_dtype(
blocks: List[Block]
) -> Optional[Union[np.dtype, ExtensionDtype]]:
"""Find the common dtype for `blocks`.
Parameters
----------
blocks : List[Block]
Returns
-------
dtype : Optional[Union[np.dtype, ExtensionDtype]]
None is returned when `blocks` is empty.
"""
if not len(blocks):
return None
return find_common_type([b.dtype for b in blocks]) | 0.002304 |
def RegisterArtifact(self,
artifact_rdfvalue,
source="datastore",
overwrite_if_exists=False,
overwrite_system_artifacts=False):
"""Registers a new artifact."""
artifact_name = artifact_rdfvalue.name
if artifact_name in self._artifacts:
if not overwrite_if_exists:
details = "artifact already exists and `overwrite_if_exists` is unset"
raise rdf_artifacts.ArtifactDefinitionError(artifact_name, details)
elif not overwrite_system_artifacts:
artifact_obj = self._artifacts[artifact_name]
if not artifact_obj.loaded_from.startswith("datastore:"):
# This artifact was not uploaded to the datastore but came from a
# file, refuse to overwrite.
details = "system artifact cannot be overwritten"
raise rdf_artifacts.ArtifactDefinitionError(artifact_name, details)
# Preserve where the artifact was loaded from to help debugging.
artifact_rdfvalue.loaded_from = source
# Clear any stale errors.
artifact_rdfvalue.error_message = None
self._artifacts[artifact_rdfvalue.name] = artifact_rdfvalue | 0.009213 |
def create_widget(self, dim, holomap=None, editable=False):
""""
Given a Dimension creates bokeh widgets to select along that
dimension. For numeric data a slider widget is created which
may be either discrete, if a holomap is supplied or the
Dimension.values are set, or a continuous widget for
DynamicMaps. If the slider is discrete the returned mapping
defines a mapping between values and labels, making it possible
to sync the slider and label widgets. For non-numeric data
a simple dropdown selection widget is generated.
"""
label, mapping = None, None
if holomap is None:
if dim.values:
if dim.default is None:
default = dim.values[0]
elif dim.default not in dim.values:
raise ValueError("%s dimension default %r is not in dimension values: %s"
% (dim, dim.default, dim.values))
else:
default = dim.default
value = dim.values.index(default)
if all(isnumeric(v) for v in dim.values):
values = sorted(dim.values)
labels = [unicode(dim.pprint_value(v)) for v in values]
if editable:
label = AutocompleteInput(value=labels[value], completions=labels,
title=dim.pprint_label)
else:
label = Div(text='<b>%s</b>' % dim.pprint_value_string(labels[value]))
widget = Slider(value=value, start=0, end=len(dim.values)-1, title=None, step=1)
mapping = list(enumerate(zip(values, labels)))
else:
values = [(v, dim.pprint_value(v)) for v in dim.values]
widget = Select(title=dim.pprint_label, value=values[value][0],
options=values)
else:
start = dim.soft_range[0] if dim.soft_range[0] else dim.range[0]
end = dim.soft_range[1] if dim.soft_range[1] else dim.range[1]
dim_range = end - start
int_type = isinstance(dim.type, type) and issubclass(dim.type, int)
if dim.step is not None:
step = dim.step
elif isinstance(dim_range, int) or int_type:
step = 1
else:
step = 10**((round(math.log10(dim_range))-3))
if dim.default is None:
default = start
elif (dim.default < start or dim.default > end):
raise ValueError("%s dimension default %r is not in the provided range: %s"
% (dim, dim.default, (start, end)))
else:
default = dim.default
if editable:
label = TextInput(value=str(default), title=dim.pprint_label)
else:
label = Div(text='<b>%s</b>' % dim.pprint_value_string(default))
widget = Slider(value=default, start=start,
end=end, step=step, title=None)
else:
values = (dim.values if dim.values else
list(unique_array(holomap.dimension_values(dim.name))))
if dim.default is None:
default = values[0]
elif dim.default not in values:
raise ValueError("%s dimension default %r is not in dimension values: %s"
% (dim, dim.default, values))
else:
default = dim.default
if isinstance(values[0], np.datetime64) or isnumeric(values[0]):
values = sorted(values)
labels = [dim.pprint_value(v) for v in values]
value = values.index(default)
if editable:
label = AutocompleteInput(value=labels[value], completions=labels,
title=dim.pprint_label)
else:
label = Div(text='<b>%s</b>' % (dim.pprint_value_string(labels[value])))
widget = Slider(value=value, start=0, end=len(values)-1, title=None, step=1)
else:
labels = [dim.pprint_value(v) for v in values]
widget = Select(title=dim.pprint_label, value=default,
options=list(zip(values, labels)))
mapping = list(enumerate(zip(values, labels)))
return widget, label, mapping | 0.003404 |
def unregister(self, namespace, command=None):
"""
Unregisters the given command. If command is None, the whole name space
is unregistered.
:param namespace: The command name space.
:param command: The shell name of the command, or None
:return: True if the command was known, else False
"""
if not namespace:
namespace = DEFAULT_NAMESPACE
namespace = namespace.strip().lower()
if namespace not in self._commands:
self._logger.warning("Unknown name space: %s", namespace)
return False
if command is not None:
# Remove the command
command = command.strip().lower()
if command not in self._commands[namespace]:
self._logger.warning(
"Unknown command: %s.%s", namespace, command
)
return False
del self._commands[namespace][command]
# Remove the name space if necessary
if not self._commands[namespace]:
del self._commands[namespace]
else:
# Remove the whole name space
del self._commands[namespace]
return True | 0.001621 |
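A standalone sketch of the same two-level registry semantics using plain dicts and made-up command names: removing the last command of a name space also drops the name space.
def unregister(commands, namespace, command=None):
    ns = commands.get(namespace)
    if ns is None:
        return False
    if command is not None:
        if command not in ns:
            return False
        del ns[command]
        if not ns:            # last command gone -> drop the name space
            del commands[namespace]
    else:
        del commands[namespace]
    return True

registry = {'default': {'echo': print}}
print(unregister(registry, 'default', 'echo'), registry)   # True {}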
def _document_structure(self):
"""Document the structure of the dataset."""
logger.debug("Documenting dataset structure")
key = self.get_structure_key()
text = json.dumps(self._structure_parameters, indent=2, sort_keys=True)
self.put_text(key, text)
key = self.get_dtool_readme_key()
self.put_text(key, self._dtool_readme_txt) | 0.005222 |
def load_tpf(self):
'''
Loads the target pixel file.
'''
if not self.loaded:
if self._data is not None:
data = self._data
else:
data = self._mission.GetData(
self.ID, season=self.season,
cadence=self.cadence,
clobber=self.clobber_tpf,
aperture_name=self.aperture_name,
saturated_aperture_name=self.saturated_aperture_name,
max_pixels=self.max_pixels,
saturation_tolerance=self.saturation_tolerance,
get_hires=self.get_hires,
get_nearby=self.get_nearby)
if data is None:
raise Exception("Unable to retrieve target data.")
self.cadn = data.cadn
self.time = data.time
self.model = np.zeros_like(self.time)
self.fpix = data.fpix
self.fraw = np.sum(self.fpix, axis=1)
self.fpix_err = data.fpix_err
self.fraw_err = np.sqrt(np.sum(self.fpix_err ** 2, axis=1))
self.nanmask = data.nanmask
self.badmask = data.badmask
self.transitmask = np.array([], dtype=int)
self.outmask = np.array([], dtype=int)
self.aperture = data.aperture
self.aperture_name = data.aperture_name
self.apertures = data.apertures
self.quality = data.quality
self.Xpos = data.Xpos
self.Ypos = data.Ypos
self.mag = data.mag
self.pixel_images = data.pixel_images
self.nearby = data.nearby
self.hires = data.hires
self.saturated = data.saturated
self.meta = data.meta
self.bkg = data.bkg
# Update the last breakpoint to the correct value
self.breakpoints[-1] = len(self.time) - 1
# Get PLD normalization
self.get_norm()
self.loaded = True | 0.000953 |
def instruction_list_to_easm(instruction_list: list) -> str:
"""Convert a list of instructions into an easm op code string.
:param instruction_list:
:return:
"""
result = ""
for instruction in instruction_list:
result += "{} {}".format(instruction["address"], instruction["opcode"])
if "argument" in instruction:
result += " " + instruction["argument"]
result += "\n"
return result | 0.002232 |
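A quick call with a hand-made instruction list (run alongside the function above); the dict keys come from the function, the opcodes are made up:
program = [
    {"address": 0, "opcode": "PUSH1", "argument": "0x60"},
    {"address": 2, "opcode": "MSTORE"},
]
print(instruction_list_to_easm(program))
# 0 PUSH1 0x60
# 2 MSTORE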
def map(cls, latitudes, longitudes, labels=None, colors=None, areas=None, **kwargs):
"""Return markers from columns of coordinates, labels, & colors.
The areas column is not applicable to markers, but sets circle areas.
"""
assert len(latitudes) == len(longitudes)
assert areas is None or hasattr(cls, '_has_radius'), "A " + cls.__name__ + " has no radius"
inputs = [latitudes, longitudes]
if labels is not None:
assert len(labels) == len(latitudes)
inputs.append(labels)
else:
inputs.append(("",) * len(latitudes))
if colors is not None:
assert len(colors) == len(latitudes)
inputs.append(colors)
if areas is not None:
assert len(areas) == len(latitudes)
inputs.append(np.array(areas) ** 0.5 / math.pi)
ms = [cls(*args, **kwargs) for args in zip(*inputs)]
return Map(ms) | 0.004202 |
def AddEventSource(self, event_source):
"""Adds an event source.
Args:
event_source (EventSource): an event source.
Raises:
IOError: when the storage writer is closed.
OSError: when the storage writer is closed.
"""
self._RaiseIfNotWritable()
self._storage_file.AddEventSource(event_source)
self.number_of_event_sources += 1 | 0.002681 |
def request(self, request):
"""Sets the request of this V1beta1CertificateSigningRequestSpec.
Base64-encoded PKCS#10 CSR data # noqa: E501
:param request: The request of this V1beta1CertificateSigningRequestSpec. # noqa: E501
:type: str
"""
if request is None:
raise ValueError("Invalid value for `request`, must not be `None`") # noqa: E501
if request is not None and not re.search(r'^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$', request): # noqa: E501
raise ValueError(r"Invalid value for `request`, must be a follow pattern or equal to `/^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$/`") # noqa: E501
self._request = request | 0.002601 |
def select_single_column(engine, column):
"""
Select data from single column.
Example::
>>> select_single_column(engine, table_user.c.id)
[1, 2, 3]
>>> select_single_column(engine, table_user.c.name)
["Alice", "Bob", "Cathy"]
"""
s = select([column])
return column.name, [row[0] for row in engine.execute(s)] | 0.002725 |
def main(argv=None):
"""
Entry point
:param argv: Script arguments (None for sys.argv)
:return: An exit code or None
"""
# Prepare arguments
parser = argparse.ArgumentParser(
prog="pelix.shell.xmpp",
parents=[make_common_parser()],
description="Pelix XMPP Shell",
)
group = parser.add_argument_group("XMPP options")
group.add_argument("-j", "--jid", dest="jid", help="Jabber ID")
group.add_argument("--password", dest="password", help="JID password")
group.add_argument("-s", "--server", dest="server", help="XMPP server host")
group.add_argument(
"-p",
"--port",
dest="port",
type=int,
default=5222,
help="XMPP server port",
)
group.add_argument(
"--tls",
dest="use_tls",
action="store_true",
help="Use a STARTTLS connection",
)
group.add_argument(
"--ssl",
dest="use_ssl",
action="store_true",
help="Use an SSL connection",
)
# Parse them
args = parser.parse_args(argv)
# Handle common arguments
init = handle_common_arguments(args)
# Quiet down the SleekXMPP logger
if not args.verbose:
logging.getLogger("sleekxmpp").setLevel(logging.WARNING)
if not args.server and not args.jid:
_logger.error("No JID nor server given. Abandon.")
sys.exit(1)
# Get the password if necessary
password = args.password
if args.jid and args.password is None:
try:
import getpass
except ImportError:
_logger.error(
"getpass() unavailable: give a password in command line"
)
else:
try:
password = getpass.getpass()
except getpass.GetPassWarning:
pass
# Get the server from the JID, if necessary
server = args.server
if not server:
server = sleekxmpp.JID(args.jid).domain
# Set the initial bundles
bundles = [
"pelix.ipopo.core",
"pelix.shell.core",
"pelix.shell.ipopo",
"pelix.shell.console",
"pelix.shell.xmpp",
]
bundles.extend(init.bundles)
# Use the utility method to create, run and delete the framework
framework = pelix.framework.create_framework(
remove_duplicates(bundles), init.properties
)
framework.start()
# Instantiate a Remote Shell
with use_ipopo(framework.get_bundle_context()) as ipopo:
ipopo.instantiate(
pelix.shell.FACTORY_XMPP_SHELL,
"xmpp-shell",
{
"shell.xmpp.server": server,
"shell.xmpp.port": args.port,
"shell.xmpp.jid": args.jid,
"shell.xmpp.password": password,
"shell.xmpp.tls": args.use_tls,
"shell.xmpp.ssl": args.use_ssl,
},
)
# Instantiate configured components
init.instantiate_components(framework.get_bundle_context())
try:
framework.wait_for_stop()
except KeyboardInterrupt:
framework.stop() | 0.000639 |
def tokenize(s):
# type: (str) -> List[Token]
"""Translate a type comment into a list of tokens."""
original = s
tokens = [] # type: List[Token]
while True:
if not s:
tokens.append(End())
return tokens
elif s[0] == ' ':
s = s[1:]
elif s[0] in '()[],*':
tokens.append(Separator(s[0]))
s = s[1:]
elif s[:2] == '->':
tokens.append(Separator('->'))
s = s[2:]
else:
m = re.match(r'[-\w]+(\s*(\.|:)\s*[-/\w]*)*', s)
if not m:
raise ParseError(original)
fullname = m.group(0)
fullname = fullname.replace(' ', '')
if fullname in TYPE_FIXUPS:
fullname = TYPE_FIXUPS[fullname]
# pytz creates classes with the name of the timezone being used:
# https://github.com/stub42/pytz/blob/f55399cddbef67c56db1b83e0939ecc1e276cf42/src/pytz/tzfile.py#L120-L123
# This causes pyannotate to crash, as it's invalid to have a class
# name with a `/` in it (e.g. "pytz.tzfile.America/Los_Angeles")
if fullname.startswith('pytz.tzfile.'):
fullname = 'datetime.tzinfo'
if '-' in fullname or '/' in fullname:
# Not a valid Python name; there are many places that
# generate these, so we just substitute Any rather
# than crashing.
fullname = 'Any'
tokens.append(DottedName(fullname))
s = s[len(m.group(0)):] | 0.000629 |
def match(self, **kwargs):
"""
Traverse relationships with properties matching the given parameters.
e.g: `.match(price__lt=10)`
:param kwargs: see `NodeSet.filter()` for syntax
:return: self
"""
if kwargs:
if self.definition.get('model') is None:
raise ValueError("match() with filter only available on relationships with a model")
output = process_filter_args(self.definition['model'], kwargs)
if output:
self.filters.append(output)
return self | 0.005137 |
def Histograms(self, run, tag):
"""Retrieve the histogram events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.HistogramEvents`.
"""
accumulator = self.GetAccumulator(run)
return accumulator.Histograms(tag) | 0.001946 |
def get_data_ttl(self, use_cached=True):
"""Retrieve the dataTTL for this stream
The dataTtl is the time to live (TTL) in seconds for data points stored in the data stream.
A data point expires after the configured amount of time and is automatically deleted.
:param bool use_cached: If False, the function will always request the latest from Device Cloud.
If True, the device will not make a request if it already has cached data.
:raises devicecloud.DeviceCloudHttpException: in the case of an unexpected http error
:raises devicecloud.streams.NoSuchStreamException: if this stream has not yet been created
:return: The dataTtl associated with this stream in seconds
:rtype: int or None
"""
data_ttl_text = self._get_stream_metadata(use_cached).get("dataTtl")
return int(data_ttl_text) | 0.008979 |
def get_gene2section2gos(gene2gos, sec2gos):
"""Get a list of section aliases for each gene product ID."""
gene2section2gos = {}
for geneid, gos_gene in gene2gos.items():
section2gos = {}
for section_name, gos_sec in sec2gos.items():
gos_secgene = gos_gene.intersection(gos_sec)
if gos_secgene:
section2gos[section_name] = gos_secgene
gene2section2gos[geneid] = section2gos
return gene2section2gos | 0.003868 |
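A minimal worked example of the intersection logic above, assuming get_gene2section2gos is in scope; the gene IDs and GO terms are made up:

gene2gos = {'geneA': {'GO:0008150', 'GO:0003674'}, 'geneB': {'GO:0005575'}}
sec2gos = {'process': {'GO:0008150'}, 'function': {'GO:0003674'}}
print(get_gene2section2gos(gene2gos, sec2gos))
# {'geneA': {'process': {'GO:0008150'}, 'function': {'GO:0003674'}},
#  'geneB': {}}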
def write(self, message, autoerase=True):
"""Send something for stdout and erased after delay"""
super(Animation, self).write(message)
self.last_message = message
if autoerase:
time.sleep(self.interval)
self.erase(message) | 0.007194 |
def get_file(path=None, content=None):
"""
:param path: relative path, or None to get from request
:param content: file content, output in data. Used for editfile
"""
if path is None:
path = request.args.get('path')
if path is None:
return error('No path in request')
filename = os.path.split(path.rstrip('/'))[-1]
extension = filename.rsplit('.', 1)[-1]
os_file_path = web_path_to_os_path(path)
if os.path.isdir(os_file_path):
file_type = 'folder'
# Ensure trailing slash
if path[-1] != '/':
path += '/'
else:
file_type = 'file'
ctime = int(os.path.getctime(os_file_path))
mtime = int(os.path.getmtime(os_file_path))
height = 0
width = 0
if extension in ['gif', 'jpg', 'jpeg', 'png']:
try:
im = PIL.Image.open(os_file_path)
            width, height = im.size
except OSError:
log.exception('Error loading image "{}" to get width and height'.format(os_file_path))
attributes = {
'name': filename,
'path': get_url_path(path),
'readable': 1 if os.access(os_file_path, os.R_OK) else 0,
'writeable': 1 if os.access(os_file_path, os.W_OK) else 0,
'created': datetime.datetime.fromtimestamp(ctime).ctime(),
'modified': datetime.datetime.fromtimestamp(mtime).ctime(),
'timestamp': mtime,
'width': width,
'height': height,
'size': os.path.getsize(os_file_path)
}
if content:
attributes['content'] = content
return {
'id': path,
'type': file_type,
'attributes': attributes
} | 0.002385 |
def str_upper(x):
"""Converts all strings in a column to uppercase.
:returns: an expression containing the converted strings.
Example:
>>> import vaex
>>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']
>>> df = vaex.from_arrays(text=text)
>>> df
# text
0 Something
1 very pretty
2 is coming
3 our
4 way.
>>> df.text.str.upper()
Expression = str_upper(text)
Length: 5 dtype: str (expression)
---------------------------------
0 SOMETHING
1 VERY PRETTY
2 IS COMING
3 OUR
4 WAY.
"""
sl = _to_string_sequence(x).upper()
return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl) | 0.002577 |
def to_file(self, filepath, codec='utf-8', mode='normal'):
"""Write the object to a file.
        :param str filepath: Path of the file.
:param str codec: Text encoding.
        :param string mode: Flag for write mode, possible modes:
'n'/'normal', 's'/'short' and 'b'/'binary'
"""
self.tier_num = len(self.tiers)
if mode in ['binary', 'b']:
with open(filepath, 'wb') as f:
def writebstr(s):
try:
bstr = s.encode('ascii')
except UnicodeError:
f.write(b'\xff\xff')
bstr = b''.join(struct.pack('>h', ord(c)) for c in s)
f.write(struct.pack('>h', len(s)))
f.write(bstr)
f.write(b'ooBinaryFile\x08TextGrid')
f.write(struct.pack('>d', self.xmin))
f.write(struct.pack('>d', self.xmax))
f.write(b'\x01')
f.write(struct.pack('>i', self.tier_num))
for tier in self.tiers:
f.write(chr(len(tier.tier_type)).encode('ascii'))
f.write(tier.tier_type.encode('ascii'))
writebstr(tier.name)
f.write(struct.pack('>d', tier.xmin))
f.write(struct.pack('>d', tier.xmax))
ints = tier.get_all_intervals()
f.write(struct.pack('>i', len(ints)))
itier = tier.tier_type == 'IntervalTier'
for c in ints:
f.write(struct.pack('>d', c[0]))
itier and f.write(struct.pack('>d', c[1]))
writebstr(c[2 if itier else 1])
elif mode in ['normal', 'n', 'short', 's']:
with codecs.open(filepath, 'w', codec) as f:
short = mode[0] == 's'
def wrt(indent, prefix, value, ff=''):
indent = 0 if short else indent
prefix = '' if short else prefix
if value is not None or not short:
s = u'{{}}{{}}{}\n'.format(ff)
f.write(s.format(' '*indent, prefix, value))
f.write(u'File type = "ooTextFile"\n'
u'Object class = "TextGrid"\n\n')
wrt(0, u'xmin = ', self.xmin, '{:f}')
wrt(0, u'xmax = ', self.xmax, '{:f}')
wrt(0, u'tiers? ', u'<exists>', '{}')
wrt(0, u'size = ', self.tier_num, '{:d}')
wrt(0, u'item []:', None)
for tnum, tier in enumerate(self.tiers, 1):
wrt(4, u'item [{:d}]:'.format(tnum), None)
wrt(8, u'class = ', tier.tier_type, '"{}"')
wrt(8, u'name = ', tier.name, '"{}"')
wrt(8, u'xmin = ', tier.xmin, '{:f}')
wrt(8, u'xmax = ', tier.xmax, '{:f}')
if tier.tier_type == 'IntervalTier':
ints = tier.get_all_intervals()
wrt(8, u'intervals: size = ', len(ints), '{:d}')
for i, c in enumerate(ints):
wrt(8, 'intervals [{:d}]:'.format(i+1), None)
wrt(12, 'xmin = ', c[0], '{:f}')
wrt(12, 'xmax = ', c[1], '{:f}')
wrt(12, 'text = ', c[2].replace('"', '""'), '"{}"')
elif tier.tier_type == 'TextTier':
wrt(8, u'points: size = ', len(tier.intervals), '{:d}')
for i, c in enumerate(tier.get_intervals()):
wrt(8, 'points [{:d}]:'.format(i+1), None)
wrt(12, 'number = ', c[0], '{:f}')
wrt(12, 'mark = ', c[1].replace('"', '""'), '"{}"')
else:
raise Exception('Unknown mode') | 0.000502 |
def _get_value_opc_attr(self, attr_name, prec_decimals=2):
"""Return sensor attribute with precission, or None if not present."""
try:
value = getattr(self, attr_name)
if value is not None:
return round(value, prec_decimals)
except I2cVariableNotImplemented:
pass
return None | 0.005571 |
def from_dict(data, ctx):
"""
Instantiate a new OrderFillTransaction from a dict (generally from
loading a JSON response). The data used to instantiate the
OrderFillTransaction is a shallow copy of the dict passed in, with any
complex child types instantiated appropriately.
"""
data = data.copy()
if data.get('units') is not None:
data['units'] = ctx.convert_decimal_number(
data.get('units')
)
if data.get('gainQuoteHomeConversionFactor') is not None:
data['gainQuoteHomeConversionFactor'] = ctx.convert_decimal_number(
data.get('gainQuoteHomeConversionFactor')
)
if data.get('lossQuoteHomeConversionFactor') is not None:
data['lossQuoteHomeConversionFactor'] = ctx.convert_decimal_number(
data.get('lossQuoteHomeConversionFactor')
)
if data.get('price') is not None:
data['price'] = ctx.convert_decimal_number(
data.get('price')
)
if data.get('fullVWAP') is not None:
data['fullVWAP'] = ctx.convert_decimal_number(
data.get('fullVWAP')
)
if data.get('fullPrice') is not None:
data['fullPrice'] = \
ctx.pricing.ClientPrice.from_dict(
data['fullPrice'], ctx
)
if data.get('pl') is not None:
data['pl'] = ctx.convert_decimal_number(
data.get('pl')
)
if data.get('financing') is not None:
data['financing'] = ctx.convert_decimal_number(
data.get('financing')
)
if data.get('commission') is not None:
data['commission'] = ctx.convert_decimal_number(
data.get('commission')
)
if data.get('guaranteedExecutionFee') is not None:
data['guaranteedExecutionFee'] = ctx.convert_decimal_number(
data.get('guaranteedExecutionFee')
)
if data.get('accountBalance') is not None:
data['accountBalance'] = ctx.convert_decimal_number(
data.get('accountBalance')
)
if data.get('tradeOpened') is not None:
data['tradeOpened'] = \
ctx.transaction.TradeOpen.from_dict(
data['tradeOpened'], ctx
)
if data.get('tradesClosed') is not None:
data['tradesClosed'] = [
ctx.transaction.TradeReduce.from_dict(d, ctx)
for d in data.get('tradesClosed')
]
if data.get('tradeReduced') is not None:
data['tradeReduced'] = \
ctx.transaction.TradeReduce.from_dict(
data['tradeReduced'], ctx
)
if data.get('halfSpreadCost') is not None:
data['halfSpreadCost'] = ctx.convert_decimal_number(
data.get('halfSpreadCost')
)
return OrderFillTransaction(**data) | 0.000644 |
def any(self, array, role = None):
"""
Return ``True`` if ``array`` is ``True`` for any members of the entity.
``array`` must have the dimension of the number of persons in the simulation
If ``role`` is provided, only the entity member with the given role are taken into account.
Example:
>>> salaries = household.members('salary', '2018-01') # e.g. [2000, 1500, 0, 0, 0]
>>> household.any(salaries >= 1800)
>>> array([True])
"""
sum_in_entity = self.sum(array, role = role)
return (sum_in_entity > 0) | 0.016077 |
def get_datatype(object_type, propid, vendor_id=0):
"""Return the datatype for the property of an object."""
if _debug: get_datatype._debug("get_datatype %r %r vendor_id=%r", object_type, propid, vendor_id)
# get the related class
cls = get_object_class(object_type, vendor_id)
if not cls:
return None
# get the property
prop = cls._properties.get(propid)
if not prop:
return None
# return the datatype
return prop.datatype | 0.006224 |
def can_dict(obj):
"""Can the *values* of a dict."""
if istype(obj, dict):
newobj = {}
for k, v in iteritems(obj):
newobj[k] = can(v)
return newobj
else:
return obj | 0.004545 |
def update_info(self, custom=None):
"""Updates the figure's suptitle.
Calls self.info_string() unless custom is provided.
Args:
custom: Overwrite it with this string, unless None.
"""
self.figure.suptitle(self.info_string() if custom is None else custom) | 0.006494 |
def get_hashed_filename(name, file, suffix=None):
"""
Gets a new filename for the provided file of the form
"oldfilename.hash.ext". If the old filename looks like it already contains a
hash, it will be replaced (so you don't end up with names like
"pic.hash.hash.ext")
"""
basename, hash, ext = split_filename(name)
file.seek(0)
new_hash = '.%s' % md5(file.read()).hexdigest()[:12]
if suffix is not None:
basename = '%s_%s' % (basename, suffix)
return '%s%s%s' % (basename, new_hash, ext) | 0.003697 |
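The hash segment in get_hashed_filename is the first 12 hex characters of an MD5 digest over the file contents; a standalone sketch of just that step (the filename pieces are placeholders):

import hashlib
import io

f = io.BytesIO(b'image bytes')
f.seek(0)
new_hash = hashlib.md5(f.read()).hexdigest()[:12]
print('pic.%s.jpg' % new_hash)  # -> 'pic.<12 hex chars>.jpg'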
def delete(self):
"""
If a dynamic version, delete it the standard way and remove it from the
inventory, else delete all dynamic versions.
"""
if self.dynamic_version_of is None:
self._delete_dynamic_versions()
else:
super(DynamicFieldMixin, self).delete()
self._inventory.srem(self.dynamic_part) | 0.005263 |
def get_fit_auto(self, CentralFreq, MaxWidth=15000, MinWidth=500, WidthIntervals=500, MakeFig=True, show_fig=True, silent=False):
"""
Tries a range of regions to search for peaks and runs the one with the least error
and returns the parameters with the least errors.
Parameters
----------
CentralFreq : float
The central frequency to use for the fittings.
MaxWidth : float, optional
The maximum bandwidth to use for the fitting of the peaks.
MinWidth : float, optional
The minimum bandwidth to use for the fitting of the peaks.
WidthIntervals : float, optional
The intervals to use in going between the MaxWidth and MinWidth.
show_fig : bool, optional
Whether to plot and show the final (best) fitting or not.
Returns
-------
OmegaTrap : ufloat
Trapping frequency
A : ufloat
A parameter
Gamma : ufloat
Gamma, the damping parameter
fig : matplotlib.figure.Figure object
The figure object created showing the PSD of the data
with the fit
ax : matplotlib.axes.Axes object
The axes object created showing the PSD of the data
with the fit
"""
MinTotalSumSquaredError = _np.infty
for Width in _np.arange(MaxWidth, MinWidth - WidthIntervals, -WidthIntervals):
try:
OmegaTrap, A, Gamma,_ , _ \
= self.get_fit_from_peak(
CentralFreq - Width / 2,
CentralFreq + Width / 2,
silent=True,
MakeFig=False,
show_fig=False)
except RuntimeError:
_warnings.warn("Couldn't find good fit with width {}".format(
Width), RuntimeWarning)
val = _uncertainties.ufloat(_np.NaN, _np.NaN)
OmegaTrap = val
A = val
Gamma = val
TotalSumSquaredError = (
A.std_dev / A.n)**2 + (Gamma.std_dev / Gamma.n)**2 + (OmegaTrap.std_dev / OmegaTrap.n)**2
#print("totalError: {}".format(TotalSumSquaredError))
if TotalSumSquaredError < MinTotalSumSquaredError:
MinTotalSumSquaredError = TotalSumSquaredError
BestWidth = Width
if silent != True:
print("found best")
try:
OmegaTrap, A, Gamma, fig, ax \
= self.get_fit_from_peak(CentralFreq - BestWidth / 2,
CentralFreq + BestWidth / 2,
MakeFig=MakeFig,
show_fig=show_fig,
silent=silent)
except UnboundLocalError:
raise ValueError("A best width was not found, try increasing the number of widths tried by either decreasing WidthIntervals or MinWidth or increasing MaxWidth")
OmegaTrap = self.OmegaTrap
A = self.A
Gamma = self.Gamma
self.FTrap = OmegaTrap/(2*pi)
return OmegaTrap, A, Gamma, fig, ax | 0.003999 |
def plot_eps_data_hist(self, dfs):
"""Plot histograms of data residuals and data error weighting
TODO:
* add percentage of data below/above the RMS value
"""
# check if this is a DC inversion
if 'datum' in dfs[0]:
dc_inv = True
else:
dc_inv = False
nr_y = len(dfs)
size_y = 5 / 2.54 * nr_y
if dc_inv:
nr_x = 1
else:
nr_x = 3
size_x = 15 / 2.54
fig, axes = plt.subplots(nr_y, nr_x, figsize=(size_x, size_y))
axes = np.atleast_2d(axes)
# plot initial data errors
df = dfs[0]
if dc_inv:
ax = axes[0, 0]
ax.hist(
df['datum'] / df['eps_r'],
100,
)
ax.set_xlabel(r'$-log(|R|) / \epsilon_r$')
ax.set_ylabel(r'count')
else:
# complex inversion
ax = axes[0, 0]
ax.hist(
df['-log(|R|)'] / df['eps'],
100,
)
ax.set_xlabel(r'$-log(|R|)$')
ax.set_ylabel(r'count')
ax = axes[0, 1]
ax.hist(
df['-log(|R|)'] / df['eps_r'],
100,
)
ax.set_xlabel(r'$-log(|R|) / \epsilon_r$')
ax.set_ylabel(r'count')
ax = axes[0, 2]
phase_data = df['-Phase(rad)'] / df['eps_p']
if not np.all(np.isinf(phase_data) | np.isnan(phase_data)):
ax.hist(
phase_data,
100,
)
ax.set_xlabel(r'$-\phi[rad] / \epsilon_p$')
ax.set_ylabel(r'count')
# iterations
for it, df in enumerate(dfs[1:]):
ax = axes[1 + it, 0]
ax.hist(
df['psi'],
100
)
rms = np.sqrt(
1 / df['psi'].shape[0] *
np.sum(
df['psi'] ** 2
)
)
ax.axvline(rms, color='k', linestyle='dashed')
ax.set_title('iteration: {0}'.format(it))
ax.set_xlabel('psi')
ax.set_ylabel(r'count')
ax = axes[1 + it, 1]
Rdat = df['Re(d)']
Rmod = df['Re(f(m))']
ax.scatter(
Rdat,
Rmod,
)
ax.set_xlabel(r'$log(R_{data}~[\Omega])$')
ax.set_ylabel(r'$log(R_{mod}~[\Omega])$')
ax = axes[1 + it, 2]
phidat = df['Im(d)']
phimod = df['Im(f(m))']
ax.scatter(
phidat,
phimod,
)
ax.set_xlabel(r'$\phi_{data}~[mrad]$')
ax.set_ylabel(r'$\phi_{mod}~[mrad]$')
fig.tight_layout()
fig.savefig('eps_plot_hist.png', dpi=300) | 0.00069 |
def graph(self, **kw):
"""Set up a graphviz graph context.
"""
self.name = kw.get('name', 'G')
self.fillcolor = kw.get('fillcolor', '#ffffff')
self.fontcolor = kw.get('fontcolor', '#000000')
self.rankdir = kw.get('rankdir', 'BT' if self.reverse else 'TB')
if kw.get('concentrate', True):
self.concentrate = 'concentrate = true;'
else:
self.concentrate = ''
self.dedent("""
digraph {self.name} {{
{self.concentrate}
rankdir = {self.rankdir};
node [style=filled,fillcolor="{self.fillcolor}",fontcolor="{self.fontcolor}",fontname=Helvetica,fontsize=10];
""".format(self=self))
yield
self.writeln('}') | 0.003856 |
def get_project(project_id):
"""Return a PYBOSSA Project for the project_id.
:param project_id: PYBOSSA Project ID
:type project_id: integer
:rtype: PYBOSSA Project
:returns: A PYBOSSA Project object
"""
try:
res = _pybossa_req('get', 'project', project_id)
if res.get('id'):
return Project(res)
else:
return res
except: # pragma: no cover
raise | 0.004587 |
def _plain_authentication(self, login, password, authz_id=b""):
"""SASL PLAIN authentication
:param login: username
:param password: clear password
:return: True on success, False otherwise.
"""
if isinstance(login, six.text_type):
login = login.encode("utf-8")
if isinstance(password, six.text_type):
password = password.encode("utf-8")
params = base64.b64encode(b'\0'.join([authz_id, login, password]))
code, data = self.__send_command("AUTHENTICATE", [b"PLAIN", params])
if code == "OK":
return True
return False | 0.003125 |
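The AUTHENTICATE PLAIN payload built above is just the base64 of authzid\0login\0password; a self-contained sketch of that encoding step (the credentials are placeholders):

import base64

authz_id, login, password = b'', b'alice', b's3cret'
payload = base64.b64encode(b'\0'.join([authz_id, login, password]))
print(payload)  # b'AGFsaWNlAHMzY3JldA=='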
def validate_image_col_row(image , col , row):
"""Basic checks for columns and rows values"""
SPLIT_LIMIT = 99
try:
col = int(col)
row = int(row)
except:
raise ValueError('columns and rows values could not be cast to integer.')
if col < 2:
raise ValueError('Number of columns must be between 2 and {} (you \
asked for {}).'.format(SPLIT_LIMIT, col))
if row < 2 :
raise ValueError('Number of rows must be between 2 and {} (you \
asked for {}).'.format(SPLIT_LIMIT, row)) | 0.010221 |
def _groupname():
'''
Grain for the minion groupname
'''
if grp:
try:
groupname = grp.getgrgid(os.getgid()).gr_name
except KeyError:
groupname = ''
else:
groupname = ''
return groupname | 0.003876 |
def guess_format(text):
"""Guess YANG/YIN format
If the first non-whitespace character is '<' then it is XML.
Return 'yang' or 'yin'"""
format = 'yang'
i = 0
while i < len(text) and text[i].isspace():
i += 1
if i < len(text):
if text[i] == '<':
format = 'yin'
return format | 0.002994 |
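A quick usage check of guess_format, assuming it is in scope; leading whitespace is skipped before the '<' test:

print(guess_format('  <module name="x"/>'))           # 'yin'
print(guess_format('module x { namespace urn:x; }'))  # 'yang'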
def undisplay(self):
"""Undisplays the top tool.
This actually forces a complete re-render.
"""
self._tools.pop()
self._justClear()
for tool in self._tools:
self._justDisplay(tool) | 0.008299 |
def name_file(lane: int, flowcell: str, sample: str, read: int,
undetermined: bool=False, date: dt.datetime=None, index: str=None) -> str:
"""Name a FASTQ file following MIP conventions."""
flowcell = f"{flowcell}-undetermined" if undetermined else flowcell
date_str = date.strftime('%y%m%d') if date else '171015'
index = index if index else 'XXXXXX'
return f"{lane}_{date_str}_{flowcell}_{sample}_{index}_{read}.fastq.gz" | 0.02079 |
def predecesors_pattern(element, root):
"""
    Look for `element` by its predecessors.
Args:
element (obj): HTMLElement instance of the object you are looking for.
root (obj): Root of the `DOM`.
Returns:
list: ``[PathCall()]`` - list with one :class:`PathCall` object (to \
allow use with ``.extend(predecesors_pattern())``).
"""
def is_root_container(el):
return el.parent.parent.getTagName() == ""
if not element.parent or not element.parent.parent or \
is_root_container(element):
return []
trail = [
[
element.parent.parent.getTagName(),
_params_or_none(element.parent.parent.params)
],
[
element.parent.getTagName(),
_params_or_none(element.parent.params)
],
[element.getTagName(), _params_or_none(element.params)],
]
match = root.match(*trail)
if element in match:
return [
PathCall("match", match.index(element), trail)
] | 0.000952 |
def clearLocalServices(self):
'send Bye messages for the services and remove them'
for service in list(self._localServices.values()):
self._sendBye(service)
self._localServices.clear() | 0.009009 |
def heatmap(args):
"""
%prog heatmap fastafile chr1
Combine stack plot with heatmap to show abundance of various tracks along
given chromosome. Need to give multiple beds to --stacks and --heatmaps
"""
p = OptionParser(heatmap.__doc__)
p.add_option("--stacks",
default="Exons,Introns,DNA_transposons,Retrotransposons",
help="Features to plot in stackplot [default: %default]")
p.add_option("--heatmaps",
default="Copia,Gypsy,hAT,Helitron,Introns,Exons",
help="Features to plot in heatmaps [default: %default]")
p.add_option("--meres", default=None,
help="Extra centromere / telomere features [default: %default]")
add_window_options(p)
opts, args, iopts = p.set_image_options(args, figsize="8x5")
if len(args) != 2:
sys.exit(not p.print_help())
fastafile, chr = args
window, shift, subtract, merge = check_window_options(opts)
stacks = opts.stacks.split(",")
heatmaps = opts.heatmaps.split(",")
stackbeds = get_beds(stacks)
heatmapbeds = get_beds(heatmaps)
stackbins = get_binfiles(stackbeds, fastafile, shift,
subtract=subtract, merge=merge)
heatmapbins = get_binfiles(heatmapbeds, fastafile, shift,
subtract=subtract, merge=merge)
margin = .06
inner = .015
clen = Sizes(fastafile).mapping[chr]
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
# Gauge
ratio = draw_gauge(root, margin, clen, rightmargin=4 * margin)
yinterval = .3
xx = margin
yy = 1 - margin
yy -= yinterval
xlen = clen / ratio
cc = chr
if "_" in chr:
ca, cb = chr.split("_")
cc = ca[0].upper() + cb
root.add_patch(Rectangle((xx, yy), xlen, yinterval - inner, color=gray))
ax = fig.add_axes([xx, yy, xlen, yinterval - inner])
nbins = get_nbins(clen, shift)
owindow = clen / 100
if owindow > window:
window = owindow / shift * shift
stackplot(ax, stackbins, nbins, palette, chr, window, shift)
ax.text(.1, .9, cc, va="top", zorder=100, transform=ax.transAxes,
bbox=dict(boxstyle="round", fc="w", alpha=.5))
# Legends
xx += xlen + .01
yspace = (yinterval - inner) / (len(stackbins) + 1)
yy = 1 - margin - yinterval
for s, p in zip(stacks, palette):
s = s.replace("_", " ")
s = Registration.get(s, s)
yy += yspace
root.add_patch(Rectangle((xx, yy), inner, inner, color=p, lw=0))
root.text(xx + 1.5 * inner, yy, s, size=10)
yh = .05 # Heatmap height
# Heatmaps
xx = margin
yy = 1 - margin - yinterval - inner
for s, p in zip(heatmaps, heatmapbins):
s = s.replace("_", " ")
s = Registration.get(s, s)
yy -= yh
m = stackarray(p, chr, window, shift)
Y = np.array([m, m])
root.imshow(Y, extent=(xx, xx + xlen, yy, yy + yh - inner),
interpolation="nearest", aspect="auto", cmap=iopts.cmap)
root.text(xx + xlen + .01, yy, s, size=10)
yy -= yh
meres = opts.meres
if meres:
bed = Bed(meres)
for b in bed:
if b.seqid != chr:
continue
pos = (b.start + b.end) / 2
cpos = pos / ratio
xx = margin + cpos
accn = b.accn.capitalize()
root.add_patch(CirclePolygon((xx, yy), radius=.01, fc="m", ec="m"))
root.text(xx + .014, yy, accn, va="center", color="m")
root.set_xlim(0, 1)
root.set_ylim(0, 1)
root.set_axis_off()
image_name = chr + "." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts) | 0.000799 |
def plot_variability_thresholds(varthreshpkl,
xmin_lcmad_stdev=5.0,
xmin_stetj_stdev=2.0,
xmin_iqr_stdev=2.0,
xmin_inveta_stdev=2.0,
lcformat='hat-sql',
lcformatdir=None,
magcols=None):
'''This makes plots for the variability threshold distributions.
Parameters
----------
varthreshpkl : str
The pickle produced by the function above.
xmin_lcmad_stdev,xmin_stetj_stdev,xmin_iqr_stdev,xmin_inveta_stdev : float or np.array
Values of the threshold values to override the ones in the
        `varthreshpkl`. If provided, will plot the thresholds accordingly
instead of using the ones in the input pickle directly.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
If this is provided, gives the path to a directory when you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
magcols : list of str or None
The magcol keys to use from the lcdict.
Returns
-------
str
The file name of the threshold plot generated.
'''
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(dfileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception as e:
LOGEXCEPTION("can't figure out the light curve format")
return None
if magcols is None:
magcols = dmagcols
with open(varthreshpkl,'rb') as infd:
allobjects = pickle.load(infd)
magbins = allobjects['magbins']
for magcol in magcols:
min_lcmad_stdev = (
xmin_lcmad_stdev or allobjects[magcol]['min_lcmad_stdev']
)
min_stetj_stdev = (
xmin_stetj_stdev or allobjects[magcol]['min_stetj_stdev']
)
min_iqr_stdev = (
xmin_iqr_stdev or allobjects[magcol]['min_iqr_stdev']
)
min_inveta_stdev = (
xmin_inveta_stdev or allobjects[magcol]['min_inveta_stdev']
)
fig = plt.figure(figsize=(20,16))
# the mag vs lcmad
plt.subplot(221)
plt.plot(allobjects[magcol]['sdssr'],
allobjects[magcol]['lcmad']*1.483,
marker='.',ms=1.0, linestyle='none',
rasterized=True)
plt.plot(allobjects[magcol]['binned_sdssr_median'],
np.array(allobjects[magcol]['binned_lcmad_median'])*1.483,
linewidth=3.0)
plt.plot(
allobjects[magcol]['binned_sdssr_median'],
np.array(allobjects[magcol]['binned_lcmad_median'])*1.483 +
min_lcmad_stdev*np.array(
allobjects[magcol]['binned_lcmad_stdev']
),
linewidth=3.0, linestyle='dashed'
)
plt.xlim((magbins.min()-0.25, magbins.max()))
plt.xlabel('SDSS r')
plt.ylabel(r'lightcurve RMS (MAD $\times$ 1.483)')
plt.title('%s - SDSS r vs. light curve RMS' % magcol)
plt.yscale('log')
plt.tight_layout()
# the mag vs stetsonj
plt.subplot(222)
plt.plot(allobjects[magcol]['sdssr'],
allobjects[magcol]['stetsonj'],
marker='.',ms=1.0, linestyle='none',
rasterized=True)
plt.plot(allobjects[magcol]['binned_sdssr_median'],
allobjects[magcol]['binned_stetsonj_median'],
linewidth=3.0)
plt.plot(
allobjects[magcol]['binned_sdssr_median'],
np.array(allobjects[magcol]['binned_stetsonj_median']) +
min_stetj_stdev*np.array(
allobjects[magcol]['binned_stetsonj_stdev']
),
linewidth=3.0, linestyle='dashed'
)
plt.xlim((magbins.min()-0.25, magbins.max()))
plt.xlabel('SDSS r')
plt.ylabel('Stetson J index')
plt.title('%s - SDSS r vs. Stetson J index' % magcol)
plt.yscale('log')
plt.tight_layout()
# the mag vs IQR
plt.subplot(223)
plt.plot(allobjects[magcol]['sdssr'],
allobjects[magcol]['iqr'],
marker='.',ms=1.0, linestyle='none',
rasterized=True)
plt.plot(allobjects[magcol]['binned_sdssr_median'],
allobjects[magcol]['binned_iqr_median'],
linewidth=3.0)
plt.plot(
allobjects[magcol]['binned_sdssr_median'],
np.array(allobjects[magcol]['binned_iqr_median']) +
min_iqr_stdev*np.array(
allobjects[magcol]['binned_iqr_stdev']
),
linewidth=3.0, linestyle='dashed'
)
plt.xlabel('SDSS r')
plt.ylabel('IQR')
plt.title('%s - SDSS r vs. IQR' % magcol)
plt.xlim((magbins.min()-0.25, magbins.max()))
plt.yscale('log')
plt.tight_layout()
        # the mag vs 1/eta
plt.subplot(224)
plt.plot(allobjects[magcol]['sdssr'],
allobjects[magcol]['inveta'],
marker='.',ms=1.0, linestyle='none',
rasterized=True)
plt.plot(allobjects[magcol]['binned_sdssr_median'],
allobjects[magcol]['binned_inveta_median'],
linewidth=3.0)
plt.plot(
allobjects[magcol]['binned_sdssr_median'],
np.array(allobjects[magcol]['binned_inveta_median']) +
min_inveta_stdev*np.array(
allobjects[magcol]['binned_inveta_stdev']
),
linewidth=3.0, linestyle='dashed'
)
plt.xlabel('SDSS r')
plt.ylabel(r'$1/\eta$')
plt.title(r'%s - SDSS r vs. $1/\eta$' % magcol)
plt.xlim((magbins.min()-0.25, magbins.max()))
plt.yscale('log')
plt.tight_layout()
plt.savefig('varfeatures-%s-%s-distributions.png' % (varthreshpkl,
magcol),
bbox_inches='tight')
plt.close('all') | 0.001321 |
def upload_sticker_file(self, user_id, png_sticker):
"""
Use this method to upload a .png file with a sticker for later use in createNewStickerSet and addStickerToSet methods (can be used multiple times). Returns the uploaded File on success.
https://core.telegram.org/bots/api#uploadstickerfile
Parameters:
:param user_id: User identifier of sticker file owner
:type user_id: int
:param png_sticker: Png image with the sticker, must be up to 512 kilobytes in size, dimensions must not exceed 512px, and either width or height must be exactly 512px. More info on Sending Files »
:type png_sticker: pytgbot.api_types.sendable.files.InputFile
Returns:
:return: Returns the uploaded File on success
:rtype: pytgbot.api_types.receivable.media.File
"""
from pytgbot.api_types.sendable.files import InputFile
assert_type_or_raise(user_id, int, parameter_name="user_id")
assert_type_or_raise(png_sticker, InputFile, parameter_name="png_sticker")
result = self.do("uploadStickerFile", user_id=user_id, png_sticker=png_sticker)
if self.return_python_objects:
logger.debug("Trying to parse {data}".format(data=repr(result)))
from pytgbot.api_types.receivable.media import File
try:
return File.from_array(result)
except TgApiParseException:
logger.debug("Failed parsing as api_type File", exc_info=True)
# end try
# no valid parsing so far
raise TgApiParseException("Could not parse result.") # See debug log for details!
# end if return_python_objects
return result | 0.008319 |
def watch_thread(self):
'''watch for menu events from child'''
from MAVProxy.modules.lib.mp_settings import MPSetting
try:
while True:
msg = self.parent_pipe_recv.recv()
if isinstance(msg, win_layout.WinLayout):
win_layout.set_layout(msg, self.set_layout)
elif self.menu_callback is not None:
self.menu_callback(msg)
time.sleep(0.1)
except EOFError:
pass | 0.003891 |
def metric(self):
"""
Compute a matrix of Hilbert-Schmidt inner products for the basis operators, update
self._metric, and return the value.
:return: The matrix of inner products.
:rtype: numpy.matrix
"""
if self._metric is None:
_log.debug("Computing and caching operator basis metric")
self._metric = np.matrix([[(j.dag() * k).tr() for k in self.ops] for j in self.ops])
return self._metric | 0.008316 |
def readerWalker(self):
"""Create an xmltextReader for a preparsed document. """
ret = libxml2mod.xmlReaderWalker(self._o)
if ret is None:raise treeError('xmlReaderWalker() failed')
__tmp = xmlTextReader(_obj=ret)
return __tmp | 0.015038 |
def accept(self, key, result):
"""
        Synchronously accept the confirmation message
        :param key: correlation_id
        :param result: the message returned by the server
        """
        self.data[key]['isAccept'] = True  # mark that the server's reply has been received
self.data[key]['result'] = str(result)
self._channel.queue_delete(self.data[key]['reply_queue_name']) | 0.006309 |
def taper(path,
length,
final_width,
final_distance,
direction=None,
layer=0,
datatype=0):
'''
Linear tapers for the lazy.
path : `gdspy.Path` to append the taper
length : total length
    final_width : final width of the taper
direction : taper direction
layer : GDSII layer number (int or list)
datatype : GDSII datatype number (int or list)
Parameters `layer` and `datatype` must be of the same type. If they
are lists, they must have the same length. Their length indicate the
number of pieces that compose the taper.
Return `path`.
'''
if layer.__class__ == datatype.__class__ == [].__class__:
assert len(layer) == len(datatype)
elif isinstance(layer, int) and isinstance(datatype, int):
layer = [layer]
datatype = [datatype]
else:
raise ValueError('Parameters layer and datatype must have the same '
'type (either int or list) and length.')
n = len(layer)
w = numpy.linspace(2 * path.w, final_width, n + 1)[1:]
d = numpy.linspace(path.distance, final_distance, n + 1)[1:]
l = float(length) / n
for i in range(n):
path.segment(
l, direction, w[i], d[i], layer=layer[i], datatype=datatype[i])
return path | 0.001477 |
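A hedged usage sketch for taper with gdspy (parameter values are arbitrary, and the gdspy.Path usage is an assumption about that library's API); the taper is split into one segment per layer/datatype pair:

import gdspy

path = gdspy.Path(2, (0, 0), number_of_paths=2, distance=4)
path.segment(10, '+x', layer=1)
taper(path, length=20, final_width=0.5, final_distance=1,
      direction='+x', layer=[1, 1], datatype=[0, 1])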
def _sim_prediction(self, lmda, Y, scores, h, t_params, simulations):
""" Simulates a h-step ahead mean prediction
Parameters
----------
lmda : np.array
The past predicted values
Y : np.array
The past data
scores : np.array
The past scores
h : int
How many steps ahead for the prediction
t_params : np.array
A vector of (transformed) latent variables
simulations : int
How many simulations to perform
Returns
----------
Matrix of simulations
"""
sim_vector = np.zeros([simulations,h])
for n in range(0,simulations):
            # Create arrays to iterate over
lmda_exp = lmda.copy()
scores_exp = scores.copy()
Y_exp = Y.copy()
# Loop over h time periods
for t in range(0,h):
new_value = t_params[0]
if self.p != 0:
for j in range(1,self.p+1):
new_value += t_params[j]*lmda_exp[-j]
if self.q != 0:
for k in range(1,self.q+1):
new_value += t_params[k+self.p]*scores_exp[-k]
if self.leverage is True:
new_value += t_params[1+self.p+self.q]*np.sign(-(Y_exp[-1]-t_params[-2]-t_params[-1]*np.exp(lmda_exp[-1]/2.0)))*(scores_exp[-1]+1)
lmda_exp = np.append(lmda_exp,[new_value]) # For indexing consistency
scores_exp = np.append(scores_exp,scores[np.random.randint(scores.shape[0])]) # expectation of score is zero
Y_exp = np.append(Y_exp,Y[np.random.randint(Y.shape[0])]) # bootstrap returns
sim_vector[n] = lmda_exp[-h:]
return np.transpose(sim_vector) | 0.010644 |
def build_tree_from_distance_matrix(matrix, best_tree=False, params={},\
working_dir='/tmp'):
"""Returns a tree from a distance matrix.
matrix: a square Dict2D object (cogent.util.dict2d)
best_tree: if True (default:False), uses a slower but more accurate
algorithm to build the tree.
params: dict of parameters to pass in to the Clearcut app controller.
The result will be an cogent.core.tree.PhyloNode object, or None if tree
fails.
"""
params['--out'] = get_tmp_filename(working_dir)
# Create instance of app controller, enable tree, disable alignment
app = Clearcut(InputHandler='_input_as_multiline_string', params=params, \
WorkingDir=working_dir, SuppressStdout=True,\
SuppressStderr=True)
#Turn off input as alignment
app.Parameters['-a'].off()
#Input is a distance matrix
app.Parameters['-d'].on()
if best_tree:
app.Parameters['-N'].on()
# Turn the dict2d object into the expected input format
matrix_input, int_keys = _matrix_input_from_dict2d(matrix)
# Collect result
result = app(matrix_input)
# Build tree
tree = DndParser(result['Tree'].read(), constructor=PhyloNode)
# reassign to original names
for node in tree.tips():
node.Name = int_keys[node.Name]
# Clean up
result.cleanUp()
del(app, result, params)
return tree | 0.005658 |
def setQuery(self, query):
"""
        Sets the query for this widget to the inputted query instance.
:param query | <orb.Query> || <orb.QueryCompound>
"""
if not self.isNull() and hash(query) == hash(self.query()):
return
# add entries
table = self.tableType()
self.setUpdatesEnabled(False)
self.blockSignals(True)
self.clear()
if query is None or table is None:
self.setEnabled(False)
self.setUpdatesEnabled(True)
self.blockSignals(False)
return
else:
self.setEnabled(True)
# load the queries for this item
if QueryCompound.typecheck(query):
queries = query.queries()
self.setCurrentJoiner(query.operatorType())
else:
queries = [query]
self.uiNameTXT.setText(query.name())
layout = self._entryWidget.layout()
for index, query in enumerate(queries):
widget = self.addEntry(query)
widget.setFirst(index == 0)
widget.setLast(index == (len(queries) - 1))
widget.setJoiner(self.currentJoiner())
self.setUpdatesEnabled(True)
self.blockSignals(False) | 0.007299 |
def save_image(self, cat, img, data):
"""Saves a new image."""
filename = self.path(cat, img)
mkdir(filename)
if type(data) == np.ndarray:
data = Image.fromarray(data).convert('RGB')
data.save(filename) | 0.007874 |
async def _try_catch_coro(emitter, event, listener, coro):
"""Coroutine wrapper to catch errors after async scheduling.
Args:
emitter (EventEmitter): The event emitter that is attempting to
call a listener.
event (str): The event that triggered the emitter.
listener (async def): The async def that was used to generate the coro.
coro (coroutine): The coroutine that should be tried.
If an exception is caught the function will use the emitter to emit the
failure event. If, however, the current event _is_ the failure event then
the method reraises. The reraised exception may show in debug mode for the
event loop but is otherwise silently dropped.
"""
try:
await coro
except Exception as exc:
if event == emitter.LISTENER_ERROR_EVENT:
raise
emitter.emit(emitter.LISTENER_ERROR_EVENT, event, listener, exc) | 0.001074 |
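A self-contained sketch of the error rerouting, assuming _try_catch_coro is in scope; the emitter class below is a made-up stand-in, not the library's:

import asyncio

class _FakeEmitter:
    LISTENER_ERROR_EVENT = 'listener-error'

    def emit(self, event, *args):
        print('emit:', event, args)

async def _listener():
    raise RuntimeError('boom')

async def _demo():
    # The RuntimeError is caught and re-emitted as LISTENER_ERROR_EVENT.
    await _try_catch_coro(_FakeEmitter(), 'data', _listener, _listener())

asyncio.run(_demo())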