text (string, lengths 78–104k) | score (float64, 0–0.18)
---|---
def set_background_color(self, color):
""" Given a background color (a QColor), attempt to set a color map
that will be aesthetically pleasing.
"""
# Set a new default color map.
self.default_color_map = self.darkbg_color_map.copy()
if color.value() >= 127:
# Colors appropriate for a terminal with a light background. For
# now, only use non-bright colors...
for i in xrange(8):
self.default_color_map[i + 8] = self.default_color_map[i]
# ...and replace white with black.
self.default_color_map[7] = self.default_color_map[15] = 'black'
# Update the current color map with the new defaults.
self.color_map.update(self.default_color_map) | 0.002551 |
def to_utc_datetime(self, value):
"""
        Convert value to a timezone-aware datetime (datetime.datetime instance).
"""
if isinstance(value, (six.integer_types, float, six.string_types)):
value = self.to_naive_datetime(value)
if isinstance(value, datetime.datetime):
if timezone.is_naive(value):
value = timezone.make_aware(value, timezone.utc)
else:
value = timezone.localtime(value, timezone.utc)
return value
raise exceptions.ValidationError(
"Unable to convert value: '%s' to python data type" % value,
code="invalid_datetime"
) | 0.002911 |
def get_required_pull_request_reviews(self):
"""
:calls: `GET /repos/:owner/:repo/branches/:branch/protection/required_pull_request_reviews <https://developer.github.com/v3/repos/branches>`_
:rtype: :class:`github.RequiredPullRequestReviews.RequiredPullRequestReviews`
"""
headers, data = self._requester.requestJsonAndCheck(
"GET",
self.protection_url + "/required_pull_request_reviews",
headers={'Accept': Consts.mediaTypeRequireMultipleApprovingReviews}
)
return github.RequiredPullRequestReviews.RequiredPullRequestReviews(self._requester, headers, data, completed=True) | 0.007508 |
def event_types(self):
"""
Raises
------
IndexError
When there is no selected rater
"""
try:
events = self.rater.find('events')
except AttributeError:
raise IndexError('You need to have at least one rater')
return [x.get('type') for x in events] | 0.00578 |
def eval_objfn(self):
r"""Compute components of objective function as well as total
contribution to objective function. Data fidelity term is
:math:`(1/2) \| H \mathbf{x} - \mathbf{s} \|_2^2` and
regularisation term is :math:`\| W_{\mathrm{tv}}
\sqrt{(G_r \mathbf{x})^2 + (G_c \mathbf{x})^2}\|_1`.
"""
Ef = self.Af * self.Xf - self.Sf
dfd = sl.rfl2norm2(Ef, self.S.shape, axis=self.axes) / 2.0
reg = np.sum(self.Wtv * np.sqrt(np.sum(self.obfn_gvar()**2,
axis=self.saxes)))
obj = dfd + self.lmbda*reg
return (obj, dfd, reg) | 0.00304 |
def ensure_str(text):
u"""Convert unicode to str using pyreadline_codepage"""
if isinstance(text, unicode):
try:
return text.encode(pyreadline_codepage, u"replace")
except (LookupError, TypeError):
return text.encode(u"ascii", u"replace")
return text | 0.003236 |
def write_puml(self, filename=''):
"""
Writes PUML from the system. If filename is given, stores result in the file.
Otherwise returns result as a string.
"""
def get_type(o):
type = 'program'
if isinstance(o, AbstractSensor):
type = 'sensor'
elif isinstance(o, AbstractActuator):
type = 'actuator'
return type
if filename:
s = open(filename, 'w')
else:
s = io.StringIO()
s.write('@startuml\n')
s.write('skinparam state {\n')
for k, v in list(self.background_colors.items()):
s.write('BackGroundColor<<%s>> %s\n' % (k, v))
s.write('}\n')
for o in self.system.objects:
if isinstance(o, DefaultProgram) or o.hide_in_uml:
continue
if isinstance(o, ProgrammableSystemObject):
s.write('state "%s" as %s <<%s>>\n' % (o, o, get_type(o)))
s.write('%s: %s\n' % (o, o.class_name))
if isinstance(o, AbstractActuator):
for p in reversed(o.program_stack):
s.write('%s: %s :: %s\n' % (o, p, o.program_status.get(p, '-')))
elif hasattr(o, 'status'):
s.write('%s: Status: %s\n' % (o, o.status))
if getattr(o, 'is_program', False):
s.write('%s: Priority: %s\n' % (o, o.priority))
for t in o.actual_triggers:
if isinstance(t, DefaultProgram) or t.hide_in_uml:
continue
s.write('%s -[%s]-> %s\n' % (t, self.arrow_colors['trigger'], o))
for t in o.actual_targets:
if t.hide_in_uml:
continue
if o.active:
color = 'active_target'
else:
color = 'inactive_target'
if getattr(t, 'program', None) == o:
color = 'controlled_target'
s.write('%s -[%s]-> %s\n' % (o, self.arrow_colors[color], t))
s.write('@enduml\n')
if filename:
s.close()
else:
return s.getvalue() | 0.002602 |
def is_valid_github_uri(uri: URI, expected_path_terms: Tuple[str, ...]) -> bool:
"""
Return a bool indicating whether or not the URI fulfills the following specs
Valid Github URIs *must*:
- Have 'https' scheme
- Have 'api.github.com' authority
- Have a path that contains all "expected_path_terms"
"""
if not is_text(uri):
return False
parsed = parse.urlparse(uri)
path, scheme, authority = parsed.path, parsed.scheme, parsed.netloc
if not all((path, scheme, authority)):
return False
if any(term for term in expected_path_terms if term not in path):
return False
if scheme != "https":
return False
if authority != GITHUB_API_AUTHORITY:
return False
return True | 0.003922 |
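A short usage sketch for the validator above; treating GITHUB_API_AUTHORITY as "api.github.com" is an assumption, since the constant is defined elsewhere in the module:

# Illustrative only: GITHUB_API_AUTHORITY is assumed to equal "api.github.com".
uri = "https://api.github.com/repos/owner/repo/git/blobs/abc123"
is_valid_github_uri(uri, ("repos", "blobs"))                     # True
is_valid_github_uri(uri.replace("https", "http"), ("repos",))    # False: scheme must be https
is_valid_github_uri(uri, ("releases",))                          # False: missing path term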
def update_attrs(self):
""" Add attributes such as count/end_time that can be present """
for key, value in self._response_json.items():
if key != 'results' and type(value) not in (list, dict):
setattr(self, key, value) | 0.007605 |
def check_specifier(dist, attr, value):
"""Verify that value is a valid version specifier"""
try:
packaging.specifiers.SpecifierSet(value)
except packaging.specifiers.InvalidSpecifier as error:
tmpl = (
"{attr!r} must be a string "
"containing valid version specifiers; {error}"
)
raise DistutilsSetupError(tmpl.format(attr=attr, error=error)) | 0.002433 |
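A minimal sketch of the underlying validation, calling the packaging library directly rather than through the setuptools hook above:

import packaging.specifiers

packaging.specifiers.SpecifierSet(">=1.0,<2.0")      # valid: constructs without error
try:
    packaging.specifiers.SpecifierSet("not a specifier")
except packaging.specifiers.InvalidSpecifier:
    pass  # this is the failure that check_specifier() re-raises as DistutilsSetupError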
def DocInheritMeta(style="parent", abstract_base_class=False):
""" A metaclass that merges the respective docstrings of a parent class and of its child, along with their
properties, methods (including classmethod, staticmethod, decorated methods).
Parameters
----------
style: Union[Any, Callable[[str, str], str]], optional (default: "parent")
A valid inheritance-scheme style ID or function that merges two docstrings.
abstract_base_class: bool, optional(default: False)
If True, the returned metaclass inherits from abc.ABCMeta.
Thus a class that derives from DocInheritMeta(style="numpy", abstract_base_class=True)
will be an abstract base class, whose derived classes will inherit docstrings
using the numpy-style inheritance scheme.
Returns
-------
custom_inherit.DocInheritorBase"""
merge_func = store[style]
metaclass = _DocInheritorBase
metaclass.class_doc_inherit = staticmethod(merge_func)
metaclass.attr_doc_inherit = staticmethod(merge_func)
return metaclass if not abstract_base_class else type("abc" + metaclass.__name__, (_ABCMeta, metaclass), {}) | 0.006568 |
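A hypothetical usage sketch based on the docstring above; the class names are illustrative, not part of the library:

class Parent(metaclass=DocInheritMeta(style="parent")):
    def greet(self):
        """Say hello."""

class Child(Parent):
    def greet(self):
        pass  # no docstring here: under the "parent" style it inherits "Say hello."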
def best_response(self, opponents_actions, tie_breaking='smallest',
payoff_perturbation=None, tol=None, random_state=None):
"""
Return the best response action(s) to `opponents_actions`.
Parameters
----------
opponents_actions : scalar(int) or array_like
A profile of N-1 opponents' actions, represented by either
scalar(int), array_like(float), array_like(int), or
array_like(array_like(float)). If N=2, then it must be a
scalar of integer (in which case it is treated as the
opponent's pure action) or a 1-dimensional array of floats
(in which case it is treated as the opponent's mixed
action). If N>2, then it must be an array of N-1 objects,
where each object must be an integer (pure action) or an
array of floats (mixed action).
tie_breaking : str, optional(default='smallest')
str in {'smallest', 'random', False}. Control how, or
whether, to break a tie (see Returns for details).
payoff_perturbation : array_like(float), optional(default=None)
Array of length equal to the number of actions of the player
containing the values ("noises") to be added to the payoffs
in determining the best response.
tol : scalar(float), optional(default=None)
Tolerance level used in determining best responses. If None,
default to the value of the `tol` attribute.
random_state : int or np.random.RandomState, optional
Random seed (integer) or np.random.RandomState instance to
set the initial state of the random number generator for
reproducibility. If None, a randomly initialized RandomState
is used. Relevant only when tie_breaking='random'.
Returns
-------
scalar(int) or ndarray(int, ndim=1)
If tie_breaking=False, returns an array containing all the
best response pure actions. If tie_breaking='smallest',
returns the best response action with the smallest index; if
tie_breaking='random', returns an action randomly chosen
from the best response actions.
"""
if tol is None:
tol = self.tol
payoff_vector = self.payoff_vector(opponents_actions)
if payoff_perturbation is not None:
try:
payoff_vector += payoff_perturbation
except TypeError: # type mismatch
payoff_vector = payoff_vector + payoff_perturbation
best_responses = \
np.where(payoff_vector >= payoff_vector.max() - tol)[0]
if tie_breaking == 'smallest':
return best_responses[0]
elif tie_breaking == 'random':
return self.random_choice(best_responses,
random_state=random_state)
elif tie_breaking is False:
return best_responses
else:
msg = "tie_breaking must be one of 'smallest', 'random', or False"
raise ValueError(msg) | 0.000948 |
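The tie-breaking core of the method reduces to a small NumPy idiom; a self-contained sketch (no Player object assumed):

import numpy as np

payoff_vector = np.array([1.0, 3.0, 3.0, 2.0])
tol = 1e-8
best_responses = np.where(payoff_vector >= payoff_vector.max() - tol)[0]  # array([1, 2])
best_responses[0]  # 1 -- what tie_breaking='smallest' returns; tie_breaking=False returns both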
def on_person_update(self, people):
"""
People have changed
Should always include all people
(all that were added via on_person_new)
:param people: People to update
:type people: list[paps.people.People]
:rtype: None
:raises Exception: On error (for now just an exception)
"""
try:
self.sensor_client.person_update(people)
except:
self.exception("Failed to update people")
raise Exception("Updating people failed") | 0.005556 |
def merge(adata, ldata, copy=True):
"""Merges two annotated data matrices.
Arguments
---------
adata: :class:`~anndata.AnnData`
Annotated data matrix (reference data set).
ldata: :class:`~anndata.AnnData`
Annotated data matrix (to be merged into adata).
Returns
-------
Returns a :class:`~anndata.AnnData` object
"""
if 'spliced' in ldata.layers.keys() and 'initial_size_spliced' not in ldata.obs.keys(): set_initial_size(ldata)
elif 'spliced' in adata.layers.keys() and 'initial_size_spliced' not in adata.obs.keys(): set_initial_size(adata)
common_obs = adata.obs_names.intersection(ldata.obs_names)
common_vars = adata.var_names.intersection(ldata.var_names)
if len(common_obs) == 0:
clean_obs_names(adata)
clean_obs_names(ldata)
common_obs = adata.obs_names.intersection(ldata.obs_names)
if copy:
_adata = adata[common_obs].copy() if adata.shape[1] >= ldata.shape[1] else ldata[common_obs].copy()
_ldata = ldata[common_obs].copy() if adata.shape[1] >= ldata.shape[1] else adata[common_obs].copy()
else:
adata._inplace_subset_obs(common_obs)
_adata, _ldata = adata, ldata[common_obs]
same_vars = (len(_adata.var_names) == len(_ldata.var_names) and np.all(_adata.var_names == _ldata.var_names))
if len(common_vars) > 0 and not same_vars:
_adata._inplace_subset_var(common_vars)
_ldata._inplace_subset_var(common_vars)
for attr in _ldata.obs.keys():
_adata.obs[attr] = _ldata.obs[attr]
for attr in _ldata.obsm.keys():
_adata.obsm[attr] = _ldata.obsm[attr]
for attr in _ldata.uns.keys():
_adata.uns[attr] = _ldata.uns[attr]
for attr in _ldata.layers.keys():
_adata.layers[attr] = _ldata.layers[attr]
if _adata.shape[1] == _ldata.shape[1]:
same_vars = (len(_adata.var_names) == len(_ldata.var_names) and np.all(_adata.var_names == _ldata.var_names))
if same_vars:
for attr in _ldata.var.keys():
_adata.var[attr] = _ldata.var[attr]
for attr in _ldata.varm.keys():
_adata.varm[attr] = _ldata.varm[attr]
else:
raise ValueError('Variable names are not identical.')
return _adata if copy else None | 0.003899 |
def split_and_strip_without(string, exclude, separator_regexp=None):
"""Split a string into items, and trim any excess spaces
Any items in exclude are not in the returned list
>>> split_and_strip_without('fred, was, here ', ['was'])
['fred', 'here']
"""
result = split_and_strip(string, separator_regexp)
if not exclude:
return result
return [x for x in result if x not in exclude] | 0.002358 |
def extrusion(target, throat_perimeter='throat.perimeter',
throat_length='throat.length'):
r"""
    Calculate surface area for an arbitrarily shaped throat given the perimeter
    and length.
Parameters
----------
target : OpenPNM Object
The object which this model is associated with. This controls the
length of the calculated array, and also provides access to other
necessary properties.
throat_perimeter : string
Dictionary key to the throat perimeter array. Default is
'throat.perimeter'.
throat_length : string
Dictionary key to the throat length array. Default is 'throat.length'.
"""
P = target[throat_perimeter]
L = target[throat_length]
value = P*L
return value | 0.00128 |
def _split_constraints(constraints, concrete=True):
"""
Returns independent constraints, split from this Frontend's `constraints`.
"""
splitted = [ ]
for i in constraints:
splitted.extend(i.split(['And']))
l.debug("... splitted of size %d", len(splitted))
concrete_constraints = [ ]
variable_connections = { }
constraint_connections = { }
for n,s in enumerate(splitted):
l.debug("... processing constraint with %d variables", len(s.variables))
connected_variables = set(s.variables)
connected_constraints = { n }
if len(connected_variables) == 0:
concrete_constraints.append(s)
for v in s.variables:
if v in variable_connections:
connected_variables |= variable_connections[v]
if v in constraint_connections:
connected_constraints |= constraint_connections[v]
for v in connected_variables:
variable_connections[v] = connected_variables
constraint_connections[v] = connected_constraints
unique_constraint_sets = set()
for v in variable_connections:
unique_constraint_sets.add((frozenset(variable_connections[v]), frozenset(constraint_connections[v])))
results = [ ]
for v,c_indexes in unique_constraint_sets:
results.append((set(v), [ splitted[c] for c in c_indexes ]))
if concrete and len(concrete_constraints) > 0:
results.append(({ 'CONCRETE' }, concrete_constraints))
return results | 0.010817 |
def server_doc(self_or_cls, obj, doc=None):
"""
Get a bokeh Document with the plot attached. May supply
an existing doc, otherwise bokeh.io.curdoc() is used to
attach the plot to the global document instance.
"""
if not isinstance(obj, (Plot, BokehServerWidgets)):
if not isinstance(self_or_cls, BokehRenderer) or self_or_cls.mode != 'server':
renderer = self_or_cls.instance(mode='server')
else:
renderer = self_or_cls
plot, _ = renderer._validate(obj, 'auto')
else:
plot = obj
root = plot.state
if isinstance(plot, BokehServerWidgets):
plot = plot.plot
if doc is None:
doc = plot.document
else:
plot.document = doc
plot.traverse(lambda x: attach_periodic(x), [GenericElementPlot])
doc.add_root(root)
return doc | 0.004237 |
def _get_mixing_indices(size, seed=None, name=None):
"""Generates an array of indices suitable for mutation operation.
The mutation operation in differential evolution requires that for every
element of the population, three distinct other elements be chosen to produce
a trial candidate. This function generates an array of shape [size, 3]
satisfying the properties that:
(a). array[i, :] does not contain the index 'i'.
(b). array[i, :] does not contain any overlapping indices.
(c). All elements in the array are between 0 and size - 1 inclusive.
Args:
    size: Scalar integer `Tensor`. The number of samples as well as the range
of the indices to sample from.
seed: `int` or None. The random seed for this `Op`. If `None`, no seed is
applied.
Default value: `None`.
name: Python `str` name prefixed to Ops created by this function.
Default value: 'get_mixing_indices'.
Returns:
sample: A `Tensor` of shape [size, 3] and same dtype as `size` containing
samples without replacement between 0 and size - 1 (inclusive) with the
`i`th row not including the number `i`.
"""
with tf.compat.v1.name_scope(
name, default_name='get_mixing_indices', values=[size]):
size = tf.convert_to_tensor(value=size)
dtype = size.dtype
seed_stream = distributions.SeedStream(seed, salt='get_mixing_indices')
first = tf.random.uniform([size],
maxval=size-1,
dtype=dtype,
seed=seed_stream())
second = tf.random.uniform([size],
maxval=size-2,
dtype=dtype,
seed=seed_stream())
third = tf.random.uniform([size],
maxval=size-3,
dtype=dtype,
seed=seed_stream())
# Shift second if it is on top of or to the right of first
second = tf.where(first < second, x=second, y=second + 1)
smaller = tf.math.minimum(first, second)
larger = tf.math.maximum(first, second)
# Shift the third one so it does not coincide with either the first or the
# second number. Assuming first < second, shift by 1 if the number is in
# [first, second) and by 2 if the number is greater than or equal to the
# second.
third = tf.where(third < smaller, x=third, y=third + 1)
third = tf.where(third < larger, x=third, y=third + 1)
sample = tf.stack([first, second, third], axis=1)
to_avoid = tf.expand_dims(tf.range(size), axis=-1)
sample = tf.where(sample < to_avoid, x=sample, y=sample + 1)
return sample | 0.001485 |
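A worked numeric trace of the shifting scheme above (hand-computed, not library output): take size = 5 and row i = 2, with raw draws first = 2, second = 2, third = 1.

# second: first < second is False, so second -> 3; then smaller = 2, larger = 3.
# third:  1 < smaller keeps 1; 1 < larger keeps 1.             row so far: (2, 3, 1)
# final shift against to_avoid = 2: 2 -> 3, 3 -> 4, 1 stays.   row becomes: (3, 4, 1)
# All three indices are distinct and none equals i = 2, as the docstring requires.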
def find_charged(self, mol):
"""Looks for positive charges in arginine, histidine or lysine, for negative in aspartic and glutamic acid."""
data = namedtuple('pcharge', 'atoms atoms_orig_idx type center restype resnr reschain')
a_set = []
# Iterate through all residue, exclude those in chains defined as peptides
for res in [r for r in pybel.ob.OBResidueIter(mol.OBMol) if not r.GetChain() in config.PEPTIDES]:
if config.INTRA is not None:
if res.GetChain() != config.INTRA:
continue
a_contributing = []
a_contributing_orig_idx = []
if res.GetName() in ('ARG', 'HIS', 'LYS'): # Arginine, Histidine or Lysine have charged sidechains
for a in pybel.ob.OBResidueAtomIter(res):
if a.GetType().startswith('N') and res.GetAtomProperty(a, 8) \
and not self.Mapper.mapid(a.GetIdx(), mtype='protein') in self.altconf:
a_contributing.append(pybel.Atom(a))
a_contributing_orig_idx.append(self.Mapper.mapid(a.GetIdx(), mtype='protein'))
if not len(a_contributing) == 0:
a_set.append(data(atoms=a_contributing,
atoms_orig_idx=a_contributing_orig_idx,
type='positive',
center=centroid([ac.coords for ac in a_contributing]),
restype=res.GetName(),
resnr=res.GetNum(),
reschain=res.GetChain()))
if res.GetName() in ('GLU', 'ASP'): # Aspartic or Glutamic Acid
for a in pybel.ob.OBResidueAtomIter(res):
if a.GetType().startswith('O') and res.GetAtomProperty(a, 8) \
and not self.Mapper.mapid(a.GetIdx(), mtype='protein') in self.altconf:
a_contributing.append(pybel.Atom(a))
a_contributing_orig_idx.append(self.Mapper.mapid(a.GetIdx(), mtype='protein'))
if not len(a_contributing) == 0:
a_set.append(data(atoms=a_contributing,
atoms_orig_idx=a_contributing_orig_idx,
type='negative',
center=centroid([ac.coords for ac in a_contributing]),
restype=res.GetName(),
resnr=res.GetNum(),
reschain=res.GetChain()))
return a_set | 0.005553 |
def retrieve_paths(self, products, report_path, suffix=None):
"""Helper method to retrieve path from particular report metadata.
:param products: Report products.
:type products: list
:param report_path: Path of the IF output.
:type report_path: str
:param suffix: Expected output product file type (extension).
:type suffix: str
:return: List of absolute path of the output product.
:rtype: list
"""
paths = []
for product in products:
path = ImpactReport.absolute_output_path(
join(report_path, 'output'),
products,
product.key)
if isinstance(path, list):
for p in path:
paths.append(p)
elif isinstance(path, dict):
for p in list(path.values()):
paths.append(p)
else:
paths.append(path)
if suffix:
paths = [p for p in paths if p.endswith(suffix)]
paths = [p for p in paths if exists(p)]
return paths | 0.001783 |
def traverse(self, traverser, **kwargs):
"""
Implementation of mandatory interface for traversing the whole rule tree.
This method will call the ``traverse`` method of child rule tree and
then perform arbitrary conversion of the result before returning it back.
The optional ``kwargs`` are passed down to traverser callback as additional
arguments and can be used to provide additional data or context.
:param pynspect.rules.RuleTreeTraverser traverser: Traverser object providing appropriate interface.
:param dict kwargs: Additional optional keyword arguments to be passed down to traverser callback.
"""
result = self.rule.traverse(traverser, **kwargs)
return self.conversion(result) | 0.009032 |
def send(r, pools=None):
"""Sends a given Request object."""
if pools:
r._pools = pools
r.send()
return r.response | 0.007092 |
def as_call(self):
"""
Return this call as it is called in its source.
"""
default = self._default()
default = ', ' + default if default else ''
return "pyconfig.%s(%r%s)" % (self.method, self.get_key(), default) | 0.007663 |
def _store32(ins):
""" Stores 2nd operand content into address of 1st operand.
    store32 a, x => *(&a) = x
"""
op = ins.quad[1]
indirect = op[0] == '*'
if indirect:
op = op[1:]
immediate = op[0] == '#' # Might make no sense here?
if immediate:
op = op[1:]
if is_int(op) or op[0] == '_' or immediate:
output = _32bit_oper(ins.quad[2], preserveHL=indirect)
if is_int(op):
op = str(int(op) & 0xFFFF)
if indirect:
output.append('ld hl, (%s)' % op)
output.append('call __STORE32')
REQUIRES.add('store32.asm')
return output
output.append('ld (%s), hl' % op)
output.append('ld (%s + 2), de' % op)
return output
output = _32bit_oper(ins.quad[2], preserveHL=True)
output.append('pop hl')
if indirect:
output.append('call __ISTORE32')
REQUIRES.add('store32.asm')
return output
output.append('call __STORE32')
REQUIRES.add('store32.asm')
return output | 0.000943 |
def convert_to_int(x: Any, default: int = None) -> int:
"""
Transforms its input into an integer, or returns ``default``.
"""
try:
return int(x)
except (TypeError, ValueError):
return default | 0.004405 |
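Quick behavioural examples for the helper above:

convert_to_int("42")         # 42
convert_to_int("4.2")        # None -- int("4.2") raises ValueError, so the default is returned
convert_to_int("oops", 0)    # 0
convert_to_int(None)         # None (the default default)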
def _end_flusher_loop(self):
"""
Let flusher_loop coroutine quit - useful when disconnecting.
"""
if not self.is_connected or self.is_connecting or self.io.closed():
if self._flush_queue is not None and self._flush_queue.empty():
self._flush_pending(check_connected=False)
yield tornado.gen.moment | 0.00542 |
def observe(self):
"""
Check if the request is an observing request.
:return: 0, if the request is an observing request
"""
for option in self.options:
if option.number == defines.OptionRegistry.OBSERVE.number:
# if option.value is None:
# return 0
if option.value is None:
return 0
return option.value
return None | 0.004329 |
def show_limit(entries, **kwargs):
"""Shows a menu but limits the number of entries shown at a time.
Functionally equivalent to `show_menu()` with the `limit` parameter set."""
limit = kwargs.pop('limit', 5)
if limit <= 0:
return show_menu(entries, **kwargs)
istart = 0 # Index of group start.
iend = limit # Index of group end.
dft = kwargs.pop('dft', None)
if type(dft) == int:
dft = str(dft)
while True:
if iend > len(entries):
iend = len(entries)
istart = iend - limit
if istart < 0:
istart = 0
iend = limit
unext = len(entries) - iend # Number of next entries.
uprev = istart # Number of previous entries.
nnext = "" # Name of 'next' menu entry.
nprev = "" # Name of 'prev' menu entry.
dnext = "" # Description of 'next' menu entry.
dprev = "" # Description of 'prev' menu entry.
group = copy.deepcopy(entries[istart:iend])
names = [i.name for i in group]
if unext > 0:
for i in ["n", "N", "next", "NEXT", "->", ">>", ">>>"]:
if i not in names:
nnext = i
dnext = "Next %u of %u entries" % (unext, len(entries))
group.append(MenuEntry(nnext, dnext, None, None, None))
names.append("n")
break
if uprev > 0:
for i in ["p", "P", "prev", "PREV", "<-", "<<", "<<<"]:
if i not in names:
nprev = i
dprev = "Previous %u of %u entries" % (uprev, len(entries))
group.append(MenuEntry(nprev, dprev, None, None, None))
names.append("p")
break
tmpdft = None
if dft != None:
if dft not in names:
if "n" in names:
tmpdft = "n"
else:
tmpdft = dft
result = show_menu(group, dft=tmpdft, **kwargs)
if result == nnext or result == dnext:
istart += limit
iend += limit
elif result == nprev or result == dprev:
istart -= limit
iend -= limit
else:
return result | 0.00439 |
def decode_to_unicode(text, default_encoding='utf-8'):
"""Decode input text into Unicode representation.
Decode input text into Unicode representation by first using the default
encoding utf-8.
If the operation fails, it detects the type of encoding used in the
given text.
For optimal result, it is recommended that the 'chardet' module is
installed.
NOTE: Beware that this might be slow for *very* large strings.
If chardet detection fails, it will try to decode the string using the basic
detection function guess_minimum_encoding().
Also, bear in mind that it is impossible to detect the correct encoding at
    all times, other than taking educated guesses. With that said, this function
will always return some decoded Unicode string, however the data returned
may not be the same as original data in some cases.
:param text: the text to decode
:type text: string
:param default_encoding: the character encoding to use. Optional.
:type default_encoding: string
:return: input text as Unicode
:rtype: string
"""
if not text:
return ""
try:
return text.decode(default_encoding)
except (UnicodeError, LookupError):
pass
detected_encoding = None
if CHARDET_AVAILABLE:
# We can use chardet to perform detection
res = chardet.detect(text)
if res['confidence'] >= 0.8:
detected_encoding = res['encoding']
if detected_encoding is None:
# No chardet detection, try to make a basic guess
dummy, detected_encoding = guess_minimum_encoding(text)
return text.decode(detected_encoding) | 0.0018 |
def render_app_description(context, app, fallback="", template="/admin_app_description.html"):
""" Render the application description using the default template name. If it cannot find a
    template matching the given path, fall back to the fallback argument.
"""
try:
template = app['app_label'] + template
text = render_to_string(template, context)
except:
text = fallback
return text | 0.009217 |
def OnSelectCard(self, card):
"""Called when a card is selected by clicking on the
card or reader tree control or toolbar."""
SimpleSCardAppEventObserver.OnSelectCard(self, card)
self.feedbacktext.SetLabel('Selected card: ' + repr(card))
if hasattr(self.selectedcard, 'connection'):
self.transmitbutton.Enable() | 0.00551 |
def recurse_module( overall_record, index, shared, stop_types=STOP_TYPES, already_seen=None, min_size=0 ):
"""Creates a has-a recursive-cost hierarchy
Mutates objects in-place to produce a hierarchy of memory usage based on
reference-holding cost assignment
"""
for record in recurse(
overall_record, index,
stop_types=stop_types,
already_seen=already_seen,
type_group=True,
):
# anything with a totsize we've already processed...
if record.get('totsize') is not None:
continue
rinfo = record
rinfo['module'] = overall_record.get('name',NON_MODULE_REFS )
if not record['refs']:
rinfo['rsize'] = 0
rinfo['children'] = []
else:
# TODO: provide a flag to coalesce based on e.g. type at each level or throughout...
rinfo['children'] = rinfo_children = list ( children( record, index, stop_types=stop_types ) )
rinfo['rsize'] = sum([
(
child.get('totsize',0.0)/float(len(shared.get( child['address'], [])) or 1)
)
for child in rinfo_children
], 0.0 )
rinfo['totsize'] = record['size'] + rinfo['rsize']
return None | 0.020108 |
def get_resources(connection):
""" Do an RTSP-DESCRIBE request, then parse out available resources from the response """
resp = connection.describe(verbose=False).split('\r\n')
resources = [x.replace('a=control:','') for x in resp if (x.find('control:') != -1 and x[-1] != '*' )]
return resources | 0.016026 |
def run(cmd, shell=False, debug=False):
'Run a command and return the output.'
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=shell)
(out, _) = proc.communicate() # no need for stderr
if debug:
print(cmd)
print(out)
return out | 0.003636 |
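Example call for the wrapper above (POSIX assumed; note that communicate() returns bytes on Python 3):

out = run(["echo", "hello"])
print(out)   # b'hello\n'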
def _truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
"""
Truncates a colormap to use.
    Code originally from http://stackoverflow.com/questions/18926031/how-to-extract-a-subset-of-a-colormap-as-a-new-colormap-in-matplotlib
"""
new_cmap = LinearSegmentedColormap.from_list(
'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),
cmap(numpy.linspace(minval, maxval, n))
)
return new_cmap | 0.004454 |
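A usage sketch, assuming matplotlib is installed and LinearSegmentedColormap/numpy are imported as in the module above:

import matplotlib.pyplot as plt

# Keep only the middle 60% of viridis as a new colormap.
sub_cmap = _truncate_colormap(plt.get_cmap('viridis'), minval=0.2, maxval=0.8)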
def filter_muons(blob):
"""Write all muons from McTracks to Muons."""
tracks = blob['McTracks']
muons = tracks[tracks.type == -13] # PDG particle code
blob["Muons"] = Table(muons)
return blob | 0.004673 |
def interpolate(self, lat, lon, var):
""" Interpolate each var on the coordinates requested
"""
subset, dims = self.crop(lat, lon, var)
if np.all([y in dims['lat'] for y in lat]) & \
np.all([x in dims['lon'] for x in lon]):
yn = np.nonzero([y in lat for y in dims['lat']])[0]
xn = np.nonzero([x in lon for x in dims['lon']])[0]
output = {}
for v in subset:
# output[v] = subset[v][dn, zn, yn, xn]
# Seriously that this is the way to do it?!!??
output[v] = subset[v][:, xn][yn]
return output
# The output coordinates shall be created only once.
points_out = []
for latn in lat:
for lonn in lon:
points_out.append([latn, lonn])
points_out = np.array(points_out)
output = {}
for v in var:
output[v] = ma.masked_all(
(lat.size, lon.size),
dtype=subset[v].dtype)
# The valid data
idx = np.nonzero(~ma.getmaskarray(subset[v]))
if idx[0].size > 0:
points = np.array([
dims['lat'][idx[0]], dims['lon'][idx[1]]]).T
values = subset[v][idx]
# Interpolate along the dimensions that have more than one
# position, otherwise it means that the output is exactly
# on that coordinate.
ind = np.array(
[np.unique(points[:, i]).size > 1 for i in
range(points.shape[1])])
assert ind.any()
values_out = griddata(
np.atleast_1d(np.squeeze(points[:, ind])),
values,
np.atleast_1d(np.squeeze(points_out[:, ind]))
)
# Remap the interpolated value back into a 4D array
idx = np.isfinite(values_out)
for [y, x], out in zip(points_out[idx], values_out[idx]):
output[v][y==lat, x==lon] = out
return output | 0.002224 |
def summary(self, indicator_data):
"""Return a summary value for any given indicator type."""
summary = None
for v in self._value_fields:
if indicator_data.get(v) is not None:
summary = indicator_data.get(v)
break
return indicator_data.get('summary', summary) | 0.00597 |
def _format_assertmsg(obj):
"""Format the custom assertion message given.
For strings this simply replaces newlines with '\n~' so that
util.format_explanation() will preserve them instead of escaping
newlines. For other objects py.io.saferepr() is used first.
"""
# reprlib appears to have a bug which means that if a string
# contains a newline it gets escaped, however if an object has a
# .__repr__() which contains newlines it does not get escaped.
# However in either case we want to preserve the newline.
if py.builtin._istext(obj) or py.builtin._isbytes(obj):
s = obj
is_repr = False
else:
s = py.io.saferepr(obj)
is_repr = True
if py.builtin._istext(s):
t = py.builtin.text
else:
t = py.builtin.bytes
s = s.replace(t("\n"), t("\n~")).replace(t("%"), t("%%"))
if is_repr:
s = s.replace(t("\\n"), t("\n~"))
return s | 0.001059 |
def pspawn_wrapper(self, sh, escape, cmd, args, env):
"""Wrapper function for handling piped spawns.
This looks to the calling interface (in Action.py) like a "normal"
spawn, but associates the call with the PSPAWN variable from
the construction environment and with the streams to which we
want the output logged. This gets slid into the construction
environment as the SPAWN variable so Action.py doesn't have to
know or care whether it's spawning a piped command or not.
"""
return self.pspawn(sh, escape, cmd, args, env, self.logstream, self.logstream) | 0.004762 |
def _alpha(self, L):
""" Covariance-derived term to construct expectations. See Rasmussen & Williams.
Parameters
----------
L : np.ndarray
Cholesky triangular
Returns
----------
np.ndarray (alpha)
"""
return la.cho_solve((L.T, True), la.cho_solve((L, True), np.transpose(self.data))) | 0.015707 |
def find_additional_properties(instance, schema):
"""
Return the set of additional properties for the given ``instance``.
Weeds out properties that should have been validated by ``properties`` and
/ or ``patternProperties``.
Assumes ``instance`` is dict-like already.
"""
properties = schema.get("properties", {})
patterns = "|".join(schema.get("patternProperties", {}))
for property in instance:
if property not in properties:
if patterns and re.search(patterns, property):
continue
yield property | 0.001706 |
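A small worked example for the generator above:

schema = {"properties": {"name": {}}, "patternProperties": {"^x-": {}}}
instance = {"name": "a", "x-internal": 1, "extra": 2}
list(find_additional_properties(instance, schema))   # ['extra']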
def on_epoch_end(self, last_metrics, **kwargs):
"Set the final result in `last_metrics`."
return add_metrics(last_metrics, self.val/self.count) | 0.012579 |
def _cursor_position_changed(self):
""" Updates the document formatting based on the new cursor position.
"""
# Clear out the old formatting.
self._text_edit.setExtraSelections([])
# Attempt to match a bracket for the new cursor position.
cursor = self._text_edit.textCursor()
if not cursor.hasSelection():
position = cursor.position() - 1
match_position = self._find_match(position)
if match_position != -1:
extra_selections = [ self._selection_for_character(pos)
for pos in (position, match_position) ]
self._text_edit.setExtraSelections(extra_selections) | 0.005563 |
def _GetCachedFileByPath(self, key_path_upper):
"""Retrieves a cached Windows Registry file for a key path.
Args:
key_path_upper (str): Windows Registry key path, in upper case with
a resolved root key alias.
Returns:
      tuple: consisting of:
str: key path prefix
WinRegistryFile: corresponding Windows Registry file or None if not
available.
"""
longest_key_path_prefix_upper = ''
longest_key_path_prefix_length = len(longest_key_path_prefix_upper)
for key_path_prefix_upper in self._registry_files:
if key_path_upper.startswith(key_path_prefix_upper):
key_path_prefix_length = len(key_path_prefix_upper)
if key_path_prefix_length > longest_key_path_prefix_length:
longest_key_path_prefix_upper = key_path_prefix_upper
longest_key_path_prefix_length = key_path_prefix_length
if not longest_key_path_prefix_upper:
return None, None
registry_file = self._registry_files.get(
longest_key_path_prefix_upper, None)
return longest_key_path_prefix_upper, registry_file | 0.004529 |
def get_cache(self, cache_name, miss_fn):
"""
Get an L{AsyncLRUCache} object with the given name. If such an object
does not exist, it will be created. Since the cache is permanent, this
        method can be called only once, e.g., in C{startService}, and its value
stored indefinitely.
@param cache_name: name of the cache (usually the name of the type of
object it stores)
@param miss_fn: miss function for the cache; see L{AsyncLRUCache}
constructor.
@returns: L{AsyncLRUCache} instance
"""
try:
return self._caches[cache_name]
except KeyError:
max_size = self.config.get(cache_name, self.DEFAULT_CACHE_SIZE)
assert max_size >= 1
c = self._caches[cache_name] = lru.AsyncLRUCache(miss_fn, max_size)
return c | 0.002304 |
def _fstat_sig(self):
"""p-value of the F-statistic."""
return 1.0 - scs.f.cdf(self._fstat, self._df_reg, self._df_err) | 0.014599 |
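In equation form, the value computed above is simply the upper-tail probability of the F distribution:

$$ p = 1 - F_{\mathrm{cdf}}\!\left(F_{\mathrm{stat}};\ \mathrm{df}_{\mathrm{reg}},\ \mathrm{df}_{\mathrm{err}}\right) $$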
def parse_list_cmd(proc, args, listsize=10):
"""Parses arguments for the "list" command and returns the tuple:
(filename, first line number, last line number)
or sets these to None if there was some problem."""
text = proc.current_command[len(args[0])+1:].strip()
if text in frozenset(('', '.', '+', '-')):
if text == '.':
location = resolve_location(proc, '.')
return location.path, location.line_number, listsize
else:
if proc.list_lineno is None:
proc.errmsg("Don't have previous list location")
return INVALID_PARSE_LIST
filename = proc.list_filename
if text == '+':
first = max(1, proc.list_lineno + listsize)
elif text == '-':
if proc.list_lineno == 1 + listsize:
proc.errmsg("Already at start of %s." % proc.list_filename)
return INVALID_PARSE_LIST
first = max(1, proc.list_lineno - (2*listsize) - 1)
elif text == '':
# Continue from where we last left off
first = proc.list_lineno + 1
last = first + listsize - 1
return filename, first, last
else:
try:
list_range = build_range(text)
except LocationError as e:
proc.errmsg("Error in parsing list range at or around:")
proc.errmsg(e.text)
proc.errmsg(e.text_cursor)
return INVALID_PARSE_LIST
except ScannerError as e:
proc.errmsg("Lexical error in parsing list range at or around:")
proc.errmsg(e.text)
proc.errmsg(e.text_cursor)
return INVALID_PARSE_LIST
if list_range.first is None:
# Last must have been given
assert isinstance(list_range.last, Location)
location = resolve_location(proc, list_range.last)
if not location:
return INVALID_PARSE_LIST
last = location.line_number
first = max(1, last - listsize)
return location.path, first, last
elif isinstance(list_range.first, int):
first = list_range.first
location = resolve_location(proc, list_range.last)
if not location:
return INVALID_PARSE_LIST
filename = location.path
last = location.line_number
if last < first:
# Treat as a count rather than an absolute location
last = first + last
return location.path, first, last
else:
# First is location. Last may be empty or a number
assert isinstance(list_range.first, Location)
location = resolve_location(proc, list_range.first)
if not location:
return INVALID_PARSE_LIST
first = location.line_number
last = list_range.last
if location.method:
first -= listsize // 2
if isinstance(last, str):
# Is an offset +number
assert last[0] == '+'
last = first + int(last[1:])
elif not last:
last = first + listsize
elif last < first:
# Treat as a count rather than an absolute location
last = first + last
return location.path, first, last
pass
return | 0.00201 |
def fire_metric(metric_name, metric_value):
""" Fires a metric using the MetricsApiClient
"""
metric_value = float(metric_value)
metric = {metric_name: metric_value}
metric_client.fire_metrics(**metric)
return "Fired metric <{}> with value <{}>".format(metric_name, metric_value) | 0.006601 |
def path_constant(self, name, value):
"""Declare and set a project global constant, whose value is a path. The
path is adjusted to be relative to the invocation directory. The given
value path is taken to be either absolute, or relative to this project
root."""
assert is_iterable_typed(name, basestring)
assert is_iterable_typed(value, basestring)
if len(value) > 1:
self.registry.manager.errors()("path constant should have one element")
self.registry.current().add_constant(name[0], value, path=1) | 0.006944 |
def _get_operations(self, context):
"""Returns a list of operations that need to be performed to turn the
cached source code into the one in the buffer."""
#Most of the time, the real-time update is going to fire with
#incomplete statements that don't result in any changes being made
#to the module instances. The SequenceMatches caches hashes for the
#second argument. Logically, we want to turn the cached version into
#the buffer version; however, the buffer is the string that keeps
#changing.
#in order to optimize the real-time update, we *switch* the two strings
#when we set the sequence and then fix the references on the operations
#after the fact.
if context.module.changed or self.unset:
self.matcher.set_seq2(context.cachedstr)
self.unset = False
#Set the changed flag back to false now that the sequencer has
#reloaded it.
context.module.changed = False
self.matcher.set_seq1(context.bufferstr)
opcodes = self.matcher.get_opcodes()
result = []
#Index i keeps track of how many operations were actually added because
#they constituted a change we need to take care of.
i = 0
for code in opcodes:
if code[0] != "equal":
#Replacements don't have a mode change. All of the operations
#switch the order of the line indices for the two strings.
if code[0] == "insert":
newcode = ("delete", code[3], code[4], code[1], code[2])
elif code[0] == "delete":
newcode = ("insert", code[3], code[4], code[1], code[2])
else:
newcode = ("replace", code[3], code[4], code[1], code[2])
op = Operation(context, self.parser, newcode, i)
result.append(op)
i += 1
return result | 0.009023 |
def split_emails(msg):
"""
Given a message (which may consist of an email conversation thread with
multiple emails), mark the lines to identify split lines, content lines and
empty lines.
Correct the split line markers inside header blocks. Header blocks are
identified by the regular expression RE_HEADER.
Return the corrected markers
"""
msg_body = _replace_link_brackets(msg)
# don't process too long messages
lines = msg_body.splitlines()[:MAX_LINES_COUNT]
markers = remove_initial_spaces_and_mark_message_lines(lines)
markers = _mark_quoted_email_splitlines(markers, lines)
# we don't want splitlines in header blocks
markers = _correct_splitlines_in_headers(markers, lines)
return markers | 0.001309 |
def _bell(self):
u'''ring the bell if requested.'''
if self.bell_style == u'none':
pass
elif self.bell_style == u'visible':
raise NotImplementedError(u"Bellstyle visible is not implemented yet.")
elif self.bell_style == u'audible':
self.console.bell()
else:
raise ReadlineError(u"Bellstyle %s unknown."%self.bell_style) | 0.009828 |
def add(self, *args, **kwargs):
""" Add new mapping from args and kwargs
>>> om = OperationIdMapping()
>>> om.add(
... OperationIdMapping(),
... 'aiohttp_apiset.swagger.operations', # any module
... getPets='mymod.handler',
... getPet='mymod.get_pet',
... )
>>> om['getPets']
'mymod.handler'
:param args: str, Mapping, module or obj
:param kwargs: operationId='handler' or operationId=handler
"""
for arg in args:
if isinstance(arg, str):
self._operations.append(self._from_str(arg))
else:
self._operations.append(arg)
if kwargs:
self._operations.append(kwargs) | 0.002604 |
def deregister(self, key):
""" Deregisters an existing key.
`key`
String key to deregister.
Returns boolean.
"""
res = super(ExtRegistry, self).deregister(key)
if key in self._type_info:
del self._type_info[key]
return res | 0.006231 |
def build_blast_db_from_fasta_file(fasta_file, is_protein=False,
output_dir=None, HALT_EXEC=False):
"""Build blast db from fasta_path; return db name and list of files created
**If using to create temporary blast databases, you can call
cogent.util.misc.remove_files(db_filepaths) to clean up all the
files created by formatdb when you're done with the database.
fasta_path: path to fasta file of sequences to build database from
is_protein: True if working on protein seqs (default: False)
output_dir: directory where output should be written
(default: directory containing fasta_path)
HALT_EXEC: halt just before running the formatdb command and
print the command -- useful for debugging
"""
output_dir = output_dir or '.'
_, fasta_path = mkstemp(dir=output_dir, prefix="BLAST_temp_db_",
suffix=".fasta")
fasta_f = open(fasta_path, 'w')
for line in fasta_file:
fasta_f.write('%s\n' % line.strip())
fasta_f.close()
blast_db, db_filepaths = build_blast_db_from_fasta_path(fasta_path,
is_protein=is_protein,
output_dir=None,
HALT_EXEC=HALT_EXEC
)
db_filepaths.append(fasta_path)
return blast_db, db_filepaths | 0.0013 |
def _wait_until_machine_finish(self):
"""
Internal method
        wait until the machine finishes and kill the main process (booted)
:return: None
"""
self.image._wait_for_machine_finish(self.name)
# kill main run process
self.start_process.kill()
        # TODO: there are some background processes, dbus async events or something similar; it is better to wait
        #       to provide enough time for those async ops to finish as well
time.sleep(constants.DEFAULT_SLEEP) | 0.005803 |
def convert_via_profile(self, data_np, order, inprof_name, outprof_name):
"""Convert the given RGB data from the working ICC profile
to the output profile in-place.
Parameters
----------
data_np : ndarray
RGB image data to be displayed.
order : str
Order of channels in the data (e.g. "BGRA").
inprof_name, outprof_name : str
ICC profile names (see :func:`ginga.util.rgb_cms.get_profiles`).
"""
# get rest of necessary conversion parameters
to_intent = self.t_.get('icc_output_intent', 'perceptual')
proofprof_name = self.t_.get('icc_proof_profile', None)
proof_intent = self.t_.get('icc_proof_intent', 'perceptual')
use_black_pt = self.t_.get('icc_black_point_compensation', False)
try:
rgbobj = RGBMap.RGBPlanes(data_np, order)
arr_np = rgbobj.get_array('RGB')
arr = rgb_cms.convert_profile_fromto(arr_np, inprof_name, outprof_name,
to_intent=to_intent,
proof_name=proofprof_name,
proof_intent=proof_intent,
use_black_pt=use_black_pt,
logger=self.logger)
ri, gi, bi = rgbobj.get_order_indexes('RGB')
out = data_np
out[..., ri] = arr[..., 0]
out[..., gi] = arr[..., 1]
out[..., bi] = arr[..., 2]
self.logger.debug("Converted from '%s' to '%s' profile" % (
inprof_name, outprof_name))
except Exception as e:
self.logger.warning("Error converting output from working profile: %s" % (str(e)))
# TODO: maybe should have a traceback here
self.logger.info("Output left unprofiled") | 0.002055 |
def _check_years(self, cell, prior_year):
'''
Helper method which defines the rules for checking for existence of a year indicator. If the
cell is blank then prior_year is used to determine validity.
'''
# Anything outside these values shouldn't auto
# categorize to strings
min_year = 1900
max_year = 2100
# Empty cells could represent the prior cell's title,
# but an empty cell before we find a year is not a title
if is_empty_cell(cell):
return bool(prior_year)
# Check if we have a numbered cell between min and max years
return is_num_cell(cell) and cell > min_year and cell < max_year | 0.004237 |
def _transition_stage(self, step, total_steps,
brightness=None, color=None):
"""
Get a transition stage at a specific step.
:param step: The current step.
:param total_steps: The total number of steps.
:param brightness: The brightness to transition to (0.0-1.0).
:param color: The color to transition to.
:return: The stage at the specific step.
"""
if brightness is not None:
self._assert_is_brightness(brightness)
brightness = self._interpolate(self.brightness, brightness,
step, total_steps)
if color is not None:
self._assert_is_color(color)
color = Color(*[self._interpolate(self.color[i], color[i],
step, total_steps)
for i in range(3)])
return {'brightness': brightness, 'color': color} | 0.003074 |
def get_keywords(self, entry):
"""
get list of models.Keyword objects from XML node entry
:param entry: XML node entry
:return: list of :class:`pyuniprot.manager.models.Keyword` objects
"""
keyword_objects = []
for keyword in entry.iterfind("./keyword"):
identifier = keyword.get('id')
name = keyword.text
keyword_hash = hash(identifier)
if keyword_hash not in self.keywords:
self.keywords[keyword_hash] = models.Keyword(**{'identifier': identifier, 'name': name})
keyword_objects.append(self.keywords[keyword_hash])
return keyword_objects | 0.004386 |
def runTask(self, task, timeout=None):
"""Run a child task to completion. Returns the result of
the child task.
"""
# Initialize the task.
task.initialize(self)
# Start the task.
task.start()
# Lets other threads run
time.sleep(0)
# Wait for it to finish.
res = task.wait(timeout=timeout)
# Now we're done
return res | 0.004706 |
def normalizeURL(url):
"""Normalize a URL, converting normalization failures to
DiscoveryFailure"""
try:
normalized = urinorm.urinorm(url)
except ValueError, why:
raise DiscoveryFailure('Normalizing identifier: %s' % (why[0],), None)
else:
return urlparse.urldefrag(normalized)[0] | 0.003086 |
def is_network_source_fw(cls, nwk, nwk_name):
"""Check if SOURCE is FIREWALL, if yes return TRUE.
If source is None or entry not in NWK DB, check from Name.
Name should have constant AND length should match.
"""
if nwk is not None:
if nwk.source == fw_const.FW_CONST:
return True
return False
if nwk_name in fw_const.DUMMY_SERVICE_NWK and (
len(nwk_name) == len(fw_const.DUMMY_SERVICE_NWK) +
fw_const.SERVICE_NAME_EXTRA_LEN):
return True
if nwk_name in fw_const.IN_SERVICE_NWK and (
len(nwk_name) == len(fw_const.IN_SERVICE_NWK) +
fw_const.SERVICE_NAME_EXTRA_LEN):
return True
if nwk_name in fw_const.OUT_SERVICE_NWK and (
len(nwk_name) == len(fw_const.OUT_SERVICE_NWK) +
fw_const.SERVICE_NAME_EXTRA_LEN):
return True
return False | 0.002116 |
def intersectsExtent(self, extent):
"Determine if an extent intersects this instance extent"
return \
self.extent[0] <= extent[2] and self.extent[2] >= extent[0] and \
self.extent[1] <= extent[3] and self.extent[3] >= extent[1] | 0.007491 |
def _build_likelihood(self):
r"""
q_alpha, q_lambda are variational parameters, size N x R
This method computes the variational lower bound on the likelihood,
which is:
E_{q(F)} [ \log p(Y|F) ] - KL[ q(F) || p(F)]
with
q(f) = N(f | K alpha + mean, [K^-1 + diag(square(lambda))]^-1) .
"""
K = self.kern.K(self.X)
K_alpha = tf.matmul(K, self.q_alpha)
f_mean = K_alpha + self.mean_function(self.X)
# compute the variance for each of the outputs
I = tf.tile(tf.expand_dims(tf.eye(self.num_data, dtype=settings.float_type), 0),
[self.num_latent, 1, 1])
A = I + tf.expand_dims(tf.transpose(self.q_lambda), 1) * \
tf.expand_dims(tf.transpose(self.q_lambda), 2) * K
L = tf.cholesky(A)
Li = tf.matrix_triangular_solve(L, I)
tmp = Li / tf.expand_dims(tf.transpose(self.q_lambda), 1)
f_var = 1. / tf.square(self.q_lambda) - tf.transpose(tf.reduce_sum(tf.square(tmp), 1))
# some statistics about A are used in the KL
A_logdet = 2.0 * tf.reduce_sum(tf.log(tf.matrix_diag_part(L)))
trAi = tf.reduce_sum(tf.square(Li))
KL = 0.5 * (A_logdet + trAi - self.num_data * self.num_latent +
tf.reduce_sum(K_alpha * self.q_alpha))
v_exp = self.likelihood.variational_expectations(f_mean, f_var, self.Y)
return tf.reduce_sum(v_exp) - KL | 0.003408 |
def get_attached_devices_2(self):
"""
Return list of connected devices to the router with details.
This call is slower and probably heavier on the router load.
Returns None if error occurred.
"""
_LOGGER.info("Get attached devices 2")
success, response = self._make_request(SERVICE_DEVICE_INFO,
"GetAttachDevice2")
if not success:
return None
success, devices_node = _find_node(
response.text,
".//GetAttachDevice2Response/NewAttachDevice")
if not success:
return None
xml_devices = devices_node.findall("Device")
devices = []
for d in xml_devices:
ip = _xml_get(d, 'IP')
name = _xml_get(d, 'Name')
mac = _xml_get(d, 'MAC')
signal = _convert(_xml_get(d, 'SignalStrength'), int)
link_type = _xml_get(d, 'ConnectionType')
link_rate = _xml_get(d, 'Linkspeed')
allow_or_block = _xml_get(d, 'AllowOrBlock')
device_type = _convert(_xml_get(d, 'DeviceType'), int)
device_model = _xml_get(d, 'DeviceModel')
ssid = _xml_get(d, 'SSID')
conn_ap_mac = _xml_get(d, 'ConnAPMAC')
devices.append(Device(name, ip, mac, link_type, signal, link_rate,
allow_or_block, device_type, device_model,
ssid, conn_ap_mac))
return devices | 0.001304 |
def refresh(self, token, timeout):
"""Modify an existing lock's timeout.
token:
Valid lock token.
timeout:
Suggested lifetime in seconds (-1 for infinite).
The real expiration time may be shorter than requested!
Returns:
Lock dictionary.
Raises ValueError, if token is invalid.
"""
assert token in self._dict, "Lock must exist"
assert timeout == -1 or timeout > 0
if timeout < 0 or timeout > LockStorageDict.LOCK_TIME_OUT_MAX:
timeout = LockStorageDict.LOCK_TIME_OUT_MAX
self._lock.acquire_write()
try:
# Note: shelve dictionary returns copies, so we must reassign
# values:
lock = self._dict[token]
lock["timeout"] = timeout
lock["expire"] = time.time() + timeout
self._dict[token] = lock
self._flush()
finally:
self._lock.release()
return lock | 0.001978 |
def word(self):
"""Return last changes with word diff"""
try:
output = ensure_unicode(self.git.diff(
'--no-color',
'--word-diff=plain',
'HEAD~1:content',
'HEAD:content',
).stdout)
except sh.ErrorReturnCode_128:
result = ensure_unicode(self.git.show(
"HEAD:content"
).stdout)
else:
ago = ensure_unicode(self.git.log(
'-2',
'--pretty=format:last change was %cr',
'content'
).stdout).splitlines()
lines = output.splitlines()
result = u'\n'.join(
itertools.chain(
itertools.islice(
itertools.dropwhile(
lambda x: not x.startswith('@@'),
lines[1:],
),
1,
None,
),
itertools.islice(ago, 1, None),
)
)
return result | 0.001773 |
def create_user(server_context, email, container_path=None, send_email=False):
"""
Create new account
:param server_context: A LabKey server context. See utils.create_server_context.
:param email:
:param container_path:
:param send_email: true to send email notification to user
:return:
"""
url = server_context.build_url(security_controller, 'createNewUser.api', container_path)
payload = {
'email': email,
'sendEmail': send_email
}
return server_context.make_request(url, payload) | 0.005474 |
def parse_idf(file_like):
"""
Records are created from string.
They are not attached to idf yet.
in idf: header comment, chapter comments, records
in record: head comment, field comments, tail comment
"""
tables_data = {}
head_comment = ""
record_data = None
make_new_record = True
copyright_list = get_multi_line_copyright_message().split("\n")
for i, raw_line in enumerate(file_like):
# manage if copyright
try:
copyright_line = copyright_list[i]
if raw_line.strip() == copyright_line:
# skip copyright line
continue
except IndexError:
pass
# GET LINE CONTENT AND COMMENT
split_line = raw_line.split("!")
# no "!" in the raw_line
if len(split_line) == 1:
# this is an empty line
if len(split_line[0].strip()) == 0:
content, comment = None, None
# this is a record line with no comments
else:
content, comment = split_line[0].strip(), None
# there is at least one "!" in the raw_line
else:
# this is a comment line
if len(split_line[0].strip()) == 0:
content, comment = None, "!".join(split_line[1:])
# this is a record line with a comment
else:
content, comment = split_line[0].strip(), "!".join(split_line[1:])
# SKIP CURRENT LINE IF VOID
if (content, comment) == (None, None):
continue
# NO CONTENT
if not content:
if record_data is None: # we only manage head idf comment
head_comment += comment.strip() + "\n"
continue
# CONTENT
# check if record end and prepare
record_end = content[-1] == ";"
content = content[:-1] # we tear comma or semi-colon
content_l = [text.strip() for text in content.split(",")]
# record creation if needed
if make_new_record:
# get table ref
table_ref = table_name_to_ref(content_l[0].strip())
# skip if special table
if table_ref.lower() in (
"lead input",
"end lead input",
"simulation data",
"end simulation data"
):
continue
# declare table if necessary
if table_ref not in tables_data:
tables_data[table_ref] = []
# create and store record
record_data = dict()
tables_data[table_ref].append(record_data)
# prepare in case fields on the same line
content_l = content_l[1:]
make_new_record = False
# fields
for value_s in content_l:
field_index = len(record_data)
record_data[field_index] = value_s
# signal that new record must be created
if record_end:
make_new_record = True
# add comment key
tables_data["_comment"] = head_comment
return tables_data | 0.000636 |
async def handle_adapter_event(self, adapter_id, conn_string, conn_id, name, event):
"""Handle an event received from an adapter."""
if name == 'device_seen':
self._track_device_seen(adapter_id, conn_string, event)
event = self._translate_device_seen(adapter_id, conn_string, event)
conn_string = self._translate_conn_string(adapter_id, conn_string)
elif conn_id is not None and self._get_property(conn_id, 'translate'):
conn_string = self._translate_conn_string(adapter_id, conn_string)
else:
conn_string = "adapter/%d/%s" % (adapter_id, conn_string)
await self.notify_event(conn_string, name, event) | 0.004261 |
def prepend(exception, message, end=': '):
"""Prepends the first argument (i.e., the exception message) of the a BaseException with the provided message.
Useful for reraising exceptions with additional information.
:param BaseException exception: the exception to prepend
:param str message: the message to prepend
:param str end: the separator to add to the end of the provided message
:returns: the exception
"""
exception.args = exception.args or ('',)
exception.args = (message + end + exception.args[0], ) + exception.args[1:]
return exception | 0.003396 |
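A usage sketch for re-raising with added context:

try:
    int("x")
except ValueError as exc:
    # the re-raised message reads: "while parsing config: invalid literal for int() ..."
    raise prepend(exc, "while parsing config")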
def fill(self, paths):
"""
Initialise the tree.
paths is a list of strings where each string is the relative path to some
file.
"""
for path in paths:
tree = self.tree
parts = tuple(path.split('/'))
dir_parts = parts[:-1]
built = ()
for part in dir_parts:
self.cache[built] = tree
built += (part, )
parent = tree
tree = parent.folders.get(part, empty)
if tree is empty:
tree = parent.folders[part] = TreeItem(name=built, folders={}, files=set(), parent=parent)
self.cache[dir_parts] = tree
tree.files.add(parts[-1]) | 0.00534 |
def calc_grad(self):
"""The gradient of the cost w.r.t. the parameters."""
if self._fresh_JTJ:
return self._graderr
else:
residuals = self.calc_residuals()
return 2*np.dot(self.J, residuals) | 0.008 |
def walk(self, maxresults=100, maxdepth=None):
"""Walk the object tree, ignoring duplicates and circular refs."""
log.debug("step")
self.seen = {}
self.ignore(self, self.__dict__, self.obj, self.seen, self._ignore)
# Ignore the calling frame, its builtins, globals and locals
self.ignore_caller()
self.maxdepth = maxdepth
count = 0
log.debug("will iterate results")
for result in self._gen(self.obj):
log.debug("will yeld")
yield result
count += 1
if maxresults and count >= maxresults:
yield 0, 0, "==== Max results reached ===="
return | 0.002865 |
def query(self, stringa):
"""SPARQL query / wrapper for rdflib sparql query method """
qres = self.rdflib_graph.query(stringa)
return list(qres) | 0.011905 |
def _get_par_summary(sim, n, probs):
"""Summarize chains merged and individually
Parameters
----------
sim : dict from stanfit object
n : int
parameter index
    probs : iterable of float
        quantile probabilities
Returns
-------
summary : dict
Dictionary containing summaries
"""
# _get_samples gets chains for nth parameter
ss = _get_samples(n, sim, inc_warmup=False)
msdfun = lambda chain: (np.mean(chain), np.std(chain, ddof=1))
qfun = lambda chain: mquantiles(chain, probs)
c_msd = np.array([msdfun(s) for s in ss]).flatten()
c_quan = np.array([qfun(s) for s in ss]).flatten()
ass = np.asarray(ss).flatten()
msd = np.asarray(msdfun(ass))
quan = qfun(np.asarray(ass))
return dict(msd=msd, quan=quan, c_msd=c_msd, c_quan=c_quan) | 0.003686 |
def collectData(reads1, reads2, square, matchAmbiguous):
"""
Get pairwise matching statistics for two sets of reads.
@param reads1: An C{OrderedDict} of C{str} read ids whose values are
C{Read} instances. These will be the rows of the table.
@param reads2: An C{OrderedDict} of C{str} read ids whose values are
C{Read} instances. These will be the columns of the table.
@param square: If C{True} we are making a square table of a set of
sequences against themselves (in which case we show nothing on the
diagonal).
@param matchAmbiguous: If C{True}, count ambiguous nucleotides that are
possibly correct as actually being correct. Otherwise, we are strict
and insist that only non-ambiguous nucleotides can contribute to the
matching nucleotide count.
"""
result = defaultdict(dict)
for id1, read1 in reads1.items():
for id2, read2 in reads2.items():
if id1 != id2 or not square:
match = compareDNAReads(
read1, read2, matchAmbiguous=matchAmbiguous)['match']
if not matchAmbiguous:
assert match['ambiguousMatchCount'] == 0
result[id1][id2] = result[id2][id1] = match
return result | 0.000778 |
def hdate(self, date):
"""Set the dates of the HDate object based on a given Hebrew date."""
# Sanity checks
if date is None and isinstance(self.gdate, datetime.date):
# Calculate the value since gdate has been set
date = self.hdate
if not isinstance(date, HebrewDate):
raise TypeError('date: {} is not of type HebrewDate'.format(date))
if not 0 < date.month < 15:
raise ValueError(
'month ({}) legal values are 1-14'.format(date.month))
if not 0 < date.day < 31:
raise ValueError('day ({}) legal values are 1-31'.format(date.day))
self._last_updated = "hdate"
self._hdate = date | 0.002774 |
def subs(self, *args):
"""Substitute a symbolic expression in ``['x', 'y', 'z']``
This is a wrapper around the substitution mechanism of
`sympy <http://docs.sympy.org/latest/tutorial/basic_operations.html>`_.
Any symbolic expression in the columns
``['x', 'y', 'z']`` of ``self`` will be substituted
with value.
Args:
symb_expr (sympy expression):
value :
perform_checks (bool): If ``perform_checks is True``,
it is asserted, that the resulting Zmatrix can be converted
to cartesian coordinates.
Dummy atoms will be inserted automatically if necessary.
Returns:
Cartesian: Cartesian with substituted symbolic expressions.
If all resulting sympy expressions in a column are numbers,
            the column is recast to 64-bit float.
"""
cols = ['x', 'y', 'z']
out = self.copy()
def get_subs_f(*args):
def subs_function(x):
if hasattr(x, 'subs'):
x = x.subs(*args)
try:
x = float(x)
except TypeError:
pass
return x
return subs_function
for col in cols:
if out.loc[:, col].dtype is np.dtype('O'):
out.loc[:, col] = out.loc[:, col].map(get_subs_f(*args))
try:
out.loc[:, col] = out.loc[:, col].astype('f8')
except (SystemError, TypeError):
pass
return out | 0.001219 |
def set_position(cls, resource_id, to_position, db_session=None, *args, **kwargs):
"""
Sets node position for new node in the tree
:param resource_id: resource to move
:param to_position: new position
:param db_session:
        :return:
"""
db_session = get_db_session(db_session)
# lets lock rows to prevent bad tree states
resource = ResourceService.lock_resource_for_update(
resource_id=resource_id, db_session=db_session
)
cls.check_node_position(
resource.parent_id, to_position, on_same_branch=True, db_session=db_session
)
cls.shift_ordering_up(resource.parent_id, to_position, db_session=db_session)
db_session.flush()
db_session.expire(resource)
resource.ordering = to_position
return True | 0.005476 |
def linear_regression(self, target, regression_length, mask=NotSpecified):
"""
Construct a new Factor that performs an ordinary least-squares
regression predicting the columns of `self` from `target`.
This method can only be called on factors which are deemed safe for use
as inputs to other factors. This includes `Returns` and any factors
created from `Factor.rank` or `Factor.zscore`.
Parameters
----------
target : zipline.pipeline.Term with a numeric dtype
The term to use as the predictor/independent variable in each
regression. This may be a Factor, a BoundColumn or a Slice. If
`target` is two-dimensional, regressions are computed asset-wise.
regression_length : int
Length of the lookback window over which to compute each
regression.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets should be regressed with the
target slice each day.
Returns
-------
regressions : zipline.pipeline.factors.RollingLinearRegression
A new Factor that will compute linear regressions of `target`
against the columns of `self`.
Examples
--------
Suppose we want to create a factor that regresses AAPL's 10-day returns
against the 10-day returns of all other assets, computing each
regression over 30 days. This can be achieved by doing the following::
returns = Returns(window_length=10)
returns_slice = returns[sid(24)]
aapl_regressions = returns.linear_regression(
target=returns_slice, regression_length=30,
)
This is equivalent to doing::
aapl_regressions = RollingLinearRegressionOfReturns(
target=sid(24), returns_length=10, regression_length=30,
)
See Also
--------
:func:`scipy.stats.linregress`
:class:`zipline.pipeline.factors.RollingLinearRegressionOfReturns`
"""
from .statistical import RollingLinearRegression
return RollingLinearRegression(
dependent=self,
independent=target,
regression_length=regression_length,
mask=mask,
) | 0.000851 |
def calc_paired_insert_stats(in_bam, nsample=1000000):
"""Retrieve statistics for paired end read insert distances.
"""
dists = []
n = 0
with pysam.Samfile(in_bam, "rb") as in_pysam:
for read in in_pysam:
if read.is_proper_pair and read.is_read1:
n += 1
dists.append(abs(read.isize))
if n >= nsample:
break
return insert_size_stats(dists) | 0.002222 |
def envelope(self, instrument):
"""
Computes isotopic envelope for a given instrument model
:param instrument: instrument model to use
:returns: isotopic envelope as a function of mass
:rtype: function float(mz: float)
"""
def envelopeFunc(mz):
if isinstance(mz, numbers.Number):
return ims.spectrum_envelope(self.ptr, instrument.ptr, mz)
mzs = _cffi_buffer(mz, 'd')
n = len(mz)
buf = _cffi_buffer(n, 'f')
ret = ims.spectrum_envelope_plot(self.ptr, instrument.ptr, mzs.ptr, n, buf.ptr)
if ret < 0:
_raise_ims_exception()
return buf.python_data()
return envelopeFunc | 0.004011 |
def p_reset(self, program):
"""
reset : RESET primary
"""
program[0] = node.Reset([program[2]])
self.verify_reg(program[2], 'qreg') | 0.011696 |
def ip_prefixes_sanity_check(config, bird_configuration):
"""Sanity check on IP prefixes.
Arguments:
        config (obj): A configparser object which holds our configuration.
bird_configuration (dict): A dictionary, which holds Bird configuration
per IP protocol version.
"""
for ip_version in bird_configuration:
modify_ip_prefixes(config,
bird_configuration[ip_version]['config_file'],
bird_configuration[ip_version]['variable_name'],
bird_configuration[ip_version]['dummy_ip_prefix'],
bird_configuration[ip_version]['reconfigure_cmd'],
bird_configuration[ip_version]['keep_changes'],
bird_configuration[ip_version]['changes_counter'],
ip_version) | 0.001135 |
def _get_indices(self, data):
""" Compute indices along temporal dimension corresponding to the sought percentile
:param data: Input 3D array holding the reference band
:type data: numpy array
:return: 2D array holding the temporal index corresponding to percentile
"""
indices = self._index_by_percentile(data, self.percentile)
return indices | 0.009709 |
def quantile(x, q):
"""
Calculates the q quantile of x. This is the value of x greater than q% of the ordered values from x.
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:param q: the quantile to calculate
:type q: float
:return: the value of this feature
:return type: float
"""
x = pd.Series(x)
return pd.Series.quantile(x, q) | 0.004938 |
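A quick check of the behaviour described in the docstring, assuming the quantile() function above; the printed values follow pandas' default linear interpolation.
import numpy as np

x = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
print(quantile(x, 0.5))   # 5.5 (the median)
print(quantile(x, 0.9))   # 9.1, larger than ~90% of the ordered values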
def create_cache_cluster(name, wait=600, security_groups=None,
region=None, key=None, keyid=None, profile=None, **args):
'''
Create a cache cluster.
Example:
.. code-block:: bash
salt myminion boto3_elasticache.create_cache_cluster name=myCacheCluster \
Engine=redis \
CacheNodeType=cache.t2.micro \
NumCacheNodes=1 \
SecurityGroupIds='[sg-11223344]' \
CacheSubnetGroupName=myCacheSubnetGroup
'''
if security_groups:
if not isinstance(security_groups, list):
security_groups = [security_groups]
sgs = __salt__['boto_secgroup.convert_to_group_ids'](groups=security_groups, region=region,
key=key, keyid=keyid, profile=profile)
if 'SecurityGroupIds' not in args:
args['SecurityGroupIds'] = []
args['SecurityGroupIds'] += sgs
args = dict([(k, v) for k, v in args.items() if not k.startswith('_')])
return _create_resource(name, name_param='CacheClusterId', desc='cache cluster',
res_type='cache_cluster', wait=wait, status_param='CacheClusterStatus',
region=region, key=key, keyid=keyid, profile=profile, **args) | 0.006431 |
def has_readonly(self, s):
"""Tests whether store `s` is read-only."""
for t in self.transitions:
if list(t.lhs[s]) != list(t.rhs[s]):
return False
return True | 0.009479 |
def filter_counter(self, counter, min=2, max=100000000):
"""
Filter the counted records.
        Returns: Dict mapping each record that passes the filter to its count.
"""
records_filterd = {}
counter_all_records = 0
for item in counter:
counter_all_records += 1
if max > counter[item] >= min:
records_filterd[item] = counter[item]
self.stat['user_record_events'] = counter_all_records
self.stat['records_filtered'] = len(records_filterd)
return records_filterd | 0.003711 |
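A standalone sketch of the same filtering, without the class's statistics bookkeeping; the Counter contents are made up.
from collections import Counter

counter = Counter({"rec_a": 5, "rec_b": 1, "rec_c": 120})
min_count, max_count = 2, 100
filtered = {item: n for item, n in counter.items() if max_count > n >= min_count}
print(filtered)  # {'rec_a': 5}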
def null_lml(self):
"""
Log of the marginal likelihood for the null hypothesis.
It is implemented as ::
2·log(p(Y)) = -n·p·log(2𝜋s) - log|K| - n·p,
for which s and 𝚩 are optimal.
Returns
-------
lml : float
Log of the marginal likelihood.
"""
np = self._nsamples * self._ntraits
scale = self.null_scale
return self._static_lml() / 2 - np * safe_log(scale) / 2 - np / 2 | 0.004132 |
def types(self):
"""A tuple containing the value types for this Slot.
The Python equivalent of the CLIPS deftemplate-slot-types function.
"""
data = clips.data.DataObject(self._env)
lib.EnvDeftemplateSlotTypes(
self._env, self._tpl, self._name, data.byref)
return tuple(data.value) if isinstance(data.value, list) else () | 0.005195 |
def set_copyright(self, copyright=None):
"""Sets the copyright.
:param copyright: the new copyright
:type copyright: ``string``
:raise: ``InvalidArgument`` -- ``copyright`` is invalid
:raise: ``NoAccess`` -- ``Metadata.isReadOnly()`` is ``true``
:raise: ``NullArgument`` -- ``copyright`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
if copyright is None:
raise NullArgument()
metadata = Metadata(**settings.METADATA['copyright'])
if metadata.is_read_only():
raise NoAccess()
if self._is_valid_input(copyright, metadata, array=False):
self._my_map['copyright']['text'] = copyright
else:
raise InvalidArgument() | 0.002516 |
def _initialize_rest(self):
"""Used to initialize the View object on first use.
"""
if self._submit_context is None:
raise ValueError("View has not been created.")
job = self._submit_context._job_access()
self._view_object = job.get_views(name=self.name)[0] | 0.006472 |
def calculate(self, token_list_x, token_list_y):
'''
Calculate similarity with the Dice coefficient.
Concrete method.
Args:
token_list_x: [token, token, token, ...]
token_list_y: [token, token, token, ...]
Returns:
Similarity.
'''
x, y = self.unique(token_list_x, token_list_y)
try:
result = 2 * len(x & y) / float(sum(map(len, (x, y))))
except ZeroDivisionError:
result = 0.0
return result | 0.008576 |
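The same Dice formula as a standalone function, assuming unique() simply returns the two token lists as sets (a plausible reading, not confirmed by the snippet).
def dice_coefficient(token_list_x, token_list_y):
    # 2 * |X & Y| / (|X| + |Y|) over the unique tokens of each list
    x, y = set(token_list_x), set(token_list_y)
    try:
        return 2 * len(x & y) / float(len(x) + len(y))
    except ZeroDivisionError:
        return 0.0

print(dice_coefficient(["a", "b", "c"], ["b", "c", "d"]))  # 0.666...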
def create_code_cell(block):
"""Create a notebook code cell from a block."""
code_cell = nbbase.new_code_cell(source=block['content'])
attr = block['attributes']
if not attr.is_empty:
code_cell.metadata \
= nbbase.NotebookNode({'attributes': attr.to_dict()})
execution_count = attr.kvs.get('n')
if not execution_count:
code_cell.execution_count = None
else:
code_cell.execution_count = int(execution_count)
return code_cell | 0.003565 |
def get_vdW_settings(self):
        '''Determine the vdW type from the vdW xc functional if one is used,
        otherwise from the correction scheme given in the input'''
xc = self.get_xc_functional().scalars[0].value
if 'vdw' in xc.lower(): # vdW xc functional
return Value(scalars=[Scalar(value=xc)])
else:
# look for vdw_corr in input
            vdW_dict = {
                'xdm': 'Becke-Johnson XDM',
                'ts': 'Tkatchenko-Scheffler',
                'ts-vdw': 'Tkatchenko-Scheffler',
                'tkatchenko-scheffler': 'Tkatchenko-Scheffler',
                'grimme-d2': 'Grimme D2',
                'dft-d': 'Grimme D2',
            }
if self._get_line('vdw_corr', self.inputf, return_string=False, case_sens=False):
line = self._get_line('vdw_corr', self.inputf, return_string=True, case_sens=False)
vdwkey = str(line.split('=')[-1].replace("'", "").replace(',', '').lower().rstrip())
return Value(scalars=[Scalar(value=vdW_dict[vdwkey])])
return None | 0.007569 |