text (string, lengths 78 to 104k) | score (float64, 0 to 0.18)
---|---
def corrwith(self, other, axis=0, drop=False, method='pearson'):
"""
Compute pairwise correlation between rows or columns of DataFrame
with rows or columns of Series or DataFrame. DataFrames are first
aligned along both axes before computing the correlations.
Parameters
----------
other : DataFrame, Series
Object with which to compute correlations.
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' to compute column-wise, 1 or 'columns' for row-wise.
drop : bool, default False
Drop missing indices from result.
method : {'pearson', 'kendall', 'spearman'} or callable
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
* callable: callable with input two 1d ndarrays
and returning a float
.. versionadded:: 0.24.0
Returns
-------
Series
Pairwise correlations.
See Also
--------
DataFrame.corr
"""
axis = self._get_axis_number(axis)
this = self._get_numeric_data()
if isinstance(other, Series):
return this.apply(lambda x: other.corr(x, method=method),
axis=axis)
other = other._get_numeric_data()
left, right = this.align(other, join='inner', copy=False)
if axis == 1:
left = left.T
right = right.T
if method == 'pearson':
# mask missing values
left = left + right * 0
right = right + left * 0
# demeaned data
ldem = left - left.mean()
rdem = right - right.mean()
num = (ldem * rdem).sum()
dom = (left.count() - 1) * left.std() * right.std()
correl = num / dom
elif method in ['kendall', 'spearman'] or callable(method):
def c(x):
return nanops.nancorr(x[0], x[1], method=method)
correl = Series(map(c,
zip(left.values.T, right.values.T)),
index=left.columns)
else:
raise ValueError("Invalid method {method} was passed, "
"valid methods are: 'pearson', 'kendall', "
"'spearman', or callable".
format(method=method))
if not drop:
# Find non-matching labels along the given axis
# and append missing correlations (GH 22375)
raxis = 1 if axis == 0 else 0
result_index = (this._get_axis(raxis).
union(other._get_axis(raxis)))
idx_diff = result_index.difference(correl.index)
if len(idx_diff) > 0:
correl = correl.append(Series([np.nan] * len(idx_diff),
index=idx_diff))
return correl | 0.00065 |
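A quick usage sketch of the behaviour implemented above (the data values are illustrative): labels are aligned by column, and with the default drop=False a column without a partner is reported as NaN rather than silently dropped.

import pandas as pd

# Two frames sharing columns 'a' and 'b'; 'c' exists only in df1.
df1 = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [4, 3, 2, 1], 'c': [1, 3, 2, 4]})
df2 = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [1, 2, 3, 4]})

print(df1.corrwith(df2))             # a: 1.0, b: -1.0, c: NaN (kept, GH 22375)
print(df1.corrwith(df2, drop=True))  # only the matching labels a and b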
def empty(self, name, **kwargs):
"""Create an array. Keyword arguments as per
:func:`zarr.creation.empty`."""
return self._write_op(self._empty_nosync, name, **kwargs) | 0.010471 |
def _linkUser(self, user):
"""Set the UID of the current Contact in the User properties and update
all relevant own properties.
"""
KEY = "linked_contact_uid"
username = user.getId()
contact = self.getContactByUsername(username)
# User is linked to multiple other contacts (fix in Data)
# NOTE: check the list case first, because a list has no UID() method
if isinstance(contact, list):
    raise ValueError("User '{}' is linked to multiple Contacts: '{}'"
                     .format(username, ",".join(
                         map(lambda x: x.Title(), contact))))
# User is linked to another contact (fix in UI)
if contact and contact.UID() != self.UID():
    raise ValueError("User '{}' is already linked to Contact '{}'"
                     .format(username, contact.Title()))
# XXX: Does it make sense to "remember" the UID as a User property?
tool = user.getTool()
try:
user.getProperty(KEY)
except ValueError:
logger.info("Adding User property {}".format(KEY))
tool.manage_addProperty(KEY, "", "string")
# Set the UID as a User Property
uid = self.UID()
user.setMemberProperties({KEY: uid})
logger.info("Linked Contact UID {} to User {}".format(
user.getProperty(KEY), username))
# Set the Username
self.setUsername(user.getId())
# Update the Email address from the user
self.setEmailAddress(user.getProperty("email"))
# somehow the `getUsername` index gets out of sync
self.reindexObject()
# N.B. Local owner role and client group applies only to client
# contacts, but not lab contacts.
if IClient.providedBy(self.aq_parent):
# Grant local Owner role
self._addLocalOwnerRole(username)
# Add user to "Clients" group
self._addUserToGroup(username, group="Clients")
return True | 0.001005 |
def _place_order(self,
side,
product_id='BTC-USD',
client_oid=None,
type=None,
stp=None,
price=None,
size=None,
funds=None,
time_in_force=None,
cancel_after=None,
post_only=None):
"""`<https://docs.exchange.coinbase.com/#orders>`_"""
data = {
'side':side,
'product_id':product_id,
'client_oid':client_oid,
'type':type,
'stp':stp,
'price':price,
'size':size,
'funds':funds,
'time_in_force':time_in_force,
'cancel_after':cancel_after,
'post_only':post_only
}
return self._post('orders', data=data) | 0.029262 |
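The request dict above deliberately carries every optional parameter, including the ones left as None. A small sketch (the helper name is hypothetical, not part of the snippet) of how such a payload is commonly pruned so only caller-supplied fields are sent:

# Hypothetical helper, not taken from the snippet above.
def _strip_none(params):
    """Drop parameters that were left as None."""
    return {k: v for k, v in params.items() if v is not None}

data = {
    'side': 'buy',
    'product_id': 'BTC-USD',
    'price': '100.00',
    'size': '0.01',
    'client_oid': None,      # optional field the caller did not set
    'time_in_force': None,   # optional field the caller did not set
}
print(_strip_none(data))
# {'side': 'buy', 'product_id': 'BTC-USD', 'price': '100.00', 'size': '0.01'}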
def _transform_constant_sequence(self, seq):
"""
Transform a frozenset or tuple.
"""
should_transform = is_a(self.types)
if not any(filter(should_transform, flatten(seq))):
# Tuple doesn't contain any transformable strings. Ignore.
yield LOAD_CONST(seq)
return
for const in seq:
if should_transform(const):
yield from self.transform_stringlike(const)
elif isinstance(const, (tuple, frozenset)):
yield from self._transform_constant_sequence(const)
else:
yield LOAD_CONST(const)
if isinstance(seq, tuple):
yield BUILD_TUPLE(len(seq))
else:
assert isinstance(seq, frozenset)
yield BUILD_TUPLE(len(seq))
yield LOAD_CONST(frozenset)
yield ROT_TWO()
yield CALL_FUNCTION(1) | 0.002162 |
def is_pre_prepare_time_acceptable(self, pp: PrePrepare, sender: str) -> bool:
"""
Returns True or False depending on the whether the time in PRE-PREPARE
is acceptable. Can return True if time is not acceptable but sufficient
PREPAREs are found to support the PRE-PREPARE
:param pp: the PRE-PREPARE message being checked
:param sender: name of the node that sent the PRE-PREPARE
:return: True if the time is acceptable (or enough PREPAREs support it), otherwise False
"""
key = (pp.viewNo, pp.ppSeqNo)
if key in self.requested_pre_prepares:
# Special case for requested PrePrepares
return True
correct = self.is_pre_prepare_time_correct(pp, sender)
if not correct:
if key in self.pre_prepares_stashed_for_incorrect_time and \
self.pre_prepares_stashed_for_incorrect_time[key][-1]:
self.logger.debug('{} marking time as correct for {}'.format(self, pp))
correct = True
else:
self.logger.warning('{} found {} to have incorrect time.'.format(self, pp))
return correct | 0.004 |
def unsplat(f: Callable[[Iterable], A]) -> Callable[..., A]:
"""Convert a function taking a single iterable argument into a function taking multiple arguments.
Args:
f: Any function taking a single iterable argument
Returns:
A function that accepts multiple arguments. Each argument of this function is passed as an element of an
iterable to ``f``.
Example:
$ def f(a):
$ return a[0] + a[1] + a[2]
$
$ f([1, 2, 3]) # 6
$ g = unsplat(f)
$ g(1, 2, 3) # 6
"""
def unsplatted(*args):
return f(args)
return unsplatted | 0.004754 |
def plot_lines(f, x, samples, ax=None, **kwargs):
r"""
Plot a representative set of functions to sample
Additionally, if a list of log-evidences are passed, along with list of
functions, and list of samples, this function plots the probability mass
function for all models marginalised according to the evidences.
Parameters
----------
f: function
function :math:`f(x;\theta)` (or list of functions for each model) with
dependent variable :math:`x`, parameterised by :math:`\theta`.
x: 1D array-like
`x` values to evaluate :math:`f(x;\theta)` at.
samples: 2D array-like
:math:`\theta` samples (or list of :math:`\theta` samples) to evaluate
:math:`f(x;\theta)` at.
`shape = (nsamples, npars)`
ax: axes object, optional
:class:`matplotlib.axes._subplots.AxesSubplot` to plot the contours
onto. If unsupplied, then :func:`matplotlib.pyplot.gca()` is used to
get the last axis used, or create a new one.
logZ: 1D array-like, optional
log-evidences of each model if multiple models are passed.
Should be same length as the list `f`, and need not be normalised.
Default: `numpy.ones_like(f)`
weights: 1D array-like, optional
sample weights (or list of weights), if desired. Should have length
same as `samples.shape[0]`.
Default: `numpy.ones_like(samples)`
ntrim: int, optional
Approximate number of samples to trim down to, if desired. Useful if
the posterior is dramatically oversampled.
Default: None
cache: str, optional
File root for saving previous calculations for re-use
parallel, tqdm_kwargs:
see docstring for :func:`fgivenx.parallel.parallel_apply`
kwargs: further keyword arguments
Any further keyword arguments are plotting keywords that are passed to
:func:`fgivenx.plot.plot_lines`.
"""
logZ = kwargs.pop('logZ', None)
weights = kwargs.pop('weights', None)
ntrim = kwargs.pop('ntrim', None)
cache = kwargs.pop('cache', '')
parallel = kwargs.pop('parallel', False)
tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})
fsamps = compute_samples(f, x, samples, logZ=logZ,
weights=weights, ntrim=ntrim,
parallel=parallel, cache=cache,
tqdm_kwargs=tqdm_kwargs)
fgivenx.plot.plot_lines(x, fsamps, ax, **kwargs) | 0.000403 |
def force_unicode(raw):
'''Try really really hard to get a Unicode copy of a string.
First try :class:`BeautifulSoup.UnicodeDammit` to force the string
to Unicode; if that fails, assume UTF-8 encoding and ignore
all errors.
:param str raw: string to coerce
:return: Unicode approximation of `raw`
:returntype: :class:`unicode`
'''
converted = UnicodeDammit(raw, isHTML=True)
if not converted.unicode:
converted.unicode = unicode(raw, 'utf8', errors='ignore')
encoding_m = encoding_re.match(converted.unicode)
if encoding_m:
converted.unicode = \
encoding_m.group('start_xml') + \
encoding_m.group('remainder')
return converted.unicode | 0.001374 |
def ResolveObject(self, document):
'''Tries to locate a document in the archive.
This function tries to locate the document inside the archive. It
returns a tuple where the first element is zero if the function
was successful, and the second is the UnitInfo for that document.
The UnitInfo is used to retrieve the document contents
'''
if self.file:
path = os.path.abspath(document)
return chmlib.chm_resolve_object(self.file, path)
else:
return (1, None) | 0.003604 |
def _partition(iter_dims, data_sources):
"""
Partition data sources into
1. Dictionary of data sources associated with radio sources.
2. List of data sources to feed multiple times.
3. List of data sources to feed once.
"""
src_nr_vars = set(source_var_types().values())
iter_dims = set(iter_dims)
src_data_sources = collections.defaultdict(list)
feed_many = []
feed_once = []
for ds in data_sources:
# Is this data source associated with
# a radio source (point, gaussian, etc.?)
src_int = src_nr_vars.intersection(ds.shape)
if len(src_int) > 1:
raise ValueError("Data source '{}' contains multiple "
"source types '{}'".format(ds.name, src_int))
elif len(src_int) == 1:
# Yep, record appropriately and iterate
src_data_sources[src_int.pop()].append(ds)
continue
# Are we feeding this data source multiple times
# (Does it possess dimensions on which we iterate?)
if len(iter_dims.intersection(ds.shape)) > 0:
feed_many.append(ds)
continue
# Assume this is a data source that we only feed once
feed_once.append(ds)
return src_data_sources, feed_many, feed_once | 0.001537 |
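A self-contained sketch of the same partitioning idea, runnable without the surrounding framework; the DataSource stand-in, the dimension names, and the src_dims argument (playing the role of source_var_types() above) are all illustrative assumptions:

import collections

# Stand-in for the real data-source objects: only `name` and `shape` matter here.
DataSource = collections.namedtuple('DataSource', ['name', 'shape'])

def partition(iter_dims, src_dims, data_sources):
    """Split sources into (per-radio-source, feed-many, feed-once) groups."""
    iter_dims, src_dims = set(iter_dims), set(src_dims)
    by_src, feed_many, feed_once = collections.defaultdict(list), [], []
    for ds in data_sources:
        src_hit = src_dims.intersection(ds.shape)
        if src_hit:
            by_src[src_hit.pop()].append(ds)       # tied to a radio-source type
        elif iter_dims.intersection(ds.shape):
            feed_many.append(ds)                   # fed once per iteration
        else:
            feed_once.append(ds)                   # fed a single time
    return by_src, feed_many, feed_once

sources = [DataSource('lm', ('npsrc', 2)),      # tied to point sources
           DataSource('uvw', ('ntime', 'na')),  # iterated over time
           DataSource('freq', ('nchan',))]      # fed once
print(partition({'ntime'}, {'npsrc', 'ngsrc'}, sources))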
def mgmt_request(self, message, operation, op_type=None, node=None, callback=None, **kwargs):
"""Run a request/response operation. These are frequently used for management
tasks against a $management node, however any node name can be specified
and the available options will depend on the target service.
:param message: The message to send in the management request.
:type message: ~uamqp.message.Message
:param operation: The type of operation to be performed. This value will
be service-specific, but common values include READ, CREATE and UPDATE.
This value will be added as an application property on the message.
:type operation: bytes
:param op_type: The type on which to carry out the operation. This will
be specific to the entities of the service. This value will be added as
an application property on the message.
:type op_type: bytes
:param node: The target node. Default is `b"$management"`.
:type node: bytes
:param timeout: Provide an optional timeout in milliseconds within which a response
to the management request must be received.
:type timeout: int
:param callback: The function to process the returned parameters of the management
request including status code and a description if available. This can be used
to reformat the response or raise an error based on content. The function must
take 3 arguments - status code, response message and description.
:type callback: ~callable[int, bytes, ~uamqp.message.Message]
:param status_code_field: Provide an alternate name for the status code in the
response body which can vary between services due to the spec still being in draft.
The default is `b"statusCode"`.
:type status_code_field: bytes
:param description_fields: Provide an alternate name for the description in the
response body which can vary between services due to the spec still being in draft.
The default is `b"statusDescription"`.
:type description_fields: bytes
:rtype: ~uamqp.message.Message
"""
while not self.auth_complete():
time.sleep(0.05)
response = self._session.mgmt_request(
message,
operation,
op_type=op_type,
node=node,
callback=callback,
encoding=self._encoding,
debug=self._debug_trace,
**kwargs)
return response | 0.006211 |
def remove_duplicates(list_to_prune: List) -> List:
"""Removes duplicates from a list while preserving order of the items.
:param list_to_prune: the list being pruned of duplicates
:return: The pruned list
"""
temp_dict = collections.OrderedDict()
for item in list_to_prune:
temp_dict[item] = None
return list(temp_dict.keys()) | 0.00274 |
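For context, a quick check of the behaviour, plus the equivalent one-liner available on Python 3.7+ where plain dicts preserve insertion order:

import collections

def remove_duplicates(items):
    # Same approach as above: an OrderedDict keyed by the items keeps only
    # the first occurrence of each value and preserves insertion order.
    return list(collections.OrderedDict((item, None) for item in items))

print(remove_duplicates([3, 1, 3, 2, 1]))    # [3, 1, 2]
print(list(dict.fromkeys([3, 1, 3, 2, 1])))  # [3, 1, 2] on Python 3.7+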
def aggregate_task_lm_losses(hparams,
problem_hparams,
logits,
feature_name,
feature):
"""LM loss for multiproblems."""
summaries = []
vocab_size = problem_hparams.vocab_size[feature_name]
if vocab_size is not None and hasattr(hparams, "vocab_divisor"):
vocab_size += (-vocab_size) % hparams.vocab_divisor
modality = problem_hparams.modality[feature_name]
loss = hparams.loss.get(feature_name, modalities.get_loss(modality))
weights_fn = hparams.weights_fn.get(
feature_name, modalities.get_weights_fn(modality))
loss_num = 0.
loss_den = 0.
for task in hparams.problem.task_list:
loss_num_, loss_den_ = loss(
logits, feature,
lambda x: common_layers.weights_multi_problem_all(x, task.task_id), # pylint: disable=cell-var-from-loop
hparams, vocab_size, weights_fn)
loss_num += loss_num_
loss_den += loss_den_
loss_val = loss_num_ / tf.maximum(1.0, loss_den_)
summaries.append([task.name+"_loss", loss_val])
return loss_num, loss_den, summaries | 0.011424 |
def get_attributes(self, template_pack=TEMPLATE_PACK):
"""
Used by crispy_forms_tags to get helper attributes
"""
items = {
'form_method': self.form_method.strip(),
'form_tag': self.form_tag,
'form_style': self.form_style.strip(),
'form_show_errors': self.form_show_errors,
'help_text_inline': self.help_text_inline,
'error_text_inline': self.error_text_inline,
'html5_required': self.html5_required,
'form_show_labels': self.form_show_labels,
'disable_csrf': self.disable_csrf,
'label_class': self.label_class,
'field_class': self.field_class,
'include_media': self.include_media
}
if template_pack == 'bootstrap4':
bootstrap_size_match = re.findall(r'col-(xl|lg|md|sm)-(\d+)', self.label_class)
if bootstrap_size_match:
if template_pack == 'bootstrap4':
offset_pattern = 'offset-%s-%s'
else:
offset_pattern = 'col-%s-offset-%s'
items['bootstrap_checkbox_offsets'] = [offset_pattern % m for m in bootstrap_size_match]
else:
bootstrap_size_match = re.findall(r'col-(lg|md|sm|xs)-(\d+)', self.label_class)
if bootstrap_size_match:
if template_pack == 'bootstrap4':
offset_pattern = 'offset-%s-%s'
else:
offset_pattern = 'col-%s-offset-%s'
items['bootstrap_checkbox_offsets'] = [offset_pattern % m for m in bootstrap_size_match]
items['attrs'] = {}
if self.attrs:
items['attrs'] = self.attrs.copy()
if self.form_action:
items['attrs']['action'] = self.form_action.strip()
if self.form_id:
items['attrs']['id'] = self.form_id.strip()
if self.form_class:
# uni_form TEMPLATE PACK has a uniForm class by default
if template_pack == 'uni_form':
items['attrs']['class'] = "uniForm %s" % self.form_class.strip()
else:
items['attrs']['class'] = self.form_class.strip()
else:
if template_pack == 'uni_form':
items['attrs']['class'] = self.attrs.get('class', '') + " uniForm"
if self.form_group_wrapper_class:
items['attrs']['form_group_wrapper_class'] = self.form_group_wrapper_class
items['flat_attrs'] = flatatt(items['attrs'])
if self.inputs:
items['inputs'] = self.inputs
if self.form_error_title:
items['form_error_title'] = self.form_error_title.strip()
if self.formset_error_title:
items['formset_error_title'] = self.formset_error_title.strip()
for attribute_name, value in self.__dict__.items():
if attribute_name not in items and attribute_name not in ['layout', 'inputs'] and not attribute_name.startswith('_'):
items[attribute_name] = value
return items | 0.004189 |
def get_input_string_port(self, port_name, default=None):
"""
Get input string port value
:param port_name: name of the input port to read
:param default: value to return when the port is not set
:return: the port's string value, or ``default`` if missing
:rtype: str
"""
if self.__string_input_ports:
return self.__string_input_ports.get(port_name, default)
return default | 0.006211 |
def get_gradebook_column_query_session(self, proxy):
"""Gets the ``OsidSession`` associated with the gradebook column query service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradebookColumnQuerySession) - a
``GradebookColumnQuerySession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_gradebook_column_query()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_query()`` is ``true``.*
"""
if not self.supports_gradebook_column_query():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.GradebookColumnQuerySession(proxy=proxy, runtime=self._runtime) | 0.004566 |
def _ParseStringOption(cls, options, argument_name, default_value=None):
"""Parses a string command line argument.
Args:
options (argparse.Namespace): parser options.
argument_name (str): name of the command line argument.
default_value (Optional[str]): default value of the command line argument.
Returns:
str: command line argument value or the default value if the command line
argument is not set
Raises:
BadConfigOption: if the command line argument value cannot be converted
to a Unicode string.
"""
argument_value = getattr(options, argument_name, None)
if argument_value is None:
return default_value
if isinstance(argument_value, py2to3.BYTES_TYPE):
encoding = sys.stdin.encoding
# Note that sys.stdin.encoding can be None.
if not encoding:
encoding = locale.getpreferredencoding()
if not encoding:
encoding = cls._PREFERRED_ENCODING
try:
argument_value = argument_value.decode(encoding)
except UnicodeDecodeError as exception:
raise errors.BadConfigOption((
'Unable to convert option: {0:s} to Unicode with error: '
'{1!s}.').format(argument_name, exception))
elif not isinstance(argument_value, py2to3.UNICODE_TYPE):
raise errors.BadConfigOption(
'Unsupported option: {0:s} string type required.'.format(
argument_name))
return argument_value | 0.006761 |
def calibrate_dates(chron, calib_curve, d_r, d_std, cutoff=0.0001, normal_distr=False, t_a=[3], t_b=[4]):
"""Get density of calendar dates for chron date segment in core
Parameters
----------
chron : DatedProxy-like
calib_curve : CalibCurve or list of CalibCurves
d_r : scalar or ndarray
Carbon reservoir offset.
d_std : scalar or ndarray
Carbon reservoir offset error standard deviation.
cutoff : scalar, optional
Unknown.
normal_distr : Bool, optional
Use normal distribution for date errors. If False, then use Student's t-distribution.
t_a : scalar or ndarray, optional
    Student's t-distribution parameter, a. Must satisfy t_a + 1 == t_b.
t_b : scalar or ndarray, optional
    Student's t-distribution parameter, b. Must satisfy t_a + 1 == t_b.
Returns
-------
depth : ndarray
Depth of dated sediment sample.
probs : list of 2d arrays
Density of calendar age for each dated sediment sample. For each
sediment sample, the 2d array has two columns, the first is the
calendar age. The second column is the density for that calendar age.
"""
# Python version of .bacon.calib() on line 908 in Bacon.R
# .bacon.calib - line 908
# rcmean = 4128; w2 = 4225; t_a=3; t_b=4
# test = d_cal(cc = calib_curve.rename(columns = {0:'a', 1:'b', 2:'c'}), rcmean = 4128, w2 = 4225, t_a=t_a,
# t_b=t_b, cutoff=cutoff, normal = normal)
# Line 959 of Bacon.R
# calib = list(dets.iloc[:, 3])
# Now Bacon goes and checks the ncol in the dets See line #960 in Bacon.R
# Line #973
# TODO(brews): Check that `normal_dist` is used and documented correctly in docstring above.
# TODO(brews): Check whether we call returned values densities, freqs or what options we should have.
n = len(chron.depth)
calib_curve = np.array(calib_curve)
t_a = np.array(t_a)
t_b = np.array(t_b)
assert t_b - 1 == t_a
d_r = np.array(d_r)
d_std = np.array(d_std)
if len(t_a) == 1:
t_a = np.repeat(t_a, n)
if len(t_b) == 1:
t_b = np.repeat(t_b, n)
if len(d_r) == 1:
d_r = np.repeat(d_r, n)
if len(d_std) == 1:
d_std = np.repeat(d_std, n)
if len(calib_curve) == 1:
calib_curve = np.repeat(calib_curve, n)
calib_probs = []
rcmean = chron.age - d_r
w2 = chron.error ** 2 + d_std ** 2
for i in range(n):
age_realizations = d_cal(calib_curve[i], rcmean=rcmean[i], w2=w2[i],
t_a=t_a[i], t_b=t_b[i],
cutoff=cutoff, normal_distr=normal_distr)
calib_probs.append(age_realizations)
return np.array(chron.depth), calib_probs | 0.002193 |
def find_descriptor(self, uuid):
"""Return the first child descriptor found that has the specified
UUID. Will return None if no descriptor that matches is found.
"""
for desc in self.list_descriptors():
if desc.uuid == uuid:
return desc
return None | 0.006309 |
def PrivateKeyFromWIF(wif):
"""
Get the private key from a WIF key
Args:
wif (str): The wif key
Returns:
bytes: The private key
"""
if wif is None:
    raise ValueError('Please provide a wif')
if len(wif) != 52:
    raise ValueError('Please provide a wif with a length of 52 bytes (LEN: {0:d})'.format(len(wif)))
data = base58.b58decode(wif)
length = len(data)
if length != 38 or data[0] != 0x80 or data[33] != 0x01:
raise ValueError("Invalid format!")
checksum = Crypto.Hash256(data[0:34])[0:4]
if checksum != data[34:]:
raise ValueError("Invalid WIF Checksum!")
return data[1:33] | 0.004219 |
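A standalone sketch of the same WIF payload checks using hashlib, on the assumption that Crypto.Hash256 is the usual double SHA-256; the layout comments restate what the function above validates:

import hashlib

def double_sha256(data):
    """Double SHA-256, assumed to match the Hash256 helper used above."""
    return hashlib.sha256(hashlib.sha256(data).digest()).digest()

def check_decoded_wif(data):
    """Validate the 38-byte payload of a compressed-key WIF and return the key.

    Layout (same checks as PrivateKeyFromWIF after base58 decoding):
      byte 0      : 0x80 version prefix
      bytes 1-32  : the 32-byte private key
      byte 33     : 0x01 compressed-public-key flag
      bytes 34-37 : first 4 bytes of double-SHA256 over bytes 0-33
    """
    if len(data) != 38 or data[0] != 0x80 or data[33] != 0x01:
        raise ValueError("Invalid format!")
    if double_sha256(data[0:34])[:4] != data[34:]:
        raise ValueError("Invalid WIF Checksum!")
    return data[1:33]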
def velocity(adata, var_names=None, basis=None, groupby=None, groups=None, mode=None, fits='all', layers='all',
color=None, color_map='RdBu_r', colorbar=False, perc=[2,98], use_raw=False, size=None, alpha=.5,
fontsize=None, figsize=None, dpi=None, show=True, save=None, ax=None, ncols=None, **kwargs):
"""Phase and velocity plot for set of genes.
The phase plot shows spliced against unspliced expressions with steady-state fit.
Further the embedding is shown colored by velocity and expression.
Arguments
---------
adata: :class:`~anndata.AnnData`
Annotated data matrix.
var_names: `str` or list of `str` (default: `None`)
Which variables to show.
basis: `str` (default: `'umap'`)
Key for embedding coordinates.
mode: `'stochastic'` or `None` (default: `None`)
Whether to show the covariability phase portrait.
fits: `str` or list of `str` (default: `'all'`)
Which steady-state estimates to show.
layers: `str` or list of `str` (default: `'all'`)
Which layers to show.
color: `str`, list of `str` or `None` (default: `None`)
Key for annotations of observations/cells or variables/genes
color_map: `str` (default: `matplotlib.rcParams['image.cmap']`)
String denoting matplotlib color map.
perc: tuple, e.g. [2,98] (default: `None`)
Specify percentile for continuous coloring.
size: `float` (default: 5)
Point size.
alpha: `float` (default: 1)
Set blending - 0 transparent to 1 opaque.
fontsize: `float` (default: `None`)
Label font size.
figsize: tuple (default: `(7,5)`)
Figure size.
dpi: `int` (default: 80)
Figure dpi.
show: `bool`, optional (default: `None`)
Show the plot, do not return axis.
save: `bool` or `str`, optional (default: `None`)
If `True` or a `str`, save the figure. A string is appended to the default filename.
Infer the filetype if ending on {'.pdf', '.png', '.svg'}.
ax: `matplotlib.Axes`, optional (default: `None`)
A matplotlib axes object. Only works if plotting a single component.
"""
basis = default_basis(adata) if basis is None else basis
if isinstance(groupby, str) and groupby in adata.obs.keys():
if 'rank_velocity_genes' not in adata.uns.keys() or adata.uns['rank_velocity_genes']['params']['groupby'] != groupby:
rank_velocity_genes(adata, vkey='velocity', n_genes=10, groupby=groupby)
names = np.array(adata.uns['rank_velocity_genes']['names'].tolist())
if groups is None:
var_names = names[:, 0]
else:
groups = [groups] if isinstance(groups, str) else groups
idx = np.array([any([g in group for g in groups]) for group in adata.obs[groupby].cat.categories])
var_names = np.hstack(names[idx, :int(10 / idx.sum())])
elif var_names is not None:
var_names = [var_names] if isinstance(var_names, str) else [var for var in var_names if var in adata.var_names]
else:
raise ValueError('No var_names or groups specified.')
var_names = pd.unique(var_names)
(skey, ukey) = ('spliced', 'unspliced') if use_raw else ('Ms', 'Mu')
layers = ['velocity', skey, 'variance_velocity'] if layers == 'all' else layers
layers = [layer for layer in layers if layer in adata.layers.keys()]
fits = adata.layers.keys() if fits == 'all' else fits
fits = [fit for fit in fits if all(['velocity' in fit, fit + '_gamma' in adata.var.keys()])]
stochastic_fits = [fit for fit in fits if 'variance_' + fit in adata.layers.keys()]
nplts = (1 + len(layers) + (mode == 'stochastic') * 2)
ncols = 1 if ncols is None else ncols
nrows = int(np.ceil(len(var_names) / ncols))
ncols = int(ncols * nplts)
figsize = rcParams['figure.figsize'] if figsize is None else figsize
ax = pl.figure(figsize=(figsize[0] * ncols / 2, figsize[1] * nrows / 2), dpi=dpi) if ax is None else ax
gs = pl.GridSpec(nrows, ncols, wspace=0.3, hspace=0.5)
size = default_size(adata) / 2 if size is None else size # since fontsize is halved in width and height
fontsize = rcParams['font.size'] if fontsize is None else fontsize
for v, var in enumerate(var_names):
_adata = adata[:, var]
s, u = _adata.layers[skey], _adata.layers[ukey]
if issparse(s): s, u = s.A, u.A
# spliced/unspliced phase portrait with steady-state estimate
ax = pl.subplot(gs[v * nplts])
scatter(adata, basis=var, color=color, colorbar=colorbar, frameon=True, title=var, size=size, use_raw=use_raw,
alpha=alpha, fontsize=fontsize, xlabel='spliced', ylabel='unspliced', show=False, ax=ax, save=False,
legend_loc=None if v < len(var_names)-1 else 'lower right', **kwargs)
# velocity and expression plots
for l, layer in enumerate(layers):
ax = pl.subplot(gs[v * nplts + l + 1])
title = 'expression' if layer == skey else layer
scatter(adata, basis=basis, color=var, layer=layer, color_map=color_map, colorbar=colorbar, title=title,
perc=perc, use_raw=use_raw, fontsize=fontsize, size=size, alpha=alpha, frameon=False, show=False, ax=ax, save=False, **kwargs)
if mode == 'stochastic':
ss, us = second_order_moments(_adata)
ss, us = ss.flatten(), us.flatten()
fit = stochastic_fits[0]
ax = pl.subplot(gs[v * nplts + len(layers) + 1])
offset = _adata.var[fit + '_offset'] if fit + '_offset' in adata.var.keys() else 0
beta = _adata.var[fit + '_beta'] if fit + '_beta' in adata.var.keys() else 1
x = 2 * (ss - s**2) - s
y = 2 * (us - u * s) + u + 2 * s * offset / beta
scatter(adata, x=x, y=y, color=color, colorbar=colorbar, title=var, fontsize=40/ncols, size=size, perc=perc,
xlabel=r'2 $\Sigma_s - \langle s \rangle$', ylabel=r'2 $\Sigma_{us} + \langle u \rangle$',
use_raw=use_raw, frameon=True, ax=ax, save=False, show=False, **kwargs)
xnew = np.linspace(x.min(), x.max() * 1.02)
for fit in stochastic_fits:
gamma = _adata.var[fit + '_gamma'].values if fit + '_gamma' in adata.var.keys() else 1
beta = _adata.var[fit + '_beta'].values if fit + '_beta' in adata.var.keys() else 1
offset2 = _adata.var[fit + '_offset2'].values if fit + '_offset2' in adata.var.keys() else 0
pl.plot(xnew, gamma / beta * xnew + offset2 / beta, c='k', linestyle='--')
if v == len(var_names) - 1: pl.legend(fits, loc='lower right', prop={'size': 34/ncols})
savefig_or_show('', dpi=dpi, save=save, show=show)
if not show: return ax | 0.005118 |
def call(self, command, params=None, expect_body=True, stream=False):
"""
Sends the provided command to Serf for evaluation, with
any parameters as the message body.
"""
if self._socket is None:
raise SerfConnectionError('handshake must be made first')
header = msgpack.packb({"Seq": self._counter(), "Command": command})
if params is not None:
body = msgpack.packb(params)
self._socket.sendall(header + body)
else:
self._socket.sendall(header)
unpacker = msgpack.Unpacker(object_hook=self._decode_addr_key)
def read_from_socket():
try:
buf = self._socket.recv(self._socket_recv_size)
if len(buf) == 0: # Connection was closed.
raise SerfConnectionError("Connection closed by peer")
unpacker.feed(buf)
except socket.timeout:
raise SerfTimeout(
"timeout while waiting for an RPC response. (Have %s so"
"far)", response)
if stream:
def keep_reading_from_stream(init=[]):
sub_response = SerfResult()
while True:
if init is not None:
it = init
init = None
else:
if self._socket is None:
return
read_from_socket()
it = unpacker
for msg in it:
if sub_response.head is None:
sub_response.head = msg
elif sub_response.body is None:
sub_response.body = msg
yield sub_response
sub_response = SerfResult()
mem = []
messages_expected = 1
while messages_expected > 0:
read_from_socket()
# Might have received enough to deserialise one or more
# messages, try to fill out the response object.
for message in unpacker:
mem.append(message)
messages_expected -= 1
# Disable timeout while we are in streaming mode
self._socket.settimeout(None)
response = SerfResult()
response.head = mem.pop()
response.body = keep_reading_from_stream(mem)
else:
# The number of msgpack messages that are expected
# in response to this command.
messages_expected = 2 if expect_body else 1
response = SerfResult()
# Continue reading from the network until the expected number of
# msgpack messages have been received.
while messages_expected > 0:
read_from_socket()
# Might have received enough to deserialise one or more
# messages, try to fill out the response object.
for message in unpacker:
if response.head is None:
response.head = message
elif response.body is None:
response.body = message
else:
raise SerfProtocolError(
"protocol handler got more than 2 messages. "
"Unexpected message is: %s", message)
# Expecting one fewer message now.
messages_expected -= 1
return response | 0.000548 |
def kurtosis(data):
"""
Return the kurtosis for ``data``.
"""
if len(data) == 0:
return None
num = moment(data, 4)
denom = moment(data, 2) ** 2.
return num / denom if denom != 0 else 0 | 0.004484 |
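The helper relies on a moment() function that is not shown here. A self-contained sketch, assuming moment(data, k) is the k-th central moment, which makes this ratio the (non-excess) kurtosis, equal to 3 for a normal distribution:

def moment(data, k):
    """k-th central moment (assumed definition of the missing helper)."""
    mean = sum(data) / len(data)
    return sum((x - mean) ** k for x in data) / len(data)

def kurtosis(data):
    if len(data) == 0:
        return None
    num = moment(data, 4)
    denom = moment(data, 2) ** 2.
    return num / denom if denom != 0 else 0

print(kurtosis([2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]))  # ~2.78
print(kurtosis([1.0, 1.0, 1.0]))                           # 0 (denom == 0 guard)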
def omnigraffle(self):
""" tries to open an export directly in omnigraffle """
temp = self.rdf_source("dot")
try: # try to put in the user/tmp folder
from os.path import expanduser
home = expanduser("~")
filename = home + "/tmp/turtle_sketch.dot"
f = open(filename, "w")
except:
filename = "turtle_sketch.dot"
f = open(filename, "w")
f.write(temp)
f.close()
try:
os.system("open " + filename)
except:
os.system("start " + filename) | 0.042283 |
def _write_config(config):
'''
writes /usbkey/config
'''
try:
with salt.utils.atomicfile.atomic_open('/usbkey/config', 'w') as config_file:
config_file.write("#\n# This file was generated by salt\n#\n")
for prop in salt.utils.odict.OrderedDict(sorted(config.items())):
if ' ' in six.text_type(config[prop]):
if not config[prop].startswith('"') or not config[prop].endswith('"'):
config[prop] = '"{0}"'.format(config[prop])
config_file.write(
salt.utils.stringutils.to_str(
"{0}={1}\n".format(prop, config[prop])
)
)
log.debug('smartos.config - wrote /usbkey/config: %s', config)
except IOError:
return False
return True | 0.003538 |
def minimal_raw_seqs(self):
''' m.minimal_raw_seqs() -- Return minimal list of seqs that represent consensus '''
seqs = [[], []]
for letter in self.oneletter:
if letter in one2two:
seqs[0].append(one2two[letter][0])
seqs[1].append(one2two[letter][1])
else:
seqs[0].append(letter)
seqs[1].append(letter)
if ''.join(seqs[0]) == ''.join(seqs[1]):
return( [''.join(seqs[0])] )
else:
return( [''.join(seqs[0]), ''.join(seqs[1])] )
def is_address_readable(self, address):
"""
Determines if an address belongs to a committed and readable page.
The page may or may not have additional permissions.
@note: Returns always C{False} for kernel mode addresses.
@type address: int
@param address: Memory address to query.
@rtype: bool
@return:
C{True} if the address belongs to a committed and readable page.
@raise WindowsError: An exception is raised on error.
"""
try:
mbi = self.mquery(address)
except WindowsError:
e = sys.exc_info()[1]
if e.winerror == win32.ERROR_INVALID_PARAMETER:
return False
raise
return mbi.is_readable() | 0.002574 |
def periodic_send(self, content, interval, title=''):
"""
Send a periodic message.
:param content: (required|str) - the message content to send
:param interval: (required|int|datetime.timedelta) - interval between sends; accepts a datetime.timedelta or an integer number of seconds
:param title: (optional|str) - the message title
:return: * status: send status, True on success, False on failure
         * message: failure details
"""
url = '{0}periodic_message'.format(self.remote)
if isinstance(interval, datetime.timedelta):
interval = int(interval.total_seconds())
if not isinstance(interval, int):
raise ValueError
data = self._wrap_post_data(title=title, content=content, interval=interval)
res = requests.post(url, data, timeout=self.timeout)
if res.status_code == requests.codes.ok:
res_data = json.loads(self._convert_bytes(res.content))
if res_data.get('status') == STATUS_SUCCESS:
return True, res_data.get('message')
return False, res_data.get('message')
res.raise_for_status()
return False, 'Request or Response Error' | 0.003617 |
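The only non-trivial argument handling above is the interval normalization; a standalone sketch of just that check:

import datetime

def normalize_interval(interval):
    # Mirrors the check in periodic_send: accept a datetime.timedelta or an
    # int number of seconds, reject everything else.
    if isinstance(interval, datetime.timedelta):
        interval = int(interval.total_seconds())
    if not isinstance(interval, int):
        raise ValueError('interval must be an int or datetime.timedelta')
    return interval

print(normalize_interval(datetime.timedelta(minutes=30)))  # 1800
print(normalize_interval(90))                              # 90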
def tag_media_sibling_ordinal(tag):
"""
Count sibling ordinal differently depending on if the
mimetype is video or not
"""
if hasattr(tag, 'name') and tag.name != 'media':
return None
nodenames = ['fig','supplementary-material','sub-article']
first_parent_tag = first_parent(tag, nodenames)
sibling_ordinal = None
if first_parent_tag:
# Start counting at 0
sibling_ordinal = 0
for media_tag in first_parent_tag.find_all(tag.name):
if 'mimetype' in tag.attrs and tag['mimetype'] == 'video':
# Count all video type media tags
if 'mimetype' in media_tag.attrs and media_tag['mimetype'] == 'video':
sibling_ordinal += 1
if media_tag == tag:
break
else:
# Count all non-video type media tags
if (('mimetype' not in media_tag.attrs)
or ('mimetype' in media_tag.attrs and media_tag['mimetype'] != 'video')):
sibling_ordinal += 1
if media_tag == tag:
break
else:
# Start counting at 1
sibling_ordinal = 1
for prev_tag in tag.find_all_previous(tag.name):
if not first_parent(prev_tag, nodenames):
if 'mimetype' in tag.attrs and tag['mimetype'] == 'video':
# Count all video type media tags
if supp_asset(prev_tag) == supp_asset(tag) and 'mimetype' in prev_tag.attrs:
sibling_ordinal += 1
else:
if supp_asset(prev_tag) == supp_asset(tag) and 'mimetype' not in prev_tag.attrs:
sibling_ordinal += 1
return sibling_ordinal | 0.00451 |
def fstab_present(name, fs_file, fs_vfstype, fs_mntops='defaults',
fs_freq=0, fs_passno=0, mount_by=None,
config='/etc/fstab', mount=True, match_on='auto'):
'''
Makes sure that an fstab mount point is present.
name
The name of block device. Can be any valid fs_spec value.
fs_file
Mount point (target) for the filesystem.
fs_vfstype
The type of the filesystem (e.g. ext4, xfs, btrfs, ...)
fs_mntops
The mount options associated with the filesystem. Default is
``defaults``.
fs_freq
Field is used by dump to determine which fs need to be
dumped. Default is ``0``
fs_passno
Field is used by fsck to determine the order in which
filesystem checks are done at boot time. Default is ``0``
mount_by
Select the final value for fs_spec. Can be [``None``,
``device``, ``label``, ``uuid``, ``partlabel``,
``partuuid``]. If ``None``, the value for fs_spec will be the
parameter ``name``, in other case will search the correct
value based on the device name. For example, for ``uuid``, the
value for fs_spec will be of type 'UUID=xxx' instead of the
device name set in ``name``.
config
Place where the fstab file lives. Default is ``/etc/fstab``
mount
Set if the mount should be mounted immediately. Default is
``True``
match_on
A name or list of fstab properties on which this state should
be applied. Default is ``auto``, a special value indicating
to guess based on fstype. In general, ``auto`` matches on
name for recognized special devices and device otherwise.
'''
ret = {
'name': name,
'result': False,
'changes': {},
'comment': [],
}
# Adjust fs_mntops based on the OS
if fs_mntops == 'defaults':
if __grains__['os'] in ['MacOS', 'Darwin']:
fs_mntops = 'noowners'
elif __grains__['os'] == 'AIX':
fs_mntops = ''
# Adjust the config file based on the OS
if config == '/etc/fstab':
if __grains__['os'] in ['MacOS', 'Darwin']:
config = '/etc/auto_salt'
elif __grains__['os'] == 'AIX':
config = '/etc/filesystems'
if not fs_file == '/':
fs_file = fs_file.rstrip('/')
fs_spec = _convert_to(name, mount_by)
# Validate that the device is valid after the conversion
if not fs_spec:
msg = 'Device {} cannot be converted to {}'
ret['comment'].append(msg.format(name, mount_by))
return ret
if __opts__['test']:
if __grains__['os'] in ['MacOS', 'Darwin']:
out = __salt__['mount.set_automaster'](name=fs_file,
device=fs_spec,
fstype=fs_vfstype,
opts=fs_mntops,
config=config,
test=True)
elif __grains__['os'] == 'AIX':
out = __salt__['mount.set_filesystems'](name=fs_file,
device=fs_spec,
fstype=fs_vfstype,
opts=fs_mntops,
mount=mount,
config=config,
test=True,
match_on=match_on)
else:
out = __salt__['mount.set_fstab'](name=fs_file,
device=fs_spec,
fstype=fs_vfstype,
opts=fs_mntops,
dump=fs_freq,
pass_num=fs_passno,
config=config,
test=True,
match_on=match_on)
ret['result'] = None
if out == 'present':
msg = '{} entry is already in {}.'
ret['comment'].append(msg.format(fs_file, config))
elif out == 'new':
msg = '{} entry will be written in {}.'
ret['comment'].append(msg.format(fs_file, config))
elif out == 'change':
msg = '{} entry will be updated in {}.'
ret['comment'].append(msg.format(fs_file, config))
else:
ret['result'] = False
msg = '{} entry cannot be created in {}: {}.'
ret['comment'].append(msg.format(fs_file, config, out))
return ret
if __grains__['os'] in ['MacOS', 'Darwin']:
out = __salt__['mount.set_automaster'](name=fs_file,
device=fs_spec,
fstype=fs_vfstype,
opts=fs_mntops,
config=config)
elif __grains__['os'] == 'AIX':
out = __salt__['mount.set_filesystems'](name=fs_file,
device=fs_spec,
fstype=fs_vfstype,
opts=fs_mntops,
mount=mount,
config=config,
match_on=match_on)
else:
out = __salt__['mount.set_fstab'](name=fs_file,
device=fs_spec,
fstype=fs_vfstype,
opts=fs_mntops,
dump=fs_freq,
pass_num=fs_passno,
config=config,
match_on=match_on)
ret['result'] = True
if out == 'present':
msg = '{} entry was already in {}.'
ret['comment'].append(msg.format(fs_file, config))
elif out == 'new':
ret['changes']['persist'] = out
msg = '{} entry added in {}.'
ret['comment'].append(msg.format(fs_file, config))
elif out == 'change':
ret['changes']['persist'] = out
msg = '{} entry updated in {}.'
ret['comment'].append(msg.format(fs_file, config))
else:
ret['result'] = False
msg = '{} entry cannot be changed in {}: {}.'
ret['comment'].append(msg.format(fs_file, config, out))
return ret | 0.000145 |
def _init_from_csr(self, csr):
"""
Initialize data from a CSR matrix.
"""
if len(csr.indices) != len(csr.data):
raise ValueError('length mismatch: {} vs {}'.format(len(csr.indices), len(csr.data)))
handle = ctypes.c_void_p()
_check_call(_LIB.XGDMatrixCreateFromCSREx(c_array(ctypes.c_size_t, csr.indptr),
c_array(ctypes.c_uint, csr.indices),
c_array(ctypes.c_float, csr.data),
ctypes.c_size_t(len(csr.indptr)),
ctypes.c_size_t(len(csr.data)),
ctypes.c_size_t(csr.shape[1]),
ctypes.byref(handle)))
self.handle = handle | 0.010169 |
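In practice this initializer is reached by handing a SciPy CSR matrix to xgboost.DMatrix (at least in the xgboost version this snippet comes from); a minimal sketch, assuming xgboost, numpy and scipy are installed:

import numpy as np
import scipy.sparse
import xgboost as xgb

# A tiny 3x4 sparse feature matrix in CSR form.
rows = np.array([0, 0, 1, 2])
cols = np.array([0, 3, 1, 2])
vals = np.array([1.0, 2.0, 3.0, 4.0])
csr = scipy.sparse.csr_matrix((vals, (rows, cols)), shape=(3, 4))

# indptr, indices and data are handed to the native library without densifying.
dtrain = xgb.DMatrix(csr, label=[0, 1, 0])
print(dtrain.num_row(), dtrain.num_col())  # 3 4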
def main():
"""
NAME
remanence_aniso_magic.py
DESCRIPTION
This program is similar to aarm_magic.py and atrm_magic.py with minor modifications.
Converts magic measurement file with ATRM/AARM data to best-fit tensor (6 elements plus sigma)
following Hext (1963), and calculates F-test statistics.
Comments:
- infield steps are marked with method codes LT-T-I:LP-AN-TRM; LT-AF-I:LP-AN-ARM
- zerofield steps are marked with method codes LT-T-Z:LP-AN-TRM; LT-AF-Z:LP-AN-ARM
- alteration check is marked with method codes LT-PTRM-I:LP-AN-TRM
please notice;
- ATRM: The program uses treatment_dc_field_phi/treatment_dc_field_theta columns to infer the direction of the applied field
(this is a change from atrm_magic.py)
- ATRM: zerofield (baseline) magnetization is subtracted from all infield measurements
- AARM: The program uses the measurement number (running number) to infer the direction of the applied field
assuming the SIO protocol for 6,9,15 measurements scheme.
See cookbook for diagram and details.
- AARM: zerofield (baseline) measurements are assumed to come before any infield step, and the baseline is subtracted from the
subsequent infield magnetization.
SYNTAX
remanence_aniso_magic.py [-h] [command line options]
OPTIONS
-h prints help message and quits
-f FILE: specify input file, default is magic_measurements.txt
INPUT
magic measurement file with ATRM and/or AARM data.
if both types of measurements exist then the program calculates both.
OUTPUT
rmag_anisotropy.log
-I- information
-W- Warning
-E- Error
rmag_anisotropy.txt:
This file contains, in addition to some magic information, the following:
- anisotropy tensor s1 to s6 normalized by the trace:
|Mx| |s1 s4 s6| |Bx|
|My| = |s4 s2 s5| . |By|
|Mz| |s6 s5 s3| |Bz|
- anisotropy_sigma (Hext, 1963)
- anisotropy_alt (alteration check for ATRM in units of %):
100 * [abs(M_first - M_last) / max(M_first, M_last)]
-
rmag_results.txt:
This file contains, in addition to some magic information, the following:
- anisotropy_t1,anisotropy_t2,anisotropy_t3 : eigenvalues
- anisotropy_v*_dec,anisotropy_v*_inc: declination/inclination of the eigenvectors
- anisotropy_ftest,anisotropy_ftest12,anisotropy_ftest23
- (the critical F for the 95% confidence level of anisotropy is given in the result_description column).
"""
#==================================================================================
meas_file="magic_measurements.txt"
args=sys.argv
dir_path='.'
#
# get name of file from command line
#
if '-WD' in args:
ind=args.index('-WD')
dir_path=args[ind+1]
if "-h" in args:
print(main.__doc__)
sys.exit()
if "-f" in args:
ind=args.index("-f")
meas_file=sys.argv[ind+1]
else:
meas_file=dir_path+'/'+meas_file
WD=dir_path
#======================================
# functions
#======================================
def get_Data(magic_file):
#------------------------------------------------
# Read magic measurement file and sort to blocks
#------------------------------------------------
Data={}
try:
meas_data,file_type=pmag.magic_read(magic_file)
except:
print("-E- ERROR: Cant read magic_measurement.txt file. File is corrupted.")
return Data
# get list of unique specimen names
#sids=pmag.get_specs(meas_data) # samples ID's
for rec in meas_data:
s=rec["er_specimen_name"]
method_codes= rec["magic_method_codes"].strip('\n')
method_codes.replace(" ","")
methods=method_codes.split(":")
if "LP-AN-TRM" in methods:
if s not in list(Data.keys()):
Data[s]={}
if 'atrmblock' not in list(Data[s].keys()):
Data[s]['atrmblock']=[]
Data[s]['atrmblock'].append(rec)
if "LP-AN-ARM" in methods:
if s not in list(Data.keys()):
Data[s]={}
if 'aarmblock' not in list(Data[s].keys()):
Data[s]['aarmblock']=[]
Data[s]['aarmblock'].append(rec)
return (Data)
#======================================
# better to put this one in pmagpy
#======================================
def calculate_aniso_parameters(B,K):
aniso_parameters={}
S_bs=dot(B,K)
# normalize by trace
trace=S_bs[0]+S_bs[1]+S_bs[2]
S_bs=old_div(S_bs,trace)
s1,s2,s3,s4,s5,s6=S_bs[0],S_bs[1],S_bs[2],S_bs[3],S_bs[4],S_bs[5]
s_matrix=[[s1,s4,s6],[s4,s2,s5],[s6,s5,s3]]
# calculate eigen vector,
t,evectors=eig(s_matrix)
# sort vectors
t=list(t)
t1=max(t)
ix_1=t.index(t1)
t3=min(t)
ix_3=t.index(t3)
for tt in range(3):
if t[tt]!=t1 and t[tt]!=t3:
t2=t[tt]
ix_2=t.index(t2)
v1=[evectors[0][ix_1],evectors[1][ix_1],evectors[2][ix_1]]
v2=[evectors[0][ix_2],evectors[1][ix_2],evectors[2][ix_2]]
v3=[evectors[0][ix_3],evectors[1][ix_3],evectors[2][ix_3]]
DIR_v1=pmag.cart2dir(v1)
DIR_v2=pmag.cart2dir(v2)
DIR_v3=pmag.cart2dir(v3)
aniso_parameters['anisotropy_s1']="%f"%s1
aniso_parameters['anisotropy_s2']="%f"%s2
aniso_parameters['anisotropy_s3']="%f"%s3
aniso_parameters['anisotropy_s4']="%f"%s4
aniso_parameters['anisotropy_s5']="%f"%s5
aniso_parameters['anisotropy_s6']="%f"%s6
aniso_parameters['anisotropy_degree']="%f"%(old_div(t1,t3))
aniso_parameters['anisotropy_t1']="%f"%t1
aniso_parameters['anisotropy_t2']="%f"%t2
aniso_parameters['anisotropy_t3']="%f"%t3
aniso_parameters['anisotropy_v1_dec']="%.1f"%DIR_v1[0]
aniso_parameters['anisotropy_v1_inc']="%.1f"%DIR_v1[1]
aniso_parameters['anisotropy_v2_dec']="%.1f"%DIR_v2[0]
aniso_parameters['anisotropy_v2_inc']="%.1f"%DIR_v2[1]
aniso_parameters['anisotropy_v3_dec']="%.1f"%DIR_v3[0]
aniso_parameters['anisotropy_v3_inc']="%.1f"%DIR_v3[1]
# modified from pmagpy:
if old_div(len(K),3)==9 or old_div(len(K),3)==6 or old_div(len(K),3)==15:
n_pos=old_div(len(K),3)
tmpH = Matrices[n_pos]['tmpH']
a=s_matrix
S=0.
comp=zeros((n_pos*3),'f')
for i in range(n_pos):
for j in range(3):
index=i*3+j
compare=a[j][0]*tmpH[i][0]+a[j][1]*tmpH[i][1]+a[j][2]*tmpH[i][2]
comp[index]=compare
for i in range(n_pos*3):
d=old_div(K[i],trace) - comp[i] # del values
S+=d*d
nf=float(n_pos*3-6) # number of degrees of freedom
if S >0:
sigma=math.sqrt(old_div(S,nf))
hpars=pmag.dohext(nf,sigma,[s1,s2,s3,s4,s5,s6])
aniso_parameters['anisotropy_sigma']="%f"%sigma
aniso_parameters['anisotropy_ftest']="%f"%hpars["F"]
aniso_parameters['anisotropy_ftest12']="%f"%hpars["F12"]
aniso_parameters['anisotropy_ftest23']="%f"%hpars["F23"]
aniso_parameters['result_description']="Critical F: %s"%(hpars['F_crit'])
aniso_parameters['anisotropy_F_crit']="%f"%float(hpars['F_crit'])
aniso_parameters['anisotropy_n']=n_pos
return(aniso_parameters)
#======================================
# Main
#======================================
aniso_logfile=open(WD+"/rmag_anisotropy.log",'w')
aniso_logfile.write("------------------------\n")
aniso_logfile.write( "-I- Start rmag anisrotropy script\n")
aniso_logfile.write( "------------------------\n")
Data=get_Data(meas_file)
#try:
# Data=get_Data(meas_file)
#except:
# aniso_logfile.write( "-E- Cant open measurement file %s\n" %meas_file)
# print "-E- Cant open measurement file %s\n exiting" %meas_file
# exit()
aniso_logfile.write( "-I- Open measurement file %s\n" %meas_file)
Data_anisotropy={}
specimens=list(Data.keys())
specimens.sort()
#-----------------------------------
# Prepare rmag_anisotropy.txt file for writing
#-----------------------------------
rmag_anisotropy_file =open(WD+"/rmag_anisotropy.txt",'w')
rmag_anisotropy_file.write("tab\trmag_anisotropy\n")
rmag_results_file =open(WD+"/rmag_results.txt",'w')
rmag_results_file.write("tab\trmag_results\n")
rmag_anistropy_header=['er_specimen_name','er_sample_name','er_site_name','anisotropy_type','anisotropy_n','anisotropy_description','anisotropy_s1','anisotropy_s2','anisotropy_s3','anisotropy_s4','anisotropy_s5','anisotropy_s6','anisotropy_sigma','anisotropy_alt','magic_experiment_names','magic_method_codes']
String=""
for i in range (len(rmag_anistropy_header)):
String=String+rmag_anistropy_header[i]+'\t'
rmag_anisotropy_file.write(String[:-1]+"\n")
rmag_results_header=['er_specimen_names','er_sample_names','er_site_names','anisotropy_type','magic_method_codes','magic_experiment_names','result_description','anisotropy_t1','anisotropy_t2','anisotropy_t3','anisotropy_ftest','anisotropy_ftest12','anisotropy_ftest23',\
'anisotropy_v1_dec','anisotropy_v1_inc','anisotropy_v2_dec','anisotropy_v2_inc','anisotropy_v3_dec','anisotropy_v3_inc']
String=""
for i in range (len(rmag_results_header)):
String=String+rmag_results_header[i]+'\t'
rmag_results_file.write(String[:-1]+"\n")
#-----------------------------------
# Matrices definitions:
# A design matrix
# B dot(inv(dot(A.transpose(),A)),A.transpose())
# tmpH is used for sigma calculation (9,15 measurements only)
#
# Anisotropy tensor:
#
# |Mx| |s1 s4 s6| |Bx|
# |My| = |s4 s2 s5| . |By|
# |Mz| |s6 s5 s3| |Bz|
#
# A matrix (measurement matrix):
# Each mesurement yields three lines in "A" matrix
#
# |Mi | |Bx 0 0 By 0 Bz| |s1|
# |Mi+1| = |0 By 0 Bx Bz 0 | . |s2|
# |Mi+2| |0 0 Bz 0 By Bx| |s3|
# |s4|
# |s5|
#
#-----------------------------------
Matrices={}
for n_pos in [6,9,15]:
Matrices[n_pos]={}
A=zeros((n_pos*3,6),'f')
if n_pos==6:
positions=[[0.,0.,1.],[90.,0.,1.],[0.,90.,1.],\
[180.,0.,1.],[270.,0.,1.],[0.,-90.,1.]]
if n_pos==15:
positions=[[315.,0.,1.],[225.,0.,1.],[180.,0.,1.],[135.,0.,1.],[45.,0.,1.],\
[90.,-45.,1.],[270.,-45.,1.],[270.,0.,1.],[270.,45.,1.],[90.,45.,1.],\
[180.,45.,1.],[180.,-45.,1.],[0.,-90.,1.],[0,-45.,1.],[0,45.,1.]]
if n_pos==9:
positions=[[315.,0.,1.],[225.,0.,1.],[180.,0.,1.],\
[90.,-45.,1.],[270.,-45.,1.],[270.,0.,1.],\
[180.,45.,1.],[180.,-45.,1.],[0.,-90.,1.]]
tmpH=zeros((n_pos,3),'f') # define tmpH
for i in range(len(positions)):
CART=pmag.dir2cart(positions[i])
a=CART[0];b=CART[1];c=CART[2]
A[3*i][0]=a
A[3*i][3]=b
A[3*i][5]=c
A[3*i+1][1]=b
A[3*i+1][3]=a
A[3*i+1][4]=c
A[3*i+2][2]=c
A[3*i+2][4]=b
A[3*i+2][5]=a
tmpH[i][0]=CART[0]
tmpH[i][1]=CART[1]
tmpH[i][2]=CART[2]
B=dot(inv(dot(A.transpose(),A)),A.transpose())
Matrices[n_pos]['A']=A
Matrices[n_pos]['B']=B
Matrices[n_pos]['tmpH']=tmpH
for specimen in specimens:
if 'atrmblock' in list(Data[specimen].keys()):
#-----------------------------------
# aTRM 6 positions
#-----------------------------------
aniso_logfile.write("-I- Start calculating ATRM tensor for specimen %s\n "%specimen)
atrmblock=Data[specimen]['atrmblock']
if len(atrmblock)<6:
aniso_logfile.write("-W- specimen %s has not enough measurementf for ATRM calculation\n"%specimen)
continue
B=Matrices[6]['B']
Reject_specimen = False
# The zero field step is a "baseline"
# Search the baseline in the ATRM measurement
baseline=""
Alteration_check=""
Alteration_check_index=""
baselines=[]
# search for baseline in atrm blocks
for rec in atrmblock:
dec=float(rec['measurement_dec'])
inc=float(rec['measurement_inc'])
moment=float(rec['measurement_magn_moment'])
# find the temperature of the atrm
if float(rec['treatment_dc_field'])!=0 and float(rec['treatment_temp'])!=273:
atrm_temperature=float(rec['treatment_temp'])
# find baseline
if float(rec['treatment_dc_field'])==0 and float(rec['treatment_temp'])!=273:
baselines.append(array(pmag.dir2cart([dec,inc,moment])))
# Find alteration check
#print rec['measurement_number']
if len(baselines)!=0:
aniso_logfile.write( "-I- found ATRM baseline for specimen %s\n"%specimen)
baselines=array(baselines)
baseline=array([mean(baselines[:,0]),mean(baselines[:,1]),mean(baselines[:,2])])
else:
baseline=zeros(3,'f')
aniso_logfile.write( "-I- No aTRM baseline for specimen %s\n"%specimen)
# sort measurements
M=zeros([6,3],'f')
for rec in atrmblock:
dec=float(rec['measurement_dec'])
inc=float(rec['measurement_inc'])
moment=float(rec['measurement_magn_moment'])
CART=array(pmag.dir2cart([dec,inc,moment]))-baseline
if float(rec['treatment_dc_field'])==0: # Ignore zero field steps
continue
if "LT-PTRM-I" in rec['magic_method_codes'].split(":"): # alteration check
Alteration_check=CART
Alteration_check_dc_field_phi=float(rec['treatment_dc_field_phi'])
Alteration_check_dc_field_theta=float(rec['treatment_dc_field_theta'])
if Alteration_check_dc_field_phi==0 and Alteration_check_dc_field_theta==0 :
Alteration_check_index=0
if Alteration_check_dc_field_phi==90 and Alteration_check_dc_field_theta==0 :
Alteration_check_index=1
if Alteration_check_dc_field_phi==0 and Alteration_check_dc_field_theta==90 :
Alteration_check_index=2
if Alteration_check_dc_field_phi==180 and Alteration_check_dc_field_theta==0 :
Alteration_check_index=3
if Alteration_check_dc_field_phi==270 and Alteration_check_dc_field_theta==0 :
Alteration_check_index=4
if Alteration_check_dc_field_phi==0 and Alteration_check_dc_field_theta==-90 :
Alteration_check_index=5
aniso_logfile.write( "-I- found alteration check for specimen %s\n"%specimen)
continue
treatment_dc_field_phi=float(rec['treatment_dc_field_phi'])
treatment_dc_field_theta=float(rec['treatment_dc_field_theta'])
treatment_dc_field=float(rec['treatment_dc_field'])
#+x, M[0]
if treatment_dc_field_phi==0 and treatment_dc_field_theta==0 :
M[0]=CART
#+Y , M[1]
if treatment_dc_field_phi==90 and treatment_dc_field_theta==0 :
M[1]=CART
#+Z , M[2]
if treatment_dc_field_phi==0 and treatment_dc_field_theta==90 :
M[2]=CART
#-x, M[3]
if treatment_dc_field_phi==180 and treatment_dc_field_theta==0 :
M[3]=CART
#-Y , M[4]
if treatment_dc_field_phi==270 and treatment_dc_field_theta==0 :
M[4]=CART
#-Z , M[5]
if treatment_dc_field_phi==0 and treatment_dc_field_theta==-90 :
M[5]=CART
# check if at least one measurement in missing
for i in range(len(M)):
if M[i][0]==0 and M[i][1]==0 and M[i][2]==0:
aniso_logfile.write( "-E- ERROR: missing atrm data for specimen %s\n"%(specimen))
Reject_specimen=True
# alteration check
anisotropy_alt=0
if Alteration_check!="":
for i in range(len(M)):
if Alteration_check_index==i:
M_1=sqrt(sum((array(M[i])**2)))
M_2=sqrt(sum(Alteration_check**2))
diff=abs(M_1-M_2)
diff_ratio=old_div(diff,mean([M_1,M_2]))
diff_ratio_perc=100*diff_ratio
if diff_ratio_perc > anisotropy_alt:
anisotropy_alt=diff_ratio_perc
else:
aniso_logfile.write( "-W- Warning: no alteration check for specimen %s \n "%specimen )
# Check for maximum difference in anti parallel directions.
# if the difference between the two measurements is more than maximum_diff
# The specimen is rejected
# i.e. +x versus -x, +y versus -y, etc.s
for i in range(3):
M_1=sqrt(sum(array(M[i])**2))
M_2=sqrt(sum(array(M[i+3])**2))
diff=abs(M_1-M_2)
diff_ratio=old_div(diff,max(M_1,M_2))
diff_ratio_perc=100*diff_ratio
if diff_ratio_perc>anisotropy_alt:
anisotropy_alt=diff_ratio_perc
if not Reject_specimen:
# K vector (18 elements, M1[x], M1[y], M1[z], ... etc.)
K=zeros(18,'f')
K[0],K[1],K[2]=M[0][0],M[0][1],M[0][2]
K[3],K[4],K[5]=M[1][0],M[1][1],M[1][2]
K[6],K[7],K[8]=M[2][0],M[2][1],M[2][2]
K[9],K[10],K[11]=M[3][0],M[3][1],M[3][2]
K[12],K[13],K[14]=M[4][0],M[4][1],M[4][2]
K[15],K[16],K[17]=M[5][0],M[5][1],M[5][2]
if specimen not in list(Data_anisotropy.keys()):
Data_anisotropy[specimen]={}
aniso_parameters=calculate_aniso_parameters(B,K)
Data_anisotropy[specimen]['ATRM']=aniso_parameters
Data_anisotropy[specimen]['ATRM']['anisotropy_alt']="%.2f"%anisotropy_alt
Data_anisotropy[specimen]['ATRM']['anisotropy_type']="ATRM"
Data_anisotropy[specimen]['ATRM']['er_sample_name']=atrmblock[0]['er_sample_name']
Data_anisotropy[specimen]['ATRM']['er_specimen_name']=specimen
Data_anisotropy[specimen]['ATRM']['er_site_name']=atrmblock[0]['er_site_name']
Data_anisotropy[specimen]['ATRM']['anisotropy_description']='Hext statistics adapted to ATRM'
Data_anisotropy[specimen]['ATRM']['magic_experiment_names']=specimen+";ATRM"
Data_anisotropy[specimen]['ATRM']['magic_method_codes']="LP-AN-TRM:AE-H"
#Data_anisotropy[specimen]['ATRM']['rmag_anisotropy_name']=specimen
if 'aarmblock' in list(Data[specimen].keys()):
#-----------------------------------
# AARM - 6, 9 or 15 positions
#-----------------------------------
aniso_logfile.write( "-I- Start calculating AARM tensors specimen %s\n"%specimen)
aarmblock=Data[specimen]['aarmblock']
if len(aarmblock)<12:
aniso_logfile.write( "-W- WARNING: not enough aarm measurement for specimen %s\n"%specimen)
continue
elif len(aarmblock)==12:
n_pos=6
B=Matrices[6]['B']
M=zeros([6,3],'f')
elif len(aarmblock)==18:
n_pos=9
B=Matrices[9]['B']
M=zeros([9,3],'f')
# 15 positions
elif len(aarmblock)==30:
n_pos=15
B=Matrices[15]['B']
M=zeros([15,3],'f')
else:
aniso_logfile.write( "-E- ERROR: number of measurements in aarm block is incorrect sample %s\n"%specimen)
continue
Reject_specimen = False
for i in range(n_pos):
for rec in aarmblock:
if float(rec['measurement_number'])==i*2+1:
dec=float(rec['measurement_dec'])
inc=float(rec['measurement_inc'])
moment=float(rec['measurement_magn_moment'])
M_baseline=array(pmag.dir2cart([dec,inc,moment]))
if float(rec['measurement_number'])==i*2+2:
dec=float(rec['measurement_dec'])
inc=float(rec['measurement_inc'])
moment=float(rec['measurement_magn_moment'])
M_arm=array(pmag.dir2cart([dec,inc,moment]))
M[i]=M_arm-M_baseline
K=zeros(3*n_pos,'f')
for i in range(n_pos):
K[i*3]=M[i][0]
K[i*3+1]=M[i][1]
K[i*3+2]=M[i][2]
if specimen not in list(Data_anisotropy.keys()):
Data_anisotropy[specimen]={}
aniso_parameters=calculate_aniso_parameters(B,K)
Data_anisotropy[specimen]['AARM']=aniso_parameters
Data_anisotropy[specimen]['AARM']['anisotropy_alt']=""
Data_anisotropy[specimen]['AARM']['anisotropy_type']="AARM"
Data_anisotropy[specimen]['AARM']['er_sample_name']=aarmblock[0]['er_sample_name']
Data_anisotropy[specimen]['AARM']['er_site_name']=aarmblock[0]['er_site_name']
Data_anisotropy[specimen]['AARM']['er_specimen_name']=specimen
Data_anisotropy[specimen]['AARM']['anisotropy_description']='Hext statistics adapted to AARM'
Data_anisotropy[specimen]['AARM']['magic_experiment_names']=specimen+";AARM"
Data_anisotropy[specimen]['AARM']['magic_method_codes']="LP-AN-ARM:AE-H"
#Data_anisotropy[specimen]['AARM']['rmag_anisotropy_name']=specimen
#-----------------------------------
specimens=list(Data_anisotropy.keys())
    specimens.sort()
    # remove previous anisotropy data, and replace with the new one:
s_list=list(Data.keys())
for sp in s_list:
if 'AniSpec' in list(Data[sp].keys()):
del Data[sp]['AniSpec']
for specimen in specimens:
        # if both AARM and ATRM exist, prefer the AARM !!
if 'AARM' in list(Data_anisotropy[specimen].keys()):
TYPES=['AARM']
if 'ATRM' in list(Data_anisotropy[specimen].keys()):
TYPES=['ATRM']
if 'AARM' in list(Data_anisotropy[specimen].keys()) and 'ATRM' in list(Data_anisotropy[specimen].keys()):
TYPES=['ATRM','AARM']
aniso_logfile.write( "-W- WARNING: both aarm and atrm data exist for specimen %s. using AARM by default. If you prefer using one of them, delete the other!\n"%specimen)
for TYPE in TYPES:
String=""
for i in range (len(rmag_anistropy_header)):
try:
String=String+Data_anisotropy[specimen][TYPE][rmag_anistropy_header[i]]+'\t'
except:
String=String+"%f"%(Data_anisotropy[specimen][TYPE][rmag_anistropy_header[i]])+'\t'
rmag_anisotropy_file.write(String[:-1]+"\n")
String=""
Data_anisotropy[specimen][TYPE]['er_specimen_names']=Data_anisotropy[specimen][TYPE]['er_specimen_name']
Data_anisotropy[specimen][TYPE]['er_sample_names']=Data_anisotropy[specimen][TYPE]['er_sample_name']
Data_anisotropy[specimen][TYPE]['er_site_names']=Data_anisotropy[specimen][TYPE]['er_site_name']
for i in range (len(rmag_results_header)):
try:
String=String+Data_anisotropy[specimen][TYPE][rmag_results_header[i]]+'\t'
except:
String=String+"%f"%(Data_anisotropy[specimen][TYPE][rmag_results_header[i]])+'\t'
rmag_results_file.write(String[:-1]+"\n")
if 'AniSpec' not in Data[specimen]:
Data[specimen]['AniSpec']={}
Data[specimen]['AniSpec'][TYPE]=Data_anisotropy[specimen][TYPE]
aniso_logfile.write("------------------------\n")
aniso_logfile.write("-I- remanence_aniso_magic script finished sucsessfuly\n")
aniso_logfile.write( "------------------------\n")
rmag_anisotropy_file.close()
print("Anisotropy tensors elements are saved in rmag_anistropy.txt")
print("Other anisotropy statistics are saved in rmag_results.txt")
print("log file is in rmag_anisotropy.log") | 0.031564 |
def run(cmd, filename=None, threads=True, verbose=False):
"""Similar to profile.run ."""
_run(threads, verbose, 'run', filename, cmd) | 0.007092 |
def bilinear_interpolation(x, y, points):
'''Interpolate (x,y) from values associated with four points.
The four points are a list of four triplets: (x, y, value).
The four points can be in any order. They should form a rectangle.
>>> bilinear_interpolation(12, 5.5,
... [(10, 4, 100),
... (20, 4, 200),
... (10, 6, 150),
... (20, 6, 300)])
165.0
@note : source -> http://stackoverflow.com/questions/8661537/how-to-perform-bilinear-interpolation-in-python
'''
# See formula at: http://en.wikipedia.org/wiki/Bilinear_interpolation
points = sorted(points) # order points by x, then by y
(x1, y1, q11), (_x1, y2, q12), (x2, _y1, q21), (_x2, _y2, q22) = points
# if x1 != _x1 or x2 != _x2 or y1 != _y1 or y2 != _y2:
# raise ValueError('points do not form a rectangle')
# if not x1 <= x <= x2 or not y1 <= y <= y2:
# raise ValueError('(x, y) not within the rectangle')
try :
if x1 != _x1 or x2 != _x2 or y1 != _y1 or y2 != _y2:
raise ValueError('points do not form a rectangle')
if not x1 <= x <= x2 or not y1 <= y <= y2:
raise ValueError('(x, y) not within the rectangle')
return (q11 * (x2 - x) * (y2 - y) +
q21 * (x - x1) * (y2 - y) +
q12 * (x2 - x) * (y - y1) +
q22 * (x - x1) * (y - y1)
) / ((x2 - x1) * (y2 - y1) + 0.0)
except ValueError : return np.nan | 0.006165 |
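# Hedged follow-up sketch (assumes numpy imported as np, as the snippet above
# relies on): because of the try/except, a query point outside the rectangle
# yields NaN instead of raising.
print(bilinear_interpolation(25, 5.5,
                             [(10, 4, 100), (20, 4, 200),
                              (10, 6, 150), (20, 6, 300)]))  # nan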
def DoxySourceScan(node, env, path):
"""
Doxygen Doxyfile source scanner. This should scan the Doxygen file and add
any files used to generate docs to the list of source files.
"""
default_file_patterns = [
'*.c', '*.cc', '*.cxx', '*.cpp', '*.c++', '*.java', '*.ii', '*.ixx',
'*.ipp', '*.i++', '*.inl', '*.h', '*.hh ', '*.hxx', '*.hpp', '*.h++',
'*.idl', '*.odl', '*.cs', '*.php', '*.php3', '*.inc', '*.m', '*.mm',
'*.py',
]
default_exclude_patterns = [
'*~',
]
sources = []
data = DoxyfileParse(node.get_contents())
if data.get("RECURSIVE", "NO") == "YES":
recursive = True
else:
recursive = False
file_patterns = data.get("FILE_PATTERNS", default_file_patterns)
exclude_patterns = data.get("EXCLUDE_PATTERNS", default_exclude_patterns)
# We're running in the top-level directory, but the doxygen
# configuration file is in the same directory as node; this means
# that relative pathnames in node must be adjusted before they can
# go onto the sources list
conf_dir = os.path.dirname(str(node))
for node in data.get("INPUT", []):
if not os.path.isabs(node):
node = os.path.join(conf_dir, node)
if os.path.isfile(node):
sources.append(node)
elif os.path.isdir(node):
if recursive:
for root, dirs, files in os.walk(node):
for f in files:
filename = os.path.join(root, f)
pattern_check = reduce(lambda x, y: x or bool(fnmatch(filename, y)), file_patterns, False)
exclude_check = reduce(lambda x, y: x and fnmatch(filename, y), exclude_patterns, True)
if pattern_check and not exclude_check:
sources.append(filename)
else:
for pattern in file_patterns:
sources.extend(glob.glob("/".join([node, pattern])))
# Add tagfiles to the list of source files:
for node in data.get("TAGFILES", []):
file = node.split("=")[0]
if not os.path.isabs(file):
file = os.path.join(conf_dir, file)
sources.append(file)
# Add additional files to the list of source files:
def append_additional_source(option):
file = data.get(option, "")
if file != "":
if not os.path.isabs(file):
file = os.path.join(conf_dir, file)
if os.path.isfile(file):
sources.append(file)
append_additional_source("HTML_STYLESHEET")
append_additional_source("HTML_HEADER")
append_additional_source("HTML_FOOTER")
sources = map( lambda path: env.File(path), sources )
return sources | 0.020661 |
def _create_wx_app():
"""
    Creates a wx.App instance if it has not been created so far.
"""
wxapp = wx.GetApp()
if wxapp is None:
wxapp = wx.App(False)
wxapp.SetExitOnFrameDelete(True)
# retain a reference to the app object so it does not get garbage
# collected and cause segmentation faults
_create_wx_app.theWxApp = wxapp | 0.002618 |
def is_primitive(self):
"""is the value a built-in type?"""
if is_py2:
return isinstance(
self.val,
(
types.NoneType,
types.BooleanType,
types.IntType,
types.LongType,
types.FloatType
)
)
else:
return isinstance(
self.val,
(
type(None),
bool,
int,
float
)
) | 0.004942 |
def reply(self, message: typing.Union[int, types.Message]):
"""
Reply to message
:param message: :obj:`int` or :obj:`types.Message`
:return: self
"""
setattr(self, 'reply_to_message_id', message.message_id if isinstance(message, types.Message) else message)
return self | 0.009174 |
def _set_id_variable_by_entity_key(self) -> Dict[str, str]:
'''Identify and set the good ids for the different entities'''
if self.id_variable_by_entity_key is None:
self.id_variable_by_entity_key = dict(
(entity.key, entity.key + '_id') for entity in self.tax_benefit_system.entities)
log.debug("Use default id_variable names:\n {}".format(self.id_variable_by_entity_key))
return self.id_variable_by_entity_key | 0.008386 |
def parse_token_list_rec(self, min_precedence):
"""
Parses a tokenized arithmetic expression into a parse tree. It calls
itself recursively to handle bracketed subexpressions.
@return: Returns a token string.
@rtype: lems.parser.expr.ExprNode
@attention: Does not handle unary minuses at the moment. Needs to be
fixed.
"""
exit_loop = False
ExprParser.depth = ExprParser.depth + 1
if self.debug: print('>>>>> Depth: %i'% ExprParser.depth)
precedence = min_precedence
while self.token_list:
token = self.token_list[0]
la = self.token_list[1] if len(self.token_list) > 1 else None
if self.debug: print('0> %s'% self.token_list)
if self.debug: print('1> Token: %s, next: %s, op stack: %s, val stack: %s, node stack: %s'% (token, la, self.op_stack, self.val_stack, self.node_stack))
self.token_list = self.token_list[1:]
close_bracket = False
if token == '(':
np = ExprParser('')
np.token_list = self.token_list
nexp = np.parse2()
self.node_stack.push(nexp)
self.val_stack.push('$')
self.token_list = np.token_list
if self.debug: print('>>> Tokens left: %s'%self.token_list)
close_bracket = True
elif token == ')':
break
elif self.is_func(token):
self.op_stack.push(token)
elif self.is_op(token):
stack_top = self.op_stack.top()
if self.debug: print('OP Token: %s (prior: %i), top: %s (prior: %i)'% (token, self.priority(token), stack_top, self.priority(stack_top)))
if self.priority(token) < self.priority(stack_top):
if self.debug: print(' Priority of %s is less than %s'%(token, stack_top))
self.node_stack.push(self.cleanup_stacks())
self.val_stack.push('$')
else:
if self.debug: print(' Priority of %s is greater than %s'%(token, stack_top))
self.op_stack.push(token)
else:
if self.debug: print('Not a bracket func or op...')
if la == '(':
raise Exception("Error parsing expression: %s\nToken: %s is placed like a function but is not recognised!\nKnown functions: %s"%(self.parse_string, token, known_functions))
stack_top = self.op_stack.top()
if stack_top == '$':
if self.debug: print("option a")
self.node_stack.push(ValueNode(token))
self.val_stack.push('$')
else:
if (self.is_op(la) and
self.priority(stack_top) < self.priority(la)):
if self.debug: print("option b")
self.node_stack.push(ValueNode(token))
self.val_stack.push('$')
else:
if self.debug: print("option c, nodes: %s"% self.node_stack)
op = self.op_stack.pop()
right = ValueNode(token)
op_node = self.make_op_node(op,right)
self.node_stack.push(op_node)
self.val_stack.push('$')
if close_bracket:
stack_top = self.op_stack.top()
if self.debug: print("+ Closing bracket, op stack: %s, node stack: %s la: %s"%(self.op_stack, self.node_stack, la))
if self.debug: print('>>> Tokens left: %s'%self.token_list)
if stack_top == '$':
if self.debug: print("+ option a")
'''
self.node_stack.push(ValueNode(token))
self.val_stack.push('$')'''
else:
la = self.token_list[0] if len(self.token_list) > 1 else None
if (self.is_op(la) and self.priority(stack_top) < self.priority(la)):
if self.debug: print("+ option b")
#self.node_stack.push(ValueNode(token))
#self.val_stack.push('$')
else:
if self.debug: print("+ option c, nodes: %s"% self.node_stack)
if self.debug: print('35> op stack: %s, val stack: %s, node stack: %s'% ( self.op_stack, self.val_stack, self.node_stack))
right = self.node_stack.pop()
op = self.op_stack.pop()
op_node = self.make_op_node(stack_top,right)
if self.debug: print("Made op node: %s, right: %s"%(op_node, right))
self.node_stack.push(op_node)
self.val_stack.push('$')
if self.debug: print('36> op stack: %s, val stack: %s, node stack: %s'% ( self.op_stack, self.val_stack, self.node_stack))
if self.debug: print('2> Token: %s, next: %s, op stack: %s, val stack: %s, node stack: %s'% (token, la, self.op_stack, self.val_stack, self.node_stack))
if self.debug: print('')
if self.debug: print('3> op stack: %s, val stack: %s, node stack: %s'% ( self.op_stack, self.val_stack, self.node_stack))
ret = self.cleanup_stacks()
if self.debug: print('4> op stack: %s, val stack: %s, node stack: %s'% ( self.op_stack, self.val_stack, self.node_stack))
if self.debug: print('<<<<< Depth: %s, returning: %s'% (ExprParser.depth, ret))
ExprParser.depth = ExprParser.depth - 1
if self.debug: print('')
return ret | 0.013676 |
def _handle_invite(self, room_id: _RoomID, state: dict):
""" Join rooms invited by whitelisted partners """
if self._stop_event.ready():
return
self.log.debug('Got invite', room_id=room_id)
invite_events = [
event
for event in state['events']
if event['type'] == 'm.room.member' and
event['content'].get('membership') == 'invite' and
event['state_key'] == self._user_id
]
if not invite_events:
self.log.debug('Invite: no invite event found', room_id=room_id)
return # there should always be one and only one invite membership event for us
invite_event = invite_events[0]
sender = invite_event['sender']
sender_join_events = [
event
for event in state['events']
if event['type'] == 'm.room.member' and
event['content'].get('membership') == 'join' and
event['state_key'] == sender
]
if not sender_join_events:
self.log.debug('Invite: no sender join event', room_id=room_id)
return # there should always be one and only one join membership event for the sender
sender_join_event = sender_join_events[0]
user = self._get_user(sender)
user.displayname = sender_join_event['content'].get('displayname') or user.displayname
peer_address = validate_userid_signature(user)
if not peer_address:
self.log.debug(
'Got invited to a room by invalid signed user - ignoring',
room_id=room_id,
user=user,
)
return
if not self._address_mgr.is_address_known(peer_address):
self.log.debug(
'Got invited by a non-whitelisted user - ignoring',
room_id=room_id,
user=user,
)
return
join_rules_events = [
event
for event in state['events']
if event['type'] == 'm.room.join_rules'
]
# room privacy as seen from the event
private_room: bool = False
if join_rules_events:
join_rules_event = join_rules_events[0]
private_room = join_rules_event['content'].get('join_rule') == 'invite'
# we join room and _set_room_id_for_address despite room privacy and requirements,
# _get_room_ids_for_address will take care of returning only matching rooms and
# _leave_unused_rooms will clear it in the future, if and when needed
room: Room = None
last_ex: Optional[Exception] = None
retry_interval = 0.1
for _ in range(JOIN_RETRIES):
try:
room = self._client.join_room(room_id)
except MatrixRequestError as e:
last_ex = e
if self._stop_event.wait(retry_interval):
break
retry_interval = retry_interval * 2
else:
break
else:
assert last_ex is not None
raise last_ex # re-raise if couldn't succeed in retries
if not room.listeners:
room.add_listener(self._handle_message, 'm.room.message')
# room state may not populated yet, so we populate 'invite_only' from event
room.invite_only = private_room
self._set_room_id_for_address(address=peer_address, room_id=room_id)
self.log.debug(
'Joined from invite',
room_id=room_id,
aliases=room.aliases,
peer=to_checksum_address(peer_address),
) | 0.00245 |
def run(self, *,
connector: Union[EnvVar, Token, SlackClient, None] = None,
interval: float = 0.5, retries: int = 16,
backoff: Callable[[int], float] = None,
until: Callable[[List[dict]], bool] = None) -> None:
"""
Connect to the Slack API and run the event handler loop.
Args:
connector: A means of connecting to the Slack API. This can be an
API :obj:`Token`, an :obj:`EnvVar` from which a token can be
retrieved, or an established :obj:`SlackClient` instance. If
absent an attempt will be made to use the ``LAYABOUT_TOKEN``
environment variable.
interval: The number of seconds to wait between fetching events
from the Slack API.
retries: The number of retry attempts to make if a connection to
Slack is not established or is lost.
backoff: The strategy used to determine how long to wait between
retries. Must take as input the number of the current retry and
output a :obj:`float`. The retry count begins at 1 and
continues up to ``retries``. If absent a
`truncated exponential backoff`_ strategy will be used.
until: The condition used to evaluate whether this method
terminates. Must take as input a :obj:`list` of :obj:`dict`
representing Slack RTM API events and return a :obj:`bool`. If
absent this method will run forever.
Raises:
TypeError: If an unsupported connector is given.
MissingToken: If no API token is available.
FailedConnection: If connecting to the Slack API fails.
.. _truncated exponential backoff:
https://cloud.google.com/storage/docs/exponential-backoff
"""
backoff = backoff or _truncated_exponential
until = until or _forever
self._ensure_slack(
connector=connector,
retries=retries,
backoff=backoff
)
assert self._slack is not None
while True:
events = self._slack.fetch_events()
if not until(events):
log.debug('Exiting event loop')
break
# Handle events!
for event in events:
type_ = event.get('type', '')
for handler in self._handlers[type_] + self._handlers['*']:
fn, kwargs = handler
fn(self._slack.inner, event, **kwargs)
# Maybe don't pester the Slack API too much.
time.sleep(interval) | 0.002217 |
def to_segment_xml(self, override_file_if_exists=False):
"""
Write the segment list in self.segmentList to self.storage_path.
"""
# create XML doc and add process table
outdoc = ligolw.Document()
outdoc.appendChild(ligolw.LIGO_LW())
process = ligolw_process.register_to_xmldoc(outdoc, sys.argv[0], {})
for key, seglist in self.segment_dict.items():
ifo, name = self.parse_segdict_key(key)
# Ensure we have LIGOTimeGPS
fsegs = [(lal.LIGOTimeGPS(seg[0]),
lal.LIGOTimeGPS(seg[1])) for seg in seglist]
if self.seg_summ_dict is None:
vsegs = [(lal.LIGOTimeGPS(seg[0]),
lal.LIGOTimeGPS(seg[1])) \
for seg in self.valid_segments]
else:
vsegs = [(lal.LIGOTimeGPS(seg[0]),
lal.LIGOTimeGPS(seg[1])) \
for seg in self.seg_summ_dict[key]]
# Add using glue library to set all segment tables
with ligolw_segments.LigolwSegments(outdoc, process) as x:
x.add(ligolw_segments.LigolwSegmentList(active=fsegs,
instruments=set([ifo]), name=name,
version=1, valid=vsegs))
# write file
url = urlparse.urljoin('file:', urllib.pathname2url(self.storage_path))
if not override_file_if_exists or not self.has_pfn(url, site='local'):
self.PFN(url, site='local')
ligolw_utils.write_filename(outdoc, self.storage_path) | 0.003069 |
def setCurrentIndex(self, index):
"""
        Sets the current index on self and on the tab bar to keep the two in sync.
:param index | <int>
"""
super(XViewPanel, self).setCurrentIndex(index)
self.tabBar().setCurrentIndex(index) | 0.010989 |
def set_end_of_event_function(self, function):
        ''' Add a function to the module.
        This is maybe the only way to make the clusterizer work with multiprocessing.
        '''
self.cluster_functions._end_of_event_function = self._jitted(function)
self._end_of_event_function = function | 0.009615 |
def set_distribute_compositions(self, distribute_comps):
"""Sets the distribution rights.
This sets distribute verbatim to ``true``.
arg: distribute_comps (boolean): right to distribute
modifications
raise: InvalidArgument - ``distribute_comps`` is invalid
raise: NoAccess - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.set_group_template
if self.get_distribute_compositions_metadata().is_read_only():
raise errors.NoAccess()
if not self._is_valid_boolean(distribute_comps):
raise errors.InvalidArgument()
self._my_map['distributeCompositions'] = distribute_comps | 0.003755 |
def start_udp_client(self, ip=None, port=None, name=None, timeout=None, protocol=None, family='ipv4'):
"""Starts a new UDP client.
Client can be optionally given `ip` and `port` to bind to, as well as
`name`, default `timeout` and a `protocol`. `family` can be either
ipv4 (default) or ipv6.
You should use `Connect` keyword to connect client to a host.
Examples:
| Start UDP client |
| Start UDP client | name=Client1 | protocol=GTPV2 |
| Start UDP client | 10.10.10.2 | 53 | name=Server1 | protocol=GTPV2 |
| Start UDP client | timeout=5 |
| Start UDP client | 0:0:0:0:0:0:0:1 | 53 | family=ipv6 |
"""
self._start_client(UDPClient, ip, port, name, timeout, protocol, family) | 0.005102 |
def get_function_argspec(func, is_class_method=None):
'''
A small wrapper around getargspec that also supports callable classes
:param is_class_method: Pass True if you are sure that the function being passed
is a class method. The reason for this is that on Python 3
``inspect.ismethod`` only returns ``True`` for bound methods,
while on Python 2, it returns ``True`` for bound and unbound
methods. So, on Python 3, in case of a class method, you'd
need the class to which the function belongs to be instantiated
and this is not always wanted.
'''
if not callable(func):
raise TypeError('{0} is not a callable'.format(func))
if six.PY2:
if is_class_method is True:
aspec = inspect.getargspec(func)
del aspec.args[0] # self
elif inspect.isfunction(func):
aspec = inspect.getargspec(func)
elif inspect.ismethod(func):
aspec = inspect.getargspec(func)
del aspec.args[0] # self
elif isinstance(func, object):
aspec = inspect.getargspec(func.__call__)
del aspec.args[0] # self
else:
raise TypeError(
'Cannot inspect argument list for \'{0}\''.format(func)
)
else:
if is_class_method is True:
aspec = _getargspec(func)
del aspec.args[0] # self
elif inspect.isfunction(func):
aspec = _getargspec(func) # pylint: disable=redefined-variable-type
elif inspect.ismethod(func):
aspec = _getargspec(func)
del aspec.args[0] # self
elif isinstance(func, object):
aspec = _getargspec(func.__call__)
del aspec.args[0] # self
else:
raise TypeError(
'Cannot inspect argument list for \'{0}\''.format(func)
)
return aspec | 0.00391 |
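# Hedged usage sketch for get_function_argspec (assumes the six/_getargspec
# imports of the original module): inspect a plain function's signature.
def _sample(a, b, c=1):
    pass

_spec = get_function_argspec(_sample)
print(_spec.args)      # ['a', 'b', 'c']
print(_spec.defaults)  # (1,)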
def estimate_ride(api_client):
"""Use an UberRidesClient to fetch a ride estimate and print the results.
Parameters
api_client (UberRidesClient)
An authorized UberRidesClient with 'request' scope.
"""
try:
estimate = api_client.estimate_ride(
product_id=SURGE_PRODUCT_ID,
start_latitude=START_LAT,
start_longitude=START_LNG,
end_latitude=END_LAT,
end_longitude=END_LNG,
seat_count=2
)
except (ClientError, ServerError) as error:
fail_print(error)
else:
success_print(estimate.json) | 0.001582 |
def check_config_options(_class, required_options, optional_options, options):
"""Helper method to check options.
Arguments:
    _class -- the original class that received the options.
required_options -- the options that are required. If they are not
present, a ConfigurationError is raised. Given as a
tuple.
optional_options -- the options that are optional. Given options that are
not present in `optional_options` nor in
`required_options` will be logged as unrecognized.
Given as a tuple.
options -- a dictionary of given options.
Raises:
ConfigurationError -- if any required option is missing.
"""
for opt in required_options:
if opt not in options:
msg = "Required option missing: {0}"
raise ConfigurationError(msg.format(opt))
for opt in options:
if opt not in (required_options + optional_options):
msg = "Unknown config option to `{0}`: {1}"
_logger.warn(msg.format(_class, opt)) | 0.000875 |
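# Hedged usage sketch for check_config_options (ConfigurationError and _logger
# are assumed to exist in the module above; the class label is illustrative).
_options = {'host': 'localhost', 'port': 5672, 'colour': 'blue'}
check_config_options('ExampleBackend',
                     required_options=('host',),
                     optional_options=('port',),
                     options=_options)
# Passes because 'host' is present; 'colour' is only logged as unrecognized.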
def simple_time(value):
"""
Format a datetime or timedelta object to a string of format HH:MM
"""
if isinstance(value, timedelta):
return ':'.join(str(value).split(':')[:2])
return datetime_to_string(value, '%H:%M') | 0.004115 |
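# Hedged usage sketch for simple_time (datetime_to_string is assumed to come
# from the same module as above, so only the timedelta branch is shown).
from datetime import timedelta
print(simple_time(timedelta(hours=2, minutes=5, seconds=30)))  # '2:05'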
def _createGsshaPyObjects(self, eventChunk):
"""
Create GSSHAPY PrecipEvent, PrecipValue, and PrecipGage Objects Method
"""
## TODO: Add Support for RADAR file format type values
# Create GSSHAPY PrecipEvent
event = PrecipEvent(description=eventChunk['description'],
nrGag=eventChunk['nrgag'],
nrPds=eventChunk['nrpds'])
# Associate PrecipEvent with PrecipFile
event.precipFile = self
gages = []
for coord in eventChunk['coords']:
# Create GSSHAPY PrecipGage object
gage = PrecipGage(description=coord['description'],
x=coord['x'],
y=coord['y'])
# Associate PrecipGage with PrecipEvent
gage.event = event
# Append to gages list for association with PrecipValues
gages.append(gage)
for valLine in eventChunk['valLines']:
for index, value in enumerate(valLine['values']):
# Create GSSHAPY PrecipValue object
val = PrecipValue(valueType=valLine['type'],
dateTime=valLine['dateTime'],
value=value)
# Associate PrecipValue with PrecipEvent and PrecipGage
val.event = event
val.gage = gages[index] | 0.002092 |
def motif_tree_plot(outfile, tree, data, circle=True, vmin=None, vmax=None, dpi=300):
"""
Plot a "phylogenetic" tree
"""
try:
from ete3 import Tree, faces, AttrFace, TreeStyle, NodeStyle
except ImportError:
print("Please install ete3 to use this functionality")
sys.exit(1)
# Define the tree
t, ts = _get_motif_tree(tree, data, circle, vmin, vmax)
# Save image
t.render(outfile, tree_style=ts, w=100, dpi=dpi, units="mm");
# Remove the bottom (empty) half of the figure
if circle:
img = Image.open(outfile)
size = img.size[0]
spacer = 50
img.crop((0,0,size,size/2 + spacer)).save(outfile) | 0.012857 |
def branch_type(cls, branch):
"""
Return the string representation for the type of a branch
"""
typename = branch.GetClassName()
if not typename:
leaf = branch.GetListOfLeaves()[0]
typename = leaf.GetTypeName()
# check if leaf has multiple elements
leaf_count = leaf.GetLeafCount()
if leaf_count:
length = leaf_count.GetMaximum()
else:
length = leaf.GetLen()
if length > 1:
typename = '{0}[{1:d}]'.format(typename, length)
return typename | 0.003236 |
def undeploy_api_gateway(self, lambda_name, domain_name=None, base_path=None):
"""
Delete a deployed REST API Gateway.
"""
print("Deleting API Gateway..")
api_id = self.get_api_id(lambda_name)
if domain_name:
# XXX - Remove Route53 smartly here?
# XXX - This doesn't raise, but doesn't work either.
try:
self.apigateway_client.delete_base_path_mapping(
domainName=domain_name,
basePath='(none)' if base_path is None else base_path
)
except Exception as e:
# We may not have actually set up the domain.
pass
was_deleted = self.delete_stack(lambda_name, wait=True)
if not was_deleted:
# try erasing it with the older method
for api in self.get_rest_apis(lambda_name):
self.apigateway_client.delete_rest_api(
restApiId=api['id']
) | 0.001951 |
def qeuler(yaw, pitch, roll):
"""Convert Euler angle to quaternion.
Parameters
----------
yaw: number
pitch: number
roll: number
Returns
-------
np.array
"""
yaw = np.radians(yaw)
pitch = np.radians(pitch)
roll = np.radians(roll)
cy = np.cos(yaw * 0.5)
sy = np.sin(yaw * 0.5)
cr = np.cos(roll * 0.5)
sr = np.sin(roll * 0.5)
cp = np.cos(pitch * 0.5)
sp = np.sin(pitch * 0.5)
q = np.array((
cy * cr * cp + sy * sr * sp, cy * sr * cp - sy * cr * sp,
cy * cr * sp + sy * sr * cp, sy * cr * cp - cy * sr * sp
))
return q | 0.001603 |
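# Hedged usage sketch for qeuler (numpy as np assumed, as above): a pure
# 90-degree yaw with zero pitch/roll gives components of roughly
# (0.7071, 0, 0, 0.7071) in the ordering produced by the formula above.
print(qeuler(90.0, 0.0, 0.0))  # approx. [0.70710678 0. 0. 0.70710678]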
def update(self, cookies):
"""Add specified cookies to our cookie jar, and persists it.
:param cookies: Any iterable that yields http.cookiejar.Cookie instances, such as a CookieJar.
"""
cookie_jar = self.get_cookie_jar()
for cookie in cookies:
cookie_jar.set_cookie(cookie)
with self._lock:
cookie_jar.save() | 0.011561 |
def _reschedule(self, node):
"""Maybe schedule new items on the node.
If there are any globally pending work units left then this will check
if the given node should be given any more tests.
"""
# Do not add more work to a node shutting down
if node.shutting_down:
return
# Check that more work is available
if not self.workqueue:
node.shutdown()
return
self.log("Number of units waiting for node:", len(self.workqueue))
# Check that the node is almost depleted of work
# 2: Heuristic of minimum tests to enqueue more work
if self._pending_of(self.assigned_work[node]) > 2:
return
# Pop one unit of work and assign it
self._assign_work_unit(node) | 0.002463 |
def name(self):
"""Concatenates the names of the given criteria in alphabetical order.
If a sub-criterion is itself a combined criterion, its name is
first split into the individual names and the names of the
sub-sub criteria is used instead of the name of the sub-criterion.
This is done recursively to ensure that the order and the hierarchy
of the criteria does not influence the name.
Returns
-------
str
The alphabetically sorted names of the sub-criteria concatenated
using double underscores between them.
"""
names = (criterion.name() for criterion in self._criteria)
return '__'.join(sorted(names)) | 0.00274 |
def members(self, is_manager=None):
"""
Retrieve members of the scope.
:param is_manager: (optional) set to True to return only Scope members that are also managers.
:type is_manager: bool
:return: List of members (usernames)
Examples
--------
>>> members = project.members()
>>> managers = project.members(is_manager=True)
"""
if not is_manager:
return [member for member in self._json_data['members'] if member['is_active']]
else:
return [member for member in self._json_data['members'] if
member.get('is_active', False) and member.get('is_manager', False)] | 0.007123 |
def _parse_categories(element):
"""
Returns a list with categories with relations.
"""
reference = {}
items = element.findall("./{%s}category" % WP_NAMESPACE)
for item in items:
term_id = item.find("./{%s}term_id" % WP_NAMESPACE).text
nicename = item.find("./{%s}category_nicename" % WP_NAMESPACE).text
name = item.find("./{%s}cat_name" % WP_NAMESPACE).text
parent = item.find("./{%s}category_parent" % WP_NAMESPACE).text
category = {
"term_id": term_id,
"nicename": nicename,
"name": name,
"parent": parent
}
reference[nicename] = category
return _build_category_tree(None, reference=reference) | 0.001368 |
def comparator(objective):
"""
Higher order function creating a compare function for objectives.
Args:
objective (cipy.algorithms.core.Objective): The objective to create a
compare for.
Returns:
callable: Function accepting two objectives to compare.
Examples:
>>> a = Minimum(0.1)
>>> b = Minimum(0.2)
>>> compare = comparator(a)
>>> comparison = compare(a, b) # False
"""
if isinstance(objective, Minimum):
return lambda l, r: l < r
else:
return lambda l, r: l > r | 0.00173 |
def download(self, protocol, host, user, password,
file_name, rbridge='all'):
"""
Download firmware to device
"""
urn = "{urn:brocade.com:mgmt:brocade-firmware}"
request_fwdl = self.get_firmware_download_request(protocol, host,
user, password,
file_name, rbridge)
response = self._callback(request_fwdl, 'get')
fwdl_result = None
for item in response.findall('%scluster-output' % urn):
fwdl_result = item.find('%sfwdl-msg' % urn).text
if not fwdl_result:
fwdl_result = response.find('%sfwdl-cmd-msg' % urn).text
return fwdl_result | 0.003911 |
def best_fit_font_size(cls, text, extents, max_size, font_file):
"""
Return the largest whole-number point size less than or equal to
*max_size* that allows *text* to fit completely within *extents* when
rendered using font defined in *font_file*.
"""
line_source = _LineSource(text)
text_fitter = cls(line_source, extents, font_file)
return text_fitter._best_fit_font_size(max_size) | 0.004474 |
def deep_force_unicode(value):
"""
Recursively call force_text on value.
"""
if isinstance(value, (list, tuple, set)):
value = type(value)(map(deep_force_unicode, value))
elif isinstance(value, dict):
value = type(value)(map(deep_force_unicode, value.items()))
elif isinstance(value, Promise):
value = force_text(value)
return value | 0.002604 |
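# Hedged usage sketch for deep_force_unicode (Promise/force_text are assumed to
# be the Django imports of the module above): already-plain nested containers
# pass through unchanged, while lazy translation proxies would be resolved.
_data = {'title': 'hello', 'tags': ['a', 'b'], 'count': 3}
print(deep_force_unicode(_data) == _data)  # True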
def buckets_list(self, projection='noAcl', max_results=0, page_token=None, project_id=None):
"""Issues a request to retrieve the list of buckets.
Args:
projection: the projection of the bucket information to retrieve.
max_results: an optional maximum number of objects to retrieve.
page_token: an optional token to continue the retrieval.
project_id: the project whose buckets should be listed.
Returns:
A parsed list of bucket information dictionaries.
Raises:
Exception if there is an error performing the operation.
"""
if max_results == 0:
max_results = Api._MAX_RESULTS
args = {'project': project_id if project_id else self._project_id, 'maxResults': max_results}
if projection is not None:
args['projection'] = projection
if page_token is not None:
args['pageToken'] = page_token
url = Api._ENDPOINT + (Api._BUCKET_PATH % '')
return google.datalab.utils.Http.request(url, args=args, credentials=self._credentials) | 0.006863 |
def _validate_input_data(self, data, request):
""" Validate input data.
:param request: the HTTP request
:param data: the parsed data
:return: if validation is performed and succeeds the data is converted
into whatever format the validation uses (by default Django's
Forms) If not, the data is returned unchanged.
:raises: HttpStatusCodeError if data is not valid
"""
validator = self._get_input_validator(request)
if isinstance(data, (list, tuple)):
return map(validator.validate, data)
else:
return validator.validate(data) | 0.003044 |
def as_dataframe(self, fillna=True, subjects=None):
"""
Return association set as pandas DataFrame
Each row is a subject (e.g. gene)
Each column is the inferred class used to describe the subject
"""
entries = []
selected_subjects = self.subjects
if subjects is not None:
selected_subjects = subjects
for s in selected_subjects:
vmap = {}
for c in self.inferred_types(s):
vmap[c] = 1
entries.append(vmap)
logging.debug("Creating DataFrame")
df = pd.DataFrame(entries, index=selected_subjects)
if fillna:
logging.debug("Performing fillna...")
df = df.fillna(0)
return df | 0.003871 |
def calc_xyz2surf(surf, xyz, threshold=20, exponent=None, std=None):
"""Calculate transformation matrix from xyz values to vertices.
Parameters
----------
surf : instance of wonambi.attr.Surf
the surface of only one hemisphere.
xyz : numpy.ndarray
nChan x 3 matrix, with the locations in x, y, z.
std : float
distance in mm of the Gaussian kernel
exponent : int
inverse law (1-> direct inverse, 2-> inverse square, 3-> inverse cube)
threshold : float
distance in mm for a vertex to pick up electrode activity (if distance
is above the threshold, one electrode does not affect a vertex).
Returns
-------
numpy.ndarray
nVertices X xyz.shape[0] matrix
Notes
-----
This function is a helper when plotting onto brain surface, by creating a
transformation matrix from the values in space (f.e. at each electrode) to
the position of the vertices (used to show the brain surface).
There are many ways to move from values to vertices. The crucial parameter
is the function at which activity decreases in respect to the distance. You
can have an inverse relationship by specifying 'exponent'. If 'exponent' is
2, then the activity will decrease as inverse square of the distance. The
function can be a Gaussian. With std, you specify the width of the gaussian
kernel in mm.
For each vertex, it uses a threshold based on the distance ('threshold'
value, in mm). Finally, it normalizes the contribution of all the channels
to 1, so that the sum of the coefficients for each vertex is 1.
You can also create your own matrix (and skip calc_xyz2surf altogether) and
pass it as attribute to the main figure.
Because it's a loop over all the vertices, this function is pretty slow,
but if you calculate it once, you can reuse it.
We take advantage of multiprocessing, which speeds it up considerably.
"""
if exponent is None and std is None:
exponent = 1
if exponent is not None:
lg.debug('Vertex values based on inverse-law, with exponent ' +
str(exponent))
funct = partial(calc_one_vert_inverse, xyz=xyz, exponent=exponent)
elif std is not None:
lg.debug('Vertex values based on gaussian, with s.d. ' + str(std))
funct = partial(calc_one_vert_gauss, xyz=xyz, std=std)
with Pool() as p:
xyz2surf = p.map(funct, surf.vert)
xyz2surf = asarray(xyz2surf)
if exponent is not None:
threshold_value = (1 / (threshold ** exponent))
external_threshold_value = threshold_value
elif std is not None:
threshold_value = gauss(threshold, std)
external_threshold_value = gauss(std, std) # this is around 0.607
lg.debug('Values thresholded at ' + str(threshold_value))
xyz2surf[xyz2surf < threshold_value] = NaN
    # here we deal with vertices that are within the threshold value but far
    # from any single electrode, so those remain empty
sumval = nansum(xyz2surf, axis=1)
sumval[sumval < external_threshold_value] = NaN
# normalize by the number of electrodes
xyz2surf /= atleast_2d(sumval).T
xyz2surf[isnan(xyz2surf)] = 0
return xyz2surf | 0.000613 |
def aggregate_region(self, variable, region='World', subregions=None,
components=None, append=False):
"""Compute the aggregate of timeseries over a number of regions
including variable components only defined at the `region` level
Parameters
----------
variable: str
variable for which the aggregate should be computed
region: str, default 'World'
dimension
subregions: list of str
list of subregions, defaults to all regions other than `region`
components: list of str
list of variables, defaults to all sub-categories of `variable`
included in `region` but not in any of `subregions`
append: bool, default False
append the aggregate timeseries to `data` and return None,
else return aggregate timeseries
"""
# default subregions to all regions other than `region`
if subregions is None:
rows = self._apply_filters(variable=variable)
subregions = set(self.data[rows].region) - set([region])
if not len(subregions):
msg = 'cannot aggregate variable `{}` to `{}` because it does not'\
' exist in any subregion'
logger().info(msg.format(variable, region))
return
# compute aggregate over all subregions
subregion_df = self.filter(region=subregions)
cols = ['region', 'variable']
_data = _aggregate(subregion_df.filter(variable=variable).data, cols)
# add components at the `region` level, defaults to all variables one
# level below `variable` that are only present in `region`
region_df = self.filter(region=region)
components = components or (
set(region_df._variable_components(variable)).difference(
subregion_df._variable_components(variable)))
if len(components):
rows = region_df._apply_filters(variable=components)
_data = _data.add(_aggregate(region_df.data[rows], cols),
fill_value=0)
if append is True:
self.append(_data, region=region, variable=variable, inplace=True)
else:
return _data | 0.001311 |
def create_chan_labels(self):
"""Create the channel labels, but don't plot them yet.
Notes
-----
It's necessary to have the width of the labels, so that we can adjust
the main scene.
"""
self.idx_label = []
for one_grp in self.parent.channels.groups:
for one_label in one_grp['chan_to_plot']:
item = QGraphicsSimpleTextItem(one_label)
item.setBrush(QBrush(QColor(one_grp['color'])))
item.setFlag(QGraphicsItem.ItemIgnoresTransformations)
self.idx_label.append(item) | 0.0033 |
def bind(self, data_shapes, label_shapes=None, for_training=True,
inputs_need_grad=False, force_rebind=False, shared_module=None,
grad_req='write'):
"""Binds the symbols to construct executors. This is necessary before one
can perform computation with the module.
Parameters
----------
data_shapes : list of (str, tuple)
Typically is `data_iter.provide_data`.
label_shapes : list of (str, tuple)
Typically is `data_iter.provide_label`.
for_training : bool
Default is ``True``. Whether the executors should be bind for training.
inputs_need_grad : bool
Default is ``False``. Whether the gradients to the input data need to be computed.
Typically this is not needed. But this might be needed when implementing composition
of modules.
force_rebind : bool
Default is ``False``. This function does nothing if the executors are already
bound. But with this ``True``, the executors will be forced to rebind.
shared_module : Module
Default is ``None``. Currently shared module is not supported for `SequentialModule`.
grad_req : str, list of str, dict of str to str
Requirement for gradient accumulation. Can be 'write', 'add', or 'null'
(default to 'write').
Can be specified globally (str) or for each argument (list, dict).
"""
if self.binded and not force_rebind:
self.logger.warning('Already bound, ignoring bind()')
return
if inputs_need_grad:
assert for_training is True
assert shared_module is None, 'Shared module is not supported'
assert len(self._modules) > 0, 'Attempting to bind an empty SequentialModule'
self.binded = True
# the same label shapes are used for all chained modules
self._label_shapes = label_shapes
my_data_shapes = data_shapes
anybody_ever_needs_label = False
for i_layer, module in enumerate(self._modules):
meta = self._metas[i_layer]
if SequentialModule.META_TAKE_LABELS in meta and \
meta[SequentialModule.META_TAKE_LABELS]:
my_label_shapes = label_shapes
anybody_ever_needs_label = True
else:
my_label_shapes = None
my_inputs_need_grad = bool(inputs_need_grad or
(for_training and i_layer > 0))
if meta.get(SequentialModule.META_AUTO_WIRING, False):
data_names = module.data_names
assert len(data_names) == len(my_data_shapes)
my_data_shapes = [(new_name, shape) for (new_name, (_, shape))
in zip(data_names, my_data_shapes)]
module.bind(data_shapes=my_data_shapes, label_shapes=my_label_shapes,
for_training=for_training, inputs_need_grad=my_inputs_need_grad,
force_rebind=force_rebind, shared_module=None, grad_req=grad_req)
# the output of the previous module is the data of the next module
my_data_shapes = module.output_shapes
if not anybody_ever_needs_label:
# then I do not need label either
self._label_shapes = None | 0.004693 |
def fcoe_fcoe_map_fcoe_map_fabric_map_fcoe_map_fabric_map_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fcoe = ET.SubElement(config, "fcoe", xmlns="urn:brocade.com:mgmt:brocade-fcoe")
fcoe_map = ET.SubElement(fcoe, "fcoe-map")
fcoe_map_name_key = ET.SubElement(fcoe_map, "fcoe-map-name")
fcoe_map_name_key.text = kwargs.pop('fcoe_map_name')
fcoe_map_fabric_map = ET.SubElement(fcoe_map, "fcoe-map-fabric-map")
fcoe_map_fabric_map_name = ET.SubElement(fcoe_map_fabric_map, "fcoe-map-fabric-map-name")
fcoe_map_fabric_map_name.text = kwargs.pop('fcoe_map_fabric_map_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.005168 |
def get_ldap(cls, global_options=None):
"""
Returns the configured ldap module.
"""
# Apply global LDAP options once
if not cls._ldap_configured and global_options is not None:
for opt, value in global_options.items():
ldap.set_option(opt, value)
cls._ldap_configured = True
return ldap | 0.005319 |
def __put_names_on_csv_cols(names, cols):
"""
Put the variableNames with the corresponding column data.
:param list names: variableNames
:param list cols: List of Lists of column data
:return dict:
"""
_combined = {}
for idx, name in enumerate(names):
# Use the variableName, and the column data from the same index
_combined[name] = cols[idx]
return _combined | 0.006536 |
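# Hedged usage sketch: pair each variableName with the column data at the same
# index (the names and values below are illustrative).
_names = ['depth', 'temp']
_cols = [[0, 1, 2], [10.1, 10.3, 10.2]]
print(__put_names_on_csv_cols(_names, _cols))
# {'depth': [0, 1, 2], 'temp': [10.1, 10.3, 10.2]}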
def process_allele(allele_data, dbsnp_data, header, reference):
"""Combine data from multiple lines refering to a single allele.
Returns three items in this order:
(string) concatenated variant sequence (ie allele the genome has)
(string) concatenated reference sequence
(string) start position (1-based)
"""
# One-based start to match VCF coordinates
start = str(int(allele_data[0][header['begin']]))
var_allele = ''
ref_allele = ''
filters = []
for data in allele_data:
if 'varQuality' in header:
if 'VQLOW' in data[header['varQuality']]:
filters.append('VQLOW')
else:
var_filter = data[header['varFilter']]
if var_filter and not var_filter == "PASS":
filters = filters + var_filter.split(';')
if data[header['varType']] == 'no-call':
filters = ['NOCALL']
ref_allele = ref_allele + data[header['reference']]
continue
var_allele = var_allele + data[header['alleleSeq']]
ref_allele = ref_allele + data[header['reference']]
if data[header['xRef']]:
for dbsnp_item in data[header['xRef']].split(';'):
dbsnp_data.append(dbsnp_item.split(':')[1])
# It's theoretically possible to break up a partial no-call allele into
# separated gVCF lines, but it's hard. Treat the whole allele as no-call.
if 'NOCALL' in filters:
filters = ['NOCALL']
var_allele = '?'
return var_allele, ref_allele, start, filters | 0.000637 |
def process_tcase(tcase):
"""Goes through the trun and processes "run.log" """
tcase["src_content"] = src_to_html(tcase["fpath"])
tcase["log_content"] = runlogs_to_html(tcase["res_root"])
tcase["aux_list"] = aux_listing(tcase["aux_root"])
tcase["descr_short"], tcase["descr_long"] = tcase_parse_descr(tcase)
tcase["hnames"] = extract_hook_names(tcase)
return True | 0.002545 |
def TextToAttachmentStatus(self, Text):
"""Returns attachment status code.
:Parameters:
Text : unicode
Text, one of 'UNKNOWN', 'SUCCESS', 'PENDING_AUTHORIZATION', 'REFUSED', 'NOT_AVAILABLE',
'AVAILABLE'.
:return: Attachment status.
:rtype: `enums`.apiAttach*
"""
conv = {'UNKNOWN': enums.apiAttachUnknown,
'SUCCESS': enums.apiAttachSuccess,
'PENDING_AUTHORIZATION': enums.apiAttachPendingAuthorization,
'REFUSED': enums.apiAttachRefused,
'NOT_AVAILABLE': enums.apiAttachNotAvailable,
'AVAILABLE': enums.apiAttachAvailable}
try:
return self._TextTo('api', conv[Text.upper()])
except KeyError:
raise ValueError('Bad text') | 0.003641 |
def Match(pattern, s):
"""Matches the string with the pattern, caching the compiled regexp."""
# The regexp compilation caching is inlined in both Match and Search for
# performance reasons; factoring it out into a separate function turns out
# to be noticeably expensive.
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].match(s) | 0.015982 |
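# Hedged usage sketch for Match (sre_compile and the module-level
# _regexp_compile_cache dict are assumed to exist as in the snippet above):
# like re.match, the pattern is anchored at the start of the string.
print(bool(Match(r'\d+', '123abc')))  # True
print(bool(Match(r'\d+', 'abc123')))  # False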
def parse_args():
'''Parse args
'''
    parser = argparse.ArgumentParser(description='Train and Test an Adversarial Variational Encoder')
parser.add_argument('--train', help='train the network', action='store_true')
parser.add_argument('--test', help='test the network', action='store_true')
parser.add_argument('--save_embedding', help='saves the shape embedding of each input image', action='store_true')
parser.add_argument('--dataset', help='dataset name', default='caltech', type=str)
parser.add_argument('--activation', help='activation i.e. sigmoid or tanh', default='sigmoid', type=str)
parser.add_argument('--training_data_path', help='training data path', default='datasets/caltech101/data/images32x32', type=str)
parser.add_argument('--testing_data_path', help='testing data path', default='datasets/caltech101/test_data', type=str)
parser.add_argument('--pretrained_encoder_path', help='pretrained encoder model path', default='checkpoints32x32_sigmoid/caltech_E-0045.params', type=str)
parser.add_argument('--pretrained_generator_path', help='pretrained generator model path', default='checkpoints32x32_sigmoid/caltech_G-0045.params', type=str)
parser.add_argument('--output_path', help='output path for the generated images', default='outputs32x32_sigmoid', type=str)
parser.add_argument('--embedding_path', help='output path for the generated embeddings', default='outputs32x32_sigmoid', type=str)
parser.add_argument('--checkpoint_path', help='checkpoint saving path ', default='checkpoints32x32_sigmoid', type=str)
parser.add_argument('--nef', help='encoder filter count in the first layer', default=64, type=int)
parser.add_argument('--ndf', help='discriminator filter count in the first layer', default=64, type=int)
parser.add_argument('--ngf', help='generator filter count in the second last layer', default=64, type=int)
parser.add_argument('--nc', help='generator filter count in the last layer i.e. 1 for grayscale image, 3 for RGB image', default=1, type=int)
parser.add_argument('--batch_size', help='batch size, keep it 1 during testing', default=64, type=int)
parser.add_argument('--Z', help='embedding size', default=100, type=int)
parser.add_argument('--lr', help='learning rate', default=0.0002, type=float)
parser.add_argument('--beta1', help='beta1 for adam optimizer', default=0.5, type=float)
parser.add_argument('--epsilon', help='epsilon for adam optimizer', default=1e-5, type=float)
parser.add_argument('--g_dl_weight', help='discriminator layer loss weight', default=1e-1, type=float)
parser.add_argument('--gpu', help='gpu index', default=0, type=int)
parser.add_argument('--use_cpu', help='use cpu', action='store_true')
parser.add_argument('--num_epoch', help='number of maximum epochs ', default=45, type=int)
parser.add_argument('--save_after_every', help='save checkpoint after every this number of epochs ', default=5, type=int)
parser.add_argument('--visualize_after_every', help='save output images after every this number of epochs', default=5, type=int)
parser.add_argument('--show_after_every', help='show metrics after this number of iterations', default=10, type=int)
args = parser.parse_args()
return args | 0.0079 |
def is_commit_id_equal(self, dest, name):
"""
Return whether the current commit hash equals the given name.
Args:
dest: the repository directory.
name: a string name.
"""
if not name:
# Then avoid an unnecessary subprocess call.
return False
return self.get_revision(dest) == name | 0.005348 |
def _index_counter_keys(self, counter, unknown_token, reserved_tokens, most_freq_count,
min_freq):
"""Indexes keys of `counter`.
Indexes keys of `counter` according to frequency thresholds such as `most_freq_count` and
`min_freq`.
"""
assert isinstance(counter, collections.Counter), \
'`counter` must be an instance of collections.Counter.'
unknown_and_reserved_tokens = set(reserved_tokens) if reserved_tokens is not None else set()
unknown_and_reserved_tokens.add(unknown_token)
token_freqs = sorted(counter.items(), key=lambda x: x[0])
token_freqs.sort(key=lambda x: x[1], reverse=True)
token_cap = len(unknown_and_reserved_tokens) + (
len(counter) if most_freq_count is None else most_freq_count)
for token, freq in token_freqs:
if freq < min_freq or len(self._idx_to_token) == token_cap:
break
if token not in unknown_and_reserved_tokens:
self._idx_to_token.append(token)
self._token_to_idx[token] = len(self._idx_to_token) - 1 | 0.005208 |
def plugins(self):
"""
Newest version of all plugins in the group filtered by ``blacklist``
Returns:
dict: Nested dictionary of plugins accessible through dot-notation.
Plugins are returned in a nested dictionary, but can also be accessed through dot-notion.
Just as when accessing an undefined dictionary key with index-notation,
a :py:exc:`KeyError` will be raised if the plugin type or plugin does not exist.
Parent types are always included.
Child plugins will only be included if a valid, non-blacklisted plugin is available.
"""
if not self.loaded:
self.load_modules()
# pylint: disable=protected-access
return get_plugins()[self.group]._filter(blacklist=self.blacklist, newest_only=True,
type_filter=self.type_filter) | 0.006682 |
def config(ctx):
"""[GROUP] Configuration management operations"""
from hfos import database
database.initialize(ctx.obj['dbhost'], ctx.obj['dbname'])
from hfos.schemata.component import ComponentConfigSchemaTemplate
ctx.obj['col'] = model_factory(ComponentConfigSchemaTemplate) | 0.003333 |
def init_log(logger, filename=None, loglevel=None):
"""
Initializes the log file in the proper format.
Arguments:
filename (str): Path to a file. Or None if logging is to
be disabled.
loglevel (str): Determines the level of the log output.
"""
formatter = logging.Formatter(
'[%(asctime)s] %(levelname)s: %(name)s: %(message)s'
)
if loglevel:
logger.setLevel(getattr(logging, loglevel))
    # We will always print warnings and higher to stderr
ch = logging.StreamHandler()
ch.setLevel('WARNING')
ch.setFormatter(formatter)
if filename:
fi = logging.FileHandler(filename, encoding='utf-8')
if loglevel:
fi.setLevel(getattr(logging, loglevel))
fi.setFormatter(formatter)
logger.addHandler(fi)
# If no logfile is provided we print all log messages that the user has
# defined to stderr
else:
if loglevel:
ch.setLevel(getattr(logging, loglevel))
logger.addHandler(ch) | 0.006381 |
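# Hedged usage sketch for init_log: with no filename, messages at the chosen
# level and above go to stderr via the StreamHandler branch above.
import logging
_log = logging.getLogger('example')
init_log(_log, filename=None, loglevel='INFO')
_log.info('logger configured')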
def retrieve_file_from_url(url):
"""
    Retrieve a file from a URL
Args:
url: The URL to retrieve the file from.
Returns:
The absolute path of the downloaded file.
"""
try:
alias_source, _ = urlretrieve(url)
# Check for HTTPError in Python 2.x
with open(alias_source, 'r') as f:
content = f.read()
if content[:3].isdigit():
raise CLIError(ALIAS_FILE_URL_ERROR.format(url, content.strip()))
except Exception as exception:
if isinstance(exception, CLIError):
raise
# Python 3.x
raise CLIError(ALIAS_FILE_URL_ERROR.format(url, exception))
return alias_source | 0.002837 |
def fpn_map_rois_to_levels(boxes):
"""
Assign boxes to level 2~5.
Args:
boxes (nx4):
Returns:
[tf.Tensor]: 4 tensors for level 2-5. Each tensor is a vector of indices of boxes in its level.
[tf.Tensor]: 4 tensors, the gathered boxes in each level.
Be careful that the returned tensor could be empty.
"""
sqrtarea = tf.sqrt(tf_area(boxes))
level = tf.cast(tf.floor(
4 + tf.log(sqrtarea * (1. / 224) + 1e-6) * (1.0 / np.log(2))), tf.int32)
# RoI levels range from 2~5 (not 6)
level_ids = [
tf.where(level <= 2),
tf.where(tf.equal(level, 3)), # == is not supported
tf.where(tf.equal(level, 4)),
tf.where(level >= 5)]
level_ids = [tf.reshape(x, [-1], name='roi_level{}_id'.format(i + 2))
for i, x in enumerate(level_ids)]
num_in_levels = [tf.size(x, name='num_roi_level{}'.format(i + 2))
for i, x in enumerate(level_ids)]
add_moving_summary(*num_in_levels)
level_boxes = [tf.gather(boxes, ids) for ids in level_ids]
return level_ids, level_boxes | 0.002703 |
def _get_api_params(api_url=None,
page_id=None,
api_key=None,
api_version=None):
'''
Retrieve the API params from the config file.
'''
statuspage_cfg = __salt__['config.get']('statuspage')
if not statuspage_cfg:
statuspage_cfg = {}
return {
'api_url': api_url or statuspage_cfg.get('api_url') or BASE_URL, # optional
'api_page_id': page_id or statuspage_cfg.get('page_id'), # mandatory
'api_key': api_key or statuspage_cfg.get('api_key'), # mandatory
'api_version': api_version or statuspage_cfg.get('api_version') or DEFAULT_VERSION
} | 0.004505 |
def _get_uploaded_versions_warehouse(project_name, index_url, requests_verify=True):
""" Query the pypi index at index_url using warehouse api to find all of the "releases" """
url = '/'.join((index_url, project_name, 'json'))
response = requests.get(url, verify=requests_verify)
if response.status_code == 200:
return response.json()['releases'].keys()
return None | 0.007634 |
def update(data_df, cal_dict, param, bound, start, end):
    '''Update calibration times for a given parameter and boundary'''
from collections import OrderedDict
if param not in cal_dict['parameters']:
cal_dict['parameters'][param] = OrderedDict()
if bound not in cal_dict['parameters'][param]:
cal_dict['parameters'][param][bound] = OrderedDict()
cal_dict['parameters'][param][bound]['start'] = start
cal_dict['parameters'][param][bound]['end'] = end
return cal_dict | 0.003922 |
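# Hedged usage sketch for update(): only the nested 'parameters' mapping of
# cal_dict is touched, so a minimal dict is enough (data_df is unused here).
_cal = {'parameters': {}}
_cal = update(None, _cal, 'depth', 'lower', '2015-01-01 00:00', '2015-01-01 01:00')
print(_cal['parameters']['depth']['lower'])
# OrderedDict([('start', '2015-01-01 00:00'), ('end', '2015-01-01 01:00')])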
def _write_summary_cnts(self, cnts):
"""Write summary of level and depth counts for active GO Terms."""
# Count level(shortest path to root) and depth(longest path to root)
# values for all unique GO Terms.
max_val = max(max(dep for dep in cnts['depth']),
max(lev for lev in cnts['level']))
self.log.write('Dep <-Depth Counts-> <-Level Counts->\n')
self.log.write('Lev BP MF CC BP MF CC\n')
self.log.write('--- ---- ---- ---- ---- ---- ----\n')
for i in range(max_val+1):
vals = ['{:>5}'.format(cnts[desc][i][ns]) for desc in sorted(cnts) for ns in self.nss]
self.log.write('{:>02} {}\n'.format(i, ' '.join(vals))) | 0.004016 |
def get_fields(model, fields, meta=None):
"""
Acording to model and fields to get fields list
Each field element is a two elements tuple, just like:
(name, field_obj)
"""
model = get_model(model)
if fields is not None:
f = fields
elif meta and hasattr(model, meta):
m = getattr(model, meta)
if hasattr(m, 'fields'):
f = m.fields
else:
f = model._fields_list
else:
f = model._fields_list
fields_list = []
for x in f:
field = {}
if isinstance(x, str): #so x is field_name
field['name'] = x
elif isinstance(x, tuple):
field['name'] = x[0]
field['field'] = x[1]
elif isinstance(x, dict):
field = x.copy()
else:
raise UliwebError('Field definition {!r} is not right, it should be just like (field_name, form_field_obj)'.format(x))
if 'prop' not in field:
if hasattr(model, field['name']):
field['prop'] = getattr(model, field['name'])
else:
field['prop'] = None
fields_list.append((field['name'], field))
return fields_list | 0.004717 |
def get_nnsoap(obj, first_shell, alphas, betas, rcut=6, nmax=10, lmax=9, all_atomtypes=[]):
"""Takes cluster structure and nearest neighbour information of a datapoint,
Returns concatenated soap vectors for each nearest
neighbour (up to 3). Top, bridge, hollow fill the initial
zero soap vector from left to right.
"""
soap_vector = []
nnn = len(first_shell)
for tbh in range(0,3):
        try:
            atom_idx = first_shell[tbh]
        except IndexError:
            # missing neighbour: pad with a zero vector shaped like a computed
            # SOAP vector (soap_zero is only defined after a successful
            # computation in an earlier iteration)
            soap_vector.append(soap_zero)
else:
Hpos = []
print(atom_idx)
pos = obj.get_positions()[atom_idx]
Hpos.append(pos)
            # use the alphas/betas passed in as arguments (myAlphas/myBetas are not defined in this scope)
            x = soapPy.get_soap_locals(obj, Hpos, alphas, betas, rCut=rcut, NradBas=nmax, Lmax=lmax, crossOver=False, all_atomtypes=all_atomtypes)
soap_zero = np.zeros(x.shape)
soap_vector.append(x)
print(len(soap_vector), soap_vector[0].shape, soap_vector[1].shape, soap_vector[2].shape)
print("exemplary soapvalues",soap_vector[0][0,1], soap_vector[1][0,1], soap_vector[2][0,1])
soap_array = np.hstack(soap_vector)
return soap_array | 0.011364 |
def _decrypt_data(self, data, options):
'''Decrypt data'''
if options['encryption_algorithm_id'] not in self.encryption_algorithms:
raise Exception('Unknown encryption algorithm id: %d'
% options['encryption_algorithm_id'])
encryption_algorithm = \
self.encryption_algorithms[options['encryption_algorithm_id']]
algorithm = self._get_algorithm_info(encryption_algorithm)
key_salt = ''
if algorithm['salt_size']:
key_salt = data[-algorithm['salt_size']:]
data = data[:-algorithm['salt_size']]
key = self._generate_key(options['encryption_passphrase_id'],
self.encryption_passphrases, key_salt, algorithm)
data = self._decode(data, algorithm, key)
return data | 0.004762 |
def append_lookup_key(model, lookup_key):
"Transform spanned__lookup__key into all possible translation versions, on all levels"
pieces = lookup_key.split('__', 1)
fields = append_translated(model, (pieces[0],))
if len(pieces) > 1:
# Check if we are doing a lookup to a related trans model
fields_to_trans_models = get_fields_to_translatable_models(model)
if pieces[0] in fields_to_trans_models:
transmodel = fields_to_trans_models[pieces[0]]
rest = append_lookup_key(transmodel, pieces[1])
fields = set('__'.join(pr) for pr in itertools.product(fields, rest))
else:
fields = set('%s__%s' % (f, pieces[1]) for f in fields)
return fields | 0.004065 |
def can_access_api(self):
"""
:return: True when we can access the REST API
"""
try:
version_dict = self.get_version()
        except Exception as e:
msg = 'An exception was raised when connecting to REST API: "%s"'
raise APIException(msg % e)
else:
"""
This is an example response from the REST API
{
"branch": "develop",
"dirty": "Yes",
"revision": "f1cae98161 - 24 Jun 2015 16:29",
"version": "1.7.2"
}
"""
if 'version' in version_dict:
# Yup, this looks like a w3af REST API
return True
msg = 'Unexpected HTTP response when connecting to REST API'
raise APIException(msg) | 0.002375 |
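A hedged, self-contained sketch of calling can_access_api(); the real client class is not shown above, so the stub below fakes get_version() with the example response quoted in the docstring and binds the function as a method (this assumes the Python 3 except-clause syntax used above).

class _StubClient:
    can_access_api = can_access_api  # bind the function defined above as a method

    def get_version(self):
        # Mimics the example REST API response quoted in the docstring.
        return {'branch': 'develop', 'dirty': 'Yes',
                'revision': 'f1cae98161 - 24 Jun 2015 16:29', 'version': '1.7.2'}

print(_StubClient().can_access_api())  # -> True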
def gen_file_path(self, name):
"""
Returns full path to generated files. Checks to see if directory
exists where generated files are stored and creates one otherwise.
"""
relative_path = self.convert_path(name)
        file_path = self.get_path("%s.ipynb" % relative_path)
        parent_path = rec_apply(os.path.dirname, self.gen_file_level)(file_path)
        gen_file_name = name if isinstance(name, str) else name[1]  # Name of generated file
        gen_dir_path = self.get_path(os.path.join(parent_path, self.gen_dir_name))
        if not os.path.exists(gen_dir_path):  # Create folder for generated files if needed
            os.makedirs(gen_dir_path)
        new_file_path = self.get_path('%s.py' % os.path.join(gen_dir_path, gen_file_name))
return new_file_path | 0.01722 |
def split_merged_reads(outhandles, input_derep):
"""
Takes merged/concat derep file from vsearch derep and split it back into
separate R1 and R2 parts.
    - outhandles: a list of the two file paths to write out to.
    - input_derep: the path to the input merged reads
"""
handle1, handle2 = outhandles
splitderep1 = open(handle1, 'w')
splitderep2 = open(handle2, 'w')
with open(input_derep, 'r') as infile:
## Read in the infile two lines at a time: (seqname, sequence)
        duo = zip(*[iter(infile)] * 2)
## lists for storing results until ready to write
split1s = []
split2s = []
## iterate over input splitting, saving, and writing.
idx = 0
        for itera in duo:
## split the duo into separate parts and inc counter
part1, part2 = itera[1].split("nnnn")
idx += 1
## R1 needs a newline, but R2 inherits it from the original file
## store parts in lists until ready to write
split1s.append("{}{}\n".format(itera[0], part1))
split2s.append("{}{}".format(itera[0], part2))
## if large enough then write to file
if not idx % 10000:
splitderep1.write("".join(split1s))
splitderep2.write("".join(split2s))
split1s = []
split2s = []
## write final chunk if there is any
if any(split1s):
splitderep1.write("".join(split1s))
splitderep2.write("".join(split2s))
## close handles
splitderep1.close()
splitderep2.close() | 0.008616 |
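A self-contained usage sketch for split_merged_reads(), assuming the Python 3 form of the function shown above: it writes one fake dereplicated record containing the 'nnnn' separator, splits it, and prints the two halves. All paths and sequences are invented.

import os
import tempfile

tmpdir = tempfile.mkdtemp()
merged = os.path.join(tmpdir, 'sample_merged.derep')
r1_out = os.path.join(tmpdir, 'sample_R1.derep')
r2_out = os.path.join(tmpdir, 'sample_R2.derep')

# One fake record: a header line followed by R1 + 'nnnn' + R2 on one line.
with open(merged, 'w') as out:
    out.write('>read1;size=2;\nACGTACGTnnnnTTGGCCAA\n')

split_merged_reads((r1_out, r2_out), merged)
with open(r1_out) as f1, open(r2_out) as f2:
    print(f1.read())  # '>read1;size=2;' then 'ACGTACGT'
    print(f2.read())  # '>read1;size=2;' then 'TTGGCCAA'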
def set_executing(on: bool):
"""
Toggle whether or not the current thread is executing a step file. This
will only apply when the current thread is a CauldronThread. This function
has no effect when run on a Main thread.
:param on:
Whether or not the thread should be annotated as executing a step file.
"""
my_thread = threading.current_thread()
if isinstance(my_thread, threads.CauldronThread):
my_thread.is_executing = on | 0.002105 |
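A hedged sketch of bracketing work with set_executing(); it assumes the module-level imports the original source provides (threading and cauldron's threads module), and run_step() is a hypothetical stand-in for whatever actually executes the step file.

def run_step():
    """Hypothetical placeholder for executing a step file."""
    pass

set_executing(True)
try:
    run_step()
finally:
    # Always clear the flag, even if the step raised.
    set_executing(False)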