text (string, lengths 78–104k) | score (float64, 0–0.18) |
---|---|
def agp(args):
"""
%prog agp Siirt_Female_pistachio_23May2017_table.txt
The table file, as prepared by Dovetail Genomics, is not immediately useful
to convert gene model coordinates, as assumed by formats.chain.fromagp().
This is a quick script to do such conversion. The file structure of this
table file is described in the .manifest file shipped in the same package::
pistachio_b_23May2017_MeyIy.table.txt
Tab-delimited table describing positions of input assembly scaffolds
in the Hirise scaffolds. The table has the following format:
1. HiRise scaffold name
2. Input sequence name
3. Starting base (zero-based) of the input sequence
4. Ending base of the input sequence
5. Strand (- or +) of the input sequence in the scaffold
6. Starting base (zero-based) in the HiRise scaffold
7. Ending base in the HiRise scaffold
where '-' in the strand column indicates that the sequence is reverse
complemented relative to the input assembly.
CAUTION: This is NOT a proper AGP format since it does not have gap
lines in it.
"""
p = OptionParser(agp.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
tablefile, = args
fp = open(tablefile)
for row in fp:
atoms = row.split()
hr = atoms[0]
scaf = atoms[1]
scaf_start = int(atoms[2]) + 1
scaf_end = int(atoms[3])
strand = atoms[4]
hr_start = int(atoms[5]) + 1
hr_end = int(atoms[6])
print("\t".join(str(x) for x in \
(hr, hr_start, hr_end, 1, 'W',
scaf, scaf_start, scaf_end, strand))) | 0.001702 |
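A minimal sketch of the conversion done above, using a single made-up Dovetail-style table row (all names and coordinates are hypothetical): the zero-based scaffold placement becomes a 1-based, 9-column AGP-like line.
# Hypothetical input row: HiRise scaffold, input scaffold, 0-based start/end,
# strand, 0-based HiRise start/end
row = "HiRise_scaffold_1\tscaffold_12\t0\t150000\t+\t0\t150000"
hr, scaf, s0, s1, strand, h0, h1 = row.split()
agp_line = "\t".join(str(x) for x in (
    hr, int(h0) + 1, int(h1), 1, "W", scaf, int(s0) + 1, int(s1), strand))
print(agp_line)  # HiRise_scaffold_1  1  150000  1  W  scaffold_12  1  150000  +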
def catch(ignore=[],
was_doing="something important",
helpfull_tips="you should use a debugger",
gbc=None):
"""
Catch, prepare and log the current exception (taken from ``sys.exc_info()``).
:param ignore: exception classes that should not be logged
:param was_doing: description of the action that failed
:param helpfull_tips: hint printed to help with debugging
:param gbc: logger-like object providing ``say``/``cry`` methods
"""
exc_cls, exc, tb=sys.exc_info()
if exc_cls in ignore:
msg='exception in ignorelist'
gbc.say('ignoring caught:'+str(exc_cls))
return 'exception in ignorelist'
ex_message = traceback.format_exception_only(exc_cls, exc)[-1]
ex_message = ex_message.strip()
# TODO: print(ex_message)
error_frame = tb
while error_frame.tb_next is not None:
error_frame = error_frame.tb_next
file = error_frame.tb_frame.f_code.co_filename
line = error_frame.tb_lineno
stack = traceback.extract_tb(tb)
formated_stack = []
for summary in stack:
formated_stack.append({
'file': summary[0],
'line': summary[1],
'func': summary[2],
'text': summary[3]
})
event = {
'was_doing':was_doing,
'message': ex_message,
'errorLocation': {
'file': file,
'line': line,
'full': file + ' -> ' + str(line)
},
'stack': formated_stack
#,
#'time': time.time()
}
try:
#logging.info('caught:'+pformat(event))
gbc.cry('caught:'+pformat(event))
print('Bubble3: written error to log')
print('Bubble3: tips for fixing this:')
print(helpfull_tips)
except Exception as e:
print('Bubble3: cant log error cause of %s' % e) | 0.005995 |
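A usage sketch for catch() above, assuming the module already imports sys, traceback and pformat; the gbc argument here is a made-up logger stub, since the function calls gbc.say()/gbc.cry() and would fail with the default gbc=None.
class _StubLogger:
    # stand-in for the expected gbc object
    def say(self, msg):
        print(msg)
    def cry(self, msg):
        print(msg)

try:
    1 / 0
except Exception:
    # catch() reads sys.exc_info(), so call it from inside the except block
    catch(ignore=[KeyboardInterrupt],
          was_doing="dividing numbers",
          helpfull_tips="check the divisor",
          gbc=_StubLogger())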
def _get_var_from_string(item):
""" Get resource variable. """
modname, varname = _split_mod_var_names(item)
if modname:
mod = __import__(modname, globals(), locals(), [varname], -1)
return getattr(mod, varname)
else:
return globals()[varname] | 0.003534 |
def isnat(obj):
"""
Check if a value is np.NaT.
"""
if obj.dtype.kind not in ('m', 'M'):
raise ValueError("%s is not a numpy datetime or timedelta" % obj.dtype)
return obj.view(int64_dtype) == iNaT | 0.004695 |
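A small usage sketch for isnat(); it assumes the module defines int64_dtype and iNaT (NumPy's NaT sentinel, the minimum int64), which are not shown in the snippet.
import numpy as np

int64_dtype = np.dtype('int64')     # assumed module-level constant
iNaT = np.iinfo(np.int64).min       # assumed NaT sentinel (-2**63)

arr = np.array(['2020-01-01', 'NaT'], dtype='datetime64[ns]')
print(isnat(arr))                   # [False  True]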
def compare_values(values0, values1):
"""Compares all the values of a single registry key."""
values0 = {v[0]: v[1:] for v in values0}
values1 = {v[0]: v[1:] for v in values1}
created = [(k, v[0], v[1]) for k, v in values1.items() if k not in values0]
deleted = [(k, v[0], v[1]) for k, v in values0.items() if k not in values1]
modified = [(k, v[0], v[1]) for k, v in values0.items()
if v != values1.get(k, None)]
return created, deleted, modified | 0.002028 |
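An illustrative call with made-up (name, type, data) registry value tuples; note that, as written, deleted keys also show up in the modified list because values1.get(k) is None for them.
old = [("Path", "REG_SZ", "C:\\old"), ("Level", "REG_DWORD", 1)]
new = [("Path", "REG_SZ", "C:\\new"), ("Debug", "REG_DWORD", 0)]
created, deleted, modified = compare_values(old, new)
print(created)   # [('Debug', 'REG_DWORD', 0)]
print(deleted)   # [('Level', 'REG_DWORD', 1)]
print(modified)  # [('Path', 'REG_SZ', 'C:\\old'), ('Level', 'REG_DWORD', 1)]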
def _GetProxies(self):
"""Gather a list of proxies to use."""
# Detect proxies from the OS environment.
result = client_utils.FindProxies()
# Also try to connect directly if all proxies fail.
result.append("")
# Also try all proxies configured in the config system.
result.extend(config.CONFIG["Client.proxy_servers"])
return result | 0.002725 |
def list_components(self, dependency_order=True):
"""
Lists the Components by resolving their dependencies.
Usage::
>>> manager = Manager(("./manager/tests/tests_manager/resources/components/core",))
>>> manager.register_components()
True
>>> manager.list_components()
[u'core.tests_component_a', u'core.tests_component_b']
:param dependency_order: Components are returned by dependency order.
:type dependency_order: bool
"""
if dependency_order:
return list(itertools.chain.from_iterable([sorted(list(batch)) for batch in
foundations.common.dependency_resolver(
dict((key, value.require) for (key, value) in self))]))
else:
return [key for (key, value) in self] | 0.006515 |
def ncp_hals(X, rank, random_state=None, init='rand', **options):
"""
Fits a nonnegative CP Decomposition using the Hierarchical Alternating Least
Squares (HALS) Method.
Parameters
----------
X : (I_1, ..., I_N) array_like
A real array with nonnegative entries and ``X.ndim >= 3``.
rank : integer
The `rank` sets the number of components to be computed.
random_state : integer, RandomState instance or None, optional (default ``None``)
If integer, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used by np.random.
init : str, or KTensor, optional (default ``'rand'``).
Specifies initial guess for KTensor factor matrices.
If ``'randn'``, Gaussian random numbers are used to initialize.
If ``'rand'``, uniform random numbers are used to initialize.
If KTensor instance, a copy is made to initialize the optimization.
options : dict, specifying fitting options.
tol : float, optional (default ``tol=1E-5``)
Stopping tolerance for reconstruction error.
max_iter : integer, optional (default ``max_iter = 500``)
Maximum number of iterations to perform before exiting.
min_iter : integer, optional (default ``min_iter = 1``)
Minimum number of iterations to perform before exiting.
max_time : integer, optional (default ``max_time = np.inf``)
Maximum computational time before exiting.
verbose : bool ``{'True', 'False'}``, optional (default ``verbose=True``)
Display progress.
Returns
-------
result : FitResult instance
Object which holds the fitted results. It provides the factor matrices
in form of a KTensor, ``result.factors``.
Notes
-----
This implementation uses the Hierarchical Alternating Least Squares Method.
References
----------
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
Examples
--------
"""
# Check inputs.
optim_utils._check_cpd_inputs(X, rank)
# Initialize problem.
U, normX = optim_utils._get_initial_ktensor(init, X, rank, random_state)
result = FitResult(U, 'NCP_HALS', **options)
# Store problem dimensions.
normX = linalg.norm(X)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Iterate the HALS algorithm until convergence or maxiter is reached
# i) compute the N gram matrices and multiply
# ii) Compute Khatri-Rao product
# iii) Update component U_1, U_2, ... U_N
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
while result.still_optimizing:
violation = 0.0
for n in range(X.ndim):
# Select all components, but U_n
components = [U[j] for j in range(X.ndim) if j != n]
# i) compute the N-1 gram matrices
grams = sci.multiply.reduce([arr.T.dot(arr) for arr in components])
# ii) Compute Khatri-Rao product
kr = khatri_rao(components)
p = unfold(X, n).dot(kr)
# iii) Update component U_n
violation += _hals_update(U[n], grams, p)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Update the optimization result, checks for convergence.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Compute objective function
# grams *= U[X.ndim - 1].T.dot(U[X.ndim - 1])
# obj = np.sqrt( (sci.sum(grams) - 2 * sci.sum(U[X.ndim - 1] * p) + normX**2)) / normX
result.update(linalg.norm(X - U.full()) / normX)
# end optimization loop, return result.
return result.finalize() | 0.00171 |
def _make_options(x):
"""Standardize the options tuple format.
The returned tuple should be in the format (('label', value), ('label', value), ...).
The input can be
* an iterable of (label, value) pairs
* an iterable of values, and labels will be generated
"""
# Check if x is a mapping of labels to values
if isinstance(x, Mapping):
import warnings
warnings.warn("Support for mapping types has been deprecated and will be dropped in a future release.", DeprecationWarning)
return tuple((unicode_type(k), v) for k, v in x.items())
# only iterate once through the options.
xlist = tuple(x)
# Check if x is an iterable of (label, value) pairs
if all((isinstance(i, (list, tuple)) and len(i) == 2) for i in xlist):
return tuple((unicode_type(k), v) for k, v in xlist)
# Otherwise, assume x is an iterable of values
return tuple((unicode_type(i), i) for i in xlist) | 0.003141 |
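A sketch of the two accepted input shapes; unicode_type and Mapping are assumed to be whatever the surrounding module imports (e.g. str and collections.abc.Mapping on Python 3).
from collections.abc import Mapping   # assumed module import
unicode_type = str                     # assumed alias on Python 3

print(_make_options(['a', 'b']))                 # (('a', 'a'), ('b', 'b'))
print(_make_options([('One', 1), ('Two', 2)]))   # (('One', 1), ('Two', 2))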
def get(self, name, default=None):
"""
Returns an extension instance with a given name.
In case there are several extensions with a given name, the first one
will be returned. If no extension with the given name exists,
the `default` value will be returned.
:param name: (str) an extension name
:param default: (object) a fallback value
:returns: (object) an extension instance
"""
try:
value = self[name]
except KeyError:
value = default
return value | 0.003503 |
def compute_bearing(start_lat, start_lon, end_lat, end_lon):
'''
Get the compass bearing from start to end.
Formula from
http://www.movable-type.co.uk/scripts/latlong.html
'''
# make sure everything is in radians
start_lat = math.radians(start_lat)
start_lon = math.radians(start_lon)
end_lat = math.radians(end_lat)
end_lon = math.radians(end_lon)
dLong = end_lon - start_lon
dPhi = math.log(math.tan(end_lat / 2.0 + math.pi / 4.0) /
math.tan(start_lat / 2.0 + math.pi / 4.0))
if abs(dLong) > math.pi:
if dLong > 0.0:
dLong = -(2.0 * math.pi - dLong)
else:
dLong = (2.0 * math.pi + dLong)
y = math.sin(dLong) * math.cos(end_lat)
x = math.cos(start_lat) * math.sin(end_lat) - \
math.sin(start_lat) * math.cos(end_lat) * math.cos(dLong)
bearing = (math.degrees(math.atan2(y, x)) + 360.0) % 360.0
return bearing | 0.001053 |
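Quick sanity checks of the bearing formula (inputs in decimal degrees, math imported at module level): a point due east should give roughly 90 degrees and a point due north roughly 0 degrees.
print(compute_bearing(0.0, 0.0, 0.0, 1.0))   # ~90.0, due east
print(compute_bearing(0.0, 0.0, 1.0, 0.0))   # ~0.0, due north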
def write_elec_file(filename, mesh):
"""
Read in the electrode positions and write the indices of the matching
mesh nodes to 'elec.dat'.
# TODO: Check that all electrodes are found
"""
elecs = []
# print('Write electrodes')
electrodes = np.loadtxt(filename)
for i in electrodes:
# find
for nr, j in enumerate(mesh['nodes']):
if np.isclose(j[1], i[0]) and np.isclose(j[2], i[1]):
elecs.append(nr + 1)
fid = open('elec.dat', 'w')
fid.write('{0}\n'.format(len(elecs)))
for i in elecs:
fid.write('{0}\n'.format(i))
fid.close() | 0.001672 |
def _selectView( self ):
"""
Matches the view selection to the trees selection.
"""
scene = self.uiGanttVIEW.scene()
scene.blockSignals(True)
scene.clearSelection()
for item in self.uiGanttTREE.selectedItems():
item.viewItem().setSelected(True)
scene.blockSignals(False)
curr_item = self.uiGanttTREE.currentItem()
vitem = curr_item.viewItem()
if vitem:
self.uiGanttVIEW.centerOn(vitem) | 0.011321 |
def validateOneElement(self, doc, elem):
"""Try to validate a single element and it's attributes,
basically it does the following checks as described by the
XML-1.0 recommendation: - [ VC: Element Valid ] - [ VC:
Required Attribute ] Then call xmlValidateOneAttribute()
for each attribute present. The ID/IDREF checkings are
done separately """
if doc is None: doc__o = None
else: doc__o = doc._o
if elem is None: elem__o = None
else: elem__o = elem._o
ret = libxml2mod.xmlValidateOneElement(self._o, doc__o, elem__o)
return ret | 0.009434 |
async def on_raw_433(self, message):
""" Nickname in use. """
if not self.registered:
self._registration_attempts += 1
# Attempt to set new nickname.
if self._attempt_nicknames:
await self.set_nickname(self._attempt_nicknames.pop(0))
else:
await self.set_nickname(
self._nicknames[0] + '_' * (self._registration_attempts - len(self._nicknames))) | 0.006508 |
def traverse_many(self, attribute, source_sequence, target_sequence,
visitor):
"""
Traverses the given source and target sequences and makes appropriate
calls to :method:`traverse_one`.
Algorithm:
1) Build a map target item ID -> target data item from the target
sequence;
2) For each source data item in the source sequence check if it
has a not-None ID; if yes, remove the corresponding target from the
map generated in step 1) and use as target data item for the
source data item; if no, use `None` as target data item;
3) For the remaining items in the target map from 1), call
:method:`traverse_one` passing `None` as source (REMOVE);
4) For all source/target data item pairs generated in 2, call
:method:`traverse_one` (ADD or UPDATE depending on whether target
item is `None`).
:param source_sequence: iterable of source data proxies
:type source_sequence: iterator yielding instances of
`DataTraversalProxy` or None
:param target_sequence: iterable of target data proxies
:type target_sequence: iterator yielding instances of
`DataTraversalProxy` or None
"""
target_map = {}
if not target_sequence is None:
for target in target_sequence:
target_map[target.get_id()] = target
src_tgt_pairs = []
if not source_sequence is None:
for source in source_sequence:
source_id = source.get_id()
if not source_id is None:
# Check if target exists for UPDATE.
target = target_map.pop(source_id, None)
else:
# Source is new, there is no target, so ADD.
target = None
src_tgt_pairs.append((source, target))
# All targets that are still left in the map were not present in the
# source and therefore need to be REMOVEd.
for target in itervalues_(target_map):
if not (None, target) in self.__trv_path:
self.traverse_one(attribute, None, target, visitor)
#
for source, target in src_tgt_pairs:
if not (source, target) in self.__trv_path:
self.traverse_one(attribute, source, target, visitor) | 0.002438 |
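The pairing algorithm described in the docstring, reduced to a standalone sketch with plain dicts instead of DataTraversalProxy objects; all names here are illustrative only.
def pair_by_id(sources, targets):
    """Pair items by id: ADD (no target), UPDATE (id match), REMOVE (leftover target)."""
    target_map = {t["id"]: t for t in targets}
    pairs = []
    for src in sources:
        tgt = target_map.pop(src["id"], None) if src["id"] is not None else None
        pairs.append((src, tgt))                               # tgt is None -> ADD, else UPDATE
    removals = [(None, t) for t in target_map.values()]        # leftovers -> REMOVE
    return pairs + removals

print(pair_by_id([{"id": 1, "x": "new"}, {"id": None, "x": "added"}],
                 [{"id": 1, "x": "old"}, {"id": 2, "x": "gone"}]))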
def stationary_distribution_from_eigenvector(T, ncv=None):
r"""Compute stationary distribution of stochastic matrix T.
The stationary distribution is the left eigenvector corresponding to the 1
non-degenerate eigenvalue :math:`\lambda=1`.
Parameters
----------
T : numpy array, shape(d,d)
Transition matrix (stochastic matrix).
ncv : int (optional)
The number of Lanczos vectors generated, `ncv` must be greater than k;
it is recommended that ncv > 2*k
Returns
-------
mu : numpy array, shape(d,)
Vector of stationary probabilities.
"""
vals, vecs = scipy.sparse.linalg.eigs(T.transpose(), k=1, which='LR', ncv=ncv)
nu = vecs[:, 0].real
mu = nu / np.sum(nu)
return mu | 0.002646 |
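A usage sketch on a small 3-state chain, assuming the module's numpy/scipy imports are in place; note that scipy.sparse.linalg.eigs needs k < n - 1, so the matrix must have at least three states.
import numpy as np
import scipy.sparse

T = scipy.sparse.csr_matrix(np.array([[0.8, 0.2, 0.0],
                                      [0.1, 0.8, 0.1],
                                      [0.0, 0.2, 0.8]]))
mu = stationary_distribution_from_eigenvector(T)
print(mu, mu.sum())   # approximately [0.25 0.5 0.25], summing to 1.0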
def from_py_func(cls, func):
""" Create a ``CustomJS`` instance from a Python function. The
function is translated to JavaScript using PScript.
"""
from bokeh.util.deprecation import deprecated
deprecated("'from_py_func' is deprecated and will be removed in an eventual 2.0 release. "
"Use CustomJS directly instead.")
if not isinstance(func, FunctionType):
raise ValueError('CustomJS.from_py_func needs function object.')
pscript = import_required('pscript',
'To use Python functions for CustomJS, you need PScript ' +
'("conda install -c conda-forge pscript" or "pip install pscript")')
# Collect default values
default_values = func.__defaults__ # Python 2.6+
default_names = func.__code__.co_varnames[:len(default_values)]
args = dict(zip(default_names, default_values))
args.pop('window', None) # Clear window, so we use the global window object
# Get JS code, we could rip out the function def, or just
# call the function. We do the latter.
code = pscript.py2js(func, 'cb') + 'cb(%s);\n' % ', '.join(default_names)
return cls(code=code, args=args) | 0.005452 |
def _check_hetcaller(item):
"""Ensure upstream SV callers requires to heterogeneity analysis are available.
"""
svs = _get_as_list(item, "svcaller")
hets = _get_as_list(item, "hetcaller")
if hets or any([x in svs for x in ["titancna", "purecn"]]):
if not any([x in svs for x in ["cnvkit", "gatk-cnv"]]):
raise ValueError("Heterogeneity caller used but need CNV calls. Add `gatk4-cnv` "
"or `cnvkit` to `svcaller` in sample: %s" % item["description"]) | 0.007707 |
def ionic_strength(mis, zis):
r'''Calculate the ionic strength of a solution in one of two ways,
depending on the inputs only. For Pitzer and Bromley models,
`mis` should be molalities of each component. For eNRTL models,
`mis` should be mole fractions of each electrolyte in the solution.
This will sum to be much less than 1.
.. math::
I = \frac{1}{2} \sum M_i z_i^2
I = \frac{1}{2} \sum x_i z_i^2
Parameters
----------
mis : list
Molalities of each ion, or mole fractions of each ion [mol/kg or -]
zis : list
Charges of each ion [-]
Returns
-------
I : float
ionic strength, [?]
Examples
--------
>>> ionic_strength([0.1393, 0.1393], [1, -1])
0.1393
References
----------
.. [1] Chen, Chau-Chyun, H. I. Britt, J. F. Boston, and L. B. Evans. "Local
Composition Model for Excess Gibbs Energy of Electrolyte Systems.
Part I: Single Solvent, Single Completely Dissociated Electrolyte
Systems." AIChE Journal 28, no. 4 (July 1, 1982): 588-96.
doi:10.1002/aic.690280410
.. [2] Gmehling, Jurgen. Chemical Thermodynamics: For Process Simulation.
Weinheim, Germany: Wiley-VCH, 2012.
'''
return 0.5*sum([mi*zi*zi for mi, zi in zip(mis, zis)]) | 0.000765 |
def decode_network_values(ptype, plen, buf):
"""Decodes a list of DS values in collectd network format
"""
nvalues = short.unpack_from(buf, header.size)[0]
off = header.size + short.size + nvalues
valskip = double.size
# Check whether our expected packet size is the reported one
assert ((valskip + 1) * nvalues + short.size + header.size) == plen
assert double.size == number.size
result = []
for dstype in [ord(x) for x in buf[header.size + short.size:off]]:
if dstype == DS_TYPE_COUNTER:
result.append((dstype, number.unpack_from(buf, off)[0]))
off += valskip
elif dstype == DS_TYPE_GAUGE:
result.append((dstype, double.unpack_from(buf, off)[0]))
off += valskip
elif dstype == DS_TYPE_DERIVE:
result.append((dstype, number.unpack_from(buf, off)[0]))
off += valskip
elif dstype == DS_TYPE_ABSOLUTE:
result.append((dstype, number.unpack_from(buf, off)[0]))
off += valskip
else:
raise ValueError("DS type %i unsupported" % dstype)
return result | 0.000878 |
def _output_results(
self):
"""
*output results*
**Key Arguments:**
# -
**Return:**
- None
.. todo::
- @review: when complete, clean _output_results method
- @review: when complete add logging
"""
self.log.info('starting the ``_output_results`` method')
content = ""
maxNameLen = 0
for r in self.results:
if maxNameLen < len(r["ned_name"]):
maxNameLen = len(r["ned_name"])
if len(self.results) == 0:
content += "No resuls found"
else:
thisHeader = "| "
thisLine = "| "
for head in self.headers:
if head == "ned_name":
s = maxNameLen
else:
s = self.resultSpacing
thisHeader += str(head).ljust(s,
' ') + " | "
thisLine += ":".ljust(s,
'-') + " | "
content += thisHeader
content += "\n" + thisLine
for r in self.results:
thisRow = "| "
for head in self.headers:
if head == "ned_name":
s = maxNameLen
else:
s = self.resultSpacing
thisRow += str(r[head]).ljust(s,
' ') + " | "
content += "\n" + thisRow
if self.quiet == False:
print content
if self.outputFilePath:
import codecs
writeFile = codecs.open(
self.outputFilePath, encoding='utf-8', mode='w')
writeFile.write(content)
writeFile.close()
self.log.info('completed the ``_output_results`` method')
return None | 0.001547 |
def _cacheAllJobs(self):
"""
Downloads all jobs in the current job store into self.jobCache.
"""
logger.debug('Caching all jobs in job store')
self._jobCache = {jobGraph.jobStoreID: jobGraph for jobGraph in self._jobStore.jobs()}
logger.debug('{} jobs downloaded.'.format(len(self._jobCache))) | 0.008798 |
def correlations(self):
"""
Calculate the correlation coefficients.
"""
corr_df = self._sensors_num_df.corr()
corr_names = []
corrs = []
for i in range(len(corr_df.index)):
for j in range(len(corr_df.index)):
c_name = corr_df.index[i]
r_name = corr_df.columns[j]
corr_names.append("%s-%s" % (c_name, r_name))
corrs.append(corr_df.iloc[i, j])
corrs_all = pd.DataFrame(index=corr_names)
corrs_all["value"] = corrs
corrs_all = corrs_all.dropna().drop(
corrs_all[(corrs_all["value"] == float(1))].index
)
corrs_all = corrs_all.drop(corrs_all[corrs_all["value"] == float(-1)].index)
corrs_all = corrs_all.sort_values("value", ascending=False)
corrs_all = corrs_all.drop_duplicates()
return corrs_all | 0.003322 |
def update_record(self, domain, record, record_type=None, name=None):
"""Call to GoDaddy API to update a single DNS record
:param name: only required if the record is None (deletion)
:param record_type: only required if the record is None (deletion)
:param domain: the domain where the DNS belongs to (eg. 'example.com')
:param record: dict with record info (ex. {'name': 'dynamic', 'ttl': 3600, 'data': '1.1.1.1', 'type': 'A'})
:return: True if no exceptions occurred
"""
if record_type is None:
record_type = record['type']
if name is None:
name = record['name']
url = self.API_TEMPLATE + self.RECORDS_TYPE_NAME.format(domain=domain, type=record_type, name=name)
self._put(url, json=[record])
self.logger.info(
'Updated record. Domain {} name {} type {}'.format(domain, str(record['name']), str(record['type'])))
# If we didn't get any exceptions, return True to let the user know
return True | 0.00478 |
def urinorm(uri):
'''
Normalize a URI
'''
# TODO: use urllib.parse instead of these complex regular expressions
if isinstance(uri, bytes):
uri = str(uri, encoding='utf-8')
uri = uri.encode('ascii', errors='oid_percent_escape').decode('utf-8')
# _escapeme_re.sub(_pct_escape_unicode, uri).encode('ascii').decode()
illegal_mo = uri_illegal_char_re.search(uri)
if illegal_mo:
raise ValueError('Illegal characters in URI: %r at position %s' %
(illegal_mo.group(), illegal_mo.start()))
uri_mo = uri_re.match(uri)
scheme = uri_mo.group(2)
if scheme is None:
raise ValueError('No scheme specified')
scheme = scheme.lower()
if scheme not in ('http', 'https'):
raise ValueError('Not an absolute HTTP or HTTPS URI: %r' % (uri, ))
authority = uri_mo.group(4)
if authority is None:
raise ValueError('Not an absolute URI: %r' % (uri, ))
authority_mo = authority_re.match(authority)
if authority_mo is None:
raise ValueError('URI does not have a valid authority: %r' % (uri, ))
userinfo, host, port = authority_mo.groups()
if userinfo is None:
userinfo = ''
if '%' in host:
host = host.lower()
host = pct_encoded_re.sub(_pct_encoded_replace, host)
host = host.encode('idna').decode()
else:
host = host.lower()
if port:
if (port == ':' or (scheme == 'http' and port == ':80') or
(scheme == 'https' and port == ':443')):
port = ''
else:
port = ''
authority = userinfo + host + port
path = uri_mo.group(5)
path = pct_encoded_re.sub(_pct_encoded_replace_unreserved, path)
path = remove_dot_segments(path)
if not path:
path = '/'
query = uri_mo.group(6)
if query is None:
query = ''
fragment = uri_mo.group(8)
if fragment is None:
fragment = ''
return scheme + '://' + authority + path + query + fragment | 0.000993 |
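Illustrative expectations for the normalization above (the exact behaviour depends on the module's regex helpers): scheme and host are lowercased, default ports and dot-segments are removed, and an empty path becomes '/'.
print(urinorm('HTTP://Example.COM:80/a/./b/../c'))   # http://example.com/a/c
print(urinorm('https://example.com'))                # https://example.com/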
def get_stats(self, obj, stat_name):
""" Send CLI command that returns list of integer counters.
:param obj: requested object.
:param stat_name: statistics command name.
:return: list of counters.
:rtype: list(int)
"""
return [int(v) for v in self.send_command_return(obj, stat_name, '?').split()] | 0.008475 |
def connect_s3_bucket_to_lambda(self, bucket, function_arn, events,
prefix=None, suffix=None):
# type: (str, str, List[str], OptStr, OptStr) -> None
"""Configure S3 bucket to invoke a lambda function.
The S3 bucket must already have permission to invoke the
lambda function before you call this function, otherwise
the service will return an error. You can add permissions
by using the ``add_permission_for_s3_event`` below. The
``events`` param matches the event strings supported by the
service.
This method also only supports a single prefix/suffix for now,
which is what's offered in the Lambda console.
"""
s3 = self._client('s3')
existing_config = s3.get_bucket_notification_configuration(
Bucket=bucket)
# Because we're going to PUT this config back to S3, we need
# to remove `ResponseMetadata` because that's added in botocore
# and isn't a param of the put_bucket_notification_configuration.
existing_config.pop('ResponseMetadata', None)
existing_lambda_config = existing_config.get(
'LambdaFunctionConfigurations', [])
single_config = {
'LambdaFunctionArn': function_arn, 'Events': events
} # type: Dict[str, Any]
filter_rules = []
if prefix is not None:
filter_rules.append({'Name': 'Prefix', 'Value': prefix})
if suffix is not None:
filter_rules.append({'Name': 'Suffix', 'Value': suffix})
if filter_rules:
single_config['Filter'] = {'Key': {'FilterRules': filter_rules}}
new_config = self._merge_s3_notification_config(existing_lambda_config,
single_config)
existing_config['LambdaFunctionConfigurations'] = new_config
s3.put_bucket_notification_configuration(
Bucket=bucket,
NotificationConfiguration=existing_config,
) | 0.001949 |
def process_rule(self,rule,pa,tuple):
''' Process a string that denotes a boolean rule.
'''
for i,v in enumerate(tuple):
rule = rule.replace(pa[i],str(v))
return eval(rule) | 0.032407 |
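The rule evaluation above is plain string substitution followed by eval (so the rule string must be trusted); a standalone sketch outside the class, with made-up symbols:
def eval_rule(rule, symbols, values):
    """Substitute each symbol with its boolean value, then evaluate the expression."""
    for sym, val in zip(symbols, values):
        rule = rule.replace(sym, str(val))
    return eval(rule)    # e.g. "True and not False" -> True

print(eval_rule("A and not B", ["A", "B"], (True, False)))   # True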
def list_upgrades(refresh=True, **kwargs): # pylint: disable=W0613
'''
Lists all packages available for update.
When run in global zone, it reports only upgradable packages for the global
zone.
When run in non-global zone, it can report more upgradable packages than
``pkg update -vn``, because ``pkg update`` hides packages that require
newer version of ``pkg://solaris/entire`` (which means that they can be
upgraded only from the global zone). If ``pkg://solaris/entire`` is found
in the list of upgrades, then the global zone should be updated to get all
possible updates. Use ``refresh=True`` to refresh the package database.
refresh : True
Runs a full package database refresh before listing. Set to ``False`` to
disable running the refresh.
.. versionchanged:: 2017.7.0
In previous versions of Salt, ``refresh`` defaulted to ``False``. This was
changed to default to ``True`` in the 2017.7.0 release to make the behavior
more consistent with the other package modules, which all default to ``True``.
CLI Example:
.. code-block:: bash
salt '*' pkg.list_upgrades
salt '*' pkg.list_upgrades refresh=False
'''
if salt.utils.data.is_true(refresh):
refresh_db(full=True)
upgrades = {}
# awk is in core-os package so we can use it without checking
lines = __salt__['cmd.run_stdout']("/bin/pkg list -Huv").splitlines()
for line in lines:
upgrades[_ips_get_pkgname(line)] = _ips_get_pkgversion(line)
return upgrades | 0.003161 |
def keysym_to_keycodes(self, keysym):
"""Look up all the keycodes that is bound to keysym. A list of
tuples (keycode, index) is returned, sorted primarily on the
lowest index and secondarily on the lowest keycode."""
try:
# Copy the map list, reversing the arguments
return map(lambda x: (x[1], x[0]), self._keymap_syms[keysym])
except KeyError:
return [] | 0.00464 |
def sum_all(iterable, start):
"""Sum up an iterable starting with a start value.
In contrast to :func:`sum`, this also works on other types like
:class:`lists <list>` and :class:`sets <set>`.
"""
if hasattr(start, "__add__"):
for value in iterable:
start += value
else:
for value in iterable:
start |= value
return start | 0.002571 |
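Quick examples of both branches: += for types that define __add__ (lists), |= otherwise (sets).
print(sum_all([[2], [3]], [1]))   # [1, 2, 3]
print(sum_all([{2}, {3}], {1}))   # {1, 2, 3}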
def sortedby(item_list, key_list, reverse=False):
""" sorts ``item_list`` using key_list
Args:
item_list (list): list to sort
key_list (list): list to sort by
reverse (bool): sort order is descending (largest first)
if reverse is True else ascending (smallest first)
Returns:
list : ``item_list`` sorted by the values of ``key_list``. defaults to
ascending order
SeeAlso:
sortedby2
Examples:
>>> # ENABLE_DOCTEST
>>> import utool
>>> list_ = [1, 2, 3, 4, 5]
>>> key_list = [2, 5, 3, 1, 5]
>>> result = utool.sortedby(list_, key_list, reverse=True)
>>> print(result)
[5, 2, 3, 1, 4]
"""
assert len(item_list) == len(key_list), (
'Expected same len. Got: %r != %r' % (len(item_list), len(key_list)))
sorted_list = [item for (key, item) in
sorted(list(zip(key_list, item_list)), reverse=reverse)]
return sorted_list | 0.000992 |
def delete_record(cls, record):
"""Delete a record and it's persistent identifiers."""
record.delete()
PersistentIdentifier.query.filter_by(
object_type='rec', object_uuid=record.id,
).update({PersistentIdentifier.status: PIDStatus.DELETED})
cls.delete_buckets(record)
db.session.commit() | 0.005747 |
def maybe_connect(self, node_id, wakeup=True):
"""Queues a node for asynchronous connection during the next .poll()"""
if self._can_connect(node_id):
self._connecting.add(node_id)
# Wakeup signal is useful in case another thread is
# blocked waiting for incoming network traffic while holding
# the client lock in poll().
if wakeup:
self.wakeup()
return True
return False | 0.004141 |
def load_dataset(relative_path):
"""Imports a data file within the 'h2o_data' folder."""
assert_is_type(relative_path, str)
h2o_dir = os.path.split(__file__)[0]
for possible_file in [os.path.join(h2o_dir, relative_path),
os.path.join(h2o_dir, "h2o_data", relative_path),
os.path.join(h2o_dir, "h2o_data", relative_path + ".csv")]:
if os.path.exists(possible_file):
return upload_file(possible_file)
# File not found -- raise an error!
raise H2OValueError("Data file %s cannot be found" % relative_path) | 0.003344 |
def regret(self):
'''
Calculate expected regret, where expected regret is
maximum optimal reward - sum of collected rewards, i.e.
expected regret = T*max_k(mean_k) - sum_(t=1-->T) (reward_t)
Returns
-------
float
'''
return (sum(self.pulls)*np.max(np.nan_to_num(self.wins/self.pulls)) -
sum(self.wins)) / sum(self.pulls) | 0.004866 |
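A worked number for the formula above, assuming self.wins and self.pulls are NumPy arrays: with wins = [2, 5] and pulls = [10, 10], the best empirical mean is 0.5, so regret = (20 * 0.5 - 7) / 20 = 0.15.
import numpy as np

wins, pulls = np.array([2, 5]), np.array([10, 10])
print((pulls.sum() * np.max(wins / pulls) - wins.sum()) / pulls.sum())   # 0.15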
def generate_parser():
"""Returns a parser configured with sub-commands and arguments."""
parser = argparse.ArgumentParser(
description=constants.TACL_DESCRIPTION,
formatter_class=ParagraphFormatter)
subparsers = parser.add_subparsers(title='subcommands')
generate_align_subparser(subparsers)
generate_catalogue_subparser(subparsers)
generate_counts_subparser(subparsers)
generate_diff_subparser(subparsers)
generate_excise_subparser(subparsers)
generate_highlight_subparser(subparsers)
generate_intersect_subparser(subparsers)
generate_lifetime_subparser(subparsers)
generate_ngrams_subparser(subparsers)
generate_prepare_subparser(subparsers)
generate_results_subparser(subparsers)
generate_supplied_diff_subparser(subparsers)
generate_search_subparser(subparsers)
generate_supplied_intersect_subparser(subparsers)
generate_statistics_subparser(subparsers)
generate_strip_subparser(subparsers)
return parser | 0.000995 |
def encodeThetas(self, theta1, theta2):
"""Return the SDR for theta1 and theta2"""
# print >> sys.stderr, "encoded theta1 value = ", theta1
# print >> sys.stderr, "encoded theta2 value = ", theta2
t1e = self.theta1Encoder.encode(theta1)
t2e = self.theta2Encoder.encode(theta2)
# print >> sys.stderr, "encoded theta1 = ", t1e.nonzero()[0]
# print >> sys.stderr, "encoded theta2 = ", t2e.nonzero()[0]
ex = numpy.outer(t2e,t1e)
return ex.flatten().nonzero()[0] | 0.004057 |
def get_book_currencies(self) -> List[Commodity]:
""" Returns currencies used in the book """
query = (
self.currencies_query
.order_by(Commodity.mnemonic)
)
return query.all() | 0.012712 |
def mwu(x, y, tail='two-sided'):
"""Mann-Whitney U Test (= Wilcoxon rank-sum test). It is the non-parametric
version of the independent T-test.
Parameters
----------
x, y : array_like
First and second set of observations. x and y must be independent.
tail : string
Specify whether to return 'one-sided' or 'two-sided' p-value.
Returns
-------
stats : pandas DataFrame
Test summary ::
'U-val' : U-value
'p-val' : p-value
'RBC' : rank-biserial correlation (effect size)
'CLES' : common language effect size
Notes
-----
mwu tests the hypothesis that data in x and y are samples from continuous
distributions with equal medians. The test assumes that x and y
are independent. This test corrects for ties and by default
uses a continuity correction (see :py:func:`scipy.stats.mannwhitneyu`
for details).
The rank biserial correlation is the difference between the proportion of
favorable evidence minus the proportion of unfavorable evidence
(see Kerby 2014).
The common language effect size is the probability (from 0 to 1) that a
randomly selected observation from the first sample will be greater than a
randomly selected observation from the second sample.
References
----------
.. [1] Mann, H. B., & Whitney, D. R. (1947). On a test of whether one of
two random variables is stochastically larger than the other.
The annals of mathematical statistics, 50-60.
.. [2] Kerby, D. S. (2014). The simple difference formula: An approach to
teaching nonparametric correlation. Comprehensive Psychology,
3, 11-IT.
.. [3] McGraw, K. O., & Wong, S. P. (1992). A common language effect size
statistic. Psychological bulletin, 111(2), 361.
Examples
--------
>>> import numpy as np
>>> from pingouin import mwu
>>> np.random.seed(123)
>>> x = np.random.uniform(low=0, high=1, size=20)
>>> y = np.random.uniform(low=0.2, high=1.2, size=20)
>>> mwu(x, y, tail='two-sided')
U-val p-val RBC CLES
MWU 97.0 0.00556 0.515 0.758
"""
from scipy.stats import mannwhitneyu
x = np.asarray(x)
y = np.asarray(y)
# Remove NA
x, y = remove_na(x, y, paired=False)
# Compute test
if tail == 'one-sided':
tail = 'less' if np.median(x) < np.median(y) else 'greater'
uval, pval = mannwhitneyu(x, y, use_continuity=True, alternative=tail)
# Effect size 1: common language effect size (McGraw and Wong 1992)
diff = x[:, None] - y
cles = max((diff < 0).sum(), (diff > 0).sum()) / diff.size
# Effect size 2: rank biserial correlation (Wendt 1972)
rbc = 1 - (2 * uval) / diff.size # diff.size = x.size * y.size
# Fill output DataFrame
stats = pd.DataFrame({}, index=['MWU'])
stats['U-val'] = round(uval, 3)
stats['p-val'] = pval
stats['RBC'] = round(rbc, 3)
stats['CLES'] = round(cles, 3)
col_order = ['U-val', 'p-val', 'RBC', 'CLES']
stats = stats.reindex(columns=col_order)
return stats | 0.000318 |
def _format_info(format_int, format_flag=_snd.SFC_GET_FORMAT_INFO):
"""Return the ID and short description of a given format."""
format_info = _ffi.new("SF_FORMAT_INFO*")
format_info.format = format_int
_snd.sf_command(_ffi.NULL, format_flag, format_info,
_ffi.sizeof("SF_FORMAT_INFO"))
name = format_info.name
return (_format_str(format_info.format),
_ffi.string(name).decode('utf-8', 'replace') if name else "") | 0.002132 |
def post_comment_ajax(request, using=None):
"""
Post a comment, via an Ajax call.
"""
if not request.is_ajax():
return HttpResponseBadRequest("Expecting Ajax call")
# This is copied from django_comments.
# Basically that view does too much, and doesn't offer a hook to change the rendering.
# The request object is not passed to next_redirect for example.
#
# This is a separate view to integrate both features. Previously this used django-ajaxcomments
# which is unfortunately not thread-safe (it changes the comment view per request).
# Fill out some initial data fields from an authenticated user, if present
data = request.POST.copy()
if request.user.is_authenticated:
if not data.get('name', ''):
data["name"] = request.user.get_full_name() or request.user.username
if not data.get('email', ''):
data["email"] = request.user.email
# Look up the object we're trying to comment about
ctype = data.get("content_type")
object_pk = data.get("object_pk")
if ctype is None or object_pk is None:
return CommentPostBadRequest("Missing content_type or object_pk field.")
try:
model = apps.get_model(*ctype.split(".", 1))
target = model._default_manager.using(using).get(pk=object_pk)
except ValueError:
return CommentPostBadRequest("Invalid object_pk value: {0}".format(escape(object_pk)))
except (TypeError, LookupError):
return CommentPostBadRequest("Invalid content_type value: {0}".format(escape(ctype)))
except AttributeError:
return CommentPostBadRequest("The given content-type {0} does not resolve to a valid model.".format(escape(ctype)))
except ObjectDoesNotExist:
return CommentPostBadRequest("No object matching content-type {0} and object PK {1} exists.".format(escape(ctype), escape(object_pk)))
except (ValueError, ValidationError) as e:
return CommentPostBadRequest("Attempting go get content-type {0!r} and object PK {1!r} exists raised {2}".format(escape(ctype), escape(object_pk), e.__class__.__name__))
# Do we want to preview the comment?
is_preview = "preview" in data
# Construct the comment form
form = django_comments.get_form()(target, data=data, is_preview=is_preview)
# Check security information
if form.security_errors():
return CommentPostBadRequest("The comment form failed security verification: {0}".format(form.security_errors()))
# If there are errors or if we requested a preview show the comment
if is_preview:
comment = form.get_comment_object() if not form.errors else None
return _ajax_result(request, form, "preview", comment, object_id=object_pk)
if form.errors:
return _ajax_result(request, form, "post", object_id=object_pk)
# Otherwise create the comment
comment = form.get_comment_object()
comment.ip_address = request.META.get("REMOTE_ADDR", None)
if request.user.is_authenticated:
comment.user = request.user
# Signal that the comment is about to be saved
responses = signals.comment_will_be_posted.send(
sender=comment.__class__,
comment=comment,
request=request
)
for (receiver, response) in responses:
if response is False:
return CommentPostBadRequest("comment_will_be_posted receiver {0} killed the comment".format(receiver.__name__))
# Save the comment and signal that it was saved
comment.save()
signals.comment_was_posted.send(
sender = comment.__class__,
comment = comment,
request = request
)
return _ajax_result(request, form, "post", comment, object_id=object_pk) | 0.005622 |
def create_from_filedict(self, filedict):
"""
Creates h5 file from dictionary containing the file structure.
Filedict is a regular dictionary whose keys are hdf5 paths and whose
values are dictionaries containing the metadata and datasets. Metadata
is given as normal key-value pairs and dataset arrays are given using the
'DATASET' key. Datasets must be numpy arrays.
The method can also be used to append to an existing hdf5 file. If the file
is opened in read-only mode, the method does nothing.
Examples
--------
Create newfile.h5 and fill it with data and metadata
>>> h5f = HiisiHDF('newfile.h5', 'w')
>>> filedict = {'/':{'attr1':'A'},
'/dataset1/data1/data':{'DATASET':np.zeros(100), 'quantity':'emptyarray'}, 'B':'b'}
>>> h5f.create_from_filedict(filedict)
"""
if self.mode in ['r+','w', 'w-', 'x', 'a']:
for h5path, path_content in filedict.iteritems():
if path_content.has_key('DATASET'):
# If path exist, write only metadata
if h5path in self:
for key, value in path_content.iteritems():
if key != 'DATASET':
self[h5path].attrs[key] = value
else:
try:
group = self.create_group(os.path.dirname(h5path))
except ValueError:
group = self[os.path.dirname(h5path)]
pass # This pass has no effect?
new_dataset = group.create_dataset(os.path.basename(h5path), data=path_content['DATASET'])
for key, value in path_content.iteritems():
if key != 'DATASET':
new_dataset.attrs[key] = value
else:
try:
group = self.create_group(h5path)
except ValueError:
group = self[h5path]
for key, value in path_content.iteritems():
group.attrs[key] = value | 0.004409 |
def handle_profile_form(form):
"""Handle profile update form."""
form.process(formdata=request.form)
if form.validate_on_submit():
email_changed = False
with db.session.begin_nested():
# Update profile.
current_userprofile.username = form.username.data
current_userprofile.full_name = form.full_name.data
db.session.add(current_userprofile)
# Update email
if current_app.config['USERPROFILES_EMAIL_ENABLED'] and \
form.email.data != current_user.email:
current_user.email = form.email.data
current_user.confirmed_at = None
db.session.add(current_user)
email_changed = True
db.session.commit()
if email_changed:
send_confirmation_instructions(current_user)
# NOTE: Flash message after successful update of profile.
flash(_('Profile was updated. We have sent a verification '
'email to %(email)s. Please check it.',
email=current_user.email),
category='success')
else:
# NOTE: Flash message after successful update of profile.
flash(_('Profile was updated.'), category='success') | 0.000769 |
def _get_category_from_pars_var(template_var, context):
'''
get category from template variable or from tree_path
'''
cat = template_var.resolve(context)
if isinstance(cat, basestring):
cat = Category.objects.get_by_tree_path(cat)
return cat | 0.003663 |
def detach(self, dwProcessId, bIgnoreExceptions = False):
"""
Detaches from a process currently being debugged.
@note: On Windows 2000 and below the process is killed.
@see: L{attach}, L{detach_from_all}
@type dwProcessId: int
@param dwProcessId: Global ID of a process to detach from.
@type bIgnoreExceptions: bool
@param bIgnoreExceptions: C{True} to ignore any exceptions that may be
raised when detaching. C{False} to stop and raise an exception when
encountering an error.
@raise WindowsError: Raises an exception on error, unless
C{bIgnoreExceptions} is C{True}.
"""
# Keep a reference to the process. We'll need it later.
try:
aProcess = self.system.get_process(dwProcessId)
except KeyError:
aProcess = Process(dwProcessId)
# Determine if there is support for detaching.
# This check should only fail on Windows 2000 and older.
try:
win32.DebugActiveProcessStop
can_detach = True
except AttributeError:
can_detach = False
# Continue the last event before detaching.
# XXX not sure about this...
try:
if can_detach and self.lastEvent and \
self.lastEvent.get_pid() == dwProcessId:
self.cont(self.lastEvent)
except Exception:
if not bIgnoreExceptions:
raise
e = sys.exc_info()[1]
warnings.warn(str(e), RuntimeWarning)
# Cleanup all data referring to the process.
self.__cleanup_process(dwProcessId,
bIgnoreExceptions = bIgnoreExceptions)
try:
# Detach from the process.
# On Windows 2000 and before, kill the process.
if can_detach:
try:
win32.DebugActiveProcessStop(dwProcessId)
except Exception:
if not bIgnoreExceptions:
raise
e = sys.exc_info()[1]
warnings.warn(str(e), RuntimeWarning)
else:
try:
aProcess.kill()
except Exception:
if not bIgnoreExceptions:
raise
e = sys.exc_info()[1]
warnings.warn(str(e), RuntimeWarning)
finally:
# Cleanup what remains of the process data.
aProcess.clear() | 0.002306 |
def from_file(cls, fn, *args, **kwargs):
"""Constructor to build an AmiraHeader object from a file
:param str fn: Amira file
:return ah: object of class ``AmiraHeader`` containing header metadata
:rtype: ah: :py:class:`ahds.header.AmiraHeader`
"""
return AmiraHeader(get_parsed_data(fn, *args, **kwargs)) | 0.00831 |
def attach(self, events):
"""
Attach this screen to a events that processes commands and dispatches
events. Sets up the appropriate event handlers so that the screen will
update itself automatically as the events processes data.
"""
if events is not None:
events.add_event_listener("print", self._print)
events.add_event_listener("backspace", self._backspace)
events.add_event_listener("tab", self._tab)
events.add_event_listener("linefeed", self._linefeed)
events.add_event_listener("reverse-linefeed",
self._reverse_linefeed)
events.add_event_listener("carriage-return", self._carriage_return)
events.add_event_listener("index", self._index)
events.add_event_listener("reverse-index", self._reverse_index)
events.add_event_listener("store-cursor", self._save_cursor)
events.add_event_listener("restore-cursor", self._restore_cursor)
events.add_event_listener("cursor-up", self._cursor_up)
events.add_event_listener("cursor-down", self._cursor_down)
events.add_event_listener("cursor-right", self._cursor_forward)
events.add_event_listener("cursor-left", self._cursor_back)
events.add_event_listener("cursor-move", self._cursor_position)
events.add_event_listener("erase-in-line", self._erase_in_line)
events.add_event_listener("erase-in-display",
self._erase_in_display)
events.add_event_listener("delete-characters",
self._delete_character)
events.add_event_listener("insert-lines", self._insert_line)
events.add_event_listener("delete-lines", self._delete_line)
events.add_event_listener("select-graphic-rendition",
self._select_graphic_rendition)
events.add_event_listener("charset-g0", self._charset_g0)
events.add_event_listener("charset-g1", self._charset_g1)
events.add_event_listener("shift-in", self._shift_in)
events.add_event_listener("shift-out", self._shift_out)
events.add_event_listener("bell", self._bell) | 0.002564 |
def _read_eps_ctr(tomodir):
"""Parse a CRTomo eps.ctr file.
TODO: change parameters to only provide eps.ctr file
Parameters
----------
tomodir: string
Path to directory path
Returns
-------
"""
epsctr_file = tomodir + os.sep + 'inv' + os.sep + 'eps.ctr'
if not os.path.isfile(epsctr_file):
print('eps.ctr not found: {0}'.format(epsctr_file))
print(os.getcwd())
return 1
with open(epsctr_file, 'r') as fid:
lines = fid.readlines()
group = itertools.groupby(lines, lambda x: x == '\n')
dfs = []
# group
for x in group:
# print(x)
if not x[0]:
data = [y for y in x[1]]
if data[0].startswith('IT') or data[0].startswith('PIT'):
del(data[0])
data[0] = data[0].replace('-Phase (rad)', '-Phase(rad)')
tfile = StringIO(''.join(data))
df = pd.read_csv(
tfile,
delim_whitespace=True,
na_values=['Infinity'],
)
dfs.append(df)
return dfs | 0.001619 |
def tx_serialization_order(provider: Provider, blockhash: str, txid: str) -> int:
'''find index of this tx in the blockid'''
return provider.getblock(blockhash)["tx"].index(txid) | 0.010695 |
def permissions(self):
"""Dynamically generate dictionary of privacy options
"""
# TODO: optimize this, it's kind of a bad solution for listing a mostly
# static set of files.
# We could either add a permissions dict as an attribute or cache this
# in some way. Creating a dict would be another place we have to define
# the permission, so I'm not a huge fan, but it would definitely be the
# easier option.
permissions_dict = {"self": {}, "parent": {}}
for field in self.properties._meta.get_fields():
split_field = field.name.split('_', 1)
if len(split_field) <= 0 or split_field[0] not in ['self', 'parent']:
continue
permissions_dict[split_field[0]][split_field[1]] = getattr(self.properties, field.name)
return permissions_dict | 0.004577 |
def _from_docstring_rst(doc):
"""
format from docstring to reStructuredText
"""
def format_fn(line, status):
""" format function """
if re_from_data.match(line):
line = re_from_data.sub(r"**\1** ", line)
status["add_line"] = True
line = re_from_defaults.sub(r"*\1*", line)
if status["listing"]:
# parameters
if re_from_param.match(line):
m = re_from_param.match(line)
line = " - ``{}`` {}".format(m.group(1), m.group(3))
# status items
elif re_from_status.match(line):
m = re_from_status.match(line)
line = " - ``{}`` {}".format(m.group(1), m.group(3))
# bullets
elif re_from_item.match(line):
line = re_from_item.sub(r" -", line)
# is continuation line
else:
line = " " * 4 + line.lstrip()
# in .rst format code samples use double backticks vs single ones for
# .md This converts them.
line = re_lone_backtick.sub("``", line)
return line
return _reformat_docstring(doc, format_fn, code_newline="\n") | 0.000829 |
def has_protocol(self, name):
"""Tells if a certain protocol is available"""
return self.query(Protocol).filter(Protocol.name==name).count() != 0 | 0.012987 |
def _AtNonLeaf(self, attr_value, path):
"""Makes dictionaries expandable when dealing with plists."""
if isinstance(attr_value, dict):
for value in self.Expand(attr_value, path[1:]):
yield value
else:
for v in objectfilter.ValueExpander._AtNonLeaf(self, attr_value, path):
yield v | 0.009375 |
def scheduled_time(self):
"""Time this band was scheduled to be recorded."""
timeline = "{:04d}".format(self.basic_info['observation_timeline'][0])
return self.start_time.replace(hour=int(timeline[:2]), minute=int(timeline[2:4]), second=0, microsecond=0) | 0.010791 |
def fromimportupdate(cls, bundle, import_reg):
# type: (Bundle, ImportRegistration) -> RemoteServiceAdminEvent
"""
Creates a RemoteServiceAdminEvent object from the update of an
ImportRegistration
"""
exc = import_reg.get_exception()
if exc:
return RemoteServiceAdminEvent(
RemoteServiceAdminEvent.IMPORT_ERROR,
bundle,
import_reg.get_import_container_id(),
import_reg.get_remoteservice_id(),
None,
None,
exc,
import_reg.get_description(),
)
return RemoteServiceAdminEvent(
RemoteServiceAdminEvent.IMPORT_UPDATE,
bundle,
import_reg.get_import_container_id(),
import_reg.get_remoteservice_id(),
import_reg.get_import_reference(),
None,
None,
import_reg.get_description(),
) | 0.003021 |
def to_native(self, obj, name, value): # pylint:disable=unused-argument
"""Transform the MongoDB value into a Marrow Mongo value."""
if self.mapping:
for original, new in self.mapping.items():
value = value.replace(original, new)
return load(value, self.namespace) | 0.038732 |
def listfolderpath(p):
"""
Generator yielding the paths of the folders inside ``p``.
Folders only.
"""
for entry in scandir.scandir(p):
if entry.is_dir():
yield entry.path | 0.005263 |
def open(self):
"""Open Connection.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
"""
LOGGER.debug('Connection Opening')
self.set_state(self.OPENING)
self._exceptions = []
self._channels = {}
self._last_channel_id = None
self._io.open()
self._send_handshake()
self._wait_for_connection_state(state=Stateful.OPEN)
self.heartbeat.start(self._exceptions)
LOGGER.debug('Connection Opened') | 0.003591 |
def request_access_token(self, code, redirect_uri=None):
'''
Return access token as a JsonDict: {"access_token":"your-access-token","expires":12345678,"uid":1234}, expires is represented using standard unix-epoch-time
'''
redirect = redirect_uri or self._redirect_uri
resp_text = _http('POST', 'https://api.weibo.com/oauth2/access_token',
client_id=self._client_id, client_secret=self._client_secret,
redirect_uri=redirect, code=code, grant_type='authorization_code')
r = _parse_json(resp_text)
current = int(time.time())
expires = r.expires_in + current
remind_in = r.get('remind_in', None)
if remind_in:
rtime = int(remind_in) + current
if rtime < expires:
expires = rtime
return JsonDict(access_token=r.access_token, expires=expires, uid=r.get('uid', None)) | 0.006383 |
def to_string(self):
"""
Return the current NDEF as a string (always 64 bytes).
"""
data = self.ndef_str
if self.ndef_type == _NDEF_URI_TYPE:
data = self._encode_ndef_uri_type(data)
elif self.ndef_type == _NDEF_TEXT_TYPE:
data = self._encode_ndef_text_params(data)
if len(data) > _NDEF_DATA_SIZE:
raise YubiKeyNEO_USBHIDError("NDEF payload too long")
# typedef struct {
# unsigned char len; // Payload length
# unsigned char type; // NDEF type specifier
# unsigned char data[NDEF_DATA_SIZE]; // Payload size
# unsigned char curAccCode[ACC_CODE_SIZE]; // Access code
# } YKNDEF;
#
fmt = '< B B %ss %ss' % (_NDEF_DATA_SIZE, _ACC_CODE_SIZE)
first = struct.pack(fmt,
len(data),
self.ndef_type,
data.ljust(_NDEF_DATA_SIZE, b'\0'),
self.access_code,
)
#crc = 0xffff - yubico_util.crc16(first)
#second = first + struct.pack('<H', crc) + self.unlock_code
return first | 0.003257 |
def setquit():
"""Define new built-ins 'quit' and 'exit'.
These are simply strings that display a hint on how to exit.
"""
if os.sep == ':':
eof = 'Cmd-Q'
elif os.sep == '\\':
eof = 'Ctrl-Z plus Return'
else:
eof = 'Ctrl-D (i.e. EOF)'
class Quitter(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return 'Use %s() or %s to exit' % (self.name, eof)
def __call__(self, code=None):
# Shells like IDLE catch the SystemExit, but listen when their
# stdin wrapper is closed.
try:
sys.stdin.close()
except:
pass
raise SystemExit(code)
builtins.quit = Quitter('quit')
builtins.exit = Quitter('exit') | 0.004896 |
def check_has_version(self, api):
'''An API class must have a `version` attribute.'''
if not hasattr(api, 'version'):
msg = 'The Api class "{}" lacks a `version` attribute.'
return [msg.format(api.__name__)] | 0.008097 |
def lookup(self, vtype, vname, target_id=None):
"""Return value of vname from the variable store vtype.
Valid vtypes are `strings` 'counters', and `pending`. If the value
is not found in the current steps store, earlier steps will be
checked. If not found, '', 0, or (None, None) is returned.
"""
nullvals = {'strings': '', 'counters': 0, 'pending': (None, None)}
nullval = nullvals[vtype]
vstyle = None
if vtype == 'counters':
if len(vname) > 1:
vname, vstyle = vname
else:
vname = vname[0]
if target_id is not None:
try:
state = self.state[vtype][target_id]
steps = self.state[vtype][target_id].keys()
except KeyError:
log(WARN, u'Bad ID target lookup {}'.format(
target_id).encode('utf-8'))
return nullval
else:
state = self.state
steps = self.state['scope']
for step in steps:
if vname in state[step][vtype]:
if vtype == 'pending':
return(state[step][vtype][vname], step)
else:
val = state[step][vtype][vname]
if vstyle is not None:
return self.counter_style(val, vstyle)
return val
else:
return nullval | 0.001366 |
def create_from_yamlfile(cls, yamlfile):
"""Create a Castro data object from a yaml file contains
the likelihood data."""
data = load_yaml(yamlfile)
nebins = len(data)
emin = np.array([data[i]['emin'] for i in range(nebins)])
emax = np.array([data[i]['emax'] for i in range(nebins)])
ref_flux = np.array([data[i]['flux'][1] for i in range(nebins)])
ref_eflux = np.array([data[i]['eflux'][1] for i in range(nebins)])
conv = np.array([data[i]['eflux2npred'] for i in range(nebins)])
ref_npred = conv*ref_eflux
ones = np.ones(ref_flux.shape)
ref_spec = ReferenceSpec(emin, emax, ones, ref_flux, ref_eflux, ref_npred)
norm_data = np.array([data[i]['eflux'] for i in range(nebins)])
ll_data = np.array([data[i]['logLike'] for i in range(nebins)])
max_ll = ll_data.max(1)
nll_data = (max_ll - ll_data.T).T
return cls(norm_data, nll_data, ref_spec, 'eflux') | 0.004053 |
def set_measurements(test):
"""Test phase that sets a measurement."""
test.measurements.level_none = 0
time.sleep(1)
test.measurements.level_some = 8
time.sleep(1)
test.measurements.level_all = 9
time.sleep(1)
level_all = test.get_measurement('level_all')
assert level_all.value == 9 | 0.033223 |
def interpolate_delta_t(delta_t_table, tt):
"""Return interpolated Delta T values for the times in `tt`.
The 2xN table should provide TT values as element 0 and
corresponding Delta T values for element 1. For times outside the
range of the table, a long-term formula is used instead.
"""
tt_array, delta_t_array = delta_t_table
delta_t = _to_array(interp(tt, tt_array, delta_t_array, nan, nan))
missing = isnan(delta_t)
if missing.any():
# Test if we are dealing with an array and proceed appropriately
if missing.shape:
tt = tt[missing]
delta_t[missing] = delta_t_formula_morrison_and_stephenson_2004(tt)
else:
delta_t = delta_t_formula_morrison_and_stephenson_2004(tt)
return delta_t | 0.001267 |
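A usage sketch with a tiny made-up Delta T table (TT as Julian dates, Delta T in seconds); inside the table's range the result is plain linear interpolation, and the module-level helpers (_to_array, interp, the Morrison & Stephenson fallback) are assumed to be available.
import numpy as np

delta_t_table = (np.array([2451545.0, 2455197.5, 2458849.5]),
                 np.array([63.8, 66.1, 69.4]))
tt = np.array([2451545.0, 2453371.25])          # first table point and a midpoint
print(interpolate_delta_t(delta_t_table, tt))   # [63.8, ~64.95]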
def debug(self, *debugReqs):
"""send a debug command to control the game state's setup"""
return self._client.send(debug=sc2api_pb2.RequestDebug(debug=debugReqs)) | 0.016854 |
def checkgrad(f, fprime, x, *args,**kw_args):
"""
Analytical gradient calculation using a 3-point method
"""
LG.debug("Checking gradient ...")
import numpy as np
# using machine precision to choose h
eps = np.finfo(float).eps
step = np.sqrt(eps)*(x.min())
# shake things up a bit by taking random steps for each x dimension
h = step*np.sign(np.random.uniform(-1, 1, x.size))
f_ph = f(x+h, *args, **kw_args)
f_mh = f(x-h, *args, **kw_args)
numerical_gradient = (f_ph - f_mh)/(2*h)
analytical_gradient = fprime(x, *args, **kw_args)
ratio = (f_ph - f_mh)/(2*np.dot(h, analytical_gradient))
h = np.zeros_like(x)
for i in range(len(x)):
# pdb.set_trace()  # leftover debugging breakpoint; keep disabled for normal runs
h[i] = step
f_ph = f(x+h, *args, **kw_args)
f_mh = f(x-h, *args, **kw_args)
numerical_gradient = (f_ph - f_mh)/(2*step)
analytical_gradient = fprime(x, *args, **kw_args)[i]
ratio = (f_ph - f_mh)/(2*step*analytical_gradient)
h[i] = 0
LG.debug("[%d] numerical: %f, analytical: %f, ratio: %f" % (i, numerical_gradient,analytical_gradient,ratio)) | 0.004533 |
def stft(func=None, **kwparams):
"""
Short Time Fourier Transform block processor / phase vocoder wrapper.
This function can be used in many ways:
* Directly as a signal processor builder, wrapping a spectrum block/grain
processor function;
* Directly as a decorator to a block processor;
* Called without the ``func`` parameter for a partial evaluation style
changing the defaults.
See the examples below for more information about these use cases.
The resulting function performs a full block-by-block analysis/synthesis
phase vocoder keeping this sequence of actions:
1. Blockenize the signal with the given ``size`` and ``hop``;
2. Lazily apply the given ``wnd`` window to each block;
3. Perform the 5 actions calling their functions in order:
a. ``before``: Pre-processing;
b. ``transform``: A transform like the FFT;
c. ``func``: the positional parameter with the single block processor;
d. ``inverse_transform``: inverse FFT;
e. ``after``: Post-processing.
4. Overlap-add with the ``ola`` overlap-add strategy. The given ``ola``
would deal with its own window application and normalization.
Any parameter from steps 3 and 4 can be set to ``None`` to skip it from
the full process, without changing the other [sub]steps. The parameters
defaults are based on the Numpy FFT subpackage.
Parameters
----------
func :
The block/grain processor function that receives a transformed block in
the frequency domain (the ``transform`` output) and should return the
processed data (it will be the first ``inverse_transform`` input). This
parameter shouldn't appear when this function is used as a decorator.
size :
Block size for the STFT process, in samples.
hop :
Duration in samples between two blocks. Defaults to the ``size`` value.
transform :
Function that receives the windowed block (in time domain) and the
``size`` as two positional inputs and should return the block (in
frequency domain). Defaults to ``numpy.fft.rfft``, which outputs a
Numpy 1D array with length equals to ``size // 2 + 1``.
inverse_transform :
Function that receives the processed block (in frequency domain) and the
``size`` as two positional inputs and should return the block (in
time domain). Defaults to ``numpy.fft.irfft``.
wnd :
Window function to be called as ``wnd(size)`` or window iterable with
length equals to ``size``. The windowing/apodization values are used
before taking the FFT of each block. Defaults to None, which means no
window should be applied (same behavior of a rectangular window).
before :
Function to be applied just before taking the transform, after the
windowing. Defaults to the ``numpy.fft.ifftshift``, which, together with
the ``after`` default, puts the time reference at the ``size // 2``
index of the block, centralizing it for the FFT (e.g. blocks
``[0, 1, 0]`` and ``[0, 0, 1, 0]`` would have zero phase). To disable
this realignment, just change both ``before=None`` and ``after=None``
keywords.
after :
Function to be applied just after the inverse transform, before calling
the overlap-add (as well as before its windowing, if any). Defaults to
the ``numpy.fft.fftshift`` function, which undo the changes done by the
default ``before`` pre-processing for block phase alignment. To avoid
the default time-domain realignment, set both ``before=None`` and
``after=None`` keywords.
ola :
Overlap-add strategy. Uses the ``overlap_add`` default strategy when
not given. The strategy should allow at least size and hop keyword
arguments, besides a first positional argument for the iterable with
blocks. If ``ola=None``, the result from using the STFT processor will be
the ``Stream`` of blocks that would be the overlap-add input.
ola_* :
Extra keyword parameters for the overlap-add strategy, if any. The extra
``ola_`` prefix is removed when calling it. See the overlap-add strategy
docs for more information about the valid parameters.
Returns
-------
A function with the same parameters above, besides ``func``, which is
replaced by the signal input (if func was given). The parameters used when
building the function should be seen as defaults that can be changed when
calling the resulting function with the respective keyword arguments.
Examples
--------
Let's process something:
>>> my_signal = Stream(.1, .3, -.1, -.3, .5, .4, .3)
Wrapping directly the processor function:
>>> processor_w = stft(abs, size=64)
>>> sig = my_signal.copy() # Any iterable
>>> processor_w(sig)
<audiolazy.lazy_stream.Stream object at 0x...>
>>> peek200_w = _.peek(200) # Needs Numpy
>>> type(peek200_w[0]).__name__ # Result is a signal (numpy.float64 data)
'float64'
Keyword parameters in a partial evaluation style (can be reassigned):
>>> stft64 = stft(size=64) # Same to ``stft`` but with other defaults
>>> processor_p = stft64(abs)
>>> sig = my_signal.copy() # Any iterable
>>> processor_p(sig)
<audiolazy.lazy_stream.Stream object at 0x...>
>>> _.peek(200) == peek200_w # This should do the same thing
True
As a decorator, this time with other windowing configuration:
>>> stft64hann = stft64(wnd=window.hann, ola_wnd=window.hann)
>>> @stft64hann # stft(...) can also be used as an anonymous decorator
... def processor_d(blk):
... return abs(blk)
>>> processor_d(sig) # This leads to a different result
<audiolazy.lazy_stream.Stream object at 0x...>
>>> _.peek(200) == peek200_w
False
You can also use other iterables as input, and keep the parameters to be
passed afterwards, as well as change transform calculation:
>>> stft_no_zero_phase = stft(before=None, after=None)
>>> stft_no_wnd = stft_no_zero_phase(ola=overlap_add.list, ola_wnd=None,
... ola_normalize=False)
>>> on_blocks = stft_no_wnd(transform=None, inverse_transform=None)
>>> processor_a = on_blocks(reversed, hop=4) # Reverse
>>> processor_a([1, 2, 3, 4, 5], size=4, hop=2)
<audiolazy.lazy_stream.Stream object at 0x...>
>>> list(_) # From blocks [1, 2, 3, 4] and [3, 4, 5, 0.0]
[4.0, 3.0, 2.0, 6, 4, 3]
>>> processor_a([1, 2, 3, 4, 5], size=4) # Default hop instead
<audiolazy.lazy_stream.Stream object at 0x...>
>>> list(_) # No overlap, blocks [1, 2, 3, 4] and [5, 0.0, 0.0, 0.0]
[4, 3, 2, 1, 0.0, 0.0, 0.0, 5]
>>> processor_a([1, 2, 3, 4, 5]) # Size was never given
Traceback (most recent call last):
...
TypeError: Missing 'size' argument
For analysis only, one can set ``ola=None``:
>>> from numpy.fft import ifftshift # [1, 2, 3, 4, 5] -> [3, 4, 5, 1, 2]
>>> analyzer = stft(ifftshift, ola=None, size=8, hop=2)
>>> sig = Stream(1, 0, -1, 0) # A pi/2 rad/sample cosine signal
>>> result = analyzer(sig)
>>> result
<audiolazy.lazy_stream.Stream object at 0x...>
Let's see the result contents. That processing "rotates" the frequencies,
converting the original ``[0, 0, 4, 0, 0]`` real FFT block to a
``[4, 0, 0, 0, 0]`` block, which means the block cosine was moved to
a DC-only signal keeping original energy/integral:
>>> result.take()
array([ 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5])
>>> result.take() # From [0, 0, -4, 0, 0] to [-4, 0, 0, 0, 0]
array([-0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5])
Note
----
Parameters should be passed as keyword arguments. The only exception
is ``func`` for this function and ``sig`` for the returned function,
  which are always the first positional argument, and also the one that
shouldn't appear when using this function as a decorator.
Hint
----
1. When using Numpy FFT, one can keep data in place and return the
changed input block to save time;
2. Actually, there's nothing in this function that imposes FFT or Numpy
besides the default values. One can still use this even for other
transforms that have nothing to do with the Fourier Transform.
See Also
--------
overlap_add :
Overlap-add algorithm for an iterable (e.g. a Stream instance) of blocks
(sequences such as lists or Numpy arrays). It's also a StrategyDict.
window :
Window/apodization/tapering functions for a given size as a StrategyDict.
"""
# Using as a decorator or to "replicate" this function with other defaults
if func is None:
cfi = chain.from_iterable
mix_dict = lambda *dicts: dict(cfi(iteritems(d) for d in dicts))
result = lambda f=None, **new_kws: stft(f, **mix_dict(kwparams, new_kws))
return result
# Using directly
@tostream
@wraps(func)
def wrapper(sig, **kwargs):
kws = kwparams.copy()
kws.update(kwargs)
if "size" not in kws:
raise TypeError("Missing 'size' argument")
if "hop" in kws and kws["hop"] > kws["size"]:
raise ValueError("Hop value can't be higher than size")
blk_params = {"size": kws.pop("size")}
blk_params["hop"] = kws.pop("hop", None)
ola_params = blk_params.copy() # Size and hop
blk_params["wnd"] = kws.pop("wnd", None)
ola = kws.pop("ola", overlap_add)
class NotSpecified(object):
pass
for name in ["transform", "inverse_transform", "before", "after"]:
blk_params[name] = kws.pop(name, NotSpecified)
for k, v in kws.items():
if k.startswith("ola_"):
if ola is not None:
ola_params[k[len("ola_"):]] = v
else:
raise TypeError("Extra '{}' argument with no overlap-add "
"strategy".format(k))
else:
raise TypeError("Unknown '{}' extra argument".format(k))
def blk_gen(size, hop, wnd, transform, inverse_transform, before, after):
if transform is NotSpecified:
from numpy.fft import rfft as transform
if inverse_transform is NotSpecified:
from numpy.fft import irfft as inverse_transform
if before is NotSpecified:
from numpy.fft import ifftshift as before
if after is NotSpecified:
from numpy.fft import fftshift as after
# Find the right windowing function to be applied
if callable(wnd) and not isinstance(wnd, Stream):
wnd = wnd(size)
if isinstance(wnd, Iterable):
wnd = list(wnd)
if len(wnd) != size:
raise ValueError("Incompatible window size")
elif wnd is not None:
raise TypeError("Window should be an iterable or a callable")
# Pad size lambdas
trans = transform and (lambda blk: transform(blk, size))
itrans = inverse_transform and (lambda blk:
inverse_transform(blk, size))
# Continuation style calling
funcs = [f for f in [before, trans, func, itrans, after]
if f is not None]
process = lambda blk: reduce(lambda data, f: f(data), funcs, blk)
if wnd is None:
for blk in Stream(sig).blocks(size=size, hop=hop):
yield process(blk)
else:
blk_with_wnd = wnd[:]
mul = operator.mul
for blk in Stream(sig).blocks(size=size, hop=hop):
blk_with_wnd[:] = xmap(mul, blk, wnd)
yield process(blk_with_wnd)
if ola is None:
return blk_gen(**blk_params)
else:
return ola(blk_gen(**blk_params), **ola_params)
return wrapper | 0.00396 |
def source_properties(data, segment_img, error=None, mask=None,
background=None, filter_kernel=None, wcs=None,
labels=None):
"""
Calculate photometry and morphological properties of sources defined
by a labeled segmentation image.
Parameters
----------
data : array_like or `~astropy.units.Quantity`
The 2D array from which to calculate the source photometry and
properties. ``data`` should be background-subtracted.
Non-finite ``data`` values (e.g. NaN or inf) are automatically
masked.
segment_img : `SegmentationImage` or array_like (int)
A 2D segmentation image, either as a `SegmentationImage` object
or an `~numpy.ndarray`, with the same shape as ``data`` where
sources are labeled by different positive integer values. A
value of zero is reserved for the background.
error : array_like or `~astropy.units.Quantity`, optional
The total error array corresponding to the input ``data`` array.
``error`` is assumed to include *all* sources of error,
including the Poisson error of the sources (see
        `~photutils.utils.calc_total_error`). ``error`` must have the
same shape as the input ``data``. Non-finite ``error`` values
(e.g. NaN or inf) are not automatically masked, unless they are
at the same position of non-finite values in the input ``data``
array. Such pixels can be masked using the ``mask`` keyword.
See the Notes section below for details on the error
propagation.
mask : array_like (bool), optional
A boolean mask with the same shape as ``data`` where a `True`
value indicates the corresponding element of ``data`` is masked.
Masked data are excluded from all calculations. Non-finite
values (e.g. NaN or inf) in the input ``data`` are automatically
masked.
background : float, array_like, or `~astropy.units.Quantity`, optional
The background level that was *previously* present in the input
``data``. ``background`` may either be a scalar value or a 2D
image with the same shape as the input ``data``. Inputting the
``background`` merely allows for its properties to be measured
within each source segment. The input ``background`` does *not*
get subtracted from the input ``data``, which should already be
background-subtracted. Non-finite ``background`` values (e.g.
NaN or inf) are not automatically masked, unless they are at the
same position of non-finite values in the input ``data`` array.
Such pixels can be masked using the ``mask`` keyword.
filter_kernel : array-like (2D) or `~astropy.convolution.Kernel2D`, optional
The 2D array of the kernel used to filter the data prior to
calculating the source centroid and morphological parameters.
The kernel should be the same one used in defining the source
segments, i.e. the detection image (e.g., see
:func:`~photutils.detect_sources`). If `None`, then the
unfiltered ``data`` will be used instead.
wcs : `~astropy.wcs.WCS`
The WCS transformation to use. If `None`, then any sky-based
properties will be set to `None`.
labels : int, array-like (1D, int)
The segmentation labels for which to calculate source
properties. If `None` (default), then the properties will be
calculated for all labeled sources.
Returns
-------
output : `SourceCatalog` instance
A `SourceCatalog` instance containing the properties of each
source.
Notes
-----
`SExtractor`_'s centroid and morphological parameters are always
calculated from a filtered "detection" image, i.e. the image used to
define the segmentation image. The usual downside of the filtering
    is that the sources will be made more circular than they actually are.
If you wish to reproduce `SExtractor`_ centroid and morphology
results, then input a filtered and background-subtracted "detection"
image into the ``filtered_data`` keyword. If ``filtered_data`` is
`None`, then the unfiltered ``data`` will be used for the source
centroid and morphological parameters.
Negative data values (``filtered_data`` or ``data``) within the
source segment are set to zero when calculating morphological
properties based on image moments. Negative values could occur, for
example, if the segmentation image was defined from a different
image (e.g., different bandpass) or if the background was
oversubtracted. Note that `~photutils.SourceProperties.source_sum`
always includes the contribution of negative ``data`` values.
The input ``error`` is assumed to include *all* sources of error,
including the Poisson error of the sources.
`~photutils.SourceProperties.source_sum_err` is simply the
quadrature sum of the pixel-wise total errors over the non-masked
pixels within the source segment:
.. math:: \\Delta F = \\sqrt{\\sum_{i \\in S}
\\sigma_{\\mathrm{tot}, i}^2}
where :math:`\\Delta F` is
`~photutils.SourceProperties.source_sum_err`, :math:`S` are the
non-masked pixels in the source segment, and
:math:`\\sigma_{\\mathrm{tot}, i}` is the input ``error`` array.
.. _SExtractor: http://www.astromatic.net/software/sextractor
See Also
--------
SegmentationImage, SourceProperties, detect_sources
Examples
--------
>>> import numpy as np
>>> from photutils import SegmentationImage, source_properties
>>> image = np.arange(16.).reshape(4, 4)
>>> print(image) # doctest: +SKIP
[[ 0. 1. 2. 3.]
[ 4. 5. 6. 7.]
[ 8. 9. 10. 11.]
[12. 13. 14. 15.]]
>>> segm = SegmentationImage([[1, 1, 0, 0],
... [1, 0, 0, 2],
... [0, 0, 2, 2],
... [0, 2, 2, 0]])
>>> props = source_properties(image, segm)
Print some properties of the first object (labeled with ``1`` in the
segmentation image):
>>> props[0].id # id corresponds to segment label number
1
>>> props[0].centroid # doctest: +FLOAT_CMP
<Quantity [0.8, 0.2] pix>
>>> props[0].source_sum # doctest: +FLOAT_CMP
5.0
>>> props[0].area # doctest: +FLOAT_CMP
<Quantity 3. pix2>
>>> props[0].max_value # doctest: +FLOAT_CMP
4.0
Print some properties of the second object (labeled with ``2`` in
the segmentation image):
>>> props[1].id # id corresponds to segment label number
2
>>> props[1].centroid # doctest: +FLOAT_CMP
<Quantity [2.36363636, 2.09090909] pix>
>>> props[1].perimeter # doctest: +FLOAT_CMP
<Quantity 5.41421356 pix>
>>> props[1].orientation # doctest: +FLOAT_CMP
<Quantity -0.74175931 rad>
"""
if not isinstance(segment_img, SegmentationImage):
segment_img = SegmentationImage(segment_img)
if segment_img.shape != data.shape:
raise ValueError('segment_img and data must have the same shape.')
# filter the data once, instead of repeating for each source
if filter_kernel is not None:
filtered_data = filter_data(data, filter_kernel, mode='constant',
fill_value=0.0, check_normalization=True)
else:
filtered_data = None
if labels is None:
labels = segment_img.labels
labels = np.atleast_1d(labels)
sources_props = []
for label in labels:
if label not in segment_img.labels:
warnings.warn('label {} is not in the segmentation image.'
.format(label), AstropyUserWarning)
continue # skip invalid labels
sources_props.append(SourceProperties(
data, segment_img, label, filtered_data=filtered_data,
error=error, mask=mask, background=background, wcs=wcs))
if len(sources_props) == 0:
raise ValueError('No sources are defined.')
return SourceCatalog(sources_props, wcs=wcs) | 0.000245 |
def get_page_content(id):
"""Return XHTML content of a page.
Parameters:
- id: id of a Confluence page.
"""
data = _json.loads(_api.rest("/" + str(id) + "?expand=body.storage"))
return data["body"]["storage"]["value"] | 0.004149 |
def Deserialize(self, reader):
"""
Deserialize full object.
Args:
reader (neocore.IO.BinaryReader):
"""
super(StorageItem, self).Deserialize(reader)
self.Value = reader.ReadVarBytes() | 0.008197 |
def current_model(self, controller_name=None, model_only=False):
'''Return the current model, qualified by its controller name.
If controller_name is specified, the current model for
that controller will be returned.
If model_only is true, only the model name, not qualified by
its controller name, will be returned.
'''
# TODO respect JUJU_MODEL environment variable.
if not controller_name:
controller_name = self.current_controller()
if not controller_name:
raise JujuError('No current controller')
models = self.models()[controller_name]
if 'current-model' not in models:
return None
if model_only:
return models['current-model']
return controller_name + ':' + models['current-model'] | 0.002378 |
def set_pickle_converters(encode, decode):
"""
Modify the default Pickle conversion functions. This affects all
:class:`~couchbase.bucket.Bucket` instances.
These functions will be called instead of the default ones
(``pickle.dumps`` and ``pickle.loads``) to encode and decode values to and
from the Pickle format (when :const:`FMT_PICKLE` is used).
:param callable encode: Callable to invoke when encoding an object to
Pickle. This should have the same prototype as ``pickle.dumps`` with
the exception that it is only ever called with a single argument
:param callable decode: Callable to invoke when decoding a Pickle encoded
object to a Python object. Should have the same prototype as
``pickle.loads`` with the exception that it is only ever passed a
single argument
:return: A tuple of ``(old encoder, old decoder)``
No exceptions are raised and it is the responsibility of the caller to
ensure that the provided functions operate correctly.
"""
ret = _LCB._modify_helpers(pickle_encode=encode, pickle_decode=decode)
return (ret['pickle_encode'], ret['pickle_decode']) | 0.000852 |
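A hedged sketch of what a custom converter pair might look like; the converters below are illustrative stand-ins built on the standard pickle module, and the couchbase call itself is left commented out:
import pickle

def my_encode(obj):
    # e.g. pin the pickle protocol for cross-version compatibility
    return pickle.dumps(obj, protocol=2)

def my_decode(data):
    return pickle.loads(data)

# old_encode, old_decode = set_pickle_converters(my_encode, my_decode)
print(my_decode(my_encode({'answer': 42})))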
def rollback_one_iter(self):
"""Rollback one iteration.
Returns
-------
self : Booster
Booster with rolled back one iteration.
"""
_safe_call(_LIB.LGBM_BoosterRollbackOneIter(
self.handle))
self.__is_predicted_cur_iter = [False for _ in range_(self.__num_dataset)]
return self | 0.008219 |
def _packb3(obj, **options):
"""
Serialize a Python object into MessagePack bytes.
Args:
obj: a Python object
Kwargs:
ext_handlers (dict): dictionary of Ext handlers, mapping a custom type
to a callable that packs an instance of the type
into an Ext object
force_float_precision (str): "single" to force packing floats as
IEEE-754 single-precision floats,
"double" to force packing floats as
IEEE-754 double-precision floats.
Returns:
A 'bytes' containing serialized MessagePack bytes.
Raises:
UnsupportedType(PackException):
Object type not supported for packing.
Example:
>>> umsgpack.packb({u"compact": True, u"schema": 0})
b'\x82\xa7compact\xc3\xa6schema\x00'
>>>
"""
fp = io.BytesIO()
_pack3(obj, fp, **options)
return fp.getvalue() | 0.000981 |
def get_multiplicon_leaves(self, redundant=False):
""" Return a generator of the IDs of multiplicons found at leaves
of the tree (i.e. from which no further multiplicons were derived).
Arguments:
o redundant - if true, report redundant multiplicons
"""
for node in self._multiplicon_graph.nodes():
if not len(self._multiplicon_graph.out_edges(node)):
if not self.is_redundant_multiplicon(node):
yield node
elif redundant:
yield node
else:
continue
else:
continue | 0.002976 |
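The underlying idea -- a leaf is a node with no outgoing edges -- can be shown directly with networkx (assumed installed); the toy graph below is not the original data:
import networkx as nx

g = nx.DiGraph()
g.add_edges_from([(1, 2), (1, 3), (3, 4)])
leaves = [n for n in g.nodes() if g.out_degree(n) == 0]
print(leaves)  # [2, 4]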
def sample_wr(population, k):
"Chooses k random elements (with replacement) from a population"
n = len(population)
_random, _int = random.random, int # speed hack
result = [None] * k
for i in xrange(k):
j = _int(_random() * n)
result[i] = population[j]
return result | 0.006329 |
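The function above targets Python 2 (``xrange``); under Python 3 the same with-replacement sampling is available from the standard library:
import random

print(random.choices(['a', 'b', 'c'], k=5))  # e.g. ['c', 'a', 'a', 'b', 'c']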
def check_key(request):
"""
Check to see if we already have an access_key stored,
if we do then we have already gone through
OAuth. If not then we haven't and we probably need to.
"""
try:
access_key = request.session.get('oauth_token', None)
if not access_key:
return False
except KeyError:
return False
return True | 0.002604 |
def install_ui_colorscheme(self, name, style_dict):
"""
Install a new UI color scheme.
"""
assert isinstance(name, six.text_type)
assert isinstance(style_dict, dict)
self.ui_styles[name] = style_dict | 0.008065 |
def get_options_for_model(self, model):
"""
Thin wrapper around ``_get_options_for_model`` to preserve the
        semantics of raising an exception for models not directly registered.
"""
opts = self._get_options_for_model(model)
if not opts.registered and not opts.related:
raise NotRegistered('The model "%s" is not registered for '
'translation' % model.__name__)
return opts | 0.004274 |
def _add_colorbar(self, m, CS, ax, name):
''' Add colorbar to the map instance '''
cb = m.colorbar(CS, "right", size="5%", pad="2%")
cb.set_label(name, size=34)
cb.ax.tick_params(labelsize=18) | 0.008889 |
def enter(self, container_alias):
'''
a method to open up a terminal inside a running container
:param container_alias: string with name or id of container
:return: None
'''
title = '%s.enter' % self.__class__.__name__
# validate inputs
input_fields = {
'container_alias': container_alias
}
for key, value in input_fields.items():
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# compose system command
from os import system
sys_cmd = 'docker exec -it %s sh' % container_alias
        if self.localhost.os.sysname in ('Windows',):
sys_cmd = 'winpty %s' % sys_cmd
# open up terminal
system(sys_cmd) | 0.005794 |
def getTextTitle(self):
"""Return a title for texts and listings
"""
request_id = self.getRequestID()
if not request_id:
return ""
analysis = self.getAnalysis()
if not analysis:
return request_id
return "%s - %s" % (request_id, analysis.Title()) | 0.006116 |
def transform_title(self, content_metadata_item):
"""
Return the title of the content item.
"""
title_with_locales = []
for locale in self.enterprise_configuration.get_locales():
title_with_locales.append({
'locale': locale,
'value': content_metadata_item.get('title', '')
})
return title_with_locales | 0.004914 |
def next_datetime(min_year = None, max_year = None):
"""
Generates a random Date and time in the range ['minYear', 'maxYear'].
    The date component is drawn from the given year range and combined with a time from RandomDateTime.next_time().
:param min_year: (optional) minimum range value
:param max_year: max range value
:return: a random Date and time value.
"""
date = RandomDateTime.next_date(min_year, max_year).date()
time = RandomDateTime.next_time()
return datetime.datetime.combine(date, time) | 0.011173 |
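A self-contained sketch of the combine step with a stand-in date generator in place of RandomDateTime; the year range is an arbitrary illustration:
import datetime
import random

def random_date(min_year=2000, max_year=2030):
    start = datetime.date(min_year, 1, 1)
    span = (datetime.date(max_year, 12, 31) - start).days
    return start + datetime.timedelta(days=random.randint(0, span))

# combine a random date with a fixed (or random) time of day
print(datetime.datetime.combine(random_date(), datetime.time(12, 30)))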
def sort_card_indices(cards, indices, ranks=None):
"""
Sorts the given Deck indices by the given ranks. Must also supply the
``Stack``, ``Deck``, or ``list`` that the indices are from.
:arg cards:
The cards the indices are from. Can be a ``Stack``, ``Deck``, or
``list``
:arg list indices:
The indices to sort.
:arg dict ranks:
The rank dict to reference for sorting. If ``None``, it will
default to ``DEFAULT_RANKS``.
:returns:
The sorted indices.
"""
ranks = ranks or DEFAULT_RANKS
if ranks.get("suits"):
indices = sorted(
indices,
key=lambda x: ranks["suits"][cards[x].suit] if
            cards[x].suit is not None else 0
)
if ranks.get("values"):
indices = sorted(
indices,
key=lambda x: ranks["values"][cards[x].value]
)
return indices | 0.003254 |
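A minimal illustration of sorting indices through a rank lookup, with a toy card type and rank table standing in for the real Stack/Deck and DEFAULT_RANKS:
from collections import namedtuple

Card = namedtuple('Card', ['value', 'suit'])
cards = [Card('King', 'Hearts'), Card('2', 'Spades'), Card('Ace', 'Clubs')]
values = {'2': 2, 'King': 13, 'Ace': 14}

indices = sorted(range(len(cards)), key=lambda i: values[cards[i].value])
print(indices)  # [1, 0, 2]: the 2 sorts first, the Ace last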
def _case_stmt(self, stmt: Statement, sctx: SchemaContext) -> None:
"""Handle case statement."""
self._handle_child(CaseNode(), stmt, sctx) | 0.012903 |
def labels(ctx):
"""Crate or update labels in github
"""
config = ctx.obj['agile']
repos = config.get('repositories')
labels = config.get('labels')
if not isinstance(repos, list):
raise CommandError(
            'You need to specify the "repositories" list in the config'
)
if not isinstance(labels, dict):
raise CommandError(
'You need to specify the "labels" dictionary in the config'
)
git = GithubApi()
for repo in repos:
repo = git.repo(repo)
for label, color in labels.items():
if repo.label(label, color):
click.echo('Created label "%s" @ %s' % (label, repo))
else:
click.echo('Updated label "%s" @ %s' % (label, repo)) | 0.001297 |
def _set_collector_vrf(self, v, load=False):
"""
Setter method for collector_vrf, mapped from YANG variable /sflow/collector_vrf (common-def:vrf-name)
If this variable is read-only (config: false) in the
source YANG file, then _set_collector_vrf is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_collector_vrf() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((([a-zA-Z0-9_]([a-zA-Z0-9\\-_]){0,61})?[a-zA-Z0-9]\\.)*([a-zA-Z0-9_]([a-zA-Z0-9\\-_]){0,61})?[a-zA-Z0-9]\\.?)|\\.', 'length': [u'1..32']}), is_leaf=True, yang_name="collector-vrf", rest_name="collector-vrf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Sflow Collector VRF Configuration', u'hidden': u'full'}}, namespace='urn:brocade.com:mgmt:brocade-sflow', defining_module='brocade-sflow', yang_type='common-def:vrf-name', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """collector_vrf must be of a type compatible with common-def:vrf-name""",
'defined-type': "common-def:vrf-name",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((([a-zA-Z0-9_]([a-zA-Z0-9\\-_]){0,61})?[a-zA-Z0-9]\\.)*([a-zA-Z0-9_]([a-zA-Z0-9\\-_]){0,61})?[a-zA-Z0-9]\\.?)|\\.', 'length': [u'1..32']}), is_leaf=True, yang_name="collector-vrf", rest_name="collector-vrf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Sflow Collector VRF Configuration', u'hidden': u'full'}}, namespace='urn:brocade.com:mgmt:brocade-sflow', defining_module='brocade-sflow', yang_type='common-def:vrf-name', is_config=True)""",
})
self.__collector_vrf = t
if hasattr(self, '_set'):
self._set() | 0.00484 |
def _catch_errors(a_func, to_catch):
"""Updates a_func to wrap exceptions with GaxError
Args:
a_func (callable): A callable.
to_catch (list[Exception]): Configures the exceptions to wrap.
Returns:
Callable: A function that will wrap certain exceptions with GaxError
"""
def inner(*args, **kwargs):
"""Wraps specified exceptions"""
try:
return a_func(*args, **kwargs)
# pylint: disable=catching-non-exception
except tuple(to_catch) as exception:
utils.raise_with_traceback(
gax.errors.create_error('RPC failed', cause=exception))
return inner | 0.001506 |
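The wrap-and-reraise pattern in isolation, using a plain wrapper exception instead of GaxError; every name below is made up for illustration:
class WrappedError(Exception):
    pass

def catch_errors(a_func, to_catch):
    def inner(*args, **kwargs):
        try:
            return a_func(*args, **kwargs)
        except tuple(to_catch) as exc:
            raise WrappedError('RPC failed') from exc
    return inner

safe_int = catch_errors(int, [ValueError])
try:
    safe_int('not a number')
except WrappedError as err:
    print('wrapped:', repr(err.__cause__))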
async def raw(self, command, *args, _conn=None, **kwargs):
"""
Send the raw command to the underlying client. Note that by using this CMD you
will lose compatibility with other backends.
Due to limitations with aiomcache client, args have to be provided as bytes.
        For the rest of the backends, str.
:param command: str with the command.
:param timeout: int or float in seconds specifying maximum timeout
for the operations to last
:returns: whatever the underlying client returns
:raises: :class:`asyncio.TimeoutError` if it lasts more than self.timeout
"""
start = time.monotonic()
ret = await self._raw(
command, *args, encoding=self.serializer.encoding, _conn=_conn, **kwargs
)
logger.debug("%s (%.4f)s", command, time.monotonic() - start)
return ret | 0.006734 |
def as_tensor_dict(self,
padding_lengths: Dict[str, Dict[str, int]] = None,
verbose: bool = False) -> Dict[str, Union[torch.Tensor, Dict[str, torch.Tensor]]]:
# This complex return type is actually predefined elsewhere as a DataArray,
# but we can't use it because mypy doesn't like it.
"""
This method converts this ``Batch`` into a set of pytorch Tensors that can be passed
through a model. In order for the tensors to be valid tensors, all ``Instances`` in this
batch need to be padded to the same lengths wherever padding is necessary, so we do that
first, then we combine all of the tensors for each field in each instance into a set of
batched tensors for each field.
Parameters
----------
padding_lengths : ``Dict[str, Dict[str, int]]``
If a key is present in this dictionary with a non-``None`` value, we will pad to that
length instead of the length calculated from the data. This lets you, e.g., set a
maximum value for sentence length if you want to throw out long sequences.
Entries in this dictionary are keyed first by field name (e.g., "question"), then by
padding key (e.g., "num_tokens").
verbose : ``bool``, optional (default=``False``)
Should we output logging information when we're doing this padding? If the batch is
large, this is nice to have, because padding a large batch could take a long time.
But if you're doing this inside of a data generator, having all of this output per
batch is a bit obnoxious (and really slow).
Returns
-------
tensors : ``Dict[str, DataArray]``
A dictionary of tensors, keyed by field name, suitable for passing as input to a model.
This is a `batch` of instances, so, e.g., if the instances have a "question" field and
an "answer" field, the "question" fields for all of the instances will be grouped
together into a single tensor, and the "answer" fields for all instances will be
similarly grouped in a parallel set of tensors, for batched computation. Additionally,
for complex ``Fields``, the value of the dictionary key is not necessarily a single
tensor. For example, with the ``TextField``, the output is a dictionary mapping
``TokenIndexer`` keys to tensors. The number of elements in this sub-dictionary
therefore corresponds to the number of ``TokenIndexers`` used to index the
``TextField``. Each ``Field`` class is responsible for batching its own output.
"""
if padding_lengths is None:
padding_lengths = defaultdict(dict)
# First we need to decide _how much_ to pad. To do that, we find the max length for all
# relevant padding decisions from the instances themselves. Then we check whether we were
# given a max length for a particular field and padding key. If we were, we use that
# instead of the instance-based one.
if verbose:
logger.info("Padding batch of size %d to lengths %s", len(self.instances), str(padding_lengths))
logger.info("Getting max lengths from instances")
instance_padding_lengths = self.get_padding_lengths()
if verbose:
logger.info("Instance max lengths: %s", str(instance_padding_lengths))
lengths_to_use: Dict[str, Dict[str, int]] = defaultdict(dict)
for field_name, instance_field_lengths in instance_padding_lengths.items():
for padding_key in instance_field_lengths.keys():
if padding_lengths[field_name].get(padding_key) is not None:
lengths_to_use[field_name][padding_key] = padding_lengths[field_name][padding_key]
else:
lengths_to_use[field_name][padding_key] = instance_field_lengths[padding_key]
# Now we actually pad the instances to tensors.
field_tensors: Dict[str, list] = defaultdict(list)
if verbose:
logger.info("Now actually padding instances to length: %s", str(lengths_to_use))
for instance in self.instances:
for field, tensors in instance.as_tensor_dict(lengths_to_use).items():
field_tensors[field].append(tensors)
# Finally, we combine the tensors that we got for each instance into one big tensor (or set
# of tensors) per field. The `Field` classes themselves have the logic for batching the
# tensors together, so we grab a dictionary of field_name -> field class from the first
# instance in the batch.
field_classes = self.instances[0].fields
final_fields = {}
for field_name, field_tensor_list in field_tensors.items():
final_fields[field_name] = field_classes[field_name].batch_tensors(field_tensor_list)
return final_fields | 0.00855 |
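A toy sketch of the pad-then-batch step in isolation (assuming PyTorch is installed); the field and vocabulary machinery of the real Batch is omitted:
import torch
import torch.nn.functional as F

instances = [torch.tensor([1, 2, 3]), torch.tensor([4, 5]), torch.tensor([6])]
max_len = max(t.size(0) for t in instances)           # padding length for the batch
padded = [F.pad(t, (0, max_len - t.size(0))) for t in instances]
batch = torch.stack(padded)                           # one batched tensor per field
print(batch)
# tensor([[1, 2, 3],
#         [4, 5, 0],
#         [6, 0, 0]])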
def p_pause(p):
""" statement : PAUSE expr
"""
p[0] = make_sentence('PAUSE',
make_typecast(TYPE.uinteger, p[2], p.lineno(1))) | 0.006173 |
def json_processor(entity):
'''
Unserialize raw POST data in JSON format to a Python data structure.
:param entity: raw POST data
'''
if six.PY2:
body = entity.fp.read()
else:
# https://github.com/cherrypy/cherrypy/pull/1572
contents = BytesIO()
body = entity.fp.read(fp_out=contents)
contents.seek(0)
body = salt.utils.stringutils.to_unicode(contents.read())
del contents
try:
cherrypy.serving.request.unserialized_data = salt.utils.json.loads(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid JSON document')
cherrypy.serving.request.raw_body = body | 0.002963 |
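A framework-free sketch of the same decode step; the BytesIO body below stands in for the CherryPy entity object:
import io
import json

body = io.BytesIO(b'{"fun": "test.ping"}')
raw = body.read().decode('utf-8')
try:
    data = json.loads(raw)
except ValueError:
    raise ValueError('Invalid JSON document')
print(data)  # {'fun': 'test.ping'}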
def OAuthClient(
domain,
consumer_key,
consumer_secret,
token,
token_secret,
user_agent=None,
request_encoder=default_request_encoder,
response_decoder=default_response_decoder
):
"""Creates a Freshbooks client for a freshbooks domain, using
OAuth. Token management is assumed to have been handled out of band.
The optional request_encoder and response_decoder parameters can be
passed the logging_request_encoder and logging_response_decoder objects
from this module, or custom encoders, to aid debugging or change the
behaviour of refreshbooks' request-to-XML-to-response mapping.
The optional user_agent keyword parameter can be used to specify the
user agent string passed to FreshBooks. If unset, a default user agent
string is used.
"""
return _create_oauth_client(
AuthorizingClient,
domain,
consumer_key,
consumer_secret,
token,
token_secret,
user_agent=user_agent,
request_encoder=request_encoder,
response_decoder=response_decoder
) | 0.002717 |
def fit_from_cfg(cls, choosers, chosen_fname, alternatives, cfgname, outcfgname=None):
"""
Parameters
----------
choosers : DataFrame
A dataframe in which rows represent choosers.
chosen_fname : string
A string indicating the column in the choosers dataframe which
gives which alternatives the choosers have chosen.
alternatives : DataFrame
A table of alternatives. It should include the choices
from the choosers table as well as other alternatives from
which to sample. Values in choosers[chosen_fname] should index
into the alternatives dataframe.
cfgname : string
The name of the yaml config file from which to read the discrete
choice model.
outcfgname : string, optional (default cfgname)
The name of the output yaml config file where estimation results are written into.
Returns
-------
lcm : MNLDiscreteChoiceModel which was used to fit
"""
logger.debug('start: fit from configuration {}'.format(cfgname))
lcm = cls.from_yaml(str_or_buffer=cfgname)
lcm.fit(choosers, alternatives, choosers[chosen_fname])
lcm.report_fit()
outcfgname = outcfgname or cfgname
lcm.to_yaml(str_or_buffer=outcfgname)
logger.debug('finish: fit into configuration {}'.format(outcfgname))
return lcm | 0.002732 |
def standardized_compound(self):
"""Return the :class:`~pubchempy.Compound` that was produced when this Substance was standardized.
Requires an extra request. Result is cached.
"""
for c in self.record['compound']:
if c['id']['type'] == CompoundIdType.STANDARDIZED:
return Compound.from_cid(c['id']['id']['cid']) | 0.008043 |