text (string, 78–104k chars) | score (float64, 0–0.18)
---|---
def get_files(file_tokens, cwd=None):
""" Given a list of parser file tokens, return a list of input objects
for them.
"""
if not file_tokens:
return []
token = file_tokens.pop()
try:
filename = token.filename
except AttributeError:
filename = ''
if cwd:
input = Input(token.alias, filename, cwd=cwd)
else:
input = Input(token.alias, filename)
    return [input] + get_files(file_tokens, cwd=cwd) | 0.002146 |
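# The recursion above exists only to walk the token list; for reference, a
# hedged iterative sketch with the same behaviour (Input and the token
# objects are the ones already assumed by get_files above):
def get_files_iterative(file_tokens, cwd=None):
    """Iterative equivalent of get_files: consume tokens from the end of the
    list, mirroring the pop() order of the recursive version."""
    inputs = []
    while file_tokens:
        token = file_tokens.pop()
        filename = getattr(token, 'filename', '')
        if cwd:
            inputs.append(Input(token.alias, filename, cwd=cwd))
        else:
            inputs.append(Input(token.alias, filename))
    return inputs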
def gaussApprox(self,xy,**kwargs):
"""
NAME:
gaussApprox
PURPOSE:
return the mean and variance of a Gaussian approximation to the stream DF at a given phase-space point in Galactocentric rectangular coordinates (distribution is over missing directions)
INPUT:
xy - phase-space point [X,Y,Z,vX,vY,vZ]; the distribution of the dimensions set to None is returned
interp= (object-wide interp default) if True, use the interpolated stream track
cindx= index of the closest point on the (interpolated) stream track if not given, determined from the dimensions given
lb= (False) if True, xy contains [l,b,D,vlos,pmll,pmbb] in [deg,deg,kpc,km/s,mas/yr,mas/yr] and the Gaussian approximation in these coordinates is returned
OUTPUT:
(mean,variance) of the approximate Gaussian DF for the missing directions in xy
HISTORY:
2013-12-12 - Written - Bovy (IAS)
"""
interp= kwargs.get('interp',self._useInterp)
lb= kwargs.get('lb',False)
#What are we looking for
coordGiven= numpy.array([not x is None for x in xy],dtype='bool')
nGiven= numpy.sum(coordGiven)
#First find the nearest track point
if not 'cindx' in kwargs and lb:
cindx= self._find_closest_trackpointLB(*xy,interp=interp,
usev=True)
elif not 'cindx' in kwargs and not lb:
cindx= self._find_closest_trackpoint(*xy,xy=True,interp=interp,
usev=True)
else:
cindx= kwargs['cindx']
#Get the covariance matrix
if interp and lb:
tcov= self._interpolatedAllErrCovsLBUnscaled[cindx]
tmean= self._interpolatedObsTrackLB[cindx]
elif interp and not lb:
tcov= self._interpolatedAllErrCovsXY[cindx]
tmean= self._interpolatedObsTrackXY[cindx]
elif not interp and lb:
tcov= self._allErrCovsLBUnscaled[cindx]
tmean= self._ObsTrackLB[cindx]
elif not interp and not lb:
tcov= self._allErrCovsXY[cindx]
tmean= self._ObsTrackXY[cindx]
if lb:#Apply scale factors
tcov= copy.copy(tcov)
tcov*= numpy.tile(self._ErrCovsLBScale,(6,1))
tcov*= numpy.tile(self._ErrCovsLBScale,(6,1)).T
#Fancy indexing to recover V22, V11, and V12; V22, V11, V12 as in Appendix B of 0905.2979v1
V11indx0= numpy.array([[ii for jj in range(6-nGiven)] for ii in range(6) if not coordGiven[ii]])
V11indx1= numpy.array([[ii for ii in range(6) if not coordGiven[ii]] for jj in range(6-nGiven)])
V11= tcov[V11indx0,V11indx1]
V22indx0= numpy.array([[ii for jj in range(nGiven)] for ii in range(6) if coordGiven[ii]])
V22indx1= numpy.array([[ii for ii in range(6) if coordGiven[ii]] for jj in range(nGiven)])
V22= tcov[V22indx0,V22indx1]
V12indx0= numpy.array([[ii for jj in range(nGiven)] for ii in range(6) if not coordGiven[ii]])
V12indx1= numpy.array([[ii for ii in range(6) if coordGiven[ii]] for jj in range(6-nGiven)])
V12= tcov[V12indx0,V12indx1]
#Also get m1 and m2, again following Appendix B of 0905.2979v1
m1= tmean[True^coordGiven]
m2= tmean[coordGiven]
#conditional mean and variance
V22inv= numpy.linalg.inv(V22)
v2= numpy.array([xy[ii] for ii in range(6) if coordGiven[ii]])
condMean= m1+numpy.dot(V12,numpy.dot(V22inv,v2-m2))
condVar= V11-numpy.dot(V12,numpy.dot(V22inv,V12.T))
return (condMean,condVar) | 0.022102 |
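# The conditional mean and variance above are the standard Gaussian-conditioning
# identities: condMean = m1 + V12 V22^-1 (v2 - m2) and
# condVar = V11 - V12 V22^-1 V12^T (Appendix B of 0905.2979). A minimal,
# self-contained numpy sketch of the same identities on a toy 3-D Gaussian
# (all names below are illustrative, not part of the stream-DF code):
import numpy as np

mean = np.array([1.0, 2.0, 3.0])
cov = np.array([[2.0, 0.5, 0.3],
                [0.5, 1.0, 0.2],
                [0.3, 0.2, 1.5]])
given = np.array([False, True, True])   # which coordinates are observed
v2 = np.array([2.5, 2.8])               # the observed values

V11 = cov[np.ix_(~given, ~given)]
V22 = cov[np.ix_(given, given)]
V12 = cov[np.ix_(~given, given)]
m1, m2 = mean[~given], mean[given]

V22inv = np.linalg.inv(V22)
cond_mean = m1 + V12.dot(V22inv).dot(v2 - m2)
cond_var = V11 - V12.dot(V22inv).dot(V12.T)
print(cond_mean, cond_var)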
def skip_whitespace(self):
"""Consume input until a non-whitespace character is encountered.
The non-whitespace character is then ungotten, and the number of
whitespace characters consumed is returned.
If the tokenizer is in multiline mode, then newlines are whitespace.
@rtype: int
"""
skipped = 0
while True:
c = self._get_char()
if c != ' ' and c != '\t':
if (c != '\n') or not self.multiline:
self._unget_char(c)
return skipped
skipped += 1 | 0.003311 |
def b6_evalue_filter(handle, e_value, *args, **kwargs):
"""Yields lines from handle with E-value less than or equal to e_value
Args:
        handle (file): B6/M8 file handle, can be any iterator so long as
            it returns subsequent "lines" of a B6/M8 entry
e_value (float): max E-value to return
*args: Variable length argument list for b6_iter
**kwargs: Arbitrary keyword arguments for b6_iter
Yields:
B6Entry: class containing all B6/M8 data
Example:
Note: These doctests will not pass, examples are only in doctest
format as per convention. bio_utils uses pytests for testing.
>>> b6_handle = open('test.b6')
        >>> for entry in b6_evalue_filter(b6_handle, 1e5):
... print(entry.evalue) # Print E-value of filtered entry
"""
for entry in b6_iter(handle, *args, **kwargs):
if entry.evalue <= e_value:
yield entry | 0.001053 |
def _format_property_values(self, previous, current):
"""
Format WMI Object's RAW data based on the previous sample.
Do not override the original WMI Object !
"""
formatted_wmi_object = CaseInsensitiveDict()
for property_name, property_raw_value in iteritems(current):
counter_type = self._property_counter_types.get(property_name)
property_formatted_value = property_raw_value
if counter_type:
calculator = self._get_property_calculator(counter_type)
property_formatted_value = calculator(previous, current, property_name)
formatted_wmi_object[property_name] = property_formatted_value
return formatted_wmi_object | 0.003974 |
def scan(self,
table_name,
index_name=None,
consistent_read=None,
projection_expression=None,
filter_expression=None,
expression_attribute_names=None,
expression_attribute_values=None,
segment=None,
total_segments=None,
select=None,
limit=None,
exclusive_start_key=None,
return_consumed_capacity=None):
"""The `Scan`_ operation returns one or more items and item attributes
by accessing every item in a table or a secondary index.
If the total number of scanned items exceeds the maximum data set size
limit of 1 MB, the scan stops and results are returned to the user as a
``LastEvaluatedKey`` value to continue the scan in a subsequent
operation. The results also include the number of items exceeding the
limit. A scan can result in no table data meeting the filter criteria.
By default, Scan operations proceed sequentially; however, for faster
performance on a large table or secondary index, applications can
request a parallel *Scan* operation by providing the ``segment`` and
``total_segments`` parameters. For more information, see
`Parallel Scan <http://docs.aws.amazon.com/amazondynamodb/latest/
developerguide/QueryAndScan.html#QueryAndScanParallelScan>`_ in the
Amazon DynamoDB Developer Guide.
By default, *Scan* uses eventually consistent reads when accessing the
data in a table; therefore, the result set might not include the
changes to data in the table immediately before the operation began. If
you need a consistent copy of the data, as of the time that the *Scan*
begins, you can set the ``consistent_read`` parameter to ``True``.
:rtype: dict
.. _Scan: http://docs.aws.amazon.com/amazondynamodb/
latest/APIReference/API_Scan.html
"""
payload = {'TableName': table_name}
if index_name:
payload['IndexName'] = index_name
if consistent_read is not None:
payload['ConsistentRead'] = consistent_read
if filter_expression:
payload['FilterExpression'] = filter_expression
if expression_attribute_names:
payload['ExpressionAttributeNames'] = expression_attribute_names
if expression_attribute_values:
payload['ExpressionAttributeValues'] = \
utils.marshall(expression_attribute_values)
if projection_expression:
payload['ProjectionExpression'] = projection_expression
        if segment is not None:
payload['Segment'] = segment
if total_segments:
payload['TotalSegments'] = total_segments
if select:
_validate_select(select)
payload['Select'] = select
if exclusive_start_key:
payload['ExclusiveStartKey'] = utils.marshall(exclusive_start_key)
if limit:
payload['Limit'] = limit
if return_consumed_capacity:
_validate_return_consumed_capacity(return_consumed_capacity)
payload['ReturnConsumedCapacity'] = return_consumed_capacity
return self.execute('Scan', payload) | 0.004518 |
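# Because a single Scan call stops at 1 MB and hands back a LastEvaluatedKey,
# callers typically loop until that key is absent. A hedged sketch of such a
# pagination loop (assumption: the wrapper's execute('Scan', ...) resolves to
# the raw DynamoDB response dict with 'Items' and 'LastEvaluatedKey' keys;
# adapt if it returns a future or unmarshalled rows instead):
def scan_all_items(client, table_name, **kwargs):
    """Collect every item from a table by following LastEvaluatedKey."""
    items = []
    start_key = None
    while True:
        response = client.scan(table_name,
                               exclusive_start_key=start_key,
                               **kwargs)
        items.extend(response.get('Items', []))
        start_key = response.get('LastEvaluatedKey')
        if not start_key:   # no key means the scan reached the end of the table
            break
    return items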
def make_error_response(self, cond):
"""Create error response for the a "get" or "set" iq stanza.
:Parameters:
- `cond`: error condition name, as defined in XMPP specification.
:return: new `Iq` object with the same "id" as self, "from" and "to"
attributes swapped, type="error" and containing <error /> element
plus payload of `self`.
:returntype: `Iq`"""
if self.stanza_type in ("result", "error"):
raise ValueError("Errors may not be generated for"
" 'result' and 'error' iq")
stanza = Iq(stanza_type="error", from_jid = self.to_jid,
to_jid = self.from_jid, stanza_id = self.stanza_id,
error_cond = cond)
if self._payload is None:
self.decode_payload()
for payload in self._payload:
# use Stanza.add_payload to skip the payload length check
Stanza.add_payload(stanza, payload)
return stanza | 0.012393 |
def _remove_magic(self, data):
'''Verify and remove magic'''
if not self.magic:
return data
magic_size = len(self.magic)
magic = data[:magic_size]
if magic != self.magic:
raise Exception('Invalid magic')
data = data[magic_size:]
return data | 0.006192 |
def update(self, w, offset=0):
"""Compute gradient and Hessian matrix with respect to `w`."""
time = self.time
x = self.x
exp_xw = numpy.exp(offset + numpy.dot(x, w))
n_samples, n_features = x.shape
gradient = numpy.zeros((1, n_features), dtype=float)
hessian = numpy.zeros((n_features, n_features), dtype=float)
inv_n_samples = 1. / n_samples
risk_set = 0
risk_set_x = 0
risk_set_xx = 0
k = 0
# iterate time in descending order
for i in range(n_samples):
ti = time[i]
while k < n_samples and ti == time[k]:
risk_set += exp_xw[k]
# preserve 2D shape of row vector
xk = x[k:k + 1]
risk_set_x += exp_xw[k] * xk
# outer product
xx = numpy.dot(xk.T, xk)
risk_set_xx += exp_xw[k] * xx
k += 1
if self.event[i]:
gradient -= (x[i:i + 1] - risk_set_x / risk_set) * inv_n_samples
a = risk_set_xx / risk_set
z = risk_set_x / risk_set
# outer product
b = numpy.dot(z.T, z)
hessian += (a - b) * inv_n_samples
if self.alpha > 0:
gradient += self.alpha * inv_n_samples * w
diag_idx = numpy.diag_indices(n_features)
hessian[diag_idx] += self.alpha * inv_n_samples
self.gradient = gradient.ravel()
self.hessian = hessian | 0.00194 |
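# With the gradient and Hessian maintained by update(), a Newton-Raphson step
# for maximising the (regularised) Cox partial likelihood is a single linear
# solve. A minimal illustrative sketch (newton_optimize is not part of the
# class above; it only assumes the update()/gradient/hessian interface shown):
import numpy as np

def newton_optimize(loss, w_init, n_iter=25, tol=1e-8):
    """Plain Newton iteration over an object exposing update(w), .gradient
    and .hessian, as in the class above. No step-size control."""
    w = np.asarray(w_init, dtype=float)
    for _ in range(n_iter):
        loss.update(w)                                   # refresh gradient/Hessian at w
        delta = np.linalg.solve(loss.hessian, loss.gradient)
        w = w - delta                                    # Newton step
        if np.linalg.norm(delta) < tol:                  # converged
            break
    return w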
def directory(cls, prefix=None):
"""
Path that should be used for caching. Different for all subclasses.
"""
prefix = prefix or utility.read_config().directory
name = cls.__name__.lower()
directory = os.path.expanduser(os.path.join(prefix, name))
utility.ensure_directory(directory)
return directory | 0.00551 |
def delete_namespace(self, name):
"""
Delete namespace with specific name
:param name: str, namespace to delete
:return: None
"""
self.core_api.delete_namespace(name, client.V1DeleteOptions())
logger.info("Deleting namespace: %s", name) | 0.006826 |
def _init_map(self):
"""stub"""
super(MultiLanguageDragAndDropQuestionFormRecord, self)._init_map()
self.my_osid_object_form._my_map['droppables'] = \
self._droppables_metadata['default_object_values'][0]
self.my_osid_object_form._my_map['targets'] = \
self._targets_metadata['default_object_values'][0]
self.my_osid_object_form._my_map['zones'] = \
self._zones_metadata['default_object_values'][0]
self.my_osid_object_form._my_map['shuffleDroppables'] = \
bool(self._shuffle_droppables_metadata['default_boolean_values'][0])
self.my_osid_object_form._my_map['shuffleTargets'] = \
bool(self._shuffle_targets_metadata['default_boolean_values'][0])
self.my_osid_object_form._my_map['shuffleZones'] = \
bool(self._shuffle_zones_metadata['default_boolean_values'][0]) | 0.003337 |
def update(
self,
filepath,
cache=False,
remove=False,
bumpversion=None,
prerelease=None,
dependencies=None,
metadata=None,
message=None):
'''
Enter a new version to a DataArchive
Parameters
----------
filepath : str
The path to the file on your local file system
cache : bool
Turn on caching for this archive if not already on before update
remove : bool
removes a file from your local directory
bumpversion : str
Version component to update on write if archive is versioned. Valid
bumpversion values are 'major', 'minor', and 'patch', representing
the three components of the strict version numbering system (e.g.
"1.2.3"). If bumpversion is None the version number is not updated
on write. Either bumpversion or prerelease (or both) must be a
non-None value. If the archive is not versioned, bumpversion is
ignored.
prerelease : str
Prerelease component of archive version to update on write if
archive is versioned. Valid prerelease values are 'alpha' and
'beta'. Either bumpversion or prerelease (or both) must be a
non-None value. If the archive is not versioned, prerelease is
ignored.
metadata : dict
Updates to archive metadata. Pass {key: None} to remove a key from
the archive's metadata.
'''
if metadata is None:
metadata = {}
latest_version = self.get_latest_version()
hashval = self.api.hash_file(filepath)
checksum = hashval['checksum']
algorithm = hashval['algorithm']
if checksum == self.get_latest_hash():
self.update_metadata(metadata)
if remove and os.path.isfile(filepath):
os.remove(filepath)
return
if self.versioned:
if latest_version is None:
latest_version = BumpableVersion()
next_version = latest_version.bump(
kind=bumpversion,
prerelease=prerelease,
inplace=False)
else:
next_version = None
next_path = self.get_version_path(next_version)
if cache:
self.cache(next_version)
if self.is_cached(next_version):
self.authority.upload(filepath, next_path)
self.api.cache.upload(filepath, next_path, remove=remove)
else:
self.authority.upload(filepath, next_path, remove=remove)
self._update_manager(
archive_metadata=metadata,
version_metadata=dict(
checksum=checksum,
algorithm=algorithm,
version=next_version,
dependencies=dependencies,
message=message)) | 0.000662 |
def build(self, builder):
"""Build XML by appending to builder"""
params = dict(OID=self.oid, Name=self.name)
if self.unit_dictionary_name:
params["mdsol:UnitDictionaryName"] = self.unit_dictionary_name
for suffix in ["A", "B", "C", "K"]:
val = getattr(self, "constant_{0}".format(suffix.lower()))
params["mdsol:Constant{0}".format(suffix)] = str(val)
if self.standard_unit:
params["mdsol:StandardUnit"] = "Yes"
builder.start("MeasurementUnit", params)
for child in self.symbols:
child.build(builder)
builder.end("MeasurementUnit") | 0.00303 |
def summary(self):
"""
:rtype: twilio.rest.insights.v1.summary.CallSummaryList
"""
if self._summary is None:
self._summary = CallSummaryList(self)
return self._summary | 0.009132 |
def dtype(self, byte_order='='):
'''
Return the numpy dtype of the in-memory representation of the
data. (If there are no list properties, and the PLY format is
binary, then this also accurately describes the on-disk
representation of the element.)
'''
return _np.dtype([(prop.name, prop.dtype(byte_order))
for prop in self.properties]) | 0.004762 |
def main():
"""The main entry point."""
if sys.version_info < (2, 7):
sys.exit('crispy requires at least Python 2.7')
elif sys.version_info[0] == 3 and sys.version_info < (3, 4):
sys.exit('crispy requires at least Python 3.4')
kwargs = dict(
name='crispy',
version=get_version(),
description='Core-Level Spectroscopy Simulations in Python',
long_description=get_readme(),
license='MIT',
author='Marius Retegan',
author_email='[email protected]',
url='https://github.com/mretegan/crispy',
download_url='https://github.com/mretegan/crispy/releases',
keywords='gui, spectroscopy, simulation, synchrotron, science',
install_requires=get_requirements(),
platforms=[
'MacOS :: MacOS X',
'Microsoft :: Windows',
'POSIX :: Linux',
],
packages=[
'crispy',
'crispy.gui',
'crispy.gui.uis',
'crispy.gui.icons',
'crispy.modules',
'crispy.modules.quanty',
'crispy.modules.orca',
'crispy.utils',
],
package_data={
'crispy.gui.uis': [
'*.ui',
'quanty/*.ui',
],
'crispy.gui.icons': [
'*.svg',
],
'crispy.modules.quanty': [
'parameters/*.json.gz',
'templates/*.lua',
],
},
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: X11 Applications :: Qt',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering :: Visualization',
]
)
# At the moment pip/setuptools doesn't play nice with shebang paths
# containing white spaces.
# See: https://github.com/pypa/pip/issues/2783
# https://github.com/xonsh/xonsh/issues/879
# The most straight forward workaround is to have a .bat script to run
# crispy on Windows.
if 'win32' in sys.platform:
kwargs['scripts'] = ['scripts/crispy.bat']
else:
kwargs['scripts'] = ['scripts/crispy']
setup(**kwargs) | 0.000362 |
def evaluate(self, verbose=True, passes=None):
"""Summary
Returns:
TYPE: Description
"""
if self.is_pivot:
index, pivot, columns = LazyOpResult(
self.expr,
self.weld_type,
0
).evaluate(verbose=verbose, passes=passes)
df_dict = {}
for i, column_name in enumerate(columns):
df_dict[column_name] = pivot[i]
return DataFrameWeld(pd.DataFrame(df_dict, index=index))
else:
df = pd.DataFrame(columns=[])
weldvec_type_list = []
for type in self.column_types:
weldvec_type_list.append(WeldVec(type))
columns = LazyOpResult(
grizzly_impl.unzip_columns(
self.expr,
self.column_types
),
WeldStruct(weldvec_type_list),
0
).evaluate(verbose=verbose, passes=passes)
for i, column_name in enumerate(self.column_names):
df[column_name] = columns[i]
return DataFrameWeld(df) | 0.00173 |
def copy(self):
"""Create a new copy of selfe. does not do a deep copy for payload
:return: copied range
:rtype: GenomicRange
"""
return type(self)(self.chr,
self.start+self._start_offset,
self.end,
self.payload,
self.dir) | 0.003003 |
def connect(polylines, max_dist=10):
"""
connect polylines that are close and have a similar orientation
o---o <-> o---o ==> o----o--o----o
TODO: max_dist as function of cell size
"""
ll = len(polylines)
remove = []
for n in range(ll - 1, -1, -1):
c = polylines[n]
if len(c) > 1:
for d0, p0 in enumerate((c[0, 0], c[-1, 0])):
for m in range(len(polylines) - 1, -1, -1):
# combine all x all polylines
if n == m:
continue
cc = polylines[m]
for d1, p1 in enumerate((cc[0, 0], cc[-1, 0])):
# for end points of other polylines:
# measure closest distance for current polyline
ndist = norm(p0 - p1)
if ndist < max_dist:
is_same, c = _connect(c, cc, d0, d1)
if is_same:
if m not in remove:
remove.append(m)
polylines[n] = c
break
# remove those which are already in connected to other polyline:
remove.sort()
remove.reverse()
for r in remove:
polylines.pop(r) | 0.000743 |
def _load_response(response):
'''
Load the response from json data, return the dictionary or raw text
'''
try:
data = salt.utils.json.loads(response.text)
except ValueError:
data = response.text
ret = {'code': response.status_code, 'content': data}
return ret | 0.003268 |
def _quote(self, value, multiline=True):
"""
Return a safely quoted version of a value.
Raise a ConfigObjError if the value cannot be safely quoted.
If multiline is ``True`` (default) then use triple quotes
if necessary.
* Don't quote values that don't need it.
* Recursively quote members of a list and return a comma joined list.
* Multiline is ``False`` for lists.
* Obey list syntax for empty and single member lists.
If ``list_values=False`` then the value is only quoted if it contains
a ``\\n`` (is multiline) or '#'.
If ``write_empty_values`` is set, and the value is an empty string, it
won't be quoted.
"""
if multiline and self.write_empty_values and value == '':
# Only if multiline is set, so that it is used for values not
# keys, and not values that are part of a list
return ''
if multiline and isinstance(value, (list, tuple)):
if not value:
return ','
elif len(value) == 1:
return self._quote(value[0], multiline=False) + ','
return ', '.join([self._quote(val, multiline=False)
for val in value])
if not isinstance(value, string_types):
if self.stringify:
value = str(value)
else:
raise TypeError('Value "%s" is not a string.' % value)
if not value:
return '""'
no_lists_no_quotes = not self.list_values and '\n' not in value and '#' not in value
need_triple = multiline and ((("'" in value) and ('"' in value)) or ('\n' in value ))
hash_triple_quote = multiline and not need_triple and ("'" in value) and ('"' in value) and ('#' in value)
check_for_single = (no_lists_no_quotes or not need_triple) and not hash_triple_quote
if check_for_single:
if not self.list_values:
# we don't quote if ``list_values=False``
quot = noquot
# for normal values either single or double quotes will do
elif '\n' in value:
# will only happen if multiline is off - e.g. '\n' in key
raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
elif ((value[0] not in wspace_plus) and
(value[-1] not in wspace_plus) and
(',' not in value)):
quot = noquot
else:
quot = self._get_single_quote(value)
else:
# if value has '\n' or "'" *and* '"', it will need triple quotes
quot = self._get_triple_quote(value)
if quot == noquot and '#' in value and self.list_values:
quot = self._get_single_quote(value)
return quot % value | 0.003145 |
def close(self):
"""Flush the buffer and finalize the file.
When this returns the new file is available for reading.
"""
if not self.closed:
self.closed = True
self._flush(finish=True)
self._buffer = None | 0.016736 |
def visit_Expr(self, node: ast.Expr) -> Optional[ast.Expr]:
"""Eliminate no-op constant expressions which are in the tree
as standalone statements."""
if isinstance(
node.value,
(
ast.Constant, # type: ignore
ast.Name,
ast.NameConstant,
ast.Num,
ast.Str,
),
):
return None
return node | 0.004415 |
def translate_z(self, d):
"""Translate mesh for z-direction
:param float d: Amount to translate
"""
mat = numpy.array([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, d, 1]
])
self.vectors = self.vectors.dot(mat)
return self | 0.006042 |
def seed(cache_dir=CACHE_DIR, product=DEFAULT_PRODUCT, bounds=None, max_download_tiles=9, **kwargs):
"""Seed the DEM to given bounds.
:param cache_dir: Root of the DEM cache folder.
:param product: DEM product choice.
:param bounds: Output bounds in 'left bottom right top' order.
:param max_download_tiles: Maximum number of tiles to process.
:param kwargs: Pass additional kwargs to ensure_tiles.
"""
datasource_root, spec = ensure_setup(cache_dir, product)
ensure_tiles_names = list(spec['tile_names'](*bounds))
# FIXME: emergency hack to enforce the no-bulk-download policy
if len(ensure_tiles_names) > max_download_tiles:
raise RuntimeError("Too many tiles: %d. Please consult the providers' websites "
"for how to bulk download tiles." % len(ensure_tiles_names))
with util.lock_tiles(datasource_root, ensure_tiles_names):
ensure_tiles(datasource_root, ensure_tiles_names, **kwargs)
with util.lock_vrt(datasource_root, product):
util.check_call_make(datasource_root, targets=['all'])
return datasource_root | 0.003568 |
def simple_moving_average(data, period):
"""
Simple Moving Average.
Formula:
SUM(data / N)
"""
catch_errors.check_for_period_error(data, period)
    # The "Mean of empty slice" RuntimeWarning doesn't affect the output, so it
    # is suppressed.
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
sma = [np.mean(data[idx-(period-1):idx+1]) for idx in range(0, len(data))]
sma = fill_for_noncomputable_vals(data, sma)
return sma | 0.00396 |
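# For reference, the same SUM(data / N) moving average can be written with a
# vectorised convolution. A standalone numpy sketch, independent of the
# catch_errors / fill_for_noncomputable_vals helpers above (positions without
# a full window are returned as NaN here rather than filled):
import numpy as np

def sma_numpy(data, period):
    """Simple moving average via convolution; the first period-1 slots have
    no complete window and are returned as NaN."""
    data = np.asarray(data, dtype=float)
    window = np.ones(period) / period
    full = np.convolve(data, window, mode='valid')   # one value per full window
    return np.concatenate([np.full(period - 1, np.nan), full])

print(sma_numpy([1, 2, 3, 4, 5], 3))   # [nan nan 2. 3. 4.]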
def hpss(y, **kwargs):
'''Decompose an audio time series into harmonic and percussive components.
This function automates the STFT->HPSS->ISTFT pipeline, and ensures that
the output waveforms have equal length to the input waveform `y`.
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time series
kwargs : additional keyword arguments.
See `librosa.decompose.hpss` for details.
Returns
-------
y_harmonic : np.ndarray [shape=(n,)]
audio time series of the harmonic elements
y_percussive : np.ndarray [shape=(n,)]
audio time series of the percussive elements
See Also
--------
harmonic : Extract only the harmonic component
percussive : Extract only the percussive component
librosa.decompose.hpss : HPSS on spectrograms
Examples
--------
>>> # Extract harmonic and percussive components
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> y_harmonic, y_percussive = librosa.effects.hpss(y)
>>> # Get a more isolated percussive component by widening its margin
>>> y_harmonic, y_percussive = librosa.effects.hpss(y, margin=(1.0,5.0))
'''
# Compute the STFT matrix
stft = core.stft(y)
# Decompose into harmonic and percussives
stft_harm, stft_perc = decompose.hpss(stft, **kwargs)
# Invert the STFTs. Adjust length to match the input.
y_harm = util.fix_length(core.istft(stft_harm, dtype=y.dtype), len(y))
y_perc = util.fix_length(core.istft(stft_perc, dtype=y.dtype), len(y))
return y_harm, y_perc | 0.000628 |
def Magic(self):
#self.view.setFixedSize(self.width(), self.width())
self.WholeData = []
self.x_scale = self.width_plot / self.width_load
self.y_scale = self.height_plot / self.height_load
self.z_scale = self.depth_plot / self.depth_load
# print(self.x_scale,' and ',self.x_scale)
raw = self._df
a = int(self.x_element.value())
b = int(self.y_element.value())
c = int(self.z_element.value())
self.x_element_label.setText(self.items[a])
self.y_element_label.setText(self.items[b])
self.z_element_label.setText(self.items[c])
if (self.Left != self.Right) and (self.Down != self.Up) and abs(self.Left) + abs(self.Right) + abs(
self.Down) + abs(self.Up) != 0:
self.extent = [self.Left, self.Right, self.Down, self.Up]
elif (self.Left == self.Right and abs(self.Left) + abs(self.Right) != 0):
reply = QMessageBox.warning(self, 'Warning', 'You set same value to Left and Right limits.')
self.extent = 0
elif (self.Down == self.Up and abs(self.Down) + abs(self.Up) != 0):
reply = QMessageBox.warning(self, 'Warning', 'You set same value to Up and Down limits.')
self.extent = 0
else:
self.extent = 0
standardnamechosen = self.StandardsName[int(self.norm_slider.value())]
standardchosen = self.Standards[standardnamechosen]
self.norm_slider_label.setText(standardnamechosen)
PointLabels = []
XtoDraw = []
YtoDraw = []
ZtoDraw = []
Colors=[]
Alphas=[]
Markers=[]
Names=[]
for i in range(len(raw)):
# raw.at[i, 'DataType'] == 'User' or raw.at[i, 'DataType'] == 'user' or raw.at[i, 'DataType'] == 'USER'
TmpLabel = ''
# self.WholeData.append(math.log(tmp, 10))
if (raw.at[i, 'Label'] in PointLabels or raw.at[i, 'Label'] == ''):
TmpLabel = ''
else:
PointLabels.append(raw.at[i, 'Label'])
TmpLabel = raw.at[i, 'Label']
x, y ,z = 0, 0, 0
xuse, yuse,zuse = 0, 0, 0
x, y,z = raw.at[i, self.items[a]], raw.at[i, self.items[b]],raw.at[i, self.items[c]]
try:
xuse = x
yuse = y
zuse = z
self.xlabel = self.items[a]
self.ylabel = self.items[b]
self.zlabel = self.items[c]
if (self.Normalize_cb.isChecked()):
self.xlabel = self.items[a] + ' Norm by ' + standardnamechosen
self.x_element_label.setText(self.xlabel)
self.ylabel = self.items[b] + ' Norm by ' + standardnamechosen
self.y_element_label.setText(self.ylabel)
self.zlabel = self.items[c] + ' Norm by ' + standardnamechosen
self.z_element_label.setText(self.zlabel)
if self.items[a] in self.Element:
xuse = xuse / standardchosen[self.items[a]]
if self.items[b] in self.Element:
yuse = yuse / standardchosen[self.items[b]]
if self.items[c] in self.Element:
zuse = zuse / standardchosen[self.items[c]]
if (self.logx_cb.isChecked()):
xuse = math.log(x, 10)
self.xlabel = '$log10$ ' + self.xlabel
if (self.logy_cb.isChecked()):
yuse = math.log(y, 10)
self.ylabel = '$log10$ ' + self.ylabel
if (self.logz_cb.isChecked()):
zuse = math.log(z, 10)
self.zlabel = '$log10$ ' + self.zlabel
XtoDraw.append(xuse)
YtoDraw.append(yuse)
ZtoDraw.append(zuse)
Colors.append(raw.at[i, 'Color'])
Alphas.append(raw.at[i, 'Alpha'])
Names.append(raw.at[i, 'Label'])
Markers.append(raw.at[i, 'Marker'])
except(ValueError):
pass
if self.LimSet==False:
self.Xleft, self.Xright, self.Ydown, self.Yup, self.Tail, self.Head = min(XtoDraw), max(XtoDraw), min(YtoDraw), max(YtoDraw), min(ZtoDraw), max(ZtoDraw)
xmin, xmax = min(XtoDraw), max(XtoDraw)
ymin, ymax = min(YtoDraw), max(YtoDraw)
zmin, zmax = min(ZtoDraw), max(ZtoDraw)
xmean = np.mean(XtoDraw)
ymean = np.mean(YtoDraw)
zmean = np.mean(ZtoDraw)
Xoriginal = np.arange(xmin, xmax, (xmax - xmin) / 10)
Yoriginal = np.arange(ymin, ymax, (ymax - ymin) / 10)
Zoriginal = np.arange(zmin, zmax, (zmax - zmin) / 10)
XonPlot = self.GetASequence(tail=self.ShapeGroups)
YonPlot = self.GetASequence(tail=self.ShapeGroups)
ZonPlot = self.GetASequence(tail=self.ShapeGroups)
XonStick = []
YonStick = []
ZonStick = []
for i in range(len(XonPlot)):
XonStick.append([XonPlot[i], Xoriginal[i]])
YonStick.append([YonPlot[i], Yoriginal[i]])
ZonStick.append([ZonPlot[i], Zoriginal[i]])
pass
#print(XtoDraw,'\n', YtoDraw,'\n', ZtoDraw)
toDf = {self.xlabel:XtoDraw,
self.ylabel:YtoDraw,
self.zlabel:ZtoDraw}
newdf = pd.DataFrame(toDf)
pos = newdf.as_matrix()
print(pos)
ThreeDimView = gl.GLScatterPlotItem(pos=pos, color=(100, 255, 255, 88), size=0.1, pxMode=False)
print(xmean,'\n', ymean,'\n', zmean,'\n')
self.view.pan(xmean, ymean, zmean)
xgrid = gl.GLGridItem(size=QtGui.QVector3D(10, 10, 1), color=1)
ygrid = gl.GLGridItem(size=QtGui.QVector3D(20, 20, 2), color=2)
zgrid = gl.GLGridItem(size=QtGui.QVector3D(30, 30, 3), color=3)
## rotate x and y grids to face the correct direction
xgrid.rotate(90, 0, 1, 0)
ygrid.rotate(90, 1, 0, 0)
xgrid.translate(xmean, ymean, zmean)
ygrid.translate(xmean, ymean, zmean)
zgrid.translate(xmean, ymean, zmean)
## scale each grid differently
'''
xgrid.scale(12.8, 12.8, 12.8)
ygrid.scale(12.8, 12.8, 12.8)
zgrid.scale(12.8, 12.8, 12.8)
'''
# xgrid.setTransform(xmean,ymean,zmean)
self.view.addItem(xgrid)
self.view.addItem(ygrid)
self.view.addItem(zgrid)
self.view.addItem(ThreeDimView) | 0.007513 |
def write_desc(self) -> None:
""" Writes a description of the model to the exp_dir. """
path = os.path.join(self.exp_dir, "model_description.txt")
with open(path, "w") as desc_f:
for key, val in self.__dict__.items():
print("%s=%s" % (key, val), file=desc_f)
import json
json_path = os.path.join(self.exp_dir, "model_description.json")
desc = { } #type: Dict[str, Any]
# For use in decoding from a saved model
desc["topology"] = {
"batch_x_name" : self.batch_x.name, #type: ignore
"batch_x_lens_name" : self.batch_x_lens.name, #type: ignore
"dense_decoded_name" : self.dense_decoded.name #type: ignore
}
desc["model_type"] = str(self.__class__)
for key, val in self.__dict__.items():
if isinstance(val, int):
desc[str(key)] = val
elif isinstance(val, tf.Tensor):
desc[key] = {
"type": "tf.Tensor",
"name": val.name, #type: ignore
"shape": str(val.shape), #type: ignore
"dtype" : str(val.dtype), #type: ignore
"value" : str(val),
}
elif isinstance(val, tf.SparseTensor): #type: ignore
desc[key] = {
"type": "tf.SparseTensor",
"value": str(val), #type: ignore
}
else:
desc[str(key)] = str(val)
with open(json_path, "w") as json_desc_f:
json.dump(desc, json_desc_f, skipkeys=True) | 0.015902 |
def format_output(instances, flag):
"""return formatted string for instance"""
out = []
line_format = '{0}\t{1}\t{2}\t{3}\t{4}'
name_len = _get_max_name_len(instances) + 3
if flag:
line_format = '{0:<' + str(name_len) + '}{1:<16}{2:<21}{3:<16}{4:<16}'
for i in instances:
tag_name = get_tag_value(i.tags, 'Name')
out.append(line_format.format(
tag_name, i.state['Name'], i.id, i.private_ip_address, str(i.public_ip_address)))
return out | 0.003992 |
def pyprf_sim(strPrior, strStmApr, lgcNoise=False, lgcRtnNrl=True,
lstRat=None, lgcTest=False):
"""
Simulate pRF response given pRF parameters and stimulus apertures.
Parameters
----------
strPrior : str
Absolute file path of config file used for pRF fitting.
strStmApr : str
Absolute file path to stimulus aperture used in in-silico experiment.
lgcNoise : boolean
Should noise be added to the simulated pRF time course. By default, no
noise is added.
lgcRtnNrl : boolean
Should neural time course, unconvolved with hrf, be returned as well?
lstRat : None or list
Ratio of size of center to size of suppressive surround.
lgcTest : boolean
Whether this is a test (pytest). If yes, absolute path of pyprf libary
will be prepended to config file paths.
Notes
-----
    [1] This function does not return any arguments but, instead, saves nii
    files to disk.
    [2] strStmApr should be a path to an npy file that contains a 3D numpy
    array. This array consists of binary images in boolean array form that
    represent the stimulus aperture. Images are stacked along the last axis.
"""
# %% Load configuration settings that were used for fitting
# Load config parameters from csv file into dictionary:
dicCnfg = load_config(strPrior, lgcTest=lgcTest)
# Load config parameters from dictionary into namespace:
cfg = cls_set_config(dicCnfg)
# If suppressive surround flag is on, make sure to retrieve results from
# that fitting
if lstRat is not None:
cfg.strPathOut = cfg.strPathOut + '_supsur'
# %% Load previous pRF fitting results
# Derive paths to the x, y, sigma winner parameters from pyprf_feature
lstWnrPrm = [cfg.strPathOut + '_x_pos.nii.gz',
cfg.strPathOut + '_y_pos.nii.gz',
cfg.strPathOut + '_SD.nii.gz']
# Check if fitting has been performed, i.e. whether parameter files exist
# Throw error message if they do not exist.
errorMsg = 'Files that should have resulted from fitting do not exist. \
\nPlease perform pRF fitting first, calling e.g.: \
\npyprf_feature -config /path/to/my_config_file.csv'
assert os.path.isfile(lstWnrPrm[0]), errorMsg
assert os.path.isfile(lstWnrPrm[1]), errorMsg
assert os.path.isfile(lstWnrPrm[2]), errorMsg
# Load the x, y, sigma winner parameters from pyprf_feature
aryIntGssPrm = load_res_prm(lstWnrPrm,
lstFlsMsk=[cfg.strPathNiiMask])[0][0]
# Also load suppresive surround params if suppressive surround flag was on
if lstRat is not None:
# Load beta parameters estimates, aka weights, this is later needed to
# scale responses of the center wrt to the surround
lstPathBeta = [cfg.strPathOut + '_Betas.nii.gz']
aryBetas = load_res_prm(lstPathBeta,
lstFlsMsk=[cfg.strPathNiiMask])[0][0]
# Load ratio of prf sizes
lstPathRat = [cfg.strPathOut + '_Ratios.nii.gz']
aryRat = load_res_prm(lstPathRat, lstFlsMsk=[cfg.strPathNiiMask])[0][0]
# Some voxels were excluded because they did not have sufficient mean
# and/or variance - exclude their initial parameters, too
# Get inclusion mask and nii header
aryLgcMsk, aryLgcVar, hdrMsk, aryAff, _, tplNiiShp = prep_func(
cfg.strPathNiiMask, cfg.lstPathNiiFunc, varAvgThr=100.)
# Apply inclusion mask
aryIntGssPrm = aryIntGssPrm[aryLgcVar, :]
if lstRat is not None:
aryBetas = aryBetas[aryLgcVar, :]
aryRat = aryRat[aryLgcVar]
# %% Load stimulus aperture and create model responses to stimuli
# Load stimulus aperture
aryStmApr = np.load(strStmApr)
# Which dimensions does the representation have in pixel space?
tplStmApr = aryStmApr.shape[:2]
# Convert winner parameters from degrees of visual angle to pixel
vecIntX, vecIntY, vecIntSd = rmp_deg_pixel_xys(aryIntGssPrm[:, 0],
aryIntGssPrm[:, 1],
aryIntGssPrm[:, 2],
tplStmApr,
cfg.varExtXmin,
cfg.varExtXmax,
cfg.varExtYmin,
cfg.varExtYmax)
aryIntGssPrmPxl = np.column_stack((vecIntX, vecIntY, vecIntSd))
# Create 2D Gauss model responses to spatial conditions.
print('---Create 2D Gauss model responses to spatial conditions')
aryMdlRsp = crt_mdl_rsp(aryStmApr, tplStmApr, aryIntGssPrmPxl, cfg.varPar)
# If supsur flag was provided, also create responses with supsur params
# and combine positive center response with negative surround response
if lstRat is not None:
aryIntGssPrmPxlSur = np.copy(aryIntGssPrmPxl)
# Adjust pRF sizes using the ratio of pRF sizes
aryIntGssPrmPxlSur[:, 2] = np.multiply(aryIntGssPrmPxlSur[:, 2],
aryRat)
aryMdlRspSur = crt_mdl_rsp(aryStmApr, tplStmApr, aryIntGssPrmPxlSur,
cfg.varPar)
# Now the responses of the center and the surround need to be combined
# in a meaningful way. One way this could be done is to take the ratio
# of gain parameters that were found when fitting (i.e. betas)
varGainRat = np.divide(aryBetas[:, 0], aryBetas[:, 1])
aryMdlRsp = np.subtract(aryMdlRsp,
np.multiply(varGainRat, aryMdlRspSur))
# %% Convolve time courses with hrf function
# First temporally upsamle the model response
aryMdlRspUps = np.repeat(aryMdlRsp, cfg.varTmpOvsmpl, axis=-1)
# Convolve with hrf function
arySimRsp = crt_prf_tc(aryMdlRspUps, aryMdlRsp.shape[-1], cfg.varTr,
cfg.varTmpOvsmpl, 1, tplStmApr, cfg.varPar)
# Squeeze simulated reponse. This step is necessary because crt_prf_tc is,
# in principle, capable of convolving with deriavtes of canonical function
if arySimRsp.shape[1] > 1:
print('***WARNING: pyprf_sim expects 1 hrf function, currently***')
arySimRsp = np.squeeze(arySimRsp)
# Save memory by deleting upsampled time course
del(aryMdlRspUps)
# %% Add auto-correlated noise
if lgcNoise:
print('***Adding noise feature not yet implemented***')
# %% Export simulated prf, and if desired neural, time courses as nii
# List with name suffices of output images:
lstNiiNames = ['_SimPrfTc']
# Create full path names from nii file names and output path
lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in
lstNiiNames]
# export beta parameter as a single 4D nii file
print('---Save simulated pRF time courses')
export_nii(arySimRsp, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp,
aryAff, hdrMsk, outFormat='4D')
print('------Done.')
if lgcRtnNrl:
# List with name suffices of output images:
lstNiiNames = ['_SimNrlTc']
# Create full path names from nii file names and output path
lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in
lstNiiNames]
# export beta parameter as a single 4D nii file
print('---Save simulated neural time courses')
export_nii(aryMdlRsp, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp,
aryAff, hdrMsk, outFormat='4D')
print('------Done.') | 0.000129 |
def add_attribute(self, attribute, value):
"""
Add an attribute to the current instance
:param str attribute: Attribute name
:type value: Union[datetime,bool,str]
:param value: Attribute value
"""
class_name = self.__class__.__name__
if class_name.startswith('ItemData'):
# ItemData* Elements
class_name = 'ItemData'
if attribute not in MODMExtensionRegistry[class_name].value:
raise ValueError("Can't add {} to {}".format(attribute, self.__class__.__name__))
self.attributes.append(MODMAttribute(attribute, value)) | 0.004747 |
def remove_object(collision_object):
"""Remove the collision object from the Manager"""
global collidable_objects
if isinstance(collision_object, CollidableObj):
# print "Collision object of type ", type(collision_object), " removed from the collision manager."
try:
collidable_objects.remove(collision_object)
        except ValueError:
            print("Ragnarok Says: Collision_Object with ID # " + str(collision_object.obj_id) +
                  " could not be found in the Collision Manager. Skipping over...") | 0.008389 |
def _get_qe(self, key, obj):
"""Instantiate a query engine, or retrieve a cached one.
"""
if key in self._cached:
return self._cached[key]
qe = create_query_engine(obj, self._class)
self._cached[key] = qe
return qe | 0.007299 |
def send_vdp_query_msg(self, mode, mgrid, typeid, typeid_ver, vsiid_frmt,
vsiid, filter_frmt, gid, mac, vlan, oui_id,
oui_data):
"""Constructs and Sends the VDP Query Message.
Please refer http://www.ieee802.org/1/pages/802.1bg.html VDP
Section for more detailed information
:param mode: Associate or De-associate
:param mgrid: MGR ID
:param typeid: Type ID
:param typeid_ver: Version of the Type ID
:param vsiid_frmt: Format of the following VSI argument
:param vsiid: VSI value
:param filter_frmt: Filter Format
:param gid: Group ID the vNIC belongs to
:param mac: MAC Address of the vNIC
:param vlan: VLAN of the vNIC
:param oui_id: OUI Type
:param oui_data: OUI Data
:return reply: Reply from vdptool
"""
if not self.is_ncb:
LOG.error("EVB cannot be set on NB")
return
vdp_key_str = self.construct_vdp_dict(mode, mgrid, typeid,
typeid_ver, vsiid_frmt, vsiid,
filter_frmt, gid, mac, vlan,
None, None)
if len(vdp_key_str) == 0:
LOG.error("NULL List")
return
reply = self.run_vdptool(["-t", "-i", self.port_name, "-R", "-V", mode,
"-c", vdp_key_str['mode'],
"-c", vdp_key_str['mgrid'],
"-c", vdp_key_str['typeid'],
"-c", vdp_key_str['typeid_ver'],
"-c", vdp_key_str['vsiid']])
return reply | 0.002154 |
def _value__get(self):
"""
Get/set the value (which is the contents of this element)
"""
content = self.text or ''
if self.tag.startswith("{%s}" % XHTML_NAMESPACE):
serialisation_method = 'xml'
else:
serialisation_method = 'html'
for el in self:
# it's rare that we actually get here, so let's not use ''.join()
content += etree.tostring(
el, method=serialisation_method, encoding='unicode')
return content | 0.003745 |
def future_add_done_callback( # noqa: F811
future: "Union[futures.Future[_T], Future[_T]]", callback: Callable[..., None]
) -> None:
"""Arrange to call ``callback`` when ``future`` is complete.
``callback`` is invoked with one argument, the ``future``.
If ``future`` is already done, ``callback`` is invoked immediately.
This may differ from the behavior of ``Future.add_done_callback``,
which makes no such guarantee.
.. versionadded:: 5.0
"""
if future.done():
callback(future)
else:
future.add_done_callback(callback) | 0.003448 |
def _format_options_usage(options):
"""
Format the Options-part of the usage text.
Parameters
----------
options : list[sacred.commandline_options.CommandLineOption]
A list of all supported commandline options.
Returns
-------
str
Text formatted as a description for the commandline options
"""
options_usage = ""
for op in options:
short, long = op.get_flags()
if op.arg:
flag = "{short} {arg} {long}={arg}".format(
short=short, long=long, arg=op.arg)
else:
flag = "{short} {long}".format(short=short, long=long)
wrapped_description = textwrap.wrap(inspect.cleandoc(op.__doc__),
width=79,
initial_indent=' ' * 32,
subsequent_indent=' ' * 32)
wrapped_description = "\n".join(wrapped_description).strip()
options_usage += " {0:28} {1}\n".format(flag, wrapped_description)
return options_usage | 0.000911 |
def add_usable_app(name, app):
'Add app to local registry by name'
name = slugify(name)
global usable_apps # pylint: disable=global-statement
usable_apps[name] = app
return name | 0.010152 |
def CheckTaskReadyForMerge(self, task):
"""Checks if a task is ready for merging with this session storage.
If the task is ready to be merged, this method also sets the task's
storage file size.
Args:
task (Task): task.
Returns:
bool: True if the task is ready to be merged.
Raises:
      IOError: if the storage type is not supported or if the temporary
          path for the task storage does not exist.
      OSError: if the storage type is not supported or if the temporary
          path for the task storage does not exist.
"""
if self._storage_type != definitions.STORAGE_TYPE_SESSION:
raise IOError('Unsupported storage type.')
if not self._processed_task_storage_path:
raise IOError('Missing processed task storage path.')
processed_storage_file_path = self._GetProcessedStorageFilePath(task)
try:
stat_info = os.stat(processed_storage_file_path)
except (IOError, OSError):
return False
task.storage_file_size = stat_info.st_size
return True | 0.005092 |
def place_objects(self):
"""Places objects randomly until no collisions or max iterations hit."""
pos_arr, quat_arr = self.initializer.sample()
for k, obj_name in enumerate(self.objects):
self.objects[obj_name].set("pos", array_to_string(pos_arr[k]))
self.objects[obj_name].set("quat", array_to_string(quat_arr[k])) | 0.008264 |
def create(cls, jar):
"""Creates an actual M2Coordinate from the given M2Coordinate-like object (eg a JarDependency).
:API: public
:param JarDependency jar: the input coordinate.
:return: A new M2Coordinate, unless the input is already an M2Coordinate in which case it just
returns the input unchanged.
:rtype: M2Coordinate
"""
if isinstance(jar, cls):
return jar
return cls(org=jar.org, name=jar.name, rev=jar.rev, classifier=jar.classifier, ext=jar.ext) | 0.00998 |
def update(self, value):
"""Update the mean and variance estimates.
Args:
value: Batch or single value tensor.
Returns:
Summary tensor.
"""
with tf.name_scope(self._name + '/update'):
if value.shape.ndims == self._mean.shape.ndims:
# Add a batch dimension if necessary.
value = value[None, ...]
count = tf.shape(value)[0]
with tf.control_dependencies([self._count.assign_add(count)]):
step = tf.cast(self._count, tf.float32)
mean_delta = tf.reduce_sum(value - self._mean[None, ...], 0)
new_mean = self._mean + mean_delta / step
new_mean = tf.cond(self._count > 1, lambda: new_mean, lambda: value[0])
var_delta = (
value - self._mean[None, ...]) * (value - new_mean[None, ...])
new_var_sum = self._var_sum + tf.reduce_sum(var_delta, 0)
with tf.control_dependencies([new_mean, new_var_sum]):
update = self._mean.assign(new_mean), self._var_sum.assign(new_var_sum)
with tf.control_dependencies(update):
if value.shape.ndims == 1:
value = tf.reduce_mean(value)
return self._summary('value', tf.reduce_mean(value)) | 0.005917 |
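# The bookkeeping above is the classic streaming (Welford-style) update:
# mean += delta / count and var_sum += (x - old_mean) * (x - new_mean).
# A small pure-Python sketch of the same rule, useful for checking the
# TensorFlow version against known data (illustrative only):
class RunningMoments(object):
    """Streaming mean and variance using the update rule shown above."""

    def __init__(self):
        self.count = 0
        self.mean = 0.0
        self.var_sum = 0.0

    def update(self, value):
        self.count += 1
        new_mean = self.mean + (value - self.mean) / self.count
        self.var_sum += (value - self.mean) * (value - new_mean)
        self.mean = new_mean

    @property
    def variance(self):
        # Sample variance; 0.0 until there are at least two samples.
        return self.var_sum / (self.count - 1) if self.count > 1 else 0.0

moments = RunningMoments()
for x in [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]:
    moments.update(x)
print(moments.mean, moments.variance)   # 5.0 and ~4.571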
def get_or_create(self, login):
"""
Get the qid of the item by its external id or create if doesn't exist
:param login: WDLogin item
:return: tuple of (qid, list of warnings (strings), success (True if success, returns the Exception otherwise))
"""
if self.p:
try:
return self.p.get_or_create(login)
except Exception as e:
return None, self.p.warnings, e
else:
return None, [], self.e | 0.005906 |
def load_file(self, currency_file):
"""To be subclassed if alternate methods of loading data.
"""
if currency_file.startswith(('http://', 'https://')):
content = urlopen(currency_file).read()
else:
with open(currency_file, 'rb') as f:
content = f.read()
if currency_file.endswith('.zip'):
self.load_lines(get_lines_from_zip(content))
else:
self.load_lines(content.decode('utf-8').splitlines()) | 0.003953 |
def cmd_alias(args):
'''alias commands'''
usage = "usage: alias <add|remove|list>"
if len(args) < 1 or args[0] == "list":
if len(args) >= 2:
wildcard = args[1].upper()
else:
wildcard = '*'
for a in sorted(mpstate.aliases.keys()):
if fnmatch.fnmatch(a.upper(), wildcard):
print("%-15s : %s" % (a, mpstate.aliases[a]))
elif args[0] == "add":
if len(args) < 3:
print(usage)
return
a = args[1]
mpstate.aliases[a] = ' '.join(args[2:])
elif args[0] == "remove":
if len(args) != 2:
print(usage)
return
a = args[1]
if a in mpstate.aliases:
mpstate.aliases.pop(a)
else:
print("no alias %s" % a)
else:
print(usage)
return | 0.00117 |
def url(self):
'''
Executes the methods to send request, process the response and then
publishes the url.
'''
self.get_response()
url = self.process_response()
if url:
logging.info('Your paste has been published at %s' %(url))
return url
else:
logging.error('Did not get a URL back for the paste')
raise PasteException("No URL for paste") | 0.006652 |
def all_conditional_solidity_variables_read(self, include_loop=True):
"""
        Return the Solidity variables directly used in a condition.
        Uses the IR to filter out index accesses.
        Assumption: the Solidity variables are used directly in the conditional node;
        it won't work if the variable is first assigned to a temporary variable.
"""
if include_loop:
if self._all_conditional_solidity_variables_read_with_loop is None:
self._all_conditional_solidity_variables_read_with_loop = self._explore_functions(
lambda x: self._explore_func_conditional(x,
self._solidity_variable_in_binary,
include_loop))
return self._all_conditional_solidity_variables_read_with_loop
else:
if self._all_conditional_solidity_variables_read is None:
self._all_conditional_solidity_variables_read = self._explore_functions(
lambda x: self._explore_func_conditional(x,
self._solidity_variable_in_binary,
include_loop))
return self._all_conditional_solidity_variables_read | 0.005151 |
def read1(self, size=-1):
"""Read up to *size* bytes.
This function reads from the buffer only once. It is useful in case you
need to read a large input, and want to do so efficiently. If *size* is
big enough, then this method will return the chunks passed into the
memory buffer verbatim without any copying or slicing.
"""
self._check_readable()
chunk = self._buffer.get_chunk(size)
if not chunk and not self._buffer.eof and self._buffer.error:
raise compat.saved_exc(self._buffer.error)
return chunk | 0.003361 |
def matchBytes(self, bytes):
"""Look for a sequence of bytes at the start of a string. If the bytes
are found return True and advance the position to the byte after the
match. Otherwise return False and leave the position alone"""
p = self.position
data = self[p:p + len(bytes)]
rv = data.startswith(bytes)
if rv:
self.position += len(bytes)
return rv | 0.004684 |
def ingest(self, text, log_message=None):
"""
Ingest a new object into Fedora. Returns the pid of the new object on
success. Calls :meth:`ApiFacade.ingest`.
:param text: full text content of the object to be ingested
:param log_message: optional log message
:rtype: string
"""
kwargs = {'text': text}
if log_message:
kwargs['logMessage'] = log_message
response = self.api.ingest(**kwargs)
return response.content | 0.003891 |
def setup_args(parser, config_files=[], ignore_unknown=False):
"""Parses arguments."""
global args
arglist = sys.argv[1:]
# Load arguments from config files
for config_file in filter(os.path.isfile, config_files):
arglist.insert(0, "@" + config_file)
args, unknown = parser.parse_known_args(arglist)
if unknown and not ignore_unknown:
msg = gettext('unrecognized arguments: %s')
parser.error(msg % ' '.join(unknown))
# Force lowercase to allow case-insensitive lookup
if args.stream:
args.stream = [stream.lower() for stream in args.stream]
if not args.url and args.url_param:
args.url = args.url_param | 0.001458 |
def list_pubs(self, buf):
"""SSH v2 public keys are serialized and returned."""
assert not buf.read()
keys = self.conn.parse_public_keys()
code = util.pack('B', msg_code('SSH2_AGENT_IDENTITIES_ANSWER'))
num = util.pack('L', len(keys))
log.debug('available keys: %s', [k['name'] for k in keys])
for i, k in enumerate(keys):
log.debug('%2d) %s', i+1, k['fingerprint'])
pubs = [util.frame(k['blob']) + util.frame(k['name']) for k in keys]
return util.frame(code, num, *pubs) | 0.003604 |
def remove(self):
'''
Remove this environment
'''
self.run_hook('preremove')
utils.rmtree(self.path)
self.run_hook('postremove') | 0.011364 |
def evaluate_call_args(self, calculator):
"""Interpreting this literal as a function call, return a 2-tuple of
``(args, kwargs)``.
"""
args = []
kwargs = OrderedDict() # Sass kwargs preserve order
for var_node, value_node in self.argpairs:
value = value_node.evaluate(calculator, divide=True)
if var_node is None:
# Positional
args.append(value)
else:
# Named
if not isinstance(var_node, Variable):
raise TypeError(
"Expected variable name, got {0!r}".format(var_node))
kwargs[var_node.name] = value
# Slurpy arguments go on the end of the args
if self.slurp:
args.extend(self.slurp.evaluate(calculator, divide=True))
return args, kwargs | 0.002262 |
def remove_non_magic_cols(self):
"""
Remove all non-MagIC columns from all tables.
"""
for table_name in self.tables:
table = self.tables[table_name]
table.remove_non_magic_cols_from_table() | 0.00813 |
def insertIndividual(self, individual):
"""
Inserts the specified individual into this repository.
"""
try:
models.Individual.create(
id=individual.getId(),
datasetId=individual.getParentContainer().getId(),
name=individual.getLocalId(),
description=individual.getDescription(),
created=individual.getCreated(),
updated=individual.getUpdated(),
species=json.dumps(individual.getSpecies()),
sex=json.dumps(individual.getSex()),
attributes=json.dumps(individual.getAttributes()))
except Exception:
raise exceptions.DuplicateNameException(
individual.getLocalId(),
individual.getParentContainer().getLocalId()) | 0.002361 |
def local_targets(self):
"""Iterator over the targets defined in this build file."""
for node in self.node:
if (node.repo, node.path) == (self.target.repo, self.target.path):
yield node | 0.008734 |
def armor(data, versioned=True):
"""
    Returns a string in ASCII Armor format for the given binary data. The
    output of this is compatible with pgcrypto's armor/dearmor functions.
"""
template = '-----BEGIN PGP MESSAGE-----\n%(headers)s%(body)s\n=%(crc)s\n-----END PGP MESSAGE-----'
body = base64.b64encode(data)
# The 24-bit CRC should be in big-endian, strip off the first byte (it's already masked in crc24).
crc = base64.b64encode(struct.pack('>L', crc24(data))[1:])
return template % {
'headers': 'Version: django-pgcrypto %s\n\n' % __version__ if versioned else '\n',
'body': body.decode('ascii'),
'crc': crc.decode('ascii'),
} | 0.005755 |
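# The crc24 helper used above is presumably the OpenPGP CRC-24 from RFC 4880,
# section 6.1 (that is what pgcrypto-compatible ASCII armor requires). A
# self-contained sketch under that assumption, in case the helper is needed:
def crc24(data):
    """OpenPGP CRC-24: init 0xB704CE, generator polynomial 0x1864CFB."""
    crc = 0xB704CE
    for byte in bytearray(data):
        crc ^= byte << 16
        for _ in range(8):
            crc <<= 1
            if crc & 0x1000000:
                crc ^= 0x1864CFB
    return crc & 0xFFFFFF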
def main(api_key, token):
"""List out the boards for our client"""
trello_client = TrelloClient(
api_key=api_key,
token=token,
)
print('Boards')
print('-----')
print('Name: Id')
for board in trello_client.list_boards():
print('{board.name}: {board.id}'.format(board=board)) | 0.003077 |
def prefix(expof10):
'''
Args:
expof10 : Exponent of a power of 10 associated with a SI unit
character.
Returns:
str : One of the characters in "yzafpnum kMGTPEZY".
'''
prefix_levels = (len(SI_PREFIX_UNITS) - 1) // 2
si_level = expof10 // 3
if abs(si_level) > prefix_levels:
raise ValueError("Exponent out range of available prefixes.")
return SI_PREFIX_UNITS[si_level + prefix_levels] | 0.002188 |
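# Per the docstring, SI_PREFIX_UNITS is the 17-character string
# "yzafpnum kMGTPEZY" with the blank (no prefix) at the centre. A small usage
# sketch, assuming that constant lives in the same module as prefix():
SI_PREFIX_UNITS = u"yzafpnum kMGTPEZY"

print(prefix(3))    # 'k'  (10**3, kilo)
print(prefix(-6))   # 'u'  (10**-6, micro)
print(prefix(0))    # ' '  (no prefix)
# prefix(27) would raise ValueError: the exponent is out of range of the
# available prefixes.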
def handle_time(msg):
"""Process an internal time request message."""
return msg.copy(ack=0, payload=calendar.timegm(time.localtime())) | 0.006993 |
def best_buy_1(self):
"""量大收紅
"""
result = self.data.capacity[-1] > self.data.capacity[-2] and \
self.data.price[-1] > self.data.open[-1]
return result | 0.015 |
def _vcf_info(start, end, mate_id, info=None):
"""Return breakend information line with mate and imprecise location.
"""
out = "SVTYPE=BND;MATEID={mate};IMPRECISE;CIPOS=0,{size}".format(
mate=mate_id, size=end-start)
if info is not None:
extra_info = ";".join("{0}={1}".format(k, v) for k, v in info.iteritems())
out = "{0};{1}".format(out, extra_info)
return out | 0.004914 |
def start(cls, ev=None):
"""
Start the query to aleph by ISSN.
"""
ViewController.log_view.add("Beginning AlephReader request..")
ViewController.issnbox_error.reset()
issn = ViewController.issn.strip()
# make sure, that `issn` was filled
if not issn:
ViewController.issnbox_error.show("ISSN nebylo vyplněno!")
ViewController.log_view.add("No ISSN! Aborting.")
return
ViewController.issnbox_error.hide()
ViewController.issn_progressbar.reset()
ViewController.issn_progressbar.show(50)
ViewController.log_view.add("For ISSN `%s`." % issn)
make_request(
url=join(settings.API_PATH, "aleph/records_by_issn"),
data={'issn': issn},
on_complete=cls.on_complete
) | 0.002372 |
def extract_rzip (archive, compression, cmd, verbosity, interactive, outdir):
"""Extract an RZIP archive."""
cmdlist = [cmd, '-d', '-k']
if verbosity > 1:
cmdlist.append('-v')
outfile = util.get_single_outfile(outdir, archive)
cmdlist.extend(["-o", outfile, archive])
return cmdlist | 0.006369 |
def Parse(self, stat, file_object, knowledge_base):
"""Parse the History file."""
_, _ = stat, knowledge_base
# TODO(user): Convert this to use the far more intelligent plaso parser.
ff = Firefox3History(file_object)
for timestamp, unused_entry_type, url, title in ff.Parse():
yield rdf_webhistory.BrowserHistoryItem(
url=url,
domain=urlparse.urlparse(url).netloc,
access_time=timestamp,
program_name="Firefox",
source_path=file_object.Path(),
title=title) | 0.00369 |
def find_covalent_bonds(ampal, max_range=2.2, threshold=1.1, tag=True):
"""Finds all covalent bonds in the AMPAL object.
Parameters
----------
ampal : AMPAL Object
Any AMPAL object with a `get_atoms` method.
max_range : float, optional
Used to define the sector size, so interactions at longer ranges
will not be found.
threshold : float, optional
Allows deviation from ideal covalent bond distance to be included.
For example, a value of 1.1 would allow interactions up to 10% further
from the ideal distance to be included.
tag : bool, optional
If `True`, will add the covalent bond to the tags dictionary of
each `Atom` involved in the interaction under the `covalent_bonds`
key.
"""
sectors=gen_sectors(ampal.get_atoms(), max_range * 1.1)
bonds=[]
for sector in sectors.values():
atoms=itertools.combinations(sector, 2)
bonds.extend(covalent_bonds(atoms, threshold=threshold))
bond_set=list(set(bonds))
if tag:
for bond in bond_set:
a, b=bond.a, bond.b
if 'covalent_bonds' not in a.tags:
a.tags['covalent_bonds']=[b]
else:
a.tags['covalent_bonds'].append(b)
if 'covalent_bonds' not in b.tags:
b.tags['covalent_bonds']=[a]
else:
b.tags['covalent_bonds'].append(a)
return bond_set | 0.005483 |
def turn_on(self, time):
"""(Helper) Turn on an output"""
self._elk.send(cn_encode(self._index, time)) | 0.016949 |
def build(self):
"""Generate a TermDocMatrix from data in parameters.
Returns
----------
term_doc_matrix : TermDocMatrix
The object that this factory class builds.
"""
if self._category_text_iter is None:
raise CategoryTextIterNotSetError()
nlp = self.get_nlp()
category_document_iter = (
(category, self._clean_function(raw_text))
for category, raw_text
in self._category_text_iter
)
term_doc_matrix = self._build_from_category_spacy_doc_iter(
(
(category, nlp(text))
for (category, text)
in category_document_iter
if text.strip() != ''
)
)
return term_doc_matrix | 0.00246 |
def show(self):
"""Publish HTML."""
IPython.display.display(IPython.display.HTML(self.as_html())) | 0.017699 |
def _getSerialTimeout(self, device):
"""
Get the serial timeout stored on the hardware device.
        Caution: more than one value returned from the Qik can have the same
        actual timeout value according to the formula below. I have verified
        this as an idiosyncrasy of the Qik itself. There are only 72 unique
        values that the Qik can logically use; the remaining 56 values are
        repeats of those 72.
:Parameters:
device : `int`
            The device is the integer number of the hardware device's ID and
is only used with the Pololu Protocol.
:Returns:
The timeout value in seconds.
"""
num = self._getConfig(self.SERIAL_TIMEOUT, device)
if isinstance(num, int):
x = num & 0x0F
y = (num >> 4) & 0x07
result = self.DEFAULT_SERIAL_TIMEOUT * x * pow(2, y)
else:
result = num
return result | 0.002041 |
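To make the nibble encoding above concrete, here is a hedged, standalone sketch of the same decoding; DEFAULT_SERIAL_TIMEOUT is a placeholder value here, the real constant is defined on the driver class.

DEFAULT_SERIAL_TIMEOUT = 0.262  # placeholder base unit; the driver defines the real constant

def decode_timeout(num):
    # Low 4 bits give a linear multiplier, the next 3 bits a power-of-two exponent,
    # which is why several of the 128 possible codes collapse onto the same timeout.
    x = num & 0x0F
    y = (num >> 4) & 0x07
    return DEFAULT_SERIAL_TIMEOUT * x * pow(2, y)

print(decode_timeout(0x5A))  # x=10, y=5 -> base * 10 * 32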
def subgraph(self, nodes):
"""
Return the subgraph consisting of the given nodes and edges
        between these nodes.
Parameters
----------
nodes : array_like(int, ndim=1)
Array of node indices.
Returns
-------
DiGraph
A DiGraph representing the subgraph.
"""
adj_matrix = self.csgraph[np.ix_(nodes, nodes)]
weighted = True # To copy the dtype
if self.node_labels is not None:
node_labels = self.node_labels[nodes]
else:
node_labels = None
return DiGraph(adj_matrix, weighted=weighted, node_labels=node_labels) | 0.00295 |
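The key step above is the np.ix_ cross-index; a minimal NumPy-only illustration of what that slice produces, using a dense array as a stand-in for csgraph:

import numpy as np

A = np.arange(16).reshape(4, 4)   # stand-in for the adjacency matrix
nodes = [0, 2]
print(A[np.ix_(nodes, nodes)])    # rows 0 and 2 crossed with columns 0 and 2
# [[ 0  2]
#  [ 8 10]]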
def hmac_hex_key(self, hmac_hex_key):
"""
Sets the hmac_hex_key of this CfsslAuthCredentials.
The key that is used to compute the HMAC of the request using the HMAC-SHA-256 algorithm. Must contain an even number of hexadecimal characters.
:param hmac_hex_key: The hmac_hex_key of this CfsslAuthCredentials.
:type: str
"""
if hmac_hex_key is None:
raise ValueError("Invalid value for `hmac_hex_key`, must not be `None`")
if hmac_hex_key is not None and len(hmac_hex_key) > 64:
raise ValueError("Invalid value for `hmac_hex_key`, length must be less than or equal to `64`")
if hmac_hex_key is not None and not re.search('^([a-fA-F0-9][a-fA-F0-9]){1,32}$', hmac_hex_key):
raise ValueError("Invalid value for `hmac_hex_key`, must be a follow pattern or equal to `/^([a-fA-F0-9][a-fA-F0-9]){1,32}$/`")
self._hmac_hex_key = hmac_hex_key | 0.00843 |
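For context, a hedged sketch (not the generated client's API) of how a hex key of this form is typically used to sign a request body with HMAC-SHA-256 from the standard library; the key and payload below are made up:

import binascii
import hashlib
import hmac

hex_key = "aabbccddeeff00112233445566778899"   # hypothetical 32-hex-char key
payload = b'{"request": "example"}'            # hypothetical request body

token = hmac.new(binascii.unhexlify(hex_key), payload, hashlib.sha256).hexdigest()
print(token)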
def start(self):
'''
Startup the kafka consumer.
'''
log.debug('Creating the consumer using the bootstrap servers: %s and the group ID: %s',
self.bootstrap_servers,
self.group_id)
try:
self.consumer = kafka.KafkaConsumer(bootstrap_servers=self.bootstrap_servers,
group_id=self.group_id)
except kafka.errors.NoBrokersAvailable as err:
log.error(err, exc_info=True)
raise ListenerException(err)
log.debug('Subscribing to the %s topic', self.topic)
self.consumer.subscribe(topics=[self.topic]) | 0.005935 |
def homogenize(series_dict):
"""
Conform a set of SparseSeries (with NaN fill_value) to a common SparseIndex
corresponding to the locations where they all have data
Parameters
----------
series_dict : dict or DataFrame
Notes
-----
Using the dumbest algorithm I could think of. Should put some more thought
into this
Returns
-------
homogenized : dict of SparseSeries
"""
index = None
need_reindex = False
for _, series in series_dict.items():
if not np.isnan(series.fill_value):
raise TypeError('this method is only valid with NaN fill values')
if index is None:
index = series.sp_index
elif not series.sp_index.equals(index):
need_reindex = True
index = index.intersect(series.sp_index)
if need_reindex:
output = {}
for name, series in series_dict.items():
if not series.sp_index.equals(index):
series = series.sparse_reindex(index)
output[name] = series
else:
output = series_dict
return output | 0.000892 |
def summary(self, campaign_id=None):
""" Returns the campaign summary """
resource_cls = CampaignSummary
single_resource = False
if not campaign_id:
resource_cls = CampaignSummaries
single_resource = True
return super(API, self).get(
resource_id=campaign_id,
resource_action='summary',
resource_cls=resource_cls,
single_resource=single_resource) | 0.004357 |
def RandomNormalInitializer(stddev=1e-2):
"""An initializer function for random normal coefficients."""
def init(shape, rng):
return (stddev * backend.random.normal(rng, shape)).astype('float32')
return init | 0.018433 |
def _parse(self, root, line, idx):
"""
:param root: Tree node.
:param line: String to parse.
:param idx: Global counter of characters parsed.
:return: (list of parsed graphemes, incremented character count)
"""
# Base (or degenerate..) case.
if len(line) == 0:
return [], idx
parse = []
curr = 0
node = root
cidx = idx
while curr < len(line):
node = node.children.get(line[curr])
curr += 1
if not node:
break
if node.sentinel:
subparse, cidx = self._parse(root, line[curr:], idx + curr)
# Always keep the latest valid parse, which will be
# the longest-matched (greedy match) graphemes.
parse = [line[:curr]]
parse.extend(subparse)
if parse:
idx = cidx
return parse, idx | 0.002086 |
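The parser assumes a character trie whose nodes expose `children` and `sentinel`; a minimal sketch of how such a tree might be built (the real builder in the source may differ):

class Node:
    def __init__(self):
        self.children = {}      # next character -> Node
        self.sentinel = False   # True when the path from the root spells a complete grapheme

def build_trie(graphemes):
    root = Node()
    for grapheme in graphemes:
        node = root
        for ch in grapheme:
            node = node.children.setdefault(ch, Node())
        node.sentinel = True
    return root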
def recognize(mol):
""" Detect cycle basis, biconnected and isolated components (DFS).
    This will add the following attributes to the molecule instance object:
    mol.rings: cycle basis
    mol.scaffolds: biconnected components
    mol.isolated: isolated components other than the largest one
    To find the minimum set of rings, additionally execute topology.minify_ring.
Reference:
networkx cycle_basis function
"""
g = set(i for i, _ in mol.atoms_iter())
bccs = {} # BiConnected Components
isoc = [] # ISOlated Components
while g:
start = g.pop()
stack = [start]
pred = {start: start}
used = {start: set()}
root = {start: start}
while stack:
tail = stack.pop()
for nbr in mol.neighbors(tail):
if nbr not in used: # New node
pred[nbr] = tail
stack.append(nbr)
used[nbr] = {tail}
root[nbr] = nbr
elif nbr in stack: # Cycle found
pn = used[nbr]
cyc = [nbr, tail]
p = pred[tail]
end = pred[nbr]
root[nbr] = root[tail] = root[end]
while p not in pn: # Backtrack
cyc.append(p)
root[p] = root[end]
if p in bccs: # Append scaffold to new cycle
if root[end] not in bccs:
bccs[root[end]] = []
bccs[root[end]].extend(bccs[p])
del bccs[p]
p = pred[p]
cyc.append(p)
if root[end] not in bccs: # Append new cycle to scaffold
bccs[root[end]] = []
bccs[root[end]].append(cyc)
used[nbr].add(tail)
isoc.append(list(pred.keys()))
# print(pred)
g -= set(pred)
mol.rings = []
mol.scaffolds = []
for cycles in bccs.values():
rcnt = len(mol.rings)
mol.rings.extend(cycles)
mol.scaffolds.append(list(range(rcnt, rcnt + len(cycles))))
mol.isolated = list(sorted(isoc, key=len, reverse=True))[1:]
mol.descriptors.add("Topology") | 0.000429 |
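For intuition only (this is not the mol API): networkx's cycle_basis, which the docstring cites as the reference, returns two rings for a fused two-ring skeleton, matching what recognize stores per biconnected component:

import networkx as nx

G = nx.Graph()
G.add_edges_from([(1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 1),   # first ring
                  (5, 7), (7, 8), (8, 9), (9, 10), (10, 6)])        # fused second ring
print(nx.cycle_basis(G))   # two cycles sharing the 5-6 bond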
def ksum(p, K=2):
"""From
T. Ogita, S.M. Rump, and S. Oishi.
Accurate Sum and Dot Product,
SIAM J. Sci. Comput., 26(6), 1955–1988 (34 pages).
<https://doi.org/10.1137/030601818>.
Algorithm 4.8. Summation as in K-fold precision by (K−1)-fold error-free
vector transformation.
"""
# Don't override the input data.
q = p.copy()
distill(q, K - 1)
return numpy.sum(q[:-1], axis=0) + q[-1] | 0.002309 |
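distill is not shown in this excerpt; below is a hedged sketch consistent with the cited algorithm (repeated error-free vector transformation built from Knuth's two-sum), written with NumPy so it also handles the stacked rows ksum expects:

import numpy

def distill(q, num_passes):
    # Each pass sweeps TwoSum through the vector, leaving the running sums in the
    # later slots and the rounding errors in the earlier ones.
    for _ in range(num_passes):
        for i in range(1, len(q)):
            a, b = q[i], q[i - 1]
            x = a + b                      # floating-point sum
            z = x - a
            y = (a - (x - z)) + (b - z)    # exact rounding error of x
            q[i], q[i - 1] = x, y
    return q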
def add_postfix(file_path, postfix):
# type: (AnyStr, AnyStr) -> AnyStr
"""Add postfix for a full file path.
Examples:
>>> FileClass.add_postfix('/home/zhulj/dem.tif', 'filled')
'/home/zhulj/dem_filled.tif'
>>> FileClass.add_postfix('dem.tif', 'filled')
'dem_filled.tif'
>>> FileClass.add_postfix('dem', 'filled')
'dem_filled'
"""
cur_sep = ''
for sep in ['\\', '/', os.sep]:
if sep in file_path:
cur_sep = sep
break
corename = FileClass.get_core_name_without_suffix(file_path)
tmpspliter = os.path.basename(file_path).split('.')
suffix = ''
if len(tmpspliter) > 1:
suffix = tmpspliter[-1]
newname = os.path.dirname(file_path) + cur_sep + corename + '_' + postfix
if suffix != '':
newname += '.' + suffix
return str(newname) | 0.004128 |
def check(self, src_tgt, actual_deps):
"""Check for missing deps.
See docstring for _compute_missing_deps for details.
"""
if self._check_missing_direct_deps or self._check_unnecessary_deps:
missing_file_deps, missing_direct_tgt_deps = \
self._compute_missing_deps(src_tgt, actual_deps)
buildroot = get_buildroot()
def shorten(path): # Make the output easier to read.
if path.startswith(buildroot):
return os.path.relpath(path, buildroot)
return path
def filter_whitelisted(missing_deps):
# Removing any targets that exist in the whitelist from the list of dependency issues.
return [(tgt_pair, evidence) for (tgt_pair, evidence) in missing_deps
if tgt_pair[0].address not in self._target_whitelist]
missing_direct_tgt_deps = filter_whitelisted(missing_direct_tgt_deps)
if self._check_missing_direct_deps and missing_direct_tgt_deps:
log_fn = (self.context.log.error if self._check_missing_direct_deps == 'fatal'
else self.context.log.warn)
for (tgt_pair, evidence) in missing_direct_tgt_deps:
evidence_str = '\n'.join([' {} uses {}'.format(shorten(e[0]), shorten(e[1]))
for e in evidence])
log_fn('Missing direct BUILD dependency {} -> {} because:\n{}'
.format(tgt_pair[0].address.spec, tgt_pair[1].address.spec, evidence_str))
if self._check_missing_direct_deps == 'fatal':
raise TaskError('Missing direct deps.')
if self._check_unnecessary_deps:
log_fn = (self.context.log.error if self._check_unnecessary_deps == 'fatal'
else self.context.log.warn)
had_unused = self._do_check_unnecessary_deps(src_tgt, actual_deps, log_fn)
if had_unused and self._check_unnecessary_deps == 'fatal':
raise TaskError('Unnecessary deps.') | 0.010797 |
def refitPrefixes(self):
"""
Refit namespace qualification by replacing prefixes
with explicit namespaces. Also purges prefix mapping table.
@return: self
@rtype: L{Element}
"""
for c in self.children:
c.refitPrefixes()
if self.prefix is not None:
ns = self.resolvePrefix(self.prefix)
if ns[1] is not None:
self.expns = ns[1]
self.prefix = None
self.nsprefixes = {}
return self | 0.003868 |
def allhexlify(data):
"""Hexlify given data into a string representation with hex values for all chars
Input like
'ab\x04ce'
becomes
'\x61\x62\x04\x63\x65'
"""
hx = binascii.hexlify(data)
return b''.join([b'\\x' + o for o in re.findall(b'..', hx)]) | 0.006944 |
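A quick usage check of the helper above (it relies on the module-level binascii and re imports of the original file):

print(allhexlify(b'ab\x04ce'))   # b'\\x61\\x62\\x04\\x63\\x65'
print(allhexlify(b''))           # b'' (empty input stays empty)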
def on_pause(self):
"""Sync the database with the current state of the game."""
self.engine.commit()
self.strings.save()
self.funcs.save()
self.config.write() | 0.010101 |
def _get_cythonized_result(self, how, grouper, aggregate=False,
cython_dtype=None, needs_values=False,
needs_mask=False, needs_ngroups=False,
result_is_index=False,
pre_processing=None, post_processing=None,
**kwargs):
"""
Get result for Cythonized functions.
Parameters
----------
how : str, Cythonized function name to be called
grouper : Grouper object containing pertinent group info
aggregate : bool, default False
Whether the result should be aggregated to match the number of
groups
cython_dtype : default None
Type of the array that will be modified by the Cython call. If
`None`, the type will be inferred from the values of each slice
needs_values : bool, default False
Whether the values should be a part of the Cython call
signature
needs_mask : bool, default False
Whether boolean mask needs to be part of the Cython call
signature
needs_ngroups : bool, default False
Whether number of groups is part of the Cython call signature
result_is_index : bool, default False
Whether the result of the Cython operation is an index of
values to be retrieved, instead of the actual values themselves
pre_processing : function, default None
Function to be applied to `values` prior to passing to Cython.
Function should return a tuple where the first element is the
values to be passed to Cython and the second element is an optional
type which the values should be converted to after being returned
by the Cython operation. Raises if `needs_values` is False.
post_processing : function, default None
Function to be applied to result of Cython function. Should accept
an array of values as the first argument and type inferences as its
second argument, i.e. the signature should be
(ndarray, Type).
**kwargs : dict
Extra arguments to be passed back to Cython funcs
Returns
-------
`Series` or `DataFrame` with filled values
"""
if result_is_index and aggregate:
raise ValueError("'result_is_index' and 'aggregate' cannot both "
"be True!")
if post_processing:
            if not callable(post_processing):
raise ValueError("'post_processing' must be a callable!")
if pre_processing:
if not callable(pre_processing):
raise ValueError("'pre_processing' must be a callable!")
if not needs_values:
raise ValueError("Cannot use 'pre_processing' without "
"specifying 'needs_values'!")
labels, _, ngroups = grouper.group_info
output = collections.OrderedDict()
base_func = getattr(libgroupby, how)
for name, obj in self._iterate_slices():
if aggregate:
result_sz = ngroups
else:
result_sz = len(obj.values)
if not cython_dtype:
cython_dtype = obj.values.dtype
result = np.zeros(result_sz, dtype=cython_dtype)
func = partial(base_func, result, labels)
inferences = None
if needs_values:
vals = obj.values
if pre_processing:
vals, inferences = pre_processing(vals)
func = partial(func, vals)
if needs_mask:
mask = isna(obj.values).view(np.uint8)
func = partial(func, mask)
if needs_ngroups:
func = partial(func, ngroups)
func(**kwargs) # Call func to modify indexer values in place
if result_is_index:
result = algorithms.take_nd(obj.values, result)
if post_processing:
result = post_processing(result, inferences)
output[name] = result
if aggregate:
return self._wrap_aggregated_output(output)
else:
return self._wrap_transformed_output(output) | 0.001588 |
def fix_code(code, directory):
"""Formats Python code to conform to the PEP 8 style guide.
"""
if not black:
raise Fault("black not installed", code=400)
try:
if parse_version(black.__version__) < parse_version("19.0"):
reformatted_source = black.format_file_contents(
src_contents=code, line_length=black.DEFAULT_LINE_LENGTH, fast=False
)
else:
fm = black.FileMode()
reformatted_source = black.format_file_contents(
src_contents=code, fast=False, mode=fm
)
return reformatted_source
except black.NothingChanged:
return code
except Exception as e:
raise Fault("Error during formatting: {}".format(e), code=400) | 0.002577 |
def _set_dag_run_state(dag_id, execution_date, state, session=None):
"""
Helper method that set dag run state in the DB.
:param dag_id: dag_id of target dag run
:param execution_date: the execution date from which to start looking
:param state: target state
:param session: database session
"""
DR = DagRun
dr = session.query(DR).filter(
DR.dag_id == dag_id,
DR.execution_date == execution_date
).one()
dr.state = state
if state == State.RUNNING:
dr.start_date = timezone.utcnow()
dr.end_date = None
else:
dr.end_date = timezone.utcnow()
session.merge(dr) | 0.001536 |
def _parse_line(cls, line):
"""
Helper method for parsing package line with or without SOS report information.
Args:
line (str): package line with or without SOS report information
Returns:
dict: dictionary containing 'name', 'version', 'release' and 'arch' keys plus
additionally 'installtime', 'buildtime', 'vendor', 'buildserver', 'pgpsig',
'pgpsig_short' if these are present.
"""
try:
pkg, rest = line.split(None, 1)
except ValueError:
rpm = cls._parse_package(line.strip())
return rpm
rpm = cls._parse_package(pkg)
rest = rest.split('\t')
for i, value in enumerate(rest):
rpm[cls.SOSREPORT_KEYS[i]] = value
return rpm | 0.00609 |
def view(self, repo):
"""
View repository information
"""
status = "{0}disabled{1}".format(self.meta.color["RED"],
self.meta.color["ENDC"])
self.form["Status:"] = status
self.form["Default:"] = "no"
if repo in self.meta.default_repositories:
self.form["Default:"] = "yes"
if (repo in self.meta.repositories and
os.path.isfile(self.meta.lib_path + "{0}_repo/PACKAGES."
"TXT".format(repo))):
status = "{0}enabled{1}".format(self.meta.color["GREEN"],
self.meta.color["ENDC"])
if repo != "sbo":
data = self.repository_data(repo)
size = units(data[1], data[2])
self.form["Repo id:"] = repo
self.form["Repo url:"] = self.all_repos[repo]
self.form["Total compressed packages:"] = "{0} {1}".format(
str(size[1][0]), str(size[0][0]))
self.form["Total uncompressed packages:"] = "{0} {1}".format(
str(size[1][1]), str(size[0][1]))
self.form["Number of packages:"] = data[0]
self.form["Status:"] = status
self.form["Last updated:"] = data[3]
elif (repo == "sbo" and os.path.isfile(self.meta.lib_path + "{0}_repo/"
"SLACKBUILDS.TXT".format(repo))):
status = "{0}enabled{1}".format(self.meta.color["GREEN"],
self.meta.color["ENDC"])
sum_sbo_pkgs = 0
for line in (Utils().read_file(
self.meta.lib_path + "sbo_repo/SLACKBUILDS."
"TXT").splitlines()):
if line.startswith("SLACKBUILD NAME: "):
sum_sbo_pkgs += 1
changelog_txt = Utils().read_file(
self.meta.log_path + "sbo/ChangeLog.txt")
last_upd = changelog_txt.split("\n", 1)[0]
self.form["Repo id:"] = repo
self.form["Repo url:"] = self.all_repos[repo]
self.form["Total compressed packages:"] = ""
self.form["Total uncompressed packages:"] = ""
self.form["Number of packages:"] = sum_sbo_pkgs
self.form["Status:"] = status
self.form["Last updated:"] = last_upd
print("")
        for key, value in sorted(self.form.items()):
            print(self.meta.color["GREY"] + key + self.meta.color["ENDC"], value)
print("")
raise SystemExit() | 0.00151 |
def checkPrediction2(self, patternNZs, output=None, confidence=None,
details=False):
"""
This function will replace checkPrediction.
This function produces goodness-of-match scores for a set of input patterns, by
    checking for their presence in the current and predicted output of the TP.
Returns a global count of the number of extra and missing bits, the
confidence scores for each input pattern, and (if requested) the
bits in each input pattern that were not present in the TP's prediction.
todo: Add option to check predictedState only.
Parameters:
==========
patternNZs: a list of input patterns that we want to check for. Each element
is a list of the non-zeros in that pattern.
output: The output of the TP. If not specified, then use the
TP's current output. This can be specified if you are
trying to check the prediction metric for an output from
the past.
confidence: The cell confidences. If not specified, then use the
TP's current self.confidence. This can be specified if you are
trying to check the prediction metrics for an output
from the past.
details: if True, also include details of missing bits per pattern.
Return value:
============
The following list is returned:
[
totalExtras,
totalMissing,
[conf_1, conf_2, ...],
[missing1, missing2, ...]
]
totalExtras: a global count of the number of 'extras', i.e. bits that
are on in the current output but not in the or of all the
passed in patterns
totalMissing: a global count of all the missing bits, i.e. the bits that
are on in the or of the patterns, but not in the current
output
conf_i the confidence score for the i'th pattern in patternsToCheck
missing_i the bits in the i'th pattern that were missing
in the output. This list is only returned if details is
True.
"""
# Get the non-zeros in each pattern
numPatterns = len(patternNZs)
# Compute the union of all the expected patterns
orAll = set()
orAll = orAll.union(*patternNZs)
# Get the list of active columns in the output
if output is None:
assert self.currentOutput is not None
output = self.currentOutput
output = set(output.sum(axis=1).nonzero()[0])
# Compute the total extra and missing in the output
totalExtras = len(output.difference(orAll))
totalMissing = len(orAll.difference(output))
# Get the percent confidence level per column by summing the confidence levels
# of the cells in the column. During training, each segment's confidence
# number is computed as a running average of how often it correctly
# predicted bottom-up activity on that column. A cell's confidence number
# is taken from the first active segment found in the cell. Note that
# confidence will only be non-zero for predicted columns.
if confidence is None:
confidence = self.confidence['t']
# Set the column confidence to be the max of the cell confidences in that
# column.
colConfidence = self.columnConfidences(confidence)
# Assign confidences to each pattern
confidences = []
for i in xrange(numPatterns):
# Sum of the column confidences for this pattern
positivePredictionSum = colConfidence[patternNZs[i]].sum()
# How many columns in this pattern
positiveColumnCount = len(patternNZs[i])
# Sum of all the column confidences
totalPredictionSum = colConfidence.sum()
# Total number of columns
totalColumnCount = len(colConfidence)
negativePredictionSum = totalPredictionSum - positivePredictionSum
negativeColumnCount = totalColumnCount - positiveColumnCount
# Compute the average confidence score per column for this pattern
if positiveColumnCount != 0:
positivePredictionScore = positivePredictionSum/positiveColumnCount
else:
positivePredictionScore = 0.0
# Compute the average confidence score per column for the other patterns
if negativeColumnCount != 0:
negativePredictionScore = negativePredictionSum/negativeColumnCount
else:
negativePredictionScore = 0.0
predictionScore = positivePredictionScore - negativePredictionScore
confidences.append((predictionScore,
positivePredictionScore,
negativePredictionScore))
# Include detail? (bits in each pattern that were missing from the output)
if details:
missingPatternBits = [set(pattern).difference(output) \
for pattern in patternNZs]
return (totalExtras, totalMissing, confidences, missingPatternBits)
else:
return (totalExtras, totalMissing, confidences) | 0.00875 |
def has_na(eqdata):
"""
Return false if `eqdata` contains no missing values.
Parameters
----------
eqdata : DataFrame or ndarray
Data to check for missing values (NaN, None)
Returns
----------
answer : bool
False iff `eqdata` contains no missing values.
"""
if isinstance(eqdata, pd.DataFrame):
_values = eqdata.values
else:
_values = eqdata
return len(_values[pd.isnull(_values)]) > 0 | 0.002141 |
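A minimal check exercising both branches of the isinstance test above:

import numpy as np
import pandas as pd

df = pd.DataFrame({"a": [1.0, np.nan], "b": [2.0, 3.0]})
print(has_na(df))                   # True  (DataFrame branch)
print(has_na(df.dropna().values))   # False (ndarray branch)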
def start(self):
"""
Start the node on the cloud using the given instance properties.
This method is non-blocking: as soon as the node id is returned from
the cloud provider, it will return. The `is_alive`:meth: and
`update_ips`:meth: methods should be used to further gather details
about the state of the node.
"""
log.info("Starting node `%s` from image `%s` with flavor %s ...",
self.name, self.image_id, self.flavor)
self.instance_id = self._cloud_provider.start_instance(
self.user_key_name, self.user_key_public, self.user_key_private,
self.security_group,
self.flavor, self.image_id, self.image_userdata,
username=self.image_user,
node_name=("%s-%s" % (self.cluster_name, self.name)),
**self.extra)
log.debug("Node `%s` has instance ID `%s`", self.name, self.instance_id) | 0.003161 |
def add(self, child, name=None, coordinates=None):
"""
Adds child to the :Placeable:, storing it's :name: and :coordinates:
This method is used to add :Well:s to the :Container:,
add :Slot:s to :Deck:, etc
"""
if not name:
name = str(child)
if name in self.children_by_name:
raise RuntimeWarning(
'Child with name {} already in slot, use custom name'.format(
name))
if coordinates:
child._coordinates = Vector(coordinates)
child.parent = self
self.children_by_name[name] = child
self.children_by_reference[child] = name | 0.002933 |
def _add_defaults_python(self):
"""getting python files"""
if self.distribution.has_pure_modules():
build_py = self.get_finalized_command('build_py')
self.filelist.extend(build_py.get_source_files())
# This functionality is incompatible with include_package_data, and
# will in fact create an infinite recursion if include_package_data
# is True. Use of include_package_data will imply that
# distutils-style automatic handling of package_data is disabled
if not self.distribution.include_package_data:
for _, src_dir, _, filenames in build_py.data_files:
self.filelist.extend([os.path.join(src_dir, filename)
for filename in filenames]) | 0.002451 |
def _get_jsmap_name(self, url):
"""return 'name' of the map in .js format"""
ret = urlopen(url)
return ret.read().decode('utf-8').split('=')[0].replace(" ", "") | 0.015544 |
def error(self, table_name, key, error_message, error_stack=None):
"""
Log an error message. The job reservation is replaced with an error entry.
        If an error occurs, leave an entry describing the problem.
:param table_name: `database`.`table_name`
:param key: the dict of the job's primary key
:param error_message: string error message
:param error_stack: stack trace
"""
if len(error_message) > ERROR_MESSAGE_LENGTH:
error_message = error_message[:ERROR_MESSAGE_LENGTH-len(TRUNCATION_APPENDIX)] + TRUNCATION_APPENDIX
job_key = dict(table_name=table_name, key_hash=key_hash(key))
self.insert1(
dict(job_key,
status="error",
host=platform.node(),
pid=os.getpid(),
connection_id=self.connection.connection_id,
user=self._user,
key=key,
error_message=error_message,
error_stack=error_stack),
replace=True, ignore_extra_fields=True) | 0.003673 |
def clear(self):
"""Remove all sources from this configuration."""
super(LazyConfig, self).clear()
self._lazy_suffix = []
self._lazy_prefix = [] | 0.011364 |