def DirectoryStimuliFactory(loader):
"""
Takes the path to the images folder of an experiment and automatically
generates the category / file-number list needed to construct an
appropriate _categories object.
Parameters:
loader : Loader object which contains
impath : string
path to the input image files of the experiment. All
subfolders in that path will be treated as categories. If no
subfolders are present, category 1 will be assigned and all
files in the folder are considered input images.
Images have to end in '.png'.
ftrpath : string
path to the feature folder. It is expected that the folder
structure corresponds to the structure in impath, i.e.
ftrpath/category/featurefolder/featuremap.mat
Furthermore, features are assumed to be the same for all
categories.
"""
impath = loader.impath
ftrpath = loader.ftrpath
# checks whether user has reading permission for the path
assert os.access(impath, os.R_OK)
assert os.access(ftrpath, os.R_OK)
# EXTRACTING IMAGE NAMES
img_per_cat = {}
# extract only directories in the given folder
subfolders = [name for name in os.listdir(impath) if os.path.isdir(
os.path.join(impath, name))]
# if there are no subfolders, walk through files. Take 1 as key for the
# categories object
if not subfolders:
[_, _, files] = next(os.walk(os.path.join(impath)))
# this only takes entries that end with '.png'
entries = {1:
[int(cur_file[cur_file.find('_')+1:-4]) for cur_file
in files if cur_file.endswith('.png')]}
img_per_cat.update(entries)
subfolders = ['']
# if there are subfolders, walk through them
else:
for directory in subfolders:
[_, _, files] = next(os.walk(os.path.join(impath, directory)))
# this only takes entries that end with '.png'. Strips ending and
# considers everything after the first '_' as the imagenumber
imagenumbers = [int(cur_file[cur_file.find('_')+1:-4])
for cur_file in files
if cur_file.endswith('.png') and len(cur_file) > 4]
entries = {int(directory): imagenumbers}
img_per_cat.update(entries)
del directory
del imagenumbers
# in case subfolders do not exist, '' is appended here.
_, features, files = next(os.walk(os.path.join(ftrpath,
subfolders[0])))
return Categories(loader, img_per_cat = img_per_cat, features = features)
def isosurface(image, smoothing=0, threshold=None, connectivity=False):
"""Return a ``vtkActor`` isosurface extracted from a ``vtkImageData`` object.
:param float smoothing: gaussian filter to smooth vtkImageData, in units of sigmas
:param threshold: value or list of values to draw the isosurface(s)
:type threshold: float, list
:param bool connectivity: if True only keeps the largest portion of the polydata
.. hint:: |isosurfaces| |isosurfaces.py|_
"""
if smoothing:
smImg = vtk.vtkImageGaussianSmooth()
smImg.SetDimensionality(3)
smImg.SetInputData(image)
smImg.SetStandardDeviations(smoothing, smoothing, smoothing)
smImg.Update()
image = smImg.GetOutput()
scrange = image.GetScalarRange()
if scrange[1] > 1e10:
print("Warning, high scalar range detected:", scrange)
cf = vtk.vtkContourFilter()
cf.SetInputData(image)
cf.UseScalarTreeOn()
cf.ComputeScalarsOn()
if utils.isSequence(threshold):
cf.SetNumberOfContours(len(threshold))
for i, t in enumerate(threshold):
cf.SetValue(i, t)
cf.Update()
else:
if not threshold:
threshold = (2 * scrange[0] + scrange[1]) / 3.0
cf.SetValue(0, threshold)
cf.Update()
clp = vtk.vtkCleanPolyData()
clp.SetInputConnection(cf.GetOutputPort())
clp.Update()
poly = clp.GetOutput()
if connectivity:
conn = vtk.vtkPolyDataConnectivityFilter()
conn.SetExtractionModeToLargestRegion()
conn.SetInputData(poly)
conn.Update()
poly = conn.GetOutput()
a = Actor(poly, c=None).phong()
a.mapper.SetScalarRange(scrange[0], scrange[1])
return a
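# An illustrative usage sketch, assuming `isosurface` above is importable and VTK is
# installed; vtkRTAnalyticSource is just a convenient synthetic volume and the threshold
# value is an assumption chosen for that source.
import vtk

source = vtk.vtkRTAnalyticSource()   # small synthetic vtkImageData test volume
source.Update()
volume = source.GetOutput()
# Smooth with sigma = 1 voxel, contour at an assumed scalar value, keep the largest region.
actor = isosurface(volume, smoothing=1, threshold=150.0, connectivity=True)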
def create_context(pip_version=None, python_version=None):
"""Create a context containing the specific pip and python.
Args:
pip_version (str or `Version`): Version of pip to use, or latest if None.
python_version (str or `Version`): Python version to use, or latest if
None.
Returns:
`ResolvedContext`: Context containing pip and python.
"""
# determine pip pkg to use for install, and python variants to install on
if pip_version:
pip_req = "pip-%s" % str(pip_version)
else:
pip_req = "pip"
if python_version:
ver = Version(str(python_version))
major_minor_ver = ver.trim(2)
py_req = "python-%s" % str(major_minor_ver)
else:
# use latest major.minor
package = get_latest_package("python")
if package:
major_minor_ver = package.version.trim(2)
else:
# no python package. We're gonna fail, let's just choose current
# python version (and fail at context creation time)
major_minor_ver = '.'.join(map(str, sys.version_info[:2]))
py_req = "python-%s" % str(major_minor_ver)
# use pip + latest python to perform pip download operations
request = [pip_req, py_req]
with convert_errors(from_=(PackageFamilyNotFoundError, PackageNotFoundError),
to=BuildError, msg="Cannot run - pip or python rez "
"package is not present"):
context = ResolvedContext(request)
# print pip package used to perform the install
pip_variant = context.get_resolved_package("pip")
pip_package = pip_variant.parent
print_info("Using %s (%s)" % (pip_package.qualified_name, pip_variant.uri))
return context
def rank1d(X, y=None, ax=None, algorithm='shapiro', features=None,
orient='h', show_feature_names=True, **kwargs):
"""Scores each feature with the algorithm and ranks them in a bar plot.
This helper function is a quick wrapper to utilize the Rank1D Visualizer
(Transformer) for one-off analysis.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n
An array or series of target or class values
ax : matplotlib axes
the axis to plot the figure on.
algorithm : one of {'shapiro', }, default: 'shapiro'
The ranking algorithm to use; the default is Shapiro-Wilk.
features : list
A list of feature names to use.
If a DataFrame is passed to fit and features is None, feature
names are selected as the columns of the DataFrame.
orient : 'h' or 'v'
Specifies a horizontal or vertical bar chart.
show_feature_names : boolean, default: True
If True, the feature names are used to label the axis ticks in the
plot.
Returns
-------
ax : matplotlib axes
Returns the axes that the bar plot was drawn on.
"""
# Instantiate the visualizer
visualizer = Rank1D(ax, algorithm, features, orient, show_feature_names,
**kwargs)
# Fit and transform the visualizer (calls draw)
visualizer.fit(X, y, **kwargs)
visualizer.transform(X)
# Return the axes object on the visualizer
return visualizer.ax
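# An illustrative sketch of the helper above, assuming pandas and the Rank1D visualizer
# are available; the DataFrame columns are made-up example features.
import pandas as pd

df = pd.DataFrame({
    "age": [23, 35, 41, 29, 52, 47],
    "income": [31, 60, 75, 45, 90, 66],
    "rating": [0.2, 0.5, 0.9, 0.4, 0.7, 0.6],
})
ax = rank1d(df, algorithm="shapiro", orient="h")
ax.figure.savefig("rank1d_shapiro.png")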
def video_list(request, slug):
"""
Displays list of videos for given event.
"""
event = get_object_or_404(Event, slug=slug)
return render(request, 'video/video_list.html', {
'event': event,
'video_list': event.eventvideo_set.all()
})
def add_tagfile(self, path, timestamp=None):
# type: (Text, datetime.datetime) -> None
"""Add tag files to our research object."""
self.self_check()
checksums = {}
# Read file to calculate its checksum
if os.path.isdir(path):
return
# FIXME: do the right thing for directories
with open(path, "rb") as tag_file:
# FIXME: Should have more efficient open_tagfile() that
# does all checksums in one go while writing through,
# adding checksums after closing.
# Below probably OK for now as metadata files
# are not too large..?
checksums[SHA1] = checksum_copy(tag_file, hasher=hashlib.sha1)
tag_file.seek(0)
checksums[SHA256] = checksum_copy(tag_file, hasher=hashlib.sha256)
tag_file.seek(0)
checksums[SHA512] = checksum_copy(tag_file, hasher=hashlib.sha512)
rel_path = _posix_path(os.path.relpath(path, self.folder))
self.tagfiles.add(rel_path)
self.add_to_manifest(rel_path, checksums)
if timestamp is not None:
self._file_provenance[rel_path] = {"createdOn": timestamp.isoformat()}
def IsTableProperties(Type, tagname, attrs):
"""
obsolete. see .CheckProperties() method of pycbc_glue.ligolw.table.Table
class.
"""
import warnings
warnings.warn("lsctables.IsTableProperties() is deprecated. use pycbc_glue.ligolw.table.Table.CheckProperties() instead", DeprecationWarning)
return Type.CheckProperties(tagname, attrs)
def delta_hv(scatterer):
"""
Delta_hv for the current setup.
Args:
scatterer: a Scatterer instance.
Returns:
Delta_hv [rad].
"""
Z = scatterer.get_Z()
return np.arctan2(Z[2,3] - Z[3,2], -Z[2,2] - Z[3,3])
def load(path_or_file, validate=True, strict=True, fmt='auto'):
r"""Load a JAMS Annotation from a file.
Parameters
----------
path_or_file : str or file-like
Path to the JAMS file to load
OR
An open file handle to load from.
validate : bool
Attempt to validate the JAMS object
strict : bool
if `validate == True`, enforce strict schema validation
fmt : str ['auto', 'jams', 'jamz']
The encoding format of the input
If `auto`, encoding is inferred from the file name.
If the input is an open file handle, `jams` encoding
is used.
Returns
-------
jam : JAMS
The loaded JAMS object
Raises
------
SchemaError
if `validate == True`, `strict==True`, and validation fails
See also
--------
JAMS.validate
JAMS.save
Examples
--------
>>> # Load a jams object from a file name
>>> J = jams.load('data.jams')
>>> # Or from an open file descriptor
>>> with open('data.jams', 'r') as fdesc:
... J = jams.load(fdesc)
>>> # Non-strict validation
>>> J = jams.load('data.jams', strict=False)
>>> # No validation at all
>>> J = jams.load('data.jams', validate=False)
"""
with _open(path_or_file, mode='r', fmt=fmt) as fdesc:
jam = JAMS(**json.load(fdesc))
if validate:
jam.validate(strict=strict)
return jam
def _smooth(values: List[float], beta: float) -> List[float]:
""" Exponential smoothing of values """
avg_value = 0.
smoothed = []
for i, value in enumerate(values):
avg_value = beta * avg_value + (1 - beta) * value
smoothed.append(avg_value / (1 - beta ** (i + 1)))
return smoothed
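# A quick numeric check of the bias correction above: dividing by 1 - beta**(i + 1)
# keeps early smoothed values on the scale of the data instead of being pulled toward 0.
values = [1.0, 2.0, 1.5, 3.0, 2.5, 4.0]
print(_smooth(values, beta=0.9))
# The first element is 1.0; without the correction it would be 0.1.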
def load(stream, Loader=Loader):
"""
Parse the first YAML document in a stream
and produce the corresponding Python object.
"""
loader = Loader(stream)
try:
return loader.get_single_data()
finally:
loader.dispose()
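# A minimal sketch of the wrapper above on an in-memory YAML document; for untrusted
# input a SafeLoader would normally be passed instead of the default Loader.
import io

stream = io.StringIO("name: example\nvalues: [1, 2, 3]\n")
data = load(stream)
print(data["name"], data["values"])   # -> example [1, 2, 3]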
def get_next_version() -> str:
"""
Returns: next version for this Git repository
"""
LOGGER.info('computing next version')
should_be_alpha = bool(CTX.repo.get_current_branch() != 'master')
LOGGER.info('alpha: %s', should_be_alpha)
calver = _get_calver()
LOGGER.info('current calver: %s', calver)
calver_tags = _get_current_calver_tags(calver)
LOGGER.info('found %s matching tags for this calver', len(calver_tags))
next_stable_version = _next_stable_version(calver, calver_tags)
LOGGER.info('next stable version: %s', next_stable_version)
if should_be_alpha:
return _next_alpha_version(next_stable_version, calver_tags)
return next_stable_version
def ordered_covering(routing_table, target_length, aliases=dict(),
no_raise=False):
"""Reduce the size of a routing table by merging together entries where
possible.
.. warning::
The input routing table *must* also include entries which could be
removed and replaced by default routing.
.. warning::
It is assumed that the input routing table is not in any particular
order and may be reordered into ascending order of generality (number
of don't cares/Xs in the key-mask) without affecting routing
correctness. It is also assumed that if this table is unordered it is
at least orthogonal (i.e., there are no two entries which would match
the same key) and reorderable.
.. note::
If *all* the keys in the table are derived from a single instance
of :py:class:`~rig.bitfield.BitField` then the table is guaranteed
to be orthogonal and reorderable.
.. note::
Use :py:meth:`~rig.routing_table.expand_entries` to generate an
orthogonal table and receive warnings if the input table is not
orthogonal.
Parameters
----------
routing_table : [:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
Routing entries to be merged.
target_length : int or None
Target length of the routing table; the minimisation procedure will
halt once either this target is reached or no further minimisation is
possible. If None then the table will be made as small as possible.
Other Parameters
----------------
aliases : {(key, mask): {(key, mask), ...}, ...}
Dictionary of which keys and masks in the routing table are
combinations of other (now removed) keys and masks; this allows us to
consider only the keys and masks the user actually cares about when
determining if inserting a new entry will break the correctness of the
table. This should be supplied when using this method to update an
already minimised table.
no_raise : bool
If False (the default) then an error will be raised if the table cannot
be minimised to be smaller than `target_length` and `target_length` is
not None. If True then a table will be returned regardless of the size
of the final table.
Raises
------
MinimisationFailedError
If the smallest table that can be produced is larger than
`target_length` and `no_raise` is False.
Returns
-------
[:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
Reduced routing table entries.
{(key, mask): {(key, mask), ...}, ...}
A new aliases dictionary.
"""
# Copy the aliases dictionary
aliases = dict(aliases)
# Perform an initial sort of the routing table in order of increasing
# generality.
routing_table = sorted(
routing_table,
key=lambda entry: _get_generality(entry.key, entry.mask)
)
while target_length is None or len(routing_table) > target_length:
# Get the best merge
merge = _get_best_merge(routing_table, aliases)
# If there is no merge then stop
if merge.goodness <= 0:
break
# Otherwise apply the merge, this returns a new routing table and a new
# aliases dictionary.
routing_table, aliases = merge.apply(aliases)
# If the table is still too big then raise an error
if (not no_raise and
target_length is not None and
len(routing_table) > target_length):
raise MinimisationFailedError(target_length, len(routing_table))
# Return the finished routing table and aliases table
return routing_table, aliases
def info(self, request):
"""Return height of the latest committed block."""
self.abort_if_abci_chain_is_not_synced()
# Check if BigchainDB supports the Tendermint version
if not (hasattr(request, 'version') and tendermint_version_is_compatible(request.version)):
logger.error(f'Unsupported Tendermint version: {getattr(request, "version", "no version")}.'
f' Currently, BigchainDB only supports {__tm_supported_versions__}. Exiting!')
sys.exit(1)
logger.info(f"Tendermint version: {request.version}")
r = ResponseInfo()
block = self.bigchaindb.get_latest_block()
if block:
chain_shift = 0 if self.chain is None else self.chain['height']
r.last_block_height = block['height'] - chain_shift
r.last_block_app_hash = block['app_hash'].encode('utf-8')
else:
r.last_block_height = 0
r.last_block_app_hash = b''
return r
def capacity_sp_meyerhof_and_hanna_1978(sp, fd, verbose=0):
"""
Calculates the two-layered foundation capacity according Meyerhof and Hanna (1978)
:param sp: Soil profile object
:param fd: Foundation object
:param verbose: verbosity
:return: ultimate bearing stress
"""
assert isinstance(sp, sm.SoilProfile)
sl_0 = sp.layer(1)
sl_1 = sp.layer(2)
h0 = sp.layer_depth(2)
gwl = sp.gwl
sl_0.nq_factor_0 = (
(np.tan(np.pi / 4 + np.deg2rad(sl_0.phi / 2))) ** 2 * np.exp(np.pi * np.tan(np.deg2rad(sl_0.phi))))
if sl_0.phi == 0:
sl_0.nc_factor_0 = 5.14
else:
sl_0.nc_factor_0 = (sl_0.nq_factor_0 - 1) / np.tan(np.deg2rad(sl_0.phi))
sl_0.ng_factor_0 = (sl_0.nq_factor_0 - 1) * np.tan(1.4 * np.deg2rad(sl_0.phi))
sl_1.nq_factor_1 = (
(np.tan(np.pi / 4 + np.deg2rad(sl_1.phi / 2))) ** 2 * np.exp(np.pi * np.tan(np.deg2rad(sl_1.phi))))
if sl_1.phi == 0:
sl_1.nc_factor_1 = 5.14
else:
sl_1.nc_factor_1 = (sl_1.nq_factor_1 - 1) / np.tan(np.deg2rad(sl_1.phi))
sl_1.ng_factor_1 = (sl_1.nq_factor_1 - 1) * np.tan(1.4 * np.deg2rad(sl_1.phi))
if verbose:
log("Nc: ", sl_1.nc_factor_1)
log("Nq: ", sl_1.nq_factor_1)
log("Ng: ", sl_1.ng_factor_1)
sl_0.kp_0 = (np.tan(np.pi / 4 + np.deg2rad(sl_0.phi / 2))) ** 2
sl_1.kp_1 = (np.tan(np.pi / 4 + np.deg2rad(sl_1.phi / 2))) ** 2
# shape factors
if sl_0.phi >= 10:
sl_0.s_c_0 = 1 + 0.2 * sl_0.kp_0 * (fd.width / fd.length)
sl_0.s_q_0 = 1.0 + 0.1 * sl_0.kp_0 * (fd.width / fd.length)
else:
sl_0.s_c_0 = 1 + 0.2 * (fd.width / fd.length)
sl_0.s_q_0 = 1.0
sl_0.s_g_0 = sl_0.s_q_0
if sl_1.phi >= 10:
sl_1.s_c_1 = 1 + 0.2 * sl_1.kp_1 * (fd.width / fd.length)
sl_1.s_q_1 = 1.0 + 0.1 * sl_1.kp_1 * (fd.width / fd.length)
else:
sl_1.s_c_1 = 1 + 0.2 * (fd.width / fd.length)
sl_1.s_q_1 = 1.0
sl_1.s_g_1 = sl_1.s_q_1
# Note: this method explicitly accounts for the foundation depth, so there are no depth factors
# TODO: inclination factors, see doi.org/10.1139/t78-060
# Capacity
a = 1 # assumed to be one but can range between 1.1 and 1.27 for square footings according to Das (1999) Ch 4
s = 1
r = 1 + (fd.width / fd.length)
# put the same things before that condition
# effective weight not in the soil object
if gwl == 0: # case 1: GWL at surface
q_at_interface = sl_0.unit_bouy_weight * h0
unit_eff_weight_0_at_fd_depth = sl_0.unit_bouy_weight
unit_eff_weight_0_at_interface = sl_0.unit_bouy_weight
unit_eff_weight_1_below_foundation = sl_1.unit_bouy_weight
elif 0 < gwl <= fd.depth: # Case 2: GWL at between foundation depth and surface
q_at_interface = (sl_0.unit_dry_weight * gwl) + (sl_0.unit_bouy_weight * (h0 - gwl))
q_d = (sl_0.unit_dry_weight * gwl) + (sl_0.unit_bouy_weight * (fd.depth - gwl))
unit_eff_weight_0_at_fd_depth = q_d / fd.depth
unit_eff_weight_0_at_interface = sl_0.unit_bouy_weight
unit_eff_weight_1_below_foundation = sl_1.unit_bouy_weight
elif fd.depth < gwl <= fd.width + fd.depth:
if gwl < h0: # Case 3: GWL at between foundation depth and foundation depth plus width, and GWL < layer 1 depth
average_unit_bouy_weight = sl_0.unit_bouy_weight + (
((gwl - fd.depth) / fd.width) * (sl_0.unit_dry_weight - sl_0.unit_bouy_weight))
q_at_interface = (sl_0.unit_dry_weight * gwl) + (sl_0.unit_bouy_weight * (h0 - gwl))
unit_eff_weight_0_at_fd_depth = sl_0.unit_dry_weight
unit_eff_weight_0_at_interface = average_unit_bouy_weight
unit_eff_weight_1_below_foundation = sl_1.unit_bouy_weight
else: # Case 4: GWL at between foundation depth and foundation depth plus width, and GWL > layer 1 depth
average_unit_bouy_weight = sl_1.unit_bouy_weight + (
((gwl - h0) / fd.width) * (sl_1.unit_dry_weight - sl_1.unit_bouy_weight))
q_at_interface = sl_0.unit_dry_weight * h0
unit_eff_weight_0_at_fd_depth = sl_0.unit_dry_weight
unit_eff_weight_0_at_interface = sl_0.unit_dry_weight
unit_eff_weight_1_below_foundation = average_unit_bouy_weight
elif gwl > fd.depth + fd.width: # Case 5: GWL beyond foundation depth plus width
q_at_interface = sl_0.unit_dry_weight * h0
unit_eff_weight_0_at_fd_depth = sl_0.unit_dry_weight
unit_eff_weight_0_at_interface = sl_0.unit_dry_weight
unit_eff_weight_1_below_foundation = sl_1.unit_dry_weight
else:
raise ValueError("Could not interpret inputs") # never reached
# maximum value (qu <= qt)
q_ult6 = q_at_interface - unit_eff_weight_0_at_fd_depth * fd.depth
q_0 = (sl_0.cohesion * sl_0.nc_factor_0) + (0.5 * unit_eff_weight_0_at_interface * fd.width * sl_0.ng_factor_0)
q_b2 = (q_at_interface * sl_1.nq_factor_1 * sl_1.s_q_1)
q_1 = (sl_1.cohesion * sl_1.nc_factor_1) + (0.5 * unit_eff_weight_1_below_foundation * fd.width * sl_1.ng_factor_1)
q_b3 = (unit_eff_weight_1_below_foundation * fd.width * sl_1.ng_factor_1 * sl_1.s_g_1 / 2)
q_ult5 = r * (unit_eff_weight_0_at_interface * ((h0 - fd.depth) ** 2)) * (1 + (2 * fd.depth / (h0 - fd.depth))) * (
np.tan(np.deg2rad(sl_0.phi)) / fd.width) * s
q_t2 = (unit_eff_weight_0_at_fd_depth * fd.depth * sl_0.nq_factor_0 * sl_0.s_q_0)
q_t3 = (unit_eff_weight_0_at_interface * fd.width * sl_0.ng_factor_0 * sl_0.s_g_0 / 2)
# qb
q_b1 = (sl_1.cohesion * sl_1.nc_factor_1 * sl_1.s_c_1)
q_b = q_b1 + q_b2 + q_b3
q1_q0 = q_1 / q_0
# calculate the ca factor
# if sl_0.cohesion == 0:
# c1_c0 = 0
# else:
# c1_c0 = sl_1.cohesion / sl_0.cohesion
x = np.array([0.000, 0.082, 0.206, 0.298, 0.404, 0.509, 0.598, 0.685, 0.772])
y = np.array([0.627, 0.700, 0.794, 0.855, 0.912, 0.948, 0.968, 0.983, 0.997])
# raise Warning("ca should be interpolated using q1/q2 not cohesion, see Figure 4 in MH1978")
ca_c0 = np.interp(q1_q0, x, y)
ca = ca_c0 * sl_0.cohesion
# ks
x_0 = np.array([0, 20.08, 22.42, 25.08, 27.58, 30.08, 32.58, 34.92, 37.83, 40.00, 42.67, 45.00, 47.00, 49.75])
y_0 = np.array([0.93, 0.93, 0.93, 0.93, 1.01, 1.17, 1.32, 1.56, 1.87, 2.26, 2.72, 3.35, 3.81, 4.82])
x_2 = np.array([0, 20.08, 22.50, 25.08, 27.58, 30.08, 32.50, 35.00, 37.67, 40.17, 42.67, 45.00, 47.50, 50.00])
y_2 = np.array([1.55, 1.55, 1.71, 1.86, 2.10, 2.33, 2.72, 3.11, 3.81, 4.43, 5.28, 6.14, 7.46, 9.24])
x_4 = np.array([0, 20.00, 22.51, 25.10, 27.69, 30.11, 32.45, 35.04, 37.88, 40.14, 42.65, 45.07, 47.33, 50.08])
y_4 = np.array([2.49, 2.49, 2.64, 2.87, 3.34, 3.81, 4.43, 5.20, 6.29, 7.38, 9.01, 11.11, 14.29, 19.34])
x_10 = np.array([0, 20.00, 22.50, 25.08, 28.00, 30.00, 32.50, 34.92, 37.50, 40.17, 42.42, 45.00, 47.17, 50.08])
y_10 = np.array([3.27, 3.27, 3.74, 4.44, 5.37, 6.07, 7.16, 8.33, 10.04, 12.30, 15.95, 21.17, 27.47, 40.00])
x_int = sl_0.phi
if sl_0.phi < 1:
fd.ks = 0
else:
if q1_q0 == 0:
fd.ks = np.interp(x_int, x_0, y_0)
elif q1_q0 == 0.2:
fd.ks = np.interp(x_int, x_2, y_2)
elif q1_q0 == 0.4:
fd.ks = np.interp(x_int, x_4, y_4)
elif q1_q0 == 1.0:
fd.ks = np.interp(x_int, x_10, y_10)
elif 0 < q1_q0 < 0.2:
ks_1 = np.interp(x_int, x_0, y_0)
ks_2 = np.interp(x_int, x_2, y_2)
fd.ks = (((ks_2 - ks_1) * q1_q0) / 0.2) + ks_1
elif 0.2 < q1_q0 < 0.4:
ks_1 = np.interp(x_int, x_2, y_2)
ks_2 = np.interp(x_int, x_4, y_4)
fd.ks = (((ks_2 - ks_1) * (q1_q0 - 0.2)) / 0.2) + ks_1
elif 0.4 < q1_q0 < 1.0:
ks_1 = np.interp(x_int, x_4, y_4)
ks_2 = np.interp(x_int, x_10, y_10)
fd.ks = (((ks_2 - ks_1) * (q1_q0 - 0.4)) / 0.6) + ks_1
else:
raise DesignError(
"Cannot compute 'ks', bearing ratio out-of-range (q1_q0 = %.3f) required: 0-1." % q1_q0)
# qu
q_ult4 = (r * (2 * ca * (h0 - fd.depth) / fd.width) * a)
q_ult5_ks = q_ult5 * fd.ks
q_ult = q_b + q_ult4 + q_ult5_ks - q_ult6
q_t1 = (sl_0.cohesion * sl_0.nc_factor_0 * sl_0.s_c_0)
q_t = q_t1 + q_t2 + q_t3
if q_ult > q_t:
if h0 > fd.width/2:
fd.q_ult = q_t
else:
vert_eff_stress_interface = sp.vertical_effective_stress(h0)
vert_eff_stress_lowest = sp.vertical_effective_stress(fd.width+fd.depth)
average_eff_stress = (vert_eff_stress_interface + vert_eff_stress_lowest) / 2
c_2_eff = sl_1.cohesion + average_eff_stress * np.tan(np.radians(sl_1.phi))
if sl_0.cohesion > c_2_eff:
fd.q_ult = q_t
else:
# vd = {}
# vd[1] =[1, 1, 1, 1, 1]
# vd[0.667] = [1, 1.033, 1.064, 1.088, 1.109]
# vd[0.5] = [1, 1.056, 1.107, 1.152, 1.193]
# vd[0.333] = [1, 1.088, 1.167, 1.241, 1.311]
# vd[0.25] = [1, 1.107, 1.208, 1.302, 1.389]
# vd[0.2] = [1, 1.121, 1.235, 1.342, 1.444]
# vd[0.1] = [1, 1.154, 1.302, 1.446, 1.584]
h_over_b = (h0 - fd.depth)/fd.width
c1_over_c2 = sl_0.cohesion / c_2_eff
c_1_over_c_2 = [0.1, 0.2, 0.25, 0.333, 0.5, 0.667, 1.]
m_1 = [1.584, 1.444, 1.389, 1.311, 1.193, 1.109, 1.]
m_125 = [1.446, 1.342, 1.302, 1.241, 1.152, 1.088, 1.]
m_167 = [1.302, 1.235, 1.208, 1.167, 1.107, 1.064, 1.]
m_25 = [1.154, 1.121, 1.107, 1.088, 1.056, 1.033, 1.]
m_5 = [1, 1, 1, 1, 1, 1, 1]
if h_over_b == 0.1:
m = np.interp(c1_over_c2, c_1_over_c_2, m_1)
elif h_over_b == 0.125:
m = np.interp(c1_over_c2, c_1_over_c_2, m_125)
elif h_over_b == 0.167:
m = np.interp(c1_over_c2, c_1_over_c_2, m_167)
elif h_over_b == 0.250:
m = np.interp(c1_over_c2, c_1_over_c_2, m_25)
elif h_over_b >= 0.5:
m = np.interp(c1_over_c2, c_1_over_c_2, m_5)
elif 0.1 < h_over_b < 0.125:
m_a = np.interp(c1_over_c2, c_1_over_c_2, m_1)
m_b = np.interp(c1_over_c2, c_1_over_c_2, m_125)
m = np.interp(h_over_b, [0.1,0.125], [m_a,m_b])
elif 0.125 < h_over_b < 0.167:
m_a = np.interp(c1_over_c2, c_1_over_c_2, m_125)
m_b = np.interp(c1_over_c2, c_1_over_c_2, m_167)
m = np.interp(h_over_b, [0.125, 0.167], [m_a, m_b])
elif 0.167 < h_over_b < 0.25:
m_a = np.interp(c1_over_c2, c_1_over_c_2, m_167)
m_b = np.interp(c1_over_c2, c_1_over_c_2, m_25)
m = np.interp(h_over_b, [0.167, 0.250], [m_a, m_b])
elif 0.25 < h_over_b < 0.5:
m_a = np.interp(c1_over_c2, c_1_over_c_2, m_25)
m_b = np.interp(c1_over_c2, c_1_over_c_2, m_5)
m = np.interp(h_over_b, [0.250, 0.500], [m_a, m_b])
fd.q_ult = (sl_0.cohesion * m * sl_0.nc_factor_0) + (unit_eff_weight_0_at_fd_depth * fd.depth)
else:
fd.q_ult = q_ult
return fd.q_ult
def evaluate(data_file, pred_file):
'''
Evaluate.
'''
expected_version = '1.1'
with open(data_file) as dataset_file:
dataset_json = json.load(dataset_file)
if dataset_json['version'] != expected_version:
print('Evaluation expects v-' + expected_version +
', but got dataset with v-' + dataset_json['version'],
file=sys.stderr)
dataset = dataset_json['data']
with open(pred_file) as prediction_file:
predictions = json.load(prediction_file)
# print(json.dumps(evaluate(dataset, predictions)))
result = _evaluate(dataset, predictions)
# print('em:', result['exact_match'], 'f1:', result['f1'])
return result['exact_match']
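# An illustrative call, assuming SQuAD v1.1-style files; both file names below are
# placeholders (a dataset JSON and a {question_id: answer} predictions JSON).
exact_match = evaluate("dev-v1.1.json", "predictions.json")
print("exact match: %.2f" % exact_match)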
def add_node(self, payload):
"""
Returns
-------
int
Identifier for the inserted node.
"""
self.nodes.append(Node(len(self.nodes), payload))
return len(self.nodes) - 1
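# A small sketch, assuming a hypothetical Graph class that owns the `nodes` list and
# this add_node method; payloads can be any object.
g = Graph()                      # hypothetical constructor
first = g.add_node("start")      # -> 0
second = g.add_node({"w": 3})    # -> 1
print(first, second)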
def validate_urls(urls, allowed_response_codes=None):
"""Validates that a list of urls can be opened and each responds with an allowed response code
urls -- the list of urls to ping
allowed_response_codes -- a list of response codes that the validator will ignore
"""
for url in urls:
validate_url(url, allowed_response_codes=allowed_response_codes)
return True
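# A minimal sketch; the URLs are placeholders and validate_url is the per-URL helper the
# loop above delegates to. A response code listed in allowed_response_codes is not
# treated as a failure.
urls = ["https://example.com/", "https://example.org/health"]
validate_urls(urls, allowed_response_codes=[403])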
def applet_set_properties(object_id, input_params={}, always_retry=True, **kwargs):
"""
Invokes the /applet-xxxx/setProperties API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Properties#API-method%3A-%2Fclass-xxxx%2FsetProperties
"""
return DXHTTPRequest('/%s/setProperties' % object_id, input_params, always_retry=always_retry, **kwargs)
def get_last_modified_datetime(self):
"""Return datetime object of modified time of machine file. Return now if not a file."""
if self._path:
statbuf = os.stat(self._path)
return datetime.utcfromtimestamp(statbuf.st_mtime)
else:
return datetime.now()
def headers(self):
""" Return the name of all headers currently defined for the
table. """
if self._headers is None:
query = CellQuery()
query.max_row = '1'
feed = self._service.GetCellsFeed(self._ss.id, self.id,
query=query)
self._headers = feed.entry
return [normalize_header(h.cell.text) for h in self._headers]
def decode_modified_utf8(s: bytes) -> str:
"""
Decodes a bytestring containing modified UTF-8 as defined in section
4.4.7 of the JVM specification.
:param s: bytestring to be converted.
:returns: A unicode representation of the original string.
"""
s = bytearray(s)
buff = []
buffer_append = buff.append
ix = 0
while ix < len(s):
x = s[ix]
ix += 1
if x >> 7 == 0:
# Just an ASCII character, nothing else to do.
pass
elif x >> 5 == 6:  # two-byte sequence: 110xxxxx 10xxxxxx
y = s[ix]
ix += 1
x = ((x & 0x1F) << 6) + (y & 0x3F)
elif x >> 4 == 14:
y, z = s[ix:ix+2]
ix += 2
x = ((x & 0xF) << 12) + ((y & 0x3F) << 6) + (z & 0x3F)
elif x == 0xED:
v, w, x, y, z = s[ix:ix+5]
ix += 5
x = 0x10000 + (
((v & 0x0F) << 16) +
((w & 0x3F) << 10) +
((y & 0x0F) << 6) +
(z & 0x3F)
)
elif x == 0xC0 and s[ix] == 0x80:
ix += 1
x = 0
buffer_append(x)
return u''.join(chr(b) for b in buff)
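# Two small checks of the behaviour described in the docstring: plain ASCII passes
# through unchanged, and the modified-UTF-8 encoding of NUL (C0 80) decodes to '\x00'.
print(decode_modified_utf8(b"plain ascii"))          # -> plain ascii
print(repr(decode_modified_utf8(b"a\xc0\x80b")))     # -> 'a\x00b'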
def from_yaml_file(f):
"""
Read a yaml file and convert to Python objects (including rpcq messages).
"""
return from_json(to_json(yaml.load(f, Loader=yaml.Loader)))
def system_piped(self, cmd):
"""Call the given cmd in a subprocess, piping stdout/err
Parameters
----------
cmd : str
Command to execute (can not end in '&', as background processes are
not supported. Should not be a command that expects input
other than simple text.
"""
if cmd.rstrip().endswith('&'):
# this is *far* from a rigorous test
# We do not support backgrounding processes because we either use
# pexpect or pipes to read from. Users can always just call
# os.system() or use ip.system=ip.system_raw
# if they really want a background process.
raise OSError("Background processes not supported.")
# we explicitly do NOT return the subprocess status code, because
# a non-None value would trigger :func:`sys.displayhook` calls.
# Instead, we store the exit_code in user_ns.
self.user_ns['_exit_code'] = system(self.var_expand(cmd, depth=1))
def main(argv=None):
"""Tifffile command line usage main function."""
if argv is None:
argv = sys.argv
log.setLevel(logging.INFO)
import optparse # TODO: use argparse
parser = optparse.OptionParser(
usage='usage: %prog [options] path',
description='Display image data in TIFF files.',
version='%%prog %s' % __version__, prog='tifffile')
opt = parser.add_option
opt('-p', '--page', dest='page', type='int', default=-1,
help='display single page')
opt('-s', '--series', dest='series', type='int', default=-1,
help='display series of pages of same shape')
opt('--nomultifile', dest='nomultifile', action='store_true',
default=False, help='do not read OME series from multiple files')
opt('--noplots', dest='noplots', type='int', default=10,
help='maximum number of plots')
opt('--interpol', dest='interpol', metavar='INTERPOL', default=None,
help='image interpolation method')
opt('--dpi', dest='dpi', type='int', default=96,
help='plot resolution')
opt('--vmin', dest='vmin', type='int', default=None,
help='minimum value for colormapping')
opt('--vmax', dest='vmax', type='int', default=None,
help='maximum value for colormapping')
opt('--debug', dest='debug', action='store_true', default=False,
help='raise exception on failures')
opt('--doctest', dest='doctest', action='store_true', default=False,
help='runs the docstring examples')
opt('-v', '--detail', dest='detail', type='int', default=2)
opt('-q', '--quiet', dest='quiet', action='store_true')
settings, path = parser.parse_args()
path = ' '.join(path)
if settings.doctest:
import doctest
if sys.version_info < (3, 6):
print('Doctests work with Python >=3.6 only')
return 0
doctest.testmod(optionflags=doctest.ELLIPSIS)
return 0
if not path:
path = askopenfilename(title='Select a TIFF file',
filetypes=TIFF.FILEOPEN_FILTER)
if not path:
parser.error('No file specified')
if any(i in path for i in '?*'):
path = glob.glob(path)
if not path:
print('No files match the pattern')
return 0
# TODO: handle image sequences
path = path[0]
if not settings.quiet:
print_('\nReading TIFF header:', end=' ', flush=True)
start = time.time()
try:
tif = TiffFile(path, multifile=not settings.nomultifile)
except Exception as exc:
if settings.debug:
raise
print('\n\n%s: %s' % (exc.__class__.__name__, exc))
sys.exit(0)
if not settings.quiet:
print('%.3f ms' % ((time.time()-start) * 1e3))
if tif.is_ome:
settings.norgb = True
images = []
if settings.noplots > 0:
if not settings.quiet:
print_('Reading image data: ', end=' ', flush=True)
def notnone(x):
return next(i for i in x if i is not None)
start = time.time()
try:
if settings.page >= 0:
images = [(tif.asarray(key=settings.page),
tif[settings.page], None)]
elif settings.series >= 0:
images = [(tif.asarray(series=settings.series),
notnone(tif.series[settings.series]._pages),
tif.series[settings.series])]
else:
for i, s in enumerate(tif.series[:settings.noplots]):
try:
images.append((tif.asarray(series=i),
notnone(s._pages),
tif.series[i]))
except Exception as exc:
images.append((None, notnone(s.pages), None))
if settings.debug:
raise
print('\nSeries %i failed with %s: %s... ' %
(i, exc.__class__.__name__, exc), end='')
except Exception as exc:
if settings.debug:
raise
print('%s: %s' % (exc.__class__.__name__, exc))
if not settings.quiet:
print('%.3f ms' % ((time.time()-start) * 1e3))
if not settings.quiet:
print_('Generating printout:', end=' ', flush=True)
start = time.time()
info = TiffFile.__str__(tif, detail=int(settings.detail))
print('%.3f ms' % ((time.time()-start) * 1e3))
print()
print(info)
print()
tif.close()
if images and settings.noplots > 0:
try:
import matplotlib
matplotlib.use('TkAgg')
from matplotlib import pyplot
except ImportError as exc:
log.warning('tifffile.main: %s: %s', exc.__class__.__name__, exc)
else:
for img, page, series in images:
if img is None:
continue
vmin, vmax = settings.vmin, settings.vmax
if 'GDAL_NODATA' in page.tags:
try:
vmin = numpy.min(
img[img > float(page.tags['GDAL_NODATA'].value)])
except ValueError:
pass
if tif.is_stk:
try:
vmin = tif.stk_metadata['MinScale']
vmax = tif.stk_metadata['MaxScale']
except KeyError:
pass
else:
if vmax <= vmin:
vmin, vmax = settings.vmin, settings.vmax
if series:
title = '%s\n%s\n%s' % (str(tif), str(page), str(series))
else:
title = '%s\n %s' % (str(tif), str(page))
photometric = 'MINISBLACK'
if page.photometric not in (3,):
photometric = TIFF.PHOTOMETRIC(page.photometric).name
imshow(img, title=title, vmin=vmin, vmax=vmax,
bitspersample=page.bitspersample,
photometric=photometric,
interpolation=settings.interpol,
dpi=settings.dpi)
pyplot.show()
return 0
def rollaxis(a, axis, start=0):
"""Roll the specified axis backwards, until it lies in a given position.
Args:
a (array_like): Input array.
axis (int): The axis to roll backwards. The positions of the other axes
do not change relative to one another.
start (int, optional): The axis is rolled until it lies before this
position. The default, 0, results in a "complete" roll.
Returns:
res (ndarray)
"""
if isinstance(a, np.ndarray):
return np.rollaxis(a, axis, start)
if axis not in range(a.ndim):
raise ValueError(
'rollaxis: axis (%d) must be >=0 and < %d' % (axis, a.ndim))
if start not in range(a.ndim + 1):
raise ValueError(
'rollaxis: start (%d) must be >=0 and < %d' % (start, a.ndim+1))
axes = list(range(a.ndim))
axes.remove(axis)
axes.insert(start, axis)
return transpose(a, axes)
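# Shape checks for the wrapper above; for plain NumPy arrays it simply defers to
# np.rollaxis, so the interesting path is the generic transpose-based fallback.
import numpy as np

a = np.ones((3, 4, 5, 6))
print(rollaxis(a, 3, 1).shape)   # -> (3, 6, 4, 5)
print(rollaxis(a, 2).shape)      # -> (5, 3, 4, 6)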
def ConvertGlobIntoPathComponents(self, pattern):
r"""Converts a glob pattern into a list of pathspec components.
Wildcards are also converted to regular expressions. The pathspec components
do not span directories, and are marked as a regex or a literal component.
We also support recursion into directories using the ** notation. For
example, /home/**2/foo.txt will find all files named foo.txt recursed 2
directories deep. If the directory depth is omitted, it defaults to 3.
Example:
/home/test/* -> ['home', 'test', '.*\\Z(?ms)']
Args:
pattern: A glob expression with wildcards.
Returns:
A list of PathSpec instances for each component.
Raises:
ValueError: If the glob is invalid.
"""
components = []
for path_component in pattern.split("/"):
# A ** in the path component means recurse into directories that match the
# pattern.
m = rdf_paths.GlobExpression.RECURSION_REGEX.search(path_component)
if m:
path_component = path_component.replace(m.group(0), "*")
component = rdf_paths.PathSpec(
path=fnmatch.translate(path_component),
pathtype=self.state.pathtype,
path_options=rdf_paths.PathSpec.Options.RECURSIVE)
# Allow the user to override the recursion depth.
if m.group(1):
component.recursion_depth = int(m.group(1))
elif self.GLOB_MAGIC_CHECK.search(path_component):
component = rdf_paths.PathSpec(
path=fnmatch.translate(path_component),
pathtype=self.state.pathtype,
path_options=rdf_paths.PathSpec.Options.REGEX)
else:
pathtype = self.state.pathtype
# TODO(amoser): This is a backwards compatibility hack. Remove when
# all clients reach 3.0.0.2.
if (pathtype == rdf_paths.PathSpec.PathType.TSK and
re.match("^.:$", path_component)):
path_component = "%s\\" % path_component
component = rdf_paths.PathSpec(
path=path_component,
pathtype=pathtype,
path_options=rdf_paths.PathSpec.Options.CASE_INSENSITIVE)
components.append(component)
return components
def write_metadata(self, symbol, metadata):
'''
writes user defined metadata for the given symbol
Parameters
----------
symbol: str
symbol for the given item in the DB
metadata: ?
metadata to write
'''
sym = self._get_symbol_info(symbol)
if not sym:
raise NoDataFoundException("Symbol does not exist.")
sym[USERMETA] = metadata
self._symbols.replace_one({SYMBOL: symbol}, sym)
def start_kex(self):
"""
Start the GSS-API / SSPI Authenticated Diffie-Hellman Key Exchange.
"""
self._generate_x()
if self.transport.server_mode:
# compute f = g^x mod p, but don't send it yet
self.f = pow(self.G, self.x, self.P)
self.transport._expect_packet(MSG_KEXGSS_INIT)
return
# compute e = g^x mod p (where g=2), and send it
self.e = pow(self.G, self.x, self.P)
# Initialize GSS-API Key Exchange
self.gss_host = self.transport.gss_host
m = Message()
m.add_byte(c_MSG_KEXGSS_INIT)
m.add_string(self.kexgss.ssh_init_sec_context(target=self.gss_host))
m.add_mpint(self.e)
self.transport._send_message(m)
self.transport._expect_packet(
MSG_KEXGSS_HOSTKEY,
MSG_KEXGSS_CONTINUE,
MSG_KEXGSS_COMPLETE,
MSG_KEXGSS_ERROR,
)
def contains_no(self, prototype):
"""
Ensures no item of :attr:`subject` is of class *prototype*.
"""
for element in self._subject:
self._run(unittest_case.assertNotIsInstance, (element, prototype))
return ChainInspector(self._subject)
def parse_sections(self, offset):
"""Fetch the PE file sections.
The sections will be readily available in the "sections" attribute.
Its attributes will contain all the section information plus "data"
a buffer containing the section's data.
The "Characteristics" member will be processed and attributes
representing the section characteristics (with the 'IMAGE_SCN_'
string trimmed from the constant's names) will be added to the
section instance.
Refer to the SectionStructure class for additional info.
"""
self.sections = []
for i in xrange(self.FILE_HEADER.NumberOfSections):
section = SectionStructure( self.__IMAGE_SECTION_HEADER_format__, pe=self )
if not section:
break
section_offset = offset + section.sizeof() * i
section.set_file_offset(section_offset)
section.__unpack__(self.__data__[section_offset : section_offset + section.sizeof()])
self.__structures__.append(section)
if section.SizeOfRawData > len(self.__data__):
self.__warnings.append(
('Error parsing section %d. ' % i) +
'SizeOfRawData is larger than file.')
if adjust_FileAlignment( section.PointerToRawData,
self.OPTIONAL_HEADER.FileAlignment ) > len(self.__data__):
self.__warnings.append(
('Error parsing section %d. ' % i) +
'PointerToRawData points beyond the end of the file.')
if section.Misc_VirtualSize > 0x10000000:
self.__warnings.append(
('Suspicious value found parsing section %d. ' % i) +
'VirtualSize is extremely large > 256MiB.')
if adjust_SectionAlignment( section.VirtualAddress,
self.OPTIONAL_HEADER.SectionAlignment, self.OPTIONAL_HEADER.FileAlignment ) > 0x10000000:
self.__warnings.append(
('Suspicious value found parsing section %d. ' % i) +
'VirtualAddress is beyond 0x10000000.')
#
# Some packer used a non-aligned PointerToRawData in the sections,
# which causes several common tools not to load the section data
# properly as they blindly read from the indicated offset.
# It seems that Windows will round the offset down to the largest
# offset multiple of FileAlignment which is smaller than
# PointerToRawData. The following code will do the same.
#
#alignment = self.OPTIONAL_HEADER.FileAlignment
#self.update_section_data(section)
if ( self.OPTIONAL_HEADER.FileAlignment != 0 and
( section.PointerToRawData % self.OPTIONAL_HEADER.FileAlignment) != 0):
self.__warnings.append(
('Error parsing section %d. ' % i) +
'Suspicious value for FileAlignment in the Optional Header. ' +
'Normally the PointerToRawData entry of the sections\' structures ' +
'is a multiple of FileAlignment, this might imply the file ' +
'is trying to confuse tools which parse this incorrectly')
section_flags = retrieve_flags(SECTION_CHARACTERISTICS, 'IMAGE_SCN_')
# Set the section's flags according to the Characteristics member
set_flags(section, section.Characteristics, section_flags)
if ( section.__dict__.get('IMAGE_SCN_MEM_WRITE', False) and
section.__dict__.get('IMAGE_SCN_MEM_EXECUTE', False) ):
self.__warnings.append(
('Suspicious flags set for section %d. ' % i) +
'Both IMAGE_SCN_MEM_WRITE and IMAGE_SCN_MEM_EXECUTE are set. ' +
'This might indicate a packed executable.')
self.sections.append(section)
if self.FILE_HEADER.NumberOfSections > 0 and self.sections:
return offset + self.sections[0].sizeof()*self.FILE_HEADER.NumberOfSections
else:
return offset
def create_mirror_settings(repo_url):
"""
Creates settings.xml in the current working directory which, when used, makes Maven treat the given repo URL as a mirror of all
repositories to look at.
:param repo_url: the repository URL to use
:returns: filepath to the created file
"""
cwd = os.getcwd()
settings_path = os.path.join(cwd, "settings.xml")
settings_file = None
try:
settings_file = open(settings_path, "w")
settings_file.write('<?xml version="1.0" encoding="UTF-8"?>\n')
settings_file.write('<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"\n')
settings_file.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\n')
settings_file.write(' xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0 http://maven.apache.org/xsd/settings-1.0.0.xsd">\n')
settings_file.write('<mirrors>\n')
settings_file.write(' <mirror>\n')
settings_file.write(' <id>repo-mirror</id>\n')
settings_file.write(' <url>%s</url>\n' % repo_url)
settings_file.write(' <mirrorOf>*</mirrorOf>\n')
settings_file.write(' </mirror>\n')
settings_file.write(' </mirrors>\n')
settings_file.write('</settings>\n')
finally:
if settings_file:
settings_file.close()
return settings_path
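# An illustrative sketch: generate the mirror settings and hand them to Maven with -s.
# The repository URL is a placeholder.
import subprocess

settings_xml = create_mirror_settings("http://repo.example.com/maven2/")
subprocess.call(["mvn", "-s", settings_xml, "clean", "install"])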
def delete_variable_group(self, project, group_id):
"""DeleteVariableGroup.
[Preview API] Delete a variable group
:param str project: Project ID or project name
:param int group_id: Id of the variable group.
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if group_id is not None:
route_values['groupId'] = self._serialize.url('group_id', group_id, 'int')
self._send(http_method='DELETE',
location_id='f5b09dd5-9d54-45a1-8b5a-1c8287d634cc',
version='5.1-preview.1',
route_values=route_values)
def _getservicetuple(servicerecord):
"""
Returns a (device-addr, service-channel, service-name) tuple from the given
IOBluetoothSDPServiceRecord.
"""
addr = _macutil.formatdevaddr(servicerecord.getDevice().getAddressString())
name = servicerecord.getServiceName()
try:
result, channel = servicerecord.getRFCOMMChannelID_(None) # pyobjc 2.0
except TypeError:
result, channel = servicerecord.getRFCOMMChannelID_()
if result != _macutil.kIOReturnSuccess:
try:
result, channel = servicerecord.getL2CAPPSM_(None) # pyobjc 2.0
except:
result, channel = servicerecord.getL2CAPPSM_()
if result != _macutil.kIOReturnSuccess:
channel = None
return (addr, channel, name)
def simple_db_engine(reader=None, srnos=None):
"""engine that gets values from the simple excel 'db'"""
if reader is None:
reader = dbreader.Reader()
logger.debug("No reader provided. Creating one myself.")
info_dict = dict()
info_dict["filenames"] = [reader.get_cell_name(srno) for srno in srnos]
info_dict["masses"] = [reader.get_mass(srno) for srno in srnos]
info_dict["total_masses"] = [reader.get_total_mass(srno) for srno in srnos]
info_dict["loadings"] = [reader.get_loading(srno) for srno in srnos]
info_dict["fixed"] = [reader.inspect_hd5f_fixed(srno) for srno in srnos]
info_dict["labels"] = [reader.get_label(srno) for srno in srnos]
info_dict["cell_type"] = [reader.get_cell_type(srno) for srno in srnos]
info_dict["raw_file_names"] = []
info_dict["cellpy_file_names"] = []
logger.debug("created info-dict")
for key in list(info_dict.keys()):
logger.debug("%s: %s" % (key, str(info_dict[key])))
_groups = [reader.get_group(srno) for srno in srnos]
logger.debug(">\ngroups: %s" % str(_groups))
groups = helper.fix_groups(_groups)
info_dict["groups"] = groups
my_timer_start = time.time()
filename_cache = []
info_dict = helper.find_files(info_dict, filename_cache)
my_timer_end = time.time()
if (my_timer_end - my_timer_start) > 5.0:
logger.info(
"The function _find_files was very slow. "
"Save your info_df so you don't have to run it again!"
)
info_df = pd.DataFrame(info_dict)
info_df = info_df.sort_values(["groups", "filenames"])
info_df = helper.make_unique_groups(info_df)
info_df["labels"] = info_df["filenames"].apply(helper.create_labels)
info_df.set_index("filenames", inplace=True)
return info_df
def _StartWorkerProcess(self, process_name, storage_writer):
"""Creates, starts, monitors and registers a worker process.
Args:
process_name (str): process name.
storage_writer (StorageWriter): storage writer for a session storage used
to create task storage.
Returns:
MultiProcessWorkerProcess: extraction worker process or None on error.
"""
analysis_plugin = self._analysis_plugins.get(process_name, None)
if not analysis_plugin:
logger.error('Missing analysis plugin: {0:s}'.format(process_name))
return None
if self._use_zeromq:
queue_name = '{0:s} output event queue'.format(process_name)
output_event_queue = zeromq_queue.ZeroMQPushBindQueue(
name=queue_name, timeout_seconds=self._QUEUE_TIMEOUT)
# Open the queue so it can bind to a random port, and we can get the
# port number to use in the input queue.
output_event_queue.Open()
else:
output_event_queue = multi_process_queue.MultiProcessingQueue(
timeout=self._QUEUE_TIMEOUT)
self._event_queues[process_name] = output_event_queue
if self._use_zeromq:
queue_name = '{0:s} input event queue'.format(process_name)
input_event_queue = zeromq_queue.ZeroMQPullConnectQueue(
name=queue_name, delay_open=True, port=output_event_queue.port,
timeout_seconds=self._QUEUE_TIMEOUT)
else:
input_event_queue = output_event_queue
process = analysis_process.AnalysisProcess(
input_event_queue, storage_writer, self._knowledge_base,
analysis_plugin, self._processing_configuration,
data_location=self._data_location,
event_filter_expression=self._event_filter_expression,
name=process_name)
process.start()
logger.info('Started analysis plugin: {0:s} (PID: {1:d}).'.format(
process_name, process.pid))
try:
self._StartMonitoringProcess(process)
except (IOError, KeyError) as exception:
logger.error((
'Unable to monitor analysis plugin: {0:s} (PID: {1:d}) '
'with error: {2!s}').format(process_name, process.pid, exception))
process.terminate()
return None
self._RegisterProcess(process)
return process
def setM1Coast(self, device=DEFAULT_DEVICE_ID):
"""
Set motor 1 to coast.
:Keywords:
device : `int`
The device is the integer number of the hardware devices ID and
is only used with the Pololu Protocol. Defaults to the hardware's
default value.
:Exceptions:
* `SerialTimeoutException`
If the low level serial package times out.
* `SerialException`
IO error when the port is not open.
"""
cmd = self._COMMAND.get('m1-coast')
self._writeData(cmd, device)
def _builder_connect_signals(self, _dict):
"""Called by controllers which want to autoconnect their
handlers with signals declared in internal Gtk.Builder.
This method accumulates handlers and schedules the signal
autoconnection to run on idle in the next GTK main loop
iteration. After the autoconnection is done, this method cannot
be called anymore."""
assert not self.builder_connected, "Gtk.Builder not already connected"
if _dict and not self.builder_pending_callbacks:
# this is the first call, book the builder connection for
# later gtk loop
GLib.idle_add(self.__builder_connect_pending_signals)
for n, v in _dict.items():
if n not in self.builder_pending_callbacks:
_set = set()
self.builder_pending_callbacks[n] = _set
else:
_set = self.builder_pending_callbacks[n]
_set.add(v)
def update_port(self, port_information, id_or_uri, timeout=-1):
"""
Updates an interconnect port.
Args:
id_or_uri: Can be either the interconnect id or the interconnect uri.
port_information (dict): object to update
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: The interconnect.
"""
uri = self._client.build_uri(id_or_uri) + "/ports"
return self._client.update(port_information, uri, timeout)
def authenticate(username, password, service='login', encoding='utf-8',
resetcred=True):
"""Returns True if the given username and password authenticate for the
given service. Returns False otherwise.
``username``: the username to authenticate
``password``: the password in plain text
``service``: the PAM service to authenticate against.
Defaults to 'login'
The above parameters can be strings or bytes. If they are strings,
they will be encoded using the encoding given by:
``encoding``: the encoding to use for the above parameters if they
are given as strings. Defaults to 'utf-8'
``resetcred``: Use the pam_setcred() function to
reinitialize the credentials.
Defaults to 'True'.
"""
if sys.version_info >= (3,):
if isinstance(username, str):
username = username.encode(encoding)
if isinstance(password, str):
password = password.encode(encoding)
if isinstance(service, str):
service = service.encode(encoding)
@conv_func
def my_conv(n_messages, messages, p_response, app_data):
"""Simple conversation function that responds to any
prompt where the echo is off with the supplied password"""
# Create an array of n_messages response objects
addr = calloc(n_messages, sizeof(PamResponse))
p_response[0] = cast(addr, POINTER(PamResponse))
for i in range(n_messages):
if messages[i].contents.msg_style == PAM_PROMPT_ECHO_OFF:
pw_copy = strdup(password)
p_response.contents[i].resp = cast(pw_copy, c_char_p)
p_response.contents[i].resp_retcode = 0
return 0
handle = PamHandle()
conv = PamConv(my_conv, 0)
retval = pam_start(service, username, byref(conv), byref(handle))
if retval != 0:
# TODO: This is not an authentication error, something
# has gone wrong starting up PAM
return False
retval = pam_authenticate(handle, 0)
auth_success = (retval == 0)
# Re-initialize credentials (for Kerberos users, etc)
# Don't check return code of pam_setcred(), it shouldn't matter
# if this fails
if auth_success and resetcred:
retval = pam_setcred(handle, PAM_REINITIALIZE_CRED)
pam_end(handle, retval)
return auth_success
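# A minimal interactive sketch of the PAM helper above; it must run on a system with
# PAM available, and the credentials are whatever the user types.
import getpass

user = getpass.getuser()
password = getpass.getpass("Password for %s: " % user)
if authenticate(user, password, service="login"):
    print("authenticated")
else:
    print("authentication failed")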
def people(self):
'''Generates a list of all People.'''
people_response = self.get_request('people/')
return [Person(self, pjson['user']) for pjson in people_response]
def __metadata_helper(json_path):
""" Returns json for specific cluster metadata. Important to realize that
this endpoint was introduced in DC/OS 1.9; clusters prior to 1.9, or clusters
missing the metadata, will return None
"""
url = shakedown.dcos_url_path('dcos-metadata/{}'.format(json_path))
try:
response = dcos.http.request('get', url)
if response.status_code == 200:
return response.json()
except:
pass
return None
def get_client_for_file(self, filename):
"""Get client associated with a given file."""
client = None
for idx, cl in enumerate(self.get_clients()):
if self.filenames[idx] == filename:
self.tabwidget.setCurrentIndex(idx)
client = cl
break
return client
def peek_string(self, lpBaseAddress, fUnicode = False, dwMaxSize = 0x1000):
"""
Tries to read an ASCII or Unicode string
from the address space of the process.
@see: L{read_string}
@type lpBaseAddress: int
@param lpBaseAddress: Memory address to begin reading.
@type fUnicode: bool
@param fUnicode: C{True} is the string is expected to be Unicode,
C{False} if it's expected to be ANSI.
@type dwMaxSize: int
@param dwMaxSize: Maximum allowed string length to read, in bytes.
@rtype: str, compat.unicode
@return: String read from the process memory space.
It B{doesn't} include the terminating null character.
Returns an empty string on failure.
"""
# Validate the parameters.
if not lpBaseAddress or dwMaxSize == 0:
if fUnicode:
return u''
return ''
if not dwMaxSize:
dwMaxSize = 0x1000
# Read the string.
szString = self.peek(lpBaseAddress, dwMaxSize)
# If the string is Unicode...
if fUnicode:
# Decode the string.
szString = compat.unicode(szString, 'U16', 'replace')
## try:
## szString = compat.unicode(szString, 'U16')
## except UnicodeDecodeError:
## szString = struct.unpack('H' * (len(szString) / 2), szString)
## szString = [ unichr(c) for c in szString ]
## szString = u''.join(szString)
# Truncate the string when the first null char is found.
szString = szString[ : szString.find(u'\0') ]
# If the string is ANSI...
else:
# Truncate the string when the first null char is found.
szString = szString[ : szString.find('\0') ]
# Return the decoded string.
return szString
def after_submit(analysis):
"""Method triggered after a 'submit' transition for the analysis passed in
is performed. Promotes the submit transition to the Worksheet to which the
analysis belongs to. Note that for the worksheet there is already a guard
that assures the transition to the worksheet will only be performed if all
analyses within the worksheet have already been transitioned.
This function is called automatically by
bika.lims.workfow.AfterTransitionEventHandler
"""
# Mark this analysis as ISubmitted
alsoProvides(analysis, ISubmitted)
# Promote to analyses this analysis depends on
promote_to_dependencies(analysis, "submit")
# TODO: REFLEX TO REMOVE
# Do all the reflex rules process
if IRequestAnalysis.providedBy(analysis):
analysis._reflex_rule_process('submit')
# Promote transition to worksheet
ws = analysis.getWorksheet()
if ws:
doActionFor(ws, 'submit')
push_reindex_to_actions_pool(ws)
# Promote transition to Analysis Request
if IRequestAnalysis.providedBy(analysis):
doActionFor(analysis.getRequest(), 'submit')
reindex_request(analysis)
def _process_change(self, server_description):
"""Process a new ServerDescription on an opened topology.
Hold the lock when calling this.
"""
td_old = self._description
if self._publish_server:
old_server_description = td_old._server_descriptions[
server_description.address]
self._events.put((
self._listeners.publish_server_description_changed,
(old_server_description, server_description,
server_description.address, self._topology_id)))
self._description = updated_topology_description(
self._description, server_description)
self._update_servers()
self._receive_cluster_time_no_lock(server_description.cluster_time)
if self._publish_tp:
self._events.put((
self._listeners.publish_topology_description_changed,
(td_old, self._description, self._topology_id)))
# Wake waiters in select_servers().
self._condition.notify_all()
def build_pipeline_string(self, forks):
"""Parses, filters and merge all possible pipeline forks into the
final pipeline string
This method checks for shared start and end sections between forks
and merges them according to the shared processes::
[[spades, ...], [skesa, ...], [...,[spades, skesa]]]
-> [..., [[spades, ...], [skesa, ...]]]
Then it defines the pipeline string by replacing the arrays levels
to the flowcraft fork format::
[..., [[spades, ...], [skesa, ...]]]
-> ( ... ( spades ... | skesa ... ) )
Parameters
----------
forks : list
List with all the possible pipeline forks.
Returns
-------
str : String with the pipeline definition used as input for
parse_pipeline
"""
final_forks = []
for i in range(0, len(forks)):
needs_merge = [False, 0, 0, 0, 0, ""]
is_merged = False
for i2 in range(0, len(forks[i])):
for j in range(i, len(forks)):
needs_merge[0] = False
for j2 in range(0, len(forks[j])):
try:
j2_fork = forks[j][j2].split("|")
except AttributeError:
j2_fork = forks[j][j2]
# Gets the indexes of the forks matrix that need to
# be merged
if forks[i][i2] in j2_fork and (i2 == 0 or j2 == 0) and i != j:
needs_merge[0] = True
needs_merge[1] = i
needs_merge[2] = i2
needs_merge[3] = j
needs_merge[4] = j2
needs_merge[5] = forks[i][i2]
if needs_merge[0]:
index_merge_point = forks[needs_merge[3]][-1].index(needs_merge[5])
# Merges the forks. If only one fork is possible,
# that fork is neglected and it merges into a single
# channel.
if needs_merge[2] == 0:
if len(forks[needs_merge[3]][-1]) < 2:
forks[needs_merge[3]] = forks[needs_merge[3]][:-1] + forks[needs_merge[1]][::]
else:
forks[needs_merge[3]][-1][index_merge_point] = forks[needs_merge[1]]
elif needs_merge[4] == 0:
if len(forks[needs_merge[3]][-1]) < 2:
forks[needs_merge[3]] = forks[needs_merge[3]][:-1] + forks[needs_merge[1]][::]
else:
forks[needs_merge[3]][-1][index_merge_point] = forks[needs_merge[1]]
is_merged = True
# Adds forks that dont need merge to the final forks
if needs_merge[0] is not None and not is_merged:
if bool([nf for nf in forks[i] if "|" in nf]):
continue
final_forks.append(forks[i])
if len(final_forks) == 1:
final_forks = str(final_forks[0])
# parses the string array to the flowcraft nomenclature
pipeline_string = " " + str(final_forks)\
.replace("[[", "( ")\
.replace("]]", " )")\
.replace("]", " |")\
.replace(", [", " ")\
.replace("'", "")\
.replace(",", "")\
.replace("[", "")
if pipeline_string[-1] == "|":
pipeline_string = pipeline_string[:-1]
to_search = " {} "
to_replace = " {}={} "
# Replace only names by names + process ids
for key, val in self.process_to_id.items():
# Case only one process in the pipeline
pipeline_string = pipeline_string\
.replace(to_search.format(key),
to_replace.format(key, val))
return pipeline_string | 0.001924 |
def new_img_like(ref_niimg, data, affine=None, copy_header=False):
"""Create a new image of the same class as the reference image
Parameters
----------
ref_niimg: image
Reference image. The new image will be of the same type.
data: numpy array
Data to be stored in the image
affine: 4x4 numpy array, optional
Transformation matrix
    copy_header: boolean, optional
        Indicates whether the header of the reference image should be used to
        create the new image
Returns
-------
new_img: image
A loaded image with the same type (and header) as the reference image.
"""
# Hand-written loading code to avoid too much memory consumption
if not (hasattr(ref_niimg, 'get_data')
and hasattr(ref_niimg,'get_affine')):
if isinstance(ref_niimg, _basestring):
ref_niimg = nib.load(ref_niimg)
elif operator.isSequenceType(ref_niimg):
ref_niimg = nib.load(ref_niimg[0])
else:
raise TypeError(('The reference image should be a niimg, %r '
'was passed') % ref_niimg )
if affine is None:
affine = ref_niimg.get_affine()
if data.dtype == bool:
default_dtype = np.int8
if (LooseVersion(nib.__version__) >= LooseVersion('1.2.0') and
isinstance(ref_niimg, nib.freesurfer.mghformat.MGHImage)):
default_dtype = np.uint8
data = as_ndarray(data, dtype=default_dtype)
header = None
if copy_header:
header = copy.copy(ref_niimg.get_header())
header['scl_slope'] = 0.
header['scl_inter'] = 0.
header['glmax'] = 0.
        header['cal_max'] = np.max(data) if data.size > 0 else 0.
        header['cal_min'] = np.min(data) if data.size > 0 else 0.
return ref_niimg.__class__(data, affine, header=header) | 0.002128 |
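A minimal usage sketch for the row above; it assumes an older nibabel release in which images still expose get_data/get_affine (as the snippet requires), and the array shape and values are illustrative only.
import numpy as np
import nibabel as nib

ref = nib.Nifti1Image(np.zeros((4, 4, 4)), affine=np.eye(4))
mask = np.random.rand(4, 4, 4) > 0.5   # boolean data is cast to int8 inside new_img_like
new_img = new_img_like(ref, mask, copy_header=True)
print(new_img.shape)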
def _half_log_det(self, M):
""" Return log(|M|)*0.5. For positive definite matrix M
of more than 2 dimensions, calculate this for the
last two dimension and return a value corresponding
to each element in the first few dimensions.
"""
chol = np.linalg.cholesky(M)
if M.ndim == 2:
return np.sum(np.log(np.abs(np.diag(chol))))
else:
return np.sum(np.log(np.abs(np.diagonal(
chol, axis1=-2, axis2=-1))), axis=-1) | 0.00381 |
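A standalone numerical check of the Cholesky-based half log-determinant used above (plain numpy, not tied to the class).
import numpy as np

M = np.array([[4.0, 1.0],
              [1.0, 3.0]])                       # symmetric positive definite
chol = np.linalg.cholesky(M)
half_log_det = np.sum(np.log(np.abs(np.diag(chol))))
# For SPD M: log|M| = 2 * sum(log(diag(chol))), so this equals 0.5 * log|M|
assert np.isclose(half_log_det, 0.5 * np.log(np.linalg.det(M)))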
def get_diagnostics(self):
"""
Reads diagnostic data from the sensor.
        OCF (Offset Compensation Finished) - logic high indicates that the Offset Compensation Algorithm has finished. After power up the flag always remains logic high.
        COF (Cordic Overflow) - logic high indicates an out of range error in the CORDIC part. When this bit is set, the angle and magnitude data is invalid.
        The absolute output maintains the last valid angular value.
        COMP low indicates a high magnetic field. It is recommended to also monitor the magnitude value.
        COMP high indicates a weak magnetic field. It is recommended to monitor the magnitude value.
"""
status = self.bus.read_byte_data(self.address, self.diagnostics_reg)
bits_values = dict([('OCF',status & 0x01 == 0x01),
('COF',status & 0x02 == 0x02),
('Comp_Low',status & 0x04 == 0x04),
('Comp_High',status & 0x08 == 0x08)])
return bits_values | 0.010358 |
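The same bitmask decoding as above, shown on a plain integer so it can be run without the I2C bus; the register value is made up.
status = 0b0101                                  # hypothetical diagnostics register value
flags = {
    'OCF': status & 0x01 == 0x01,
    'COF': status & 0x02 == 0x02,
    'Comp_Low': status & 0x04 == 0x04,
    'Comp_High': status & 0x08 == 0x08,
}
# flags == {'OCF': True, 'COF': False, 'Comp_Low': True, 'Comp_High': False}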
def Verify(self, message, signature, hash_algorithm=None):
"""Verifies a given message."""
# This method accepts both PSS and PKCS1v15 padding. PSS is preferred but
# old clients only support PKCS1v15.
if hash_algorithm is None:
hash_algorithm = hashes.SHA256()
last_e = None
for padding_algorithm in [
padding.PSS(
mgf=padding.MGF1(hash_algorithm),
salt_length=padding.PSS.MAX_LENGTH),
padding.PKCS1v15()
]:
try:
self._value.verify(signature, message, padding_algorithm,
hash_algorithm)
return True
except exceptions.InvalidSignature as e:
last_e = e
raise VerificationError(last_e) | 0.005495 |
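A standalone sketch of the same "try PSS, fall back to PKCS1v15" idea using the cryptography package directly (assuming a recent release that no longer requires an explicit backend); it is independent of the wrapper class above.
from cryptography import exceptions
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding, rsa

key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
message = b"hello"
signature = key.sign(message, padding.PKCS1v15(), hashes.SHA256())
public_key = key.public_key()
for pad in (padding.PSS(mgf=padding.MGF1(hashes.SHA256()),
                        salt_length=padding.PSS.MAX_LENGTH),
            padding.PKCS1v15()):
    try:
        public_key.verify(signature, message, pad, hashes.SHA256())
        print("verified with", type(pad).__name__)
        break
    except exceptions.InvalidSignature:
        continue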
def maps_get_rules_output_rules_rulename(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
maps_get_rules = ET.Element("maps_get_rules")
config = maps_get_rules
output = ET.SubElement(maps_get_rules, "output")
rules = ET.SubElement(output, "rules")
rulename = ET.SubElement(rules, "rulename")
rulename.text = kwargs.pop('rulename')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.003861 |
def _run(self):
"""The inside of ``run``'s infinite loop.
Separate from BatchingBolt's implementation because
we need to be able to acquire the batch lock after
reading the tuple.
We can't acquire the lock before reading the tuple because if
that hangs (i.e. the topology is shutting down) the lock being
acquired will freeze the rest of the bolt, which is precisely
what this batcher seeks to avoid.
"""
tup = self.read_tuple()
with self._batch_lock:
self._current_tups = [tup]
if self.is_heartbeat(tup):
self.send_message({"command": "sync"})
elif self.is_tick(tup):
self.process_tick(tup)
else:
self.process(tup)
# reset so that we don't accidentally fail the wrong Tuples
# if a successive call to read_tuple fails
self._current_tups = [] | 0.002075 |
def build_api_url(host=REMOTES['default']['ip'],
port=REMOTES['default']['port'],
path=REMOTES['default']['path'],
username=None,
password=None,
ssl=False):
"""Build API URL from components."""
credentials = make_http_credentials(username, password)
scheme = 'http'
if not path:
path = ''
if path and not path.startswith('/'):
path = "/%s" % path
if ssl:
scheme += 's'
return "%s://%s%s:%i%s" % (scheme, credentials, host, port, path) | 0.001736 |
def app_uninstall(self, package_name, keep_data=False):
"""
Uninstall package
Args:
- package_name(string): package name ex: com.example.demo
- keep_data(bool): keep the data and cache directories
"""
if keep_data:
return self.run_cmd('uninstall', '-k', package_name)
else:
return self.run_cmd('uninstall', package_name) | 0.004796 |
def records():
"""Load test data fixture."""
import uuid
from invenio_records.api import Record
from invenio_pidstore.models import PersistentIdentifier, PIDStatus
create_test_user()
indexer = RecordIndexer()
# Record 1 - Live record
with db.session.begin_nested():
rec_uuid = uuid.uuid4()
pid1 = PersistentIdentifier.create(
'recid', '1', object_type='rec', object_uuid=rec_uuid,
status=PIDStatus.REGISTERED)
Record.create({
'title': 'Registered',
'description': 'This is an awesome description',
'control_number': '1',
'access_right': 'restricted',
'access_conditions': 'fuu',
'owners': [1, 2],
'recid': 1
}, id_=rec_uuid)
indexer.index_by_id(pid1.object_uuid)
db.session.commit()
sleep(3) | 0.001131 |
def setText(self, text: str):
"""
Undo safe wrapper for the native ``setText`` method.
|Args|
* ``text`` (**str**): text to insert at the specified position.
|Returns|
**None**
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type.
"""
undoObj = UndoSetText(self, text)
self.qteUndoStack.push(undoObj) | 0.004751 |
def build_joins(self):
"""
Generates the sql for the JOIN portion of the query
:return: the JOIN portion of the query
:rtype: str
"""
join_parts = []
# get the sql for each join object
for join_item in self.joins:
join_parts.append(join_item.get_sql())
# if there are any joins, combine them
if len(join_parts):
combined_joins = ' '.join(join_parts)
return '{0} '.format(combined_joins)
return '' | 0.003824 |
def find_all(self, pattern):
"""
Return the subset of this RcParams dictionary whose keys match,
using :func:`re.search`, the given ``pattern``.
Parameters
----------
pattern: str
pattern as suitable for re.compile
Returns
-------
RcParams
RcParams instance with entries that match the given `pattern`
Notes
-----
        Changes to the returned dictionary are (unlike with
        :meth:`find_and_replace`) *not* propagated to the parent RcParams
        dictionary.
See Also
--------
find_and_replace"""
pattern_re = re.compile(pattern)
ret = RcParams()
ret.defaultParams = self.defaultParams
ret.update((key, value) for key, value in self.items()
if pattern_re.search(key))
return ret | 0.002242 |
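The same regex-filtering idea shown on a plain dict (standalone sketch, not the RcParams class above).
import re

params = {'axes.grid': True, 'axes.labelsize': 10, 'lines.linewidth': 2}
pattern_re = re.compile('axes')
matched = {k: v for k, v in params.items() if pattern_re.search(k)}
# matched == {'axes.grid': True, 'axes.labelsize': 10}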
def weibull(x, alpha, beta, kappa, delta):
"""Weibull model
http://www.pisces-conservation.com/growthhelp/index.html?morgan_mercer_floden.htm
Parameters
----------
x: int
alpha: float
beta: float
kappa: float
delta: float
Returns
-------
float
alpha - (alpha - beta) * np.exp(-(kappa * x)**delta)
"""
return alpha - (alpha - beta) * np.exp(-(kappa * x)**delta) | 0.002347 |
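A minimal usage sketch of the Weibull growth curve above; the parameter values are arbitrary and numpy is assumed to be available as np, as in the row itself.
import numpy as np

x = np.linspace(0, 10, 6)
y = weibull(x, alpha=1.0, beta=0.1, kappa=0.5, delta=1.2)
# y starts at beta for x=0 and rises towards the asymptote alpha as x grows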
def list_liked_topics(self, user_alias=None, start=0):
"""
        Topics liked by a user.
        :param user_alias: the user to query; defaults to the current user
        :param start: pagination offset
        :return: list of results with next-page information
"""
user_alias = user_alias or self.api.user_alias
xml = self.api.xml(API_GROUP_LIST_USER_LIKED_TOPICS % user_alias, params={'start': start})
return build_list_result(self._parse_topic_table(xml, 'title,comment,time,group'), xml) | 0.01139 |
    def _get_date_range_query(self, start_date, end_date, timespan='DAY', date_field=None):
        '''
        Gets counts of items per specified date range.
        :param start_date: Start of the date range.
        :param end_date: End of the date range.
        :param timespan: Solr Date Math compliant value for faceting, e.g. HOUR, MONTH, DAY.
        :param date_field: Date field to facet on; defaults to self._date_field.
        '''
if date_field is None:
date_field = self._date_field
query ={'q':'*:*',
'rows':0,
'facet':'true',
'facet.range': date_field,
'facet.range.gap': '+1{}'.format(timespan),
'facet.range.end': '{}'.format(end_date),
'facet.range.start': '{}'.format(start_date),
'facet.range.include': 'all'
}
if self._per_shard:
query['distrib'] = 'false'
return query | 0.011641 |
def send_audio(self, audio: str, reply: Message=None, on_success: callable=None,
reply_markup: botapi.ReplyMarkup=None):
"""
Send audio clip to this peer.
:param audio: File path to audio to send.
:param reply: Message object or message_id to reply to.
:param on_success: Callback to call when call is complete.
:type reply: int or Message
"""
self.twx.send_audio(peer=self, audio=audio, reply_to_message_id=reply, on_success=on_success,
reply_markup=reply_markup) | 0.019064 |
def get_metric_history(self, slugs, since=None, to=None, granularity='daily'):
"""Get history for one or more metrics.
* ``slugs`` -- a slug OR a list of slugs
* ``since`` -- the date from which we start pulling metrics
* ``to`` -- the date until which we start pulling metrics
* ``granularity`` -- seconds, minutes, hourly,
daily, weekly, monthly, yearly
Returns a list of tuples containing the Redis key and the associated
metric::
r = R()
r.get_metric_history('test', granularity='weekly')
[
('m:test:w:2012-52', '15'),
]
To get history for multiple metrics, just provide a list of slugs::
metrics = ['test', 'other']
r.get_metric_history(metrics, granularity='weekly')
[
('m:test:w:2012-52', '15'),
('m:other:w:2012-52', '42'),
]
"""
        if not isinstance(slugs, list):
slugs = [slugs]
# Build the set of Redis keys that we need to get.
keys = []
for slug in slugs:
for date in self._date_range(granularity, since, to):
keys += self._build_keys(slug, date, granularity)
keys = list(dedupe(keys))
# Fetch our data, replacing any None-values with zeros
results = [0 if v is None else v for v in self.r.mget(keys)]
results = zip(keys, results)
return sorted(results, key=lambda t: t[0]) | 0.001295 |
def run_transgene(job, snpeffed_file, univ_options, transgene_options):
"""
This module will run transgene on the input vcf file from the aggregator and produce the
peptides for MHC prediction
ARGUMENTS
1. snpeffed_file: <JSid for snpeffed vcf>
2. univ_options: Dict of universal arguments used by almost all tools
univ_options
+- 'dockerhub': <dockerhub to use>
3. transgene_options: Dict of parameters specific to transgene
transgene_options
+- 'gencode_peptide_fasta': <JSid for the gencode protein fasta>
RETURN VALUES
1. output_files: Dict of transgened n-mer peptide fastas
output_files
|- 'transgened_tumor_9_mer_snpeffed.faa': <JSid>
|- 'transgened_tumor_10_mer_snpeffed.faa': <JSid>
+- 'transgened_tumor_15_mer_snpeffed.faa': <JSid>
This module corresponds to node 17 on the tree
"""
job.fileStore.logToMaster('Running transgene on %s' % univ_options['patient'])
work_dir = job.fileStore.getLocalTempDir()
input_files = {
'snpeffed_muts.vcf': snpeffed_file,
'pepts.fa': transgene_options['gencode_peptide_fasta']}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=True)
parameters = ['--peptides', input_files['pepts.fa'],
'--snpeff', input_files['snpeffed_muts.vcf'],
'--prefix', 'transgened',
'--pep_lens', '9,10,15']
docker_call(tool='transgene', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'])
output_files = defaultdict()
for peplen in ['9', '10', '15']:
peptfile = '_'.join(['transgened_tumor', peplen, 'mer_snpeffed.faa'])
mapfile = '_'.join(['transgened_tumor', peplen, 'mer_snpeffed.faa.map'])
output_files[peptfile] = job.fileStore.writeGlobalFile(os.path.join(work_dir, peptfile))
output_files[mapfile] = job.fileStore.writeGlobalFile(os.path.join(work_dir, mapfile))
return output_files | 0.004337 |
def p_ExtendedAttributeNamedArgList(p):
"""ExtendedAttributeNamedArgList : IDENTIFIER "=" IDENTIFIER "(" ArgumentList ")"
"""
p[0] = model.ExtendedAttribute(
name=p[1],
value=model.ExtendedAttributeValue(name=p[3], arguments=p[5])) | 0.016327 |
def FileEntryExistsByPathSpec(self, path_spec):
"""Determines if a file entry for a path specification exists.
Args:
path_spec (PathSpec): path specification.
Returns:
bool: True if the file entry exists.
"""
location = getattr(path_spec, 'location', None)
if (location is None or
not location.startswith(self.LOCATION_ROOT)):
return False
if len(location) == 1:
return True
try:
self._tar_file.getmember(location[1:])
return True
except KeyError:
pass
# Check if location could be a virtual directory.
for name in iter(self._tar_file.getnames()):
# The TAR info name does not have the leading path separator as
# the location string does.
if name.startswith(location[1:]):
return True
return False | 0.012063 |
def static_url(redis, path):
"""Gets the static path for a file"""
file_hash = get_cache_buster(redis, path)
return "%s/%s?v=%s" % (oz.settings["static_host"], path, file_hash) | 0.005319 |
def static_adam_preprocessing_dag(job, inputs, sample, output_dir, suffix=''):
"""
A Toil job function performing ADAM preprocessing on a single sample
"""
inputs.sample = sample
inputs.output_dir = output_dir
inputs.suffix = suffix
if inputs.master_ip is not None or inputs.run_local:
if not inputs.run_local and inputs.master_ip == 'auto':
# Static, standalone Spark cluster managed by uberscript
spark_on_toil = False
scale_up = job.wrapJobFn(scale_external_spark_cluster, 1)
job.addChild(scale_up)
spark_work = job.wrapJobFn(download_run_and_upload,
inputs.master_ip, inputs, spark_on_toil)
scale_up.addChild(spark_work)
scale_down = job.wrapJobFn(scale_external_spark_cluster, -1)
spark_work.addChild(scale_down)
else:
# Static, external Spark cluster
spark_on_toil = False
spark_work = job.wrapJobFn(download_run_and_upload,
inputs.master_ip, inputs, spark_on_toil)
job.addChild(spark_work)
else:
# Dynamic subclusters, i.e. Spark-on-Toil
spark_on_toil = True
cores = multiprocessing.cpu_count()
master_ip = spawn_spark_cluster(job,
False, # Sudo
inputs.num_nodes-1,
cores=cores,
memory=inputs.memory)
spark_work = job.wrapJobFn(download_run_and_upload,
master_ip, inputs, spark_on_toil)
job.addChild(spark_work) | 0.001155 |
def getTarget(self, target_name_and_version, additional_config=None):
''' Return a derived target object representing the selected target: if
the target is not installed, or is invalid then the returned object
will test false in a boolean context.
Returns derived_target
Errors are not displayed.
'''
derived_target, errors = self.satisfyTarget(
target_name_and_version,
additional_config = additional_config,
install_missing = False
)
if len(errors):
return None
else:
return derived_target | 0.011976 |
def render(self,
*,
block_span_x: Optional[int] = None,
block_span_y: Optional[int] = None,
min_block_width: int = 0,
min_block_height: int = 0) -> str:
"""Outputs text containing the diagram.
Args:
block_span_x: The width of the diagram in blocks. Set to None to
default to using the smallest width that would include all
accessed blocks and columns with a specified minimum width.
block_span_y: The height of the diagram in blocks. Set to None to
default to using the smallest height that would include all
accessed blocks and rows with a specified minimum height.
min_block_width: A global minimum width for all blocks.
min_block_height: A global minimum height for all blocks.
Returns:
The diagram as a string.
"""
# Determine desired size of diagram in blocks.
if block_span_x is None:
block_span_x = 1 + max(
max(x for x, _ in self._blocks.keys()),
max(self._min_widths.keys()),
)
if block_span_y is None:
block_span_y = 1 + max(
max(y for _, y in self._blocks.keys()),
max(self._min_heights.keys()),
)
# Method for accessing blocks without creating new entries.
empty = Block()
def block(x: int, y: int) -> Block:
return self._blocks.get((x, y), empty)
# Determine the width of every column and the height of every row.
widths = {
x: max(
max(block(x, y).min_width() for y in range(block_span_y)),
self._min_widths.get(x, 0),
min_block_width,
)
for x in range(block_span_x)
}
heights = {
y: max(
max(block(x, y).min_height() for x in range(block_span_x)),
self._min_heights.get(y, 0),
min_block_height,
)
for y in range(block_span_y)
}
# Get the individually rendered blocks.
block_renders = {
(x, y): block(x, y).render(widths[x], heights[y])
for x in range(block_span_x)
for y in range(block_span_y)
}
# Paste together all of the rows of rendered block content.
out_lines = [] # type: List[str]
for y in range(block_span_y):
for by in range(heights[y]):
out_line_chunks = [] # type: List[str]
for x in range(block_span_x):
out_line_chunks.extend(block_renders[x, y][by])
out_lines.append(''.join(out_line_chunks).rstrip())
# Then paste together the rows.
return '\n'.join(out_lines) | 0.002766 |
def fit(self, X, y, n_iter=None):
"""w = w + α * δ * X"""
self.n_iter = self.n_iter if n_iter is None else n_iter
X = getattr(X, 'values', X).reshape(len(X), 1)
X_1 = self.homogenize(X)
        for _ in range(self.n_iter):
            for i in range(0, len(X), 10):  # minibatch learning for numerical stability
batch = slice(i, min(i + 10, len(X)))
Xbatch, ybatch = X[batch, :], y[batch]
X_1_batch = X_1[batch, :]
self.W += (self.alpha / len(X) ** 1.5) * (
self.delta(Xbatch, ybatch).reshape((len(Xbatch), 1)).T.dot(X_1_batch))
return self | 0.006024 |
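A plain-numpy sketch of the same mini-batch delta-rule update, independent of the class above (delta() and homogenize() are assumed to compute the prediction error and to prepend a bias column, respectively).
import numpy as np

X = np.random.rand(100, 1)
y = 3 * X[:, 0] + 1 + 0.01 * np.random.randn(100)
X_1 = np.hstack([np.ones((len(X), 1)), X])       # homogeneous coordinates (bias column)
W = np.zeros((1, 2))
alpha = 0.1
for _ in range(200):
    for start in range(0, len(X), 10):
        batch = slice(start, start + 10)
        delta = y[batch] - X_1[batch].dot(W.T).ravel()   # prediction error
        W += (alpha / len(X)) * delta.reshape(-1, 1).T.dot(X_1[batch])
# W moves towards [[1., 3.]] (bias, slope) as training proceeds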
def zip_estimate_state(data, clusters, init_means=None, init_weights=None, max_iters=10, tol=1e-4, disp=True, inner_max_iters=400, normalize=True):
"""
Uses a Zero-inflated Poisson Mixture model to estimate cell states and
cell state mixing weights.
Args:
data (array): genes x cells
clusters (int): number of mixture components
init_means (array, optional): initial centers - genes x clusters. Default: kmeans++ initializations
init_weights (array, optional): initial weights - clusters x cells. Default: random(0,1)
max_iters (int, optional): maximum number of iterations. Default: 10
tol (float, optional): if both M and W change by less than tol (in RMSE), then the iteration is stopped. Default: 1e-4
disp (bool, optional): whether or not to display optimization parameters. Default: True
inner_max_iters (int, optional): Number of iterations to run in the scipy minimizer for M and W. Default: 400
normalize (bool, optional): True if the resulting W should sum to 1 for each cell. Default: True.
Returns:
M: genes x clusters - state centers
W: clusters x cells - state mixing components for each cell
ll: final log-likelihood
"""
genes, cells = data.shape
# TODO: estimate ZIP parameter?
if init_means is None:
means, assignments = kmeans_pp(data, clusters)
else:
means = init_means.copy()
clusters = means.shape[1]
w_init = np.random.random(cells*clusters)
if init_weights is not None:
if len(init_weights.shape)==1:
init_weights = initialize_from_assignments(init_weights, clusters)
w_init = init_weights.reshape(cells*clusters)
m_init = means.reshape(genes*clusters)
# using zero-inflated parameters...
L, Z = zip_fit_params_mle(data)
# repeat steps 1 and 2 until convergence:
ll = np.inf
for i in range(max_iters):
if disp:
print('iter: {0}'.format(i))
w_bounds = [(0, 1.0) for x in w_init]
m_bounds = [(0, None) for x in m_init]
# step 1: given M, estimate W
w_objective = _create_w_objective(means, data, Z)
w_res = minimize(w_objective, w_init, method='L-BFGS-B', jac=True, bounds=w_bounds, options={'disp':disp, 'maxiter':inner_max_iters})
w_diff = np.sqrt(np.sum((w_res.x-w_init)**2))/w_init.size
w_new = w_res.x.reshape((clusters, cells))
w_init = w_res.x
# step 2: given W, update M
m_objective = _create_m_objective(w_new, data, Z)
# method could be 'L-BFGS-B' or 'SLSQP'... SLSQP gives a memory error...
# or use TNC...
m_res = minimize(m_objective, m_init, method='L-BFGS-B', jac=True, bounds=m_bounds, options={'disp':disp, 'maxiter':inner_max_iters})
ll = m_res.fun
m_diff = np.sqrt(np.sum((m_res.x-m_init)**2))/m_init.size
m_new = m_res.x.reshape((genes, clusters))
m_init = m_res.x
means = m_new
if w_diff < tol and m_diff < tol:
break
if normalize:
w_new = w_new/w_new.sum(0)
return m_new, w_new, ll | 0.005092 |
def click_and_hold(self, on_element=None):
"""
Holds down the left mouse button on an element.
:Args:
- on_element: The element to mouse down.
If None, clicks on current mouse position.
"""
if on_element:
self.move_to_element(on_element)
if self._driver.w3c:
self.w3c_actions.pointer_action.click_and_hold()
self.w3c_actions.key_action.pause()
else:
self._actions.append(lambda: self._driver.execute(
Command.MOUSE_DOWN, {}))
return self | 0.003317 |
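Typical ActionChains usage for comparison (requires a live WebDriver session and a real locator, so it is not runnable as-is; the element id is made up and the locator call matches the Selenium 3 era of the snippet).
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains

driver = webdriver.Chrome()
handle = driver.find_element_by_id("slider-handle")   # hypothetical element
ActionChains(driver).click_and_hold(handle).move_by_offset(50, 0).release().perform()
driver.quit()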
def actualize_sources (self, sources, prop_set):
""" Creates actual jam targets for sources. Initializes two member
variables:
'self.actual_sources_' -- sources which are passed to updating action
'self.dependency_only_sources_' -- sources which are made dependencies, but
are not used otherwise.
New values will be *appended* to the variables. They may be non-empty,
if caller wants it.
"""
assert is_iterable_typed(sources, VirtualTarget)
assert isinstance(prop_set, property_set.PropertySet)
dependencies = self.properties_.get ('<dependency>')
self.dependency_only_sources_ += self.actualize_source_type (dependencies, prop_set)
self.actual_sources_ += self.actualize_source_type (sources, prop_set)
# This is used to help bjam find dependencies in generated headers
# in other main targets.
# Say:
#
# make a.h : ....... ;
# exe hello : hello.cpp : <implicit-dependency>a.h ;
#
# However, for bjam to find the dependency the generated target must
# be actualized (i.e. have the jam target). In the above case,
# if we're building just hello ("bjam hello"), 'a.h' won't be
# actualized unless we do it here.
implicit = self.properties_.get("<implicit-dependency>")
for i in implicit:
i.actualize() | 0.006882 |
def eval(self, expr, n, extra_constraints=(), solver=None, model_callback=None):
"""
This function returns up to `n` possible solutions for expression `expr`.
:param expr: expression (an AST) to evaluate
:param n: number of results to return
:param solver: a solver object, native to the backend, to assist in
the evaluation (for example, a z3.Solver)
:param extra_constraints: extra constraints (as ASTs) to add to the solver for this solve
:param model_callback: a function that will be executed with recovered models (if any)
:return: A sequence of up to n results (backend objects)
"""
if self._solver_required and solver is None:
raise BackendError("%s requires a solver for evaluation" % self.__class__.__name__)
return self._eval(
self.convert(expr), n, extra_constraints=self.convert_list(extra_constraints),
solver=solver, model_callback=model_callback
) | 0.007707 |
def decode_produce_response(cls, data):
"""
Decode bytes to a ProduceResponse
:param bytes data: bytes to decode
:returns: iterable of `afkak.common.ProduceResponse`
"""
((correlation_id, num_topics), cur) = relative_unpack('>ii', data, 0)
for _i in range(num_topics):
topic, cur = read_short_ascii(data, cur)
((num_partitions,), cur) = relative_unpack('>i', data, cur)
for _i in range(num_partitions):
((partition, error, offset), cur) = relative_unpack('>ihq', data, cur)
yield ProduceResponse(topic, partition, error, offset) | 0.00458 |
def check(conn, command, exit=False, timeout=None, **kw):
"""
Execute a remote command with ``subprocess.Popen`` but report back the
results in a tuple with three items: stdout, stderr, and exit status.
This helper function *does not* provide any logging as it is the caller's
responsibility to do so.
"""
command = conn.cmd(command)
stop_on_error = kw.pop('stop_on_error', True)
timeout = timeout or conn.global_timeout
if not kw.get('env'):
# get the remote environment's env so we can explicitly add
# the path without wiping out everything
kw = extend_env(conn, kw)
conn.logger.info('Running command: %s' % ' '.join(admin_command(conn.sudo, command)))
result = conn.execute(_remote_check, cmd=command, **kw)
response = None
try:
response = result.receive(timeout)
except Exception as err:
# the things we need to do here :(
# because execnet magic, we cannot catch this as
# `except TimeoutError`
if err.__class__.__name__ == 'TimeoutError':
msg = 'No data was received after %s seconds, disconnecting...' % timeout
conn.logger.warning(msg)
# there is no stdout, stderr, or exit code but make the exit code
# an error condition (non-zero) regardless
return [], [], -1
else:
remote_trace = traceback.format_exc()
remote_error = RemoteError(remote_trace)
if remote_error.exception_name == 'RuntimeError':
conn.logger.error(remote_error.exception_line)
else:
for tb_line in remote_trace.split('\n'):
conn.logger.error(tb_line)
if stop_on_error:
raise RuntimeError(
'Failed to execute command: %s' % ' '.join(command)
)
if exit:
conn.exit()
return response | 0.001554 |
def to_object(self, data):
"""
Deserialize input data
:param data: serialized input Data object
:return: Deserialized object
"""
if not isinstance(data, Data):
return data
if is_null_data(data):
return None
inp = self._create_data_input(data)
try:
type_id = data.get_type()
serializer = self._registry.serializer_by_type_id(type_id)
if serializer is None:
if self._active:
raise HazelcastSerializationError("Missing Serializer for type-id:{}".format(type_id))
else:
raise HazelcastInstanceNotActiveError()
return serializer.read(inp)
except:
handle_exception(sys.exc_info()[1], sys.exc_info()[2])
finally:
pass | 0.004614 |
def write_error(self, status_code, **kwargs):
"""Override of RequestHandler.write_error
Calls ``error()`` or ``fail()`` from JSendMixin depending on which
exception was raised with provided reason and status code.
:type status_code: int
:param status_code: HTTP status code
"""
def get_exc_message(exception):
return exception.log_message if \
hasattr(exception, "log_message") else str(exception)
self.clear()
self.set_status(status_code)
# Any APIError exceptions raised will result in a JSend fail written
# back with the log_message as data. Hence, log_message should NEVER
# expose internals. Since log_message is proprietary to HTTPError
# class exceptions, all exceptions without it will return their
# __str__ representation.
# All other exceptions result in a JSend error being written back,
# with log_message only written if debug mode is enabled
exception = kwargs["exc_info"][1]
if any(isinstance(exception, c) for c in [APIError, ValidationError]):
# ValidationError is always due to a malformed request
if isinstance(exception, ValidationError):
self.set_status(400)
self.fail(get_exc_message(exception))
else:
self.error(
message=self._reason,
data=get_exc_message(exception) if self.settings.get("debug")
else None,
code=status_code
) | 0.001269 |
def get_hints(self, plugin):
''' Return plugin hints from ``plugin``. '''
hints = []
for hint_name in getattr(plugin, 'hints', []):
hint_plugin = self._plugins.get(hint_name)
if hint_plugin:
hint_result = Result(
name=hint_plugin.name,
homepage=hint_plugin.homepage,
from_url=self.requested_url,
type=HINT_TYPE,
plugin=plugin.name,
)
hints.append(hint_result)
logger.debug(f'{plugin.name} & hint {hint_result.name} detected')
else:
logger.error(f'{plugin.name} hints an invalid plugin: {hint_name}')
return hints | 0.005242 |
def get_new_edges(self, subject_graph):
"""Get new edges from the subject graph for the graph search algorithm
The Graph search algorithm extends the matches iteratively by adding
matching vertices that are one edge further from the starting vertex
at each iteration.
"""
result = []
#print "Match.get_new_edges self.previous_ends1", self.previous_ends1
for vertex in self.previous_ends1:
for neighbor in subject_graph.neighbors[vertex]:
if neighbor not in self.reverse:
result.append((vertex, neighbor))
return result | 0.00463 |
def repr_new_instance(self, class_data):
"""Create code like this::
person = Person(name='Jack', person_id=1)
"""
classname = self.formatted_classname(class_data["classname"])
instancename = self.formatted_instancename(class_data["classname"])
arguments = list()
for key, value in self.sorted_dict(class_data.get("metadata", dict())):
arguments.append("%s=%r" % (key, value))
return "%s = %s(%s)" % (
instancename, classname, ", ".join(arguments)) | 0.005474 |
def restore_from_archive(self, parent=None):
"""Function to restore a DP from archived copy
Asks for confirmation along the way if parent is not None
(in which case it will be the parent widget for confirmation dialogs)
"""
from PyQt4.Qt import QMessageBox
exists = os.path.exists(self.sourcepath)
if parent:
msg = """<P>Do you really want to restore <tt>%s</tt> from
this entry's copy of <tt>%s</tt>?</P>""" % (self.sourcepath, self.filename)
exists = os.path.exists(self.sourcepath)
if exists:
msg += """<P>Current file exists, and will be overwritten.</P>"""
if QMessageBox.warning(parent, "Restoring from archive", msg,
QMessageBox.Yes, QMessageBox.No) != QMessageBox.Yes:
return False
else:
if QMessageBox.question(parent, "Restoring from archive", msg,
QMessageBox.Yes, QMessageBox.No) != QMessageBox.Yes:
return False
busy = Purr.BusyIndicator()
# remove file if in the way
if exists:
if os.system("/bin/rm -fr '%s'" % self.sourcepath):
busy = None
if parent:
QMessageBox.warning(parent, "Error removing file", """<P>
There was an error removing %s. Archived copy was not restored.
The text console may have more information.</P>""" % self.sourcepath,
QMessageBox.Ok, 0)
return False
# unpack archived file
if self.fullpath.endswith('.tgz'):
parent_dir = os.path.dirname(self.sourcepath.rstrip('/'))
os.system("/bin/rm -fr %s" % self.sourcepath)
if os.system("tar zxf '%s' -C '%s'" % (self.fullpath, parent_dir)):
busy = None
if parent:
QMessageBox.warning(parent, "Error unpacking file", """<P>
There was an error unpacking the archived version to %s. The text console may have more information.</P>""" % self.sourcepath,
QMessageBox.Ok, 0)
return False
# else simply copy over
else:
if os.system("/bin/cp -a '%s' '%s'" % (self.fullpath, self.sourcepath)):
busy = None
if parent:
QMessageBox.warning(parent, "Error copying file", """<P>
There was an error copying the archived version to %s. The text console may have more information.</P>""" % self.sourcepath,
QMessageBox.Ok, 0)
return False
busy = None
if parent:
QMessageBox.information(parent, "Restored file", """<P>Restored %s from this entry's
archived copy.</P>""" % self.sourcepath,
QMessageBox.Ok, 0)
return True | 0.003626 |
def list_active_times_gen(self):
"""Generator for the LIST ACTIVE.TIMES command.
Generates a list of newsgroups including the creation time and who
created them.
See <http://tools.ietf.org/html/rfc3977#section-7.6.4>
Yields:
A tuple containing the name, creation date as a datetime object and
creator as a string for the newsgroup.
"""
code, message = self.command("LIST ACTIVE.TIMES")
if code != 215:
raise NNTPReplyError(code, message)
for line in self.info_gen(code, message):
parts = line.split()
try:
name = parts[0]
timestamp = date.datetimeobj_epoch(parts[1])
creator = parts[2]
except (IndexError, ValueError):
raise NNTPDataError("Invalid LIST ACTIVE.TIMES")
yield name, timestamp, creator | 0.002169 |
def RET(cpu, *operands):
"""
Returns from procedure.
Transfers program control to a return address located on the top of
the stack. The address is usually placed on the stack by a CALL instruction,
and the return is made to the instruction that follows the CALL instruction.
The optional source operand specifies the number of stack bytes to be
released after the return address is popped; the default is none.
:param cpu: current CPU.
:param operands: variable operands list.
"""
# TODO FIX 64Bit FIX segment
N = 0
if len(operands) > 0:
N = operands[0].read()
cpu.PC = cpu.pop(cpu.address_bit_size)
cpu.STACK += N | 0.005348 |
def sginfo(self, include_core_files=False,
include_slapcat_output=False,
filename='sginfo.gz'):
"""
Get the SG Info of the specified node. Optionally provide
a filename, otherwise default to 'sginfo.gz'. Once you run
gzip -d <filename>, the inner contents will be in .tar format.
:param include_core_files: flag to include or not core files
:param include_slapcat_output: flag to include or not slapcat output
:raises NodeCommandFailed: failed getting sginfo with reason
:return: string path of download location
:rtype: str
"""
params = {
'include_core_files': include_core_files,
'include_slapcat_output': include_slapcat_output}
result = self.make_request(
NodeCommandFailed,
raw_result=True,
resource='sginfo',
filename=filename,
params=params)
return result.content | 0.005958 |
def generate(data, dimOrder, maxWindowSize, overlapPercent, transforms = []):
"""
Generates a set of sliding windows for the specified dataset.
"""
# Determine the dimensions of the input data
width = data.shape[dimOrder.index('w')]
height = data.shape[dimOrder.index('h')]
# Generate the windows
return generateForSize(width, height, dimOrder, maxWindowSize, overlapPercent, transforms) | 0.0401 |
def _tree_stat(self, mode, infostr, stat):
""" update stat regarding to tree generating process,
e.g. show_tree()
"""
action_str = mode.upper()
info_str = infostr
if stat == 'OK':
self.action_st_panel.SetBackgroundColour('#00FF00')
else: # ERR
self.action_st_panel.SetBackgroundColour('#FF0000')
self.action_st.SetLabel(action_str)
if info_str is None:
pass
if stat == 'OK':
child_cnt_0 = self.mainview_tree.GetChildrenCount(
self.mainview_tree.GetRootItem(),
recursively=False)
child_cnt_1 = self.mainview_tree.GetChildrenCount(
self.mainview_tree.GetRootItem(),
recursively=True)
self.info_st.SetLabel("{0} ({1} elements.)".format(info_str, child_cnt_0))
self.log.append({'stat': stat,
'logstr':
"[{ts}] {acts:<10s} : {infs} ({cnt1}|{cnt2})".format(
ts=time.strftime("%Y/%m/%d-%H:%M:%S",
time.localtime()),
acts=action_str,
infs=info_str,
cnt1=child_cnt_0,
cnt2=child_cnt_1), })
else:
self.info_st.SetLabel("{0} (0 elements.)".format(info_str))
self.log.append({'stat': stat,
'logstr':
"[{ts}] {acts:<10s} : {infs} ({cnt1}|{cnt2})".format(
ts=time.strftime("%Y/%m/%d-%H:%M:%S",
time.localtime()),
acts=action_str,
infs=info_str,
cnt1=0,
cnt2=0), })
if self.info_st.IsEllipsized():
self.info_st.SetToolTip(wx.ToolTip(info_str)) | 0.002363 |
def get_xml_string_with_self_contained_assertion_within_encrypted_assertion(
self, assertion_tag):
""" Makes a encrypted assertion only containing self contained
namespaces.
:param assertion_tag: Tag for the assertion to be transformed.
:return: A new samlp.Resonse in string representation.
"""
prefix_map = self.get_prefix_map(
[self.encrypted_assertion._to_element_tree().find(assertion_tag)])
tree = self._to_element_tree()
self.set_prefixes(
tree.find(
self.encrypted_assertion._to_element_tree().tag).find(
assertion_tag), prefix_map)
return ElementTree.tostring(tree, encoding="UTF-8").decode('utf-8') | 0.00266 |
def dynamize_attribute_updates(self, pending_updates):
"""
Convert a set of pending item updates into the structure
required by Layer1.
"""
d = {}
for attr_name in pending_updates:
action, value = pending_updates[attr_name]
if value is None:
# DELETE without an attribute value
d[attr_name] = {"Action": action}
else:
d[attr_name] = {"Action": action,
"Value": self.dynamize_value(value)}
return d | 0.003515 |
def parse_localclasspath(self, tup_tree):
"""
Parse a LOCALCLASSPATH element and return the class path it represents
as a CIMClassName object.
::
<!ELEMENT LOCALCLASSPATH (LOCALNAMESPACEPATH, CLASSNAME)>
"""
self.check_node(tup_tree, 'LOCALCLASSPATH')
k = kids(tup_tree)
if len(k) != 2:
raise CIMXMLParseError(
_format("Element {0!A} has invalid number of child elements "
"{1!A} (expecting two child elements "
"(LOCALNAMESPACEPATH, CLASSNAME))", name(tup_tree), k),
conn_id=self.conn_id)
namespace = self.parse_localnamespacepath(k[0])
class_path = self.parse_classname(k[1])
class_path.namespace = namespace
return class_path | 0.002389 |
def y_tic_points(self, interval):
"Return the list of Y values for which tick marks and grid lines are drawn."
if type(interval) == FunctionType:
return interval(*self.y_range)
return self.y_coord.get_tics(self.y_range[0], self.y_range[1], interval) | 0.013986 |
    def list(self, storagemodel: object, modeldefinition=None, where=None) -> list:
""" list blob messages in container """
try:
blobnames = []
if where is None:
generator = modeldefinition['blobservice'].list_blobs(modeldefinition['container'])
else:
generator = modeldefinition['blobservice'].list_blobs(modeldefinition['container'], prefix=where)
for blob in generator:
blobnames.append(blob)
except Exception as e:
msg = 'can not list blobs in container {} because {!s}'.format(storagemodel._containername, e)
raise AzureStorageWrapException(storagemodel, msg=msg)
        return blobnames | 0.013021 |
def save_image(byteio, imgfmt):
"""Saves the specified image to disk.
Args:
byteio (bytes): image bytes to save to disk.
imgfmt (str): used as the extension of the saved file.
Returns:
str: a uuid for the saved image that can be added to the database entry.
"""
from os import path, mkdir
ptdir = "{}.{}".format(project, task)
uuid = str(uuid4())
#Save the image within the project/task specific folder.
idir = path.join(dbdir, ptdir)
if not path.isdir(idir):
mkdir(idir)
ipath = path.join(idir, "{}.{}".format(uuid, imgfmt))
with open(ipath, 'wb') as f:
f.write(byteio)
return uuid | 0.005822 |
def paramsinfo(param="", short=False):
""" This is the human readable version of the paramsinfo() function.
You give it a param and it prints to stdout.
"""
if short:
desc = 1
else:
desc = 0
if param == "*":
for key in pinfo:
print(pinfo[str(key)][desc])
elif param:
try:
print(pinfo[str(param)][desc])
except (KeyError, ValueError) as err:
## TODO: paramsinfo get description by param string not working.
## It would be cool to have an assembly object bcz then you could
## just do this:
##
## print(pinfo[data.paramsinfo.keys().index(param)])
print("\tKey name/number not recognized", err)
raise
else:
print("Enter a name or number for explanation of the parameter\n")
for key in pinfo:
print(pinfo[str(key)][desc].split("\n")[1][2:-10]) | 0.005252 |
def main(args=None):
# parse command-line options
parser = argparse.ArgumentParser(prog='rect_wpoly_for_mos')
# required arguments
parser.add_argument("input_list",
help="TXT file with list JSON files derived from "
"longslit data")
parser.add_argument("--fitted_bound_param", required=True,
help="Input JSON with fitted boundary parameters",
type=argparse.FileType('rt'))
parser.add_argument("--out_MOSlibrary", required=True,
help="Output JSON file with results",
type=lambda x: arg_file_is_new(parser, x))
# optional arguments
parser.add_argument("--debugplot",
help="Integer indicating plotting & debugging options"
" (default=0)",
default=0, type=int,
choices=DEBUGPLOT_CODES)
parser.add_argument("--echo",
help="Display full command line",
action="store_true")
args = parser.parse_args(args)
if args.echo:
print('\033[1m\033[31m% ' + ' '.join(sys.argv) + '\033[0m\n')
# ---
# Read input TXT file with list of JSON files
list_json_files = list_fileinfo_from_txt(args.input_list)
nfiles = len(list_json_files)
if abs(args.debugplot) >= 10:
print('>>> Number of input JSON files:', nfiles)
for item in list_json_files:
print(item)
if nfiles < 2:
raise ValueError("Insufficient number of input JSON files")
# read fitted boundary parameters and check that all the longslit JSON
# files have been computed using the same fitted boundary parameters
refined_boundary_model = RefinedBoundaryModelParam._datatype_load(
args.fitted_bound_param.name)
for ifile in range(nfiles):
coef_rect_wpoly = RectWaveCoeff._datatype_load(
list_json_files[ifile].filename)
uuid_tmp = coef_rect_wpoly.meta_info['origin']['bound_param']
if uuid_tmp[4:] != refined_boundary_model.uuid:
print('Expected uuid:', refined_boundary_model.uuid)
print('uuid for ifile #' + str(ifile + 1) + ": " + uuid_tmp)
raise ValueError("Fitted boundary parameter uuid's do not match")
# check consistency of grism, filter, DTU configuration and list of
# valid slitlets
coef_rect_wpoly_first_longslit = RectWaveCoeff._datatype_load(
list_json_files[0].filename)
filter_name = coef_rect_wpoly_first_longslit.tags['filter']
grism_name = coef_rect_wpoly_first_longslit.tags['grism']
dtu_conf = DtuConfiguration.define_from_dictionary(
coef_rect_wpoly_first_longslit.meta_info['dtu_configuration']
)
list_valid_islitlets = list(range(1, EMIR_NBARS + 1))
for idel in coef_rect_wpoly_first_longslit.missing_slitlets:
list_valid_islitlets.remove(idel)
for ifile in range(1, nfiles):
coef_rect_wpoly = RectWaveCoeff._datatype_load(
list_json_files[ifile].filename)
filter_tmp = coef_rect_wpoly.tags['filter']
if filter_name != filter_tmp:
print(filter_name)
print(filter_tmp)
raise ValueError("Unexpected different filter found")
grism_tmp = coef_rect_wpoly.tags['grism']
if grism_name != grism_tmp:
print(grism_name)
print(grism_tmp)
raise ValueError("Unexpected different grism found")
coef_rect_wpoly = RectWaveCoeff._datatype_load(
list_json_files[ifile].filename)
dtu_conf_tmp = DtuConfiguration.define_from_dictionary(
coef_rect_wpoly.meta_info['dtu_configuration']
)
if dtu_conf != dtu_conf_tmp:
print(dtu_conf)
print(dtu_conf_tmp)
raise ValueError("Unexpected different DTU configurations found")
list_valid_islitlets_tmp = list(range(1, EMIR_NBARS + 1))
for idel in coef_rect_wpoly.missing_slitlets:
list_valid_islitlets_tmp.remove(idel)
if list_valid_islitlets != list_valid_islitlets_tmp:
print(list_valid_islitlets)
print(list_valid_islitlets_tmp)
raise ValueError("Unexpected different list of valid slitlets")
# check consistency of horizontal bounding box limits (bb_nc1_orig and
# bb_nc2_orig) and ymargin_bb, and store the values for each slitlet
dict_bb_param = {}
print("Checking horizontal bounding box limits and ymargin_bb:")
for islitlet in list_valid_islitlets:
islitlet_progress(islitlet, EMIR_NBARS)
cslitlet = 'slitlet' + str(islitlet).zfill(2)
dict_bb_param[cslitlet] = {}
for par in ['bb_nc1_orig', 'bb_nc2_orig', 'ymargin_bb']:
value_initial = \
coef_rect_wpoly_first_longslit.contents[islitlet - 1][par]
for ifile in range(1, nfiles):
coef_rect_wpoly = RectWaveCoeff._datatype_load(
list_json_files[ifile].filename)
value_tmp = coef_rect_wpoly.contents[islitlet - 1][par]
if value_initial != value_tmp:
print(islitlet, value_initial, value_tmp)
print(value_tmp)
raise ValueError("Unexpected different " + par)
dict_bb_param[cslitlet][par] = value_initial
print('OK!')
# ---
# Read and store all the longslit data
list_coef_rect_wpoly = []
for ifile in range(nfiles):
coef_rect_wpoly = RectWaveCoeff._datatype_load(
list_json_files[ifile].filename)
list_coef_rect_wpoly.append(coef_rect_wpoly)
# ---
# Initialize structure to save results into an ouptut JSON file
outdict = {}
outdict['refined_boundary_model'] = refined_boundary_model.__getstate__()
outdict['instrument'] = 'EMIR'
outdict['meta_info'] = {}
outdict['meta_info']['creation_date'] = datetime.now().isoformat()
outdict['meta_info']['description'] = \
'rectification and wavelength calibration polynomial coefficients ' \
'as a function of csu_bar_slit_center for MOS'
outdict['meta_info']['recipe_name'] = 'undefined'
outdict['meta_info']['origin'] = {}
outdict['meta_info']['origin']['wpoly_longslits'] = {}
for ifile in range(nfiles):
cdum = 'longslit_' + str(ifile + 1).zfill(3) + '_uuid'
outdict['meta_info']['origin']['wpoly_longslits'][cdum] = \
list_coef_rect_wpoly[ifile].uuid
outdict['tags'] = {}
outdict['tags']['grism'] = grism_name
outdict['tags']['filter'] = filter_name
outdict['dtu_configuration'] = dtu_conf.outdict()
outdict['uuid'] = str(uuid4())
outdict['contents'] = {}
# include bb_nc1_orig, bb_nc2_orig and ymargin_bb for each slitlet
# (note that the values of bb_ns1_orig and bb_ns2_orig cannot be
# computed at this stage because they depend on csu_bar_slit_center)
for islitlet in list_valid_islitlets:
cslitlet = 'slitlet' + str(islitlet).zfill(2)
outdict['contents'][cslitlet] = dict_bb_param[cslitlet]
# check that order for rectification transformations is the same for all
# the slitlets and longslit configurations
order_check_list = []
for ifile in range(nfiles):
tmpdict = list_coef_rect_wpoly[ifile].contents
for islitlet in list_valid_islitlets:
ttd_order = tmpdict[islitlet - 1]['ttd_order']
if ttd_order is not None:
order_check_list.append(ttd_order)
ttd_order_modeled = \
tmpdict[islitlet - 1]['ttd_order_longslit_model']
order_check_list.append(ttd_order_modeled)
# remove duplicates in list
order_no_duplicates = list(set(order_check_list))
if len(order_no_duplicates) != 1:
print('order_no_duplicates:', order_no_duplicates)
raise ValueError('tdd_order is not constant!')
ttd_order = int(order_no_duplicates[0])
ncoef_rect = ncoef_fmap(ttd_order)
if abs(args.debugplot) >= 10:
print('>>> ttd_order........:', ttd_order)
print('>>> ncoef_rect.......:', ncoef_rect)
# check that polynomial degree in frontiers and spectrails are the same
poldeg_check_list = []
for ifile in range(nfiles):
tmpdict = list_coef_rect_wpoly[ifile].contents
for islitlet in list_valid_islitlets:
tmppoly = tmpdict[islitlet - 1]['frontier']['poly_coef_lower']
poldeg_check_list.append(len(tmppoly) - 1)
tmppoly = tmpdict[islitlet - 1]['frontier']['poly_coef_upper']
poldeg_check_list.append(len(tmppoly) - 1)
tmppoly = tmpdict[islitlet - 1]['spectrail']['poly_coef_lower']
poldeg_check_list.append(len(tmppoly) - 1)
tmppoly = tmpdict[islitlet - 1]['spectrail']['poly_coef_middle']
poldeg_check_list.append(len(tmppoly) - 1)
tmppoly = tmpdict[islitlet - 1]['spectrail']['poly_coef_upper']
poldeg_check_list.append(len(tmppoly) - 1)
# remove duplicates in list
poldeg_no_duplicates = list(set(poldeg_check_list))
if len(poldeg_no_duplicates) != 1:
print('poldeg_no_duplicates:', poldeg_no_duplicates)
raise ValueError('poldeg is not constant in frontiers and '
'spectrails!')
poldeg_spectrails = int(poldeg_no_duplicates[0])
if abs(args.debugplot) >= 10:
print('>>> poldeg spectrails:', poldeg_spectrails)
# check that polynomial degree of wavelength calibration is the same for
# all the slitlets
poldeg_check_list = []
for ifile in range(nfiles):
tmpdict = list_coef_rect_wpoly[ifile].contents
for islitlet in list_valid_islitlets:
tmppoly = tmpdict[islitlet - 1]['wpoly_coeff']
poldeg_check_list.append(len(tmppoly) - 1)
tmppoly = tmpdict[islitlet - 1]['wpoly_coeff_longslit_model']
poldeg_check_list.append(len(tmppoly) - 1)
# remove duplicates in list
poldeg_no_duplicates = list(set(poldeg_check_list))
if len(poldeg_no_duplicates) != 1:
print('poldeg_no_duplicates:', poldeg_no_duplicates)
raise ValueError('poldeg is not constant in wavelength calibration '
'polynomials!')
poldeg_wavecal = int(poldeg_no_duplicates[0])
if abs(args.debugplot) >= 10:
print('>>> poldeg wavecal...:', poldeg_wavecal)
# ---
# csu_bar_slit_center values for each slitlet
print("CSU_bar_slit_center values:")
for islitlet in list_valid_islitlets:
islitlet_progress(islitlet, EMIR_NBARS)
cslitlet = 'slitlet' + str(islitlet).zfill(2)
list_csu_bar_slit_center = []
for ifile in range(nfiles):
tmpdict = list_coef_rect_wpoly[ifile].contents[islitlet - 1]
csu_bar_slit_center = tmpdict['csu_bar_slit_center']
list_csu_bar_slit_center.append(csu_bar_slit_center)
# check that list_csu_bar_slit_center is properly sorted
if not np.all(list_csu_bar_slit_center[:-1] <=
list_csu_bar_slit_center[1:]):
print('cslitlet: ', cslitlet)
print('list_csu_bar_slit_center: ', list_csu_bar_slit_center)
raise ValueError('Unsorted list_csu_bar_slit_center')
outdict['contents'][cslitlet]['list_csu_bar_slit_center'] = \
list_csu_bar_slit_center
print('OK!')
# ---
# rectification polynomial coefficients
# note: when aij and bij have not been computed, we use the modeled
# version aij_longslit_model and bij_longslit_model
print("Rectification polynomial coefficients:")
for islitlet in list_valid_islitlets:
islitlet_progress(islitlet, EMIR_NBARS)
cslitlet = 'slitlet' + str(islitlet).zfill(2)
outdict['contents'][cslitlet]['ttd_order'] = ttd_order
outdict['contents'][cslitlet]['ncoef_rect'] = ncoef_rect
for keycoef in ['ttd_aij', 'ttd_bij', 'tti_aij', 'tti_bij']:
for icoef in range(ncoef_rect):
ccoef = str(icoef).zfill(2)
list_cij = []
for ifile in range(nfiles):
tmpdict = \
list_coef_rect_wpoly[ifile].contents[islitlet - 1]
cij = tmpdict[keycoef]
if cij is not None:
list_cij.append(cij[icoef])
else:
cij_modeled = tmpdict[keycoef + '_longslit_model']
if cij_modeled is None:
raise ValueError("Unexpected cij_modeled=None!")
else:
list_cij.append(cij_modeled[icoef])
if abs(args.debugplot) >= 10:
print("Warning: using " + keycoef +
"_longslit_model for " + cslitlet +
" in file " +
list_json_files[ifile].filename)
cdum = 'list_' + keycoef + '_' + ccoef
outdict['contents'][cslitlet][cdum] = list_cij
print('OK!')
# ---
# wavelength calibration polynomial coefficients
# note: when wpoly_coeff have not been computed, we use the
# wpoly_coeff_longslit_model
print("Wavelength calibration polynomial coefficients:")
for islitlet in list_valid_islitlets:
islitlet_progress(islitlet, EMIR_NBARS)
cslitlet = 'slitlet' + str(islitlet).zfill(2)
outdict['contents'][cslitlet]['wpoly_degree'] = poldeg_wavecal
for icoef in range(poldeg_wavecal + 1):
ccoef = str(icoef).zfill(2)
list_cij = []
for ifile in range(nfiles):
tmpdict = list_coef_rect_wpoly[ifile].contents[islitlet - 1]
cij = tmpdict['wpoly_coeff']
if cij is not None:
list_cij.append(cij[icoef])
else:
cij_modeled = tmpdict['wpoly_coeff_longslit_model']
if cij_modeled is None:
raise ValueError("Unexpected cij_modeled=None!")
else:
list_cij.append(cij_modeled[icoef])
if abs(args.debugplot) >= 10:
print("Warning: using wpoly_coeff_longslit_model" +
" for " + cslitlet +
" in file " +
list_json_files[ifile].filename)
outdict['contents'][cslitlet]['list_wpoly_coeff_' + ccoef] = \
list_cij
print('OK!')
# ---
# OBSOLETE
# Save resulting JSON structure
'''
with open(args.out_MOSlibrary.name + '_old', 'w') as fstream:
json.dump(outdict, fstream, indent=2, sort_keys=True)
print('>>> Saving file ' + args.out_MOSlibrary.name + '_old')
'''
# --
# Create object of type MasterRectWave with library of coefficients
# for rectification and wavelength calibration
master_rectwv = MasterRectWave(instrument='EMIR')
master_rectwv.quality_control = numina.types.qc.QC.GOOD
master_rectwv.tags['grism'] = grism_name
master_rectwv.tags['filter'] = filter_name
master_rectwv.meta_info['dtu_configuration'] = outdict['dtu_configuration']
master_rectwv.meta_info['refined_boundary_model'] = {
'parmodel': refined_boundary_model.meta_info['parmodel']
}
master_rectwv.meta_info['refined_boundary_model'].update(
outdict['refined_boundary_model']['contents']
)
master_rectwv.total_slitlets = EMIR_NBARS
master_rectwv.meta_info['origin'] = {
'bound_param': 'uuid' + refined_boundary_model.uuid,
'longslit_frames': ['uuid:' + list_coef_rect_wpoly[ifile].uuid
for ifile in range(nfiles)]
}
for i in range(EMIR_NBARS):
islitlet = i + 1
dumdict = {'islitlet': islitlet}
cslitlet = 'slitlet' + str(islitlet).zfill(2)
if cslitlet in outdict['contents']:
dumdict.update(outdict['contents'][cslitlet])
else:
dumdict.update({
'bb_nc1_orig': 0,
'bb_nc2_orig': 0,
'ymargin_bb': 0,
'list_csu_bar_slit_center': [],
'ttd_order': 0,
'ncoef_rect': 0,
'wpolydegree': 0
})
master_rectwv.missing_slitlets.append(islitlet)
master_rectwv.contents.append(dumdict)
master_rectwv.writeto(args.out_MOSlibrary.name)
print('>>> Saving file ' + args.out_MOSlibrary.name) | 0.000119 |
def _scaled_dist(self, X, X2=None):
"""
Efficiently compute the scaled distance, r.
        .. math::
            r = \sqrt{ \sum_{q=1}^Q (x_q - x'_q)^2 / l_q^2 }
        Note that if there is only one lengthscale, l comes outside the sum. In
        this case we compute the unscaled distance first (in a separate
        function for caching) and divide by lengthscale afterwards
"""
if self.ARD:
if X2 is not None:
X2 = X2 / self.lengthscale
return self._unscaled_dist(X/self.lengthscale, X2)
else:
return self._unscaled_dist(X, X2)/self.lengthscale | 0.006231 |
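A standalone numpy sketch of the ARD-scaled pairwise distance r described above (one lengthscale per input dimension), independent of the kernel class.
import numpy as np

X = np.random.rand(5, 3)
lengthscale = np.array([0.5, 1.0, 2.0])          # one lengthscale per column
Xs = X / lengthscale
r = np.sqrt(((Xs[:, None, :] - Xs[None, :, :]) ** 2).sum(axis=-1))
# r[i, j] = sqrt(sum_q (X[i, q] - X[j, q])**2 / lengthscale[q]**2)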
def buildType(valtype, extra=[], display=False, control=False, valueAlarm=False):
"""Build a Type
:param str valtype: A type code to be used with the 'value' field. See :ref:`valuecodes`
:param list extra: A list of tuples describing additional non-standard fields
:param bool display: Include optional fields for display meta-data
:param bool control: Include optional fields for control meta-data
:param bool valueAlarm: Include optional fields for alarm level meta-data
:returns: A :py:class:`Type`
"""
isarray = valtype[:1] == 'a'
F = [
('value', valtype),
('alarm', alarm),
('timeStamp', timeStamp),
]
_metaHelper(F, valtype, display=display, control=control, valueAlarm=valueAlarm)
F.extend(extra)
return Type(id="epics:nt/NTScalarArray:1.0" if isarray else "epics:nt/NTScalar:1.0",
spec=F) | 0.008282 |
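A hypothetical call of the helper above, assuming 'd' is the scalar double type code and 'ad' its array form (see the value-codes reference mentioned in the docstring).
scalar_type = buildType('d', display=True, control=True)
array_type = buildType('ad', valueAlarm=False)
# scalar_type carries id 'epics:nt/NTScalar:1.0'; array_type 'epics:nt/NTScalarArray:1.0'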
def classify_import(module_name, application_directories=('.',)):
"""Classifies an import by its package.
Returns a value in ImportType.__all__
:param text module_name: The dotted notation of a module
:param tuple application_directories: tuple of paths which are considered
application roots.
"""
# Only really care about the first part of the path
base, _, _ = module_name.partition('.')
found, module_path, is_builtin = _get_module_info(
base, application_directories,
)
if base == '__future__':
return ImportType.FUTURE
# Relative imports: `from .foo import bar`
elif base == '':
return ImportType.APPLICATION
# If imp tells us it is builtin, it is builtin
elif is_builtin:
return ImportType.BUILTIN
# If the module path exists in the project directories
elif _module_path_is_local_and_is_not_symlinked(
module_path, application_directories,
):
return ImportType.APPLICATION
# Otherwise we assume it is a system module or a third party module
elif (
found and
PACKAGES_PATH not in module_path and
not _due_to_pythonpath(module_path)
):
return ImportType.BUILTIN
else:
return ImportType.THIRD_PARTY | 0.00077 |
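A hypothetical usage sketch; the results depend on the interpreter, the installed packages and the application_directories passed in.
print(classify_import('os'))                       # expected: ImportType.BUILTIN
print(classify_import('requests'))                 # expected: ImportType.THIRD_PARTY if installed
print(classify_import('myapp.models', application_directories=('.',)))  # APPLICATION if myapp/ is local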