| text (string, length 78–104k) | score (float64, 0–0.18) |
|---|---|
def _convert_epytext(line):
"""
>>> _convert_epytext("L{A}")
:class:`A`
"""
line = line.replace('@', ':')
for p, sub in RULES:
line = re.sub(p, sub, line)
return line | 0.00495 |
def appliance_device_read_community(self):
"""
Gets the ApplianceDeviceReadCommunity API client.
Returns:
ApplianceDeviceReadCommunity:
"""
if not self.__appliance_device_read_community:
self.__appliance_device_read_community = ApplianceDeviceReadCommunity(self.__connection)
return self.__appliance_device_read_community | 0.007614 |
def apply(
self, value, locale, currency=None, currency_digits=True,
decimal_quantization=True):
"""Renders into a string a number following the defined pattern.
Forced decimal quantization is active by default so we'll produce a
number string that strictly follows CLDR pattern definitions.
"""
if not isinstance(value, decimal.Decimal):
value = decimal.Decimal(str(value))
value = value.scaleb(self.scale)
# Separate the absolute value from its sign.
is_negative = int(value.is_signed())
value = abs(value).normalize()
# Prepare scientific notation metadata.
if self.exp_prec:
value, exp, exp_sign = self.scientific_notation_elements(
value, locale)
# Adjust the precision of the fractional part and force it to the
# currency's if necessary.
frac_prec = self.frac_prec
if currency and currency_digits:
frac_prec = (babel.numbers.get_currency_precision(currency), ) * 2
# Bump decimal precision to the natural precision of the number if it
# exceeds the one we're about to use. This adaptive precision is only
# triggered if the decimal quantization is disabled or if a scientific
# notation pattern has a missing mandatory fractional part (as in the
# default '#E0' pattern). This special case has been extensively
# discussed at
# https://github.com/python-babel/babel/pull/494#issuecomment-307649969
if not decimal_quantization or (self.exp_prec and frac_prec == (0, 0)):
frac_prec = (frac_prec[0], max([frac_prec[1],
get_decimal_precision(value)]))
# Render scientific notation.
if self.exp_prec:
number = ''.join([
self._quantize_value(value, locale, frac_prec),
babel.numbers.get_exponential_symbol(locale),
exp_sign,
self._format_int(
str(exp), self.exp_prec[0], self.exp_prec[1], locale)])
# Is it a significant digits pattern?
elif '@' in self.pattern:
text = self._format_significant(value,
self.int_prec[0],
self.int_prec[1])
a, sep, b = text.partition(".")
number = self._format_int(a, 0, 1000, locale)
if sep:
number += babel.numbers.get_decimal_symbol(locale) + b
# A normal number pattern.
else:
number = self._quantize_value(value, locale, frac_prec)
retval = ''.join([
self.prefix[is_negative],
number,
self.suffix[is_negative]])
if u'¤' in retval:
retval = retval.replace(u'¤¤¤',
babel.numbers.get_currency_name(
currency, value, locale))
retval = retval.replace(u'¤¤', currency.upper())
retval = retval.replace(u'¤', babel.numbers.get_currency_symbol(
currency, locale))
return retval | 0.000618 |
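The `apply` method above is Babel's internal renderer for CLDR number patterns. A minimal usage sketch through Babel's public helpers, which exercise this code path, might look like the following; the outputs noted in the comments can vary slightly with the installed Babel/CLDR version.

```python
# Minimal sketch using Babel's public API (assumes the `babel` package is installed).
import babel.numbers

babel.numbers.format_decimal(1234.5678, locale='en_US')       # '1,234.568'
babel.numbers.format_currency(1234.5, 'USD', locale='en_US')  # '$1,234.50'
babel.numbers.format_scientific(0.00012, locale='en_US')      # '1.2E-4'
```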
def create_sym_log_bar_chart(self, x_labels, y_values, y_label):
"""Creates bar chart (log version)
:param x_labels: Names for each variable
:param y_values: Values of x labels
:param y_label: Label of y axis
:return: Sym-log bar chart
"""
ax1 = self.create_bar_chart(x_labels, y_values, y_label)
ax1.set_yscale("symlog", linthreshy=1e-12) # symmetric-log plot with a linear region around zero
return ax1 | 0.004505 |
def __split_genomic_interval_filename(fn):
"""
Split a filename of the format chrom:start-end.ext or chrom.ext (full chrom).
:return: tuple of (chrom, start, end) -- 'start' and 'end' are None if not
present in the filename.
"""
if fn is None or fn == "":
raise ValueError("invalid filename: " + str(fn))
fn = ".".join(fn.split(".")[:-1])
parts = fn.split(":")
if len(parts) == 1:
return (parts[0].strip(), None, None)
else:
r_parts = parts[1].split("-")
if len(r_parts) != 2:
raise ValueError("Invalid filename: " + str(fn))
return (parts[0].strip(), int(r_parts[0]), int(r_parts[1])) | 0.01248 |
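Following the parsing logic above, a couple of hypothetical filenames and the tuples they would produce:

```python
# Illustrative only; the filenames are hypothetical.
assert __split_genomic_interval_filename("chr1:1000-2000.bed") == ("chr1", 1000, 2000)
assert __split_genomic_interval_filename("chr1.bed") == ("chr1", None, None)
```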
def ignore_after(seconds, coro=None, *args, timeout_result=None):
'''Execute the specified coroutine and return its result. Issue a
cancellation request after seconds have elapsed. When a timeout
occurs, no exception is raised. Instead, timeout_result is
returned.
If coro is None, the result is an asynchronous context manager
that applies a timeout to a block of statements. For the context
manager case, the resulting context manager object has an expired
attribute set to True if time expired.
Note: ignore_after() may also be composed with other timeout
operations. TimeoutCancellationError and UncaughtTimeoutError
exceptions might be raised according to the same rules as for
timeout_after().
'''
if coro:
return _ignore_after_func(seconds, False, coro, args, timeout_result)
return TimeoutAfter(seconds, ignore=True) | 0.001117 |
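A minimal sketch of the two call forms described in the docstring (curio-style API; `fetch` is a hypothetical coroutine, not part of the library):

```python
# Sketch only: `fetch` is a placeholder coroutine.
async def fetch():
    ...

async def main():
    # Function form: returns timeout_result (None by default) instead of raising.
    result = await ignore_after(5, fetch())

    # Context-manager form: the block is cancelled once 5 seconds elapse.
    async with ignore_after(5) as timeout:
        await fetch()
    if timeout.expired:
        print("block timed out")
```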
def _add_series_or_dataframe_operations(cls):
"""
Add the series or dataframe only operations to the cls; evaluate
the doc strings again.
"""
from pandas.core import window as rwindow
@Appender(rwindow.rolling.__doc__)
def rolling(self, window, min_periods=None, center=False,
win_type=None, on=None, axis=0, closed=None):
axis = self._get_axis_number(axis)
return rwindow.rolling(self, window=window,
min_periods=min_periods,
center=center, win_type=win_type,
on=on, axis=axis, closed=closed)
cls.rolling = rolling
@Appender(rwindow.expanding.__doc__)
def expanding(self, min_periods=1, center=False, axis=0):
axis = self._get_axis_number(axis)
return rwindow.expanding(self, min_periods=min_periods,
center=center, axis=axis)
cls.expanding = expanding
@Appender(rwindow.ewm.__doc__)
def ewm(self, com=None, span=None, halflife=None, alpha=None,
min_periods=0, adjust=True, ignore_na=False,
axis=0):
axis = self._get_axis_number(axis)
return rwindow.ewm(self, com=com, span=span, halflife=halflife,
alpha=alpha, min_periods=min_periods,
adjust=adjust, ignore_na=ignore_na, axis=axis)
cls.ewm = ewm | 0.001294 |
def get(self, buffer_type, offset):
"""Get a reading from the buffer at offset.
Offset is specified relative to the start of the data buffer.
This means that if the buffer rolls over, the offset for a given
item will appear to change. Anyone holding an offset outside of this
engine object will need to be notified when rollovers happen (i.e.
popn is called so that they can update their offset indices)
Args:
buffer_type (str): The buffer to pop from (either u"storage" or u"streaming")
offset (int): The offset of the reading to get
"""
if buffer_type == u'streaming':
chosen_buffer = self.streaming_data
else:
chosen_buffer = self.storage_data
if offset >= len(chosen_buffer):
raise StreamEmptyError("Invalid index given in get command", requested=offset, stored=len(chosen_buffer), buffer=buffer_type)
return chosen_buffer[offset] | 0.004016 |
def dump_poly_data(dataset_dir, data_dir, dataset, color_array_info, root=None, compress=True):
"""Dump poly data object to vtkjs"""
if root is None:
root = {}
root['vtkClass'] = 'vtkPolyData'
container = root
# Points
points = dump_data_array(dataset_dir, data_dir,
dataset.GetPoints().GetData(), {}, compress)
points['vtkClass'] = 'vtkPoints'
container['points'] = points
# Cells
_cells = container
# Verts
if dataset.GetVerts() and dataset.GetVerts().GetData().GetNumberOfTuples() > 0:
_verts = dump_data_array(dataset_dir, data_dir,
dataset.GetVerts().GetData(), {}, compress)
_cells['verts'] = _verts
_cells['verts']['vtkClass'] = 'vtkCellArray'
# Lines
if dataset.GetLines() and dataset.GetLines().GetData().GetNumberOfTuples() > 0:
_lines = dump_data_array(dataset_dir, data_dir,
dataset.GetLines().GetData(), {}, compress)
_cells['lines'] = _lines
_cells['lines']['vtkClass'] = 'vtkCellArray'
# Polys
if dataset.GetPolys() and dataset.GetPolys().GetData().GetNumberOfTuples() > 0:
_polys = dump_data_array(dataset_dir, data_dir,
dataset.GetPolys().GetData(), {}, compress)
_cells['polys'] = _polys
_cells['polys']['vtkClass'] = 'vtkCellArray'
# Strips
if dataset.GetStrips() and dataset.GetStrips().GetData().GetNumberOfTuples() > 0:
_strips = dump_data_array(dataset_dir, data_dir,
dataset.GetStrips().GetData(), {}, compress)
_cells['strips'] = _strips
_cells['strips']['vtkClass'] = 'vtkCellArray'
dump_color_array(dataset_dir, data_dir, color_array_info, container, compress)
# PointData TCoords
dump_t_coords(dataset_dir, data_dir, dataset, container, compress)
# dump_normals(dataset_dir, data_dir, dataset, container, compress)
return root | 0.005967 |
def main():
"""
Simple command-line program for powering on virtual machines on a system.
"""
args = GetArgs()
if args.password:
password = args.password
else:
password = getpass.getpass(prompt='Enter password for host %s and user %s: ' % (args.host,args.user))
try:
vmnames = args.vmname
if not len(vmnames):
print("No virtual machine specified for poweron")
sys.exit()
context = None
if hasattr(ssl, '_create_unverified_context'):
context = ssl._create_unverified_context()
si = SmartConnect(host=args.host,
user=args.user,
pwd=password,
port=int(args.port),
sslContext=context)
if not si:
print("Cannot connect to specified host using specified username and password")
sys.exit()
atexit.register(Disconnect, si)
# Retrieve the list of Virtual Machines from the inventory objects
# under the rootFolder
content = si.content
objView = content.viewManager.CreateContainerView(content.rootFolder,
[vim.VirtualMachine],
True)
vmList = objView.view
objView.Destroy()
# Find the vm and power it on
tasks = [vm.PowerOn() for vm in vmList if vm.name in vmnames]
# Wait for power on to complete
WaitForTasks(tasks, si)
print("Virtual Machine(s) have been powered on successfully")
except vmodl.MethodFault as e:
print("Caught vmodl fault : " + e.msg)
except Exception as e:
print("Caught Exception : " + str(e)) | 0.022042 |
def create_class(self, method):
"""
Build the estimator class.
Returns
-------
:return : string
The built class as string.
"""
temp_type = self.temp('type')
temp_arr = self.temp('arr')
temp_arr_ = self.temp('arr[]')
temp_arr__ = self.temp('arr[][]')
# Samples:
temps = []
for atts in enumerate(self.estimator._fit_X): # pylint: disable=W0212
tmp = [temp_type.format(self.repr(a)) for a in atts[1]]
tmp = temp_arr.format(', '.join(tmp))
temps.append(tmp)
temps = ', '.join(temps)
temps = temp_arr__.format(type='double', name='X', values=temps,
n=self.n_templates, m=self.n_features)
# Classes:
classes = self.estimator._y # pylint: disable=W0212
classes = [temp_type.format(int(c)) for c in classes]
classes = ', '.join(classes)
classes = temp_arr_.format(type='int', name='y', values=classes,
n=self.n_templates)
temp_class = self.temp('separated.class')
return temp_class.format(class_name=self.class_name,
method_name=self.method_name, method=method,
n_features=self.n_features, X=temps, y=classes,
n_neighbors=self.n_neighbors,
n_templates=self.n_templates,
n_classes=self.n_classes,
power=self.power_param) | 0.001866 |
def read_excitation_energies(self):
"""
Read excitation energies after a TD-DFT calculation.
Returns:
A list: A list of tuples, one for each transition, such as
[(energy (eV), lambda (nm), oscillator strength), ... ]
"""
transitions = list()
# read in file
with zopen(self.filename, "r") as f:
line = f.readline()
td = False
while line != "":
if re.search(r"^\sExcitation energies and oscillator strengths:", line):
td = True
if td:
if re.search(r"^\sExcited State\s*\d", line):
val = [float(v) for v in float_patt.findall(line)]
transitions.append(tuple(val[0:3]))
line = f.readline()
return transitions | 0.003432 |
def sls(mods, saltenv='base', test=None, exclude=None, **kwargs):
'''
Create the seed file for a state.sls run
'''
st_kwargs = __salt__.kwargs
__opts__['grains'] = __grains__
__pillar__.update(kwargs.get('pillar', {}))
opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
st_ = salt.client.ssh.state.SSHHighState(
opts,
__pillar__,
__salt__,
__context__['fileclient'])
st_.push_active()
mods = _parse_mods(mods)
high_data, errors = st_.render_highstate({saltenv: mods})
if exclude:
if isinstance(exclude, six.string_types):
exclude = exclude.split(',')
if '__exclude__' in high_data:
high_data['__exclude__'].extend(exclude)
else:
high_data['__exclude__'] = exclude
high_data, ext_errors = st_.state.reconcile_extend(high_data)
errors += ext_errors
errors += st_.state.verify_high(high_data)
if errors:
return errors
high_data, req_in_errors = st_.state.requisite_in(high_data)
errors += req_in_errors
high_data = st_.state.apply_exclude(high_data)
# Verify that the high data is structurally sound
if errors:
return errors
# Compile and verify the raw chunks
chunks = st_.state.compile_high_data(high_data)
file_refs = salt.client.ssh.state.lowstate_file_refs(
chunks,
_merge_extra_filerefs(
kwargs.get('extra_filerefs', ''),
opts.get('extra_filerefs', '')
)
)
roster = salt.roster.Roster(opts, opts.get('roster', 'flat'))
roster_grains = roster.opts['grains']
# Create the tar containing the state pkg and relevant files.
_cleanup_slsmod_low_data(chunks)
trans_tar = salt.client.ssh.state.prep_trans_tar(
__context__['fileclient'],
chunks,
file_refs,
__pillar__,
st_kwargs['id_'],
roster_grains)
trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, opts['hash_type'])
cmd = 'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format(
opts['thin_dir'],
test,
trans_tar_sum,
opts['hash_type'])
single = salt.client.ssh.Single(
opts,
cmd,
fsclient=__context__['fileclient'],
minion_opts=__salt__.minion_opts,
**st_kwargs)
single.shell.send(
trans_tar,
'{0}/salt_state.tgz'.format(opts['thin_dir']))
stdout, stderr, _ = single.cmd_block()
# Clean up our tar
try:
os.remove(trans_tar)
except (OSError, IOError):
pass
# Read in the JSON data and return the data structure
try:
return salt.utils.json.loads(stdout)
except Exception as e:
log.error("JSON Render failed for: %s\n%s", stdout, stderr)
log.error(six.text_type(e))
# If for some reason the json load fails, return the stdout
return stdout | 0.000658 |
def getBucketInfo(self, buckets):
""" See the function description in base.py
"""
# For the category encoder, the bucket index is the category index
bucketInfo = self.encoder.getBucketInfo(buckets)[0]
categoryIndex = int(round(bucketInfo.value))
category = self.indexToCategory[categoryIndex]
return [EncoderResult(value=category, scalar=categoryIndex,
encoding=bucketInfo.encoding)] | 0.004556 |
def to_html_(self) -> str:
"""Convert the main dataframe to html
:return: html data
:rtype: str
:example: ``ds.to_html_()``
"""
try:
renderer = pytablewriter.HtmlTableWriter
data = self._build_export(renderer)
return data
except Exception as e:
self.err(e, "Can not convert data to html") | 0.005076 |
def Open(self, filename):
"""Opens the database reader object.
Args:
filename (str): filename of the database.
Returns:
bool: True if successful.
Raises:
RuntimeError: if the version or string format of the database
is not supported.
"""
if not super(WinevtResourcesSqlite3DatabaseReader, self).Open(filename):
return False
version = self.GetMetadataAttribute('version')
if not version or version != '20150315':
raise RuntimeError('Unsupported version: {0:s}'.format(version))
string_format = self.GetMetadataAttribute('string_format')
if not string_format:
string_format = 'wrc'
if string_format not in ('pep3101', 'wrc'):
raise RuntimeError('Unsupported string format: {0:s}'.format(
string_format))
self._string_format = string_format
return True | 0.005682 |
def set_config_variables(repo, variables):
"""Set config variables
Args:
repo (git.Repo): repo
variables (dict): entries of the form 'user.email': '[email protected]'
"""
with repo.config_writer() as writer:
for k, value in variables.items():
section, option = k.split('.')
writer.set_value(section, option, value)
writer.release() | 0.002488 |
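A minimal usage sketch, assuming GitPython is installed; the repository path is hypothetical:

```python
import git  # GitPython

repo = git.Repo("/path/to/repo")  # hypothetical path
set_config_variables(repo, {"user.name": "Jane Doe",
                            "user.email": "jane@example.com"})
```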
def record_process(self, process, prg=''):
"""
log a process or program - log a physical program (.py, .bat, .exe)
"""
self._log(self.logFileProcess, force_to_string(process), prg) | 0.009434 |
def read_inp(path):
"""
Reads Abaqus inp file
"""
def lineInfo(line):
out = {"type": "data"}
if line[0] == "*":
if line[1] == "*":
out["type"] = "comment"
out["text"] = line[2:]
else:
out["type"] = "command"
words = line[1:].split(",")
out["value"] = words[0].strip()
out["options"] = {}
for word in words[1:]:
key, value = [s.strip() for s in word.split("=")]
out["options"][key] = value
return out
def elementMapper(inpeltype):
if inpeltype == "t3d2": return "Line2"
if inpeltype[:3] in ["cps", "cpe", "cax"]:
if inpeltype[3] == "3": return "tri3"
if inpeltype[3] == "4": return "quad4"
if inpeltype[:3] in ["c3d"]:
if inpeltype[3] == "4": return "tetra4"
if inpeltype[3] == "5": return "pyra5"
if inpeltype[3] == "6": return "prism6"
if inpeltype[3] == "8": return "hexa8"
nlabels = []
coords = []
nsets = {}
elabels = []
etypes = []
connectivity = []
esets = {}
surfaces = {}
# File preprocessing
lines = np.array([l.strip().lower() for l in open(path).readlines()])
lines = [line for line in lines if len(line) != 0]
# Data processing
env, setlabel = None, None
for line in lines:
d = lineInfo(line)
if d["type"] == "command":
env = d["value"]
# Nodes
if env == "node":
opt = d["options"]
currentset = None
if "nset" in opt.keys():
currentset = opt["nset"]
nsets[currentset] = []
# Elements
if env == "element":
opt = d["options"]
eltype = elementMapper(opt["type"])
currentset = None
if "elset" in opt.keys():
currentset = opt["elset"]
esets[currentset] = []
# Nsets
if env == "nset":
opt = d["options"]
currentset = opt["nset"]
nsets[currentset] = []
# Elsets
if env == "elset":
opt = d["options"]
currentset = opt["elset"]
esets[currentset] = []
# Surfaces
if env == "surface":
opt = d["options"]
currentsurface = opt["name"]
if opt["type"] == "element":
surfaces[currentsurface] = []
if d["type"] == "data":
words = line.strip().split(",")
if env == "node":
label = int(words[0])
nlabels.append(label)
coords.append(
np.array([np.float64(w) for w in words[1:4]])
)
if currentset != None: nsets[currentset].append(label)
if env == "element":
label = int(words[0])
elabels.append(label)
connectivity.append(
np.array( [np.int32(w) for w in words[1:] if len(w) != 0 ])
)
etypes.append(eltype)
if currentset != None: esets[currentset].append(label)
if env == "nset":
nsets[currentset] += [int(w) for w in words if len(w) != 0]
if env == "elset":
esets[currentset] += [int(w) for w in words if len(w) != 0]
if env == "surface":
if opt["type"] == "element":
surfaces[currentsurface].append([w.strip() for w in words])
surfaces2 = {}
for tag, surface in surfaces.items():
surfaces2[tag] = []
for sdata in surface:
labels = esets[sdata[0]]
face = int(sdata[1].split("s")[1].strip())-1
for label in labels:
surfaces2[tag].append((label, face))
return Mesh(nlabels = nlabels,
coords = coords,
nsets = nsets,
elabels = elabels,
etypes = etypes,
connectivity = connectivity,
esets = esets,) | 0.034579 |
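For reference, a minimal hypothetical Abaqus-style input (a single CPS3 triangle) and how it would be read; keywords are case-insensitive because the reader lower-cases every line. Sketch only, following the parsing logic above:

```python
inp_text = """\
*NODE, NSET=ALL
1, 0., 0., 0.
2, 1., 0., 0.
3, 0., 1., 0.
*ELEMENT, TYPE=CPS3, ELSET=SOLID
1, 1, 2, 3
"""
with open("tiny.inp", "w") as f:   # hypothetical file name
    f.write(inp_text)

mesh = read_inp("tiny.inp")        # one 'tri3' element on three nodes
```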
def setComponentByName(self, name, value=noValue,
verifyConstraints=True,
matchTags=True,
matchConstraints=True):
"""Assign |ASN.1| type component by name.
Equivalent to Python :class:`dict` item assignment operation (e.g. `[]`).
Parameters
----------
name: :class:`str`
|ASN.1| type component name
Keyword Args
------------
value: :class:`object` or :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
A Python value to initialize |ASN.1| component with (if *componentType* is set)
or ASN.1 value object to assign to |ASN.1| component.
verifyConstraints: :class:`bool`
If `False`, skip constraints validation
matchTags: :class:`bool`
If `False`, skip component tags matching
matchConstraints: :class:`bool`
If `False`, skip component constraints matching
Returns
-------
self
"""
if self._componentTypeLen:
idx = self.componentType.getPositionByName(name)
else:
try:
idx = self._dynamicNames.getPositionByName(name)
except KeyError:
raise error.PyAsn1Error('Name %s not found' % (name,))
return self.setComponentByPosition(
idx, value, verifyConstraints, matchTags, matchConstraints
) | 0.005398 |
def fix_config(self, options):
"""
Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
:param options: the options to fix
:type options: dict
:return: the (potentially) fixed options
:rtype: dict
"""
options = super(ForLoop, self).fix_config(options)
opt = "min"
if opt not in options:
options[opt] = 1
if opt not in self.help:
self.help[opt] = "The minimum for the loop (included, int)."
opt = "max"
if opt not in options:
options[opt] = 10
if opt not in self.help:
self.help[opt] = "The maximum for the loop (included, int)."
opt = "step"
if opt not in options:
options[opt] = 1
if opt not in self.help:
self.help[opt] = "The step size (int)."
return options | 0.003272 |
def name_to_hex(name, spec=u'css3'):
"""
Convert a color name to a normalized hexadecimal color value.
The optional keyword argument ``spec`` determines which
specification's list of color names will be used; valid values are
``html4``, ``css2``, ``css21`` and ``css3``, and the default is
``css3``.
When no color of that name exists in the given specification,
``ValueError`` is raised.
"""
if spec not in SUPPORTED_SPECIFICATIONS:
raise ValueError(SPECIFICATION_ERROR_TEMPLATE.format(spec=spec))
normalized = name.lower()
hex_value = {u'css2': CSS2_NAMES_TO_HEX,
u'css21': CSS21_NAMES_TO_HEX,
u'css3': CSS3_NAMES_TO_HEX,
u'html4': HTML4_NAMES_TO_HEX}[spec].get(normalized)
if hex_value is None:
raise ValueError(
u"'{name}' is not defined as a named color in {spec}".format(
name=name, spec=spec
)
)
return hex_value | 0.001008 |
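A small usage sketch; 'navy' belongs to every supported specification, while an extended name such as 'goldenrod' should only resolve under CSS3:

```python
name_to_hex('navy')                # '#000080'
name_to_hex('navy', spec='html4')  # '#000080'
name_to_hex('goldenrod')           # '#daa520' (CSS3); raises ValueError for spec='html4'
```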
def pack_data(self, remaining_size):
"""Pack data. readoffset has to be increased by one, seems like HANA starts from 1, not zero."""
payload = self.part_struct.pack(self.locator_id, self.readoffset + 1, self.readlength, b' ')
return 4, payload | 0.01476 |
def init_app(self, app):
"""
Initializes a Flask app object for the extension.
Args:
app(Flask): Flask app
"""
app.config.setdefault('FEDORA_BASE_URL', 'http://localhost:8080')
if hasattr(app, 'teardown_appcontext'):
app.teardown_appcontext(self.teardown)
else:
app.teardown_request(self.teardown) | 0.005128 |
def error_asymptotes(pca,**kwargs):
"""
Plots asymptotic error bounds for
hyperbola on a stereonet.
"""
ax = kwargs.pop("ax",current_axes())
lon,lat = pca.plane_errors('upper', n=1000)
ax.plot(lon,lat,'-')
lon,lat = pca.plane_errors('lower', n=1000)
ax.plot(lon,lat,'-')
ax.plane(*pca.strike_dip()) | 0.026393 |
def gen_gmfs(self):
"""
Compute the GMFs for the given realization and
yields arrays of the dtype (sid, eid, imti, gmv), one for rupture
"""
self.sig_eps = []
for computer in self.computers:
rup = computer.rupture
sids = computer.sids
eids_by_rlz = rup.get_eids_by_rlz(self.rlzs_by_gsim)
data = []
for gs, rlzs in self.rlzs_by_gsim.items():
num_events = sum(len(eids_by_rlz[rlzi]) for rlzi in rlzs)
if num_events == 0:
continue
# NB: the trick for performance is to keep the call to
# compute.compute outside of the loop over the realizations
# it is better to have few calls producing big arrays
array, sig, eps = computer.compute(gs, num_events)
array = array.transpose(1, 0, 2) # from M, N, E to N, M, E
for i, miniml in enumerate(self.min_iml): # gmv < minimum
arr = array[:, i, :]
arr[arr < miniml] = 0
n = 0
for rlzi in rlzs:
eids = eids_by_rlz[rlzi]
e = len(eids)
if not e:
continue
for ei, eid in enumerate(eids):
gmf = array[:, :, n + ei] # shape (N, M)
tot = gmf.sum(axis=0) # shape (M,)
if not tot.sum():
continue
sigmas = sig[:, n + ei]
self.sig_eps.append((eid, sigmas, eps[:, n + ei]))
for sid, gmv in zip(sids, gmf):
if gmv.sum():
data.append((rlzi, sid, eid, gmv))
n += e
yield numpy.array(data, self.gmv_dt) | 0.00104 |
def endpoint(cls):
"""Return the :class:`sandman.model.Model`'s endpoint.
:rtype: string
"""
endpoint = ''
if cls.__endpoint__ is not None:
return cls.__endpoint__
elif cls.__from_class__ is not None:
endpoint = cls.__from_class__.__name__.lower()
else:
endpoint = cls.__tablename__.lower()
if not endpoint.endswith('s'):
endpoint += 's'
return endpoint | 0.004211 |
def setup_catalog_mappings(portal):
"""Setup portal_type -> catalog mappings
"""
logger.info("*** Setup Catalog Mappings ***")
at = api.get_tool("archetype_tool")
for portal_type, catalogs in CATALOG_MAPPINGS:
at.setCatalogsByType(portal_type, catalogs) | 0.003546 |
def main(arguments=None):
'''Converts a given url with the specified arguments.'''
parsed_options, arguments = get_options(arguments)
image_url = arguments[0]
image_url = quote(image_url)
try:
config = Config.load(None)
except Exception:
config = None
if not parsed_options.key and not config:
sys.stdout.write('Error: The -k or --key argument is mandatory. For more information type thumbor-url -h\n')
return
security_key, thumbor_params = get_thumbor_params(image_url, parsed_options, config)
crypto = CryptoURL(key=security_key)
url = crypto.generate(**thumbor_params)
sys.stdout.write('URL:\n')
sys.stdout.write('%s\n' % url)
return url | 0.004104 |
def get_application_logo_label(self):
"""
Provides the default **Application_Logo_label** widget.
:return: Application logo label.
:rtype: QLabel
"""
logo_label = QLabel()
logo_label.setObjectName("Application_Logo_label")
logo_label.setPixmap(QPixmap(umbra.ui.common.get_resource_path(UiConstants.logo_image)))
return logo_label | 0.007444 |
def rgb_percent_to_name(rgb_percent_triplet, spec=u'css3'):
"""
Convert a 3-tuple of percentages, suitable for use in an ``rgb()``
color triplet, to its corresponding normalized color name, if any
such name exists.
The optional keyword argument ``spec`` determines which
specification's list of color names will be used; valid values are
``html4``, ``css2``, ``css21`` and ``css3``, and the default is
``css3``.
If there is no matching name, ``ValueError`` is raised.
"""
return rgb_to_name(
rgb_percent_to_rgb(
normalize_percent_triplet(
rgb_percent_triplet
)
),
spec=spec
) | 0.001449 |
def reverse_code_map(self):
"""Return a map from a code ( usually a string ) to the shorter numeric value"""
return {c.value: (c.ikey if c.ikey else c.key) for c in self.codes} | 0.015464 |
def has_slave(self):
'''Returns True/False depending on whether we have a slave agency
that is not running standalone.'''
slave = first(x for x in self.slaves.itervalues()
if not x.is_standalone)
return slave is not None | 0.007663 |
def _check_ising_quadratic_ranges(quad_ranges, graph):
"""check correctness/populate defaults for ising_quadratic_ranges."""
if quad_ranges is None:
quad_ranges = {}
# first just populate the top level so we can rely on the structure
for u in graph:
if u not in quad_ranges:
quad_ranges[u] = {}
# next let's propagate and check what is already present
for u, neighbors in iteritems(quad_ranges):
for v, rang in iteritems(neighbors):
# check the range
rang = Specification._check_range(rang)
if u in quad_ranges[v]:
# it's symmetric
if quad_ranges[u][v] != quad_ranges[v][u]:
raise ValueError("mismatched ranges for ising_quadratic_ranges")
quad_ranges[v][u] = quad_ranges[u][v] = rang
# finally fill in the missing stuff
for u, v in graph.edges:
if u not in quad_ranges[v]:
quad_ranges[u][v] = quad_ranges[v][u] = [-1, 1]
return quad_ranges | 0.002669 |
def copy_table(tbl, start=0, stop=None, blen=None, storage=None,
create='table', **kwargs):
"""Copy `tbl` block-wise into a new table."""
# setup
names, columns = _util.check_table_like(tbl)
storage = _util.get_storage(storage)
blen = _util.get_blen_table(tbl, blen)
if stop is None:
stop = len(columns[0])
else:
stop = min(stop, len(columns[0]))
length = stop - start
if length < 0:
raise ValueError('invalid stop/start')
# copy block-wise
out = None
for i in range(start, stop, blen):
j = min(i+blen, stop)
res = [c[i:j] for c in columns]
if out is None:
out = getattr(storage, create)(res, names=names,
expectedlen=length, **kwargs)
else:
out.append(res)
return out | 0.001161 |
def db_connect(connection_string=None, **kwargs):
"""Function to supply a database connection object."""
if connection_string is None:
connection_string = get_current_registry().settings[CONNECTION_STRING]
db_conn = psycopg2.connect(connection_string, **kwargs)
try:
with db_conn:
yield db_conn
finally:
db_conn.close() | 0.002667 |
def objectConfusion(self):
"""
Compute overlap between each pair of objects. Computes the average number
of feature/location pairs that are identical, as well as the average number
of shared locations and features.
This function will raise an exception if two objects are identical.
Returns the tuple:
(avg common pairs, avg common locations, avg common features)
"""
objects = self.getObjects()
if len(objects) == 0:
return 0.0, 0.0, 0.0
sumCommonLocations = 0
sumCommonFeatures = 0
sumCommonPairs = 0
numObjects = 0
commonPairHistogram = numpy.zeros(len(objects[0]), dtype=numpy.int32)
for o1, s1 in objects.iteritems():
for o2, s2 in objects.iteritems():
if o1 != o2:
# Count number of common locations id's and common feature id's
commonLocations = 0
commonFeatures = 0
for pair1 in s1:
for pair2 in s2:
if pair1[0] == pair2[0]: commonLocations += 1
if pair1[1] == pair2[1]: commonFeatures += 1
# print "Confusion",o1,o2,", common pairs=",len(set(s1)&set(s2)),
# print ", common locations=",commonLocations,"common features=",commonFeatures
if len(set(s1) & set(s2)) == len(s1):
raise RuntimeError("Two objects are identical!")
sumCommonPairs += len(set(s1) & set(s2))
sumCommonLocations += commonLocations
sumCommonFeatures += commonFeatures
commonPairHistogram[len(set(s1) & set(s2))] += 1
numObjects += 1
# print "Common pair histogram=", commonPairHistogram
return (sumCommonPairs / float(numObjects),
sumCommonLocations / float(numObjects),
sumCommonFeatures / float(numObjects)
) | 0.011105 |
def gen_edge_knots(data, dtype, verbose=True):
"""
generate uniform knots from data including the edges of the data
for discrete data, assumes k categories in [0, k-1] interval
Parameters
----------
data : array-like with one dimension
dtype : str in {'categorical', 'numerical'}
verbose : bool, default: True
whether to print warnings
Returns
-------
np.array containing ordered knots
"""
if dtype not in ['categorical', 'numerical']:
raise ValueError('unsupported dtype: {}'.format(dtype))
if dtype == 'categorical':
return np.r_[np.min(data) - 0.5, np.max(data) + 0.5]
else:
knots = np.r_[np.min(data), np.max(data)]
if knots[0] == knots[1] and verbose:
warnings.warn('Data contains constant feature. '\
'Consider removing and setting fit_intercept=True',
stacklevel=2)
return knots | 0.002083 |
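A quick sketch of the two `dtype` branches, following directly from the code above:

```python
import numpy as np

gen_edge_knots(np.array([0, 1, 2, 2, 1]), 'categorical')  # array([-0.5,  2.5])
gen_edge_knots(np.array([3.0, 7.5, 9.1]), 'numerical')    # array([3. , 9.1])
```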
def _to_patches(self, X):
"""
Reshapes the input into patches the size of the classifier's receptive field.
For example:
input X shape: [n_samples, n_pixels_y, n_pixels_x, n_bands]
output: [n_samples * n_pixels_y/receptive_field_y * n_pixels_x/receptive_field_x,
receptive_field_y, receptive_field_x, n_bands]
"""
window = self.patch_size
asteps = self.patch_size
if len(X.shape) == 4:
window += (0,)
asteps += (1,)
image_view = rolling_window(X, window, asteps)
new_shape = image_view.shape
return image_view, new_shape | 0.004405 |
def _total_seconds(t):
'''
Takes a `datetime.timedelta` object and returns the delta in seconds.
>>> _total_seconds(datetime.timedelta(23, 42, 123456))
1987242
>>> _total_seconds(datetime.timedelta(23, 42, 654321))
1987243
'''
return sum([
int(t.days * 86400 + t.seconds),
int(round(t.microseconds / 1000000.0))
]) | 0.002725 |
def list_databases(self, name):
'''
List the SQL databases defined on the specified server name
'''
response = self._perform_get(self._get_list_databases_path(name),
None)
return _MinidomXmlToObject.parse_service_resources_response(
response, Database) | 0.005865 |
def set_output_fields(self, output_fields):
"""Defines where to put the dictionary output of the extractor in the doc, but renames
the fields of the extracted output for the document or just filters the keys"""
if isinstance(output_fields, dict) or isinstance(output_fields, list):
self.output_fields = output_fields
elif isinstance(output_fields, basestring):
self.output_field = output_fields
else:
raise ValueError("set_output_fields requires a dictionary of "
+ "output fields to remap, a list of keys to filter, or a scalar string")
return self | 0.007553 |
def value_to_string(self, obj):
"""Prepare field for serialization."""
if DJANGO_VERSION > (1, 9):
value = self.value_from_object(obj)
else:
value = self._get_val_from_obj(obj)
return self.get_prep_value(value) | 0.007519 |
def absent(name, orgname=None, profile='grafana'):
'''
Ensure the named grafana dashboard is absent.
name
Name of the grafana dashboard.
orgname
Name of the organization in which the dashboard should be present.
profile
Configuration profile used to connect to the Grafana instance.
Default is 'grafana'.
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
if isinstance(profile, six.string_types):
profile = __salt__['config.option'](profile)
existing_dashboard = __salt__['grafana4.get_dashboard'](
name, orgname, profile)
if existing_dashboard:
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Dashboard {0} is set to be deleted.'.format(name)
return ret
__salt__['grafana4.delete_dashboard'](name, profile=profile)
ret['comment'] = 'Dashboard {0} deleted.'.format(name)
ret['changes']['new'] = 'Dashboard {0} deleted.'.format(name)
return ret
ret['comment'] = 'Dashboard absent'
return ret | 0.000907 |
def add_edge(self, fro, to):
"""
When doing topological sorting, the semantics of the edge mean that
the dependency runs from the parent to the child - which is to say that
the parent is required to be sorted *before* the child.
[ FROM ] ------> [ TO ]
Committee on Finance -> Subcommittee of the Finance Committee on Budget
-> Subcommittee of the Finance Committee on Roads
"""
self.add_node(fro)
self.add_node(to)
self.edges[fro].add(to) | 0.003584 |
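A minimal sketch of the edge semantics described in the docstring; `graph` is a hypothetical instance of the containing class:

```python
graph.add_edge('Committee on Finance',
               'Subcommittee of the Finance Committee on Budget')
graph.add_edge('Committee on Finance',
               'Subcommittee of the Finance Committee on Roads')
# A topological sort of `graph` now places the parent committee
# before either of its subcommittees.
```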
def _build(self, inputs, memory, treat_input_as_matrix=False):
"""Adds relational memory to the TensorFlow graph.
Args:
inputs: Tensor input.
memory: Memory output from the previous time step.
treat_input_as_matrix: Optional, whether to treat `input` as a sequence
of matrices. Defaults to False, in which case the input is flattened
into a vector.
Returns:
output: This time step's output.
next_memory: The next version of memory to use.
"""
if treat_input_as_matrix:
inputs = basic.BatchFlatten(preserve_dims=2)(inputs)
inputs_reshape = basic.BatchApply(
basic.Linear(self._mem_size), n_dims=2)(inputs)
else:
inputs = basic.BatchFlatten()(inputs)
inputs = basic.Linear(self._mem_size)(inputs)
inputs_reshape = tf.expand_dims(inputs, 1)
memory_plus_input = tf.concat([memory, inputs_reshape], axis=1)
next_memory = self._attend_over_memory(memory_plus_input)
n = inputs_reshape.get_shape().as_list()[1]
next_memory = next_memory[:, :-n, :]
if self._gate_style == 'unit' or self._gate_style == 'memory':
self._input_gate, self._forget_gate = self._create_gates(
inputs_reshape, memory)
next_memory = self._input_gate * tf.tanh(next_memory)
next_memory += self._forget_gate * memory
output = basic.BatchFlatten()(next_memory)
return output, next_memory | 0.006325 |
def jr6_jr6(mag_file, dir_path=".", input_dir_path="",
meas_file="measurements.txt", spec_file="specimens.txt",
samp_file="samples.txt", site_file="sites.txt", loc_file="locations.txt",
specnum=1, samp_con='1', location='unknown', lat='', lon='',
noave=False, meth_code="LP-NO", volume=12, JR=False, user=""):
"""
Convert JR6 .jr6 files to MagIC file(s)
Parameters
----------
mag_file : str
input file name
dir_path : str
working directory, default "."
input_dir_path : str
input file directory IF different from dir_path, default ""
meas_file : str
output measurement file name, default "measurements.txt"
spec_file : str
output specimen file name, default "specimens.txt"
samp_file: str
output sample file name, default "samples.txt"
site_file : str
output site file name, default "sites.txt"
loc_file : str
output location file name, default "locations.txt"
specnum : int
number of characters to designate a specimen, default 0
samp_con : str
sample/site naming convention, default '1', see info below
location : str
location name, default "unknown"
lat : float
latitude, default ""
lon : float
longitude, default ""
noave : bool
do not average duplicate measurements, default False (so by default, DO average)
meth_code : str
colon-delimited method codes, default "LP-NO"
volume : float
volume in ccs, default 12
JR : bool
IODP samples were measured on the JOIDES RESOLUTION, default False
user : str
user name, default ""
Returns
---------
Tuple : (True or False indicating if conversion was successful, meas_file name written)
Info
--------
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name same as sample
[6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
"""
version_num = pmag.get_version()
input_dir_path, output_dir_path = pmag.fix_directories(input_dir_path, dir_path)
specnum = - int(specnum)
samp_con = str(samp_con)
volume = float(volume) * 1e-6
# need to add these
meas_file = pmag.resolve_file_name(meas_file, output_dir_path)
spec_file = pmag.resolve_file_name(spec_file, output_dir_path)
samp_file = pmag.resolve_file_name(samp_file, output_dir_path)
site_file = pmag.resolve_file_name(site_file, output_dir_path)
loc_file = pmag.resolve_file_name(loc_file, output_dir_path)
mag_file = pmag.resolve_file_name(mag_file, input_dir_path)
if JR:
if meth_code == "LP-NO":
meth_code = ""
meth_code = meth_code+":FS-C-DRILL-IODP:SP-SS-C:SO-V"
meth_code = meth_code.strip(":")
samp_con = '5'
# format variables
tmp_file = mag_file.split(os.extsep)[0]+os.extsep+'tmp'
mag_file = pmag.resolve_file_name(mag_file, input_dir_path)
if samp_con.startswith("4"):
if "-" not in samp_con:
print("option [4] must be in form 4-Z where Z is an integer")
return False, "naming convention option [4] must be in form 4-Z where Z is an integer"
else:
Z = samp_con.split("-")[1]
samp_con = "4"
elif samp_con.startswith("7"):
if "-" not in samp_con:
print("option [7] must be in form 7-Z where Z is an integer")
return False, "naming convention option [7] must be in form 7-Z where Z is an integer"
else:
Z = samp_con.split("-")[1]
samp_con = "7"
else:
Z = 1
# parse data
# fix .jr6 file so that there are spaces between all the columns.
pre_data = open(mag_file, 'r')
tmp_data = open(tmp_file, 'w')
if samp_con != '2':
fixed_data = pre_data.read().replace('-', ' -')
else:
fixed_data = ""
for line in pre_data.readlines():
entries = line.split()
if len(entries) < 2:
continue
fixed_line = entries[0] + ' ' + reduce(
lambda x, y: x+' '+y, [x.replace('-', ' -') for x in entries[1:]])
fixed_data += fixed_line+os.linesep
tmp_data.write(fixed_data)
tmp_data.close()
pre_data.close()
if not JR:
column_names = ['specimen', 'step', 'x', 'y', 'z', 'expon', 'azimuth', 'dip', 'bed_dip_direction',
'bed_dip', 'bed_dip_dir2', 'bed_dip2', 'param1', 'param2', 'param3', 'param4', 'dir_csd']
else: # measured on the Joides Resolution JR6
column_names = ['specimen', 'step', 'negz', 'y', 'x', 'expon', 'azimuth', 'dip', 'bed_dip_direction',
'bed_dip', 'bed_dip_dir2', 'bed_dip2', 'param1', 'param2', 'param3', 'param4', 'dir_csd']
data = pd.read_csv(tmp_file, delim_whitespace=True,
names=column_names, index_col=False)
if isinstance(data['x'][0], str):
column_names = ['specimen', 'step', 'step_unit', 'x', 'y', 'z', 'expon', 'azimuth', 'dip', 'bed_dip_direction',
'bed_dip', 'bed_dip_dir2', 'bed_dip2', 'param1', 'param2', 'param3', 'param4', 'dir_csd']
data = pd.read_csv(tmp_file, delim_whitespace=True,
names=column_names, index_col=False)
if JR:
data['z'] = -data['negz']
cart = np.array([data['x'], data['y'], data['z']]).transpose()
dir_dat = pmag.cart2dir(cart).transpose()
data['dir_dec'] = dir_dat[0]
data['dir_inc'] = dir_dat[1]
# the data are in A/m - this converts to Am^2
data['magn_moment'] = dir_dat[2]*(10.0**data['expon'])*volume
data['magn_volume'] = dir_dat[2] * \
(10.0**data['expon']) # A/m - data in A/m
data['dip'] = -data['dip']
data['specimen']
# put data into magic tables
MagRecs, SpecRecs, SampRecs, SiteRecs, LocRecs = [], [], [], [], []
for rowNum, row in data.iterrows():
MeasRec, SpecRec, SampRec, SiteRec, LocRec = {}, {}, {}, {}, {}
specimen = row['specimen']
if specnum != 0:
sample = specimen[:specnum]
else:
sample = specimen
site = pmag.parse_site(sample, samp_con, Z)
if specimen != "" and specimen not in [x['specimen'] if 'specimen' in list(x.keys()) else "" for x in SpecRecs]:
SpecRec['specimen'] = specimen
SpecRec['sample'] = sample
SpecRec["citations"] = "This study"
SpecRec["analysts"] = user
SpecRec['volume'] = volume
SpecRecs.append(SpecRec)
if sample != "" and sample not in [x['sample'] if 'sample' in list(x.keys()) else "" for x in SampRecs]:
SampRec['sample'] = sample
SampRec['site'] = site
SampRec["citations"] = "This study"
SampRec["analysts"] = user
SampRec['azimuth'] = row['azimuth']
SampRec['dip'] = row['dip']
SampRec['bed_dip_direction'] = row['bed_dip_direction']
SampRec['bed_dip'] = row['bed_dip']
SampRec['method_codes'] = meth_code
SampRecs.append(SampRec)
if site != "" and site not in [x['site'] if 'site' in list(x.keys()) else "" for x in SiteRecs]:
SiteRec['site'] = site
SiteRec['location'] = location
SiteRec["citations"] = "This study"
SiteRec["analysts"] = user
SiteRec['lat'] = lat
SiteRec['lon'] = lon
SiteRecs.append(SiteRec)
if location != "" and location not in [x['location'] if 'location' in list(x.keys()) else "" for x in LocRecs]:
LocRec['location'] = location
LocRec["citations"] = "This study"
LocRec["analysts"] = user
LocRec['lat_n'] = lat
LocRec['lon_e'] = lon
LocRec['lat_s'] = lat
LocRec['lon_w'] = lon
LocRecs.append(LocRec)
MeasRec["citations"] = "This study"
MeasRec["analysts"] = user
MeasRec["specimen"] = specimen
MeasRec['software_packages'] = version_num
MeasRec["treat_temp"] = '%8.3e' % (273) # room temp in kelvin
MeasRec["meas_temp"] = '%8.3e' % (273) # room temp in kelvin
MeasRec["quality"] = 'g'
MeasRec["standard"] = 'u'
MeasRec["treat_step_num"] = 0
MeasRec["treat_ac_field"] = '0'
if row['step'] == 'NRM':
meas_type = "LT-NO"
elif 'step_unit' in row and row['step_unit'] == 'C':
meas_type = "LT-T-Z"
treat = float(row['step'])
MeasRec["treat_temp"] = '%8.3e' % (treat+273.) # temp in kelvin
elif row['step'][0:2] == 'AD':
meas_type = "LT-AF-Z"
treat = float(row['step'][2:])
MeasRec["treat_ac_field"] = '%8.3e' % (
treat*1e-3) # convert from mT to tesla
elif row['step'][0] == 'A':
meas_type = "LT-AF-Z"
treat = float(row['step'][1:])
MeasRec["treat_ac_field"] = '%8.3e' % (
treat*1e-3) # convert from mT to tesla
elif row['step'][0:2] == 'TD':
meas_type = "LT-T-Z"
treat = float(row['step'][2:])
MeasRec["treat_temp"] = '%8.3e' % (treat+273.) # temp in kelvin
elif row['step'][0] == 'T':
meas_type = "LT-T-Z"
treat = float(row['step'][1:])
MeasRec["treat_temp"] = '%8.3e' % (treat+273.) # temp in kelvin
else: # need to add IRM, and ARM options
print("measurement type unknown", row['step'])
return False, "measurement type unknown"
MeasRec["magn_moment"] = str(row['magn_moment'])
MeasRec["magn_volume"] = str(row['magn_volume'])
MeasRec["dir_dec"] = str(row['dir_dec'])
MeasRec["dir_inc"] = str(row['dir_inc'])
MeasRec['method_codes'] = meas_type
MagRecs.append(MeasRec)
con = cb.Contribution(output_dir_path, read_tables=[])
con.add_magic_table_from_data(dtype='specimens', data=SpecRecs)
con.add_magic_table_from_data(dtype='samples', data=SampRecs)
con.add_magic_table_from_data(dtype='sites', data=SiteRecs)
con.add_magic_table_from_data(dtype='locations', data=LocRecs)
MeasOuts = pmag.measurements_methods3(MagRecs, noave)
con.add_magic_table_from_data(dtype='measurements', data=MeasOuts)
con.tables['specimens'].write_magic_file(custom_name=spec_file)
con.tables['samples'].write_magic_file(custom_name=samp_file)
con.tables['sites'].write_magic_file(custom_name=site_file)
con.tables['locations'].write_magic_file(custom_name=loc_file)
con.tables['measurements'].write_magic_file(custom_name=meas_file)
try:
os.remove(tmp_file)
except (OSError, IOError) as e:
print("couldn't remove temperary fixed JR6 file %s" % tmp_file)
return True, meas_file | 0.001745 |
def wait_for_vacancy(self, processor_type):
"""Waits for a particular processor type to have the capacity to
handle additional transactions or until is_cancelled is True.
Args:
processor_type (ProcessorType): The family, and version of
the transaction processor.
Returns:
Processor
"""
with self._condition:
self._condition.wait_for(lambda: (
self._processor_available(processor_type)
or self._cancelled_event.is_set()))
if self._cancelled_event.is_set():
raise WaitCancelledException()
processor = self[processor_type].next_processor()
return processor | 0.002703 |
def notify_ready(self, apply_result):
"""Called by the ApplyResult object (already registered via
register_result()) that it is now ready (ie. the Job's result
is available or an exception has been raised).
\param apply_result ApplyResult object telling us that the job
has been processed
"""
got_first = False
got_last = False
self._lock.acquire()
try:
assert self._remaining > 0
got_first = len(self._results) == self._remaining
self._remaining -= 1
got_last = self._remaining == 0
finally:
self._lock.release()
if self._to_notify is not None:
if self._as_iterator and got_first:
self._to_notify._set_value(iter(self))
elif not self._as_iterator and got_last:
try:
lst = [r.get(0) for r in self._results]
except:
self._to_notify._set_exception()
else:
self._to_notify._set_value(lst) | 0.00367 |
def getAssociation(self, assoc_handle, dumb, checkExpiration=True):
"""Get the association with the specified handle.
@type assoc_handle: str
@param dumb: Is this association used with dumb mode?
@type dumb: bool
@returns: the association, or None if no valid association with that
handle was found.
@returntype: L{openid.association.Association}
"""
# Hmm. We've created an interface that deals almost entirely with
# assoc_handles. The only place outside the Signatory that uses this
# (and thus the only place that ever sees Association objects) is
# when creating a response to an association request, as it must have
# the association's secret.
if assoc_handle is None:
raise ValueError("assoc_handle must not be None")
if dumb:
key = self._dumb_key
else:
key = self._normal_key
assoc = self.store.getAssociation(key, assoc_handle)
if assoc is not None and assoc.expiresIn <= 0:
logging.info("requested %sdumb key %r is expired (by %s seconds)" %
((not dumb) and 'not-' or '',
assoc_handle, assoc.expiresIn))
if checkExpiration:
self.store.removeAssociation(key, assoc_handle)
assoc = None
return assoc | 0.002126 |
def mutate_node(node, context):
"""
:type context: Context
"""
context.stack.append(node)
try:
if node.type in ('tfpdef', 'import_from', 'import_name'):
return
if node.start_pos[0] - 1 != context.current_line_index:
context.current_line_index = node.start_pos[0] - 1
context.index = 0 # indexes are unique per line, so start over here!
if hasattr(node, 'children'):
mutate_list_of_nodes(node, context=context)
# this is just an optimization to stop early
if context.number_of_performed_mutations and context.mutation_id != ALL:
return
mutation = mutations_by_type.get(node.type)
if mutation is None:
return
for key, value in sorted(mutation.items()):
old = getattr(node, key)
if context.exclude_line():
continue
new = evaluate(
value,
context=context,
node=node,
value=getattr(node, 'value', None),
children=getattr(node, 'children', None),
)
assert not callable(new)
if new is not None and new != old:
if context.should_mutate():
context.number_of_performed_mutations += 1
context.performed_mutation_ids.append(context.mutation_id_of_current_index)
setattr(node, key, new)
context.index += 1
# this is just an optimization to stop early
if context.number_of_performed_mutations and context.mutation_id != ALL:
return
finally:
context.stack.pop() | 0.002882 |
def _building_cost(self, use_mix, stories):
"""
Generate building cost for a set of buildings
Parameters
----------
use_mix : array
The mix of uses for this form
stories : series
A Pandas Series of stories
Returns
-------
array
The cost per sqft for this unit mix and height.
"""
c = self.config
# stories to heights
heights = stories * c.height_per_story
# cost index for this height
costs = np.searchsorted(c.heights_for_costs, heights)
# this will get set to nan later
costs[np.isnan(heights)] = 0
# compute cost with matrix multiply
costs = np.dot(np.squeeze(c.costs[costs.astype('int32')]), use_mix)
# some heights aren't allowed - cost should be nan
costs[np.isnan(stories).flatten()] = np.nan
return costs.flatten() | 0.002134 |
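To illustrate the `searchsorted` lookup used above with hypothetical numbers: it returns, for each height, how many cost breakpoints lie strictly below it.

```python
import numpy as np

heights_for_costs = np.array([15, 55, 120, 280])   # hypothetical breakpoints
heights = np.array([12.0, 40.0, 130.0])
np.searchsorted(heights_for_costs, heights)        # array([0, 1, 3])
```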
def build_definitions_example(self):
"""Parse all definitions in the swagger specification."""
for def_name, def_spec in self.specification.get('definitions', {}).items():
self.build_one_definition_example(def_name) | 0.012346 |
def situation_parameters(self):
"""
Situation parameters defining detection logic for the context.
This will return a list of SituationParameter indicating how
the detection is made, i.e. regular expression, integer value,
etc.
:rtype: list(SituationParameter)
"""
for param in self.data.get('situation_parameters', []):
cache = ElementCache(data=self.make_request(href=param))
yield type('SituationParameter', (SituationParameter,), {
'data': cache})(name=cache.name, type=cache.type, href=param) | 0.004918 |
def this(obj, **kwargs):
"""Prints series of debugging steps to user.
Runs through pipeline of functions and print results of each.
"""
verbose = kwargs.get("verbose", True)
if verbose:
print('{:=^30}'.format(" whatis.this? "))
for func in pipeline:
s = func(obj, **kwargs)
if s is not None:
print(s)
if verbose:
print('{:=^30}\n'.format(" whatis.this? ")) | 0.002315 |
def frequencies_plot(self, xmin=0, xmax=200):
""" Generate the qualities plot """
helptext = '''
A possible way to assess the complexity of a library even in
absence of a reference sequence is to look at the kmer profile of the reads.
The idea is to count all the kmers (_i.e._, sequence of length `k`) that occur
in the reads. In this way it is possible to know how many kmers occur
`1,2,.., N` times and represent this as a plot.
This plot tell us for each x, how many k-mers (y-axis) are present in the
dataset in exactly x-copies.
In an ideal world (no errors in sequencing, no bias, no repeated regions)
this plot should be as close as possible to a gaussian distribution.
In reality we will always see a peak for `x=1` (_i.e._, the errors)
and another peak close to the expected coverage. If the genome is highly
heterozygous a second peak at half of the coverage can be expected.'''
pconfig = {
'id': 'Jellyfish_kmer_plot',
'title': 'Jellyfish: K-mer plot',
'ylab': 'Counts',
'xlab': 'k-mer frequency',
'xDecimals': False,
'xmin': xmin,
'xmax': xmax
}
self.add_section(
anchor = 'jellyfish_kmer_plot',
description = 'The K-mer plot lets you estimate library complexity and coverage from k-mer content.',
helptext = helptext,
plot = linegraph.plot(self.jellyfish_data, pconfig)
) | 0.011765 |
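As a toy illustration of the k-mer counting idea described in the help text (this is not how Jellyfish itself computes the spectrum):

```python
from collections import Counter

def kmer_spectrum(reads, k=3):
    """Count k-mers, then count how many distinct k-mers occur exactly x times."""
    kmer_counts = Counter(read[i:i + k]
                          for read in reads
                          for i in range(len(read) - k + 1))
    return Counter(kmer_counts.values())

kmer_spectrum(["ACGTACGT", "ACGTTTTT"], k=3)  # three k-mers occur 3 times, three occur once
```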
def symbols_bollinger(symbols='sp5002012',
start=datetime.datetime(2008, 1, 1), end=datetime.datetime(2009, 12, 31), price_type='adjusted_close', cleaner=clean_dataframe,
window=20, sigma=1.):
"""Calculate the Bolinger for a list or set of symbols
Example:
>>> symbols_bollinger(["AAPL", "GOOG", "IBM", "MSFT"], '10-12-01', '10-12-30')[-5:] # doctest: +NORMALIZE_WHITESPACE
GOOG AAPL IBM MSFT
2010-12-23 16:00:00 1.298178 1.185009 1.177220 1.237684
2010-12-27 16:00:00 1.073603 1.371298 0.590403 0.932911
2010-12-28 16:00:00 0.745548 1.436278 0.863406 0.812844
2010-12-29 16:00:00 0.874885 1.464894 2.096242 0.752602
2010-12-30 16:00:00 0.634661 0.793493 1.959324 0.498395
"""
symbols = normalize_symbols(symbols)
prices = price_dataframe(symbols, start=start, end=end, price_type=price_type, cleaner=cleaner)
return frame_bollinger(prices, window=window, sigma=sigma, plot=False) | 0.006979 |
def update_tcs(self):
"""
Periodically update TCS info.
A long running process, so run in a thread and fill a queue
"""
g = get_root(self).globals
if not g.cpars['tcs_on']:
self.after(20000, self.update_tcs)
return
if g.cpars['telins_name'] == 'WHT':
tcsfunc = tcs.getWhtTcs
elif g.cpars['telins_name'] == 'GTC':
tcsfunc = tcs.getGtcTcs
else:
g.clog.debug('TCS error: could not recognise ' +
g.cpars['telins_name'])
return
def tcs_threaded_update():
try:
ra, dec, pa, focus = tcsfunc()
self.tcs_data_queue.put((ra, dec, pa, focus))
except Exception as err:
t, v, tb = sys.exc_info()
error = traceback.format_exception_only(t, v)[0].strip()
tback = 'TCS Traceback (most recent call last):\n' + \
''.join(traceback.format_tb(tb))
g.FIFO.put(('TCS', error, tback))
t = threading.Thread(target=tcs_threaded_update)
t.start()
self.after(20000, self.update_tcs) | 0.00166 |
def prt_report_grp1(self, prt=sys.stdout, **kws_grp):
"""Print full GO/gene report with grouping."""
summaryline = self.str_summaryline()
# Print grouped GO IDs
prt.write("{SUMMARY}\n".format(SUMMARY=summaryline))
self.prt_gos_grouped(prt, **kws_grp)
# genes
genes = sorted(self.gene2gos.keys())
prt.write("\n\n{SUMMARY}\n\n".format(SUMMARY=summaryline))
self.prt_section_key(prt)
self.prt_gene_aart(genes, prt)
# Sort genes
prt.write("\n\n{SUMMARY}\n\n".format(SUMMARY=summaryline))
self.prt_gene_aart_details(genes, prt)
return (self.name, self.get_section_marks()) | 0.002946 |
def _float_check(self, attribute_array, value, irow, key):
'''Checks if value is valid float, appends to array if valid, appends
nan if not'''
value = value.strip(' ')
try:
if value:
attribute_array = np.hstack([attribute_array, float(value)])
else:
attribute_array = np.hstack([attribute_array, np.nan])
except:
print(irow, key)
msg = 'Input file format error at line: %d' % (irow + 2)
msg += ' key: %s' % (key)
raise ValueError(msg)
return attribute_array | 0.004926 |
def unsubscribe(request, watch_id):
"""Unsubscribe from (i.e. delete) the watch of ID ``watch_id``.
Expects an ``s`` querystring parameter matching the watch's secret.
GET will result in a confirmation page (or a failure page if the secret is
wrong). POST will actually delete the watch (again, if the secret is
correct).
Uses these templates:
* tidings/unsubscribe.html - Asks user to confirm deleting a watch
* tidings/unsubscribe_error.html - Shown when a watch is not found
* tidings/unsubscribe_success.html - Shown when a watch is deleted
The shipped templates assume a ``head_title`` and a ``content`` block
in a ``base.html`` template.
The template extension can be changed from the default ``html`` using
the setting :data:`~django.conf.settings.TIDINGS_TEMPLATE_EXTENSION`.
"""
ext = getattr(settings, 'TIDINGS_TEMPLATE_EXTENSION', 'html')
# Grab the watch and secret; complain if either is wrong:
try:
watch = Watch.objects.get(pk=watch_id)
# 's' is for 'secret' but saves wrapping in mails
secret = request.GET.get('s')
if secret != watch.secret:
raise Watch.DoesNotExist
except Watch.DoesNotExist:
return render(request, 'tidings/unsubscribe_error.' + ext)
if request.method == 'POST':
watch.delete()
return render(request, 'tidings/unsubscribe_success.' + ext)
return render(request, 'tidings/unsubscribe.' + ext) | 0.000674 |
def directory(context, data):
"""Store the collected files to a given directory."""
with context.http.rehash(data) as result:
if not result.ok:
return
content_hash = data.get('content_hash')
if content_hash is None:
context.emit_warning("No content hash in data.")
return
path = _get_directory_path(context)
file_name = data.get('file_name', result.file_name)
file_name = safe_filename(file_name, default='raw')
file_name = '%s.%s' % (content_hash, file_name)
data['_file_name'] = file_name
file_path = os.path.join(path, file_name)
if not os.path.exists(file_path):
shutil.copyfile(result.file_path, file_path)
context.log.info("Store [directory]: %s", file_name)
meta_path = os.path.join(path, '%s.json' % content_hash)
with open(meta_path, 'w') as fh:
json.dump(data, fh) | 0.001053 |
def where_entry_first(query, ref):
""" Generate a where clause where this is the first entry
ref -- the entry of reference
"""
return orm.select(
e for e in query
if e.local_date > ref.local_date or
(e.local_date == ref.local_date and
e.id >= ref.id
)
) | 0.003165 |
def read_until_done(self, command, timeout=None):
"""Yield messages read until we receive a 'DONE' command.
Read messages of the given command until we receive a 'DONE' command. If a
command different than the requested one is received, an AdbProtocolError
is raised.
Args:
command: The command to expect, like 'DENT' or 'DATA'.
timeout: The timeouts.PolledTimeout to use for this operation.
Yields:
Messages read, of type self.RECV_MSG_TYPE, see read_message().
Raises:
AdbProtocolError: If an unexpected command is read.
AdbRemoteError: If a 'FAIL' message is read.
"""
message = self.read_message(timeout)
while message.command != 'DONE':
message.assert_command_is(command)
yield message
message = self.read_message(timeout) | 0.004878 |
def process_lines( self, input_lines, **kwargs ):
        ''' Executes the pipeline of subsequent VISL_CG3 commands. The first process
            in the pipeline gets input_lines as its input, and each subsequent process gets
            the output of the previous process as its input.
The idea of how to construct the pipeline borrows from:
https://github.com/estnltk/estnltk/blob/1.4.0/estnltk/syntax/tagger.py
Returns the result of the last process in the pipeline, either as a string
or, alternatively, as a list of strings (if split_result == True);
Parameters
-----------
input_lines : list of str
The input text for the pipeline; Should be in same format as the output
of SyntaxPreprocessing;
split_result : bool
Optional argument specifying whether the result should be split by
newlines, and returned as a list of strings/lines instead;
Default:False
remove_info : bool
Optional argument specifying whether the additional information added
during the preprocessing and syntactic processing should be removed
from the results;
Default:True;
The method cleanup_lines() will be used for removing additional info,
and all the parameters passed to this method will be also forwarded to
the cleanup method;
'''
split_result_lines = False
remove_info = True
for argName, argVal in kwargs.items() :
if argName in ['split_result_lines', 'split_result'] and argVal in [True, False]:
split_result_lines = argVal
if argName in ['remove_info', 'info_remover', 'clean_up'] and argVal in [True, False]:
remove_info = argVal
# 1) Construct the input file for the first process in the pipeline
temp_input_file = \
tempfile.NamedTemporaryFile(prefix='vislcg3_in.', mode='w', delete=False)
temp_input_file.close()
# We have to open separately here for writing, because Py 2.7 does not support
# passing parameter encoding='utf-8' to the NamedTemporaryFile;
out_f = codecs.open(temp_input_file.name, mode='w', encoding='utf-8')
for line in input_lines:
out_f.write( line.rstrip() )
out_f.write( '\n' )
out_f.close()
# TODO: tempfile is currently used to ensure that the input is in 'utf-8',
# but perhaps we can somehow ensure it without using tempfile ??
# 2) Dynamically construct the pipeline and open processes
pipeline = []
for i in range( len(self.rules_pipeline) ):
rule_file = self.rules_pipeline[i]
process_cmd = [self.vislcg_cmd, '-o', '-g', os.path.join(self.rules_dir, rule_file)]
process = None
if i == 0:
# The first process takes input from the file
process_cmd.extend( ['-I', temp_input_file.name] )
process = Popen(process_cmd, stdin=PIPE, stdout=PIPE)
else:
# A subsequent process takes output of the last process as an input
process = Popen(process_cmd, stdin=pipeline[-1]['process'].stdout, stdout=PIPE)
# Record the process
process_dict = {'process':process, 'cmd':process_cmd}
pipeline.append( process_dict )
# 3) Close all stdout streams, except the last one
for i in range( len(pipeline) ):
if i != len(pipeline) - 1:
pipeline[i]['process'].stdout.close()
# 4) Communicate results form the last item in the pipeline
result = as_unicode( pipeline[-1]['process'].communicate()[0] )
pipeline[-1]['process'].stdout.close() # Close the last process
# Clean-up
# 1) remove temp file
os.remove(temp_input_file.name)
# 2) remove additional info, if required
if remove_info:
result = '\n'.join( cleanup_lines( result.split('\n'), **kwargs ))
return result if not split_result_lines else result.split('\n') | 0.014627 |
def classes(request):
"""Get all classes of current user"""
if not request.user.is_authenticated() or not hasattr(request.user, "userprofile"):
return render_json(request, {
'error': _('User is not logged in'),
'error_type': 'user_unauthorized'
}, template='user_json.html', status=401)
clss = [c.to_json() for c in Class.objects.filter(owner=request.user.userprofile)]
return render_json(request, clss, status=200, template='user_json.html', help_text=classes.__doc__) | 0.00759 |
def load(file):
"""
This function expects a path to a file containing a
**Detailed billing report with resources and tags**
report from AWS.
It returns a ``Costs`` object containing all of the lineitems
from that detailed billing report
"""
fp = open(file)
reader = csv.reader(fp)
headers = next(reader)
costs = Costs(headers)
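    # Map each CSV row onto its headers and coerce the cost columns to Decimal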
for line in reader:
data = {}
for i in range(0, len(headers)):
data[headers[i]] = line[i]
data['UnBlendedCost'] = decimal.Decimal(data['UnBlendedCost'])
data['BlendedCost'] = decimal.Decimal(data['BlendedCost'])
costs.add(data)
fp.close()
return costs | 0.001456 |
def groupby_with_null(data, *args, **kwargs):
"""
Groupby on columns with NaN/None/Null values
    Pandas currently does not have proper support for
    groupby on columns with null values. The nulls
    are discarded and so not grouped on.
"""
by = kwargs.get('by', args[0])
altered_columns = {}
if not isinstance(by, (list, tuple)):
by = [by]
# Convert NaNs & Nones in the grouping columns
    # to some unique string value. And, for those
# columns record which rows have been converted
# Note: this may affect the dtype of the column,
# so we record the dtype too. Both these changes
# are undone.
for col in by:
bool_idx = pd.isnull(data[col])
idx = bool_idx.index[bool_idx]
if idx.size:
altered_columns[col] = (idx, data[col].dtype)
data.loc[idx, col] = '-*-null-*-'
# Groupby on the columns, making sure to revert back
# to NaN/None and the correct dtype.
for group, df in data.groupby(*args, **kwargs):
for col, (orig_idx, orig_dtype) in altered_columns.items():
# Indices in the grouped df that need correction
sub_idx = orig_idx.intersection(df[col].index)
# NaN/None
if sub_idx.size:
df.loc[sub_idx, col] = None
# dtype
if df[col].dtype != orig_dtype:
df[col] = df[col].astype(orig_dtype)
yield group, df
# Undo the NaN / None conversion and any dtype
# changes on the original dataframe
for col, (orig_idx, orig_dtype) in altered_columns.items():
data.loc[orig_idx, col] = None
if data[col].dtype != orig_dtype:
data[col] = data[col].astype(orig_dtype) | 0.000574 |
def is_set(self, key):
"""Return True if variable is a set"""
data = self.model.get_data()
return isinstance(data[key], set) | 0.013245 |
def wait(self, timeout=None):
"""Wait for command to complete.
Timeout:
- discussion: http://stackoverflow.com/questions/1191374/subprocess-with-timeout
- implementation: threading
:rtype: self
"""
if timeout is not None:
if not self._thread:
self._thread = threading.Thread(target=self._wait4process)
self._thread.daemon = 1
self._thread.start()
if self._thread:
self._thread.join(timeout=timeout)
            self.timeout_happened = self.timeout_happened or self._thread.is_alive()
else:
# no timeout and no existing thread
self._wait4process()
return self | 0.005398 |
def gchart(self, s = 0, size = [], candle = 20):
""" Chart for serious stocks
        Outputs a Google Chart image URL.
        s = number of data points
        size = chart [width, height]
        candle = width of each candlestick (K bar)
"""
if s == 0:
s = len(self.raw_data)
if len(size) == 2:
sw,sh = size
else:
sh = 300
sw = 25 * s
if sw > 1000:
sw = 1000
candle = 950/s
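        # Build the comma-separated data series (c/o/h/l) and date-axis labels used in the chart URL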
stc = ''
for i in self.raw_data[-s:]:
stc += str(i) + ','
sto = ''
for i in self.stock_open[-s:]:
sto += str(i) + ','
sth = ''
for i in self.stock_h[-s:]:
sth += str(i) + ','
stl = ''
for i in self.stock_l[-s:]:
stl += str(i) + ','
stdate = ''
for i in self.data_date[-s:]:
stdate += str(i[-2:]) + '|'
stmax = max(self.stock_h[-s:])
stmin = min(self.stock_l[-s:])
strange = (stmax-stmin) / 10
re = "http://%(rand)s.chart.apis.google.com/chart?chs=%(sw)sx%(sh)s&cht=lc&chd=t1:0,0,0|0,%(h)s0|0,%(c)s0|0,%(o)s0|0,%(l)s0&chm=F,,1,1:-1,%(candle)s&chxt=y,x&chds=%(min)s,%(max)s&chxr=0,%(min)s,%(max)s,%(range)s&chg=20,%(chg)s&chtt=%(chtt)s&chxl=1:||%(chxl)s" % {
'h': sth,
'c': stc,
'o': sto,
'l': stl,
'min': stmin,
'max': stmax,
'sw': sw,
'sh': sh,
'range': strange,
'candle': candle,
'chg': 10,
'rand': random.randint(0,9),
'chxl': stdate,
'chtt': '%s %s' % (self.stock_name,self.stock_no)
}
return re | 0.014393 |
def _get_cookie(self, mgmt_ip, config, refresh=False):
"""Performs authentication and retries cookie."""
if mgmt_ip not in self.credentials:
return None
security_data = self.credentials[mgmt_ip]
verify = security_data[const.HTTPS_CERT_TUPLE]
if not verify:
verify = security_data[const.HTTPS_VERIFY_TUPLE]
if not refresh and security_data[const.COOKIE_TUPLE]:
return security_data[const.COOKIE_TUPLE], verify
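        # Credentials payload for the aaaLogin authentication request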
payload = {"aaaUser": {"attributes": {
"name": security_data[const.UNAME_TUPLE],
"pwd": security_data[const.PW_TUPLE]}}}
headers = {"Content-type": "application/json", "Accept": "text/plain"}
url = "{0}://{1}/api/aaaLogin.json".format(DEFAULT_SCHEME, mgmt_ip)
try:
response = self.session.request('POST',
url,
data=jsonutils.dumps(payload),
headers=headers,
verify=verify,
timeout=self.timeout * 2)
except Exception as e:
raise cexc.NexusConnectFailed(nexus_host=mgmt_ip,
exc=e)
self.status = response.status_code
if response.status_code == requests.codes.OK:
cookie = response.headers.get('Set-Cookie')
security_data = (
security_data[const.UNAME_TUPLE:const.COOKIE_TUPLE] +
(cookie,))
self.credentials[mgmt_ip] = security_data
return cookie, verify
else:
e = "REST API connect returned Error code: "
e += str(self.status)
raise cexc.NexusConnectFailed(nexus_host=mgmt_ip,
exc=e) | 0.004894 |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'word') and self.word is not None:
_dict['word'] = self.word
if hasattr(self, 'sounds_like') and self.sounds_like is not None:
_dict['sounds_like'] = self.sounds_like
if hasattr(self, 'display_as') and self.display_as is not None:
_dict['display_as'] = self.display_as
return _dict | 0.004264 |
def dec2hms(dec):
"""
ADW: This should really be replaced by astropy
"""
DEGREE = 360.
HOUR = 24.
MINUTE = 60.
SECOND = 3600.
dec = float(dec)
fhour = dec*(HOUR/DEGREE)
hour = int(fhour)
fminute = (fhour - hour)*MINUTE
minute = int(fminute)
second = (fminute - minute)*MINUTE
return (hour, minute, second) | 0.008065 |
def mcs_to_rate(mcs, bw=20, long_gi=True):
"""Convert MCS index to rate in Mbps.
See http://mcsindex.com/
Args:
mcs (int): MCS index
bw (int): bandwidth, 20, 40, 80, ...
long_gi(bool): True if long GI is used.
Returns:
rate (float): bitrate in Mbps
>>> mcs_to_rate(5, bw=20, long_gi=False)
57.8
>>> mcs_to_rate(4, bw=40, long_gi=True)
81
>>> mcs_to_rate(3, bw=80, long_gi=False)
130
>>> mcs_to_rate(13, bw=160, long_gi=True)
936
"""
if bw not in [20, 40, 80, 160]:
raise Exception("Unknown bandwidth: %d MHz" % (bw))
if mcs not in MCS_TABLE:
raise Exception("Unknown MCS: %d" % (mcs))
idx = int((math.log(bw/10, 2)-1)*2)
if not long_gi:
idx += 1
return MCS_TABLE[mcs][idx] | 0.001236 |
def state(self):
"""
State of this instance. One of ``OFFLINE``, ``INITIALIZING``,
``INITIALIZED``, ``STARTING``, ``RUNNING``, ``STOPPING`` or
``FAILED``.
"""
if self._proto.HasField('state'):
return yamcsManagement_pb2.YamcsInstance.InstanceState.Name(self._proto.state)
return None | 0.008547 |
def convert_to_LHC(imt):
"""
Converts from GMRotI50 to Larger of two horizontal components using
global equation of:
Boore, D and Kishida, T (2016). Relations between some horizontal-
component ground-motion intensity measures used in practice.
Bulletin of the Seismological Society of America, 107(1), 334-343.
doi:10.1785/0120160250
No standard deviation modification required.
"""
# get period t
if isinstance(imt, SA):
t = imt.period
else:
t = 0.01
T1 = 0.08
T2 = 0.56
T3 = 4.40
T4 = 8.70
R1 = 1.106
R2 = 1.158
R3 = 1.178
R4 = 1.241
R5 = 1.241
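    # Piecewise log-linear interpolation of the conversion ratio across the period breakpoints T1-T4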
Ratio = max(R1,
max(min(R1+(R2-R1)/np.log(T2/T1)*np.log(t/T1),
R2+(R3-R2)/np.log(T3/T2)*np.log(t/T2)),
min(R3+(R4-R3)/np.log(T4/T3)*np.log(t/T3), R5)))
SF = np.log(Ratio)
return SF | 0.001107 |
def parse_log(self, bowtie_log):
"""Parse a bowtie log file.
This is a bowtie log parsing method that populates the
:py:attr:`self.n_reads, self.align_0x, self.align_1x, self.align_mt1x and self.overall_rate` attributes with
data from the log file.
        Disclaimer: THIS METHOD IS HORRIBLE BECAUSE THE BOWTIE LOG IS HORRIBLE.
        The insertion of data on the attributes is done by the
        :py:meth:`set_attribute` method.
        Parameters
        ----------
        bowtie_log : str
            Path to the bowtie log file.
"""
print("is here!")
# Regexes - thanks to https://github.com/ewels/MultiQC/blob/master/multiqc/modules/bowtie2/bowtie2.py
regexes = {
'unpaired': {
                'unpaired_aligned_none': r"(\d+) \([\d\.]+%\) aligned 0 times",
                'unpaired_aligned_one': r"(\d+) \([\d\.]+%\) aligned exactly 1 time",
                'unpaired_aligned_multi': r"(\d+) \([\d\.]+%\) aligned >1 times"
},
'paired': {
                'paired_aligned_none': r"(\d+) \([\d\.]+%\) aligned concordantly 0 times",
                'paired_aligned_one': r"(\d+) \([\d\.]+%\) aligned concordantly exactly 1 time",
                'paired_aligned_multi': r"(\d+) \([\d\.]+%\) aligned concordantly >1 times",
                'paired_aligned_discord_one': r"(\d+) \([\d\.]+%\) aligned discordantly 1 time",
                'paired_aligned_discord_multi': r"(\d+) \([\d\.]+%\) aligned discordantly >1 times",
                'paired_aligned_mate_one': r"(\d+) \([\d\.]+%\) aligned exactly 1 time",
                'paired_aligned_mate_multi': r"(\d+) \([\d\.]+%\) aligned >1 times",
                'paired_aligned_mate_none': r"(\d+) \([\d\.]+%\) aligned 0 times"
}
}
#Missing parser for unpaired (not implemented in flowcraft yet)
with open(bowtie_log, "r") as f:
#Go through log file line by line
for l in f:
                # total reads
                total = re.search(r"(\d+) reads; of these:", l)
                if total:
                    self.set_n_reads(total.group(1))
                # Paired end reads aka the pain
                paired = re.search(r"(\d+) \([\d\.]+%\) were paired; of these:", l)
if paired:
paired_total = int(paired.group(1))
paired_numbers = {}
# Do nested loop whilst we have this level of indentation
l = f.readline()
while l.startswith(' '):
for k, r in regexes['paired'].items():
match = re.search(r, l)
if match:
paired_numbers[k] = int(match.group(1))
l = f.readline()
align_zero_times = paired_numbers['paired_aligned_none'] + paired_numbers['paired_aligned_mate_none']
if align_zero_times:
self.set_align_0x(align_zero_times)
align_one_time = paired_numbers['paired_aligned_one'] + paired_numbers['paired_aligned_mate_one']
if align_one_time:
self.set_align_1x(align_one_time)
align_more_than_one_time = paired_numbers['paired_aligned_multi'] + paired_numbers['paired_aligned_mate_multi']
if align_more_than_one_time:
self.set_align_mt1x(align_more_than_one_time)
# Overall alignment rate
                overall = re.search(r"([\d\.]+)% overall alignment rate", l)
if overall:
self.overall_rate = float(overall.group(1)) | 0.007156 |
def evaluate(tensor: BKTensor) -> TensorLike:
"""Return the value of a tensor"""
if isinstance(tensor, _DTYPE):
if torch.numel(tensor) == 1:
return tensor.item()
        # Real and imaginary parts are stacked along the first dimension; recombine them.
        return tensor[0].cpu().numpy() + 1.0j * tensor[1].cpu().numpy()
return tensor | 0.002577 |
def league_header(self, league):
"""Prints the league header"""
league_name = " {0} ".format(league)
click.secho("{:=^62}".format(league_name), fg=self.colors.MISC)
click.echo() | 0.009569 |
def refund_order(self, order_id, **params):
"""https://developers.coinbase.com/api/v2#refund-an-order"""
for required in ['currency']:
if required not in params:
raise ValueError("Missing required parameter: %s" % required)
response = self._post('v2', 'orders', order_id, 'refund', data=params)
return self._make_api_object(response, Order) | 0.005 |
def prior_prior_model_dict(self):
"""
Returns
-------
prior_prior_model_dict: {Prior: PriorModel}
A dictionary mapping priors to associated prior models. Each prior will only have one prior model; if a
prior is shared by two prior models then one of those prior models will be in this dictionary.
"""
return {prior: prior_model[1] for prior_model in self.prior_model_tuples for _, prior in
prior_model[1].prior_tuples} | 0.009881 |
def extent(self):
"""
Return the source range (the range of text) occupied by the entity
pointed at by the cursor.
"""
if not hasattr(self, '_extent'):
self._extent = conf.lib.clang_getCursorExtent(self)
return self._extent | 0.007042 |
def add_stats_plot(self):
"""Plots alignment stats as bargraph."""
keys = OrderedDict()
keys['species_a'] = {'color': '#437bb1', 'name': 'Species a'}
keys['species_b'] = {'color': '#b1084c', 'name': 'Species b'}
keys['ambiguous'] = {'color': '#333333', 'name': 'Ambiguous'}
plot_config = {
'id': "disambiguated_alignments",
'title': "Disambiguate: Alignment Counts",
'cpswitch_counts_label': "# Reads",
'ylab': "# Reads"
}
self.add_section(
plot=bargraph.plot(self.data, keys, plot_config)
) | 0.003195 |
def get_display_names_metadata(self):
"""Gets the metadata for all display_names.
return: (osid.Metadata) - metadata for the display_names
*compliance: mandatory -- This method must be implemented.*
"""
metadata = dict(self._display_names_metadata)
metadata.update({'existing_string_values': [t['text'] for t in self.my_osid_object_form._my_map['displayNames']]})
return Metadata(**metadata) | 0.006682 |
def get_value(self, row, column):
"""Return the value of the DataFrame."""
# To increase the performance iat is used but that requires error
# handling, so fallback uses iloc
try:
value = self.df.iat[row, column]
except OutOfBoundsDatetime:
value = self.df.iloc[:, column].astype(str).iat[row]
except:
value = self.df.iloc[row, column]
return value | 0.006667 |
def multi_stream_iter(client, log_group, streams, positions=None):
"""Iterate over the available events coming from a set of log streams in a single log group
interleaving the events from each stream so they're yielded in timestamp order.
Args:
client (boto3 client): The boto client for logs.
log_group (str): The name of the log group.
streams (list of str): A list of the log stream names. The position of the stream in this list is
the stream number.
positions: (list of Positions): A list of pairs of (timestamp, skip) which represents the last record
read from each stream.
Yields:
A tuple of (stream number, cloudwatch log event).
"""
positions = positions or {s: Position(timestamp=0, skip=0) for s in streams}
event_iters = [log_stream(client, log_group, s, positions[s].timestamp, positions[s].skip) for s in streams]
events = []
for s in event_iters:
if not s:
events.append(None)
continue
try:
events.append(next(s))
except StopIteration:
events.append(None)
while some(events):
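        # Yield the buffered event with the earliest timestamp, then refill that slot from its stream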
i = argmin(events, lambda x: x['timestamp'] if x else 9999999999)
yield (i, events[i])
try:
events[i] = next(event_iters[i])
except StopIteration:
events[i] = None | 0.004885 |
def filter_leading_non_json_lines(buf):
'''
used to avoid random output from SSH at the top of JSON output, like messages from
    tcgetattr, or where dropbear spews MOTD on every single command (which is nuts).
    need to filter out any leading line (including empty lines) that does not start with '{' or '[' and does not contain '='.
filter only leading lines since multiline JSON is valid.
'''
filtered_lines = StringIO.StringIO()
stop_filtering = False
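    # Once a JSON-looking line is seen, stop filtering and keep everything that follows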
for line in buf.splitlines():
if stop_filtering or "=" in line or line.startswith('{') or line.startswith('['):
stop_filtering = True
filtered_lines.write(line + '\n')
return filtered_lines.getvalue() | 0.007267 |
def add_renderer(self, klass, *args, **kwargs):
'''Add a renderer to the current scene.
**Parameter**
klass: renderer class
The renderer class to be added
args, kwargs:
Arguments used by the renderer constructor,
except for the *widget* argument.
.. seealso:: :py:class:`~chemlab.graphics.renderers.AbstractRenderer`
.. seealso:: :doc:`/api/chemlab.graphics.renderers`
**Return**
        The instantiated renderer. You should keep the return value to
be able to update the renderer at run-time.
'''
renderer = klass(self.widget, *args, **kwargs)
self.widget.renderers.append(renderer)
return renderer | 0.007802 |
async def _deferred_init(self):
"""
Register the web hook onto which Telegram should send its messages.
"""
hook_path = self.make_hook_path()
url = urljoin(settings.BERNARD_BASE_URL, hook_path)
await self.call('setWebhook', url=url)
logger.info('Setting Telegram webhook to "%s"', url) | 0.005848 |
def decode(self, encoded, parentFieldName=''):
""" See the function description in base.py
"""
assert (encoded[0:self.n] <= 1.0).all()
resultString = ""
resultRanges = []
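    # Overlap of the encoded bits with each category's stored SDR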
overlaps = (self.sdrs * encoded[0:self.n]).sum(axis=1)
if self.verbosity >= 2:
print "Overlaps for decoding:"
for i in xrange(0, self.ncategories):
print "%d %s" % (overlaps[i], self.categories[i])
matchingCategories = (overlaps > self.thresholdOverlap).nonzero()[0]
for index in matchingCategories:
if resultString != "":
resultString += " "
resultString += str(self.categories[index])
resultRanges.append([int(index),int(index)])
if parentFieldName != '':
fieldName = "%s.%s" % (parentFieldName, self.name)
else:
fieldName = self.name
return ({fieldName: (resultRanges, resultString)}, [fieldName]) | 0.01464 |
def plotMultipleInferenceRun(stats,
fields,
basename,
plotDir="plots"):
"""
Plots individual inference runs.
"""
if not os.path.exists(plotDir):
os.makedirs(plotDir)
plt.figure()
colorList = ['r', 'b', 'g', 'm', 'c', 'k', 'y']
# plot request stats
for i, field in enumerate(fields):
fieldKey = field[0] + " C0"
trace = []
for s in stats:
trace += s[fieldKey]
plt.plot(trace, label=field[1], color=colorList[i])
# format
plt.legend(loc="upper right")
plt.xlabel("Input number")
plt.xticks(range(0, len(stats)*stats[0]["numSteps"]+1,5))
plt.ylabel("Number of cells")
plt.ylim(-5, 55)
plt.title("Inferring combined sensorimotor and temporal sequence stream")
# save
relPath = "{}_exp_combined.pdf".format(basename)
path = os.path.join(plotDir, relPath)
plt.savefig(path)
plt.close() | 0.026115 |
def regroup_vectorized(srccat, eps, far=None, dist=norm_dist):
"""
Regroup the islands of a catalog according to their normalised distance.
Assumes srccat is recarray-like for efficiency.
Return a list of island groups.
Parameters
----------
    srccat : np.rec.array or pd.DataFrame
Should have the following fields[units]:
ra[deg],dec[deg], a[arcsec],b[arcsec],pa[deg], peak_flux[any]
eps : float
maximum normalised distance within which sources are considered to be
grouped
far : float
(degrees) sources that are further than this distance apart will not
be grouped, and will not be tested.
Default = 0.5.
dist : func
a function that calculates the distance between a source and each
element of an array of sources.
Default = :func:`AegeanTools.cluster.norm_dist`
Returns
-------
islands : list of lists
        Each island contains integer indices for members from srccat
(in descending dec order).
"""
if far is None:
far = 0.5 # 10*max(a.a/3600 for a in srccat)
# most negative declination first
# XXX: kind='mergesort' ensures stable sorting for determinism.
# Do we need this?
order = np.argsort(srccat.dec, kind='mergesort')[::-1]
# TODO: is it better to store groups as arrays even if appends are more
# costly?
groups = [[order[0]]]
for idx in order[1:]:
rec = srccat[idx]
# TODO: Find out if groups are big enough for this to give us a speed
# gain. If not, get distance to all entries in groups above
# decmin simultaneously.
decmin = rec.dec - far
for group in reversed(groups):
# when an island's largest (last) declination is smaller than
# decmin, we don't need to look at any more islands
if srccat.dec[group[-1]] < decmin:
# new group
groups.append([idx])
rafar = far / np.cos(np.radians(rec.dec))
group_recs = np.take(srccat, group, mode='clip')
group_recs = group_recs[abs(rec.ra - group_recs.ra) <= rafar]
if len(group_recs) and dist(rec, group_recs).min() < eps:
group.append(idx)
break
else:
# new group
groups.append([idx])
# TODO?: a more numpy-like interface would return only an array providing
# the mapping:
# group_idx = np.empty(len(srccat), dtype=int)
# for i, group in enumerate(groups):
# group_idx[group] = i
# return group_idx
return groups | 0.000375 |
def resolve_nested_schema(self, schema):
"""Return the Open API representation of a marshmallow Schema.
Adds the schema to the spec if it isn't already present.
Typically will return a dictionary with the reference to the schema's
path in the spec unless the `schema_name_resolver` returns `None`, in
        which case the returned dictionary will contain a JSON Schema Object
representation of the schema.
:param schema: schema to add to the spec
"""
schema_instance = resolve_schema_instance(schema)
schema_key = make_schema_key(schema_instance)
if schema_key not in self.refs:
schema_cls = self.resolve_schema_class(schema)
name = self.schema_name_resolver(schema_cls)
if not name:
try:
json_schema = self.schema2jsonschema(schema)
except RuntimeError:
raise APISpecError(
"Name resolver returned None for schema {schema} which is "
"part of a chain of circular referencing schemas. Please"
" ensure that the schema_name_resolver passed to"
" MarshmallowPlugin returns a string for all circular"
" referencing schemas.".format(schema=schema)
)
if getattr(schema, "many", False):
return {"type": "array", "items": json_schema}
return json_schema
name = get_unique_schema_name(self.spec.components, name)
self.spec.components.schema(name, schema=schema)
return self.get_ref_dict(schema_instance) | 0.002336 |
def blocks_to_mark_complete_on_view(self, blocks):
"""
Returns a set of blocks which should be marked complete on view and haven't been yet.
"""
blocks = {block for block in blocks if self.can_mark_block_complete_on_view(block)}
completions = self.get_completions({block.location for block in blocks})
return {block for block in blocks if completions.get(block.location, 0) < 1.0} | 0.014019 |
def use_plenary_resource_view(self):
"""Pass through to provider ResourceLookupSession.use_plenary_resource_view"""
self._object_views['resource'] = PLENARY
# self._get_provider_session('resource_lookup_session') # To make sure the session is tracked
for session in self._get_provider_sessions():
try:
session.use_plenary_resource_view()
except AttributeError:
pass | 0.00883 |
def task_list():
"""
    Scans the modules set in RQ_JOBS_MODULE for RQ jobs decorated with @task
Compiles a readable list for Job model task choices
"""
try:
jobs_module = settings.RQ_JOBS_MODULE
except AttributeError:
raise ImproperlyConfigured(_("You have to define RQ_JOBS_MODULE in settings.py"))
if isinstance(jobs_module, string_types):
jobs_modules = (jobs_module,)
elif isinstance(jobs_module, (tuple, list)):
jobs_modules = jobs_module
else:
raise ImproperlyConfigured(_("RQ_JOBS_MODULE must be a string or a tuple"))
choices = []
for module in jobs_modules:
try:
tasks = importlib.import_module(module)
except ImportError:
raise ImproperlyConfigured(_("Can not find module {}").format(module))
module_choices = [('%s.%s' % (module, x), underscore_to_camelcase(x)) for x, y in list(tasks.__dict__.items())
if type(y) == FunctionType and hasattr(y, 'delay')]
choices.extend(module_choices)
choices.sort(key=lambda tup: tup[1])
return choices | 0.004433 |
def annToRLE(self, ann):
"""
Convert annotation which can be polygons, uncompressed RLE to RLE.
        :return: RLE encoding of the annotation's segmentation
"""
t = self.imgs[ann['image_id']]
h, w = t['height'], t['width']
segm = ann['segmentation']
if type(segm) == list:
# polygon -- a single object might consist of multiple parts
# we merge all parts into one mask rle code
rles = maskUtils.frPyObjects(segm, h, w)
rle = maskUtils.merge(rles)
elif type(segm['counts']) == list:
# uncompressed RLE
rle = maskUtils.frPyObjects(segm, h, w)
else:
# rle
rle = ann['segmentation']
return rle | 0.002667 |
def _GetRoutingMap(self, router):
"""Returns a routing map for a given router instance."""
try:
routing_map = self._routing_maps_cache.Get(router.__class__)
except KeyError:
routing_map = self._BuildHttpRoutingMap(router.__class__)
self._routing_maps_cache.Put(router.__class__, routing_map)
return routing_map | 0.011561 |
def bot_has_any_role(*items):
"""Similar to :func:`.has_any_role` except checks if the bot itself has
any of the roles listed.
This check raises one of two special exceptions, :exc:`.BotMissingAnyRole` if the bot
is missing all roles, or :exc:`.NoPrivateMessage` if it is used in a private message.
Both inherit from :exc:`.CheckFailure`.
.. versionchanged:: 1.1.0
Raise :exc:`.BotMissingAnyRole` or :exc:`.NoPrivateMessage`
        instead of a generic :exc:`.CheckFailure`
"""
def predicate(ctx):
ch = ctx.channel
if not isinstance(ch, discord.abc.GuildChannel):
raise NoPrivateMessage()
me = ch.guild.me
getter = functools.partial(discord.utils.get, me.roles)
if any(getter(id=item) is not None if isinstance(item, int) else getter(name=item) is not None for item in items):
return True
raise BotMissingAnyRole(items)
return check(predicate) | 0.004193 |