def ChiSquared(target_frequency):
"""Score a text by comparing its frequency distribution against another.
Note:
It is easy to be penalised without knowing it when using this scorer.
English frequency ngrams use capital letters, so any text you score
against must be all capitals for this scorer to give correct results.
I am aware of the issue and will work on a fix.
Todo:
Maybe include a parameter for ngram size. Haven't had a use case for this yet.
Once there is evidence it is needed, I will add it.
Example:
>>> fitness = ChiSquared(english.unigrams)
>>> fitness("ABC")
-32.2
Args:
target_frequency (dict): symbol to frequency mapping of the distribution to compare with
"""
def inner(text):
text = ''.join(text)
return -chi_squared(frequency_analyze(text), target_frequency)
return inner
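# A minimal sketch of the two helpers ChiSquared relies on. The names come from
# the code above; the bodies below are illustrative assumptions, not the
# library's actual implementations.
from collections import Counter

def frequency_analyze(text):
    # Count occurrences of each symbol in the text.
    return Counter(text)

def chi_squared(observed, target_frequency):
    # Chi-squared statistic of observed symbol counts against expected counts,
    # assuming target_frequency maps each symbol to its relative frequency.
    total = sum(observed.values())
    score = 0.0
    for symbol, frequency in target_frequency.items():
        expected = frequency * total
        if expected:
            diff = observed.get(symbol, 0) - expected
            score += diff * diff / expected
    return score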
def _unwrap_to_layer(r, L, n=1):
"""For a set of points in a 2 dimensional periodic system, extend the set of
points to tile the points up to to a given period.
Parameters
----------
r: float array, shape (:, 2).
Set of points.
L: float array, shape (2,)
System lengths.
n: integer.
Period to unwrap up to.
Returns
-------
rcu: float array, shape (:, 2).
The set of points, tiled up to the periods at a distance `n` from the
origin.
"""
rcu = []
for i_n in range(n + 1):
rcu.extend(_unwrap_one_layer(r, L, i_n))
return rcu
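# For intuition, a self-contained illustration of what one layer of periodic
# unwrapping produces in 2D. This sketches the idea only; it is not the
# _unwrap_one_layer helper used above.
import numpy as np

def tile_periodic_once(r, L):
    # Copy the points into the central cell plus its 8 first-layer neighbours.
    r = np.asarray(r, dtype=float)
    L = np.asarray(L, dtype=float)
    images = []
    for dx in (-1, 0, 1):
        for dy in (-1, 0, 1):
            images.append(r + np.array([dx, dy]) * L)
    return np.concatenate(images)

points = np.array([[0.1, 0.2], [0.7, 0.9]])
print(tile_periodic_once(points, L=np.array([1.0, 1.0])).shape)  # (18, 2)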
def remove(self):
"""Remove duplicate lines from text files"""
num, sp, newfile = 0, "", []
if os.path.isfile(self.filename):
with open(self.filename, "r") as r:
oldfile = r.read().splitlines()
for line in oldfile:
if self.number:
num += 1
sp = ": "
if self.case_ins:
line = line.lower()
if self.ignore_blank and not line:
newfile.append(line)
elif line not in newfile:
newfile.append(line)
else:
if (self.args[0] in self.options[4:5] or
self.args[0] in self.options[6:7]):
if num == 0:
num = str()
print("{0}{1}{2}".format(num, sp, line))
if self.args[0] not in self.options[6:7]:
with open(self.filename, "w") as w:
for line in newfile:
w.write(line + "\n")
else:
self.not_access()
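# A simplified, standalone sketch of the core idea above: order-preserving
# de-duplication with optional case-insensitive matching and blank-line
# passthrough. The option handling and file I/O of the class method are omitted.
def dedup_lines(lines, case_insensitive=False, keep_blank=True):
    seen = set()
    result = []
    for line in lines:
        key = line.lower() if case_insensitive else line
        if keep_blank and not line:
            result.append(line)      # blank lines are always kept
        elif key not in seen:
            seen.add(key)
            result.append(line)
    return result

print(dedup_lines(["a", "A", "a", "", "b", ""], case_insensitive=True))
# ['a', '', 'b', '']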
def rpc_fix_code(self, source, directory):
"""Formats Python code to conform to the PEP 8 style guide.
"""
source = get_source(source)
return fix_code(source, directory)
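# A hedged sketch of what the fix_code helper called above might look like,
# assuming it wraps autopep8; the real helper may also use 'directory' to
# locate per-project configuration.
import autopep8

def fix_code(source, directory=None):
    # Reformat the source string according to PEP 8.
    return autopep8.fix_code(source)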
def _register_name(self, name):
"""Get register name.
"""
if name not in self._var_name_mappers:
self._var_name_mappers[name] = VariableNamer(name)
def buildlist(self, enabled):
"""Run dialog buildlist
"""
choice = []
for item in self.data:
choice.append((item, False))
for item in enabled:
choice.append((item, True))
items = [(tag, tag, sta) for (tag, sta) in choice]
code, self.tags = self.d.buildlist(
text=self.text, items=items, visit_items=True, item_help=False,
title=self.title)
if code == "ok":
self.unicode_to_string()
return self.ununicode
if code in ["cancel", "esc"]:
self.exit()
async def set_room_temperatures(self, room_id, sleep_temp=None,
comfort_temp=None, away_temp=None):
"""Set room temps."""
if sleep_temp is None and comfort_temp is None and away_temp is None:
return
room = self.rooms.get(room_id)
if room is None:
_LOGGER.error("No such device")
return
room.sleep_temp = sleep_temp if sleep_temp else room.sleep_temp
room.away_temp = away_temp if away_temp else room.away_temp
room.comfort_temp = comfort_temp if comfort_temp else room.comfort_temp
payload = {"roomId": room_id,
"sleepTemp": room.sleep_temp,
"comfortTemp": room.comfort_temp,
"awayTemp": room.away_temp,
"homeType": 0}
await self.request("changeRoomModeTempInfo", payload)
self.rooms[room_id] = room
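# Hypothetical usage sketch: 'connection' is assumed to be an initialised
# client whose rooms mapping has already been fetched, and "room-123" is a
# made-up room id.
import asyncio

async def lower_comfort_temp(connection):
    # Only comfort_temp is passed, so sleep and away temperatures keep their
    # current values.
    await connection.set_room_temperatures("room-123", comfort_temp=21.5)

# asyncio.run(lower_comfort_temp(connection))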
def load(self):
""" Function load
Get the list of all objects
@return RETURN: A ForemanItem list
"""
return {x[self.index]: self.itemType(self.api, x['id'],
self.objName, self.payloadObj,
x)
for x in self.api.list(self.objName,
                                                   limit=self.searchLimit)}
def run(self, dag):
"""Expand 3+ qubit gates using their decomposition rules.
Args:
dag(DAGCircuit): input dag
Returns:
DAGCircuit: output dag with maximum node degrees of 2
Raises:
QiskitError: if a 3q+ gate is not decomposable
"""
for node in dag.threeQ_or_more_gates():
# TODO: allow choosing other possible decompositions
rule = node.op.definition
if not rule:
raise QiskitError("Cannot unroll all 3q or more gates. "
"No rule to expand instruction %s." %
node.op.name)
# hacky way to build a dag on the same register as the rule is defined
# TODO: need anonymous rules to address wires by index
decomposition = DAGCircuit()
decomposition.add_qreg(rule[0][1][0][0])
for inst in rule:
decomposition.apply_operation_back(*inst)
decomposition = self.run(decomposition) # recursively unroll
dag.substitute_node_with_dag(node, decomposition)
return dag
def getaddrinfo_wrapper(host, port, family=socket.AF_INET, socktype=0, proto=0, flags=0):
"""Patched 'getaddrinfo' with default family IPv4 (enabled by settings IPV4_ONLY=True)"""
return orig_getaddrinfo(host, port, family, socktype, proto, flags)
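# How the wrapper above is typically installed (assumption: this patching is
# gated by an IPV4_ONLY setting in the application's configuration).
import socket

orig_getaddrinfo = socket.getaddrinfo      # captured before patching
socket.getaddrinfo = getaddrinfo_wrapper   # force IPv4 lookups process-wide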
def run(self):
"""Run DDP greenlets."""
self.logger.debug('PostgresGreenlet run')
self.start()
self._stop_event.wait()
# wait for all threads to stop.
gevent.joinall(self.threads + [DDPLauncher.pgworker])
self.threads = []
def score(self, data, data_ref, graph=None):
"""Compute the reconstruction loss over the test set.
Parameters
----------
data : array_like
Data to reconstruct.
data_ref : array_like
Reference data.
Returns
-------
float: Mean error.
"""
g = graph if graph is not None else self.tf_graph
with g.as_default():
with tf.Session() as self.tf_session:
self.tf_saver.restore(self.tf_session, self.model_path)
feed = {
self.input_data: data,
self.input_labels: data_ref,
self.keep_prob: 1
}
return self.cost.eval(feed)
def _signature_hash(self, tx_out_script, unsigned_txs_out_idx, hash_type):
"""
Return the canonical hash for a transaction. We need to
remove references to the signature, since it's a signature
of the hash before the signature is applied.
:param tx_out_script: the script the coins for unsigned_txs_out_idx are coming from
:param unsigned_txs_out_idx: where to put the tx_out_script
:param hash_type: one of SIGHASH_NONE, SIGHASH_SINGLE, SIGHASH_ALL,
optionally bitwise or'ed with SIGHASH_ANYONECANPAY
"""
# In case concatenating two scripts ends up with two codeseparators,
# or an extra one at the end, this prevents all those possible incompatibilities.
tx_out_script = self.delete_subscript(tx_out_script, self.ScriptTools.compile("OP_CODESEPARATOR"))
# blank out other inputs' signatures
txs_in = [self._tx_in_for_idx(i, tx_in, tx_out_script, unsigned_txs_out_idx)
for i, tx_in in enumerate(self.tx.txs_in)]
txs_out = self.tx.txs_out
# Blank out some of the outputs
if (hash_type & 0x1f) == SIGHASH_NONE:
# Wildcard payee
txs_out = []
# Let the others update at will
for i in range(len(txs_in)):
if i != unsigned_txs_out_idx:
txs_in[i].sequence = 0
elif (hash_type & 0x1f) == SIGHASH_SINGLE:
# This preserves the ability to validate existing legacy
# transactions which followed a buggy path in Satoshi's
# original code.
if unsigned_txs_out_idx >= len(txs_out):
# This should probably be moved to a constant, but the
# likelihood of ever getting here is already really small
# and getting smaller
return (1 << 248)
# Only lock in the txout payee at same index as txin; delete
# any outputs after this one and set all outputs before this
# one to "null" (where "null" means an empty script and a
# value of -1)
txs_out = [self.tx.TxOut(0xffffffffffffffff, b'')] * unsigned_txs_out_idx
txs_out.append(self.tx.txs_out[unsigned_txs_out_idx])
# Let the others update at will
for i in range(len(txs_in)):
if i != unsigned_txs_out_idx:
txs_in[i].sequence = 0
# Blank out other inputs completely, not recommended for open transactions
if hash_type & SIGHASH_ANYONECANPAY:
txs_in = [txs_in[unsigned_txs_out_idx]]
tmp_tx = self.tx.__class__(self.tx.version, txs_in, txs_out, self.tx.lock_time)
return from_bytes_32(tmp_tx.hash(hash_type=hash_type))
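# For reference, the flag arithmetic used above with the standard Bitcoin
# SIGHASH constants (shown here for illustration; the real module imports them
# rather than redefining them).
SIGHASH_ALL = 1
SIGHASH_NONE = 2
SIGHASH_SINGLE = 3
SIGHASH_ANYONECANPAY = 0x80

hash_type = SIGHASH_SINGLE | SIGHASH_ANYONECANPAY
base_mode = hash_type & 0x1f                             # 3 -> SIGHASH_SINGLE
anyone_can_pay = bool(hash_type & SIGHASH_ANYONECANPAY)  # True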
def processFolder(abfFolder):
"""call processAbf() for every ABF in a folder."""
if not isinstance(abfFolder, str) or len(abfFolder) <= 3:
return
files=sorted(glob.glob(abfFolder+"/*.abf"))
for i,fname in enumerate(files):
print("\n\n\n### PROCESSING {} of {}:".format(i,len(files)),os.path.basename(fname))
processAbf(fname,show=False)
plt.show()
return
def to_json(self):
"""Returns an input shard state for the remaining inputs.
Returns:
A JSON serializable version of the remaining input to read.
"""
params = dict(self.__params) # Shallow copy.
if self._PROTOTYPE_REQUEST_PARAM in params:
prototype_request = params[self._PROTOTYPE_REQUEST_PARAM]
params[self._PROTOTYPE_REQUEST_PARAM] = prototype_request.Encode()
if self._OFFSET_PARAM in params:
params[self._OFFSET_PARAM] = base64.b64encode(params[self._OFFSET_PARAM])
return params
def frombinary(path, shape=None, dtype=None, ext='bin', start=None, stop=None, recursive=False, nplanes=None, npartitions=None, labels=None, conf='conf.json', order='C', engine=None, credentials=None):
"""
Load images from flat binary files.
Assumes one image per file, each with the shape and ordering as given
by the input arguments.
Parameters
----------
path : str
Path to data files or directory, specified as either a local filesystem path
or in a URI-like format, including scheme. May include a single '*' wildcard character.
shape : tuple of positive int
Dimensions of input image data.
dtype : string or numpy dtype, optional, default = None
Numerical type of the image data; if not given it is read from the conf
file when present, otherwise it defaults to 'int16'.
ext : string, optional, default = 'bin'
Extension required on data files to be loaded.
start, stop : nonnegative int, optional, default = None
Indices of the first and last-plus-one file to load, relative to the sorted
filenames matching `path` and `ext`. Interpreted using python slice indexing conventions.
recursive : boolean, optional, default = False
If true, will recursively descend directories from path, loading all files
with an extension matching 'ext'.
nplanes : positive integer, optional, default = None
If passed, will cause single files to be subdivided into nplanes separate images.
Otherwise, each file is taken to represent one image.
npartitions : int, optional, default = None
Number of partitions for computational engine,
if None will use default for engine.
labels : array, optional, default = None
Labels for records. If provided, should be one-dimensional.
"""
import json
from thunder.readers import get_file_reader, FileNotFoundError
try:
reader = get_file_reader(path)(credentials=credentials)
buf = reader.read(path, filename=conf).decode('utf-8')
params = json.loads(buf)
except FileNotFoundError:
params = {}
if 'dtype' in params.keys():
dtype = params['dtype']
if 'dims' in params.keys():
shape = params['dims']
if 'shape' in params.keys():
shape = params['shape']
if not shape:
raise ValueError('Image shape must be specified as argument or in a conf.json file')
if not dtype:
dtype = 'int16'
if nplanes is not None:
if nplanes <= 0:
raise ValueError('nplanes must be positive if passed, got %d' % nplanes)
if shape[-1] % nplanes:
raise ValueError("Last dimension '%d' must be divisible by nplanes '%d'" %
(shape[-1], nplanes))
def getarray(idx_buffer_filename):
idx, buf, _ = idx_buffer_filename
ary = frombuffer(buf, dtype=dtype, count=int(prod(shape))).reshape(shape, order=order)
if nplanes is None:
yield (idx,), ary
else:
# divide array into chunks of nplanes
npoints = shape[-1] // nplanes  # integer division
if shape[-1] % nplanes:
npoints += 1
timepoint = 0
last_plane = 0
current_plane = 1
while current_plane < ary.shape[-1]:
if current_plane % nplanes == 0:
slices = [slice(None)] * (ary.ndim - 1) + [slice(last_plane, current_plane)]
yield idx*npoints + timepoint, ary[slices].squeeze()
timepoint += 1
last_plane = current_plane
current_plane += 1
# yield remaining planes
slices = [slice(None)] * (ary.ndim - 1) + [slice(last_plane, ary.shape[-1])]
yield (idx*npoints + timepoint,), ary[slices].squeeze()
recount = False if nplanes is None else True
append = [nplanes] if (nplanes is not None and nplanes > 1) else []
newdims = tuple(list(shape[:-1]) + append) if nplanes else shape
return frompath(path, accessor=getarray, ext=ext, start=start,
stop=stop, recursive=recursive, npartitions=npartitions,
dims=newdims, dtype=dtype, labels=labels, recount=recount,
                    engine=engine, credentials=credentials)
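# A hypothetical call: a directory of flat .bin files, each holding a 64x64x10
# volume of 16-bit integers that should be split into ten separate planes.
# The path is made up for illustration.
images = frombinary('path/to/binary/files', shape=(64, 64, 10),
                    dtype='int16', nplanes=10)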
def project_geometry(geometry, source, target):
"""Projects a shapely geometry object from the source to the target projection."""
project = partial(
pyproj.transform,
source,
target
)
return transform(project, geometry)
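# A usage sketch assuming the legacy pyproj.Proj/pyproj.transform API that
# project_geometry builds on (newer pyproj releases prefer Transformer, but the
# partial-based form above matches the legacy interface).
import pyproj
from shapely.geometry import Point

wgs84 = pyproj.Proj(init='epsg:4326')      # lon/lat
mercator = pyproj.Proj(init='epsg:3857')   # Web Mercator

berlin = Point(13.4, 52.5)
projected = project_geometry(berlin, wgs84, mercator)
print(projected.x, projected.y)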
def update_all_apps(self):
"""
Loops through all app names contained in settings.INSTALLED_APPS and calls `update_app`
on each one. Handles any object deletions that happened after all apps have been initialized.
"""
for app in apps.get_app_configs():
self.update_app(app.name)
# During update_app, all apps added model objects that were registered for deletion.
# Delete all objects that were previously managed by the initial data process
self.handle_deletions()
def update_project(self, project_key, data, expand=None):
"""
Updates a project.
Update project: /rest/api/2/project/{projectIdOrKey}
:param project_key: project key of project that needs to be updated
:param data: dictionary containing the data to be updated
:param expand: the parameters to expand
"""
if expand:
url = '/rest/api/2/project/{projectIdOrKey}?expand={expand}'.format(projectIdOrKey=project_key, expand=expand)
else:
url = '/rest/api/2/project/{projectIdOrKey}'.format(projectIdOrKey=project_key)
return self.put(url, data)
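# Hypothetical call: the project key and payload below are made up for
# illustration, and 'jira' is assumed to be an authenticated client instance.
jira.update_project('DEMO',
                    {'name': 'Demo project',
                     'description': 'Sandbox for integration tests'},
                    expand='lead,description')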
def timer(diff, processed):
"""Return the passed time."""
# Changes seconds into minutes and seconds
minutes, seconds = divmod(diff, 60)
try:
# Finds average time taken by requests
time_per_request = diff / float(len(processed))
except ZeroDivisionError:
time_per_request = 0
return minutes, seconds, time_per_request
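# Example: 125 seconds spent over 50 processed requests.
minutes, seconds, per_request = timer(125, processed=range(50))
print(minutes, seconds, per_request)  # 2 5 2.5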
def check_file_encoding(self, input_file_path):
"""
Check whether the given file is UTF-8 encoded.
:param string input_file_path: the path of the file to be checked
:rtype: :class:`~aeneas.validator.ValidatorResult`
"""
self.log([u"Checking encoding of file '%s'", input_file_path])
self.result = ValidatorResult()
if self._are_safety_checks_disabled(u"check_file_encoding"):
return self.result
if not gf.file_can_be_read(input_file_path):
self._failed(u"File '%s' cannot be read." % (input_file_path))
return self.result
with io.open(input_file_path, "rb") as file_object:
bstring = file_object.read()
self._check_utf8_encoding(bstring)
return self.result
def prompt(self, prompt_msg=None, newline=False):
""" Writes prompt message to output stream and
reads line from standard input stream.
`prompt_msg`
Message to write.
`newline`
Append newline character to prompt message before writing.
Return string.
"""
if prompt_msg is not None:
self.write(prompt_msg, newline)
return self._input.readline().rstrip(os.linesep)
def dynamics_from_bundle(b, times, compute=None, return_roche_euler=False, use_kepcart=False, **kwargs):
"""
Parse parameters in the bundle and call :func:`dynamics`.
See :func:`dynamics` for more detailed information.
NOTE: you must either provide compute (the label) OR all relevant options
as kwargs (ltte, stepsize, gr, integrator)
Args:
b: (Bundle) the bundle with a set hierarchy
times: (list or array) times at which to run the dynamics
stepsize: (float, optional) stepsize for the integration
[default: 0.01]
orbiterror: (float, optional) orbiterror for the integration
[default: 1e-16]
ltte: (bool, default False) whether to account for light travel time effects.
gr: (bool, default False) whether to account for general relativity effects.
Returns:
t, xs, ys, zs, vxs, vys, vzs. t is a numpy array of all times,
the remaining are a list of numpy arrays (a numpy array per
star - in order given by b.hierarchy.get_stars()) for the cartesian
positions and velocities of each star at those same times.
"""
b.run_delayed_constraints()
hier = b.hierarchy
computeps = b.get_compute(compute, check_visible=False, force_ps=True)
stepsize = computeps.get_value('stepsize', check_visible=False, **kwargs)
ltte = computeps.get_value('ltte', check_visible=False, **kwargs)
gr = computeps.get_value('gr', check_visible=False, **kwargs)
integrator = computeps.get_value('integrator', check_visible=False, **kwargs)
starrefs = hier.get_stars()
orbitrefs = hier.get_orbits() if use_kepcart else [hier.get_parent_of(star) for star in starrefs]
def mean_anom(t0, t0_perpass, period):
# TODO: somehow make this into a constraint where t0 and mean anom
# are both in the compute options if dynamic_method==nbody
# (one is constrained from the other and the orbit.... nvm, this gets ugly)
return 2 * np.pi * (t0 - t0_perpass) / period
masses = [b.get_value('mass', u.solMass, component=component, context='component') * c.G.to('AU3 / (Msun d2)').value for component in starrefs] # GM
smas = [b.get_value('sma', u.AU, component=component, context='component') for component in orbitrefs]
eccs = [b.get_value('ecc', component=component, context='component') for component in orbitrefs]
incls = [b.get_value('incl', u.rad, component=component, context='component') for component in orbitrefs]
per0s = [b.get_value('per0', u.rad, component=component, context='component') for component in orbitrefs]
long_ans = [b.get_value('long_an', u.rad, component=component, context='component') for component in orbitrefs]
t0_perpasses = [b.get_value('t0_perpass', u.d, component=component, context='component') for component in orbitrefs]
periods = [b.get_value('period', u.d, component=component, context='component') for component in orbitrefs]
if return_roche_euler:
# rotperiods are only needed to compute instantaneous syncpars
rotperiods = [b.get_value('period', u.d, component=component, context='component') for component in starrefs]
else:
rotperiods = None
vgamma = b.get_value('vgamma', context='system', unit=u.AU/u.d)
t0 = b.get_value('t0', context='system', unit=u.d)
# mean_anoms = [mean_anom(t0, t0_perpass, period) for t0_perpass, period in zip(t0_perpasses, periods)]
mean_anoms = [b.get_value('mean_anom', u.rad, component=component, context='component') for component in orbitrefs]
return dynamics(times, masses, smas, eccs, incls, per0s, long_ans, \
mean_anoms, rotperiods, t0, vgamma, stepsize, ltte, gr,
                    integrator, use_kepcart=use_kepcart, return_roche_euler=return_roche_euler)
def page_for(self, member, page_size=DEFAULT_PAGE_SIZE):
'''
Determine the page where a member falls in the leaderboard.
@param member [String] Member name.
@param page_size [int] Page size to be used in determining page location.
@return the page where a member falls in the leaderboard.
'''
return self.page_for_in(self.leaderboard_name, member, page_size)
def radviz(frame, class_column, ax=None, color=None, colormap=None, **kwds):
"""
Plot a multidimensional dataset in 2D.
Each Series in the DataFrame is represented as an evenly distributed
slice on a circle. Each data point is rendered in the circle according to
the value on each Series. Highly correlated `Series` in the `DataFrame`
are placed closer on the unit circle.
RadViz allows projecting an N-dimensional data set into a 2D space, where the
influence of each dimension can be interpreted as a balance between the
influence of all dimensions.
More info available at the `original article
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.889>`_
describing RadViz.
Parameters
----------
frame : `DataFrame`
Pandas object holding the data.
class_column : str
Column name containing the name of the data point category.
ax : :class:`matplotlib.axes.Axes`, optional
A plot instance to which to add the information.
color : list[str] or tuple[str], optional
Assign a color to each category. Example: ['blue', 'green'].
colormap : str or :class:`matplotlib.colors.Colormap`, default None
Colormap to select colors from. If string, load colormap with that
name from matplotlib.
kwds : optional
Options to pass to matplotlib scatter plotting method.
Returns
-------
:class:`matplotlib.axes.Axes`
See Also
--------
plotting.andrews_curves : Plot clustering visualization.
Examples
--------
.. plot::
:context: close-figs
>>> df = pd.DataFrame({
... 'SepalLength': [6.5, 7.7, 5.1, 5.8, 7.6, 5.0, 5.4, 4.6,
... 6.7, 4.6],
... 'SepalWidth': [3.0, 3.8, 3.8, 2.7, 3.0, 2.3, 3.0, 3.2,
... 3.3, 3.6],
... 'PetalLength': [5.5, 6.7, 1.9, 5.1, 6.6, 3.3, 4.5, 1.4,
... 5.7, 1.0],
... 'PetalWidth': [1.8, 2.2, 0.4, 1.9, 2.1, 1.0, 1.5, 0.2,
... 2.1, 0.2],
... 'Category': ['virginica', 'virginica', 'setosa',
... 'virginica', 'virginica', 'versicolor',
... 'versicolor', 'setosa', 'virginica',
... 'setosa']
... })
>>> rad_viz = pd.plotting.radviz(df, 'Category') # doctest: +SKIP
"""
import matplotlib.pyplot as plt
import matplotlib.patches as patches
def normalize(series):
a = min(series)
b = max(series)
return (series - a) / (b - a)
n = len(frame)
classes = frame[class_column].drop_duplicates()
class_col = frame[class_column]
df = frame.drop(class_column, axis=1).apply(normalize)
if ax is None:
ax = plt.gca(xlim=[-1, 1], ylim=[-1, 1])
to_plot = {}
colors = _get_standard_colors(num_colors=len(classes), colormap=colormap,
color_type='random', color=color)
for kls in classes:
to_plot[kls] = [[], []]
m = len(frame.columns) - 1
s = np.array([(np.cos(t), np.sin(t))
for t in [2.0 * np.pi * (i / float(m))
for i in range(m)]])
for i in range(n):
row = df.iloc[i].values
row_ = np.repeat(np.expand_dims(row, axis=1), 2, axis=1)
y = (s * row_).sum(axis=0) / row.sum()
kls = class_col.iat[i]
to_plot[kls][0].append(y[0])
to_plot[kls][1].append(y[1])
for i, kls in enumerate(classes):
ax.scatter(to_plot[kls][0], to_plot[kls][1], color=colors[i],
label=pprint_thing(kls), **kwds)
ax.legend()
ax.add_patch(patches.Circle((0.0, 0.0), radius=1.0, facecolor='none'))
for xy, name in zip(s, df.columns):
ax.add_patch(patches.Circle(xy, radius=0.025, facecolor='gray'))
if xy[0] < 0.0 and xy[1] < 0.0:
ax.text(xy[0] - 0.025, xy[1] - 0.025, name,
ha='right', va='top', size='small')
elif xy[0] < 0.0 and xy[1] >= 0.0:
ax.text(xy[0] - 0.025, xy[1] + 0.025, name,
ha='right', va='bottom', size='small')
elif xy[0] >= 0.0 and xy[1] < 0.0:
ax.text(xy[0] + 0.025, xy[1] - 0.025, name,
ha='left', va='top', size='small')
elif xy[0] >= 0.0 and xy[1] >= 0.0:
ax.text(xy[0] + 0.025, xy[1] + 0.025, name,
ha='left', va='bottom', size='small')
ax.axis('equal')
return ax
def reset_index(
self, level=None, drop=False, inplace=False, col_level=0, col_fill=""
):
"""Reset this index to default and create column from current index.
Args:
level: Only remove the given levels from the index. Removes all
levels by default
drop: Do not try to insert index into DataFrame columns. This
resets the index to the default integer index.
inplace: Modify the DataFrame in place (do not create a new object)
col_level : If the columns have multiple levels, determines which
level the labels are inserted into. By default it is inserted
into the first level.
col_fill: If the columns have multiple levels, determines how the
other levels are named. If None then the index name is
repeated.
Returns:
A new DataFrame if inplace is False, None otherwise.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
# TODO Implement level
if level is not None:
new_query_compiler = self._default_to_pandas(
"reset_index",
level=level,
drop=drop,
inplace=inplace,
col_level=col_level,
col_fill=col_fill,
)
# Error checking for matching Pandas. Pandas does not allow you to
# insert a dropped index into a DataFrame if these columns already
# exist.
elif (
not drop
and not isinstance(self.index, pandas.MultiIndex)
and all(n in self.columns for n in ["level_0", "index"])
):
raise ValueError("cannot insert level_0, already exists")
else:
new_query_compiler = self._query_compiler.reset_index(
drop=drop, level=level
)
return self._create_or_update_from_compiler(new_query_compiler, inplace)
def get_time_inqueue(self):
"""
:class:`timedelta` with the time spent in the Queue, None if the Task is not running
.. note::
This value is always greater than the real value computed by the resource manager
as we start to count only when check_status sets the `Task` status to S_RUN.
"""
if self.submission is None: return None
if self.start is None:
delta = datetime.datetime.now() - self.submission
else:
delta = self.start - self.submission
# This happens when we read the exact start datetime from the ABINIT log file.
if delta.total_seconds() < 0: delta = datetime.timedelta(seconds=0)
return MyTimedelta.as_timedelta(delta)
def manager(self, model):
'''Retrieve the :class:`Manager` for ``model`` which can be any of the
values valid for the :meth:`model` method.'''
try:
return self.router[model]
except KeyError:
meta = getattr(model, '_meta', model)
if meta.type == 'structure':
# this is a structure
if hasattr(model, 'model'):
structure_model = model.model
if structure_model:
return self.manager(structure_model)
else:
manager = self.router.structure(model)
if manager:
return manager
raise InvalidTransaction('"%s" not valid in this session' % meta)
def _words_by_distinctiveness_score(vocab, topic_word_distrib, doc_topic_distrib, doc_lengths, n=None,
least_to_most=False):
"""Return words in `vocab` ordered by distinctiveness score."""
p_t = get_marginal_topic_distrib(doc_topic_distrib, doc_lengths)
distinct = get_word_distinctiveness(topic_word_distrib, p_t)
return _words_by_score(vocab, distinct, least_to_most=least_to_most, n=n)
def closing_plugin(self, cancelable=False):
"""Perform actions before parent main window is closed."""
for cl in self.clients:
cl.close()
self.set_option('recent_notebooks', self.recent_notebooks)
return True
def update_brand(self) -> None:
"""Update brand group of parameters."""
self.update(path=URL_GET + GROUP.format(group=BRAND))
def Receive(self, replytype, **kw):
'''Parse message, create Python object.
KeyWord data:
faults -- list of WSDL operation.fault typecodes
wsaction -- If using WS-Address, must specify Action value we expect to
receive.
'''
self.ReceiveSOAP(**kw)
ps = self.ps
tp = _find_type(ps.body_root)
isarray = ((type(tp) in (tuple,list) and tp[1] == 'Array') or _find_arraytype(ps.body_root))
if self.typesmodule is None or isarray:
return _Binding.Receive(self, replytype, **kw)
if ps.IsAFault():
msg = FaultFromFaultMessage(ps)
raise FaultException(msg)
tc = replytype
if hasattr(replytype, 'typecode'):
tc = replytype.typecode
#Ignore response wrapper
reply = {}
for elt in _child_elements(ps.body_root):
name = str(elt.localName)
reply[name] = self.__parse_child(elt)
if self.address is not None:
self.address.checkResponse(ps, kw.get('wsaction'))
return reply
def copy( self ):
"""
Make a copy of this :class:`ParserElement`. Useful for defining
different parse actions for the same parsing pattern, using copies of
the original parse element.
Example::
integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K")
integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M"))
prints::
[5120, 100, 655360, 268435456]
Equivalent form of ``expr.copy()`` is just ``expr()``::
integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
"""
cpy = copy.copy( self )
cpy.parseAction = self.parseAction[:]
cpy.ignoreExprs = self.ignoreExprs[:]
if self.copyDefaultWhiteChars:
cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
return cpy
def user(self, **params):
"""Stream user
Accepted params found at:
https://dev.twitter.com/docs/api/1.1/get/user
"""
url = 'https://userstream.twitter.com/%s/user.json' \
% self.streamer.api_version
self.streamer._request(url, params=params)
def refreshUi( self ):
"""
Refreshes the interface based on the current settings.
"""
widget = self.uiContentsTAB.currentWidget()
is_content = isinstance(widget, QWebView)
if is_content:
self._currentContentsIndex = self.uiContentsTAB.currentIndex()
history = widget.page().history()
else:
history = None
self.uiBackACT.setEnabled(is_content and history.canGoBack())
self.uiForwardACT.setEnabled(is_content and history.canGoForward())
self.uiHomeACT.setEnabled(is_content)
self.uiNewTabACT.setEnabled(is_content)
self.uiCopyTextACT.setEnabled(is_content)
self.uiCloseTabACT.setEnabled(is_content and
self.uiContentsTAB.count() > 2)
for i in range(1, self.uiContentsTAB.count()):
widget = self.uiContentsTAB.widget(i)
self.uiContentsTAB.setTabText(i, widget.title())
def sparse_to_matrix(sparse):
"""
Take a sparse (n,3) list of integer indexes of filled cells,
turn it into a dense (m,o,p) matrix.
Parameters
-----------
sparse: (n,3) int, index of filled cells
Returns
------------
dense: (m,o,p) bool, matrix of filled cells
"""
sparse = np.asanyarray(sparse, dtype=np.int64)
if not util.is_shape(sparse, (-1, 3)):
raise ValueError('sparse must be (n,3)!')
shape = sparse.max(axis=0) + 1
matrix = np.zeros(np.prod(shape), dtype=bool)
multiplier = np.array([np.prod(shape[1:]), shape[2], 1])
index = (sparse * multiplier).sum(axis=1)
matrix[index] = True
dense = matrix.reshape(shape)
return dense
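# Quick check of the dense shape and the two filled cells.
import numpy as np

dense = sparse_to_matrix(np.array([[0, 0, 0], [1, 2, 3]]))
print(dense.shape)                     # (2, 3, 4)
print(dense[0, 0, 0], dense[1, 2, 3])  # True True
print(dense.sum())                     # 2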
def get_secchan_offs(type_, data):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n927.
Positional arguments:
type_ -- corresponding `ieprinters` dictionary key for the instance.
data -- bytearray data to read.
"""
if data[0] < len(ht_secondary_offset):
return "{0} ({1})".format(ht_secondary_offset[data[0]], data[0])
return "{0}".format(data[0]) | 0.002392 |
def option_completer(cls, k,v):
"Tab completion hook for the %%opts cell magic."
line = v.text_until_cursor
completions = cls.setup_completer()
compositor_defs = {el.group:el.output_type.__name__
for el in Compositor.definitions if el.group}
return cls.line_completer(line, completions, compositor_defs)
def _fly(self, board, layers, things, the_plot):
"""Handles the behaviour of visible bolts flying toward the player."""
# Disappear if we've hit a bunker.
if self.character in the_plot['bunker_hitters']:
return self._teleport((-1, -1))
# End the game if we've hit the player.
if self.position == things['P'].position: the_plot.terminate_episode()
self._south(board, the_plot)
def upload_file(self, local_path, project_id, parent_data, existing_file_id=None, remote_filename=None):
"""
Upload a file under a specific location in DDSConnection possibly replacing an existing file.
:param local_path: str: path to a local file to upload
:param project_id: str: uuid of the project to add this file to
:param parent_data: ParentData: info about the parent of this file
:param existing_file_id: str: uuid of file to create a new version of (or None to create a new file)
:param remote_filename: str: name to use for our remote file (defaults to local_path basename otherwise)
:return: File
"""
path_data = PathData(local_path)
hash_data = path_data.get_hash()
file_upload_operations = FileUploadOperations(self.data_service, None)
upload_id = file_upload_operations.create_upload(project_id, path_data, hash_data,
remote_filename=remote_filename,
storage_provider=self.config.storage_provider_id)
context = UploadContext(self.config, self.data_service, upload_id, path_data)
ParallelChunkProcessor(context).run()
remote_file_data = file_upload_operations.finish_upload(upload_id, hash_data, parent_data, existing_file_id)
return File(self, remote_file_data)
def add_ini_profile(self, cp, sec):
"""Add profile from configuration file.
Parameters
-----------
cp : ConfigParser object
The ConfigParser object holding the workflow configuration settings
sec : string
The section containing options for this job.
"""
for opt in cp.options(sec):
namespace = opt.split('|')[0]
if namespace == 'pycbc' or namespace == 'container':
continue
value = cp.get(sec, opt).strip()
key = opt.split('|')[1]
self.add_profile(namespace, key, value, force=True)
# Remove if Pegasus can apply this hint in the TC
if namespace == 'hints' and key == 'execution.site':
self.execution_site = value
def get_extensions(cert_type):
'''
Fetch X509 and CSR extension definitions from tls:extensions:
(common|server|client) or set them to standard defaults.
.. versionadded:: 2015.8.0
cert_type:
The type of certificate such as ``server`` or ``client``.
CLI Example:
.. code-block:: bash
salt '*' tls.get_extensions client
'''
assert X509_EXT_ENABLED, ('X509 extensions are not supported in '
'pyOpenSSL prior to version 0.15.1. Your '
'version: {0}'.format(OpenSSL_version))
ext = {}
if cert_type == '':
log.error('cert_type set to empty in tls_ca.get_extensions(); '
'defaulting to ``server``')
cert_type = 'server'
try:
ext['common'] = __salt__['pillar.get']('tls.extensions:common', False)
except NameError as err:
log.debug(err)
if not ext['common'] or ext['common'] == '':
ext['common'] = {
'csr': {
'basicConstraints': 'CA:FALSE',
},
'cert': {
'authorityKeyIdentifier': 'keyid,issuer:always',
'subjectKeyIdentifier': 'hash',
},
}
try:
ext['server'] = __salt__['pillar.get']('tls.extensions:server', False)
except NameError as err:
log.debug(err)
if not ext['server'] or ext['server'] == '':
ext['server'] = {
'csr': {
'extendedKeyUsage': 'serverAuth',
'keyUsage': 'digitalSignature, keyEncipherment',
},
'cert': {},
}
try:
ext['client'] = __salt__['pillar.get']('tls.extensions:client', False)
except NameError as err:
log.debug(err)
if not ext['client'] or ext['client'] == '':
ext['client'] = {
'csr': {
'extendedKeyUsage': 'clientAuth',
'keyUsage': 'nonRepudiation, digitalSignature, keyEncipherment',
},
'cert': {},
}
# possible user-defined profile or a typo
if cert_type not in ext:
try:
ext[cert_type] = __salt__['pillar.get'](
'tls.extensions:{0}'.format(cert_type))
except NameError as e:
log.debug(
'pillar, tls:extensions:%s not available or '
'not operating in a salt context\n%s', cert_type, e
)
retval = ext['common']
for Use in retval:
retval[Use].update(ext[cert_type][Use])
return retval
def plot_curvature(self, curv_type='mean', **kwargs):
"""
Plots the curvature of the external surface of the grid
Parameters
----------
curv_type : str, optional
One of the following strings indicating curvature types
- mean
- gaussian
- maximum
- minimum
**kwargs : optional
Optional keyword arguments. See help(vtki.plot)
Returns
-------
cpos : list
Camera position, focal point, and view up. Used for storing and
setting camera view.
"""
trisurf = self.extract_surface().tri_filter()
return trisurf.plot_curvature(curv_type, **kwargs)
def getComponentExceptionSummary(self, tmaster, component_name, instances=[], callback=None):
"""
Get the summary of exceptions for component_name and list of instances.
Empty instance list will fetch all exceptions.
"""
if not tmaster or not tmaster.host or not tmaster.stats_port:
return
exception_request = tmaster_pb2.ExceptionLogRequest()
exception_request.component_name = component_name
if len(instances) > 0:
exception_request.instances.extend(instances)
request_str = exception_request.SerializeToString()
port = str(tmaster.stats_port)
host = tmaster.host
url = "http://{0}:{1}/exceptionsummary".format(host, port)
Log.debug("Creating request object.")
request = tornado.httpclient.HTTPRequest(url,
body=request_str,
method='POST',
request_timeout=5)
Log.debug('Making HTTP call to fetch exceptionsummary url: %s', url)
try:
client = tornado.httpclient.AsyncHTTPClient()
result = yield client.fetch(request)
Log.debug("HTTP call complete.")
except tornado.httpclient.HTTPError as e:
raise Exception(str(e))
# Check the response code - error if it is in 400s or 500s
responseCode = result.code
if responseCode >= 400:
message = "Error in getting exceptions from Tmaster, code: " + responseCode
Log.error(message)
raise tornado.gen.Return({
"message": message
})
# Parse the response from tmaster.
exception_response = tmaster_pb2.ExceptionLogResponse()
exception_response.ParseFromString(result.body)
if exception_response.status.status == common_pb2.NOTOK:
if exception_response.status.HasField("message"):
raise tornado.gen.Return({
"message": exception_response.status.message
})
# Send response
ret = []
for exception_log in exception_response.exceptions:
ret.append({'class_name': exception_log.stacktrace,
'lasttime': exception_log.lasttime,
'firsttime': exception_log.firsttime,
'count': str(exception_log.count)})
raise tornado.gen.Return(ret)
def _notify_add_at(self, index, length=1):
"""Notify about an AddChange at a caertain index and length."""
slice_ = self._slice_at(index, length)
self._notify_add(slice_)
def unpause_topic(self, topic):
"""Resume message flow to channels of an existing, paused, topic."""
nsq.assert_valid_topic_name(topic)
return self._request('POST', '/topic/unpause', fields={'topic': topic})
def vcpu_pin(vm_, vcpu, cpus):
'''
Set which CPUs a VCPU can use.
CLI Example:
.. code-block:: bash
salt 'foo' virt.vcpu_pin domU-id 2 1
salt 'foo' virt.vcpu_pin domU-id 2 2-6
'''
with _get_xapi_session() as xapi:
vm_uuid = _get_label_uuid(xapi, 'VM', vm_)
if vm_uuid is False:
return False
# from xm's main
def cpu_make_map(cpulist):
cpus = []
for c in cpulist.split(','):
if c == '':
continue
if '-' in c:
(x, y) = c.split('-')
for i in range(int(x), int(y) + 1):
cpus.append(int(i))
else:
# remove this element from the list
if c[0] == '^':
cpus = [x for x in cpus if x != int(c[1:])]
else:
cpus.append(int(c))
cpus.sort()
return ','.join(map(str, cpus))
if cpus == 'all':
cpumap = cpu_make_map('0-63')
else:
cpumap = cpu_make_map('{0}'.format(cpus))
try:
xapi.VM.add_to_VCPUs_params_live(vm_uuid,
'cpumap{0}'.format(vcpu), cpumap)
return True
# VM.add_to_VCPUs_params_live() implementation in xend 4.1+ has
# a bug which makes the client call fail.
# That code is accurate for all others XenAPI implementations, but
# for that particular one, fallback to xm / xl instead.
except Exception:
return __salt__['cmd.run'](
'{0} vcpu-pin {1} {2} {3}'.format(_get_xtool(), vm_, vcpu, cpus),
                                       python_shell=False)
def predict_maxprob(self, x, **kwargs):
"""
Most likely value. Generally equivalent to predict.
"""
return self.base_estimator_.predict(x.values, **kwargs)
def get_integrated_channels(self, options):
"""
Generates a list of active integrated channels for active customers, filtered from the given options.
Raises errors when invalid options are encountered.
See ``add_arguments`` for the accepted options.
"""
channel_classes = self.get_channel_classes(options.get('channel'))
filter_kwargs = {
'active': True,
'enterprise_customer__active': True,
}
enterprise_customer = self.get_enterprise_customer(options.get('enterprise_customer'))
if enterprise_customer:
filter_kwargs['enterprise_customer'] = enterprise_customer
for channel_class in channel_classes:
for integrated_channel in channel_class.objects.filter(**filter_kwargs):
yield integrated_channel
def acquisition_function(self,x):
"""
Takes an acquisition and weights it so the domain and cost are taken into account.
"""
f_acqu = self._compute_acq(x)
cost_x, _ = self.cost_withGradients(x)
return -(f_acqu*self.space.indicator_constraints(x))/cost_x
def parse_warc_record(self, record):
""" Parse warc record
"""
entry = self._create_index_entry(record.rec_type)
if record.rec_type == 'warcinfo':
entry['url'] = record.rec_headers.get_header('WARC-Filename')
entry['urlkey'] = entry['url']
entry['_warcinfo'] = record.raw_stream.read(record.length)
return entry
entry['url'] = record.rec_headers.get_header('WARC-Target-Uri')
# timestamp
entry['timestamp'] = iso_date_to_timestamp(record.rec_headers.
get_header('WARC-Date'))
# mime
if record.rec_type == 'revisit':
entry['mime'] = 'warc/revisit'
elif self.options.get('minimal'):
entry['mime'] = '-'
else:
def_mime = '-' if record.rec_type == 'request' else 'unk'
entry.extract_mime(record.http_headers.
get_header('Content-Type'),
def_mime)
# detected mime from WARC-Identified-Payload-Type
entry['mime-detected'] = record.rec_headers.get_header(
'WARC-Identified-Payload-Type')
# status -- only for response records (by convention):
if record.rec_type == 'response' and not self.options.get('minimal'):
entry.extract_status(record.http_headers)
else:
entry['status'] = '-'
# digest
digest = record.rec_headers.get_header('WARC-Payload-Digest')
entry['digest'] = digest
if digest and digest.startswith('sha1:'):
entry['digest'] = digest[len('sha1:'):]
elif not entry.get('digest'):
entry['digest'] = '-'
# optional json metadata, if present
metadata = record.rec_headers.get_header('WARC-Json-Metadata')
if metadata:
entry['metadata'] = metadata
return entry
def remove_entity(self, name):
"""Unload an entity"""
self.entities.remove(name)
self.padaos.remove_entity(name)
def title(s=None, additional='', stream=sys.stdout):
"""Utility function to display nice titles
It automatically extracts the name of the function/method it is called from
and you can add additional text. title() will then print the name
of the function/method and the additional text surrounded by two lines
of dashes. If you don't want the name of the function, you can provide
alternative text (regardless of the additional text)
:param s: (string) text to display, uses the function name and arguments by
default
:param additional: (string) extra text to display (not needed if s is not
None)
:param stream: (stream) the stream to print to. By default goes to standard
output
Examples:
.. code-block:: python
def foo():
title()
will display:
.. code-block:: text
---
foo
---
.. code-block:: python
def foo():
title(additional='(), this is cool!!!')
will display:
.. code-block:: text
----------------------
foo(), this is cool!!!
----------------------
.. code-block:: python
def foo():
title('No function name here!')
will display:
.. code-block:: text
----------------------
No function name here!
----------------------
"""
if s is None:
callable_name, file_name, class_name = getCallerInfo(2)
s = callable_name
if class_name is not None:
s = class_name + '.' + callable_name
lines = (s + additional).split('\n')
length = max(len(line) for line in lines)
print('-' * length, file=stream)
print(s + additional, file=stream)
print('-' * length, file=stream)
def plot_zeropoint(pars):
""" Plot 2d histogram.
Pars will be a dictionary containing:
data, figure_id, vmax, title_str, xp,yp, searchrad
"""
from matplotlib import pyplot as plt
xp = pars['xp']
yp = pars['yp']
searchrad = int(pars['searchrad'] + 0.5)
plt.figure(num=pars['figure_id'])
plt.clf()
if pars['interactive']:
plt.ion()
else:
plt.ioff()
plt.imshow(pars['data'], vmin=0, vmax=pars['vmax'],
interpolation='nearest')
plt.viridis()
plt.colorbar()
plt.title(pars['title_str'])
plt.plot(xp + searchrad, yp + searchrad, color='red', marker='+',
markersize=24)
plt.plot(searchrad, searchrad, color='yellow', marker='+', markersize=120)
plt.text(searchrad, searchrad, "Offset=0,0", verticalalignment='bottom',
color='yellow')
plt.xlabel("Offset in X (pixels)")
plt.ylabel("Offset in Y (pixels)")
if pars['interactive']:
plt.show()
if pars['plotname']:
suffix = pars['plotname'][-4:]
output = pars['plotname']
if '.' not in suffix:
output += '.png'
format = 'png'
else:
if suffix[1:] in ['png', 'pdf', 'ps', 'eps', 'svg']:
format = suffix[1:]
plt.savefig(output, format=format)
def clear_globals_reload_modules(self):
"""Clears globals and reloads modules"""
self.code_array.clear_globals()
self.code_array.reload_modules()
# Clear result cache
self.code_array.result_cache.clear()
def _connect(self):
"""
Connect to the MySQL server
"""
self._close()
self.conn = MySQLdb.Connect(host=self.hostname,
port=self.port,
user=self.username,
passwd=self.password,
                                    db=self.database)
def collapsedintervals(table, start='start', stop='stop', key=None):
"""
Utility function to collapse intervals in a table.
If no facet `key` is given, returns an iterator over `(start, stop)` tuples.
If facet `key` is given, returns an iterator over `(key, start, stop)`
tuples.
"""
if key is None:
table = sort(table, key=start)
for iv in _collapse(values(table, (start, stop))):
yield iv
else:
table = sort(table, key=(key, start))
for k, g in rowgroupby(table, key=key, value=(start, stop)):
for iv in _collapse(g):
yield (k,) + iv
def crypto_aead_chacha20poly1305_ietf_encrypt(message, aad, nonce, key):
"""
Encrypt the given ``message`` using the IETF ratified chacha20poly1305
construction described in RFC7539.
:param message:
:type message: bytes
:param aad:
:type aad: bytes
:param nonce:
:type nonce: bytes
:param key:
:type key: bytes
:return: authenticated ciphertext
:rtype: bytes
"""
ensure(isinstance(message, bytes), 'Input message type must be bytes',
raising=exc.TypeError)
mlen = len(message)
ensure(mlen <= crypto_aead_chacha20poly1305_ietf_MESSAGEBYTES_MAX,
'Message must be at most {0} bytes long'.format(
crypto_aead_chacha20poly1305_ietf_MESSAGEBYTES_MAX),
raising=exc.ValueError)
ensure(isinstance(aad, bytes) or (aad is None),
'Additional data must be bytes or None',
raising=exc.TypeError)
ensure(isinstance(nonce, bytes) and
len(nonce) == crypto_aead_chacha20poly1305_ietf_NPUBBYTES,
'Nonce must be a {0} bytes long bytes sequence'.format(
crypto_aead_chacha20poly1305_ietf_NPUBBYTES),
raising=exc.TypeError)
ensure(isinstance(key, bytes) and
len(key) == crypto_aead_chacha20poly1305_ietf_KEYBYTES,
'Key must be a {0} bytes long bytes sequence'.format(
crypto_aead_chacha20poly1305_ietf_KEYBYTES),
raising=exc.TypeError)
if aad:
_aad = aad
aalen = len(aad)
else:
_aad = ffi.NULL
aalen = 0
mxout = mlen + crypto_aead_chacha20poly1305_ietf_ABYTES
clen = ffi.new("unsigned long long *")
ciphertext = ffi.new("unsigned char[]", mxout)
res = lib.crypto_aead_chacha20poly1305_ietf_encrypt(ciphertext,
clen,
message,
mlen,
_aad,
aalen,
ffi.NULL,
nonce,
key)
ensure(res == 0, "Encryption failed.", raising=exc.CryptoError)
return ffi.buffer(ciphertext, clen[0])[:]
def chimera_anticluster(m, n=None, t=4, multiplier=3.0,
cls=BinaryQuadraticModel, subgraph=None, seed=None):
"""Generate an anticluster problem on a Chimera lattice.
An anticluster problem has weak interactions within a tile and strong
interactions between tiles.
Args:
m (int):
Number of rows in the Chimera lattice.
n (int, optional, default=m):
Number of columns in the Chimera lattice.
t (int, optional, default=4):
Size of the shore within each Chimera tile.
multiplier (number, optional, default=3.0):
Strength of the intertile edges.
cls (class, optional, default=:class:`.BinaryQuadraticModel`):
Binary quadratic model class to build from.
subgraph (int/tuple[nodes, edges]/:obj:`~networkx.Graph`):
A subgraph of a Chimera(m, n, t) graph to build the anticluster
problem on.
seed (int, optional, default=None):
Random seed.
Returns:
:obj:`.BinaryQuadraticModel`: spin-valued binary quadratic model.
"""
if seed is None:
seed = np.random.randint(2**32, dtype=np.uint32)
r = np.random.RandomState(seed)
m = int(m)
if n is None:
n = m
else:
n = int(n)
t = int(t)
ldata = np.zeros(m*n*t*2) # number of nodes
if m and n and t:
inrow, incol = zip(*_iter_chimera_tile_edges(m, n, t))
if m > 1 or n > 1:
outrow, outcol = zip(*_iter_chimera_intertile_edges(m, n, t))
else:
outrow = outcol = tuple()
qdata = r.choice((-1., 1.), size=len(inrow)+len(outrow))
qdata[len(inrow):] *= multiplier
irow = inrow + outrow
icol = incol + outcol
else:
irow = icol = qdata = tuple()
bqm = cls.from_numpy_vectors(ldata, (irow, icol, qdata), 0.0, SPIN)
if subgraph is not None:
nodes, edges = subgraph
subbqm = cls.empty(SPIN)
try:
subbqm.add_variables_from((v, bqm.linear[v]) for v in nodes)
except KeyError:
msg = "given 'subgraph' contains nodes not in Chimera({}, {}, {})".format(m, n, t)
raise ValueError(msg)
try:
subbqm.add_interactions_from((u, v, bqm.adj[u][v]) for u, v in edges)
except KeyError:
msg = "given 'subgraph' contains edges not in Chimera({}, {}, {})".format(m, n, t)
raise ValueError(msg)
bqm = subbqm
return bqm
def is_letter(char, strict=True):
"""
Check whether the character is a letter (as opposed to a diacritic or
suprasegmental).
In strict mode return True only if the letter is part of the IPA spec.
"""
if (char in chart.consonants) or (char in chart.vowels):
return True
if not strict:
return unicodedata.category(char) in ['Ll', 'Lo', 'Lt', 'Lu']
return False
def create_transaction(self, to_account):
"""Create a transaction for this statement amount and account, into to_account
This will also set this StatementLine's ``transaction`` attribute to the newly
created transaction.
Args:
to_account (Account): The account the transaction is into / out of.
Returns:
Transaction: The newly created (and committed) transaction.
"""
from_account = self.statement_import.bank_account
transaction = Transaction.objects.create()
Leg.objects.create(
transaction=transaction, account=from_account, amount=+(self.amount * -1)
)
Leg.objects.create(transaction=transaction, account=to_account, amount=-(self.amount * -1))
transaction.date = self.date
transaction.save()
self.transaction = transaction
self.save()
return transaction
def clear(self, correlation_id):
"""
Clears component state.
:param correlation_id: (optional) transaction id to trace execution through call chain.
"""
self._lock.acquire()
try:
self._cache = {}
finally:
self._lock.release()
def get_access_token(self) -> str:
"""
Returns the access token in case of successful authorization
"""
if self._service_token:
return self._service_token
if self._app_id and self._login and self._password:
try:
if self.login():
url_params = self.auth_oauth2()
if 'access_token' in url_params:
return url_params['access_token']
finally:
self.close()
def get_by_type(
self, app_id, event_type, timespan=None, filter=None, search=None, orderby=None, select=None, skip=None, top=None, format=None, count=None, apply=None, custom_headers=None, raw=False, **operation_config):
"""Execute OData query.
Executes an OData query for events.
:param app_id: ID of the application. This is Application ID from the
API Access settings blade in the Azure portal.
:type app_id: str
:param event_type: The type of events to query; either a standard
event type (`traces`, `customEvents`, `pageViews`, `requests`,
`dependencies`, `exceptions`, `availabilityResults`) or `$all` to
query across all event types. Possible values include: '$all',
'traces', 'customEvents', 'pageViews', 'browserTimings', 'requests',
'dependencies', 'exceptions', 'availabilityResults',
'performanceCounters', 'customMetrics'
:type event_type: str or ~azure.applicationinsights.models.EventType
:param timespan: Optional. The timespan over which to retrieve events.
This is an ISO8601 time period value. This timespan is applied in
addition to any that are specified in the Odata expression.
:type timespan: str
:param filter: An expression used to filter the returned events
:type filter: str
:param search: A free-text search expression to match for whether a
particular event should be returned
:type search: str
:param orderby: A comma-separated list of properties with \\"asc\\"
(the default) or \\"desc\\" to control the order of returned events
:type orderby: str
:param select: Limits the properties to just those requested on each
returned event
:type select: str
:param skip: The number of items to skip over before returning events
:type skip: int
:param top: The number of events to return
:type top: int
:param format: Format for the returned events
:type format: str
:param count: Request a count of matching items included with the
returned events
:type count: bool
:param apply: An expression used for aggregation over returned events
:type apply: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: EventsResults or ClientRawResponse if raw=true
:rtype: ~azure.applicationinsights.models.EventsResults or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.applicationinsights.models.ErrorResponseException>`
"""
# Construct URL
url = self.get_by_type.metadata['url']
path_format_arguments = {
'appId': self._serialize.url("app_id", app_id, 'str'),
'eventType': self._serialize.url("event_type", event_type, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if timespan is not None:
query_parameters['timespan'] = self._serialize.query("timespan", timespan, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if search is not None:
query_parameters['$search'] = self._serialize.query("search", search, 'str')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, 'str')
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, 'str')
if skip is not None:
query_parameters['$skip'] = self._serialize.query("skip", skip, 'int')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int')
if format is not None:
query_parameters['$format'] = self._serialize.query("format", format, 'str')
if count is not None:
query_parameters['$count'] = self._serialize.query("count", count, 'bool')
if apply is not None:
query_parameters['$apply'] = self._serialize.query("apply", apply, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('EventsResults', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def easter(year=None):
"""
1900 - 2099 limit
:param year: int
:return: Easter day
"""
y = int(year) if year else _year
n = y - 1900
a = n % 19
q = n // 4
b = (7 * a + 1) // 19
m = (11 * a + 4 - b) % 29
w = (n + q + 31 - m) % 7
d = 25 - m - w
if d > 0:
return datetime.date(y, 4, d)
else:
return datetime.date(y, 3, (31 + d))
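# Quick sanity check against known Easter dates, both inside the supported
# 1900-2099 range.
assert easter(2024) == datetime.date(2024, 3, 31)
assert easter(2025) == datetime.date(2025, 4, 20)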
async def StartUnitCompletion(self, entities, message):
'''
entities : typing.Sequence[~Entity]
message : str
Returns -> typing.Sequence[~ErrorResult]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='UpgradeSeries',
request='StartUnitCompletion',
version=1,
params=_params)
_params['entities'] = entities
_params['message'] = message
reply = await self.rpc(msg)
return reply
def resolve_pos(token):
"""If necessary, add a field to the POS tag for UD mapping.
Under Universal Dependencies, sometimes the same Unidic POS tag can
be mapped differently depending on the literal token or its context
in the sentence. This function adds information to the POS tag to
resolve ambiguous mappings.
"""
# TODO: This is a first take. The rules here are crude approximations.
# For many of these, full dependencies are needed to properly resolve
# PoS mappings.
if token.pos == "連体詞,*,*,*":
if re.match(r"[こそあど此其彼]の", token.surface):
return token.pos + ",DET"
if re.match(r"[こそあど此其彼]", token.surface):
return token.pos + ",PRON"
return token.pos + ",ADJ"
return token.pos
def prepare_site_model(exposure_xml, sites_csv, vs30_csv,
z1pt0, z2pt5, vs30measured, grid_spacing=0,
assoc_distance=5, output='site_model.csv'):
"""
Prepare a site_model.csv file from exposure xml files/site csv files,
vs30 csv files and a grid spacing which can be 0 (meaning no grid).
For each site the closest vs30 parameter is used. The command can also
generate (on demand) the additional fields z1pt0, z2pt5 and vs30measured
which may be needed by your hazard model, depending on the required GSIMs.
"""
hdf5 = datastore.hdf5new()
req_site_params = {'vs30'}
fields = ['lon', 'lat', 'vs30']
if z1pt0:
req_site_params.add('z1pt0')
fields.append('z1pt0')
if z2pt5:
req_site_params.add('z2pt5')
fields.append('z2pt5')
if vs30measured:
req_site_params.add('vs30measured')
fields.append('vs30measured')
with performance.Monitor(hdf5.path, hdf5, measuremem=True) as mon:
if exposure_xml:
mesh, assets_by_site = Exposure.read(
exposure_xml, check_dupl=False).get_mesh_assets_by_site()
mon.hdf5['assetcol'] = assetcol = site.SiteCollection.from_points(
mesh.lons, mesh.lats, req_site_params=req_site_params)
if grid_spacing:
grid = mesh.get_convex_hull().dilate(
grid_spacing).discretize(grid_spacing)
haz_sitecol = site.SiteCollection.from_points(
grid.lons, grid.lats, req_site_params=req_site_params)
logging.info(
'Associating exposure grid with %d locations to %d '
'exposure sites', len(haz_sitecol), len(assets_by_site))
haz_sitecol, assets_by, discarded = assoc(
assets_by_site, haz_sitecol,
grid_spacing * SQRT2, 'filter')
if len(discarded):
logging.info('Discarded %d sites with assets '
'[use oq plot_assets]', len(discarded))
mon.hdf5['discarded'] = numpy.array(discarded)
haz_sitecol.make_complete()
else:
haz_sitecol = assetcol
discarded = []
elif sites_csv:
lons, lats = [], []
for fname in sites_csv:
with open(fname) as csv:
for line in csv:
if line.startswith('lon,lat'): # possible header
continue
lon, lat = line.split(',')[:2]
lons.append(valid.longitude(lon))
lats.append(valid.latitude(lat))
haz_sitecol = site.SiteCollection.from_points(
lons, lats, req_site_params=req_site_params)
if grid_spacing:
                # build the hull from the sites just read; the exposure mesh
                # is not defined in this branch
                grid = haz_sitecol.mesh.get_convex_hull().dilate(
                    grid_spacing).discretize(grid_spacing)
haz_sitecol = site.SiteCollection.from_points(
grid.lons, grid.lats, req_site_params=req_site_params)
else:
raise RuntimeError('Missing exposures or missing sites')
vs30orig = read_vs30(vs30_csv)
logging.info('Associating %d hazard sites to %d site parameters',
len(haz_sitecol), len(vs30orig))
sitecol, vs30, _ = assoc(
vs30orig, haz_sitecol, assoc_distance, 'warn')
sitecol.array['vs30'] = vs30['vs30']
if z1pt0:
sitecol.array['z1pt0'] = calculate_z1pt0(vs30['vs30'])
if z2pt5:
sitecol.array['z2pt5'] = calculate_z2pt5_ngaw2(vs30['vs30'])
if vs30measured:
sitecol.array['vs30measured'] = False # it is inferred
mon.hdf5['sitecol'] = sitecol
write_csv(output, sitecol.array[fields])
logging.info('Saved %d rows in %s' % (len(sitecol), output))
logging.info(mon)
return sitecol | 0.000249 |
def scratchpad():
"""Dummy page for styling tests."""
return render_template(
'demo.html',
config=dict(
project_name='Scratchpad',
style=request.args.get('style', 'default'),
),
title='Style Scratchpad',
) | 0.003663 |
def fmt_row(self, columns, dimensions, row, **settings):
"""
Format single table row.
"""
cells = []
i = 0
for column in columns:
cells.append(self.fmt_cell(
row[i],
dimensions[i],
column,
**settings[self.SETTING_TEXT_FORMATING]
)
)
i += 1
return self.bchar('v', 'm', settings[self.SETTING_BORDER_STYLE], **settings[self.SETTING_BORDER_FORMATING]) + \
self.bchar('v', 'm', settings[self.SETTING_BORDER_STYLE], **settings[self.SETTING_BORDER_FORMATING]).join(cells) + \
self.bchar('v', 'm', settings[self.SETTING_BORDER_STYLE], **settings[self.SETTING_BORDER_FORMATING]) | 0.008895 |
def frames(
self,
*,
callers: Optional[Union[str, List[str]]] = None,
callees: Optional[Union[str, List[str]]] = None,
kind: Optional[TraceKind] = None,
limit: Optional[int] = 10,
):
"""Display trace frames independent of the current issue.
Parameters (all optional):
callers: str or list[str] filter traces by this caller name
callees: str or list[str] filter traces by this callee name
kind: precondition|postcondition the type of trace frames to show
limit: int (default: 10) how many trace frames to display
(specify limit=None for all)
Sample usage:
frames callers="module.function", kind=postcondition
String filters support LIKE wildcards (%, _) from SQL:
% matches anything (like .* in regex)
_ matches 1 character (like . in regex)
"""
with self.db.make_session() as session:
query = (
session.query(
TraceFrame.id,
CallerText.contents.label("caller"),
TraceFrame.caller_port,
CalleeText.contents.label("callee"),
TraceFrame.callee_port,
)
.filter(TraceFrame.run_id == self.current_run_id)
.join(CallerText, CallerText.id == TraceFrame.caller_id)
.join(CalleeText, CalleeText.id == TraceFrame.callee_id)
)
if callers is not None:
query = self._add_list_or_string_filter_to_query(
callers, query, CallerText.contents, "callers"
)
if callees is not None:
query = self._add_list_or_string_filter_to_query(
callees, query, CalleeText.contents, "callees"
)
if kind is not None:
if kind not in {TraceKind.PRECONDITION, TraceKind.POSTCONDITION}:
raise UserError(
"Try 'frames kind=postcondition'"
" or 'frames kind=precondition'."
)
query = query.filter(TraceFrame.kind == kind)
if limit is not None and not isinstance(limit, int):
raise UserError("'limit' should be an int or None.")
trace_frames = query.group_by(TraceFrame.id).order_by(
CallerText.contents, CalleeText.contents
)
total_trace_frames = trace_frames.count()
limit = limit or total_trace_frames
self._output_trace_frames(
self._group_trace_frames(trace_frames, limit), limit, total_trace_frames
) | 0.003171 |
def new_session(self, zipkin_trace_v2, v2_ui=False):
"""Creates a new SchedulerSession for this Scheduler."""
return SchedulerSession(self, self._native.new_session(
self._scheduler, zipkin_trace_v2, v2_ui, multiprocessing.cpu_count())
) | 0.003922 |
def get_archives(self, offset=None, count=None, session_id=None):
"""Returns an ArchiveList, which is an array of archives that are completed and in-progress,
for your API key.
    :param int offset: Optional. The index offset of the first archive. 0 is the offset
    of the most recently started archive. 1 is the offset of the archive that started prior to
    the most recent archive. If you do not specify an offset, 0 is used.
    :param int count: Optional. The number of archives to be returned. The maximum
    number of archives returned is 1000.
    :param string session_id: Optional. Used to list archives for a specific session ID.
:rtype: An ArchiveList object, which is an array of Archive objects.
"""
params = {}
if offset is not None:
params['offset'] = offset
if count is not None:
params['count'] = count
if session_id is not None:
params['sessionId'] = session_id
endpoint = self.endpoints.archive_url() + "?" + urlencode(params)
response = requests.get(
endpoint, headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout
)
if response.status_code < 300:
return ArchiveList(self, response.json())
elif response.status_code == 403:
raise AuthError()
elif response.status_code == 404:
raise NotFoundError("Archive not found")
else:
raise RequestError("An unexpected error occurred", response.status_code) | 0.005675 |
def create(name, url, backend, frequency=None, owner=None, org=None):
'''Create a new harvest source'''
log.info('Creating a new Harvest source "%s"', name)
source = actions.create_source(name, url, backend,
frequency=frequency,
owner=owner,
organization=org)
log.info('''Created a new Harvest source:
name: {0.name},
slug: {0.slug},
url: {0.url},
backend: {0.backend},
frequency: {0.frequency},
owner: {0.owner},
organization: {0.organization}'''.format(source)) | 0.001634 |
def create_deamon(cmd, shell=False, root=False):
"""Usage:
        Create a service process.
"""
try:
if root:
cmd.insert(0, 'sudo')
LOG.info(cmd)
subproc = subprocess.Popen(cmd, shell=shell, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return subproc.pid
except Exception as e:
LOG.error(e)
raise | 0.002439 |
def __parameter_descriptor(self, subfield_list):
"""Creates descriptor for a parameter using the subfields that define it.
Each parameter is defined by a list of fields, with all but the last being
a message field and the final being a simple (non-message) field.
Many of the fields in the descriptor are determined solely by the simple
field at the end, though some (such as repeated and required) take the whole
chain of fields into consideration.
Args:
subfield_list: List of fields describing the parameter.
Returns:
Dictionary containing a descriptor for the parameter described by the list
of fields.
"""
descriptor = {}
final_subfield = subfield_list[-1]
# Required
if all(subfield.required for subfield in subfield_list):
descriptor['required'] = True
# Type
descriptor['type'] = self.__field_to_parameter_type(final_subfield)
# Default
default = self.__parameter_default(final_subfield)
if default is not None:
descriptor['default'] = default
# Repeated
if any(subfield.repeated for subfield in subfield_list):
descriptor['repeated'] = True
# Enum
enum_descriptor = self.__parameter_enum(final_subfield)
if enum_descriptor is not None:
descriptor['enum'] = enum_descriptor
return descriptor | 0.005181 |
def read_bonedata(self, fid):
"""Read bone data from an acclaim skeleton file stream."""
bone_count = 0
lin = self.read_line(fid)
while lin[0]!=':':
parts = lin.split()
if parts[0] == 'begin':
bone_count += 1
self.vertices.append(vertex(name = '', id=np.NaN,
meta={'name': [],
'id': [],
'offset': [],
'orientation': [],
'axis': [0., 0., 0.],
'axis_order': [],
'C': np.eye(3),
'Cinv': np.eye(3),
'channels': [],
'bodymass': [],
'confmass': [],
'order': [],
'rot_ind': [],
'pos_ind': [],
'limits': [],
'xyz': np.array([0., 0., 0.]),
'rot': np.eye(3)}))
lin = self.read_line(fid)
elif parts[0]=='id':
self.vertices[bone_count].id = int(parts[1])
lin = self.read_line(fid)
self.vertices[bone_count].children = []
elif parts[0]=='name':
self.vertices[bone_count].name = parts[1]
lin = self.read_line(fid)
elif parts[0]=='direction':
direction = np.array([float(parts[1]), float(parts[2]), float(parts[3])])
lin = self.read_line(fid)
elif parts[0]=='length':
lgth = float(parts[1])
lin = self.read_line(fid)
elif parts[0]=='axis':
self.vertices[bone_count].meta['axis'] = np.array([float(parts[1]),
float(parts[2]),
float(parts[3])])
# order is reversed compared to bvh
self.vertices[bone_count].meta['axis_order'] = parts[-1][::-1].lower()
lin = self.read_line(fid)
elif parts[0]=='dof':
order = []
for i in range(1, len(parts)):
if parts[i]== 'rx':
chan = 'Xrotation'
order.append('x')
elif parts[i] =='ry':
chan = 'Yrotation'
order.append('y')
elif parts[i] == 'rz':
chan = 'Zrotation'
order.append('z')
elif parts[i] == 'tx':
chan = 'Xposition'
elif parts[i] == 'ty':
chan = 'Yposition'
elif parts[i] == 'tz':
chan = 'Zposition'
elif parts[i] == 'l':
chan = 'length'
self.vertices[bone_count].meta['channels'].append(chan)
# order is reversed compared to bvh
self.vertices[bone_count].meta['order'] = order[::-1]
lin = self.read_line(fid)
elif parts[0]=='limits':
self.vertices[bone_count].meta['limits'] = [[float(parts[1][1:]), float(parts[2][:-1])]]
lin = self.read_line(fid)
while lin !='end':
parts = lin.split()
self.vertices[bone_count].meta['limits'].append([float(parts[0][1:]), float(parts[1][:-1])])
lin = self.read_line(fid)
self.vertices[bone_count].meta['limits'] = np.array(self.vertices[bone_count].meta['limits'])
elif parts[0]=='end':
self.vertices[bone_count].meta['offset'] = direction*lgth
lin = self.read_line(fid)
return lin | 0.009981 |
def load(args):
'''
%prog load gff_file fasta_file [--options]
Parses the selected features out of GFF, with subfeatures concatenated.
For example, to get the CDS sequences, do this:
$ %prog load athaliana.gff athaliana.fa --parents mRNA --children CDS
To get 500bp upstream of a genes Transcription Start Site (TSS), do this:
$ %prog load athaliana.gff athaliana.fa --feature=upstream:TSS:500
Switch TSS with TrSS for Translation Start Site.
'''
from datetime import datetime as dt
from jcvi.formats.fasta import Seq, SeqRecord
# can request output fasta sequence id to be picked from following attributes
valid_id_attributes = ["ID", "Name", "Parent", "Alias", "Target"]
p = OptionParser(load.__doc__)
p.add_option("--parents", dest="parents", default="mRNA",
help="list of features to extract, use comma to separate (e.g." + \
"'gene,mRNA') [default: %default]")
p.add_option("--children", dest="children", default="CDS",
help="list of features to extract, use comma to separate (e.g." + \
"'five_prime_UTR,CDS,three_prime_UTR') [default: %default]")
p.add_option("--feature", dest="feature",
help="feature type to extract. e.g. `--feature=CDS` or " + \
"`--feature=upstream:TSS:500` [default: %default]")
p.add_option("--id_attribute", choices=valid_id_attributes,
help="The attribute field to extract and use as FASTA sequence ID " + \
"[default: %default]")
p.add_option("--desc_attribute", default="Note",
help="The attribute field to extract and use as FASTA sequence " + \
"description [default: %default]")
p.add_option("--full_header", default=None, choices=["default", "tair"],
help="Specify if full FASTA header (with seqid, coordinates and datestamp)" + \
" should be generated [default: %default]")
g1 = OptionGroup(p, "Optional parameters (if generating full header)")
g1.add_option("--sep", dest="sep", default=" ", \
help="Specify separator used to delimiter header elements [default: \"%default\"]")
g1.add_option("--datestamp", dest="datestamp", \
help="Specify a datestamp in the format YYYYMMDD or automatically pick `today`" + \
" [default: %default]")
g1.add_option("--conf_class", dest="conf_class", default=False, action="store_true",
help="Specify if `conf_class` attribute should be parsed and placed in the header" + \
" [default: %default]")
p.add_option_group(g1)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(p.print_help())
gff_file, fasta_file = args
if opts.feature:
opts.feature, opts.parent, opts.children, upstream_site, upstream_len, \
flag, error_msg = parse_feature_param(opts.feature)
if flag:
sys.exit(error_msg)
parents = set(opts.parents.split(','))
children_list = set(opts.children.split(','))
"""
In a situation where we want to extract sequence for only the top-level
parent feature, specify feature type of parent == child
"""
skipChildren = True if len(parents.symmetric_difference(children_list)) == 0 \
else False
id_attr = opts.id_attribute
desc_attr = opts.desc_attribute
sep = opts.sep
import gffutils
g = make_index(gff_file)
f = Fasta(fasta_file, index=False)
seqlen = {}
for seqid, size in f.itersizes():
seqlen[seqid] = size
fw = must_open(opts.outfile, "w")
for feat in get_parents(gff_file, parents):
desc = ""
if desc_attr:
fparent = feat.attributes['Parent'][0] \
if 'Parent' in feat.attributes else None
if fparent:
try:
g_fparent = g[fparent]
except gffutils.exceptions.FeatureNotFoundError:
logging.error("{} not found in index .. skipped".format(fparent))
continue
if desc_attr in g_fparent.attributes:
desc = ",".join(g_fparent.attributes[desc_attr])
elif desc_attr in feat.attributes:
desc = ",".join(feat.attributes[desc_attr])
if opts.full_header:
desc_parts = []
desc_parts.append(desc)
if opts.conf_class and 'conf_class' in feat.attributes:
desc_parts.append(feat.attributes['conf_class'][0])
if opts.full_header == "tair":
orient = "REVERSE" if feat.strand == "-" else "FORWARD"
feat_coords = "{0}:{1}-{2} {3} LENGTH=[LEN]".format(feat.seqid, \
feat.start, feat.end, orient)
else:
(s, e) = (feat.start, feat.end) if (feat.strand == "+") \
else (feat.end, feat.start)
feat_coords = "{0}:{1}-{2}".format(feat.seqid, s, e)
desc_parts.append(feat_coords)
datestamp = opts.datestamp if opts.datestamp else \
"{0}{1}{2}".format(dt.now().year, dt.now().month, dt.now().day)
desc_parts.append(datestamp)
desc = sep.join(str(x) for x in desc_parts)
desc = "".join(str(x) for x in (sep, desc)).strip()
if opts.feature == "upstream":
upstream_start, upstream_stop = get_upstream_coords(upstream_site, upstream_len, \
seqlen[feat.seqid], feat, children_list, g)
if not upstream_start or not upstream_stop:
continue
feat_seq = f.sequence(dict(chr=feat.seqid, start=upstream_start,
stop=upstream_stop, strand=feat.strand))
(s, e) = (upstream_start, upstream_stop) \
if feat.strand == "+" else \
(upstream_stop, upstream_start)
upstream_seq_loc = str(feat.seqid) + ":" + str(s) + "-" + str(e)
desc = sep.join(str(x) for x in (desc, upstream_seq_loc, \
"FLANKLEN=" + str(upstream_len)))
else:
children = []
if not skipChildren:
for c in g.children(feat.id, 1):
if c.featuretype not in children_list:
continue
child = f.sequence(dict(chr=c.chrom, start=c.start, stop=c.stop,
strand=c.strand))
children.append((child, c))
if not children:
print("[warning] %s has no children with type %s" \
% (feat.id, ','.join(children_list)), file=sys.stderr)
continue
else:
child = f.sequence(dict(chr=feat.seqid, start=feat.start, stop=feat.end,
strand=feat.strand))
children.append((child, feat))
# sort children in incremental position
children.sort(key=lambda x: x[1].start)
# reverse children if negative strand
if feat.strand == '-':
children.reverse()
feat_seq = ''.join(x[0] for x in children)
desc = desc.replace("\"", "")
id = ",".join(feat.attributes[id_attr]) if id_attr \
and feat.attributes[id_attr] else \
feat.id
if opts.full_header == "tair":
desc = desc.replace("[LEN]", str(len(feat_seq)))
rec = SeqRecord(Seq(feat_seq), id=id, description=desc)
SeqIO.write([rec], fw, "fasta")
fw.flush() | 0.00798 |
def _encode(self, tokens: List[str], mean: bool) -> Union[List[np.ndarray], np.ndarray]:
"""
Embed one text sample
Args:
tokens: tokenized text sample
mean: whether to return mean embedding of tokens per sample
Returns:
list of embedded tokens or array of mean values
"""
embedded_tokens = []
for t in tokens:
try:
emb = self.tok2emb[t]
except KeyError:
try:
emb = self._get_word_vector(t)
except KeyError:
emb = np.zeros(self.dim, dtype=np.float32)
self.tok2emb[t] = emb
embedded_tokens.append(emb)
if mean is None:
mean = self.mean
if mean:
filtered = [et for et in embedded_tokens if np.any(et)]
if filtered:
return np.mean(filtered, axis=0)
return np.zeros(self.dim, dtype=np.float32)
return embedded_tokens | 0.002896 |
def _run(self, gates, n_qubits, args, kwargs):
"""Default implementation of `Backend.run`.
        Backend developers shouldn't override this function; they should override `run` instead.
The default flow of running is:
1. preprocessing
2. call the gate action which defined in backend
3. postprocessing
Backend developer can:
1. Define preprocessing process. Override `_preprocess_run`
2. Define the gate action. Define methods `gate_{gate.lowername}`,
for example, `gate_x` for X gate, `gate_cx` for CX gate.
3. Define postprocessing process (and make return value). Override `_postprocess_run`
        Otherwise, the developer can override the `run` method to change the flow of running.
"""
gates, ctx = self._preprocess_run(gates, n_qubits, args, kwargs)
self._run_gates(gates, n_qubits, ctx)
return self._postprocess_run(ctx) | 0.005102 |
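As an editor's illustration of the extension points named in the docstring above (a minimal sketch, not the library's actual base class): a toy backend that records gate names via `gate_*` methods and builds its result in `_postprocess_run`. The `Gate` namedtuple and the dispatch loop stand in for the real gate objects and `_run_gates`, which are not shown in the snippet; only the `gate_{gate.lowername}` naming convention is taken from the docstring.
from collections import namedtuple

Gate = namedtuple("Gate", ["lowername"])  # stand-in for the real gate objects

class TraceBackend:
    """Toy backend: gate_<lowername> methods collect gate names into ctx."""

    def _preprocess_run(self, gates, n_qubits, args, kwargs):
        return gates, {"trace": [], "n_qubits": n_qubits}

    def gate_x(self, gate, ctx):
        ctx["trace"].append("x")

    def gate_cx(self, gate, ctx):
        ctx["trace"].append("cx")

    def _run_gates(self, gates, n_qubits, ctx):
        # Stand-in dispatcher: look up gate_<lowername> and apply it.
        for gate in gates:
            getattr(self, "gate_" + gate.lowername)(gate, ctx)

    def _postprocess_run(self, ctx):
        return ctx["trace"]

backend = TraceBackend()
gates, ctx = backend._preprocess_run([Gate("x"), Gate("cx")], 2, (), {})
backend._run_gates(gates, 2, ctx)
print(backend._postprocess_run(ctx))  # -> ['x', 'cx']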
def using_ios_stash():
    ''' Return True if the loaded modules hint that the install is running on iOS (StaSh). '''
print('detected install path:')
print(os.path.dirname(__file__))
module_names = set(sys.modules.keys())
return 'stash' in module_names or 'stash.system' in module_names | 0.003571 |
def configfile_from_path(path, strict=True):
"""Get a ConfigFile object based on a file path.
This method will inspect the file extension and return the appropriate
ConfigFile subclass initialized with the given path.
Args:
path (str): The file path which represents the configuration file.
strict (bool): Whether or not to parse the file in strict mode.
Returns:
confpy.loaders.base.ConfigurationFile: The subclass which is
specialized for the given file path.
Raises:
UnrecognizedFileExtension: If there is no loader for the path.
"""
extension = path.split('.')[-1]
conf_type = FILE_TYPES.get(extension)
if not conf_type:
raise exc.UnrecognizedFileExtension(
"Cannot parse file of type {0}. Choices are {1}.".format(
extension,
FILE_TYPES.keys(),
)
)
return conf_type(path=path, strict=strict) | 0.00104 |
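A minimal, self-contained sketch of the extension-to-loader dispatch the docstring above describes; the `IniFile` class and the contents of `FILE_TYPES` here are placeholders invented for illustration, not confpy's real loader classes.
class IniFile:
    # Placeholder standing in for a ConfigurationFile subclass.
    def __init__(self, path, strict=True):
        self.path = path
        self.strict = strict

FILE_TYPES = {'ini': IniFile}

def loader_for(path, strict=True):
    # Pick a loader class from the file extension, as in configfile_from_path.
    ext = path.split('.')[-1]
    conf_type = FILE_TYPES.get(ext)
    if not conf_type:
        raise ValueError('Cannot parse file of type {0}.'.format(ext))
    return conf_type(path=path, strict=strict)

print(type(loader_for('app.ini')).__name__)  # -> IniFile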
def parseExtn(extn=None):
"""
Parse a string representing a qualified fits extension name as in the
output of `parseFilename` and return a tuple ``(str(extname),
int(extver))``, which can be passed to `astropy.io.fits` functions using
the 'ext' kw.
Default return is the first extension in a fits file.
Examples
--------
::
>>> parseExtn('sci, 2')
('sci', 2)
>>> parseExtn('2')
('', 2)
>>> parseExtn('sci')
('sci', 1)
"""
if not extn:
return ('', 0)
try:
lext = extn.split(',')
except:
return ('', 1)
if len(lext) == 1 and lext[0].isdigit():
return ("", int(lext[0]))
elif len(lext) == 2:
return (lext[0], int(lext[1]))
else:
return (lext[0], 1) | 0.00246 |
def map(self, key_pattern, func, all_args, timeout=None):
'''Cache return value of multiple calls.
Args:
key_pattern (str): the key pattern to use for generating
keys for caches of the decorated function.
func (function): the function to call.
all_args (list): a list of args to be used to make calls to
the function.
timeout (int): the cache timeout
Returns:
A list of the return values of the calls.
Example::
def add(a, b):
return a + b
cache.map(key_pat, add, [(1, 2), (3, 4)]) == [3, 7]
'''
results = []
keys = [
make_key(key_pattern, func, args, {})
for args in all_args
]
cached = dict(zip(keys, self.get_many(keys)))
cache_to_add = {}
for key, args in zip(keys, all_args):
val = cached[key]
if val is None:
val = func(*args)
cache_to_add[key] = val if val is not None else NONE_RESULT
if val == NONE_RESULT:
val = None
results.append(val)
if cache_to_add:
self.set_many(cache_to_add, timeout)
return results | 0.00152 |
def atan(x, context=None):
"""
Return the inverse tangent of ``x``.
The mathematically exact result lies in the range [-π/2, π/2]. However,
note that as a result of rounding to the current context, it's possible
for the actual value to lie just outside this range.
"""
return _apply_function_in_current_context(
BigFloat,
mpfr.mpfr_atan,
(BigFloat._implicit_convert(x),),
context,
) | 0.002227 |
def gen_password(password, crypt_salt=None, algorithm='sha512'):
'''
.. versionadded:: 2014.7.0
Generate hashed password
.. note::
When called this function is called directly via remote-execution,
the password argument may be displayed in the system's process list.
This may be a security risk on certain systems.
password
Plaintext password to be hashed.
crypt_salt
        Cryptographic salt. If not given, a random 8-character salt will be
generated.
algorithm
The following hash algorithms are supported:
* md5
* blowfish (not in mainline glibc, only available in distros that add it)
* sha256
* sha512 (default)
CLI Example:
.. code-block:: bash
salt '*' shadow.gen_password 'I_am_password'
salt '*' shadow.gen_password 'I_am_password' crypt_salt='I_am_salt' algorithm=sha256
'''
if not HAS_CRYPT:
raise CommandExecutionError(
'gen_password is not available on this operating system '
'because the "crypt" python module is not available.'
)
return salt.utils.pycrypto.gen_hash(crypt_salt, password, algorithm) | 0.002449 |
def _get_user_provided_overrides(modules):
"""Load user-provided config overrides.
:param modules: stack modules to lookup in user overrides yaml file.
:returns: overrides dictionary.
"""
overrides = os.path.join(os.environ['JUJU_CHARM_DIR'],
'hardening.yaml')
if os.path.exists(overrides):
log("Found user-provided config overrides file '%s'" %
(overrides), level=DEBUG)
settings = yaml.safe_load(open(overrides))
if settings and settings.get(modules):
log("Applying '%s' overrides" % (modules), level=DEBUG)
return settings.get(modules)
log("No overrides found for '%s'" % (modules), level=DEBUG)
else:
log("No hardening config overrides file '%s' found in charm "
"root dir" % (overrides), level=DEBUG)
return {} | 0.001153 |
def _update_system_file(system_file, name, new_kvs):
"""Update the bcbio_system.yaml file with new resource information.
"""
if os.path.exists(system_file):
bak_file = system_file + ".bak%s" % datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
shutil.copyfile(system_file, bak_file)
with open(system_file) as in_handle:
config = yaml.safe_load(in_handle)
else:
utils.safe_makedir(os.path.dirname(system_file))
config = {}
new_rs = {}
added = False
for rname, r_kvs in config.get("resources", {}).items():
if rname == name:
for k, v in new_kvs.items():
r_kvs[k] = v
added = True
new_rs[rname] = r_kvs
if not added:
new_rs[name] = new_kvs
config["resources"] = new_rs
with open(system_file, "w") as out_handle:
yaml.safe_dump(config, out_handle, default_flow_style=False, allow_unicode=False) | 0.003135 |
def splitext_no_dot(filename):
"""
Wrap os.path.splitext to return the name and the extension
without the '.' (e.g., csv instead of .csv)
"""
name, ext = os.path.splitext(filename)
ext = ext.lower()
return name, ext.strip('.') | 0.003937 |
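A quick standalone check of the behaviour the docstring above describes (the helper is restated so the example runs on its own); note that only the last extension is returned for multi-part suffixes.
import os

def splitext_no_dot_check(filename):
    # Same logic as splitext_no_dot above.
    name, ext = os.path.splitext(filename)
    return name, ext.lower().strip('.')

assert splitext_no_dot_check('report.CSV') == ('report', 'csv')
assert splitext_no_dot_check('archive.tar.gz') == ('archive.tar', 'gz')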
def _assign_database_backend(self, db):
"""Assign Trace instance to stochastics and deterministics and Database instance
to self.
:Parameters:
- `db` : string, Database instance
The name of the database module (see below), or a Database instance.
Available databases:
- `no_trace` : Traces are not stored at all.
- `ram` : Traces stored in memory.
- `txt` : Traces stored in memory and saved in txt files at end of
sampling.
- `sqlite` : Traces stored in sqlite database.
- `hdf5` : Traces stored in an HDF5 file.
"""
# Objects that are not to be tallied are assigned a no_trace.Trace
# Tallyable objects are listed in the _nodes_to_tally set.
no_trace = getattr(database, 'no_trace')
self._variables_to_tally = set()
for object in self.stochastics | self.deterministics:
if object.keep_trace:
self._variables_to_tally.add(object)
try:
if object.mask is None:
# Standard stochastic
self._funs_to_tally[object.__name__] = object.get_value
else:
# Has missing values, so only fetch stochastic elements
# using mask
self._funs_to_tally[
object.__name__] = object.get_stoch_value
except AttributeError:
# Not a stochastic object, so no mask
self._funs_to_tally[object.__name__] = object.get_value
else:
object.trace = no_trace.Trace(object.__name__)
check_valid_object_name(self._variables_to_tally)
# If not already done, load the trace backend from the database
# module, and assign a database instance to Model.
if isinstance(db, str):
if db in dir(database):
module = getattr(database, db)
# Assign a default name for the database output file.
if self._db_args.get('dbname') is None:
self._db_args['dbname'] = self.__name__ + '.' + db
self.db = module.Database(**self._db_args)
elif db in database.__modules__:
raise ImportError(
'Database backend `%s` is not properly installed. Please see the documentation for instructions.' % db)
else:
raise AttributeError(
'Database backend `%s` is not defined in pymc.database.' % db)
elif isinstance(db, database.base.Database):
self.db = db
self.restore_sampler_state()
else: # What is this for? DH.
self.db = db.Database(**self._db_args) | 0.002105 |
def compare_SED(castroData1, castroData2, ylims, TS_thresh=4.0,
errSigma=1.0, specVals=[]):
""" Compare two SEDs
castroData1: A CastroData object, with the
log-likelihood v. normalization for each energy bin
castroData2: A CastroData object, with the
log-likelihood v. normalization for each energy bin
ylims : y-axis limits
    TS_thresh : TS value above which to plot a point,
rather than an upper limit
errSigma : Number of sigma to use for error bars
specVals : List of spectra to add to plot
returns fig,ax which are matplotlib figure and axes objects
"""
import matplotlib.pyplot as plt
xmin = min(castroData1.refSpec.ebins[0], castroData2.refSpec.ebins[0])
xmax = max(castroData1.refSpec.ebins[-1], castroData2.refSpec.ebins[-1])
ymin = ylims[0]
ymax = ylims[1]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlim((xmin, xmax))
ax.set_ylim((ymin, ymax))
ax.set_xlabel("Energy [GeV]")
ax.set_ylabel(NORM_LABEL[castroData1.norm_type])
plotSED_OnAxis(ax, castroData1, TS_thresh, errSigma,
colorLim='blue', colorPoint='blue')
plotSED_OnAxis(ax, castroData2, TS_thresh, errSigma,
colorLim='red', colorPoint='red')
for spec in specVals:
ax.loglog(castroData1.refSpec.eref, spec)
return fig, ax | 0.00339 |
def run_all(self, direction):
"""
Runs all registered migrations
        :param direction: Can be one of two values, UP or DOWN
"""
for key in sorted(migration_registry.keys):
self.run(key, direction) | 0.008197 |
def clear_cache(ip=None):
"""Clear the client cache or remove key matching the given ip."""
if ip:
with ignored(Exception):
client = CLIENT_CACHE[ip]
del CLIENT_CACHE[ip]
client.close()
else:
for client in CLIENT_CACHE.values():
with ignored(Exception):
client.close()
CLIENT_CACHE.clear() | 0.002571 |
def epifreq(self,R):
"""
NAME:
epifreq
PURPOSE:
calculate the epicycle frequency at R in this potential
INPUT:
R - Galactocentric radius (can be Quantity)
OUTPUT:
epicycle frequency
HISTORY:
2011-10-09 - Written - Bovy (IAS)
"""
return nu.sqrt(self.R2deriv(R,0.,use_physical=False)\
-3./R*self.Rforce(R,0.,use_physical=False)) | 0.036778 |
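For reference, the quantity whose square root is returned above is the standard epicyclic frequency; since Rforce returns the radial force (the negative radial potential gradient), the expression under the square root corresponds to the usual formula (restated by the editor, not taken from the snippet):

\kappa^2(R) = \left.\frac{\partial^2 \Phi}{\partial R^2}\right|_{z=0} + \left.\frac{3}{R}\,\frac{\partial \Phi}{\partial R}\right|_{z=0}

which the code evaluates as R2deriv(R, 0) - (3/R) * Rforce(R, 0).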
def preview(self, stream=sys.stdout):
"""A quick preview of docpie. Print all the parsed object"""
write = stream.write
write(('[Quick preview of Docpie %s]' % self._version).center(80, '='))
write('\n')
write(' sections '.center(80, '-'))
write('\n')
write(self.usage_text)
write('\n')
option_sections = self.option_sections
if option_sections:
write('\n')
write('\n'.join(option_sections.values()))
write('\n')
write(' str '.center(80, '-'))
write('\n[%s]\n' % self.usage_name)
for each in self.usages:
write(' %s\n' % each)
write('\n[Options:]\n\n')
for title, sections in self.options.items():
if title:
full_title = '%s %s' % (title, self.option_name)
else:
full_title = self.option_name
write(full_title)
write('\n')
for each in sections:
write(' %s\n' % each)
write('\n')
write(' repr '.center(80, '-'))
write('\n[%s]\n' % self.usage_name)
for each in self.usages:
write(' %r\n' % each)
write('\n[Options:]\n\n')
for title, sections in self.options.items():
if title:
full_title = '%s %s' % (title, self.option_name)
else:
full_title = self.option_name
write(full_title)
write('\n')
for each in sections:
write(' %r\n' % each)
write('\n')
write(' auto handlers '.center(80, '-'))
write('\n')
for key, value in self.extra.items():
write('%s %s\n' % (key, value)) | 0.00112 |
def validate_request_timestamp(req_body, max_diff=150):
"""Ensure the request's timestamp doesn't fall outside of the
app's specified tolerance.
Returns True if this request is valid, False otherwise.
:param req_body: JSON object parsed out of the raw POST data of a request.
:param max_diff: Maximum allowable difference in seconds between request
timestamp and system clock. Amazon requires <= 150 seconds for
published skills.
"""
time_str = req_body.get('request', {}).get('timestamp')
if not time_str:
log.error('timestamp not present %s', req_body)
return False
req_ts = datetime.strptime(time_str, "%Y-%m-%dT%H:%M:%SZ")
diff = (datetime.utcnow() - req_ts).total_seconds()
if abs(diff) > max_diff:
log.error('timestamp difference too high: %d sec', diff)
return False
return True | 0.001126 |
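A self-contained illustration of the tolerance check described above (logging and the request-body unwrapping are omitted; the timestamp format string is the one used in the function).
from datetime import datetime

def within_tolerance(time_str, max_diff=150):
    # Reject timestamps further than max_diff seconds from the system clock.
    req_ts = datetime.strptime(time_str, "%Y-%m-%dT%H:%M:%SZ")
    return abs((datetime.utcnow() - req_ts).total_seconds()) <= max_diff

now_str = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
assert within_tolerance(now_str)                      # fresh request passes
assert not within_tolerance("2000-01-01T00:00:00Z")   # stale request fails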
async def _manage_connection(self):
"""Internal coroutine for managing the client connection."""
try:
while True:
message = await self._con.recv()
try:
unpacked = unpack(message)
except Exception: # pylint:disable=broad-except;This is a background worker
self._logger.exception("Corrupt message received")
continue
if not VALID_SERVER_MESSAGE.matches(unpacked):
self._logger.warning("Dropping invalid message from server: %s", unpacked)
continue
                # Don't block until all callbacks have finished since one of
# those callbacks may call self.send_command, which would deadlock
# since it couldn't get the response until it had already finished.
if not await self._manager.process_message(unpacked, wait=False):
self._logger.warning("No handler found for received message, message=%s", unpacked)
except asyncio.CancelledError:
self._logger.info("Closing connection to server due to stop()")
finally:
await self._manager.process_message(dict(type='event', name=self.DISCONNECT_EVENT, payload=None))
await self._con.close() | 0.006662 |
def _WriteIfcfg(self, interfaces, logger):
"""Write ifcfg files for multi-NIC support.
Overwrites the files. This allows us to update ifcfg-* in the future.
Disable the network setup to override this behavior and customize the
configurations.
Args:
interfaces: list of string, the output device names to enable.
logger: logger object, used to write to SysLog and serial port.
"""
for interface in interfaces:
interface_config = os.path.join(
self.network_path, 'ifcfg-%s' % interface)
interface_content = [
'# Added by Google.',
'STARTMODE=hotplug',
'BOOTPROTO=dhcp',
'DHCLIENT_SET_DEFAULT_ROUTE=yes',
'DHCLIENT_ROUTE_PRIORITY=10%s00' % interface,
'',
]
with open(interface_config, 'w') as interface_file:
interface_file.write('\n'.join(interface_content))
logger.info('Created ifcfg file for interface %s.', interface) | 0.00516 |
def clear(self, job_id=None, force=False):
"""
        Clear the queue and the job data. If job_id is not given, clear out all
        jobs marked COMPLETED or FAILED. If job_id is given, clear out the given job's
data. This function won't do anything if the job's state is not COMPLETED or FAILED.
:type job_id: NoneType or str
:param job_id: the job_id to clear. If None, clear all jobs.
:type force: bool
:param force: If True, clear the job (or jobs), even if it hasn't completed or failed.
"""
s = self.sessionmaker()
q = self._ns_query(s)
if job_id:
q = q.filter_by(id=job_id)
# filter only by the finished jobs, if we are not specified to force
if not force:
q = q.filter(
or_(ORMJob.state == State.COMPLETED, ORMJob.state == State.FAILED))
q.delete(synchronize_session=False)
s.commit()
s.close() | 0.00523 |
def to_dict(self, omit=()):
"""
Return a (shallow) copy of self cast to a dictionary,
optionally omitting some key/value pairs.
"""
result = self.__dict__.copy()
for key in omit:
if key in result:
del result[key]
return result | 0.006452 |