content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---|
import etcd3
import etcdgo.config
def get_config(client, config_type, basefolder="/config"):
"""
Return an object that can be used to push/pull configurations inside
an etcd database.
Examples:
import etcd3
import etcdgo
client = etcd3.Etcd3Client()
# push a json configuration inside database
config = etcdgo.get_config(client, "json")
config.push("myconfig", "myfile.json")
# push a yaml configuration inside database
config = etcdgo.get_config(client, "yaml")
config.push("myconfig", "myfile.yaml")
# pull data from etcd database
data = config.pull("myconfig")
Args:
client (etcd3.Etcd3Client): etcd client object.
        config_type (str): configuration type. Supported: json, yaml, ini.
basefolder (str): root of the configuration inside the etcd database.
Returns:
Config: object to push/pull configurations inside an etcd database.
"""
if not client or not isinstance(client, etcd3.Etcd3Client):
raise ValueError("client must be of type etcd3.Etcd3Client")
if not config_type or not isinstance(config_type, str):
raise ValueError("config_type must be a string")
obj = None
if config_type.lower() == "json":
obj = etcdgo.config.JsonConfig(client, basefolder=basefolder)
elif config_type.lower() == "yaml":
obj = etcdgo.config.YamlConfig(client, basefolder=basefolder)
elif config_type.lower() == "ini":
obj = etcdgo.config.IniConfig(client, basefolder=basefolder)
else:
raise NotImplementedError("'%s' format is not supported" % config_type)
return obj
|
db0eabbe025a924a18188cbc02c0fe934531986a
| 35,035 |
import h5py
from typing import Dict
def get_file_attribute_dtypes(filename):
    # type: (str) -> Dict[str, str]
    """
    Get the dtypes of the attributes of the file
    :param filename: path to the HDF5 file
    :return: mapping from attribute name to the type name of its value
    """
with h5py.File(filename, 'r') as infile:
return {key: type(value).__name__ for key, value in infile.attrs.items()}
|
ae03efa9210f898cf3fc120a87e313c1edbeae6e
| 35,036 |
import hug
def default_input_format(content_type='application/json', apply_globally=False, api=None):
    """A decorator that allows you to override the default input format for an API"""
def decorator(formatter):
formatter = hug.output_format.content_type(content_type)(formatter)
if apply_globally:
hug.defaults.input_format[content_type] = formatter
else:
apply_to_api = hug.API(api) if api else hug.api.from_object(formatter)
apply_to_api.http.set_input_format(content_type, formatter)
return formatter
return decorator
|
1bbfb2cb23dbb2353804e701938d005aea941e86
| 35,037 |
import inspect
from cherrypy import dispatch, request
def popargs(*args, **kwargs):
"""A decorator for _cp_dispatch
(cherrypy.dispatch.Dispatcher.dispatch_method_name).
Optional keyword argument: handler=(Object or Function)
Provides a _cp_dispatch function that pops off path segments into
cherrypy.request.params under the names specified. The dispatch
is then forwarded on to the next vpath element.
Note that any existing (and exposed) member function of the class that
popargs is applied to will override that value of the argument. For
instance, if you have a method named "list" on the class decorated with
popargs, then accessing "/list" will call that function instead of popping
it off as the requested parameter. This restriction applies to all
_cp_dispatch functions. The only way around this restriction is to create
a "blank class" whose only function is to provide _cp_dispatch.
If there are path elements after the arguments, or more arguments
are requested than are available in the vpath, then the 'handler'
keyword argument specifies the next object to handle the parameterized
request. If handler is not specified or is None, then self is used.
If handler is a function rather than an instance, then that function
will be called with the args specified and the return value from that
function used as the next object INSTEAD of adding the parameters to
cherrypy.request.args.
This decorator may be used in one of two ways:
As a class decorator:
@cherrypy.popargs('year', 'month', 'day')
class Blog:
def index(self, year=None, month=None, day=None):
#Process the parameters here; any url like
#/, /2009, /2009/12, or /2009/12/31
#will fill in the appropriate parameters.
def create(self):
#This link will still be available at /create. Defined functions
#take precedence over arguments.
Or as a member of a class:
class Blog:
_cp_dispatch = cherrypy.popargs('year', 'month', 'day')
#...
The handler argument may be used to mix arguments with built in functions.
For instance, the following setup allows different activities at the
day, month, and year level:
class DayHandler:
def index(self, year, month, day):
#Do something with this day; probably list entries
def delete(self, year, month, day):
#Delete all entries for this day
@cherrypy.popargs('day', handler=DayHandler())
class MonthHandler:
def index(self, year, month):
#Do something with this month; probably list entries
def delete(self, year, month):
#Delete all entries for this month
@cherrypy.popargs('month', handler=MonthHandler())
class YearHandler:
def index(self, year):
#Do something with this year
#...
@cherrypy.popargs('year', handler=YearHandler())
class Root:
def index(self):
#...
"""
# Since keyword arg comes after *args, we have to process it ourselves
# for lower versions of python.
handler = None
handler_call = False
for k, v in kwargs.items():
if k == 'handler':
handler = v
else:
raise TypeError(
"cherrypy.popargs() got an unexpected keyword argument '{0}'"
.format(k)
)
if handler is not None \
and (hasattr(handler, '__call__') or inspect.isclass(handler)):
handler_call = True
def decorated(cls_or_self=None, vpath=None):
if inspect.isclass(cls_or_self):
# cherrypy.popargs is a class decorator
cls = cls_or_self
setattr(cls, dispatch.Dispatcher.dispatch_method_name, decorated)
return cls
# We're in the actual function
self = cls_or_self
parms = {}
for arg in args:
if not vpath:
break
parms[arg] = vpath.pop(0)
if handler is not None:
if handler_call:
return handler(**parms)
else:
request.params.update(parms)
return handler
request.params.update(parms)
# If we are the ultimate handler, then to prevent our _cp_dispatch
# from being called again, we will resolve remaining elements through
# getattr() directly.
if vpath:
return getattr(self, vpath.pop(0), None)
else:
return self
return decorated
|
9139de9770e5e295656331a31d44ca38d4dcecbc
| 35,038 |
def to_pass(line):
"""
Replace a line of code with a pass statement, with
the correct number of leading spaces
Arguments
----------
line : str, line of code
Returns
----------
passed : str, line of code with same leading spaces
but code replaced with pass statement
"""
# the number of leading spaces on the line
spaces = len(line) - len(line.lstrip(' '))
# replace statement with pass and correct leading spaces
passed = (' ' * spaces) + 'pass'
return passed
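# Hedged usage sketch (not part of the original snippet): indentation is preserved
# while the statement itself is replaced.
assert to_pass("    x = 1") == "    pass"
assert to_pass("x = 1") == "pass"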
|
f8444ecc38523aaef13d535258974881956e30b9
| 35,039 |
import numpy
def reverse_sort_C(C, sorted_index):
"""
Perform the reverse of sort described in sort_by_sorted_index, on rows in a numpy array.
Args:
C (numpy.array): array with C.shape[0] = len(sorted_index)
sorted_index (list of ints): desired order for rows of C
"""
m,n = C.shape
C_new = numpy.zeros(C.shape)
for i in range(len(sorted_index)):
row = sorted_index[i]
for j in range(n):
C_new[row][j] = C[i][j]
return C_new
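# Hedged usage sketch (not part of the original snippet): applying reverse_sort_C to rows
# already reordered by sorted_index recovers the original array.
_C = numpy.arange(6.0).reshape(3, 2)
_order = [2, 0, 1]
assert numpy.allclose(reverse_sort_C(_C[_order], _order), _C)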
|
a0865dba6479104bb1442ea185ec7913ed8cb53c
| 35,040 |
from functools import wraps
def auto_reconnect_connection(func):
"""
Attempt to safely reconnect when an error is hit that resembles the
bouncer disconnecting the client due to a timeout/etc.
"""
@wraps(func)
def inner(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
        except Exception as e:
if not can_reconnect(e):
raise
self.close(reconnect=True)
return func(self, *args, **kwargs)
return inner
|
88d69a885004d4c683aa653d8c79d43fb9408476
| 35,041 |
def windices_of_references(string, sen_dict):
"""
returns a list of word/sentence indices for all coreferences to
the given string in sen_dict.
returns [(0,0,0)] if there were no coreferences found.
"""
indices = []
coreferences = coreferences_for(string, sen_dict)
if not coreferences:
return [(0, 0, 0)]
for ref_pair in coreferences:
for ref in ref_pair:
sen_id = ref[1]
start_index = ref[3]
end_index = ref[4]
interval = (int(sen_id), int(start_index), int(end_index))
indices.append(interval)
return list(set(indices))
|
2bf13beb67aa3e519fb40c06d59a018f89037890
| 35,042 |
import numpy as np
def radius(x,y,z,xa,ya) :
"""
Compute distances between the control points (x,y) and a potential center (xa, ya)
in UTM coord, and sort the results depending on the elevation
of the control points (z)
Parameters
----------
x,y,z : UTM coordinates and Elevation of the control point
xa, ya : UTM coordinates of the potential center ("a" for apex)
Returns
-------
r: Distance between the control point and the potential center
("r" for Radius since we are dealing with conical shapes)
z: Elevation of the control point
Both r and z are sorted depending on the elevation.
Example
-------
radius_sorted, Elev_sorted = radius(control_xUTM, control_yUTM,control_z,apex_coordX,apex_coordY)
"""
r = np.sqrt( (x-xa)**2 + (y-ya)**2 )
index = np.argsort(r)
return r[index], np.array(z)[index]
|
3bbe20677dfd921c6c6d561f34b76ed54a12d266
| 35,043 |
import numpy as np
from scipy.spatial import KDTree
from scipy.stats import mode
def remove_labels(data):
    """
    Remove a fixed fraction (63%) of the training labels. To impute them, use the
    k = sqrt(len(rest)) nearest neighbors (L1 distance) among the kept labels and
    take the mode of their labels.
    """
# "Remove" labels
# lost_idx = np.random.choice(
# len(data.y_train), size=int(len(data.y_train) - np.sqrt(len(data.y_train))))
lost_idx = np.random.choice(
len(data.y_train), size=int(0.63 * len(data.y_train)), replace=False)
X_lost = data.x_train[lost_idx]
X_rest = np.delete(data.x_train, lost_idx, axis=0)
y_lost = data.y_train[lost_idx]
y_rest = np.delete(data.y_train, lost_idx, axis=0)
if len(X_lost.shape) == 1:
X_lost = X_lost.reshape(1, -1)
if len(X_rest.shape) == 1:
X_rest = X_rest.reshape(1, -1)
# Impute data
for i in range(len(X_lost)):
tree = KDTree(X_rest)
d, idx = tree.query([X_lost[i]], k=int(np.sqrt(len(X_rest))), p=1)
y_lost[i] = mode(y_rest[idx][0])[0][0]
print('Ratio =', round(len(X_rest) / len(data.y_train), 2))
print('Total =', len(X_lost) + len(X_rest))
data.x_train = np.concatenate((X_lost, X_rest), axis=0)
data.y_train = np.concatenate((y_lost, y_rest), axis=0)
return data, 0.8 * len(X_rest) / (len(X_rest) + len(X_lost))
|
4a13ab58309c7ca104375f47c4cdb6dee076fa19
| 35,044 |
import asyncio
async def async_setup(hass, config):
"""Set up the Ais Files platform."""
# register services
async def async_transfer_file(call):
if "path" not in call.data or "name" not in call.data:
return
await _async_transfer_file(hass, call.data["path"], call.data["name"])
async def async_remove_file(call):
if "path" not in call.data:
return
await _async_remove_file(hass, call.data["path"])
async def async_refresh_files(call):
await _async_refresh_files(hass)
async def async_pick_file(call):
if "idx" not in call.data:
return
await _async_pick_file(hass, call.data["idx"])
async def async_change_logger_settings(call):
await _async_change_logger_settings(hass, call)
async def async_get_db_log_settings_info(call):
await _async_get_db_log_settings_info(hass, call)
async def async_check_db_connection(call):
await _async_check_db_connection(hass, call)
hass.services.async_register(DOMAIN, "pick_file", async_pick_file)
hass.services.async_register(DOMAIN, "refresh_files", async_refresh_files)
hass.services.async_register(DOMAIN, "transfer_file", async_transfer_file)
hass.services.async_register(DOMAIN, "remove_file", async_remove_file)
hass.services.async_register(
DOMAIN, "change_logger_settings", async_change_logger_settings
)
hass.services.async_register(
DOMAIN, "check_db_connection", async_check_db_connection
)
hass.services.async_register(
DOMAIN, "get_db_log_settings_info", async_get_db_log_settings_info
)
hass.http.register_view(FileUpladView)
return True
|
22bec0569edf73774100b5f8a70dcbd6d6d65de3
| 35,045 |
from typing import Iterable
import itertools
def expand(curr_state: State, params: FindPathParams) -> Iterable[Identifier]:
"""
    Expand a state into its children states
"""
per_agent_expansion = []
for agent in curr_state.identifier.actual:
agent: Agent
# if an agent is colliding, expand it fully
if curr_state.collision_set.is_colliding(agent):
res = expand_position(agent, params.grid)
# otherwise, expand following the individually optimal path
else:
res = params.optimal_path.best_move(agent)
per_agent_expansion.append(res)
new_identifiers = itertools.product(*per_agent_expansion)
return [
Identifier(part, part)
for part in new_identifiers
]
|
1aebb5701b22d26a250aded9ee1e8aede0a87eac
| 35,046 |
import time
def SSDR(poses, rest_pose, num_bones, sparseness=4, max_iterations=20):
"""
Computes the Smooth Skinning Decomposition with Rigid bones
inputs: poses |num_poses| x |num_verts| x 3 matrix representing coordinates of vertices of each pose
rest_pose |num_verts| x 3 numpy matrix representing the coordinates of vertices in rest pose
num_bones number of bones to create
sparseness max number of bones influencing a single vertex
return: An i x j matrix of bone-vertex weights, where i = # vertices and j = # bones
A length-B list of (length-t lists of bone transformations [R_j | T_j] ), one list for each bone
A list of bone translations for the bones at rest
"""
start_time = time.time()
bone_transforms, rest_bones_t = initialize(poses, rest_pose, num_bones)
for _ in range(max_iterations):
W = update_weight_map(bone_transforms, rest_bones_t, poses, rest_pose, sparseness)
bone_transforms = update_bone_transforms(W, bone_transforms, rest_bones_t, poses, rest_pose)
print("Reconstruction error:", reconstruction_err(poses, rest_pose, bone_transforms, rest_bones_t, W))
end_time = time.time()
print("Done. Calculation took {0} seconds".format(end_time - start_time))
print("Avg reconstruction error:", reconstruction_err(poses, rest_pose, bone_transforms, rest_bones_t, W))
return W, bone_transforms, rest_bones_t
|
b4bf6b7d4a6c184b79ad0dcf2122ba31e1faef4f
| 35,047 |
import itertools
def string_permutations(test_list, list_to_permutate):
"""Takes a list and a set, and returns a list of all the permutations as strings"""
str_perms = [list(permutation) for permutation in itertools.permutations(list_to_permutate)]
return [str(test_list + str_perm) for str_perm in str_perms]
|
b4cee2f34e0382a7cd2b49f5b5f22bc85712731a
| 35,048 |
import numpy as np
def to_cols(d):
    """Make a square matrix with columns equal to 'd'.
    >>> print(ker.to_cols(np.array([1,2,3,4])))
[[1 1 1 1]
[2 2 2 2]
[3 3 3 3]
[4 4 4 4]]
"""
return np.tile(d.reshape(len(d), -1), (1, len(d)))
|
1a5dc76a86d3b5d83b6ac5360af3df041237dbce
| 35,049 |
def captured_sensor(hass):
"""Create a captured today ArloSensor."""
data = _get_named_tuple({"captured_today": [0, 0, 0, 0, 0]})
return _get_sensor(hass, "Captured Today", "captured_today", data)
|
045b78cf8929517b4f15f479503ed82814182cce
| 35,050 |
import matplotlib.pyplot as plt
import numpy as np
def create_icon_axes(fig, ax_position, lw_bars, lw_grid, lw_border, rgrid):
"""
Create a polar axes containing the matplotlib radar plot.
Parameters
----------
fig : matplotlib.figure.Figure
The figure to draw into.
ax_position : (float, float, float, float)
The position of the created Axes in figure coordinates as
(x, y, width, height).
lw_bars : float
The linewidth of the bars.
lw_grid : float
The linewidth of the grid.
lw_border : float
The linewidth of the Axes border.
rgrid : array-like
Positions of the radial grid.
Returns
-------
ax : matplotlib.axes.Axes
The created Axes.
"""
with plt.rc_context({
'axes.edgecolor': MPL_BLUE,
'axes.linewidth': lw_border
}):
ax = fig.add_axes(ax_position)
ax.set_axisbelow(True)
linewidth = 5
N = 1000
n_1 = 6
n_2 = 32
i = np.linspace(0, 1, n_1)
j = np.linspace(0, 1, n_2 + 1, endpoint=True)
II, JJ = np.meshgrid(i, j)
R = 20 * (3)**II
THETA = 2 * np.pi * (JJ + 0.5 / n_2)
XX = R * np.cos(THETA)
YY = R * np.sin(THETA)
m = int(n_2 / 4 * 3 - 1)
n = int(n_1 / 2)
XXX = XX[m:m + 2, n:n + 2]
YYY = YY[m:m + 2, n:n + 2]
x_avg = (XXX.max() + XXX.min()) / 2
a = (XXX.max() - XXX.min()) / 2 * 1.618
y_avg = (YYY.max() + YYY.min()) / 2
ax.set_aspect("equal")
ax.axis("off")
ax.plot(XX[m - 1:m + 3, n - 1:n + 3],
YY[m - 1:m + 3, n - 1:n + 3],
"gray",
linewidth=linewidth,
solid_capstyle="round")
ax.plot(XX[m - 1:m + 3, n - 1:n + 3].T,
YY[m - 1:m + 3, n - 1:n + 3].T,
"gray",
linewidth=linewidth,
solid_capstyle="round")
ax.set_xlim([x_avg - a, x_avg + a])
ax.set_ylim([y_avg - a, y_avg + a])
ax.plot(XXX, YYY, "green", linewidth=linewidth, solid_capstyle="round")
ax.plot(XXX.T,
YYY.T,
"green",
linewidth=linewidth,
solid_capstyle="round")
xi = np.linspace(-1.0, 1.0, N)
eta = np.linspace(-1.0, 1.0, N)
XI, ETA = np.meshgrid(xi, eta)
shape_funcs = np.array([
1 / 4 * (1 + XI) * (1 + ETA),
1 / 4 * (1 - XI) * (1 + ETA),
1 / 4 * (1 + XI) * (1 - ETA),
1 / 4 * (1 - XI) * (1 - ETA),
])
XXXX = np.einsum(shape_funcs, [0, 1, 2], XXX.reshape((-1, )), [0])
YYYY = np.einsum(shape_funcs, [0, 1, 2], YYY.reshape((-1, )), [0])
ZZZZ = (XXXX - XXXX.min()) * (YYYY - YYYY.min() + a * 0.5)
ax.contourf(XXXX, YYYY, ZZZZ)
return ax
|
1ea5e6248ab6b9053afe543d4ed3d8d31ec02d57
| 35,051 |
import time
import sklearn.ensemble
import sklearn.metrics
import sklearn.neighbors
def baseline_multiclass(train_data, train_labels, test_data, test_labels, args):
"""Train various classifiers to get a baseline."""
clf, train_accuracy, test_accuracy, train_f1, test_f1, exec_time = [], [], [], [], [], []
clf.append(sklearn.neighbors.KNeighborsClassifier(n_neighbors=15, n_jobs=8))
clf.append(
sklearn.ensemble.RandomForestClassifier(n_jobs=8, verbose=10,
random_state=args.seed))
for i, c in enumerate(clf):
t_start = time.process_time()
c.fit(train_data, train_labels)
train_pred = c.predict(train_data)
test_pred = c.predict(test_data)
train_accuracy.append('{:5.2f}'.format(
100 * sklearn.metrics.accuracy_score(train_labels, train_pred)))
test_accuracy.append('{:5.2f}'.format(
100 * sklearn.metrics.accuracy_score(test_labels, test_pred)))
train_f1.append('{:5.2f}'.format(100 * sklearn.metrics.f1_score(
train_labels, train_pred, average='weighted')))
test_f1.append('{:5.2f}'.format(100 * sklearn.metrics.f1_score(
test_labels, test_pred, average='weighted')))
exec_time.append('{:5.2f}'.format(time.process_time() - t_start))
print('Train accuracy: {}'.format(' '.join(train_accuracy)))
print('Test accuracy: {}'.format(' '.join(test_accuracy)))
print('Train F1 (weighted): {}'.format(' '.join(train_f1)))
print('Test F1 (weighted): {}'.format(' '.join(test_f1)))
print('Execution time: {}'.format(' '.join(exec_time)))
return train_accuracy, test_accuracy, train_f1, test_f1, exec_time, clf
|
eb1a63b1921d54faa1141becdcc5d13716b08b7c
| 35,052 |
def zeno_data(word: str)->[str]:
"""returns all available zeno data"""
    result = [None] * 17  # sfi, d, u, f, gr1 ... gr13
for pos in range(zeno.nrows):
if (word == zeno.cell(pos,0).value):
result[0] = zeno.cell(pos,1).value #sfi
result[1] = zeno.cell(pos,2).value #d
result[2] = zeno.cell(pos,3).value #u
result[3] = zeno.cell(pos,4).value #f
result[4] = zeno.cell(pos,5).value #gr1
result[5] = zeno.cell(pos,6).value #gr2
result[6] = zeno.cell(pos,7).value #gr3
result[7] = zeno.cell(pos,8).value #gr4
result[8] = zeno.cell(pos,9).value #gr5
result[9] = zeno.cell(pos,10).value #gr6
result[10] = zeno.cell(pos,11).value #gr7
result[11] = zeno.cell(pos,12).value #gr8
result[12] = zeno.cell(pos,13).value #gr9
result[13] = zeno.cell(pos,14).value #gr10
result[14] = zeno.cell(pos,15).value #gr11
result[15] = zeno.cell(pos,16).value #gr12
result[16] = zeno.cell(pos,17).value #gr13
break
return result
|
12981f05acfe9c7d272f7f907ec0ec51ddcf0d6a
| 35,053 |
from flask import request
def transfer():
"""Logic for transferring $$"""
data = decode_url(request.get_data().decode())
otheruser = data.get("name", None)
amount = data.get("amount", None)
if not (otheruser and get_user(otheruser)):
return index(f"Other user is not found")
if not request.cookies.get("userID"):
return index(f"Please login")
curr_user = get_user(request.cookies.get("userID"))
if amount is None or curr_user.get_amount() < amount:
return index(f"Amount is invalid / You do not have enough money")
user_data = get_user(otheruser)
user_data.money += amount
curr_user.money -= amount
db.session.commit()
return index("Successfully Transferred")
|
9afbda60cfb193a2d2852dfdc1accebe6d4fb845
| 35,054 |
from typing import Optional
def get_object_detection_by_id(odid: int) -> Optional[ObjectDetection]:
"""
Gets an object detection by id.
:param odid: The object detection's id.
:return: An object detection or None.
"""
try:
return ObjectDetection.objects.get(id=odid)
except ObjectDetection.DoesNotExist:
return None
|
3d3605326aa507d856c055edda787d2d407ac673
| 35,055 |
from cacao_accounting.contabilidad.registros.entidad import RegistroEntidad
from cacao_accounting.database import Entidad
def activar_entidad(id_entidad):
"""Estable una entidad como inactiva."""
REGISTRO = RegistroEntidad()
TRANSACCION = obtener_registro_desde_uuid(tabla=Entidad, uuid=id_entidad)
TRANSACCION.accion = "actualizar"
TRANSACCION.tipo = "principal"
TRANSACCION.nuevo_estatus = "activo"
REGISTRO.ejecutar_transaccion(TRANSACCION)
return LISTA_ENTIDADES
|
10d9da05b464c924471cb316b4b2dbd9e778a79a
| 35,056 |
import warnings
import pandas as pd
def trans_expected(clr, chromosomes, chunksize=1000000, use_dask=False):
    """
    Aggregate the signal in interchromosomal blocks.
    Can be used as a background for contact frequencies between chromosomes.
Parameters
----------
clr : cooler.Cooler
Cooler object
chromosomes : list of str
List of chromosome names
chunksize : int, optional
Size of dask chunks
use_dask : bool, optional
option to use dask
Returns
-------
pandas.DataFrame that stores total number of
interactions between a pair of chromosomes: 'balanced.sum',
corresponding number of bins involved
in the inter-chromosomal interactions: 'n_valid',
and a ratio 'balanced.avg = balanced.sum/n_valid', that is
the actual value of expected for every interchromosomal pair.
"""
warnings.warn(
"`cooltools.expected.trans_expected()` is deprecated in 0.3.2, will be removed subsequently. "
"Use `cooltools.expected.blocksum_pairwise()` instead.",
category=FutureWarning,
stacklevel=2,
)
if use_dask:
# pixels = daskify(clr.filename, clr.root + '/pixels', chunksize=chunksize)
raise NotImplementedError("To be implemented once dask supports MultiIndex")
# turn chromosomes into supports:
chrom_supports = [ (chrom, 0, None) for chrom in chromosomes ]
    # use balanced transformation only:
balanced_transform = {
"balanced": \
lambda pixels: pixels["count"] * pixels["weight1"] * pixels["weight2"]
}
# trans_expected is simply a wrapper around new blocksum_pairwise
# but it preserved the interface of the original trans_expected
trans_records = blocksum_pairwise(clr,
supports=chrom_supports,
transforms=balanced_transform,
chunksize=chunksize)
# trans_records are inter-chromosomal only,
# changing trans_records keys to reflect that:
# region[0] for a region = (chrom, start, stop)
trans_records = {
( region1[0], region2[0] ): val for ( region1, region2 ), val in trans_records.items()
}
# turn trans_records into a DataFrame with
# MultiIndex, that stores values of 'balanced.sum'
# and 'n_valid' values for each pair of chromosomes:
trans_df = pd.DataFrame.from_dict( trans_records, orient="index" )
trans_df.index.rename( ["chrom1","chrom2"], inplace=True )
# an alternative way to get from records to DataFrame, as in CLI expected:
# result = pd.DataFrame(
# [
# {"chrom1": s1[0], "chrom2": s2[0], **rec}
# for (s1, s2), rec in trans_records.items()
# ],
# columns=["chrom1", "chrom2", "n_valid", "count.sum", "balanced.sum"],
# )
# the actual expected is balanced.sum/n_valid:
trans_df["balanced.avg"] = trans_df["balanced.sum"] / trans_df["n_valid"]
return trans_df
|
809d03501f6d6018da5133b65665f5046be36ca7
| 35,057 |
import numpy as np
def exp_trans(base=None, **kwargs):
    """
    Create an exponential transform class for *base*
    This is the inverse of the log transform.
Parameters
----------
base : float
Base of the logarithm
kwargs : dict
Keyword arguments passed onto
:func:`trans_new`. Should not include
the `transform` or `inverse`.
Returns
-------
out : type
Exponential transform class
"""
# default to e
if base is None:
name = 'power_e'
base = np.exp(1)
else:
name = 'power_{}'.format(base)
# transform function
def transform(x):
return base ** x
# inverse function
def inverse(x):
return np.log(x)/np.log(base)
return trans_new(name, transform, inverse, **kwargs)
|
fbd7ef154be3675d8b7010ae700e5b34d0f398c2
| 35,058 |
def broadcastable_to_str(b):
"""Return string representation of broadcastable."""
named_broadcastable = {
(): "scalar",
(False,): "vector",
(False, True): "col",
(True, False): "row",
(False, False): "matrix",
}
if b in named_broadcastable:
bcast = named_broadcastable[b]
else:
bcast = ""
return bcast
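# Hedged usage sketch (not part of the original snippet):
#   broadcastable_to_str((False, True)) -> 'col'
#   broadcastable_to_str((True, True))  -> ''   (pattern not named above)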
|
35dbe968a8341d076a264333c68fb597212439bc
| 35,059 |
def avg_weapon_count_character(db):
"""Returns the average number of weapons per character from MongoDB
database.
Args:
db (pymongo.database.Database): MongoDB database
Returns:
(float) Average number of weapons per character
"""
agg_dict = [
{
"$lookup":
{
'from': 'armory.weapon',
'localField': 'inventory',
'foreignField': 'pk',
'as': 'weapons_doc'
}
},
{
"$project":
{
"numberOfWeapons": {"$size": "$weapons_doc"}
}
},
{
"$group":
{
"_id": None,
"avgWeapon": {"$avg": "$numberOfWeapons"}
}
}
]
agg_res = db["charactercreator.character"].aggregate(agg_dict)
return list(agg_res)[0]["avgWeapon"]
|
0f65cad267d7b1d52730134e98e738dda56b2430
| 35,060 |
def partition(sort_list, low, high):
"""
All the elements smaller than the pivot
will be on the left side of the list
and all the elements on the right side
will be greater than the pivot.
"""
i = (low - 1)
pivot = sort_list[high]
for j in range(low, high):
if sort_list[j] <= pivot:
i += 1
sort_list[i], sort_list[j] = sort_list[j], sort_list[i]
sort_list[i+1], sort_list[high] = sort_list[high], sort_list[i+1]
return (i+1)
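# Hedged companion sketch (not part of the original snippet): the recursive quicksort
# this partition step is usually paired with.
def quick_sort(sort_list, low, high):
    if low < high:
        pivot_index = partition(sort_list, low, high)
        quick_sort(sort_list, low, pivot_index - 1)
        quick_sort(sort_list, pivot_index + 1, high)
# Example: data = [5, 2, 9, 1]; quick_sort(data, 0, len(data) - 1) sorts data in place.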
|
3ae3a569fc5c3968ae047bf20df7a7a59bdfb0cf
| 35,061 |
from argo.workflows.client import ApiClient
from typing import Dict
from typing import Any
def sanitize_for_serialization(obj: Dict[str, Any]) -> Dict[str, Any]:
"""Return object sanitized for serialization.
May be used with a V1alpha1Workflow to sanitize it
back to the original state (i.e. per manifest).
"""
cl = ApiClient()
return cl.sanitize_for_serialization(obj)
|
5a975e347ee529ac4db7777a0bc31750092f5442
| 35,062 |
def is_valid_degree_sequence(deg_sequence, method='hh'):
"""Returns True if deg_sequence is a valid degree sequence.
A degree sequence is valid if some graph can realize it.
Parameters
----------
deg_sequence : list
A list of integers where each element specifies the degree of a node
in a graph.
method : "eg" | "hh"
The method used to validate the degree sequence.
"eg" corresponds to the Erdős-Gallai algorithm, and
"hh" to the Havel-Hakimi algorithm.
Returns
-------
valid : bool
True if deg_sequence is a valid degree sequence and False if not.
References
----------
Erdős-Gallai
[EG1960]_, [choudum1986]_
Havel-Hakimi
[havel1955]_, [hakimi1962]_, [CL1996]_
"""
if method == 'eg':
valid = is_valid_degree_sequence_erdos_gallai(deg_sequence)
elif method == 'hh':
valid = is_valid_degree_sequence_havel_hakimi(deg_sequence)
else:
msg = "`method` must be 'eg' or 'hh'"
raise nx.NetworkXException(msg)
return valid
|
0ce013847902b002dcde32fbc0e818a5a506028c
| 35,063 |
def get_vpc_id(ec2, subnet_id: str = None) -> str:
"""Returns VPC ID that should be used for deployment."""
if subnet_id:
vpc_id = Subnet.get_by_id(ec2, subnet_id).vpc_id
else:
default_vpc = Vpc.get_default_vpc(ec2)
if not default_vpc:
raise ValueError('Default VPC not found')
vpc_id = default_vpc.vpc_id
return vpc_id
|
b345b840b0efadec62c68f74f4a69fc06c45b81e
| 35,066 |
from datetime import date, datetime, timedelta
def _get_dates(request):
"""Obtain the start and end dates."""
today = date.today()
date_start = request.POST.get('date_start')
if not date_start:
date_start = today - timedelta(days=1)
else:
date_start = datetime.strptime(date_start, '%Y-%m-%d').date()
date_end = request.POST.get('date_end')
if not date_end:
date_end = today + timedelta(days=1)
else:
date_end = datetime.strptime(date_end, '%Y-%m-%d').date() + timedelta(days=1)
return (date_start, date_end)
|
d6313e9175de5e91965d162d066fbbbebe797914
| 35,067 |
import numpy
def detect_model(items: numpy.ndarray) -> int:
"""Detects which logistic model an item matrix fits into.
:param items: an item matrix
:return: an int between 1 and 4 denoting the logistic model of the given item matrix
"""
a, b, c, d = _split_params(items)
if any(d != 1):
return 4
if any(c != 0):
return 3
if len(set(a)) > 1:
return 2
return 1
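# Hedged reading of the checks above (assuming _split_params returns the discrimination,
# difficulty, guessing and upper-asymptote vectors a, b, c, d): any d != 1 implies a 4PL
# model, any c != 0 implies 3PL, varying discriminations a imply 2PL, otherwise 1PL.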
|
f24881dd125638bf559e129e2871863ce24fdb91
| 35,069 |
import typing
def split_value(input_val) -> (typing.Union[dict, list], typing.Union[dict, list]):
"""Split input_val into data for params.yaml and zntrack.json
Parameters
----------
input_val: dict
A dictionary of shape {_type: str, value: any} from ZnJSON
Returns
-------
params_data: dict|list
A dictionary containing the data considered a parameter
input_val: dict|list
A dictionary containing the constant data which is not considered a parameter
"""
if isinstance(input_val, (list, tuple)):
data = [split_value(x) for x in input_val]
params_data, _ = zip(*data)
else:
if input_val["_type"] in ["zn.method"]:
params_data = input_val["value"].pop("kwargs")
params_data["_cls"] = input_val["value"].pop("cls")
else:
# things that are not zn.method and do not have kwargs, such as pathlib, ...
params_data = input_val.pop("value")
return params_data, input_val
|
be858fbfe5e65f02b79d37a54f7fd4d8123d1fc2
| 35,070 |
def assert_http_ok(resp, msg=None):
"""
Ensures the response is returning a HTTP 200.
"""
return assert_equal(resp.status_code, 200, resp.content if msg is None else msg)
|
8758d78ed248aa39c92c5aacdda02d9cf3df3fbe
| 35,071 |
def segnet_vgg13_bn(pretrained=False, progress=True, **kwargs):
"""Constructs a DeepLabV3+ model with a mobilenet backbone.
"""
model = SegNet(arch='segnet_vgg13_bn', **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls['segnet_vgg13_bn'], progress=progress)
model.load_state_dict(state_dict)
return model
|
512f03e3e24bcfd143caacb77a6619c868738da0
| 35,072 |
def New_Dataframe(old_df,indicator_name):
""" create a new dataframe that is composed of only one indicator
Args:
old_df (dataframe): general dataframe from which we extract the new one
        indicator_name (string): Name of the indicator that will compose the new dataframe
Returns:
(dataframe): dataframe composed of only the chosen indicator
"""
return old_df.loc[old_df.Indicator == indicator_name]
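# Hedged usage sketch (not part of the original snippet; assumes pandas):
#   df = pd.DataFrame({"Indicator": ["GDP", "CO2", "GDP"], "Value": [1, 2, 3]})
#   New_Dataframe(df, "GDP")  -> the two rows whose Indicator is "GDP"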
|
5ccd394a01a70b39b64d2a12ed0aac6f39296a0a
| 35,073 |
def get_long_description():
"""Compose a long description for PyPI."""
long_description = None
try:
long_description = read_file('README.rst').decode('utf-8')
changelog = read_file('CHANGES.rst').decode('utf-8')
changelog = "\n".join(first_sections(changelog, '=', 4)) + """
Older versions
==============
The full changelog is available online in CHANGES.rst_.
.. _CHANGES.rst: https://github.com/hibtc/cpymad/blob/master/CHANGES.rst
"""
long_description += '\n' + changelog
except (IOError, UnicodeDecodeError):
pass
return long_description
|
52352c9515f6e04aba2bb85f24a146ca25a42128
| 35,074 |
import re
def _create_matcher(utterance):
"""Create a regex that matches the utterance."""
# Split utterance into parts that are type: NORMAL, GROUP or OPTIONAL
# Pattern matches (GROUP|OPTIONAL): Change light to [the color] {item}
parts = re.split(r'({\w+}|\[[\w\s]+\] *)', utterance)
# Pattern to extract name from GROUP part. Matches {item}
group_matcher = re.compile(r'{(\w+)}')
# Pattern to extract text from OPTIONAL part. Matches [the color]
optional_matcher = re.compile(r'\[([\w ]+)\] *')
pattern = ['^']
for part in parts:
group_match = group_matcher.match(part)
optional_match = optional_matcher.match(part)
# Normal part
if group_match is None and optional_match is None:
pattern.append(part)
continue
# Group part
if group_match is not None:
pattern.append(
r'(?P<{}>[\w ]+?)\s*'.format(group_match.groups()[0]))
# Optional part
elif optional_match is not None:
pattern.append(r'(?:{} *)?'.format(optional_match.groups()[0]))
pattern.append('$')
return re.compile(''.join(pattern), re.I)
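# Hedged usage sketch (not part of the original snippet): the bracketed text becomes
# optional and {item} becomes a named capture group.
_matcher = _create_matcher('Change light to [the color] {item}')
assert _matcher.match('Change light to the color red').group('item') == 'red'
assert _matcher.match('Change light to red').group('item') == 'red'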
|
78b1fc8b2096d5dcc2c2b6dc4112d562b1e365de
| 35,075 |
def change_video_state(player_name, state):
"""
:param player_name: video_player or topic_player or live or vr_live or pic_player or local_player
:param state: play or pause
:return:
"""
response = context.agent.call('ChangeVideoState', player_name, state)
if response.name == 'Fail':
raise ValueError(*response.args)
if len(response.args) == 0:
return None
return response.args[0]
|
d0f64b8e1266eed6df31db9ca2b072877f8e3ba9
| 35,076 |
import codecs
def generate(converter, input_file, format='xml', encoding='utf8'):
"""
Given a converter (as returned by compile()), this function reads
the given input file and converts it to the requested output format.
Supported output formats are 'xml', 'yaml', 'json', or 'none'.
:type converter: compiler.Context
:param converter: The compiled converter.
:type input_file: str
:param input_file: Name of a file to convert.
:type format: str
:param format: The output format.
:type encoding: str
:param encoding: Character encoding of the input file.
:rtype: str
:return: The resulting output.
"""
with codecs.open(input_file, encoding=encoding) as thefile:
return generate_string(converter, thefile.read(), format=format)
|
41a0c12387453b2e58972abe3c0f7d505036bb57
| 35,077 |
import pandas as pd
def compute_SS_TAS(df, cpds_median_score, num_L1000_feats = n_L1000_feats):
"""
Computes both Transcriptional activity score (TAS) and
signature strength per compound based on its replicates across all doses"""
dose_list = list(set(df['dose'].unique().tolist()))[1:7]
for dose in dose_list:
df_dose = df[df['dose'] == dose].copy()
cpds_ss = compute_signature_strength(list(cpds_median_score.keys()), df_dose)
cpds_tas = compute_tas(cpds_ss, cpds_median_score, dose, num_L1000_feats)
sorted_ss = {key:value for key, value in sorted(cpds_ss.items(), key=lambda item: item[0])}
sorted_tas = {key:value for key, value in sorted(cpds_tas.items(), key=lambda item: item[0])}
if dose == 1:
df_cpd_ss = pd.DataFrame.from_dict(sorted_ss, orient='index', columns = ['dose_1'])
df_cpd_tas = pd.DataFrame.from_dict(sorted_tas, orient='index', columns = ['dose_1'])
else:
df_cpd_ss['dose_' + str(dose)] = sorted_ss.values()
df_cpd_tas['dose_' + str(dose)] = sorted_tas.values()
return df_cpd_ss, df_cpd_tas
|
451fce4e361ac8a6144d2d9aefed949d236f0db2
| 35,078 |
def check_args(args):
"""
Checks validity of command line arguments and, in some cases
modifies them a little bit.
:param args: The command-line arguments.
:type args: argparse.ArgumentParser Namespace
:returns: argparse.ArgumentParser Namespace -- The updated command-line
arguments.
"""
if not args.outbase:
print("Must specify an output base filename (--outbase).")
raise SystemExit
# Supports passing a list of aspect files.
if args.aspfile:
args.aspfile = str(args.aspfile).split(',')
    # If the band is not explicitly called, attempt to derive it from filenames.
if not args.band and args.raw6file:
print("Trying to derive band from raw6 filename...")
if '-fd-raw6' in args.raw6file:
args.band = 'FUV'
elif '-nd-raw6' in args.raw6file:
args.band = 'NUV'
else:
print("Unable to parse band from raw6 filename. Specify band on"
" command line using --band.")
raise SystemExit
if not args.band:
print("Band not specified.")
else:
args.band = args.band.upper()
        if args.band not in ["NUV", "FUV"]:
print("Band must be NUV or FUV. ")
raise SystemExit
if not (args.raw6file or args.scstfile) and not args.eclipse:
print ("Must provide raw6 and scst files or specify eclipse.")
raise SystemExit
return args
|
04270a50fce1003ee3960576b60bdcdc21f69767
| 35,079 |
from aiida.engine import Process
from aiida.orm.utils.node import is_valid_node_type_string
from typing import Tuple
def _get_ormclass_from_cls(cls: EntityClsType) -> Tuple[EntityTypes, Classifier]:
"""
Return the correct classifiers for the QueryBuilder from an ORM class.
:param cls: an AiiDA ORM class or backend ORM class.
:param query: an instance of the appropriate QueryBuilder backend.
:returns: the ORM class as well as a dictionary with additional classifier strings
Note: the ormclass_type_string is currently hardcoded for group, computer etc. One could instead use something like
aiida.orm.utils.node.get_type_string_from_class(cls.__module__, cls.__name__)
"""
# pylint: disable=protected-access,too-many-branches,too-many-statements
# Note: Unable to move this import to the top of the module for some reason
classifiers: Classifier
if issubclass(cls, nodes.Node):
classifiers = Classifier(cls.class_node_type) # type: ignore[union-attr]
ormclass = EntityTypes.NODE
elif issubclass(cls, groups.Group):
type_string = cls._type_string
assert type_string is not None, 'Group not registered as entry point'
classifiers = Classifier(GROUP_ENTITY_TYPE_PREFIX + type_string)
ormclass = EntityTypes.GROUP
elif issubclass(cls, computers.Computer):
classifiers = Classifier('computer')
ormclass = EntityTypes.COMPUTER
elif issubclass(cls, users.User):
classifiers = Classifier('user')
ormclass = EntityTypes.USER
elif issubclass(cls, authinfos.AuthInfo):
classifiers = Classifier('authinfo')
ormclass = EntityTypes.AUTHINFO
elif issubclass(cls, comments.Comment):
classifiers = Classifier('comment')
ormclass = EntityTypes.COMMENT
elif issubclass(cls, logs.Log):
classifiers = Classifier('log')
ormclass = EntityTypes.LOG
# Process
# This is a special case, since Process is not an ORM class.
# We need to deduce the ORM class used by the Process.
elif issubclass(cls, Process):
classifiers = Classifier(cls._node_class._plugin_type_string, cls.build_process_type())
ormclass = EntityTypes.NODE
else:
raise ValueError(f'I do not know what to do with {cls}')
if ormclass == EntityTypes.NODE:
is_valid_node_type_string(classifiers.ormclass_type_string, raise_on_false=True)
return ormclass, classifiers
|
c0471544887f614e6d98b90d38c595e0eb3fe5ad
| 35,082 |
import warnings
import numpy as np
def norm_diff(a):
"""Calculate average of (a[i] - a[i+1]) / (a[i] + a[i+1])."""
if len(a) <= 1:
return np.nan
a = a.astype(float)
if np.allclose((a[1:] + a[:-1]), 0.):
return 0.
norm_diffs = (a[1:] - a[:-1]) / (a[1:] + a[:-1])
norm_diffs[(a[1:] == 0) & (a[:-1] == 0)] = 0.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=RuntimeWarning, module="numpy")
avg = np.nanmean(norm_diffs)
return avg
|
1755b357ad9b84e742e0c432db420df9161d8dc5
| 35,083 |
import tensorflow as tf
from tensorflow.python.util import nest
def expand_batch_dims(structure, batch_sizes):
"""Expands the first dimension of each tensor in structure to be batch_sizes.
Args:
structure: A structure (tuple, namedtuple, list, dictionary, ...).
batch_sizes: A 1-D tensor of shapes describing the batch dims.
Returns:
A structure matching the input structure, where each tensor's first
dimension has been expanded to match batch_sizes.
"""
def _helper(tensor):
if isinstance(tensor, tf.Tensor):
shape = tf.shape(tensor)
return tf.reshape(tensor, tf.concat([batch_sizes, shape[1:]], axis=0))
else:
return tensor
return nest.map_structure(_helper, structure)
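# Hedged usage sketch (not part of the original snippet): a tensor of shape [6, 3] with
# batch_sizes=[2, 3] is reshaped to [2, 3, 3]; non-tensor leaves pass through unchanged.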
|
1c72fc555b42d4dbc0f066ba399dc8f4c96b7245
| 35,084 |
def VLBAAIPSName( project, session):
"""
Derive AIPS Name. AIPS file name will be project+session with project
truncated to fit in 12 characters.
* project = project name
* session = session code
"""
################################################################
    Aname = project.strip()[0:12 - len(session)] + session
return Aname
# end VLBAAIPSName
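# Hedged usage sketch (not part of the original snippet): the combined name is capped at
# 12 characters, truncating the project to make room for the session code, e.g.
#   VLBAAIPSName("BM452", "A")           -> "BM452A"
#   VLBAAIPSName("VERYLONGPROJECT", "A") -> "VERYLONGPROA"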
|
1a9009c01f00fbb47d7355fa1b8513177dbd3784
| 35,085 |
import torch
import pytorch_lightning as pl
def test_unbalanced_logging_with_multiple_optimizers(tmpdir):
"""This tests ensures reduction works in unbalanced logging settings."""
class TestModel(MultiOptModel):
actual = {0: [], 1: []}
def training_step(self, batch, batch_idx, optimizer_idx):
out = super().training_step(batch, batch_idx)
loss = out["loss"]
self.log(f"loss_{optimizer_idx}", loss, on_epoch=True)
self.actual[optimizer_idx].append(loss)
return out
model = TestModel()
model.training_epoch_end = None
# Initialize a trainer
trainer = pl.Trainer(
default_root_dir=tmpdir, max_epochs=1, limit_train_batches=5, limit_val_batches=5, weights_summary=None
)
trainer.fit(model)
for k, v in model.actual.items():
assert torch.equal(trainer.callback_metrics[f"loss_{k}_step"], v[-1])
# test loss is properly reduced
torch.testing.assert_allclose(trainer.callback_metrics[f"loss_{k}_epoch"], torch.tensor(v).mean())
|
b1c85a2ddba18af5c0e16c35e67a7b1757616805
| 35,086 |
from typing import Optional
def dot_general(lhs: Array, rhs: Array, dimension_numbers: DotDimensionNumbers,
precision: PrecisionLike = None,
preferred_element_type: Optional[DType] = None) -> Array:
"""More general contraction operator.
Wraps XLA's `DotGeneral
<https://www.tensorflow.org/xla/operation_semantics#dotgeneral>`_
operator.
Args:
lhs: an array
rhs: an array
dimension_numbers: a tuple of tuples of the form
`((lhs_contracting_dims, rhs_contracting_dims),
(lhs_batch_dims, rhs_batch_dims))`
precision: Optional. Either ``None``, which means the default precision for
the backend, a ``lax.Precision`` enum value (``Precision.DEFAULT``,
``Precision.HIGH`` or ``Precision.HIGHEST``) or a tuple of two
``lax.Precision`` enums indicating precision of ``lhs``` and ``rhs``.
preferred_element_type: Optional. Either ``None``, which means the default
accumulation type for the input types, or a datatype, indicating to
accumulate results to and return a result with that datatype.
Returns:
An array containing the result.
"""
contract_dims_seq, batch_dims_seq = dimension_numbers
contract_dims = tuple(map(lambda x: tuple(x), contract_dims_seq))
batch_dims = tuple(map(lambda x: tuple(x), batch_dims_seq))
return dot_general_p.bind(lhs, rhs,
dimension_numbers=(contract_dims, batch_dims),
precision=_canonicalize_precision(precision),
preferred_element_type=preferred_element_type)
|
824edf24f3130de598652e03da3aa2269f9e9b60
| 35,087 |
from typing import Tuple
import numpy as np
import vpype as vp
def random(n: int, area: Tuple[float, float]):
"""
Generate random lines.
By default, 10 lines are randomly placed in a square with corners at (0, 0) and
(10mm, 10mm). Use the `--area` option to specify the destination area.
"""
lines = np.random.rand(n, 2) + 1j * np.random.rand(n, 2)
lines.real *= area[0]
lines.imag *= area[1]
return vp.LineCollection(lines)
|
c3cfda6d9ccf133adf494b30d626d55fbb354b6a
| 35,088 |
from pathlib import Path
def check_path_in_dir(file_path, directory_path):
"""
Check if a file path is in a directory
:param file_path: Full path to a file
:param directory_path: Full path to a directory the file may be in
:return: True if the file is in the directory
"""
directory = Path(directory_path)
parent = Path(file_path).parent
return parent == directory
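# Hedged usage sketch (not part of the original snippet): only the immediate parent
# directory is compared, so files in nested subdirectories do not match.
assert check_path_in_dir("/data/run/out.txt", "/data/run")
assert not check_path_in_dir("/data/run/sub/out.txt", "/data/run")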
|
5e96abd89c72ea39a944e75b4548fc20b67892cd
| 35,089 |
import numpy as np
def trace_eyes(im, wnd_pos, wnd_dim, threshold, image_scale, filter_size, color_invert):
    """
    Parameters
    ----------
    im :
        image (numpy array);
    wnd_pos :
        position of the window on the eyes (x, y);
    wnd_dim :
        dimension of the window on the eyes (w, h);
    threshold :
        threshold for ellipse fitting (int).
    image_scale :
    filter_size :
    color_invert :
    Returns
    -------
    """
PAD = 0
cropped = _pad(
im[
wnd_pos[0] : wnd_pos[0] + wnd_dim[0], wnd_pos[1] : wnd_pos[1] + wnd_dim[1]
].copy(),
padding=PAD,
val=255,
)
thresholded = (cropped < threshold).astype(np.uint8)
# try:
e = _fit_ellipse(thresholded)
if e is False:
e = (np.nan,) * 10
else:
e = e[0][0] + e[0][1] + (e[0][2],) + e[1][0] + e[1][1] + (e[1][2],)
return np.array(e)
|
651c3a219d9e69d8e261659272f05e6ef9dc2519
| 35,090 |
def get() -> DslContextRegistry:
"""Gets the current active registry that observes DSL definitions."""
return _registry_holder.current
|
31029f0996716266e0f14b5155eadf21c3fbd8ad
| 35,091 |
import cartopy.crs as ccrs
import cartopy.geodesic as cgeo
import matplotlib.pyplot as plt
def scalebar(length,slon='auto',slat='auto',az=90,label=True,ax=None,**kwargs):
"""
Plot scalebar of given length in meters.
Parameters:
length: Length of scalebar in meters
slon: Starting longitude (decimal degrees) for scalebar
slat: Starting latitude (decimal degrees) for scalebar
az = Azimuth of scalebar
label: Boolean for whether to label length of scalebar in km
ax: Axes on which to plot scalebar
Return:
ax: Axes with scalebar plotted
"""
if ax is None:
ax = plt.gca()
geodesic = cgeo.Geodesic() # Set up geodesic calculations
# Get map projection from axes
crs = ax.projection
if (slon=='auto')&(slat=='auto'):
trans = ax.transAxes + ax.transData.inverted()
sx,sy = trans.transform((0.1,0.1))
slon,slat = ccrs.Geodetic().transform_point(sx,sy,src_crs=crs)
# Calculate endpoint for given distance
end = geodesic.direct(
points=[slon,slat],azimuths=az,distances=length)[0]
elon = end[0]
elat = end[1]
mid = geodesic.direct(
points=[slon,slat],azimuths=az,distances=length/2)[0]
clon = mid[0]
clat = mid[1]
# Plot line from start to end
ax.plot([slon,elon],[slat,elat],transform=ccrs.Geodetic(),
**kwargs,linewidth=3)
# Add label with number of km
if label==True:
# Transform lat-lon into axes coordinates
tlon,tlat = crs.transform_point(clon,clat,src_crs=ccrs.Geodetic())
# Add label as annotation
ax.annotate(text=str(round(length/1000,None))+' km',xy=(tlon,tlat),
xytext=(0,3),xycoords='data',textcoords='offset points',
fontsize=7,ha='center')
return(ax)
|
1981816449291cf8cf2ffe7a8afef1ea638dac47
| 35,092 |
def to_str(bytes_or_str):
"""
Return Instance of str
"""
if isinstance(bytes_or_str, bytes):
value = bytes_or_str.decode('utf-8')
else:
value = bytes_or_str
return value
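# Hedged usage sketch (not part of the original snippet): bytes are decoded as UTF-8,
# str instances pass through unchanged.
assert to_str(b"caf\xc3\xa9") == "caf\u00e9"
assert to_str("plain") == "plain"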
|
5578e52f72fa5ee5d629748e0388cde4dffe62ee
| 35,093 |
def flat_out(f, *a, **kw):
"""Flatten the output of target function."""
return f(*a, **kw).flatten()
|
ffe09ffbaae93657fde818de8a03cc17fee962f1
| 35,094 |
from flask import abort, jsonify, make_response
def delete_question(id):
""" Delete question. """
question = Question.query.get(id)
if question is not None:
question.delete()
response = {"message": "Object deleted."}
return make_response(jsonify(response), 200)
abort(404)
|
fd826877712efad3ea1344188f4571fd7e6bd9f3
| 35,095 |
def vgg16_cinic10_bn(pretrained=False, progress=True, **kwargs):
"""
VGG 16-layer model (configuration "D") with batch normalization
Inspired by: https://github.com/geifmany/cifar-vgg/blob/master/cifar100vgg.py to follow
https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=7486599 and then gave up on Dropout in Conv layers
and just used the smaller classifier head and reduced dropout.
"""
# return _vgg(
# cfg="D",
# batch_norm=True,
# smaller_head=True,
# **kwargs,
# )
return _vgg(
"vgg16_cinic10_bn",
"D",
True,
progress=progress,
pretrained_features_only=pretrained,
pretrained=False,
smaller_head=True,
**kwargs,
)
|
f7c86108208352dd89b34fb64c7a9875e594e470
| 35,096 |
import redis
from flask import Flask, request
from flask_session import Session
from .apiv1 import blueprint as api1
def create_app(config_name):
"""
初始化Flask的应用对象,并初始化数据库等内容
:param config_name: str 配置模式的模式的名字 ("develop", "product")
:return:
"""
# 创建app
app = Flask(__name__)
@app.after_request
def after_request(response):
response.headers.add('Access-Control-Allow-Origin', '*')
if request.method == 'OPTIONS':
response.headers['Access-Control-Allow-Methods'] = 'DELETE, GET, POST, PUT'
headers = request.headers.get('Access-Control-Request-Headers')
if headers:
response.headers['Access-Control-Allow-Headers'] = headers
return response
    # Get the configuration class from the configuration mode name
config_class = config_map.get(config_name)
app.config.from_object(config_class)
    # Initialize db with the app
db.init_app(app)
    # Create the redis connection object
global redis_store
redis_store = redis.StrictRedis(host=config_class.REDIS_HOST, port=config_class.REDIS_PORT)
    # Use flask_session to store session data in redis
Session(app)
    # Add CSRF protection to flask
# CSRFProtect(app)
    # Add a custom URL converter to flask
app.url_map.converters["re"] = ReConverter
    # Register blueprints
# from .apis import blueprint as api
app.register_blueprint(api1, url_prefix="/api/v1.0")
# from .apiv2 import blueprint as api2
# app.register_blueprint(api2, url_prefix="/api/v2.0")
    # Use swagger
# Swagger(app)
return app
|
79288b830a2ec8809253ecf98c1b610e1dce3c52
| 35,098 |
from urllib.parse import quote_plus
def add_vote_and_redirect(event, _):
"""
Handle add vote requests and redirect to a info page
:param event: event
:return: redirect to a page explaining that the vote was added
"""
# Save the vote
do_vote(event, None)
redirect_url = "/voted"
# Find the user and determine the redirect page
username = event["pathParameters"]["user"].lower()
user = User()
user_data = user.get_user_by_username(username)
if user_data:
# If the user has a redirect page configured
if user_data["voted_redirect"]:
redirect_url = user_data["voted_redirect"]
elif user_data["voted_message"]:
redirect_url = f"/voted?message={quote_plus(user_data['voted_message'])}"
# Return response
return create_response(302, additional_headers={"Location": redirect_url})
|
7db963fb8a0f17f7e8221038f70e06f14dd931fd
| 35,099 |
import json
import numpy as np
def read_json_vectors(filename):
"""Return np array of vectors from json sources"""
vectors = []
with open(filename) as json_file:
json_data = json.load(json_file)
for v in json_data:
vectors.append(v)
print("Read {} vectors from {}".format(len(vectors), filename))
np_array = np.array(vectors)
return np_array
|
04010096dda8b210fad260ef955ce3c5c393e51a
| 35,100 |
from lmfit import Parameters, minimize
def optimize_density_and_bkg_scaling(data_pattern, bkg_pattern, composition,
initial_density, initial_bkg_scaling, r_cutoff, iterations=2,
use_modification_fcn=False):
"""
This function tries to find the optimum density in background scaling with the given parameters. The equations
behind the optimization are presented equ (47-50) in the Eggert et al. 2002 paper.
:param data_pattern: original data pattern
:param bkg_pattern: original background pattern
:param composition: composition as a dictionary with the elements as keys and the abundances as values
:param initial_density: density starting point for the optimization procedure
:param initial_bkg_scaling: background scaling starting point for the optimization procedure
:param r_cutoff: cutoff value below which there is no signal expected (below the first peak in g(r))
:param iterations: number of iterations for optimization, described in equations 47-49 in Eggert et al. 2002
:param use_modification_fcn: Whether or not to use the Lorch modification function during the Fourier transform.
:return: tuple with optimized parameters (density, density_error, bkg_scaling, bkg_scaling_error)
"""
N = sum([composition[x] for x in composition])
q = data_pattern.extend_to(0, 0).x
inc = calculate_incoherent_scattering(composition, q)
f_eff = calculate_effective_form_factors(composition, q)
z_tot = calculate_atomic_number_sum(composition)
s_inf = calculate_s_inf(composition, z_tot, f_eff, q)
j = calculate_j(inc, z_tot, f_eff)
def optimization_fcn(x):
density = x['density'].value
bkg_scaling = x['bkg_scaling'].value
r = np.arange(0, r_cutoff, 0.02)
sample_pattern = data_pattern - bkg_scaling * bkg_pattern
sample_pattern = sample_pattern.extend_to(0, 0)
alpha = calculate_alpha(sample_pattern, z_tot, f_eff, s_inf, j, density)
coherent_pattern = calculate_coherent_scattering(sample_pattern, alpha, N, inc)
sq_pattern = calculate_sq(coherent_pattern, N, z_tot, f_eff)
iq_pattern = Pattern(sq_pattern.x, sq_pattern.y - s_inf)
fr_pattern = calculate_fr(iq_pattern, r, use_modification_fcn=use_modification_fcn)
q, iq_int = iq_pattern.data
r, fr_int = fr_pattern.data
delta_fr = fr_int + 4 * np.pi * r * density
for iteration in range(iterations):
in_integral = np.array(np.sin(np.outer(q.T, r))) * delta_fr
integral = np.trapz(in_integral, r)
iq_optimized = iq_int - 1. / q * (iq_int / (s_inf + j) + 1) * integral
iq_pattern = Pattern(q, iq_optimized)
fr_pattern = calculate_fr(iq_pattern, r)
q, iq_int = iq_pattern.data
r, fr_int = fr_pattern.data
delta_fr = fr_int + 4 * np.pi * r * density
return delta_fr
params = Parameters()
params.add('density', value=initial_density, )
params.add('bkg_scaling', value=initial_bkg_scaling)
result = minimize(optimization_fcn, params)
    return result.params['density'].value, result.params['density'].stderr, \
           result.params['bkg_scaling'].value, result.params['bkg_scaling'].stderr
|
281c1ec6c57854fcc703ca9d4f1b364cea9ccf98
| 35,101 |
import datetime as dt
from django.shortcuts import render
def detail(root, name, my, request, actie="", msg=""):
    """Build the screen with the action details.
    The type of user is passed to the screen so that, where needed, editing can be
    disabled and various buttons hidden.
    """
## msg = request.GET.get("msg", "")
if not msg:
if request.user.is_authenticated:
msg = 'U bent ingelogd als <i>{}</i>. '.format(request.user.username)
msg += 'Klik <a href="/logout/'
inuit = 'uit'
else:
msg = 'U bent niet ingelogd. Klik <a href="/accounts/login/'
inuit = 'in'
msg += '?next=/{}/{}/">hier</a> om {} te loggen. '.format(root, actie, inuit)
if inuit == "uit":
msg += "Klik op een van onderstaande termen om meer te zien."
page_data = {
"name": name,
"root": root,
"pages": my.Page.objects.all().order_by('order'),
"soorten": my.Soort.objects.all().order_by('order'),
"stats": my.Status.objects.all().order_by('order'),
"users": [x.assigned for x in my.Worker.objects.all()],
"msg": msg}
if is_user(root, request.user) or is_admin(root, request.user):
page_data["readonly"] = False
else:
page_data["readonly"] = True
if actie == "nieuw":
titel = "Nieuwe actie"
page_titel = ""
volgnr = 0
aant = my.Actie.objects.count()
nw_date = dt.datetime.now()
if aant:
last = my.Actie.objects.all()[aant - 1]
jaar, volgnr = last.nummer.split("-")
volgnr = int(volgnr) if int(jaar) == nw_date.year else 0
volgnr += 1
page_data["nummer"] = "{0}-{1:04}".format(nw_date.year, volgnr)
page_data["nieuw"] = request.user
else:
actie = my.Actie.objects.get(pk=actie)
page_data["actie"] = actie
titel = "Actie {0} - ".format(actie.nummer)
page_titel = "Titel/Status"
page_data["title"] = titel
page_data["page_titel"] = page_titel
return render(request, root + '/actie.html', page_data)
|
fd53b4ed2bb860ce20d8c7a6474c6effe5b33a71
| 35,103 |
import numpy as np
from scipy.ndimage import binary_dilation, binary_erosion, measurements
def define_areas(
pixel_filtered_map: np.ndarray, district_heating_zone_threshold: float
):
"""
This function defines areas where the sum of the pixel values for a given
area exceeds a certain threshold.
Inputs :
* pixel_filtered_map : pixel filtered map (MWh).
* district_heating_zone_threshold : threshold that the areas must meet (MWh).
Outputs :
* areas :
- map where the value of the pixels belonging to a given area corresponds
to the potential of this area.
* filtered_map :
- map where the values of the pixels not belonging to a filtered area
are set to zero.
* total_potential :
- the sum of the potential of each zone.
* areas_potential :
- list of the potential of each zone.
The pixels that do not pass the thresholds (and therefore defined
with a potential equal to zero) belong to the first zone.
For example : [0, "1st area potential", ..., "nth area potential"]
- NB: this zone is not interesting, therefore only the potential
of the other zones is returned in practice
"""
structure = np.ones((3, 3)).astype(int)
expanded_map = binary_dilation(input=pixel_filtered_map, structure=structure)
eroded_map = binary_erosion(input=expanded_map, structure=structure)
labels_array, n_label = measurements.label(
input=eroded_map,
structure=structure,
)
# labels start from 1, therefore the array size is 'num_labels_array + 1'
areas_potential = np.zeros((n_label + 1)).astype(float)
if n_label > 0:
end, start, sorted_array = get_browsing_indexes(
labels_array=labels_array,
pixel_filtered_map=pixel_filtered_map,
n_label=n_label,
)
for i, (start_index, end_index) in enumerate(zip(start, end)):
area = sorted_array[start_index:end_index, 3]
area_potential = np.sum(area)
if area_potential >= district_heating_zone_threshold:
# i+1 because labeling starts from 1 and not from 0
# factor 0.001 for conversion from MWh/ha to GWh/ha
areas_potential[i + 1] = np.around(np.sum(area_potential) / 1000, 2)
areas = areas_potential[labels_array]
filtered_map = pixel_filtered_map * (areas > 0).astype(int)
total_potential = np.sum(areas_potential)
return areas, filtered_map, total_potential, areas_potential[1:]
|
67d4f4418e5fd456f12454a6de81840da5e8cb39
| 35,104 |
def get_relation(filename):
"""read relation file, return rel2idx"""
rel2idx = {}
f = open(filename, 'r')
lines = f.readlines()
for (n, rel) in enumerate(lines):
rel = rel.strip().lower()
rel2idx[rel] = n
f.close()
return rel2idx
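# Hedged usage sketch (not part of the original snippet): a relation file with one
# relation per line, e.g. "Cause-Effect" then "Component-Whole", yields
# {'cause-effect': 0, 'component-whole': 1}.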
|
1c239ec3343cf63e502bf9de485171c9a346e240
| 35,105 |
def id_for_base(val):
"""Return an id for a param set."""
if val is None:
return "No base params"
if "editor-command" in val:
return "Long base params"
if "ecmd" in val:
return "Short base params"
return "Unknown base params"
|
ecf54fa40195ba4de7db13874e44388a04527bed
| 35,106 |
import math
def __bilinear(lat, lon, table):
"""Return bilinear interpolated data from table"""
try:
lat = float(lat)
lon = float(lon)
except ValueError:
return ''
if _non_finite(lat) or _non_finite(lon):
return ''
if math.fabs(lat) > 90 or math.fabs(lon) > 180:
return ''
row = int(math.floor((90.0 + lat) / TABLE_SPAN))
column = int(math.floor((180.0 + lon) / TABLE_SPAN))
if row < (TABLE_ROWS - 1):
grid_w = row
grid_e = row + 1
else:
grid_w = row - 1
grid_e = row
if column < (TABLE_COLS - 1):
grid_s = column
grid_n = column + 1
else:
grid_s = column - 1
grid_n = column
south = grid_s * TABLE_SPAN - 180
north = grid_n * TABLE_SPAN - 180
west = grid_w * TABLE_SPAN - 90
east = grid_e * TABLE_SPAN - 90
delta = TABLE_SPAN * TABLE_SPAN * 100
from_west = lat - west
from_south = lon - south
from_east = east - lat
from_north = north - lon
result = table[grid_e][grid_n] * from_west * from_south
result += table[grid_w][grid_n] * from_east * from_south
result += table[grid_e][grid_s] * from_west * from_north
result += table[grid_w][grid_s] * from_east * from_north
return result / delta
|
83a665437e4391fd44c87aa8b4631c63c0da4756
| 35,107 |
def serialize_serializable(obj, spec, ctx):
""" Serialize any class that defines a ``serialize`` method. """
return obj.serafin_serialize(spec, ctx)
|
936c3b51257c60c156cd9686e38b09ec55a929f2
| 35,108 |
import time
import torch
def train_baseline(train_loader, model, criterion, optimizer, epoch):
"""
:param train_loader: data loader with training images
:param model: model used for training
:param criterion: loss function
:param optimizer: optimizer
:param epoch: current training epoch
    :return: average loss and average top-1 accuracy for this epoch (baseline training loop used for pretraining and fine-tuning of the deepmir model)
"""
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input_img, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
# create autograd variables of the input and target so that hooks (forward and backward) can be used
# the hooks are used to track the gradient updates in layers that have hooks
input_var = torch.autograd.Variable(input_img)
target_var = torch.autograd.Variable(target)
output = model(input_var) # compute model predictions_test
# activate these lines in case of model with 1 predictions_test neuron
# target_var = target_var.unsqueeze(1)
# target_var = target_var.float()
# ###########
loss = criterion(output, target_var) # update the loss function
# measure accuracy and record loss
[acc] = accuracy(output.data, target, topk=(1,))
losses.update(loss.data, input_img.size(0))
top1.update(acc.item(), input_img.size(0))
# compute gradient and do loss step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print(f'Epoch: [{epoch}][{i}/{len(train_loader)}]\t'
f'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
f'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
f'Loss {losses.val:.4f} ({losses.avg:.4f})\t'
f'Accuracy {top1.val:.3f} ({top1.avg:.3f})')
return losses.avg, top1.avg
|
ccac87a4e7a9d43fd5f5f85f00ab4cf797c23def
| 35,109 |
def best_match(supported, header):
"""Return mime-type with the highest quality ('q') from list of candidates.
Takes a list of supported mime-types and finds the best match for all the
media-ranges listed in header. The value of header must be a string that
conforms to the format of the HTTP Accept: header. The value of 'supported'
is a list of mime-types. The list of supported mime-types should be sorted
in order of increasing desirability, in case of a situation where there is
a tie.
>>> best_match(['application/xbel+xml', 'text/xml'],
'text/*;q=0.5,*/*; q=0.1')
'text/xml'
"""
split_header = _filter_blank(header.split(','))
parsed_header = [parse_media_range(r) for r in split_header]
weighted_matches = []
pos = 0
for mime_type in supported:
weighted_matches.append((fitness_and_quality_parsed(mime_type,
parsed_header), pos, mime_type))
pos += 1
weighted_matches.sort()
return weighted_matches[-1][0][1] and weighted_matches[-1][2] or ''
|
b7774bcce984dce5d55b5351218ec71163fc5032
| 35,110 |
import numpy as _np
from collections import namedtuple
def hourly_median(hours, series, all_hours=True):
"""
Calculate hourly binned medians of a time series.
Parameters
----------
hours : array_like
Time series of the hour number. Must be of the same length as `series`.
series : array_like
Time series of the data.
all_hours : bool, optional
Default is `True` to consider 24 hours. If `False`, only consider the
hours that are present in the `hours` input.
Returns
-------
hour_level : array_like
Hour levels.
median : array_like
Median values by the hour.
q1 : array_like
First quartile values by the hour.
q3 : array_like
Third quartile values by the hour.
See Also
--------
`hourly_avg` : Hourly binned average function.
"""
if all_hours:
hour_level = _np.arange(24)
else:
hour_level = _np.unique(hours)
med_hr = _np.zeros(hour_level.size) + _np.nan
q1_hr = _np.zeros(hour_level.size) + _np.nan
q3_hr = _np.zeros(hour_level.size) + _np.nan
for i in range(hour_level.size):
med_hr[i] = _np.nanmedian(series[hours == hour_level[i]])
q1_hr[i], q3_hr[i] = _np.nanpercentile(
series[hours == hour_level[i]], [25.0, 75]
)
HourlyMedianResult = namedtuple(
"HourlyMedianResult", ("hour_level", "median", "q1", "q3")
)
return HourlyMedianResult(hour_level, med_hr, q1_hr, q3_hr)
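# Minimal usage sketch: bin a tiny series by the hours actually present.
demo_hours = _np.array([0, 0, 1, 1])
demo_series = _np.array([1.0, 3.0, 10.0, 20.0])
demo_result = hourly_median(demo_hours, demo_series, all_hours=False)
assert list(demo_result.hour_level) == [0, 1]
assert list(demo_result.median) == [2.0, 15.0]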
|
7712b425e9445a520eda6bcc8cc18a6ac8274de4
| 35,111 |
import numpy as np
def is_orthonormal_direct(basis):
    """
    Returns True if the basis is orthonormal and direct (right-handed).
    Parameters
    ----------
    basis : ndarray
        3x3 array whose rows are the basis vectors.
    Returns
    -------
    bool
        True if the basis is orthonormal and the cross product of the first two
        vectors equals the third.
    """
return is_orthonormal(basis) and np.allclose(
np.cross(basis[0, :], basis[1, :]), basis[2, :]
)
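# Usage sketch (assumes the companion is_orthonormal helper from the same module):
#   is_orthonormal_direct(np.eye(3))             -> True  (right-handed identity basis)
#   is_orthonormal_direct(np.eye(3)[[1, 0, 2]])  -> False (swapping two axes flips the handedness)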
|
d58f5f9c30ea82d52fd6444febf0515fb99a9147
| 35,112 |
import numpy as np
def matlab_style_gauss2D(shape=(3, 3), sigma=0.5):
"""
2D gaussian mask - should give the same result as MATLAB's fspecial('gaussian',[shape],[sigma])
Acknowledgement : https://stackoverflow.com/questions/17190649/how-to-obtain-a-gaussian-filter-in-python (Author@ali_m)
"""
m, n = [(ss - 1.) / 2. for ss in shape]
y, x = np.ogrid[-m:m + 1, -n:n + 1]
h = np.exp(-(x * x + y * y) / (2. * sigma * sigma))
h[h < np.finfo(h.dtype).eps * h.max()] = 0
sumh = h.sum()
if sumh != 0:
h /= sumh
return h
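# Minimal usage sketch: a 5x5 Gaussian kernel with sigma=1 is normalised to sum to 1.
demo_kernel = matlab_style_gauss2D(shape=(5, 5), sigma=1.0)
assert demo_kernel.shape == (5, 5)
assert abs(demo_kernel.sum() - 1.0) < 1e-9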
|
4be8f129fd7063bc959802e206782bee46310dae
| 35,113 |
def triangle2str(idx, triangle):
"""
converts a triangle to a string
"""
return f"shapes[{idx}] = makeShape(" + \
f"materials[{triangle['matid']}]," + \
f"makeTriangle({vec32str(triangle['a'])}," + \
f"{vec32str(triangle['b'])}," + \
f"{vec32str(triangle['c'])}));"
|
13761396602855bf38901f5eef365f0bbc2592bf
| 35,114 |
import socket
def get_hosts_from_yaml(test_yaml, args, key_match=None):
"""Extract the list of hosts from the test yaml file.
This host will be included in the list if no clients are explicitly called
out in the test's yaml file.
Args:
test_yaml (str): test yaml file
args (argparse.Namespace): command line arguments for this program
key_match (str, optional): test yaml key used to filter which hosts to
find. Defaults to None which will match all keys.
Returns:
list: a unique list of hosts specified in the test's yaml file
"""
display(
args,
"Extracting hosts from {} - matching key '{}'".format(
test_yaml, key_match))
host_set = set()
if args.include_localhost and key_match != YAML_KEYS["test_servers"]:
host_set.add(socket.gethostname().split(".")[0])
found_client_key = False
for key, value in list(find_yaml_hosts(test_yaml).items()):
display(args, " Found {}: {}".format(key, value))
if key_match is None or key == key_match:
display(args, " Adding {}".format(value))
host_set.update(value)
if key in YAML_KEYS["test_clients"]:
found_client_key = True
# Include this host as a client if no clients are specified
if not found_client_key and key_match != YAML_KEYS["test_servers"]:
local_host = socket.gethostname().split(".")[0]
display(args, " Adding the localhost: {}".format(local_host))
host_set.add(local_host)
return sorted(list(host_set))
|
4dce69fe52769972c396208859d9cc5cfe4cf64a
| 35,115 |
def parse_labels_mapping(s):
"""
Parse the mapping between a label type and it's feature map.
For instance:
'0;1;2;3' -> [0, 1, 2, 3]
'0+2;3' -> [0, None, 0, 1]
'3;0+2;1' -> [1, 2, 1, 0]
"""
if len(s) > 0:
split = [[int(y) for y in x.split('+')] for x in s.split(';')]
elements = sum(split, [])
assert all(x in range(4) for x in elements)
assert len(elements) == len(set(elements))
labels_mapping = []
for i in range(4):
found = False
for j, l in enumerate(split):
if i in l:
assert not found
found = True
labels_mapping.append(j)
if not found:
labels_mapping.append(None)
assert len(labels_mapping) == 4
else:
labels_mapping = None
return labels_mapping
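# Minimal usage sketch reproducing the docstring behaviour, plus the empty-string case.
assert parse_labels_mapping('0+2;3') == [0, None, 0, 1]
assert parse_labels_mapping('') is None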
|
d2f77876f1759e6d4093afc720e7631f1b4d9ff4
| 35,116 |
from typing import Optional
from typing import Any
from typing import Dict
import vapoursynth as vs
from vapoursynth import core
def sraa_eedi3(clip: vs.VideoNode, rep: Optional[int] = None, **eedi3_args: Any) -> vs.VideoNode:
"""Drop half the field with eedi3+nnedi3 and interpolate them.
Args:
clip (vs.VideoNode): Source clip.
rep (Optional[int], optional): Repair mode. Defaults to None.
Returns:
vs.VideoNode: AA'd clip
"""
nnargs: Dict[str, Any] = dict(nsize=0, nns=3, qual=1)
eeargs: Dict[str, Any] = dict(alpha=0.2, beta=0.6, gamma=40, nrad=2, mdis=20)
eeargs.update(eedi3_args)
eedi3_fun, nnedi3_fun = core.eedi3m.EEDI3, core.nnedi3cl.NNEDI3CL
flt = core.std.Transpose(clip)
flt = eedi3_fun(flt, 0, False, sclip=nnedi3_fun(flt, 0, False, False, **nnargs), **eeargs)
flt = core.std.Transpose(flt)
flt = eedi3_fun(flt, 0, False, sclip=nnedi3_fun(flt, 0, False, False, **nnargs), **eeargs)
if rep:
flt = core.rgsf.Repair(flt, clip, rep)
return flt
|
24f5bcaedbd16fe0feabfb589fdf4244554888ae
| 35,117 |
from typing import Any
import dataclasses
def _is_nested(x: Any) -> bool:
"""Returns whether a value is nested."""
return isinstance(x, dict) or dataclasses.is_dataclass(x)
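# Minimal usage sketch: dicts and dataclass instances are nested, plain values are not.
@dataclasses.dataclass
class _Point:
    x: int = 0
assert _is_nested({"a": 1})
assert _is_nested(_Point())
assert not _is_nested([1, 2])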
|
798000adfd8eb900b61be988ab6a31e1b062540d
| 35,118 |
import math
from scipy import special
def pedel(lsize, seq_len, mps, dist_fx=poisson):
"""
Pedel calculates library diversity given library size and mutational load.
For poisson distribution use poisson (default).
For pcr distribution, use first pcr_distribution_factory(efficiency, cycles) to obtain a function specific to those parameters.
>>> pedel(1e6,len(wt),4,pcr_distribution_factory(0.4, 32))
:param lsize: library size
:param seq_len: sequence length (bases)
:param mps: mutation per seq. (mutational load)
:param dist_fx: function of the distribution
:return: number of distinct sequences in library
"""
def Lx(x):
return poisson(x, mps) * lsize
def Cx(x):
"""
Number of unique variants in libray with x mutations
:param x: n muts
:return: C_x
"""
if x == 0:
return 1 # wt
# eq 6.
return Vx(x) * (1 - math.exp(-Lx(x) / Vx(x)))
def Vx(x):
"""
Number of possible variants with x muts
:param x: number of mutations
:return: V_x
"""
if x == 0:
return 1 # wt
# eq. 5
return special.binom(seq_len, x) * 3 ** x
xu = (mps + math.log(0.1 / lsize)) / math.log(mps / (2 * seq_len))
xl = (mps + math.log(3 / lsize)) / math.log(mps / (3 * seq_len))
s1 = math.floor(xl)
s2 = math.ceil(xu)
# note that range(min,max) does not include max.
C = 1 + \
sum([Vx(x) for x in range(1, s1 + 1)]) + \
sum([Cx(x) for x in range(s1, s2)]) + \
lsize * (1 - sum([dist_fx(x, mps) for x in range(0, s2)]))
return C
|
d5f75a1032272961a407dc7f02292e3095966281
| 35,119 |
def contains_pept(name):
"""Checks if the saccharide name contains the peptide fragment,
such as EES, GS, SA etc"""
contains_pept = False
for pept_stub_name in ('_E', '_G', '_S', '_A'):
if (pept_stub_name in name) and ('_NRE' not in name):
contains_pept = True
return contains_pept
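# Minimal usage sketch (hypothetical saccharide names): a peptide stub such as '_S'
# counts only when the name is not an '_NRE' fragment.
assert contains_pept("GlcNAc_S")
assert not contains_pept("GlcNAc_S_NRE")
assert not contains_pept("GlcNAc")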
|
937679a96b21766e96eb455baca51c1695412287
| 35,120 |
import torch
def check_stacked_complex(data: torch.Tensor) -> torch.Tensor:
"""
Check if tensor is stacked complex (real & imag parts stacked along last dim) and convert it to a combined complex
tensor.
Args:
data: A complex valued tensor, where the size of the final dimension might be 2.
Returns:
A complex valued tensor.
"""
return torch.view_as_complex(data) if data.shape[-1] == 2 else data
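# Minimal usage sketch: a trailing dimension of size 2 is interpreted as stacked real/imag parts.
stacked = torch.randn(4, 4, 2)
assert check_stacked_complex(stacked).shape == (4, 4)
already_complex = torch.complex(torch.randn(4, 4), torch.randn(4, 4))
assert check_stacked_complex(already_complex) is already_complex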
|
afce7ac1840ff64199c9ebc9f4222e1d3f09dafd
| 35,121 |
def circle(x, y, a, b, width):
"""
widthで指定された直径の中に含まれているかを判定
:param x:
:param y:
:param a:
:param b:
:param width:
:return:
"""
_x = round(((x - a) ** 2), 3)
_y = round(((y - b) ** 2), 3)
_r = round(((width/2) ** 2), 3)
if (_x + _y) <= _r:
return _r - (_x + _y)
return None
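# Minimal usage sketch: (1, 1) lies inside a circle of diameter 4 centred at the origin.
assert circle(1, 1, 0, 0, 4) == 2.0
assert circle(3, 3, 0, 0, 4) is None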
|
fabddad9e3c404dc36e1cf1830ebcc107cf66516
| 35,122 |
def fetch_representative_points(
service_area_ids,
include_census_data,
engine=connect.create_db_engine()
):
"""
Fetch representative points for a list of service areas.
Prepares responses for use by the frontend.
"""
if not service_area_ids:
return []
query_params = {
'id_list': tuple(service_area_ids)
}
# Set census mapping.
census_mapping = CENSUS_FIELDS_BY_CATEGORY if include_census_data else {}
if include_census_data:
join_list = ' '.join(["""
LEFT JOIN {table}
ON (representative_points.census_tract = {table}.census_tract)
""".format(
table=table) for table in CENSUS_TABLES
])
select_query = """
SELECT {cols}
FROM {table_name}
{joins}
WHERE service_area_id IN %(id_list)s
ORDER BY id
;
""".format(
cols=', '.join(
RP_COLUMNS + readable_columns_from_census_mapping(CENSUS_FIELDS_BY_CATEGORY)),
table_name=representative_point.RepresentativePoint.__tablename__,
joins=join_list,
)
logger.info('Fetching representative_points with census data.')
else:
select_query = """
SELECT {cols}
FROM {table_name}
WHERE service_area_id IN %(id_list)s
ORDER BY id
;
""".format(
cols=', '.join(RP_COLUMNS),
table_name=representative_point.RepresentativePoint.__tablename__
)
return [
representative_point.row_to_dict(row, census_mapping=census_mapping)
for row in engine.execute(select_query, query_params).fetchall()
]
|
daec38168159c537454d55696f6bcac72799b586
| 35,123 |
def get_most_similar(candidates, target_val, endpointService):
"""
select the entity from candidates that are most similar to the original one
ties are broken by overall popularity
"""
closest_dist = float('inf')
closest_matches = []
target_val = target_val.lower()
for cand in candidates:
# skip candidates, we dont have a label for
if not cand['labels']:
continue
# the dist of this candidate
distances = [sDist.levenshtein(target_val, label.lower()) for label in cand['labels']]
dist = min(distances)
# do we need to update?
if dist < closest_dist:
closest_dist = dist
closest_matches = [cand]
elif dist == closest_dist:
closest_matches.append(cand)
# we got a unique best-match
if len(closest_matches) == 1:
return closest_matches[0]
# break ties by popularity
cands_uris = [cand['uri'] for cand in closest_matches]
popularity = endpointService.get_popularity_for_lst.send([cands_uris])
for cand in closest_matches:
if (cand['uri'] in popularity) and (len(popularity[cand['uri']]) > 0) and ('popularity' in popularity[cand['uri']][0]):
cand['popularity'] = int(popularity[cand['uri']][0]['popularity'])
else:
cand['popularity'] = 0
closest_matches.sort(key=lambda x: x['popularity'], reverse=True)
return closest_matches[0]
|
0f4c99e0ed9428160880a778385c35597732b322
| 35,124 |
def dict_remove_none(starting_seq=None, extend_chained=True, chained=None, chained_status=None):
"""
Given a target sequence, look for dictionary keys that have values of None and remove them.
By default, ``chained`` will have ``.extend()`` or ``.update()`` called on it with
``starting_seq`` as the only argument.
Set ``extend_chained`` to False to ignore ``starting_seq``.
chained_status
Status returned by the chained method.
The first return value (status) will be True if the sterilizing is successful,
and False otherwise.
The second argument will be the sterilized sequence.
"""
if extend_chained:
try:
if starting_seq and isinstance(chained, (set, dict)):
chained.update(starting_seq)
elif starting_seq and isinstance(chained, list):
chained.extend(starting_seq)
except (AttributeError, TypeError, ValueError):
log.error("Invalid arguments type", exc_info=True)
return False, None
if isinstance(chained, dict):
ret = _sterilize_dict(chained)
elif isinstance(chained, (list, set, tuple)):
ret = _sterilize_seq(chained)
else:
log.error("Invalid arguments type - dict, list, set or tuple expected")
ret = None
status = bool(ret)
return status, ret
|
ce43ce9cdfff92fcce329b50cf25aeb8f74b6ff5
| 35,125 |
import requests
def get_scoped_token(os_auth_url, access_token, project_id):
"""
Get a scoped token, will try all protocols if needed
"""
unscoped_token, protocol = get_unscoped_token(os_auth_url, access_token)
url = get_keystone_url(os_auth_url, "/v3/auth/tokens")
body = {
"auth": {
"identity": {"methods": ["token"], "token": {"id": unscoped_token}},
"scope": {"project": {"id": project_id}},
}
}
request = requests.post(url, json=body)
# pylint: disable=no-member
if request.status_code != requests.codes.created:
raise RuntimeError("Unable to get a scoped token")
return request.headers["X-Subject-Token"], protocol
|
1ebeeb09e90ae6ad6091ef2623c9be59b27a22d5
| 35,126 |
import pandas as pd
from pandas import Index
def split_feature_class(label: str, frame: pd.DataFrame):
""" Split features and class from encode dataset. `label` is the class, and
`frame` is the dataset which has been encoded.
"""
sub_cols = [attr for attr in frame.columns if attr.startswith(label)]
if len(sub_cols) <= 1:
return frame, None
is_one_class = len(sub_cols) == 2
if is_one_class:
# For one class, there are two sorted values.
# e.g. ['Yes', 'No'] => [[0, 1],
# [1, 0]]
# Choose second column to represent this attribute.
label_ = sub_cols[1]
return frame.drop(sub_cols, axis=1), frame[label_]
# merge multiple columns into one column: [Name_A, Name_B, ..] => Name
y = frame[sub_cols].apply(lambda x: Index(x).get_loc(1), axis=1)
return frame.drop(sub_cols, axis=1), y
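# Minimal usage sketch with a hypothetical one-hot-encoded binary label (label_No / label_Yes).
demo_frame = pd.DataFrame({"label_No": [1, 0], "label_Yes": [0, 1], "age": [30, 40]})
demo_features, demo_target = split_feature_class("label", demo_frame)
assert list(demo_features.columns) == ["age"]
assert list(demo_target) == [0, 1]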
|
03cd3b4f3c2909aaf03ec8832bb7a543a6c1f789
| 35,130 |
def user_form_test(): # <dev>
"""recommender user form.
Returns:
request: dictionary (json object)
list of user's strain description
schema:
{"type_list": [""],
"effect_list": [""],
"flavor_list": [""]}
"""
return render_template("index.html")
|
3f853f197ffa7d879af77a6c1777ec6f9ea394dc
| 35,131 |
from numpy import asarray, sqrt, zeros
from numpy.linalg import lstsq
def bestfit_sphere_numpy(points):
"""Returns the sphere's center and radius that fits best through a set of points.
Parameters
----------
points: list of points
XYZ coordinates of the points.
Returns
-------
tuple: center, radius
sphere center (XYZ coordinates) and sphere radius.
Notes
-----
For more information see [1]_.
References
----------
.. [1] Least Squares Sphere Fit.
Available at: https://jekel.me/2015/Least-Squares-Sphere-Fit/.
Examples
--------
>>> from compas.geometry import bestfit_sphere_numpy
>>> points = [(291.580, -199.041, 120.194), (293.003, -52.379, 33.599),\
(514.217, 26.345, 29.143), (683.253, 26.510, -6.194),\
(683.247, -327.154, 179.113), (231.606, -430.659, 115.458),\
(87.278, -419.178, -18.863), (24.731, -340.222, -127.158)]
>>> center, radius = bestfit_sphere_numpy(points)
"""
# Assemble the A matrix
spX = asarray([p[0] for p in points])
spY = asarray([p[1] for p in points])
spZ = asarray([p[2] for p in points])
A = zeros((len(spX), 4))
A[:, 0] = spX*2
A[:, 1] = spY*2
A[:, 2] = spZ*2
A[:, 3] = 1
# Assemble the f matrix
f = zeros((len(spX), 1))
f[:, 0] = (spX*spX) + (spY*spY) + (spZ*spZ)
    C, residuals, rank, singval = lstsq(A, f)
# solve for the radius
t = (C[0]*C[0]) + (C[1]*C[1]) + (C[2]*C[2]) + C[3]
radius = sqrt(t)
return [float(C[0][0]), float(C[1][0]), float(C[2][0])], radius
|
52cee41806ef1ea7dc5c3b3c00109ab686b606cd
| 35,132 |
import numpy
def absupearson(a,b,weights):
"""Distance between two points based on the pearson correlation coefficient.
By treating each data point as half of a list of ordered pairs it is
possible to caluclate the pearson correlation coefficent for the list. The
correlation coefficent is then converted into a pseudo-distance by
subtracting its absolute value from 1. This function assumes the ordered
pairs are centered around 0. Used over the uncentered pearson distance when
linearity of correlation, and not slope of correlation, is a more
appropriate measure of similarity.
Parameters:
a,b : ndarray
The data points. Expects two rank 1 arrays of the same length.
weights : ndarray
The weights for each dimension. Expects rank 1 array of same
length as a & b.
Returns:
d : float
The absolute uncentered pearson distance between the two data
points.
See Also:
abspearson
Notes:
The absolute uncentered pearson distance is defined mathematically as:
            d = 1 - | sum_i(w[i]*a[i]*b[i]) / (sum_i(w[i]*a[i]**2) * sum_i(w[i]*b[i]**2)) |
where a[i] & b[i] are the ith elements of a & b respectively and w[i]
is the weight of the ith dimension. Only dimensions for which both
vectors have values are used when computing the sums of squares.
"""
return 1. - numpy.abs((2.*upearson(a,b,weights)-1))
|
7bc70816f96a8bd90a424eb7ca13fd291d086b06
| 35,133 |
import altair as alt
import pandas as pd
def plot_met(data_df: pd.DataFrame, data_interp_df: pd.DataFrame, met_selection: list, scaled_conc: bool = False,
             x_scale: str = 'linear', y_scale: str = 'linear', x_lim: tuple = (10 ** -10, 1), y_lim: tuple = None):
    """
    Plots the selected metabolites across all models in altair, using line marks for the
    simulated data and point marks for the interpolated data.
    Args:
        data_df: long-format DataFrame with the simulated concentrations, or None.
        data_interp_df: long-format DataFrame with the interpolated concentrations.
        met_selection: list of metabolite names to plot.
        scaled_conc: if True, plot the scaled concentration column instead of the unscaled one.
        x_scale: altair scale type for the x axis.
        y_scale: altair scale type for the y axis.
        x_lim: x-axis domain.
        y_lim: y-axis domain (currently unused).
    Returns:
        An interactive altair chart.
    """
if data_df is not None:
plot1 = alt.Chart(data_df[data_df['met'].isin(met_selection)]).mark_line().encode(
alt.X('time_point:Q',
scale=alt.Scale(domain=x_lim, type=x_scale)
),
alt.Y('concentration:Q' if scaled_conc else 'concentration_unscaled:Q',
scale=alt.Scale(type=y_scale)
),
color='model:N',
tooltip='model:N'
).properties(
width=500,
height=400
)
plot2 = alt.Chart(data_interp_df[data_interp_df['met'].isin(met_selection)]).mark_point().encode(
alt.X('time_point:Q',
scale=alt.Scale(domain=x_lim, type=x_scale)
),
alt.Y('concentration:Q' if scaled_conc else 'concentration_unscaled:Q',
                  scale=alt.Scale(type=y_scale)
),
color='model:N',
tooltip='model:N'
).properties(
width=500,
height=400
)
return alt.layer(plot1, plot2).interactive()
else:
plot2 = alt.Chart(data_interp_df[data_interp_df['met'].isin(met_selection)]).mark_line().encode(
alt.X('time_point:Q',
scale=alt.Scale(domain=x_lim, type=x_scale)
),
alt.Y('concentration:Q' if scaled_conc else 'concentration_unscaled:Q',
                  scale=alt.Scale(type=y_scale)
),
color='model:N',
tooltip='model:N'
).properties(
width=500,
height=400
)
return plot2.interactive()
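# Usage sketch (hypothetical long-format DataFrame with the columns the encodings expect):
#   df = pd.DataFrame({"time_point": [0.1, 1.0], "concentration": [0.5, 0.7],
#                      "concentration_unscaled": [5.0, 7.0], "met": ["atp", "atp"], "model": ["m1", "m1"]})
#   chart = plot_met(None, df, met_selection=["atp"])   # line chart built from the interpolated data only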
|
cad99c352a4c04a1a9492811bb79fa61853fb25a
| 35,134 |
def get_page(db: Session, name: str) -> models.Page:
"""
Get the page with the given name.
:param db: The db session to check.
:param name: The name of the page to find.
:return: The desired page in the db, or None if DNE.
"""
return db.query(models.Page).filter(models.Page.name == name).first()
|
f4fb59efe5dc68260e9a261858e77a9e946502f8
| 35,136 |
from typing import Tuple
def _dbms_utility_name_resolve(
connection: oracle.Connection, name: str, context: int
) -> Tuple[str, str, str, str, int, int]:
"""Wrapper for Oracle DBMS_UTILITY.NAME_RESOLVE procedure"""
with connection.cursor() as cursor:
schema = cursor.var(str)
part1 = cursor.var(str)
part2 = cursor.var(str)
database_link = cursor.var(str)
part1_type = cursor.var(int)
object_id = cursor.var(int)
cursor.callproc(
"dbms_utility.name_resolve",
[
name,
context,
schema,
part1,
part2,
database_link,
part1_type,
object_id,
],
)
return (
schema.getvalue(),
part1.getvalue(),
part2.getvalue(),
database_link.getvalue(),
part1_type.getvalue(),
object_id.getvalue(),
)
|
43bdf191c8f7b296cbac93f9d63a405a7f0f1115
| 35,138 |
def func_call(instance: ARIA, func_name: str, command_name: str, demisto_arguments: list, args: dict):
""" Helper function used to call different demisto command
Args:
instance: An ARIA instance.
func_name: Name of the functions in the ARIA class.
command_name: Related demisto command name.
demisto_arguments: List of arguments name in the right order.
args: Input of demisto arguments dict.
"""
arguments_value = []
for arg in demisto_arguments:
value = args.get(arg) # get values from demisto command
arguments_value.append(value)
context_entry = getattr(instance, func_name)(*tuple(arguments_value)) # get returned tuple
table_header = ['Rule', 'Status', 'Endpoints']
context_name = func_name.title().replace('_', '')
ec = {
f'Aria.{context_name}(val.name && val.name == obj.name)': context_entry
}
readable_output = tableToMarkdown(command_name, context_entry, table_header)
return readable_output, ec
|
2d55693c7df9179f4771717ed00da45ea74f2d1d
| 35,139 |
from functools import wraps
from flask import g, redirect, url_for
def admin_role_required(f):
"""
Grant access if user is in Administrator role
"""
@wraps(f)
def decorated_function(*args, **kwargs):
if g.user.role.name != 'Administrator':
return redirect(url_for('error', code=401))
return f(*args, **kwargs)
return decorated_function
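# Usage sketch (hypothetical Flask view): non-Administrator users are redirected to the error page.
#   @app.route("/admin")
#   @admin_role_required
#   def admin_dashboard():
#       return "admins only"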
|
e0f67f97339af3867227f9507047e240fea43eaa
| 35,140 |
import pandas as pd
def format_location_detail_tract(data):
"""Reformat the list of data to Location Detail format for tract
Args:
data (list): A list of FSF object
Returns:
A pandas formatted DataFrame
"""
df = pd.DataFrame([vars(o) for o in data])
df.rename(columns={'fsid': 'fsid_placeholder'}, inplace=True)
if not df['county'].isna().values.all():
df = pd.concat([df.drop(['county'], axis=1), df['county'].apply(pd.Series)], axis=1)
df.rename(columns={'fsid': 'county_fips', 'name': 'county_name'}, inplace=True)
else:
df.drop(['county'], axis=1, inplace=True)
df['county_fips'] = pd.NA
df['county_name'] = pd.NA
if not df['state'].isna().values.all():
df = pd.concat([df.drop(['state'], axis=1), df['state'].apply(pd.Series)], axis=1)
df.rename(columns={'fsid': 'state_fips', 'name': 'state_name'}, inplace=True)
else:
df.drop(['state'], axis=1, inplace=True)
df['state_fips'] = pd.NA
df['state_name'] = pd.NA
df.rename(columns={'fsid_placeholder': 'fsid', 'name_placeholder': 'name'}, inplace=True)
df['fsid'] = df['fsid'].apply(str)
df['county_fips'] = df['county_fips'].astype('Int64').apply(str)
df['state_fips'] = df['state_fips'].astype('Int64').apply(str).apply(lambda x: x.zfill(2))
df['geometry'] = df['geometry'].apply(get_geom_center)
df = pd.concat([df.drop(['geometry'], axis=1), df['geometry'].apply(pd.Series)], axis=1)
return df[['fsid', 'valid_id', 'fips', 'county_fips', 'county_name', 'state_fips', 'state_name',
'latitude', 'longitude']]
|
3d99e5790aab8e3031494fac824dd5425acf2463
| 35,141 |
from typing import List
def pgcdDesDistancesEntreRepetitions(crypted: List[int], i: int) -> int:
"""
Returns pgcd{ }
"""
assert 0 <= i < len(crypted) - 2, "i must be in [0, len(crypted)-2)"
search_for = crypted[i:i+2+1]
current_pattern: List[int] = []
distances: List[int] = []
for i, c in enumerate(crypted[i+3:]):
if 0 < len(current_pattern) < 3 and c == search_for[len(current_pattern)]:
current_pattern += [c]
if len(current_pattern) == len(search_for):
distances += [i]
current_pattern = []
return pgcd(*distances)
|
7843c9d86f5dc0a28793cb6158261cdb9425df84
| 35,142 |
import numpy
def preprocess(simulation, field):
"""
Store DOF and geometry info that will not change unless the mesh is
updated (which is not handled in any way)
"""
mesh = simulation.data['mesh']
dofmap = field.function_space().dofmap()
conFC = simulation.data['connectivity_FC']
# Data to compute
facet_data = {}
cell_dofs = [None] * mesh.num_cells()
is_ghost_cell = [False] * mesh.num_cells()
cell_coords = numpy.zeros((2, 3), float)
for facet in dolfin.facets(mesh, 'all'):
fid = facet.index()
cell_ids = conFC(fid)
if len(cell_ids) != 2:
continue
# Get midpoint coordinates of the two connected cells and the DG0
# dof needed to find the field value in each of the cells
for i, cell_id in enumerate(cell_ids):
cell = dolfin.Cell(mesh, cell_id)
is_ghost_cell[cell_id] = cell.is_ghost()
pt = cell.midpoint()
cell_coords[i, 0] = pt.x()
cell_coords[i, 1] = pt.y()
cell_coords[i, 2] = pt.z()
# Get the one and only DG0 dof for this cell
dofs = dofmap.cell_dofs(cell_id)
assert len(dofs) == 1
cell_dofs[cell_id] = dofs[0]
# Unit vector from cell 1 to cell 0
uvec = cell_coords[0] - cell_coords[1]
uvec /= (uvec[0] ** 2 + uvec[1] ** 2 + uvec[2] ** 2) ** 0.5
# Store data for later use
coords0 = numpy.array(cell_coords[0])
coords1 = numpy.array(cell_coords[1])
facet_data[fid] = cell_ids[0], cell_ids[1], coords0, coords1, uvec
return facet_data, cell_dofs, is_ghost_cell
|
552cba3b5c092ea530f2089ec8252991349a0853
| 35,143 |
def drop_probable_entities(X):
"""
:param X: a data matrix: a list wrapping a list of strings, with each sublist being a sentence.
:return:
>>> drop_empty_lists(drop_arabic_numeric(drop_probable_entities([['Catullus'], ['C.', 'VALERIVS', 'CATVLLVS'],['1','2', '2b', '3' ],['I.', 'ad', 'Cornelium'],['Cui', 'dono', 'lepidum', 'novum', 'libellum', 'arida', 'modo', 'pumice', 'expolitum', '?']])))
[['ad'], ['dono', 'lepidum', 'novum', 'libellum', 'arida', 'modo', 'pumice', 'expolitum', '?']]
"""
return [[word
for word in sentence
if word[0].lower() == word[0]]
for sentence in X]
|
18e21d21212d56e256fd015f9513e06b31ed1027
| 35,144 |
def wrong_obj_msg(*objs, allow="sources"):
"""return error message for wrong object type provided"""
assert len(objs) <= 1, "only max one obj allowed"
allowed = allow.split("+")
prefix = "No" if len(allowed) == 1 else "Bad"
msg = f"{prefix} {'/'.join(allowed)} provided"
if "sources" in allowed:
msg += "\n" + ALLOWED_SOURCE_MSG
if "observers" in allowed:
msg += "\n" + ALLOWED_OBSERVER_MSG
if "sensors" in allowed:
msg += "\n" + ALLOWED_SENSORS_MSG
if objs:
obj = objs[0]
msg += f"\nreceived {obj!r} of type {type(obj).__name__!r} instead." ""
return msg
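# Usage sketch (assumes the module's ALLOWED_*_MSG constants): a single allow-group gives a
# "No ..." prefix and the offending object is reported at the end of the message.
#   wrong_obj_msg("not-a-source", allow="sources")
#   -> "No sources provided\n<ALLOWED_SOURCE_MSG>\nreceived 'not-a-source' of type 'str' instead."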
|
09294ccc6da5f02718edbdc2e04c8a92f213eb4a
| 35,145 |
def text2haiku(text, keep_chance=0.75, iterations=2000):
"""
Given a string of text, try to make a haiku
returns a haiku as a string or None if it fails
"""
word_list = text_to_word_list(text)
haiku = bagging_haiku_maker(word_list, keep_chance=keep_chance, iterations=iterations)
return haiku
|
759e6a94698e252f2df29911700a9fa6b945e61d
| 35,146 |