content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M)
---|---|---|
import numpy as np
def find_features_with_dtypes(df, dtypes):
"""
Find feature names in df with specific dtypes
df: DataFrame
dtypes: data types (defined in numpy) to look for
    e.g., categorical features usually have dtypes np.object, np.bool
and some of them have np.int (with a limited number of unique items)
"""
return np.asarray([fname for (fname, ftype) in df.dtypes.to_dict().items()
if ftype in dtypes])
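# Hedged usage sketch (illustrative, not part of the original snippet): assumes
# pandas is available; the small DataFrame below is made up for demonstration.
import pandas as pd

_demo = pd.DataFrame({"name": ["a", "b"], "flag": [True, False], "score": [1.0, 2.0]})
print(find_features_with_dtypes(_demo, (np.dtype("object"), np.dtype("bool"))))
# expected: ['name' 'flag']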
|
a94177dd24cb96915245959c0a22b254bd2a59df
| 28,442 |
import tensorflow as tf
def DeConv2d(net, n_out_channel = 32, filter_size=(3, 3),
out_size = (30, 30), strides = (2, 2), padding = 'SAME', batch_size = None, act = None,
W_init = tf.truncated_normal_initializer(stddev=0.02), b_init = tf.constant_initializer(value=0.0),
W_init_args = {}, b_init_args = {}, name ='decnn2d'):
"""Wrapper for :class:`DeConv2dLayer`, if you don't understand how to use :class:`DeConv2dLayer`, this function may be easier.
Parameters
----------
net : TensorLayer layer.
n_out_channel : int, number of output channel.
filter_size : tuple of (height, width) for filter size.
out_size : tuple of (height, width) of output.
batch_size : int or None, batch_size. If None, try to find the batch_size from the first dim of net.outputs (you should tell the batch_size when define the input placeholder).
strides : tuple of (height, width) for strides.
act : None or activation function.
others : see :class:`Conv2dLayer`.
"""
if act is None:
act = tf.identity
if batch_size is None:
batch_size = tf.shape(net.outputs)[0]
net = DeConv2dLayer(layer = net,
act = act,
shape = [filter_size[0], filter_size[1], n_out_channel, int(net.outputs._shape[-1])],
output_shape = [batch_size, out_size[0], out_size[1], n_out_channel],
strides = [1, strides[0], strides[1], 1],
padding = padding,
W_init = W_init,
b_init = b_init,
W_init_args = W_init_args,
b_init_args = b_init_args,
name = name)
return net
|
c99d717bac217878bc569d7fad4462d5445ac709
| 28,443 |
from typing import List
import re
def parse_release(base: str, path: str) -> List[str]:
"""Extracts built images from the release.yaml at path
Args:
base: The built images will be expected to start with this string,
other images will be ignored
path: The path to the file (release.yaml) that will contain the built images
Returns:
list of the images parsed from the file
"""
images = []
with open(path) as f:
for line in f:
match = re.search(base + ".*" + DIGEST_MARKER + ":[0-9a-f]*", line)
if match:
images.append(match.group(0))
return images
|
f4fec0908f2975a9ed9eef3e0a3a62549c9f757c
| 28,444 |
def build_config(config_file=get_system_config_directory()):
"""
Construct the config object from necessary elements.
"""
config = Config(config_file, allow_no_value=True)
application_versions = find_applications_on_system()
# Add found versions to config if they don't exist. Versions found
# in the config file takes precedence over versions found in PATH.
for item in application_versions.iteritems():
if not config.has_option(Config.EXECUTABLES, item[0]):
config.set(Config.EXECUTABLES, item[0], item[1])
return config
|
148e597f7fd9562f9830c8bd41126dd0efef96f1
| 28,445 |
import re
from unidecode import unidecode
def preProcess(column):
"""
Do a little bit of data cleaning with the help of Unidecode and Regex.
Things like casing, extra spaces, quotes and new lines can be ignored.
"""
column = unidecode(column)
column = re.sub('\n', ' ', column)
column = re.sub('-', '', column)
column = re.sub('/', ' ', column)
column = re.sub("'", '', column)
column = re.sub(",", '', column)
column = re.sub(":", ' ', column)
column = re.sub(' +', ' ', column)
column = column.strip().strip('"').strip("'").lower().strip()
if not column:
column = None
return column
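# Hedged usage sketch (illustrative, not part of the original snippet); assumes
# the `unidecode` package is installed.
print(preProcess("  Héllo,  World/Test\n"))  # -> "hello world test"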
|
fda71aab1b2ce2baedbbc5d2195f115c9561e75d
| 28,446 |
import numpy as np
def size_to_pnts(size) -> np.ndarray:
"""
    Get the four corner points of an image with the given size, as an array of shape (4, 2)
"""
width = size[0]
height = size[1]
return np.array([[0, 0], [width, 0], [width, height], [0, height]])
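# Hedged usage sketch (illustrative, not part of the original snippet).
print(size_to_pnts((640, 480)).tolist())  # -> [[0, 0], [640, 0], [640, 480], [0, 480]]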
|
ca189cea9201646b0ce4cf2e32c2e21ad26929f3
| 28,447 |
def create_scenario_mms_datasets(variable_name,
scenario_name,
num_chunks,
data_path,
normalized=False):
"""Create the multi-model statistics dataset for a scenario.
Runs the function initialize_empty_mms_arrays, fill_empty_arrays,
and create_xr_dataset to generate the multi-model statistics dataset
for a scenario. Prints to the user what is being done.
Args:
variable_name: The string name of the model variable.
scenario_name: The string name of the scenario.
num_chunks: Integer number of chunks to use for saving the zarr file.
data_path: String path where the arrays will be located.
normalized: False (default) if model data is not normalized.
Returns:
Arrays of dimensions (lats, lons, times) and multi-model statistic
values (mean_vals, max_vals, min_vals, std_vals).
"""
print('Creating empty arrays')
[empty_dsets,
dim_info,
dims,
file_names,
datasets] = initialize_empty_mms_arrays(data_path,
scenario_name=scenario_name,
num_chunks=20,
normalized=normalized)
[lats, lons, times] = dims
print('Calculating multimodel statistics')
[mean_vals,
min_vals,
max_vals,
std_vals] = fill_empty_arrays(empty_dsets,
dim_info,
file_names,
datasets,
variable_name,
num_chunks)
print('Exporting dataset')
ds = create_xr_dataset(lats, lons, times, mean_vals, max_vals, min_vals, std_vals)
export_dataset(ds=ds,
output_path=OUTPUT_PATH,
variable_name=variable_name,
scenario_name=scenario_name,
normalized=normalized)
return lats, lons, times, mean_vals, max_vals, min_vals, std_vals
|
0bef48bc009b2ee72abec511d9f6f886a8ed289c
| 28,448 |
def carla_rotation_to_numpy_rotation_matrix(carla_rotation):
"""
Convert a carla rotation to a Cyber quaternion
Considers the conversion from left-handed system (unreal) to right-handed
system (Cyber).
Considers the conversion from degrees (carla) to radians (Cyber).
:param carla_rotation: the carla rotation
:type carla_rotation: carla.Rotation
:return: a numpy.array with 3x3 elements
:rtype: numpy.array
"""
roll, pitch, yaw = carla_rotation_to_RPY(carla_rotation)
numpy_array = euler2mat(roll, pitch, yaw)
rotation_matrix = numpy_array[:3, :3]
return rotation_matrix
|
38aed692b0ad7008fff71dc9b31ce03d552ae2f2
| 28,449 |
def _liquid_viscocity(_T, ranged=True):
"""Pa * s"""
OutOfRangeTest(_T, 59.15, 130, ranged)
A, B, C, D, E = -2.0077E+01, 2.8515E+02, 1.7840E+00, -6.2382E-22, 10.0
return exp(A + B / _T + C * log(_T) + D * _T**E)
|
2fd29eea442862e4904d3164783694b151cab6c9
| 28,450 |
import re, fileinput
def readConfig(filename):
"""Parses a moosicd configuration file and returns the data within.
The "filename" argument specifies the name of the file from which to read
the configuration. This function returns a list of 2-tuples which associate
regular expression objects to the commands that will be used to play files
whose names are matched by the regexps.
"""
config = []
expecting_regex = True
regex = None
command = None
for line in fileinput.input(filename):
# skip empty lines
if re.search(r'^\s*$', line):
continue
# skip lines that begin with a '#' character
if re.search('^#', line):
continue
# chomp off trailing newline
if line[-1] == '\n':
line = line[:-1]
# the first line in each pair is interpreted as a regular expression
# note that case is ignored. it would be nice if there was an easy way
# for the user to choose whether or not case should be ignored.
if expecting_regex:
regex = re.compile(line)
expecting_regex = False
# the second line in each pair is interpreted as a command
else:
command = line.split()
config.append((regex, command))
expecting_regex = True
return config
|
3b641686b8e6cfaebec668367a12e32bc59104a8
| 28,452 |
from tensorflow.keras import backend as K  # Keras backend (assumed import)
def negative_f1(y_true, y_pred):
"""Implements custom negative F1 loss score for use in multi-isotope classifiers.
Args:
y_true: a list of ground truth.
y_pred: a list of predictions to compare against the ground truth.
Returns:
Returns the custom loss score.
Raises:
None
"""
diff = y_true - y_pred
negs = K.clip(diff, -1.0, 0.0)
false_positive = -K.sum(negs, axis=-1)
true_positive = 1.0 - false_positive
lower_clip = 1e-20
true_positive = K.clip(true_positive, lower_clip, 1.0)
return -K.mean(true_positive)
|
255c3e34a17f4301a6c842c4109d930916cac3d5
| 28,453 |
import numpy as np
import torch
def cal_gauss_log_lik(x, mu, log_var=0.0):
"""
:param x: batch of inputs (bn X fn)
:return: gaussian log likelihood, and the mean squared error
"""
MSE = torch.pow((mu - x), 2)
gauss_log_lik = -0.5*(log_var + np.log(2*np.pi) + (MSE/(1e-8 + torch.exp(log_var))))
MSE = torch.mean(torch.sum(MSE, axis=1))
gauss_log_lik = torch.mean(torch.sum(gauss_log_lik, axis=1))
return gauss_log_lik, MSE
|
b2d4f660c4475a632c649844694ff3f67dc93fca
| 28,454 |
def translate_fun_parseInt(x):
"""Converts parseInt(string, radix) to
__extrafunc_parseInt(string, radix=10)
Args:
x (str): JavaScript code to translate.
Returns:
str: Translated JavaScript code.
Examples:
>>> from ee_extra import translate_fun_parseInt
        >>> translate_fun_parseInt("parseInt('1010101', 2)")
"""
# Regex conditions to get the string to replace,
# the arguments, and the variable name.
arg_names = list(set(functextin(x, "parseInt")))
# if does not match the condition, return the original string
if arg_names == []:
return x, 0
replacement = [f"parseInt({arg_name})" for arg_name in arg_names]
to_replace_by = [
"__ee_extrafunc_parseInt(%s)" % (arg_name) for arg_name in arg_names
]
# Replace string by our built-in function
for z in zip(replacement, to_replace_by):
x = x.replace(z[0], z[1])
return x, 1
|
9bc63d3e4005fed12209de0169ad2641bcf09f65
| 28,455 |
import cv2
import numpy as np
from skimage import filters as skfil
from skimage.measure import label
from skimage.morphology import remove_small_objects
def simple_intensity_based_segmentation(image, gaussian_sigma=1, thresh_method="Otsu", smallest_area_of_object=5,label_img_depth = "8bit"):
"""Perform intensity based thresholding and detect objects
Args:
        image : the input intensity image (2D array)
gaussian_sigma : sigma to use for the gaussian filter
thresh_method : threshold method
smallest_area_of_object : smallest area of objects in pixels
label_img_depth : label depth
Returns:
A labelled image
"""
# apply a gaussian filter
image_smooth = skfil.gaussian(image, sigma=gaussian_sigma, preserve_range = True)
# apply threshold
bw = gen_background_mask(image_smooth, threshold_method = thresh_method)
#remove small objects
bw_size_filtered = remove_small_objects(bw,smallest_area_of_object )
#Label connected components
label_image = label(bw_size_filtered)
    # scale the label image to the requested bit depth
if (label_img_depth == "8bit"):
label_image_cor = cv2.normalize(label_image, None, 0, np.max(label_image), cv2.NORM_MINMAX, cv2.CV_8U)
elif (label_img_depth == "16bit"):
label_image_cor = cv2.normalize(label_image, None, 0, np.max(label_image), cv2.NORM_MINMAX, cv2.CV_16U)
else:
raise Exception('Invalid input: should be among {8bit, 16bit}')
return label_image_cor
|
2f270b38e7f5d07ceb4437d7b9b6d26174af56fc
| 28,456 |
import torch
import torch.nn.functional as F
def mish(x):
"""mish activation function
Args:
x (Tensor): input tensor.
Returns:
(Tensor): output tensor and have same shape with x.
Examples:
>>> mish(to_tensor([-3.0, -1.0, 0.0, 2.0]))
        tensor([-1.4228e-01, -2.6894e-01, 0.0000e+00, 1.7616e+00])
References:
Mish - "Mish: A Self Regularized Non-Monotonic Neural Activation Function"
https://arxiv.org/abs/1908.08681v1
"""
return x * (torch.tanh(F.softplus(x)))
|
73447216f12a2e60e9ccc249eca9abe4baa94be8
| 28,457 |
def text_box_end_pos(pos, text_box, border=0):
"""
Calculates end pos for a text box for cv2 images.
:param pos: Position of text (same as for cv2 image)
:param text_box: Size of text (same as for cv2 image)
:param border: Outside padding of textbox
    :return box_end_pos: End xy coordinates for text box (end_point for cv2.rectangle())
"""
box_x, box_y = pos
text_w, text_h = text_box
box_end_pos = (box_x + text_w + border, box_y + text_h + border)
return box_end_pos
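# Hedged usage sketch (illustrative, not part of the original snippet): with the
# box origin at (10, 20), a text size of (100, 30) and a 5 px border, the
# rectangle end point is (115, 55).
print(text_box_end_pos((10, 20), (100, 30), border=5))  # -> (115, 55)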
|
5bd2b46fe3456ccdef1407b90256edeb310d92bc
| 28,458 |
def ticket_competence_add_final(request, structure_slug, ticket_id,
new_structure_slug, structure, can_manage, ticket,
office_employee=None):
"""
Adds new ticket competence (second step)
:type structure_slug: String
:type ticket_id: String
:type new_structure_slug: String
:type structure: OrganizationalStructure (from @has_admin_privileges)
:type can_manage: Dictionary (from @has_admin_privileges)
:type ticket: Ticket (from @ticket_assigned_to_structure)
:type office_employee: OrganizationalStructureOfficeEmployee (from @is_operator)
:param structure_slug: structure slug
:param ticket_id: ticket code
:param new_structure_slug: selected structure slug
:param structure: structure object (from @has_admin_privileges)
:param can_manage: if user can manage or can read only (from @has_admin_privileges)
:param ticket: ticket object (from @ticket_assigned_to_structure)
:param office_employee: operator offices queryset (from @is_operator)
:return: render
"""
strutture = OrganizationalStructure.objects.filter(is_active = True)
    # List of offices the ticket is assigned to
ticket_offices = ticket.get_assigned_to_offices(office_active=False)
operator_offices_list = []
assignments = TicketAssignment.objects.filter(ticket=ticket,
office__organizational_structure=structure,
office__is_active=True,
follow=True,
taken_date__isnull=False)
for assignment in assignments:
if user_manage_office(user=request.user,
office=assignment.office):
operator_offices_list.append(assignment.office)
new_structure = get_object_or_404(OrganizationalStructure,
slug=new_structure_slug,
is_active=True)
categorie = TicketCategory.objects.filter(organizational_structure=new_structure.pk,
is_active=True)
if request.method == 'POST':
form = TicketCompetenceSchemeForm(data=request.POST)
if form.is_valid():
category_slug = form.cleaned_data['category_slug']
follow = form.cleaned_data['follow']
readonly = form.cleaned_data['readonly']
selected_office_slug = form.cleaned_data['selected_office']
# Refactor
# follow_value = form.cleaned_data['follow']
# readonly_value = form.cleaned_data['readonly']
# follow = True if follow_value == 'on' else False
# readonly = True if readonly_value == 'on' else False
            # Does the category passed via POST exist?
categoria = get_object_or_404(TicketCategory,
slug=category_slug,
organizational_structure=new_structure,
is_active=True)
selected_office = None
if selected_office_slug:
selected_office = get_object_or_404(OrganizationalStructureOffice,
slug=selected_office_slug,
organizational_structure=structure,
is_active=True)
            # If the category has no associated office, show the user an error
            # message, because the special Help-Desk office is already
            # competent for the ticket
if not categoria.organizational_office:
messages.add_message(request, messages.ERROR,
_("Il ticket è già di competenza"
" dell'ufficio speciale <b>{}</b>,"
" che ha la competenza della tipologia di richiesta "
"<b>{}</b>".format(settings.DEFAULT_ORGANIZATIONAL_STRUCTURE_OFFICE,
categoria)))
return redirect('uni_ticket:manage_ticket_url_detail',
structure_slug=structure_slug,
ticket_id=ticket_id)
new_office = categoria.organizational_office
if new_office in ticket_offices:
messages.add_message(request, messages.ERROR,
_("Il ticket è già di competenza"
" dell'ufficio <b>{}</b>, responsabile"
" della tipologia di richiesta <b>{}</b>".format(new_office,
categoria)))
return redirect('uni_ticket:manage_ticket_url_detail',
structure_slug=structure_slug,
ticket_id=ticket_id)
messages.add_message(request, messages.SUCCESS,
_("Competenza <b>{}</b> aggiunta"
" correttamente".format(new_office)))
# If not follow anymore
if not follow:
abandoned_offices = ticket.block_competence(user=request.user,
structure=structure,
allow_readonly=False,
selected_office=selected_office)
for off in abandoned_offices:
# if off.is_default:
# messages.add_message(request, messages.WARNING,
# _("L'ufficio <b>{}</b> non può essere"
# " rimosso dagli uffici competenti".format(off)))
# else:
ticket.update_log(user=request.user,
note= _("Competenza abbandonata da"
" Ufficio: {}".format(off)))
# If follow but readonly
elif readonly:
abandoned_offices = ticket.block_competence(user=request.user,
structure=structure,
selected_office=selected_office)
for off in abandoned_offices:
if off.is_default:
messages.add_message(request, messages.WARNING,
_("L'ufficio <b>{}</b> non può essere"
" posto in sola lettura".format(off)))
else:
ticket.update_log(user=request.user,
note= _("Competenza trasferita da"
" Ufficio: {}."
" (L'ufficio ha mantenuto"
" accesso in sola lettura)".format(off)))
# If follow and want to manage
ticket.add_competence(office=new_office,
user=request.user)
ticket.update_log(user=request.user,
note= _("Nuova competenza: {} - {}"
" - Categoria: {}".format(new_structure,
new_office,
categoria)))
# log action
logger.info('[{}] {} added new competence to'
' ticket {}'
' (follow: {}) (readonly: {})'.format(timezone.now(),
request.user,
ticket,
follow,
readonly))
return redirect('uni_ticket:manage_ticket_url_detail',
structure_slug=structure_slug,
ticket_id=ticket_id)
else:
for k,v in get_labeled_errors(form).items():
messages.add_message(request, messages.ERROR,
"<b>{}</b>: {}".format(k, strip_tags(v)))
user_type = get_user_type(request.user, structure)
template = "{}/add_ticket_competence.html".format(user_type)
title = _('Trasferisci competenza ticket')
sub_title = '{} ({})'.format(ticket.subject, ticket_id)
d = {'can_manage': can_manage,
'categorie': categorie,
'operator_offices': operator_offices_list,
'structure': structure,
'structure_slug': new_structure_slug,
'strutture': strutture,
'sub_title': sub_title,
'ticket': ticket,
'title': title,}
return render(request, template, d)
|
b3e159494d8f7ecf7603596face065f02e44e00e
| 28,459 |
def _find_computecpp_root(repository_ctx):
"""Find ComputeCpp compiler"""
computecpp_path = ""
if _COMPUTECPP_TOOLKIT_PATH in repository_ctx.os.environ:
computecpp_path = repository_ctx.os.environ[_COMPUTECPP_TOOLKIT_PATH].strip()
if computecpp_path.startswith("/"):
_check_computecpp_version(repository_ctx, computecpp_path)
return computecpp_path
fail("Cannot find SYCL compiler, please correct your path")
|
91bc817036a976565434f1a3c52c5bb7e80ed86d
| 28,460 |
def distance(v):
"""
    Estimated distance to the body of the Mandelbulb
"""
z = v
for k in range(MAX_ITERS):
l = (z**2).sum()
if l > BAILOUT:
escape_time = k
break
z = pow3d(z, ORDER) + v
else:
return 0
return np.log(np.log(l)) / MU_NORM - escape_time + MAX_ITERS - 2
|
79a6075da3c022c48c111ffec015835716c12f9a
| 28,461 |
def _depol_error_value_two_qubit(error_param,
gate_time=0,
qubit0_t1=inf,
qubit0_t2=inf,
qubit1_t1=inf,
qubit1_t2=inf):
"""Return 2-qubit depolarizing channel parameter for device model"""
# Check trivial case where there is no gate error
if error_param is None:
return None
if error_param == 0:
return 0
# Check t1 and t2 are valid
if qubit0_t1 <= 0 or qubit1_t1 <= 0:
raise NoiseError("Invalid T_1 relaxation time parameter: T_1 <= 0.")
if qubit0_t2 <= 0 or qubit1_t2 <= 0:
raise NoiseError("Invalid T_2 relaxation time parameter: T_2 <= 0.")
if qubit0_t2 - 2 * qubit0_t1 > 0 or qubit1_t2 - 2 * qubit1_t1 > 0:
raise NoiseError(
"Invalid T_2 relaxation time parameter: T_2 greater than 2 * T_1.")
if gate_time is None:
gate_time = 0
if gate_time == 0 or (qubit0_t1 == inf and
qubit0_t2 == inf and
qubit1_t1 == inf and
qubit1_t2 == inf):
if error_param is not None and error_param > 0:
return 4 * error_param / 3
else:
return 0
# Otherwise we calculate the depolarizing error probability to account
# for the difference between the relaxation error and gate error
if qubit0_t1 == inf:
q0_par1 = 1
else:
q0_par1 = exp(-gate_time / qubit0_t1)
if qubit0_t2 == inf:
q0_par2 = 1
else:
q0_par2 = exp(-gate_time / qubit0_t2)
if qubit1_t1 == inf:
q1_par1 = 1
else:
q1_par1 = exp(-gate_time / qubit1_t1)
if qubit1_t2 == inf:
q1_par2 = 1
else:
q1_par2 = exp(-gate_time / qubit1_t2)
denom = (
q0_par1 + q1_par1 + q0_par1 * q1_par1 + 4 * q0_par2 * q1_par2 +
2 * (q0_par2 + q1_par2) + 2 * (q1_par1 * q0_par2 + q0_par1 * q1_par2))
depol_param = 1 + 5 * (4 * error_param - 3) / denom
return depol_param
|
ab779de7d0fac3f828f9fffbb0c13e588c3fc54b
| 28,462 |
def get_google_order_sheet():
""" Return the google orders spreadsheet """
return get_google_sheet(ANDERSEN_LAB_ORDER_SHEET, 'orders')
|
69ce8dcf03fd31701700eb0515ae7c3b47c9d127
| 28,463 |
def collision_check(direction):
"""
:param direction: Str : example up
:return:
"""
    # really scuffed, needs a hard rework; wrote this late at night and it's bad,
    # but it does its job, so I guess it's ok for now
if mapGen.map[p.position][direction] is not None:
if mapGen.map[mapGen.map[p.position][direction]]["biom"] != "locked_box":
if mapGen.map[mapGen.map[p.position][direction]] is not None:
if mapGen.map[mapGen.map[p.position][direction]]["biom"] == "box":
if mapGen.map[mapGen.map[p.position][direction]][direction] is not None:
if mapGen.map[mapGen.map[mapGen.map[p.position][direction]][direction]]["biom"] == "box":
return False
if mapGen.map[mapGen.map[p.position][direction]][direction] is not None:
if mapGen.map[mapGen.map[mapGen.map[p.position][direction]][direction]]["biom"] == "locked_box":
return False
if mapGen.map[mapGen.map[p.position][direction]][direction] is not None:
if mapGen.map[mapGen.map[mapGen.map[p.position][direction]][direction]]["biom"] == "destination":
mapGen.map[mapGen.map[mapGen.map[p.position][direction]][direction]]["biom"] = "locked_box"
mapGen.map[mapGen.map[p.position][direction]]["biom"] = "land"
p.position = mapGen.map[p.position][direction]
return True
else:
mapGen.map[mapGen.map[mapGen.map[p.position][direction]][direction]]["biom"] = "box"
mapGen.map[mapGen.map[p.position][direction]]["biom"] = "land"
p.position = mapGen.map[p.position][direction]
return True
else:
p.position = mapGen.map[p.position][direction]
else:
return False
|
35b0bd0e6e2811b470bd513ea33c339ed7c7a96b
| 28,464 |
def get_freq(freq):
"""
Return frequency code of given frequency str.
If input is not string, return input as it is.
Example
-------
>>> get_freq('A')
1000
>>> get_freq('3A')
1000
"""
if isinstance(freq, compat.string_types):
base, mult = get_freq_code(freq)
freq = base
return freq
|
16998470970449a9f94758c87c1d42e392c86dc9
| 28,465 |
def OMRSE(
directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
load_edge_weights = True, auto_enable_tradeoffs = True,
sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-08-30", **kwargs
) -> Graph:
"""Return OMRSE graph
Parameters
----------
directed = False
preprocess = "auto"
Preprocess for optimal load time & memory peak.
Will preprocess in Linux/macOS but not Windows.
load_nodes = True
Load node names or use numeric range
auto_enable_tradeoffs = True
Enable when graph has < 50M edges
cache_path = None
Path to store graphs
Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`
cache_sys_var = "GRAPH_CACHE_DIR"
version = "2021-08-30"
Version to retrieve
The available versions are:
- 2022-04-06
- 2021-08-30
"""
return AutomaticallyRetrievedGraph(
"OMRSE", version, "kgobo", directed, preprocess, load_nodes,
load_node_types, load_edge_weights, auto_enable_tradeoffs, sort_tmp_dir, verbose, cache,
cache_path, cache_sys_var, kwargs
)()
|
2de03cc7da02e58279ed5b576db4cbe14c98e0b5
| 28,466 |
from sklearn.decomposition import PCA
def produce_new_shapedir(verts, n_betas=20):
"""Given a matrix of batch of vertices, run PCA through SVD in order to identify
a certain number of shape parameters to best describe the vert shape.
:param verts: (N x V x 3) array
:param n_betas: Number of betas to be fitted to, B
:return vtemplate: (V x 3) Array of new template vertices
:return shapedir: (nbetas x 3V) Matrix that maps an array of betas to a set of vertex deformations from the template verts
"""
N, V, _ = verts.shape
K = min(N, V, n_betas)
if n_betas > K:
print(f"Insufficient size for {n_betas} betas. Using K = {K}")
n_betas = K
v_template = verts.mean(axis=0) # set new template verts
offsets = (verts - v_template).reshape(N, 3*V)
pca = PCA(n_components = K)
fit = pca.fit(offsets)
vecs = fit.components_ * fit.explained_variance_[:, None] ** 0.5 # multiply principal unit vectors by variance
shapedir = vecs.T.reshape(V, 3, K)
return v_template, shapedir
|
b12008b0c8b3809d211753dab6b89d3fcbb8bbce
| 28,467 |
import logging
import numpy as np
import pandas as pd
def get_calibration(
df: pd.DataFrame,
features:pd.DataFrame,
outlier_std: float = 3,
calib_n_neighbors: int = 100,
calib_mz_range: int = 20,
calib_rt_range: float = 0.5,
calib_mob_range: float = 0.3,
**kwargs) -> (np.ndarray, float):
"""Wrapper function to get calibrated values for the precursor mass.
Args:
df (pd.DataFrame): Input dataframe that contains identified peptides.
features (pd.DataFrame): Features dataframe for which the masses are calibrated.
outlier_std (float, optional): Range in standard deviations for outlier removal. Defaults to 3.
calib_n_neighbors (int, optional): Number of neighbors used for regression. Defaults to 100.
calib_mz_range (int, optional): Scaling factor for mz range. Defaults to 20.
calib_rt_range (float, optional): Scaling factor for rt_range. Defaults to 0.5.
calib_mob_range (float, optional): Scaling factor for mobility range. Defaults to 0.3.
**kwargs: Arbitrary keyword arguments so that settings can be passes as whole.
Returns:
corrected_mass (np.ndarray): The calibrated mass
        y_hat_std (float): The standard deviation of the precursor offset after calibration
        mad_offset (float): The median absolute deviation of the precursor offset after calibration
"""
target = 'prec_offset_ppm'
cols = ['mz','rt']
if 'mobility' in df.columns:
cols += ['mobility']
scaling_dict = {}
scaling_dict['mz'] = ('relative', calib_mz_range/1e6)
scaling_dict['rt'] = ('absolute', calib_rt_range)
scaling_dict['mobility'] = ('relative', calib_mob_range)
df_sub = remove_outliers(df, outlier_std)
if len(df_sub) > calib_n_neighbors:
y_hat = kneighbors_calibration(df_sub, features, cols, target, scaling_dict, calib_n_neighbors)
corrected_mass = (1-y_hat/1e6) * features['mass_matched']
y_hat_std = y_hat.std()
mad_offset = np.median(np.absolute(y_hat - np.median(y_hat)))
logging.info(f'Precursor calibration std {y_hat_std:.2f}, {mad_offset:.2f}')
return corrected_mass, y_hat_std, mad_offset
else:
logging.info('Not enough data points present. Skipping recalibration.')
mad_offset = np.median(np.absolute(df['prec_offset_ppm'].values - np.median(df['prec_offset_ppm'].values)))
return features['mass_matched'], np.abs(df['prec_offset_ppm'].std()), mad_offset
|
af53baf47e1999f5ef421ddb1a12e2a41757f62e
| 28,468 |
from base64 import b64decode
from typing import Mapping
from typing import Iterable
def _decode_bytestrings(o):
"""Decode all base64-encoded values (not keys) to bytestrings"""
if isinstance(o, Mapping):
return {key: _decode_bytestrings(value) for key, value in o.items()}
elif isinstance(o, Iterable) and not isinstance(o, (str, bytes)):
return list([_decode_bytestrings(value) for value in o])
elif isinstance(o, str) and o.startswith(BASE64_IDENTIFIER):
return b64decode(o[len(BASE64_IDENTIFIER):])
else:
return o
|
6a4fd49b50df91ee9705eda2192d10cb8f64606b
| 28,470 |
def extinction(lambda1in,R,unit = 'microns'):
"""
Calculates A(lambda)/A_V. So, if we know E(B - V), we do
A(lambda) = A(lambda)/A_V * E(B - V) * R.
R is alternatively R_V, usually 3.1---this parameterizes the extinction law, which you should know if you are using this function.
This is the CCM89 extinction law, which assumes microns.
"""
if 'ang' in unit:
lambda1 = lambda1in/1.e4
else:
lambda1 = lambda1in
if (lambda1 > 100).all():
print("Check units! This program assumes microns")
if (lambda1 > 3.0).any():
print ("Warning: extrapolating into the far IR (lambda > 3 microns)")
if (lambda1 < 0.125).any():
print('Warning: extreme UV is an extrapolation')
if (lambda1 < 0.1).any():
print('warning: extrapolating into the extreme UV (lambda < 1000 A)')
a = sp.zeros(lambda1.size)
b = sp.zeros(lambda1.size)
m = (lambda1 > 0.909)
a[m] = 0.574*(1/lambda1[m])**(1.61)
b[m] = -0.527*(1/lambda1[m])**(1.61)
m = (lambda1 > 0.30303)*(lambda1 <= 0.909)
x = 1/lambda1[m] - 1.82
a[m] = 1 + 0.17699*x - 0.50447*x**2 - 0.02427*x**3 + 0.72085*x**4 + 0.01979*x**5 - 0.7753*x**6 + 0.32999*x**7
b[m] = 1.41338*x + 2.28305*x**2 + 1.07233*x**3 - 5.38434*x**4 - 0.62251*x**5 + 5.3026*x**6 - 2.09002*x**7
m = (lambda1 > 0.125)*(lambda1 <= 0.30303)
x = 1/lambda1[m]
a[m] = 1.752 - 0.316*x - 0.104/( (x - 4.67)**2 + 0.341)
b[m] = -3.090 + 1.825*x + 1.206/( (x - 4.62)**2 + 0.263)
m = (lambda1 > 0.125)*(lambda1 <= 0.1695)
x = 1/lambda1[m]
a[m] += -0.04473*(x - 5.9)**2 - 0.009779*(x-5.9)**3
b[m] += 0.21300*(x - 5.9)**2 + 0.120700*(x - 5.9)**3
m = (lambda1 < 0.125)
x = 1/lambda1[m]
a[m] = -1.073 - 0.628*(x - 8.) + 0.137*(x - 8.)**2 - 0.070*(x - 8.)**3
b[m] = 13.670 + 4.257*(x - 8.) - 0.420*(x - 8.)**2 + 0.374*(x - 8.)**3
return a + b/R
|
d6b7a728de0b861786f6e28d3000f77d90248703
| 28,471 |
import numpy as np
def to_rtp(F0, phi, h):
""" Converts from spherical to Cartesian coordinates (up-south-east)
"""
# spherical coordinates in "physics convention"
r = F0
phi = np.radians(phi)
theta = np.arccos(h)
x = F0*np.sin(theta)*np.cos(phi)
y = F0*np.sin(theta)*np.sin(phi)
z = F0*np.cos(theta)
if type(F0) is np.ndarray:
return np.column_stack([z, -y, x,])
else:
return np.array([z, -y, x])
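# Hedged usage sketch (illustrative, not part of the original snippet): a unit
# vector with phi = 90 degrees and h = 0 maps to (approximately) [0, -1, 0].
print(np.round(to_rtp(1.0, 90.0, 0.0), 6))  # -> [ 0. -1.  0.]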
|
dbf89e94d9e66925621969c53b476456a27af93d
| 28,472 |
def to_base(num, base, numerals=NUMERALS):
"""Convert <num> to <base> using the symbols in <numerals>"""
    num = int(num)
    base = int(base)
if not (0 < base < len(numerals)):
raise ValueError("<base> must be in the range [1, %i>" % len(numerals))
if num == 0:
return '0'
if num < 0:
sign = '-'
num = -num
else:
sign = ''
if base == 1:
return sign + ('1'*num)
result = ''
while num:
result = numerals[num % (base)] + result
num //= base
return sign + result
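# Hedged usage sketch (illustrative, not part of the original snippet): NUMERALS
# is not shown in the snippet, so explicit digit alphabets are passed here. Note
# the range check requires base < len(numerals).
print(to_base(-42, 2, numerals="012"))                   # -> '-101010'
print(to_base(255, 16, numerals="0123456789abcdefg"))    # -> 'ff'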
|
aa25cb3f26e855d17c88be25b05251ebec216790
| 28,473 |
from inspect import signature
def identify_algorithm_hyperparameters(model_initializer): # FLAG: Play nice with Keras
"""Determine keyword-arguments accepted by `model_initializer`, along with their default values
Parameters
----------
model_initializer: functools.partial, or class, or class instance
The algorithm class being used to initialize a model
Returns
-------
hyperparameter_defaults: dict
The dict of kwargs accepted by `model_initializer` and their default values"""
hyperparameter_defaults = dict()
# FLAG: Play nice with Keras
try:
signature_parameters = signature(model_initializer).parameters
except TypeError:
signature_parameters = signature(model_initializer.__class__).parameters
for k, v in signature_parameters.items():
if (v.kind == v.KEYWORD_ONLY) or (v.kind == v.POSITIONAL_OR_KEYWORD):
hyperparameter_defaults[k] = v.default
return hyperparameter_defaults
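# Hedged usage sketch (illustrative, not part of the original snippet), using a
# hypothetical estimator class in place of a real algorithm.
class _DemoModel:
    def __init__(self, alpha=0.1, n_estimators=100):
        self.alpha, self.n_estimators = alpha, n_estimators

print(identify_algorithm_hyperparameters(_DemoModel))
# -> {'alpha': 0.1, 'n_estimators': 100}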
|
5ed499e8b5cf832a75009adf3bb29c7f65d97d35
| 28,474 |
from typing import List
from typing import Tuple
def cnf_rep_to_text(cnf_rep: List[List[Tuple[str, bool]]]) -> str:
"""
Converts a CNF representation to a text.
:param cnf_rep: The CNF representation to convert.
:return: The text representation of the CNF.
"""
lines = []
for sentence in cnf_rep:
sentence_str = ''
first_in_clause = True
for atom in sentence:
if first_in_clause:
first_in_clause = False
else:
sentence_str += ' '
if atom[1]:
sentence_str += atom[0]
else:
sentence_str += '!' + atom[0]
lines.append(sentence_str)
return '\n'.join(lines)
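# Hedged usage sketch (illustrative, not part of the original snippet): two
# clauses, (A OR NOT B) and (C), rendered one clause per line.
print(cnf_rep_to_text([[("A", True), ("B", False)], [("C", True)]]))
# prints:
# A !B
# C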
|
dec3754493cfb0bd9fb5e68d2bab92a40bd0f294
| 28,475 |
def reshape_for_linear(images):
"""Reshape the images for the linear model
Our linear model requires that the images be reshaped as a 1D tensor
"""
n_images, n_rgb, img_height, img_width = images.shape
return images.reshape(n_images, n_rgb * img_height * img_width)
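# Hedged usage sketch (illustrative, not part of the original snippet); assumes
# a numpy batch shaped (n_images, n_rgb, height, width).
import numpy as np

print(reshape_for_linear(np.zeros((16, 3, 32, 32))).shape)  # -> (16, 3072)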
|
dffc5e7d0f96c4494443a7480be081b8fe6b4abd
| 28,476 |
def otp(data, password, encodeFlag=True):
""" do one time pad encoding on a sequence of chars """
pwLen = len(password)
if pwLen < 1:
return data
out = []
for index, char in enumerate(data):
pwPart = ord(password[index % pwLen])
newChar = char + pwPart if encodeFlag else char - pwPart
newChar = newChar + 256 if newChar < 0 else newChar
newChar = newChar - 256 if newChar >= 256 else newChar
out.append(newChar)
return bytes(out)
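# Hedged usage sketch (illustrative, not part of the original snippet): encoding
# and then decoding with the same password round-trips the data.
encoded = otp(b"hello", "key")
print(otp(encoded, "key", encodeFlag=False))  # -> b'hello'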
|
34223c69149b09b1cc3bde8bf1c432f21415362b
| 28,477 |
from datetime import datetime
def export_actions(path='/tmp', http_response=False):
"""
A script for exporting Enforcement Actions content
to a CSV that can be opened easily in Excel.
Run from within consumerfinance.gov with:
`python cfgov/manage.py runscript export_enforcement_actions`
By default, the script will dump the file to `/tmp/`,
unless a path argument is supplied,
or http_response is set to True (for downloads via the Wagtail admin).
A command that passes in path would look like this:
`python cfgov/manage.py runscript export_enforcement_actions
--script-args [PATH]`
"""
    timestamp = datetime.now().strftime("%Y-%m-%d")
slug = 'enforcement-actions-{}.csv'.format(timestamp)
if http_response:
response = HttpResponse(content_type='text/csv; charset=utf-8')
response['Content-Disposition'] = 'attachment;filename={}'.format(slug)
write_questions_to_csv(response)
return response
file_path = '{}/{}'.format(path, slug).replace('//', '/')
with open(file_path, 'w', encoding='windows-1252') as f:
write_questions_to_csv(f)
|
f85f095b6c7c3bd5a1a5277125c56e17e9d8cbd9
| 28,478 |
def reduce_load_R():
"""
Used for reconstructing a copy of the R interpreter from a pickle.
EXAMPLES::
sage: from sage.interfaces.r import reduce_load_R
sage: reduce_load_R()
R Interpreter
"""
return r
|
858723306137a0e751f25766cbea5609867255f5
| 28,479 |
import urllib
def path_to_playlist_uri(relpath):
"""Convert path relative to playlists_dir to M3U URI."""
if isinstance(relpath, compat.text_type):
relpath = relpath.encode('utf-8')
return b'm3u:%s' % urllib.quote(relpath)
|
a69c441411b09ccce387ff76d93d17013b960de4
| 28,480 |
from json import load
import logging
def read_drive_properties(path_name):
"""
Reads drive properties from json formatted file.
Takes (str) path_name as argument.
Returns (dict) with (bool) status, (str) msg, (dict) conf
"""
try:
with open(path_name) as json_file:
conf = load(json_file)
return {"status": True, "msg": f"Read from file: {path_name}", "conf": conf}
except (IOError, ValueError, EOFError, TypeError) as error:
logging.error(str(error))
return {"status": False, "msg": str(error)}
except:
logging.error("Could not read file: %s", path_name)
return {"status": False, "msg": f"Could not read file: {path_name}"}
|
18b9051801b032f5aa5532da0cfcca8793be8c91
| 28,481 |
import math
import numpy as np
def proj(
point: np.ndarray, tol: float = 1e-9, bounds: tuple[float, float] = (0, 1)
) -> tuple[np.ndarray, float]:
"""Find projection on true ROC.
Args:
point: A point in [0, 1]^2.
tol: Tolerance.
bounds: Bounds of projection to help with the calculation.
Returns:
Projection on the true ROC and the slope there.
"""
left, right = bounds
roc_left = gauss_roc(left, tol)
if left + roc_left[0] >= sum(point):
return np.array([left, roc_left[0]]), roc_left[1]
roc_right = gauss_roc(right, tol)
if right + roc_right[0] <= sum(point):
return np.array([right, roc_right[0]]), roc_right[1]
while not math.isclose(left, right, abs_tol=tol):
middle = (left + right) / 2
if middle + gauss_roc(middle, tol)[0] > sum(point):
right = middle
else:
left = middle
if left == bounds[0]:
return np.array([left, roc_left[0]]), roc_left[1]
if right == bounds[1]:
return np.array([right, roc_right[0]]), roc_right[1]
roc_middle = gauss_roc(left, tol)
return np.array([left, roc_middle[0]]), roc_middle[1]
|
c56c1d9beb54f45691d63900e7458ed1ec4218ee
| 28,482 |
def _split_on_wildcard(string):
"""Split the string into two such that first part does not have any wildcard.
Args:
string (str): The string to be split.
Returns:
A 2-tuple where first part doesn't have any wildcard, and second part does
have a wildcard. If wildcard is not found, the second part is empty.
If string starts with a wildcard then first part is empty.
For example:
_split_on_wildcard('a/b/c/d*e/f/*.txt') => ('a/b/c/d', '*e/f/*.txt')
_split_on_wildcard('*e/f/*.txt') => ('', '*e/f/*.txt')
_split_on_wildcard('a/b/c/d') => ('a/b/c/d', '')
"""
match = WILDCARD_REGEX.search(string)
if match is None:
return string, ''
first_wildcard_idx = match.start()
prefix = string[:first_wildcard_idx]
wildcard_str = string[first_wildcard_idx:]
return prefix, wildcard_str
|
09625186d22d50b737c94d2b22156a48bbf9b5ad
| 28,483 |
def esgUSPTOPatentGrantsDF(symbol="", **kwargs):
"""Patent grants are indications that a company has successfully signaled that it values its IP, that its IP is unique in the eyes of the USPTO, and that its initial patent application was a reasonable one.
Patent grants data is issued weekly on Tuesdays.
Currently only the first three assignees listed on the patent are included. Future versions may contain more detail on the content of patent grants, including assignees beyond the first three listed on the grant.
History available from 2002
https://iexcloud.io/docs/api/#esg-uspto-patent-grants
Args:
symbol (str): symbol to use
"""
kwargs["subkey"] = "10"
return _baseDF(id="PREMIUM_EXTRACT_ALPHA_ESG", symbol=symbol, **kwargs)
|
22524f06572dca4fd2118a407273b7d23e68453c
| 28,484 |
import morecantile
from affine import Affine
from rasterio.vrt import WarpedVRT
from typing import Optional
from typing import Dict
def get_web_optimized_params(
src_dst,
zoom_level_strategy: str = "auto",
aligned_levels: Optional[int] = None,
tms: morecantile.TileMatrixSet = morecantile.tms.get("WebMercatorQuad"),
) -> Dict:
"""Return VRT parameters for a WebOptimized COG."""
if src_dst.crs != tms.rasterio_crs:
with WarpedVRT(src_dst, crs=tms.rasterio_crs) as vrt:
bounds = vrt.bounds
aff = list(vrt.transform)
else:
bounds = src_dst.bounds
aff = list(src_dst.transform)
resolution = max(abs(aff[0]), abs(aff[4]))
# find max zoom (closest to the raster resolution)
max_zoom = tms.zoom_for_res(
resolution, max_z=30, zoom_level_strategy=zoom_level_strategy,
)
# defined the zoom level we want to align the raster
aligned_levels = aligned_levels or 0
base_zoom = max_zoom - aligned_levels
# find new raster bounds (bounds of UL tile / LR tile)
ul_tile = tms._tile(bounds[0], bounds[3], base_zoom)
w, _, _, n = tms.xy_bounds(ul_tile)
# The output resolution should match the TMS resolution at MaxZoom
vrt_res = tms._resolution(tms.matrix(max_zoom))
# Output transform is built from the origin (UL tile) and output resolution
vrt_transform = Affine(vrt_res, 0, w, 0, -vrt_res, n)
lr_tile = tms._tile(bounds[2], bounds[1], base_zoom)
e, _, _, s = tms.xy_bounds(
morecantile.Tile(lr_tile.x + 1, lr_tile.y + 1, lr_tile.z)
)
vrt_width = max(1, round((e - w) / vrt_transform.a))
vrt_height = max(1, round((s - n) / vrt_transform.e))
return dict(
crs=tms.rasterio_crs,
transform=vrt_transform,
width=vrt_width,
height=vrt_height,
)
|
2e108f4619f5bf672981e60114065196f15116d0
| 28,487 |
from sage.misc.superseded import deprecation
def AlternatingSignMatrices_n(n):
"""
For old pickles of ``AlternatingSignMatrices_n``.
EXAMPLES::
sage: sage.combinat.alternating_sign_matrix.AlternatingSignMatrices_n(3)
doctest:...: DeprecationWarning: this class is deprecated. Use sage.combinat.alternating_sign_matrix.AlternatingSignMatrices instead
See http://trac.sagemath.org/14301 for details.
Alternating sign matrices of size 3
"""
deprecation(14301,'this class is deprecated. Use sage.combinat.alternating_sign_matrix.AlternatingSignMatrices instead')
return AlternatingSignMatrices(n)
|
3fad083e18ded990b62f2453ba75966fac6df6ed
| 28,488 |
def _get_band(feature, name, size):
"""
Gets a band normalized and correctly scaled from the raw data.
Args:
feature (obj): the feature as it was read from the files.
name (str): the name of the band.
size (int): the size of the band.
Returns:
tf.Tensor: the band parsed into a tensor for further manipulation.
"""
return _normalize_band(tf.reshape(feature[name], [size, size]), name)
|
db13abf7dc1cfa1cff88da864c2aaac043f574b2
| 28,489 |
import functools
sentinel = object()  # module-level "no default supplied" marker (assumed; needed for the default below)
def rgetattr(obj, attr, default=sentinel):
"""
from https://stackoverflow.com/questions/31174295/getattr-and-setattr-on-nested-objects
"""
if default is sentinel:
_getattr = getattr
else:
def _getattr(obj, name):
return getattr(obj, name, default)
return functools.reduce(_getattr, [obj] + attr.split('.'))
|
6b6b7d98e117647a5609e10a499795ad293f1c6d
| 28,490 |
def unpack_dims(data, vlabels):
"""
Unpacks an interleaved 4th dimension in an imaging data array
Parameters
----------
data : np array
a numpy array of data. Should have 3 spatial dimensions followed by
one nonspatial dimension of interleaved data
vlabels : pandas DataFrame
a dataframe indicating the label type for each slice in the 4th dimension.
Each column specifies a label type, and each row gives the labeling combination for
each index in the 4th dimension. It is assumed row 0 corresponds to index 0 in the 4th
dimension, and so on. Additionally, each column should be gapless. In other words, there
should be at least one entry for each integer between the min and max of a column. Otherwise
you will get blank dimensions in the unpacked data.
The vlabels are returned as part of parrec_to_nifti. They can also be found as
an ordered dict with parrecobject.header.get_volume_labels() (but you must
convert to an DataFrame). A NiFTi using parrec_to_nifti may also have the labels
saved as an Excel file
Note that you change the order of the column of vlabels,
and this will change the order of the returned dimensions of the output
Returns
-------
new_data : np array
The data with the 4th dimension unpacked into n additional dimensions, where
n is the number of columns in vlabels. For example, if relabels has three columns,
then the 4th dimension of the original data will become the 4th, 5th and 6th dimensions
in new_data. The 1st column in vlabels will be stored as the 4th dimension, the 2nd
column in vlabels will be stored as the 5th dimension, and so on
"""
adj_labels = vlabels.copy()
for col in adj_labels.columns:
# we need to make the labels zero indexed
adj_labels[col] = adj_labels[col] - np.min(adj_labels[col])
spatial_dim_maxes = data.shape[:3]
extra_dim_maxes = [i for i in adj_labels.max()+1]
dim_shape = []
dim_shape.extend(spatial_dim_maxes)
dim_shape.extend(extra_dim_maxes)
new_data = np.zeros(dim_shape)
for i, row in adj_labels.iterrows():
sli = data[:,:,:,i]
# construct an indexing tuple
itup = [...]
add_labels = list(row)
itup.extend(add_labels)
itup = tuple(itup)
new_data[itup] = sli
return new_data
|
1eaf0b00d4dd8be26927845aefa334b84e9264df
| 28,491 |
def bash_this(s):
"""produce a shell fragment that runs the string str inside a fresh bash.
This works around potential strange options that are set in the topmost
bash like POSIX-compatibility mode, -e or similar."""
return 'bash -c %s' % shell_quote(s)
|
e06d287ebdf226ab6f83004c65cea7ada94232e1
| 28,492 |
import tensorflow as tf
def multiply_tensors(tensor1, tensor2):
"""Multiplies two tensors in a matrix-like multiplication based on the
last dimension of the first tensor and first dimension of the second
tensor.
Inputs:
tensor1: A tensor of shape [a, b, c, .., x]
tensor2: A tensor of shape [x, d, e, f, ...]
Outputs:
A tensor of shape [a, b, c, ..., d, e, f, ...]
"""
sh1 = tf.shape(tensor1)
sh2 = tf.shape(tensor2)
len_sh1 = len(tensor1.get_shape())
len_sh2 = len(tensor2.get_shape())
prod1 = tf.constant(1, dtype=tf.int32)
sh1_list = []
for z in range(len_sh1 - 1):
sh1_z = sh1[z]
prod1 *= sh1_z
sh1_list.append(sh1_z)
prod2 = tf.constant(1, dtype=tf.int32)
sh2_list = []
for z in range(len_sh2 - 1):
sh2_z = sh2[len_sh2 - 1 - z]
prod2 *= sh2_z
sh2_list.append(sh2_z)
reshape_1 = tf.reshape(tensor1, [prod1, sh1[len_sh1 - 1]])
reshape_2 = tf.reshape(tensor2, [sh2[0], prod2])
result = tf.reshape(tf.matmul(reshape_1, reshape_2), sh1_list + sh2_list)
assert len(result.get_shape()) == len_sh1 + len_sh2 - 2
return result
|
374547e03fe95b02a77ef1420e7cac2f07248fb3
| 28,494 |
from pymongo import MongoClient
def get_cumulative_collection():
    """Return the equipment cumulative data collection.
"""
client = MongoClient(connection_string)
db = client.get_database(database)
collection = db.get_collection('equipment_cumulative')
return collection
|
0de651fe424730b2e486298e8e142190514748bb
| 28,495 |
import re
def self_closing(xml_str, isSelfClosing):
"""
    Whether to self-close empty tags.
:param isSelfClosing:
:param xml_str:
:return:
"""
if(isSelfClosing=="true"):
xml_str = re.sub(r"<(.*)>(</.*>)", r"<\1/>" , xml_str)
return xml_str
else:
return xml_str
|
b8b68626549da9a27335c5340db3ba65b753af90
| 28,496 |
import numpy as np
def init_weights_he(nin, nout, nd, ny):
""" Sample the weights using variance Var(W) = 2/nin according to He initilization
for ReLU nonlinearities from a normal distribution with zero mean.
"""
sigma = np.sqrt(2/(nin))
weights = np.random.normal(0, sigma,((nd, ny))) # Weight vector (nd x ny)
return weights
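# Hedged usage sketch (illustrative, not part of the original snippet): note
# that only `nin` (fan-in) enters the variance and the returned shape is (nd, ny).
w = init_weights_he(nin=784, nout=10, nd=784, ny=10)
print(w.shape)  # -> (784, 10)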
|
fdb9fcef6888ea8513b22e84207f67cd71d90a9e
| 28,498 |
def get_all_message_template():
"""returns all drivers or none"""
try:
return MessageTemplates.objects.all(), "success"
except Exception as e:
return None, str(e)
|
1e63e73776b5b15d1cd7512fec32cea14454d717
| 28,499 |
def request_pet_name():
"""Requests users pet name as input.
Args:
NONE
Returns:
        The pet's name entered by the user.
Raises:
ValueError: If input is not a character.
"""
while True:
try:
if (pet_name := input("Enter your pet's name: \n")).isalpha():
break
else:
print("Must be characters, please enter your pet's name again.")
except ValueError:
print("Provide name with only characters.")
continue
return pet_name
|
efef2cfb0792b89f158f5a0bb42d10cf9bd1655d
| 28,500 |
from scipy.signal import lfilter
def butter_bandpass_filter(voltage, lowcut, highcut, fs, order=5):
"""Filter data with a bandpass, butterworth filter
Args:
voltage: array of voltage data from an ECG signal
lowcut: low frequency cutoff
highcut: high frequency cutoff
fs: sampling frequency
order: filter order (power)
Returns:
filtdat: array of filtered voltage data
"""
b, a = butter_bandpass(lowcut, highcut, fs, order=order)
filtdat = lfilter(b, a, voltage)
return filtdat
|
31892cc5c98289f2e8af3a610b9f4ad1f1cbb58b
| 28,501 |
def _get_variable_names(expression):
"""Return the list of variable names in the Numexpr `expression`."""
names = []
stack = [expression]
while stack:
node = stack.pop()
if node.astType == 'variable':
names.append(node.value)
elif hasattr(node, 'children'):
stack.extend(node.children)
return list(set(names))
|
db75b0066b89bc7a6a022a56b28981910836524c
| 28,502 |
def add(data_path, _):
"""add templates based on arguments and configurations."""
ask_option = AskOption(data_path)
library_chosen = LibraryChosen()
confirmation = Confirmation()
add_library = AddLibrary()
type_library_name = TypeLibraryName()
possible_states = [
ask_option, library_chosen, confirmation, add_library, type_library_name
]
machine = Machine(
initial_state=ask_option,
possible_states=possible_states
)
try:
machine.run()
except BackSignal:
return BACK
except HaltSignal:
return
|
09472c91394e41d345d5ac648c7b90a0e80cfcf3
| 28,504 |
import numpy as np
from scipy.special import gamma
def GGD(x,d=2,p=1):
"""Two parameter generalized gamma distribution (GGD)
Parameters
----------
x : array_like (positive)
d : float (positive)
p : float (positive)
Returns
-------
pdf : array_like
Notes
-----
.. math::
        G(x;d,p) = \frac{p}{\Gamma(d/p)} x^{d-1} \exp(-x^p)
where Gamma() is the gamma function
"""
return p/gamma(d/p)*x**(d-1)*np.exp(-x**p)
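# Hedged usage sketch (illustrative, not part of the original snippet): with
# d=2, p=1 the density reduces to x * exp(-x), i.e. a Gamma(2, 1) distribution.
print(GGD(np.array([0.5, 1.0]), d=2, p=1))  # -> [0.30326533 0.36787944]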
|
c18914f118870ff535d039f136e08a21e386ba43
| 28,505 |
def update_imported_docs(version_pk):
"""
Check out or update the given project's repository.
"""
version_data = api.version(version_pk).get()
version = make_api_version(version_data)
project = version.project
# Make Dirs
if not os.path.exists(project.doc_path):
os.makedirs(project.doc_path)
with project.repo_lock(getattr(settings, 'REPO_LOCK_SECONDS', 30)):
update_docs_output = {}
if not project.vcs_repo():
raise ProjectImportError(("Repo type '{0}' unknown"
.format(project.repo_type)))
# Get the actual code on disk
if version:
log.info('Checking out version {slug}: {identifier}'.format(
slug=version.slug, identifier=version.identifier))
version_slug = version.slug
version_repo = project.vcs_repo(version_slug)
update_docs_output['checkout'] = version_repo.checkout(
version.identifier
)
else:
# Does this ever get called?
log.info('Updating to latest revision')
version_slug = 'latest'
version_repo = project.vcs_repo(version_slug)
update_docs_output['checkout'] = version_repo.update()
# Ensure we have a conf file (an exception is raised if not)
project.conf_file(version.slug)
# Do Virtualenv bits:
if project.use_virtualenv:
if project.use_system_packages:
site_packages = '--system-site-packages'
else:
site_packages = '--no-site-packages'
# Here the command has been modified to support different
# interpreters.
update_docs_output['venv'] = run(
'{cmd} {site_packages} {path}'.format(
cmd='virtualenv-2.7 -p {interpreter}'.format(
interpreter=project.python_interpreter),
site_packages=site_packages,
path=project.venv_path(version=version_slug)
)
)
# Other code expects sphinx-build to be installed inside the
# virtualenv. Using the -I option makes sure it gets installed
# even if it is already installed system-wide (and
# --system-site-packages is used)
if project.use_system_packages:
ignore_option = '-I'
else:
ignore_option = ''
if project.python_interpreter != 'python3':
# Let's see if this works now.
sphinx = 'sphinx==1.1.3'
update_docs_output['sphinx'] = run(
('{cmd} install {ignore_option} {sphinx} '
'virtualenv==1.10.1 setuptools==1.1 '
'docutils==0.11').format(
cmd=project.venv_bin(version=version_slug, bin='pip'),
sphinx=sphinx, ignore_option=ignore_option))
else:
sphinx = 'sphinx==1.1.3'
# python 3 specific hax
update_docs_output['sphinx'] = run(
('{cmd} install {ignore_option} {sphinx} '
'virtualenv==1.9.1 docutils==0.11').format(
cmd=project.venv_bin(version=version_slug, bin='pip'),
sphinx=sphinx, ignore_option=ignore_option))
if project.requirements_file:
os.chdir(project.checkout_path(version_slug))
update_docs_output['requirements'] = run(
'{cmd} install --force-reinstall --exists-action=w -r {requirements}'.format(
cmd=project.venv_bin(version=version_slug, bin='pip'),
requirements=project.requirements_file))
os.chdir(project.checkout_path(version_slug))
if os.path.isfile("setup.py"):
if getattr(settings, 'USE_PIP_INSTALL', False):
update_docs_output['install'] = run(
'{cmd} install --force-reinstall --ignore-installed .'.format(
cmd=project.venv_bin(version=version_slug, bin='pip')))
else:
update_docs_output['install'] = run(
'{cmd} setup.py install --force'.format(
cmd=project.venv_bin(version=version_slug,
bin='python')))
else:
update_docs_output['install'] = (999, "", "No setup.py, skipping install")
# Update tags/version
version_post_data = {'repo': version_repo.repo_url}
if version_repo.supports_tags:
version_post_data['tags'] = [
{'identifier': v.identifier,
'verbose_name': v.verbose_name,
} for v in version_repo.tags
]
if version_repo.supports_branches:
version_post_data['branches'] = [
{'identifier': v.identifier,
'verbose_name': v.verbose_name,
} for v in version_repo.branches
]
try:
api.project(project.pk).sync_versions.post(json.dumps(version_post_data))
except Exception, e:
print "Sync Verisons Exception: %s" % e.message
return update_docs_output
|
c9bcbf369cbe329c6e82c3634b537a2d31df995a
| 28,506 |
def t(string):
"""
add \t
"""
return (string.count(".")) * "\t" + string
|
a394ac3983369836666d0610c345c6ef3c095994
| 28,507 |
def get_boundary_levels(eris):
"""Get boundary levels for eris."""
return [func(eris.keys()) for func in (min, max)]
|
20d98447e600fecc3b9495e9fb5e5d09ff3b3c1e
| 28,508 |
import json
def team_changepass():
"""The ``/team/changepass`` endpoint requires authentication and expects
the ``team_id`` and ``password`` as arguments. The team's password
will be set to ``password``
Note that this endpoint requires a POST request.
It can be reached at ``/team/changepass?secret=<API_SECRET>``.
The JSON response is::
{
"result": "success" or "fail"
}
:param int team_id: the team_id whose password to change.
:param str password: the new plaintext password.
:return: a JSON dictionary with with status code "success" or "fail"
"""
team_id = request.form.get("team_id")
password = request.form.get("password")
cursor = mysql.cursor()
password_encrypted = hash_password(password)
cursor.execute("""UPDATE teams SET password = %s
WHERE id = %s""",
(password_encrypted, team_id))
mysql.database.commit()
if cursor.rowcount == 0:
return json.dumps({"result": "fail"})
else:
return json.dumps({"result": "success"})
|
47f9921e9e457828a44e27f2b055ab47df52142e
| 28,510 |
def load_options(parser=None, argv=[], positional_args=True):
""" parses sys.argv, possibly exiting if there are mistakes
If you set parser to a ConfigParser object, then you have control
over the usage string and you can prepopulate it with options you
intend to use. But don't set a ``--config`` / ``-c`` option;
load_options uses that to find a configuration file to load
If a parser was passed in, we return ``(config, parser, [args])``.
Otherwise we return ``(config, [args])``. Args is only included
if ``positional_args`` is True and there are positional arguments
See :func:`load_config` for details on the ``--config`` option.
"""
def is_config_appender(arg):
return "." in arg and "=" in arg and arg.find(".") < arg.find("=")
parser_passed_in=parser
if not parser:
parser = OptionParser()
parser.add_option("-c", "--config", help="the path to a config file to read options from")
if argv:
options, args = parser.parse_args(argv)
else:
options, args = parser.parse_args()
print "arg",args
print "options",options
config = load_config(options.config, [a for a in args if is_config_appender(a)])
other_args = [a for a in args if not is_config_appender(a)]
return_list = [config]
if parser_passed_in:
return_list.append(options)
if other_args:
if positional_args:
return_list.append(other_args)
else:
raise Exception("Arguments %s not understood" % other_args)
else:
if positional_args:
raise Exception("This program expects one or more positional arguments that are missing")
if len(return_list) == 1:
return return_list[0]
else:
return tuple(return_list)
|
d0114ba8473b7a0b9283d65ec9fe97a19f54019f
| 28,511 |
def read_spans(fname, separator = ';'):
"""
Read in a span file, of the form
Polynomial;NumberOfComplexPlaces;Root;SpanDimension;VolumeSpan;ManifoldSpan;FitRatio
Returns a dictionary object (certainly NOT a Dataset) such that they
keys are polynomials, and the values are dictionaries. These
dictionaries have keys of roots and the values are [SpanDimension,
VolumeSpan, ManifoldSpan, FitRatio.
"""
f = open(fname,'r')
f.readline()
spans = dict()
for l in f.readlines():
w = l.replace('"','').replace(' ','').strip('\n').split(separator) # whitespace can cause weird problems
for i in [4,5,7,8,9,10]:
try:
w[i] = w[i][2:-2].split("','") # convert string back to list of strings
except IndexError:
break
spans.setdefault(w[0],dict())[w[2]] = w[4:]
return spans
|
3bec0157f5905dd1c3ffa80cc0d1999f50ecc48c
| 28,512 |
from typing import Dict
def merge_hooks(hooks1: Dict[str, list], hooks2: Dict[str, list]) -> Dict[str, list]:
"""
Overview:
merge two hooks, which has the same keys, each value is sorted by hook priority with stable method
Arguments:
- hooks1 (:obj:`dict`): hooks1 to be merged
- hooks2 (:obj:`dict`): hooks2 to be merged
Returns:
- new_hooks (:obj:`dict`): merged new hooks
.. note::
This merge function uses stable sort method without disturbing the same priority hook
"""
assert set(hooks1.keys()) == set(hooks2.keys())
new_hooks = {}
for k in hooks1.keys():
new_hooks[k] = sorted(hooks1[k] + hooks2[k], key=lambda x: x.priority)
return new_hooks
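# Hedged usage sketch (illustrative, not part of the original snippet), using a
# minimal stand-in hook type that only carries a name and a priority.
from collections import namedtuple

_Hook = namedtuple("_Hook", ["name", "priority"])
merged = merge_hooks({"after_iter": [_Hook("log", 20)]},
                     {"after_iter": [_Hook("save", 10)]})
print([h.name for h in merged["after_iter"]])  # -> ['save', 'log']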
|
add5ae72917ca9aff109e8ac86a4d6902c14b298
| 28,514 |
def get_max_assocs_in_sample_csr(assoc_mat):
"""
Returns the maximum number of co-associations a sample has and the index of
that sample.
"""
first_col = assoc_mat.indptr
n_cols = first_col[1:] - first_col[:-1]
max_row_size = n_cols.max()
max_row_idx = n_cols.argmax()
return max_row_size, max_row_idx
|
a341153afa0398cb2a43b97614cd39129e6b2ac5
| 28,516 |
def command_mood(self, args):
"""
/mood [<mood> [text]]
"""
if not args:
return self.xmpp.plugin['xep_0107'].stop()
mood = args[0]
if mood not in pep.MOODS:
return self.information('%s is not a correct value for a mood.'
% mood,
'Error')
if len(args) == 2:
text = args[1]
else:
text = None
self.xmpp.plugin['xep_0107'].publish_mood(mood, text,
callback=dumb_callback)
|
43d383711f56e70440dd61ff5485f649ad96626b
| 28,517 |
def putativePrimer(seq,lastShared):
"""
Generate a mock primer based on desired TM or length
and end position. This is used to estimate whether an
exact match restriction site found in the shared region
of two sequences is likely to be captured by a primer
(rendering it necessary to modify the site or throw out
the enzyme) or if it can be safely ignored.
Input
seq = string of valid DNA bases
lastShared = integer indicating last base of primer
Output
bestPrimer = string of valid DNA bases
"""
# type can be 'tm' or 'length'
seq = seq.lower()
# Generate primer sub-sequences
currentStart = 0
primerList = []
while currentStart >= 0:
currentPrimer = seq[currentStart:lastShared]
primerList.append(currentPrimer)
currentStart -= 1
if Settings.primerType == 'tm':
output = []
for eachPrimer in primerList:
output.append(estimateTM(eachPrimer))
# Filter for negative values
filterList = [x for x in range(0,len(output)) if output[x] <= 0]
primerList = [primerList[x] for x in range(0,len(output)) if x not in filterList]
output = [output[x] for x in range(0,len(output)) if x not in filterList]
# Find minimum diff
outputDiff = [abs(x - Settings.TM) for x in output]
# Choose the best primer sub-sequence based on difference from optimum Tm
bestPrimer = [primerList[x] for x in range(0,len(outputDiff)) if outputDiff[x] == min(outputDiff)]
return(bestPrimer)
elif Settings.primerType == 'length':
# compare length of primers in list to optimum length
positionList = list(range(0,len(primerList)))
filterList = [abs(len(x) - Settings.minLength) for x in primerList]
bestPrimer = [primerList[x] for x in positionList if filterList[x] == min(filterList)]
return(bestPrimer)
|
0efa964ba834735bb71f3c8e2d565762ec7cfb8d
| 28,518 |
import random
def get_config(runner,
raw_uri: str,
root_uri: str,
target: str = BUILDINGS,
nochip: bool = True,
test: bool = False) -> SemanticSegmentationConfig:
"""Generate the pipeline config for this task. This function will be called
by RV, with arguments from the command line, when this example is run.
Args:
runner (Runner): Runner for the pipeline. Will be provided by RV.
raw_uri (str): Directory where the raw data resides
root_uri (str): Directory where all the output will be written.
target (str): "buildings" | "roads". Defaults to "buildings".
nochip (bool, optional): If True, read directly from the TIFF during
training instead of from pre-generated chips. The analyze and chip
commands should not be run, if this is set to True. Defaults to
True.
test (bool, optional): If True, does the following simplifications:
(1) Uses only a small subset of training and validation scenes.
(2) Enables test mode in the learner, which makes it use the
test_batch_sz and test_num_epochs, among other things.
Defaults to False.
Returns:
        SemanticSegmentationConfig: A pipeline config.
"""
spacenet_cfg = SpacenetConfig.create(raw_uri, target)
scene_ids = spacenet_cfg.get_scene_ids()
if len(scene_ids) == 0:
raise ValueError(
'No scenes found. Something is configured incorrectly.')
random.seed(5678)
scene_ids = sorted(scene_ids)
random.shuffle(scene_ids)
# Workaround to handle scene 1000 missing on S3.
if '1000' in scene_ids:
scene_ids.remove('1000')
split_ratio = 0.8
num_train_ids = round(len(scene_ids) * split_ratio)
train_ids = scene_ids[:num_train_ids]
val_ids = scene_ids[num_train_ids:]
if test:
train_ids = train_ids[:16]
val_ids = val_ids[:4]
channel_order = [0, 1, 2]
class_config = spacenet_cfg.get_class_config()
train_scenes = [
build_scene(spacenet_cfg, id, channel_order) for id in train_ids
]
val_scenes = [
build_scene(spacenet_cfg, id, channel_order) for id in val_ids
]
scene_dataset = DatasetConfig(
class_config=class_config,
train_scenes=train_scenes,
validation_scenes=val_scenes)
chip_sz = 325
img_sz = chip_sz
chip_options = SemanticSegmentationChipOptions(
window_method=SemanticSegmentationWindowMethod.sliding, stride=chip_sz)
if nochip:
data = SemanticSegmentationGeoDataConfig(
scene_dataset=scene_dataset,
window_opts=GeoDataWindowConfig(
method=GeoDataWindowMethod.sliding,
size=chip_sz,
stride=chip_options.stride),
img_sz=img_sz,
num_workers=4)
else:
data = SemanticSegmentationImageDataConfig(
img_sz=img_sz, num_workers=4)
backend = PyTorchSemanticSegmentationConfig(
data=data,
model=SemanticSegmentationModelConfig(backbone=Backbone.resnet50),
solver=SolverConfig(
lr=1e-4,
num_epochs=5,
test_num_epochs=2,
batch_sz=8,
one_cycle=True),
log_tensorboard=True,
run_tensorboard=False,
test_mode=test)
return SemanticSegmentationConfig(
root_uri=root_uri,
dataset=scene_dataset,
backend=backend,
train_chip_sz=chip_sz,
predict_chip_sz=chip_sz,
img_format='npy',
chip_options=chip_options)
|
d30651205d500850a32f0be6364653d4d7f638fa
| 28,520 |
def readfmt(s, fmt=DEFAULT_INPUTFMT):
"""Reads a given string into an array of floats using the given format"""
    # ``fmt`` is accepted for API compatibility but is not currently used.
    ret = [float(tok) for tok in s.strip().split()]
    return ret
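# --- hedged usage sketch (not part of the original snippet) ---
# Whitespace of any width is accepted because split() is used without arguments.
print(readfmt("1.0  2.5 3"))  # -> [1.0, 2.5, 3.0]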
|
30024e27450ab6f350d7829894865f68e13d95f2
| 28,521 |
def is_image_sharable(context, image, **kwargs):
"""Return True if the image can be shared to others in this context."""
# Is admin == image sharable
if context.is_admin:
return True
# Only allow sharing if we have an owner
if context.owner is None:
return False
# If we own the image, we can share it
if context.owner == image['owner']:
return True
# Let's get the membership association
if 'membership' in kwargs:
membership = kwargs['membership']
if membership is None:
# Not shared with us anyway
return False
else:
members = image_member_find(context,
image_id=image['id'],
member=context.owner)
if members:
member = members[0]
else:
# Not shared with us anyway
return False
# It's the can_share attribute we're now interested in
return member['can_share']
|
778ca70c4b12c0f20586ce25a35551e1356d20c8
| 28,522 |
def ksz_radial_function(z,ombh2, Yp, gasfrac = 0.9,xe=1, tau=0, params=None):
"""
K(z) = - T_CMB sigma_T n_e0 x_e(z) exp(-tau(z)) (1+z)^2
Eq 4 of 1810.13423
"""
if params is None: params = default_params
T_CMB_muk = params['T_CMB'] # muK
thompson_SI = constants['thompson_SI']
meterToMegaparsec = constants['meter_to_megaparsec']
ne0 = ne0_shaw(ombh2,Yp)
return T_CMB_muk*thompson_SI*ne0*(1.+z)**2./meterToMegaparsec * xe *np.exp(-tau)
|
64551363c6b3c99028ebfd3f7cee69c0c273a2e2
| 28,524 |
def find_commits(repo, ref='HEAD', grep=None):
"""
Find git commits.
:returns: List of matching commits' SHA1.
:param ref: Git reference passed to ``git log``
:type ref: str
:param grep: Passed to ``git log --grep``
:type grep: str or None
"""
opts = []
if grep:
opts += ['--grep', grep]
commits = git(repo, 'log', '--format=%H', *opts, ref, '--')
return commits.splitlines()
|
8adb5e0dfebfc5ef86f0a17b2b4a7596ab91a382
| 28,525 |
import pandas as pd
def findMatches(arg_by_ref, checkForName=False):
"""Finds POIs with the same geometry in 2 datasets.
For each POI in the first dataset, check whether there is a corresponding POI in the 2nd one.
If it exists, move the POI from the second dataset to a resulting dataset B. In any case, the
POIs from the first dataset are going to be moved in the resulting dataset A.
arg_by_ref array -- The array containing the 2 datasets
checkForName boolean -- Whether to also check for same name
returns tuple -- The two resulting datasets
"""
dataA = arg_by_ref[0]
dataB = arg_by_ref[1]
definition = list(dataA.columns.values)
res_A = pd.DataFrame(columns=definition)
res_B = pd.DataFrame(columns=definition)
for index, poiA in dataA.iterrows():
wkt = poiA.WKT
if checkForName:
poiB = dataB.loc[(dataB.WKT == wkt) & (dataB[NAME] == poiA[NAME])]
else:
poiB = dataB.loc[dataB.WKT == wkt]
exists = (poiB.WKT.count() > 0)
if exists:
res_B = res_B.append(poiB)
dataB = dataB.drop(poiB.index)
res_A = res_A.append(poiA)
dataA = dataA.drop(index)
arg_by_ref[0] = dataA
arg_by_ref[1] = dataB
return (res_A, res_B)
|
68f32bc29b970bb86663060c46490698a0e1b3b9
| 28,528 |
def _decicelsius_to_kelvins(temperatures_decicelsius):
"""Converts from temperatures from decidegrees Celsius to Kelvins.
:param temperatures_decicelsius: numpy array of temperatures in decidegrees
Celsius.
:return: temperatures_kelvins: numpy array of temperatures in Kelvins, with
same shape as input.
"""
return temperatures_decicelsius * 0.1 + 273.15
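# --- hedged usage sketch (not part of the original snippet) ---
# Works element-wise on numpy arrays as well as on plain floats.
import numpy as np
print(_decicelsius_to_kelvins(np.array([0.0, 250.0])))  # -> [273.15 298.15]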
|
880d42637970c680cd241b5418890468443c6a5b
| 28,529 |
def emails_to_warn():
""" who should get warning about errors messages in the chestfreezer? """
emails_for_escalation = _get_array_option_with_default('emails_to_warn', DEFAULT_EMAILS_TO_WARN)
return emails_for_escalation
|
f7135f2b55e813391ee86fae65e8f6cc10ccd31e
| 28,530 |
import types
from typing import Sequence
from typing import Tuple
def get_public_symbols(
root_module: types.ModuleType) -> Sequence[Tuple[str, types.FunctionType]]:
"""Returns `(symbol_name, symbol)` for all symbols of `root_module`."""
fns = []
for name in getattr(root_module, '__all__'):
o = getattr(root_module, name)
fns.append((name, o))
return fns
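# --- hedged usage sketch (not part of the original snippet) ---
# Works with any module that defines ``__all__``; ``json`` is used here only
# as a convenient standard-library example.
import json
print([name for name, _ in get_public_symbols(json)][:3])  # e.g. ['dump', 'dumps', 'load']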
|
96be2bf9d2548f1c7b5b8b12b926996105b084ca
| 28,532 |
def NewStandardEnv(packager, provider):
"""NewStandardEnv(object packager, object provider) object
NewStandardEnv returns a new *Env with the given params plus standard declarations.
"""
return Env(handle=_checker.checker_NewStandardEnv(packager.handle, provider.handle))
|
ff8842553a2dc1676c0b4abe4b8f1f5bee41b753
| 28,533 |
def get_ecs_secret_access_key(config_fpath, bucket_name):
"""Return the ECS secret access key.
:param config_fpath: path to the dtool config file
:param bucket_name: name of the bucket in a ECS namespace
:returns: the ECS secret access key or an empty string
"""
key = ECS_SECRET_ACCESS_KEY_KEY_PREFIX + bucket_name
return get_config_value_from_file(
key,
config_fpath,
""
)
|
3583d5d45a9d8f70f839c33ab7007b85977483e4
| 28,534 |
def solve_naked_quads(sudoku, verbose):
"""Exclude the candidates of seen quad-value cell quads from unsolved cells
in their unit."""
return solve_naked_n_tuples(sudoku, 4, verbose)
|
0a8a67928e7c3cb65fa5868cc30b60c08823ce6a
| 28,535 |
import matplotlib.pyplot as plt
import pandas as pd
def prototypical_spectra_plot(
dataset,
results_df,
plot_type="imshow",
fig=None,
fig_kws={},
plot_kws={},
cbar_kws={},
**kwargs
):
"""Plot the prototypical spectra from the calibration samples.
Args:
dataset (pyeem.datasets.Dataset): [description]
results_df (pandas.DataFrame): [description]
plot_type (str, optional): [description]. Defaults to "imshow".
fig (matplotlib.pyplot.figure, optional): [description]. Defaults to None.
fig_kws (dict, optional): Optional keyword arguments to include for the figure. Defaults to {}.
plot_kws (dict, optional): Optional keyword arguments to include. They are sent as an argument to the matplotlib plot call. Defaults to {}.
cbar_kws (dict, optional): Optional keyword arguments to include for the colorbar. Defaults to {}.
Returns:
matplotlib.axes.Axes: [description]
"""
nspectra = len(results_df.index.unique())
nrows, ncols = _get_subplot_dims(nspectra)
nplots = nrows * ncols
# Set the fig_kws as a mapping of default and kwargs
default_fig_kws = dict(
tight_layout={"h_pad": 5, "w_pad": 0.05}, figsize=(ncols ** 2, nrows * ncols)
)
# Set the fig_kws
fig_kws = dict(default_fig_kws, **fig_kws)
fig = plt.figure(**fig_kws)
projection = None
if plot_type in ["surface", "surface_contour"]:
projection = "3d"
axes = []
for i in range(1, ncols * nrows + 1):
ax = fig.add_subplot(nrows, ncols, i, projection=projection)
axes.append(ax)
for i in range(nspectra, nplots):
axes[i].axis("off")
axes[i].set_visible(False)
# axes[i].remove()
ax_idx = 0
for index, row in results_df.iterrows():
proto_eem_df = pd.read_hdf(dataset.hdf, key=row["hdf_path"])
source_name = proto_eem_df.index.get_level_values("source").unique().item()
proto_conc = proto_eem_df.index.get_level_values("proto_conc").unique().item()
source_units = (
proto_eem_df.index.get_level_values("source_units").unique().item()
)
intensity_units = (
proto_eem_df.index.get_level_values("intensity_units").unique().item()
)
title = "Prototypical Spectrum: {0}\n".format(source_name.title())
title += "Concentration: {0} {1}".format(proto_conc, source_units)
idx_names = proto_eem_df.index.names
drop_idx_names = [
idx_name for idx_name in idx_names if idx_name != "emission_wavelength"
]
proto_eem_df = proto_eem_df.reset_index(level=drop_idx_names, drop=True)
eem_plot(
proto_eem_df,
plot_type=plot_type,
intensity_units=intensity_units,
title=title,
ax=axes[ax_idx],
fig_kws=fig_kws,
plot_kws=plot_kws,
cbar_kws=cbar_kws,
**kwargs
)
ax_idx += 1
pad = kwargs.get("tight_layout_pad", 1.08)
h_pad = kwargs.get("tight_layout_hpad", None)
w_pad = kwargs.get("tight_layout_wpad", None)
rect = kwargs.get("tight_layout_rect", None)
if plot_type in ["surface", "surface_contour"]:
w_pad = kwargs.get("tight_layout_wpad", 25)
plt.tight_layout(pad=pad, h_pad=h_pad, w_pad=w_pad, rect=rect)
return axes
|
96bd2d6b283b01257b35c4ec436ecc9e3457db7b
| 28,536 |
def generate_gate_piover8(
c_sys: CompositeSystem, is_physicality_required: bool = True
) -> "Gate":
"""Return the Gate class for the pi/8 (T) gate on the composite system.
Parameters
----------
c_sys: CompositeSystem
is_physicality_required: bool = True
whether the generated object is physicality required, by default True
Returns
----------
Gate
The Gate class for the pi/8 (T) gate on the composite system.
"""
assert len(c_sys.elemental_systems) == 1
hs = generate_gate_piover8_mat()
gate = Gate(c_sys=c_sys, hs=hs, is_physicality_required=is_physicality_required)
return gate
|
5c3cd7721a3bf7de2eb96521f2b9c04e82845dee
| 28,538 |
def get_ftext_trials_fast(review_id):
"""
retrieve all ftext trials related to a review
@param review_id: pmid of review
@return: all registered trials and their linked publications
"""
conn = dblib.create_con(VERBOSE=True)
cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
cur.execute("""
SELECT tr.nct_id, tr.brief_title, tr.overall_status, tr.brief_summary, tr.enrollment, tr.completion_date
FROM tregistry_entries tr
INNER JOIN freetext_review_rtrial rt ON tr.nct_id = rt.nct_id
LEFT JOIN trialpubs_rtrial t on tr.nct_id = t.nct_id
WHERE rt.review_id = %s
GROUP BY tr.nct_id, rt.review_id, rt.nct_id
""", (review_id,))
reg_trials = list(cur.fetchall())
# for i, trial in enumerate(reg_trials):
# trial = dict(trial)
# if usr:
# for v in trial['voters']:
# if usr and usr.nickname == v[1]:
# trial['user_vote'] = v[0]
# trial['nicknames'] = ['you' if x[1] == usr.nickname else x[1] for x in trial['voters'] if x[1] is not None]
# else:
# trial['nicknames'] = [x[1] for x in trial['voters'] if x[1] is not None]
# if trial['nicknames']:
# trial['voters'] = str(', '.join(trial['nicknames']))
# else:
# trial['voters'] = ""
# reg_trials[i] = trial.copy()
return {'reg_trials': reg_trials}
|
e4fd91e93a5b32b083ddf9d0dccd282dee339601
| 28,539 |
from typing import List
from typing import Tuple
def find_edges(names: List[str]) -> List[Tuple[str, str]]:
"""
Given a set of short lineages, return a list of pairs of parent-child
relationships among lineages.
"""
longnames = [decompress(name) for name in names]
edges = []
for x in longnames:
if x == "A":
continue # A is root
y = get_parent(x)
while y is not None and y not in longnames:
y = get_parent(y)
if y is None:
continue
if y != x:
edges.append((x, y) if x < y else (y, x))
edges = [(compress(x), compress(y)) for x, y in edges]
assert len(set(edges)) == len(edges)
assert len(edges) == len(names) - 1
return edges
|
9fd254de99a1be4647c476cfcd997b580cb44605
| 28,540 |
def get_plot_spec_binstat_abs(energy, bins = 50, range = (0, 5)):
"""
Create `PlotSpec` for plot of some stat of abs energy resolution vs TrueE.
"""
# pylint: disable=redefined-builtin
return PlotSpec(
title = None,
label_x = 'True %s Energy [GeV]' % (energy),
label_y = '(Reco - True) %s Energy [GeV]' % (energy),
bins_x = bins,
range_x = range,
bins_y = None,
range_y = None,
minor = True,
grid = True,
)
|
4b6b1d5e32234ac3d5c3b9acece4b4fcae795fee
| 28,542 |
from flask import current_app
from itsdangerous import URLSafeTimedSerializer
def generate_token(data):
"""Generate a token for given data object"""
serializer = URLSafeTimedSerializer(current_app.config['SECRET_KEY'])
return serializer.dumps(data, salt=current_app.config['SECURITY_PASSWORD_SALT'])
|
0df4e85179da9b4d5bf56a868b652f490df4887a
| 28,543 |
import subprocess as sp
import time
def monitor_gcp(vm_name: str, job_arguments: dict):
"""Monitor status of job based on vm_name. Requires stable connection."""
# Check VM status from command line
while True:
try:
check_cmd = (
[
"gcloud",
"alpha",
"compute",
"tpus",
"--zone",
"europe-west4-a",
"--verbosity",
"critical",
]
if job_arguments["use_tpus"]
else [
"gcloud",
"compute",
"instances",
"list",
"--verbosity",
"critical",
]
)
out = sp.check_output(check_cmd)
break
except sp.CalledProcessError as e:
stderr = e.stderr
return_code = e.returncode
print(stderr, return_code)
time.sleep(1)
# Clean up and check if vm_name is in list of all jobs
job_info = out.split(b"\n")[1:-1]
running_job_names = []
for i in range(len(job_info)):
decoded_job_info = job_info[i].decode("utf-8").split()
if decoded_job_info[-1] in ["STAGING", "RUNNING"]:
running_job_names.append(decoded_job_info[0])
job_status = vm_name in running_job_names
return job_status
|
ba78353b84267a48e0dbd9c4ae7b8da280ddf471
| 28,544 |
def five_top_workers(month, year):
"""
Top 5 presence users with information about them.
"""
dict_months = []
monthly_grouped = group_by_month(get_data(), year)
for user in monthly_grouped:
try:
dict_months.append((user.items()[0][0], user.items()[0][1][month]))
        except:
            # skip users with no presence data for the requested month
            pass
sorted_dict = sorted_months_dict(dict_months)
return five_top_user_data(dict_months, sorted_dict)
|
75a63d49e11f528b90a90509b87ab22d58a87c72
| 28,546 |
import collections
import re
import tensorflow as tf
def get_assignment_map_from_checkpoint(tvars, init_checkpoint, prefix=""):
"""Compute the union of the current variables and checkpoint variables."""
name_to_variable = collections.OrderedDict()
for var in tvars:
name = var.name
m = re.match("^(.*):\\d+$", name)
if m is not None:
name = m.group(1)
name_to_variable[name] = var
initialized_variable_names = {}
assignment_map = collections.OrderedDict()
for x in tf.train.list_variables(init_checkpoint):
(name, var) = (x[0], x[1])
if prefix + name not in name_to_variable:
continue
assignment_map[name] = prefix + name
initialized_variable_names[name] = 1
initialized_variable_names[name + ":0"] = 1
return assignment_map, initialized_variable_names
|
5469356a8b70da9268f42c08588bed0c765446c8
| 28,547 |
def u_rce_hh80(lats, thermal_ro, rot_rate=ROT_RATE_EARTH, radius=RAD_EARTH):
"""Zonal wind in gradient balance with equilibrium temperatures."""
return rot_rate*radius*cosdeg(lats)*((1 + 2*thermal_ro)**0.5 - 1)
|
2fe0aba3f66a6429cbeb6674a46769d4474e31a4
| 28,548 |
def CreatePiecewiseFunction(**params):
"""Create and return a piecewise function. Optionally, parameters can be
given to assign to the piecewise function.
"""
pfunc = servermanager.piecewise_functions.PiecewiseFunction()
controller = servermanager.ParaViewPipelineController()
controller.InitializeProxy(pfunc)
SetProperties(pfunc, **params)
controller.RegisterOpacityTransferFunction(pfunc)
return pfunc
|
6d0e55676a7abf98e967a354e321524b82f2c674
| 28,549 |
import json
def cancelCardTransactionPayload(cancel_time):
"""
Function for constructing payload for cancelCardTransaction API call.
Note: All parameters are of type String unless otherwise stated below.
:param cancel_time: Date and time of the request. Format - YYYY-MM-DD HH:mm:ss
:return: JSON Payload for API call
"""
payload_py = {
"cancel_time": cancel_time
}
payload_json = json.dumps(payload_py)
return payload_json
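# --- hedged usage sketch (not part of the original snippet) ---
# The timestamp below is hypothetical; it just follows the documented format.
print(cancelCardTransactionPayload("2021-01-31 12:00:00"))
# -> {"cancel_time": "2021-01-31 12:00:00"}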
|
e96ee75bbc4c20a094283fa664bca6ddd6b9556c
| 28,550 |
import re
def remove_elongation(word):
"""
:param word: the input word to remove elongation
    :return: the word with elongation (repeated characters) collapsed
"""
regex_tatweel = r'(\w)\1{2,}'
# loop over the number of times the regex matched the word
for index_ in range(len(re.findall(regex_tatweel, word))):
if re.search(regex_tatweel, word):
elongation_found = re.search(regex_tatweel, word)
elongation_replacement = elongation_found.group()[0]
elongation_pattern = elongation_found.group()
word = re.sub(elongation_pattern, elongation_replacement, word, flags=re.MULTILINE)
else:
break
return word
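# --- hedged usage sketch (not part of the original snippet) ---
# Any character repeated three or more times is collapsed to a single one.
print(remove_elongation("heyyyyy"))  # -> 'hey'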
|
a0b4be8640193568075f053009e5761894f302c1
| 28,551 |
def coevolve_alignment(method,alignment,**kwargs):
""" Apply coevolution method to alignment (for intramolecular coevolution)
method: f(alignment,**kwargs) -> 2D array of coevolution scores
alignment: alignment object for which coevolve scores should be
calculated
**kwargs: parameters to be passed to method()
"""
# Perform method specific validation steps
if method == sca_alignment: sca_input_validation(alignment,**kwargs)
if method == ancestral_state_alignment:
ancestral_states_input_validation(alignment,**kwargs)
validate_alignment(alignment)
return method(alignment,**kwargs)
|
056813427f21b806742fd2bf613dcd2b769e709f
| 28,552 |
from textwrap import dedent, wrap
def compute_known_facts(known_facts, known_facts_keys):
"""Compute the various forms of knowledge compilation used by the
assumptions system.
This function is typically applied to the results of the ``get_known_facts``
and ``get_known_facts_keys`` functions defined at the bottom of
this file.
"""
fact_string = dedent('''\
"""
The contents of this file are the return value of
``sympy.assumptions.ask.compute_known_facts``.
Do NOT manually edit this file.
Instead, run ./bin/ask_update.py.
"""
from sympy.core.cache import cacheit
from sympy.logic.boolalg import And, Not, Or
from sympy.assumptions.ask import Q
# -{ Known facts in Conjunctive Normal Form }-
@cacheit
def get_known_facts_cnf():
return And(
%s
)
# -{ Known facts in compressed sets }-
@cacheit
def get_known_facts_dict():
return {
%s
}
''')
# Compute the known facts in CNF form for logical inference
LINE = ",\n "
HANG = ' '*8
cnf = to_cnf(known_facts)
c = LINE.join([str(a) for a in cnf.args])
mapping = single_fact_lookup(known_facts_keys, cnf)
items = sorted(mapping.items(), key=str)
keys = [str(i[0]) for i in items]
values = ['set(%s)' % sorted(i[1], key=str) for i in items]
m = LINE.join(['\n'.join(
wrap("%s: %s" % (k, v),
subsequent_indent=HANG,
break_long_words=False))
for k, v in zip(keys, values)]) + ','
return fact_string % (c, m)
|
39744ee1bd56ad0bc2fc6412a06da772f45d1a2b
| 28,553 |
def mro(*bases):
"""Calculate the Method Resolution Order of bases using the C3 algorithm.
Suppose you intended creating a class K with the given base classes. This
function returns the MRO which K would have, *excluding* K itself (since
it doesn't yet exist), as if you had actually created the class.
Another way of looking at this, if you pass a single class K, this will
return the linearization of K (the MRO of K, *including* itself).
Found at:
http://code.activestate.com/recipes/577748-calculate-the-mro-of-a-class/
"""
seqs = [list(C.__mro__) for C in bases] + [list(bases)]
res = []
while True:
non_empty = list(filter(None, seqs))
if not non_empty:
# Nothing left to process, we're done.
return tuple(res)
for seq in non_empty: # Find merge candidates among seq heads.
candidate = seq[0]
not_head = [s for s in non_empty if candidate in s[1:]]
if not_head:
# Reject the candidate.
candidate = None
else:
break
if not candidate:
raise TypeError("inconsistent hierarchy, no C3 MRO is possible")
res.append(candidate)
for seq in non_empty:
# Remove candidate.
if seq[0] == candidate:
del seq[0]
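# --- hedged usage sketch (not part of the original snippet) ---
# Classic diamond: the MRO a new class with bases (B, C) would receive.
class _A: pass
class _B(_A): pass
class _C(_A): pass
print([c.__name__ for c in mro(_B, _C)])  # -> ['_B', '_C', '_A', 'object']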
|
bbc3fde351c92c4ae0a5c82a3e06e95de29e2e8d
| 28,554 |
def CalculatepHfromTA(param, TA, val, TP, TSi):
""" SUB CalculatepHfromTATC, version 04.01, 10-13-96, written by Ernie Lewis.
Inputs: TA, TC, TP, TSi
Output: pH
This calculates pH from TA and TC using K1 and K2 by Newton's method.
It tries to solve for the pH at which Residual = 0.
The starting guess is pH = 8.
Though it is coded for H on the total pH scale, for the pH values
occuring in seawater (pH > 6) it will be equally valid on any pH scale
(H terms negligible) as long as the K Constants are on that scale.
"""
# Declare global constants
global K0, K1, K2, KW, KB
# Set iteration parameters
pHGuess = 8.0 # this is the first guess
pHTol = 1.0e-4 # tolerance for iterations end
ln10 = np.log(10.0)
# creates a vector holding the first guess for all samples
if hasattr(TA, 'shape'):
pH = np.ones(TA.shape) * pHGuess
else:
pH = pHGuess
deltapH = pHTol + 1.0
# Begin iteration to find pH
while np.any(abs(deltapH) > pHTol):
H, Beta = CalculateHfrompH(pH)
NCAlk = CalculateNCAlkfrompH(H, TP, TSi)
        if param == 'TC':
CAlk = val * K1 * (H + 2 * K2) / Beta
# find Slope dTA/dpH (not exact, but keeps all important terms)
Slope = ln10 * (
val * K1 * H * (H * H + K1 * K2 + 4.0 * H * K2)
/ Beta / Beta + TB * KB * H / (KB + H) / (KB + H) + KW / H + H
)
        elif param == 'pCO2':
HCO3 = K0 * K1 * val / H
CO3 = K0 * K1 * K2 * val / (H * H)
CAlk = HCO3 + 2 * CO3
# find Slope dTA/dpH (not exact, but keeps all important terms)
Slope = ln10 * (
HCO3 + 4 * CO3 + TB * KB * H / (KB + H) / (KB + H) + KW / H + H
)
        elif param == 'CO3':
            HCO3 = H * val / K2
            CAlk = HCO3 + 2 * val
            # find Slope dTA/dpH (not exact, but keeps all important terms)
            # here val is the carbonate-ion concentration itself
            Slope = ln10 * (
                HCO3 + 4 * val + TB * KB * H / (KB + H) / (KB + H) + KW / H + H
            )
else:
raise ValueError('Unknown carbon param: {}'.format(param))
TA_calc = CAlk + NCAlk
Residual = TA - TA_calc
deltapH = Residual / Slope # this is Newton's method
# to keep the jump from being too big
while np.any(abs(deltapH) > 1):
deltapH = deltapH / 2.0
pH = pH + deltapH # Is on the same scale as K1 and K2 were calculated
return pH
|
b160decae54b25677b1158f77bbc9818abcfd0df
| 28,556 |
import numpy as np
from numpy.linalg import inv
from scipy.ndimage import affine_transform, shift
def transform_image(image, shiftx, shifty, angle, order=1):
"""
Apply shift and rotation to the image.
The translation is applied first, then the rotation. If no rotation is
requested (``angle=0``), then ``scipy.ndimage.shift()`` is called to
perform a translation. Otherwise, ``scipy.ndimage.affine_transform()`` is
called. In both cases the settings ``mode='wrap', prefilter=False`` are
used. Prefilter *must* be turned off because it applies lossy image
sharpening leading to artifacts.
Parameters
----------
image : numpy.ndarray
2D image input.
shiftx : float
Shift in the x-axis in pixels.
shifty : float
Shift in the y-axis in pixels.
angle : float
Rotation angle in radians (positive is clockwise).
order : int
(Optional, default: 1) Spline interpolation order. 1 for bilinear, 3
for bicubic (bilinear is the original behavior).
Returns
-------
numpy.ndarray
Transformed image.
Notes
-----
The transformation is implemented as sequence of affine transformations.
The ``scipy`` module takes a matrix of the form (ndim + 1, ndim + 1),
where it assumes that the transformation is specified using homogeneous
coordinates. This matrix has the 2x2 rotation matrix in the top left
corner, and the linear shifts in the top right. They are applied in this
order:
.. code-block:: text
1 0 shiftx
0 1 shifty
0 0 1
(translation by shift amounts)
1 0 -(X-1)/2
0 1 -(Y-1)/2
0 0 1
(translation to center rotation on the IDL rot center)
cos sin 0
-sin cos 0
0 0 1
(clockwise rotation)
1 0 +(X-1)/2
0 1 +(Y-1)/2
0 0 1
(undo translation for center of rotation)
"""
if shiftx == 0 and shifty == 0 and angle == 0:
return image
elif angle == 0:
return shift(image, [shifty, shiftx],
order=order, mode='wrap', prefilter=False)
else:
# The original IDL implementation performs the linear translation
# first (wrapping at borders), then rotates the image clockwise by an
# angle in degrees. The center of the rotation is (X-1)/2, (Y-1)/2. In
# both steps, bilinear interpolation is used.
# Numpy array coordinates are (y, x). This swaps dx and dy in the
# translation part, and the position of the -sin element in the
# rotation part, compared to the standard version for (x, y, 1).
# Beware that the coordinate transforms are calculated in the
# conventional (x, y) sense, but are written in numpy's (y, x) order
# when implementing them in the transformation matrix.
cx, sx = np.cos(angle), np.sin(angle)
# Center of rotation
rot_x = 0.5 * (image.shape[1] - 1)
rot_y = 0.5 * (image.shape[0] - 1)
dx = cx * (shiftx - rot_x) + sx * (shifty - rot_y) + rot_x
dy = -sx * (shiftx - rot_x) + cx * (shifty - rot_y) + rot_y
tx = np.array([[cx, -sx, dy],
[sx, cx, dx],
[0, 0, 1]], dtype=np.float64)
# print(cx, sx)
# print(tx)
# The prefilter option, which is turned on by default, applies an
# image sharpening. It must not be applied. The mode is set to 'wrap'
# to emulate the behavior of the original implementation of image
# shift. The spline interpolation order is 1 for bilinear, 3 for
# bicubic (bilinear is the original behavior).
return affine_transform(image, inv(tx),
order=order, mode='wrap', prefilter=False)
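# --- hedged usage sketch (not part of the original snippet) ---
# Pure translation path: each row is shifted right by one pixel with wrapping.
_img = np.arange(16, dtype=float).reshape(4, 4)
print(transform_image(_img, shiftx=1, shifty=0, angle=0))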
|
203f06c42b68de0db834924fd302193c37629669
| 28,557 |
import json
import time
from typing import Callable
import dill
import pandas as pd
from flask import jsonify, request
def instate():
"""
Same as calculate() but the results are not saved to the database
Use this to update the state of the server to further analyse the model
:return: id of the simulation and result of the calculation
"""
# get the name of the calculation setup
name = request.args.get('name')
if name is None:
return "Please provide 'name' argument to specify which model to instate"
if request.method == 'POST':
options = request.get_data(as_text=True)
options = json.loads(options)
else:
options = None
# ----------------------- MODEL UPDATE --------------------------------
parameters, msg = update_params(name=name)
if msg is not None:
return msg
model, idf = update_model(name=name, parameters=parameters)
# ----------------------- CALCULATIONS --------------------------------
tic = time.perf_counter()
impact_result, cost_result, energy_result, sim_id = run(name=name, model=model, idf=idf, simulation_options=options)
toc = time.perf_counter()
# measure execution time
exec_time = toc - tic
# ----------------------- EVALUATION --------------------------------
eval_type = Callable[[pd.DataFrame, pd.DataFrame, pd.DataFrame], pd.Series]
evaluate: eval_type = dill.loads(R.get(f'{name}:evaluate_func'))
result = evaluate(impacts=impact_result.impacts, costs=cost_result.costs, energy=energy_result)
data = {
'result': result.to_dict(),
'simulation_id': sim_id,
'calculation_time': exec_time
}
return jsonify(data)
|
e50549ff8ae5e9e49cd972799ce0abffed213912
| 28,558 |
import pickle
def pickler(obj=None, filename: str = None, mode: str = 'pickle'):
"""
pickles the file to filename, or
unpickles and returns the file
(to save the result of long running calculations)
Parameters
----------
obj :
the object to pickle
filename : str
file to pickle to
mode:
        one of 'pickle' or 'unpickle'
"""
unpickled = None
    if mode == 'pickle':
        with open(filename, 'wb') as fh:
            pickle.dump(obj, fh)
    elif mode == 'unpickle':
        with open(filename, 'rb') as fh:
            unpickled = pickle.load(fh)
return unpickled
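# --- hedged usage sketch (not part of the original snippet) ---
# Round-trips a small object through a temporary file; the path is arbitrary.
import os
import tempfile
_path = os.path.join(tempfile.gettempdir(), "pickler_demo.pkl")
pickler({"a": 1}, _path, mode="pickle")
print(pickler(filename=_path, mode="unpickle"))  # -> {'a': 1}
os.remove(_path)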
|
b57e85a15099b5eed4e6c3d425bc4df7ff73d657
| 28,559 |
def csrgeam(m, n, descrA, csrValA, csrRowPtrA, csrColIndA, descrB, csrValB,
csrRowPtrB, csrColIndB, handle=None, alpha=1.0, beta=0.0,
nnzA=None, nnzB=None, check_inputs=True):
""" add two sparse matrices: C = alpha*A + beta*B.
higher level wrapper to cusparse<t>csrgemm routines.
"""
if check_inputs:
for array in [csrValA, csrRowPtrA, csrColIndA, csrValB, csrRowPtrB,
csrColIndB]:
if not isinstance(array, pycuda.gpuarray.GPUArray):
raise ValueError("all csr* inputs must be a pyCUDA gpuarray")
if cusparseGetMatType(descrA) != CUSPARSE_MATRIX_TYPE_GENERAL:
raise ValueError("Only general matrix type supported")
if cusparseGetMatType(descrB) != CUSPARSE_MATRIX_TYPE_GENERAL:
raise ValueError("Only general matrix type supported")
if handle is None:
handle = misc._global_cusparse_handle
if nnzA is None:
nnzA = csrValA.size
if nnzB is None:
nnzB = csrValB.size
dtype = csrValA.dtype
# perform some basic sanity checks
if check_inputs:
if csrValA.size != nnzA:
raise ValueError("length of csrValA array must match nnzA")
if csrValB.size != nnzB:
raise ValueError("length of csrValB array must match nnzB")
if (dtype != csrValB.dtype):
raise ValueError("incompatible dtypes")
if csrRowPtrA.size != m + 1:
raise ValueError("bad csrRowPtrA size")
if csrRowPtrB.size != m + 1:
raise ValueError("bad csrRowPtrB size")
# allocate output matrix C descr and row pointers
descrC = cusparseCreateMatDescr()
cusparseSetMatType(descrC, CUSPARSE_MATRIX_TYPE_GENERAL)
alloc = misc._global_cusparse_allocator
csrRowPtrC = gpuarray.zeros((m+1, ), dtype=np.int32, allocator=alloc)
    # call _csrgeamNnz to determine nnzC and fill in csrRowPtrC
nnzC = _csrgeamNnz(m, n, descrA, csrRowPtrA, csrColIndA, descrB,
csrRowPtrB, csrColIndB, handle=handle, descrC=descrC,
csrRowPtrC=csrRowPtrC, nnzA=nnzA, nnzB=nnzB,
check_inputs=False)
# allocated rest of C based on nnzC
csrValC = gpuarray.zeros((nnzC, ), dtype=dtype, allocator=alloc)
csrColIndC = gpuarray.zeros((nnzC, ), dtype=np.int32, allocator=alloc)
if dtype == np.float32:
fn = cusparseScsrgeam
elif dtype == np.float64:
fn = cusparseDcsrgeam
elif dtype == np.complex64:
fn = cusparseCcsrgeam
elif dtype == np.complex128:
fn = cusparseZcsrgeam
else:
raise ValueError("unsupported sparse matrix dtype: %s" % dtype)
fn(handle, m, n, alpha, descrA, nnzA, csrValA, csrRowPtrA, csrColIndA,
beta, descrB, nnzB, csrValB, csrRowPtrB, csrColIndB, descrC,
csrValC, csrRowPtrC, csrColIndC)
return (descrC, csrValC, csrRowPtrC, csrColIndC)
|
c51338336fda4a6e49529cee1f2137a826eb0b4d
| 28,560 |