content | sha1 | id
---|---|---
stringlengths 35-762k | stringlengths 40 | int64 0-3.66M
import re
def find_kwic(item, regexes, shell_nouns):
"""This function takes the location of a vertically annotated text of COCA/COHA
and transforms it to the form [['word', 'lemma', 'POS'], ['word2', 'lemma2', 'POS2'], ...]
As a second argument the regex objects to be used in the search are passed
(they are built outside this function to avoid building them again and again).
As a third argument, a list of shell nouns is passed, taken from 'settings.py'
It first filters only the contexts around shell nouns to speed up the subsequent regex.
Afterwards, matching results are extracted and converted into a human-readable form.
"""
f = open(item, "r")
text = [x.split() for x in f.readlines()] # read the file, transform to list of lists
f.close()
shell_noun_locations = [id for id in range(len(text)) if text[id][0] in shell_nouns] # find out where in the text do we find shell nouns
shell_noun_locations = [[x-7, x+7] for x in shell_noun_locations] # expand the context around the shell nouns to allow the regex to work
shell_noun_locations = [[x,y] if x >= 0 else [0,y] for x,y in shell_noun_locations] # make sure the range does not get out of the list (left side)
shell_noun_locations = [[x,y] if y <= len(text) else [x,len(text)] for x,y in shell_noun_locations] # make sure the range does not get out of the list (right side)
contexts = [text[x:y] for x,y in shell_noun_locations] # extract the relevant contexts from the text
contexts = [[tok for tok in ctx if tok[2] != "y"] for ctx in contexts] # remove punctuation tokens (POS tag "y") from each context
horizontal = [["_".join(x) for x in item] for item in contexts] # convert to horizontal markup to allow the regex search
horizontal = [x+(5*["0_0_0"]) for x in horizontal] # add the dummy 0_0_0 to prevent overlap
horizontal = " ".join([" ".join(context) for context in horizontal]) # transform to a plain text
del shell_noun_locations, contexts, text # remove shell_noun_locations, text and contexts from the memory
entries = [regex.findall(horizontal) for regex in regexes] # for each shell noun find the fitting contexts
entries = [item for sublist in entries for item in sublist] # transform from list of lists to list
entries = [re.sub(r"_\S+|0_0_0", "", x) for x in entries] # remove tags
return entries
|
65a76b7be4037f2e142b20378ac7a8c68dc7f4c2
| 35,727 |
def is_numeric(value: str):
"""Return True if given value is a number"""
return value.isdigit()
|
fe61469ab388534a17d079590591378f87078cd3
| 35,728 |
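A quick illustration of the helper above; str.isdigit() only accepts plain digit strings, so signs and decimal points fail:

print(is_numeric("42"))   # True
print(is_numeric("-3"))   # False - the sign is not a digit
print(is_numeric("3.5"))  # False - the decimal point is not a digit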
def mixed(operations, paths, _fs=None):
"""Decorates a function that supports multiple types of action.
:param operations: The list of operations (e.g. ["readlink"]).
:param paths: The paths to match (e.g. ["/<file>", "/<file>.txt"]).
:param _fs: An optional _DiazedFileSystem instance (mostly for testing).
:returns: A decorator that will register the function with fs for path.
"""
fs = _resolve_fs(_fs)
return _get_decorator(fs, operations=operations, paths=paths)
|
7a7144cb49d64cca74f0e366af3d4a9574a86f8b
| 35,729 |
def bladeTELoss(w, t, Temp, alpha, beta, rho, C, K, Y):
"""Thermoelastic calculations for blades
Invoked for upper joint only (there is no lower blade)
w = angular frequency
t = blade thickness
Temp = temperature
alpha = coeff of thermal expansion
beta = temp dependence of Young's modulus
rho = mass density
C = heat capacity
K = thermal conductivity W/(m K)
Y = Young's modulus
Returns the loss angle associated with thermoelastic damping
(blade vertical)
"""
# vertical TE time constant, blades
tau = (rho * C * t**2) / (K * pi**2)
# vertical delta, blades
# Here the TE cancellation is ignored
delta = Y * alpha**2 * Temp / (rho * C)
phi_TE = delta * tau * w / (1 + w**2 * tau**2)
return phi_TE
|
d655542a0e09bff311c6f0cbbf57e2c8de954149
| 35,730 |
def pretty_snp_association(association):
"""
Prints association stats in roughly the same format as STOPGAP for a cluster of SNPs
Args: GeneSNP_Association
Returntype: String
"""
snp = association.snp
gene_name = association.gene.name
gene_id = association.gene.id
score = association.score
results = [snp.rsID, snp.chrom, str(snp.pos), gene_name, gene_id, str(score)]
results += [str(association.intermediary_scores[functional_source.display_name]) for functional_source in postgap.Cisreg.sources]
return "\t".join(results)
|
f302c451463cce38a2abcab60d322ec876da4efb
| 35,731 |
import torch
def nb_Genes(w, device="cpu"):
"""
========================================================================== \n
Return the number of selected genes from the matrix w \n
#----- INPUT \n
w : (Tensor) weight matrix \n
#----- OUTPUT \n
nbG : (Scalar) the number of genes \n
indGene_w : (array) the index of the genes \n
===========================================================================
"""
#
d = w.shape[0]
ind_genes = torch.zeros((d, 1), device=device)  # honour the requested device instead of hardcoding "cpu"
for i in range(d):
if torch.norm(w[i, :]) > 0:
ind_genes[i] = 1
indGene_w = (ind_genes == 1).nonzero()[:, 0]
nbG = ind_genes.sum().int()
return nbG, indGene_w.cpu().numpy()  # move to CPU before converting to NumPy
|
ba9d7f150e177799c1fdf4c521859b6879b09997
| 35,732 |
def S_IMODE(mode):
"""Return the portion of the file's mode that can be set by
os.chmod().
"""
return mode & 0o7777
|
b77df185704a5df812dc1fe2431f80d847c9bc15
| 35,733 |
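For example, applied to a typical stat-style mode value the snippet above keeps only the permission bits:

mode = 0o100644            # regular file with rw-r--r-- permissions
print(oct(S_IMODE(mode)))  # 0o644 - only the chmod-settable bits remain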
def storage_initial_constraint_rule(backend_model, node, tech):
"""
If storage is cyclic, allow an initial storage to still be set. This is
applied to the storage of the final timestep/datestep of the series as that,
in cyclic storage, is the 'storage_previous_step' for the first
timestep/datestep.
If clustering and ``storage_inter_cluster`` exists:
.. container:: scrolling-wrapper
.. math::
\\boldsymbol{storage_{inter\\_cluster}}(loc::tech, datestep_{final})
\\times ((1 - storage_loss) ** 24) = storage_{initial}(loc::tech) \\times storage_{cap}(loc::tech)
\\quad \\forall loc::tech \\in loc::techs_{store}, \\forall datestep \\in datesteps
Where :math:`datestep_{final}` is the last datestep of the timeseries
Else:
.. container:: scrolling-wrapper
.. math::
\\boldsymbol{storage}(loc::tech, timestep_{final})
\\times ((1 - storage_loss) ** 24) = storage_{initial}(loc::tech) \\times storage_{cap}(loc::tech)
\\quad \\forall loc::tech \\in loc::techs_{store}, \\forall timestep \\in timesteps
Where :math:`timestep_{final}` is the last timestep of the timeseries
"""
storage_initial = get_param(backend_model, "storage_initial", (node, tech))
storage_loss = get_param(backend_model, "storage_loss", (node, tech))
if hasattr(backend_model, "storage_inter_cluster"):
storage = backend_model.storage_inter_cluster
final_step = backend_model.datesteps[-1]
time_resolution = 24
else:
storage = backend_model.storage
final_step = backend_model.timesteps[-1]
time_resolution = backend_model.timestep_resolution[final_step]
return (
storage[node, tech, final_step] * ((1 - storage_loss) ** time_resolution)
== storage_initial * backend_model.storage_cap[node, tech]
)
|
b538cd05f834b18471b348d73e6c5a4ca3c5acab
| 35,735 |
def get_hform_298k_thermp(output_string):
"""
Obtains deltaHf from thermp output
"""
# Line pattern containing the DeltaHf value at 298 K
dhf298_pattern = ('h298 final' +
app.one_or_more(app.SPACE) +
app.capturing(app.FLOAT))
dhf298 = float(apf.last_capture(dhf298_pattern, output_string))
return dhf298
|
1ac0a111501a0dff5987c662302b6cabf09e6477
| 35,736 |
def filter_peaks_width(peakdic, width_range):
"""
Filter a dictionary of peaks by peak width.
- peakdic: Dictionary of peaks per sequence (chromosome)
- width_range: Tuple with the minimum and maximum peak width to keep. Other peaks will be discarded
"""
minim, maxim = width_range
before, after = 0, 0
peak_lengths = []
for seq in peakdic:
before += len(peakdic[seq])
peak_lengths.extend(len(peak) for peak in peakdic[seq])
# build a filtered list (calling remove() while iterating over the same list skips elements)
peakdic[seq] = [peak for peak in peakdic[seq] if minim <= len(peak) <= maxim]
after += len(peakdic[seq])
plt.hist(np.log10(peak_lengths), bins = 100)
plt.axvline(np.log10(minim), color='k',
linestyle='dashed', linewidth=1)
plt.axvline(np.log10(maxim), color='k',
linestyle='dashed', linewidth=1)
plt.show()
print(f'{before} peaks before filtering')
print(f'{after} peaks after filtering')
return peakdic
|
ede51a85999daf589c233fb53e4834ebb3cf3b0a
| 35,737 |
def scrape_course(data):
"""Initializes the course if it doesn't exist and returns a list of
("instructor", data) pairs."""
print " " + data['url']
instructors = []
coursesoup = BeautifulSoup(requests.get(data['url']).text)
ratingstab = coursesoup.find(id="tab-ratings")
floatdivs = ratingstab.find_all(class_="float-left")
if len(floatdivs) > 1 and floatdivs[1].find("span", class_="count"):
courseid = floatdivs[1].a['href'].split("/")[-2]
course = Course.query.get(courseid)
if course is None:
course = Course(id=courseid,
department=data['department'],
number=data['number'],
name=data['name'])
db.session.add(course)
for instructorentry in coursesoup.find_all(class_="ratings-instructor"):
if instructorentry.find(class_="rating-details-qtip"):
instructorurl = 'http://ninjacourses.com' + instructorentry.a['href']
instructors.append(("instructor", { 'url': instructorurl }))
return instructors
|
0e3fc6e05774ec89fb67dcaab1acf3d38853ae0b
| 35,738 |
def send_tp(mcr, x, y, z, a, b, player):
"""
Send the teleport command using mcr. 'x', 'y', 'z'
are cartesian coordinates, 'a' and 'b' are angles.
"""
tp_parameters = [str(i) for i in ["/tp", player, x, y, z, a, b]]
mc_command = ' '.join(tp_parameters)
resp = mcr.command(mc_command)
return resp
|
43c5d94d209c6a4f9d988c6c2711fc3927bd37c3
| 35,739 |
def ori_smooth(directions,frames_per_second=None,return_missing=False):
"""smooth orientations using an RTS smoother
This treats orientations as XYZ positions
"""
dt = 1.0/frames_per_second
A = np.array([[1, 0, 0, dt, 0, 0],
[0, 1, 0, 0, dt, 0],
[0, 0, 1, 0, 0, dt],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1]])
Q = 1.0*np.eye(6)
C = np.array([[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0]])
R = 30.0*np.eye(3)
init_x = np.hstack((directions[0,:],np.zeros((3,))))
assert not np.any( np.isnan( init_x ) ), "cannot start with missing orientation"
init_V = 2*Q
y=directions
dirsmooth,V = adskalman.kalman_smoother(y,A,C,Q,R,init_x,init_V)
dirsmooth = dirsmooth[:,:3] # take only position information
dirsmooth_missing = np.array(dirsmooth,copy=True)
# remove results too distant from observations
avlen=5
if len(y) >= avlen:
good = ~np.isnan(y[:,0])
near_good = running_average(good,avlen)
pad = avlen//2
near_good = np.hstack( (np.zeros( (pad,) ), near_good, np.zeros( (pad,) )) )
good_cond = near_good>.2
bad_cond = ~good_cond
if bad_cond.shape != dirsmooth[:,0].shape:
print 'xxxx'
print bad_cond.shape
print dirsmooth[:,0].shape
print directions.shape
dirsmooth[bad_cond,:] = np.nan
# normalize lengths to unit vectors
np.sum(dirsmooth**2,axis=1)
dirsmooth = (dirsmooth.T/np.sqrt(np.sum(dirsmooth**2,axis=1))).T
if return_missing:
return dirsmooth, dirsmooth_missing
else:
return dirsmooth
|
85871a95409d059617d41931155c14c6b9532d93
| 35,741 |
from typing import Sequence
import array
def compile_array(data: Sequence[float], format="xyseb") -> array.array:
"""Gather point components from input data.
Format codes:
- ``x`` = x-coordinate
- ``y`` = y-coordinate
- ``s`` = start width
- ``e`` = end width
- ``b`` = bulge value
- ``v`` = (x, y [,z]) tuple (z-axis is ignored)
Args:
data: list or tuple of point components
format: format string, default is 'xyseb'
Returns:
array.array: array.array('d', (x, y, start_width, end_width, bulge))
"""
a = array.array("d", (0.0, 0.0, 0.0, 0.0, 0.0))
format = [code for code in format.lower() if code in FORMAT_CODES]
for code, value in zip(format, data):
if code not in FORMAT_CODES:
continue
if code == "v":
vertex = Vec3(value)
a[0] = vertex.x
a[1] = vertex.y
else:
a["xyseb".index(code)] = value
return a
|
6de809fdcd32a1b39a2cc3e8639cb8047fc476fa
| 35,742 |
def nms(bboxes, iou_threshold, sigma = 0.3, method = 'nms'):
""" function to implement non-maximal suppression / softmax non-maximal supression of bboxes """
""" takes bboxes with the shape of (num_of_box, 6), where 6 => (xmin, ymin, xmax, ymax, score, class) """
# remove duplicates in classes
classes_in_img = list(set(bboxes[:, 5]))
# initialise list to store best bboxes
best_bboxes = []
# iterate over each class
for cls in classes_in_img:
# get mask for bboxes with the same class and apply on bboxes to obtain array of bboxes with same class
cls_mask = (bboxes[:, 5] == cls)
cls_bboxes = bboxes[cls_mask]
# iterate while there are still bboxes in cls_bboxes
while len(cls_bboxes) > 0:
# select index of the bbox with the highest score
max_ind = np.argmax(cls_bboxes[:, 4])
# select bbox with highest score
best_bbox = cls_bboxes[max_ind]
# append to best _bbox list
best_bboxes.append(best_bbox)
# obtain cls_bboxes without best bbox
cls_bboxes = np.concatenate([cls_bboxes[: max_ind], cls_bboxes[max_ind + 1:]])
# calculate iou of remaining bboxes with best bbox
iou = bbox_iou(best_bbox[np.newaxis, :4], cls_bboxes[:, :4])
weight = np.ones((len(iou), ), dtype = np.float32)
# assert method to be either 'nms' or 'soft_nms'
assert method in ['nms', 'soft_nms']
if method == 'nms':
# obtain nms iou mask based on threshold
iou_mask = iou > iou_threshold
# apply mask on weights
weight[iou_mask.numpy()] = 0.0
if method == 'soft_nms':
# obtain soft_nms weights
weight = np.exp(-(1.0 * iou ** 2 / sigma))
# apply weights on cls_bboxes
cls_bboxes[:, 4] = cls_bboxes[:, 4] * weight
# obtain score mask of scores greater than zero
score_mask = cls_bboxes[:, 4] > 0.
# apply mask on cls_bboxes
cls_bboxes = cls_bboxes[score_mask]
return best_bboxes
|
5e2b390f2c920d6d0f6909bb058cc50b316a36b3
| 35,743 |
def hls_to_hex(h, l, s):
"""Converts a (hue, lightness, saturation) tuple to a "#rrbbgg" string.
Args:
h, l, s: the HLS values
Returns:
a hex string
"""
return rgb_to_hex(*hls_to_rgb(h, l, s))
|
562e4f802b1cc19734574186cbf107f3a288e044
| 35,744 |
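The converter above depends on two helpers that are not part of this snippet; a usage sketch assuming hls_to_rgb comes from colorsys and using a hypothetical rgb_to_hex formatter:

from colorsys import hls_to_rgb

def rgb_to_hex(r, g, b):
    # hypothetical helper: scale 0-1 floats to 0-255 and format as "#rrggbb"
    return "#{:02x}{:02x}{:02x}".format(int(r * 255), int(g * 255), int(b * 255))

print(hls_to_hex(0.0, 0.5, 1.0))  # pure red -> #ff0000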
def seconds_to_hhmmssms(seconds):
"""Parses the number of seconds after midnight and returns the corresponding HH:MM:SS.f-string.
Args:
seconds (float): number of seconds after midnight.
Returns:
str: the corresponding HH:MM:SS.f-string.
"""
int_seconds = int(seconds)
ms = round((seconds - int_seconds) * 1000)
m, s = divmod(int_seconds, 60)
h, m = divmod(m, 60)
return "{:02d}:{:02d}:{:02d}.{:03d}".format(h, m, s, ms)
|
ce68c6238de4229aed99d3e4a72596d72f97af7c
| 35,746 |
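Two example calls to the formatter above:

print(seconds_to_hhmmssms(3661.5))  # 01:01:01.500
print(seconds_to_hhmmssms(0.25))    # 00:00:00.250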
import torch
def sample_with_probs(model, x, steps, temperature=1.0, sample=False, top_k=None, first_sentence=False):
"""
A modified version of sample from mingpt.util that also returns probability of each sentence
and allow end with first sentence complete
"""
block_size = model.get_block_size()
model.eval()
all_probs = []
current_prob = 1
prev_newline = False
early_terminate = True
for k in range(steps):
x_cond = x if x.size(1) <= block_size else x[:, -block_size:] # crop context if needed
logits, _ = model(x_cond)
# pluck the logits at the final step and scale by temperature
logits = logits[:, -1, :] / temperature
# optionally crop probabilities to only the top k options
if top_k is not None:
logits = top_k_logits(logits, top_k)
# apply softmax to convert to probabilities
probs = F.softmax(logits, dim=-1)
# sample from the distribution or take the most likely
if sample:
ix = torch.multinomial(probs, num_samples=1)
else:
_, ix = torch.topk(probs, k=1, dim=-1)
# append to the sequence and continue
x = torch.cat((x, ix), dim=1)
# update sentence prob
curr_newline = train_dataset.itos[ix.item()] == '\n'
if prev_newline:
if curr_newline:
all_probs.append(current_prob)
if first_sentence:
if not early_terminate:
early_terminate = True
else:
break
current_prob = 1.0
prev_newline = False
else:
if curr_newline:
prev_newline = True
current_prob *= probs[0][ix[0][0]].item()
return x, all_probs
|
643e8e0f05c126180fa0998212d8070394df8764
| 35,747 |
def expression(processor, composer, searcher):
# type: (Parser, Compiler, scanner.Scanner) -> Tuple[Parser, Compiler]
"""Compiles expression."""
return parse_precedence(processor, composer, searcher, Precedence.PREC_ASSIGNMENT)
|
84b2e3f832a6dcbb89199f1e5c3b69f2044fb36c
| 35,748 |
import time
def get_games_results(game_name: str, web: str = "all"):
"""
Get games results from spiders
:param game_name:
:param web:
:return: list of games, obtained from spiders.
"""
games = []
not_formatted_games = []
time.sleep(get_random_delay())
if web == "eneba":
not_formatted_games.extend(EnebaScrapper(game_name).get_content())
elif web == "instantgaming":
not_formatted_games.extend(InstantGamingScrapper(game_name).get_content())
elif web == "steam":
not_formatted_games.extend(SteamScrapper(game_name).get_content())
elif web == "all":
not_formatted_games.extend(EnebaScrapper(game_name).get_content())
time.sleep(get_random_delay())
not_formatted_games.extend(InstantGamingScrapper(game_name).get_content())
time.sleep(get_random_delay())
not_formatted_games.extend(SteamScrapper(game_name).get_content())
for game in not_formatted_games:
if game:
games.append(
Game(
title=game.get("title"),
price=game.get("price"),
link=game.get("link"),
search=game_name
)
)
return games
|
a78b520aa9426a4ba733a060f4a34432b35d8bce
| 35,749 |
def plot_pr_curve(y_true, y_pred, title=None):
"""
Convenience function for plotting precision recall curve
@param: y_true - ground truth
@type: array like
@param: y_pred_train - predictions
@type: array like
"""
precision, recall, _ = precision_recall_curve(y_true, y_pred)
avg_precision = average_precision_score(y_true, y_pred)
fig, ax = plt.subplots()
lw = 2
ax.plot(recall[::-1], precision[::-1], color='navy', lw=lw)
ax.set_xlim([0.0, 1.0])
ax.set_ylim([0.0, 1.05])
ax.set_xlabel('Recall')
ax.set_ylabel('Precision')
ax.axhline(y=avg_precision)
if title:
ax.set_title(title)
else:
ax.set_title('Precision - Recall Curve\nAvg Precision: {}'.format(avg_precision))
ax.grid()
return fig
|
ad8575afc92d1e03b2ab9a8f27395ebfec0f606f
| 35,750 |
def matrix_from_interactions(interactions, mapping, default=0.0):
"""Generate numpy matrices from interaction dictionary
:param interactions: dictionary read from WORDOM avgpsn
:param mapping: residuemap, preserves residue names
:param default: default interaction
:return: tuple of interaction strength and frequency numpy array
"""
# Expecting symmetric dictionary
size = len(mapping)
strength = zeros((size, size)) + default
frequency = zeros((size, size)) + default
# Populate strength and frequency via the mapping from interactions
for resa, inter in interactions.items():
for resb, (strng, freq) in inter.items():
a = mapping[resa] - 1
b = mapping[resb] - 1
strength[a][b] = strength[b][a] = strng
frequency[a][b] = frequency[b][a] = freq
return strength, frequency
|
c120ed1d56aed8c3b25a2c06d579af5d76705377
| 35,751 |
def to_flags(value):
"""Return a (flags, ednsflags) tuple which encodes the rcode.
*value*, an ``int``, the rcode.
Raises ``ValueError`` if rcode is < 0 or > 4095.
Returns an ``(int, int)`` tuple.
"""
if value < 0 or value > 4095:
raise ValueError('rcode must be >= 0 and <= 4095')
v = value & 0xf
ev = long(value & 0xff0) << 20
return (v, ev)
|
12477f6db3f1124d08884815ded5f2313ab1af98
| 35,752 |
def map_mix_query_attr_to_ch(mixing_query):
"""Map the mixing query attributes (tip_angle and phase) to the channel index.
If the attribute is defined for the channel use the defined value else set it to 0.
Args:
mixing_query: The mixing query object whose per-channel (ch1-ch3) attributes are read.
"""
attributes = ["tip_angle", "phase"]
return {
item: {
i: getattr(getattr(mixing_query, f"ch{i+1}"), item) or 0
if getattr(mixing_query, f"ch{i+1}") is not None
else 0
for i in range(3)
}
for item in attributes
}
|
9192b9abbc2710b8ebdf172ad34c3dfadf8048ef
| 35,753 |
def masked( arr, v, mask ):
""" Performs the 1D convolution on arr, omitting pixels under the mask """
arr = arr.astype(float)
smoothed = conv( arr*mask, v )
norm = conv( mask.astype(float), v )
return smoothed/norm
|
ac12e1703b931800bcaea1d496dceb98e1c9303b
| 35,754 |
from datetime import datetime
def api_get_videos_duration(list_videos, api_service):
"""Get the duration of 50 videos at once.
:param list_videos: A list of video IDs, maximum size 50.
:param api_service: API Google Token generated with Google.py call.
:return: a dictionary associating video id and duration of said video.
"""
if list_videos:
durations = []
dates = []
if isinstance(list_videos[0], tuple):
chunks50 = divide_chunks([video[0] for video in list_videos], 50)
else:
chunks50 = divide_chunks([video for video in list_videos], 50)
# print(chunks50)
for chunk in chunks50:
request = api_service.videos().list(id=",".join(chunk),
part=['contentDetails', 'snippet'],
maxResults=50).execute()
# print(request)
durations += [parse_duration(element["contentDetails"]["duration"]) for element in request["items"]]
dates += [element["snippet"]["publishedAt"] for element in request["items"]]
# print(len(list_videos), len(durations), len(dates))
id_and_duration = sorted([(video_id, durations[idx], datetime.strptime(dates[idx], "%Y-%m-%dT%H:%M:%S%z"))
for idx, video_id in enumerate(list_videos)], key=lambda tup: tup[2])
return id_and_duration
return []
|
a88dd81b510ba99038401e90eb524194860a2c83
| 35,757 |
def build_pieces(headers, batch_size, start, end, max_piece_size=100000, metadata_columns=None):
"""
Build pieces function takes as input a list of headers and
returns a list of pieces split in size maximum max_piece_size.
Input: (filename, count, count_before)
Output: (filename:str, piece_start:int, piece_end:int, batch_id:int, batch_length:int, last_piece:bool)
This function is the main feature of embedding reader, it makes it possible to read many files
in parallel and abstract away all the batching
"""
if metadata_columns is None:
metadata_columns = []
columns = ["filename", "count", "count_before"] + metadata_columns
filecount = namedtuple("FileCount", columns)
items = [filecount(*args) for args in zip(*[headers[col] for col in columns])]
header_i = 0
while header_i < len(items) and items[header_i].count_before + items[header_i].count <= start:
header_i += 1
continue
if header_i == len(items):
raise ValueError(f"Can not build pieces for batch with start: {start}, end: {end}, perhaps reduce the start")
# we skipped start-count_before from the first file
read_current_file = start - items[header_i].count_before
read_current_batch = 0
pieces = []
for batch_id, batch_start in enumerate(range(start, end, batch_size)):
batch_length = min(batch_size, end - batch_start)
# building all pieces of this batch
while header_i < len(items) and read_current_batch < batch_length:
if items[header_i].count == 0:
header_i += 1
continue
count_before = items[header_i].count_before
count = items[header_i].count
piece_start = batch_start + read_current_batch - count_before
piece_length = min(count - piece_start, batch_length - read_current_batch, max_piece_size)
piece_end = piece_start + piece_length
read_current_file += piece_length
read_current_batch += piece_length
piece_filename = items[header_i].filename
last_piece = read_current_batch == batch_length
batch_end = batch_start + batch_length
piece = (
piece_filename,
piece_start,
piece_end,
piece_length,
batch_id,
batch_start,
batch_end,
batch_length,
last_piece,
)
piece = piece + tuple(items[header_i][3 + col] for col in range(len(metadata_columns)))
pieces.append(piece)
if read_current_file == items[header_i].count:
read_current_file = 0
header_i += 1
read_current_batch = 0
return pd.DataFrame(
pieces,
columns=PIECES_BASE_COLUMNS + metadata_columns,
)
|
92f3ac97d1a9ec16c0aea491ac773707463d05e3
| 35,758 |
from typing import Dict
def collect_values(wdlfile: str, separate_required: bool,
category_key: str, fallback_category: str,
description_key: str, fallback_description: str,
fallback_description_to_object: bool,
strict: bool) -> Dict:
"""
:param wdlfile: The workflow for which the values will be retrieved.
:param separate_required: Whether or not to put required inputs in a
separate category.
:param category_key: The key used in parameter_meta for categories.
:param fallback_category: The default category.
:param description_key: The key used in parameter_meta for
descriptions.
:param fallback_description: The default description.
:param fallback_description_to_object: Whether or not the entire
object should be returned for a given object if the description
key is not found.
:param strict: When true, raise a ValueError if no parameter_meta is
available for the input.
:return: The values.
"""
document = WDL.load(wdlfile)
workflow = document.workflow
if workflow is None:
raise ValueError("No workflow is available in the WDL file.")
inputs, required_inputs = gather_inputs(workflow)
parameter_meta = gather_parameter_meta(workflow, workflow.name)
gathered_meta = gather_meta(workflow, workflow.name)
authors = wrap_in_list(workflow.meta.get("authors", []))[:]
values = {"workflow_name": workflow.name,
"workflow_file": wdlfile,
"workflow_authors": authors,
"workflow_all_authors": gathered_meta["authors"],
"workflow_meta": workflow.meta,
"excluded_inputs": gathered_meta["exclude"],
"wdl_aid_version": __version__}
missing_parameter_meta = []
for name, inp in inputs:
if name in gathered_meta["exclude"]:
continue
if name not in parameter_meta:
missing_parameter_meta.append(name)
category = ("required"
if name in required_inputs and separate_required
else get_category(parameter_meta, name, category_key,
fallback_category))
entry = {
"name": name,
"type": str(inp.value.type),
"default":
str(inp.value.expr) if inp.value.expr is not None else None,
"description":
get_description(parameter_meta, name, description_key,
fallback_description,
fallback_description_to_object)
}
try:
values[category].append(entry)
except KeyError:
values[category] = [entry]
if strict and len(missing_parameter_meta) > 0:
missed_inputs = "\n".join(missing_parameter_meta)
raise ValueError(
f"Missing parameter_meta for inputs:\n{missed_inputs}")
return values
|
356fcdde70ca644c2bf9fd8d78ed0fba539c3aaf
| 35,759 |
def _GetConfigMapsChanges(args):
"""Return config map env var and volume changes for given args."""
volume_kwargs = {}
env_kwargs = {}
updates = _StripKeys(
getattr(args, 'update_config_maps', None) or args.set_config_maps or {})
volume_kwargs['updates'] = {
k: v for k, v in updates.items() if _IsVolumeMountKey(k)
}
env_kwargs['updates'] = {
k: v for k, v in updates.items() if not _IsVolumeMountKey(k)
}
removes = _MapLStrip(getattr(args, 'remove_config_maps', None) or [])
volume_kwargs['removes'] = [k for k in removes if _IsVolumeMountKey(k)]
env_kwargs['removes'] = [k for k in removes if not _IsVolumeMountKey(k)]
clear_others = bool(args.set_config_maps or args.clear_config_maps)
env_kwargs['clear_others'] = clear_others
volume_kwargs['clear_others'] = clear_others
config_maps_changes = []
if any(env_kwargs.values()):
config_maps_changes.append(
config_changes.ConfigMapEnvVarChanges(**env_kwargs))
if any(volume_kwargs.values()):
config_maps_changes.append(
config_changes.ConfigMapVolumeChanges(**volume_kwargs))
return config_maps_changes
|
c82e3d8d85cf224c4af7ebb397136aa253e36a17
| 35,760 |
import torch
def dice_loss(pred, target):
"""Cacluate dice loss
Parameters
----------
pred:
predictions from the model
target:
ground truth label
"""
smooth = 1.0
pred = torch.sigmoid(pred)
p_flat = pred.view(-1)
t_flat = target.view(-1)
intersection = (p_flat * t_flat).sum()
return (2.0 * intersection + smooth) / (p_flat.sum() + t_flat.sum() + smooth)
|
5bdede57fd34340b823324f962b20e5481333adf
| 35,762 |
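A minimal sanity check for the function above; pred is passed as raw logits because the function applies the sigmoid itself, and the returned value is the smoothed Dice coefficient in (0, 1]:

import torch

torch.manual_seed(0)
pred = torch.randn(2, 1, 8, 8)                      # raw logits
target = torch.randint(0, 2, (2, 1, 8, 8)).float()  # binary ground truth
print(dice_loss(pred, target).item())               # scalar in (0, 1]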
import tqdm
import requests
def stock_em_dxsyl(market: str = "上海主板") -> pd.DataFrame:
"""
Eastmoney (东方财富网) - Data Center - New Share Data - IPO subscription yield
http://data.eastmoney.com/xg/xg/dxsyl.html
:param market: choice of {"上海主板", "创业板", "深圳主板"}
:type market: str
:return: IPO subscription yield data for the specified market
:rtype: pandas.DataFrame
"""
market_map = {"上海主板": "2", "创业板": "3", "深圳主板": "4"}
url = "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
page_num = _get_page_num_dxsyl(market=market)
temp_df = pd.DataFrame()
for page in tqdm(range(1, page_num + 1)):
params = {
"type": "NS",
"sty": "NSDXSYL",
"st": "16",
"sr": "-1",
"p": str(page),
"ps": "50",
"js": "var oyfyNYmO={pages:(pc),data:[(x)]}",
"stat": market_map[market],
"rt": "52898446",
}
res = requests.get(url, params=params)
data_text = res.text
data_json = demjson.decode(data_text[data_text.find("={") + 1 :])
temp_df = temp_df.append(pd.DataFrame(data_json["data"]), ignore_index=True)
temp_df = temp_df.iloc[:, 0].str.split(",", expand=True)
temp_df.columns = [
"股票代码",
"股票简称",
"发行价",
"最新价",
"网上发行中签率",
"网上有效申购股数",
"网上有效申购户数",
"网上超额认购倍数",
"网下配售中签率",
"网下有效申购股数",
"网下有效申购户数",
"网下配售认购倍数",
"总发行数量",
"开盘溢价",
"首日涨幅",
"打新收益",
"上市日期",
"市场",
]
return temp_df
|
42eaac231a9c5a2ab7e5d209ac8f2093de0869a8
| 35,763 |
def get_warningness() -> int:
"""Gets the warning level of the entire program"""
return _warningness
|
57bb29e6697698707132435ccf0f27aba9bab0d2
| 35,764 |
import scipy
def distanceMetrics(vol1, vol2, voxelsize_mm):
"""
avgd[mm] - Average symmetric surface distance
rmsd[mm] - RMS symmetric surface distance
maxd[mm] - Maximum symmetric surface distance
"""
# crop data to reduce computation time
pads1 = getDataPadding(vol1)
pads2 = getDataPadding(vol2)
pads = [
[min(pads1[0][0], pads2[0][0]), min(pads1[0][1], pads2[0][1])],
[min(pads1[1][0], pads2[1][0]), min(pads1[1][1], pads2[1][1])],
[min(pads1[2][0], pads2[2][0]), min(pads1[2][1], pads2[2][1])],
]
vol1 = cropArray(vol1, pads)
vol2 = cropArray(vol2, pads)
# compute borders and distances
border1 = _get_border(vol1)
border2 = _get_border(vol2)
# pyed = sed3.sed3(vol1, seeds=border1); pyed.show()
b1dst = scipy.ndimage.morphology.distance_transform_edt(
border1 == 0, sampling=voxelsize_mm
)
b2dst = scipy.ndimage.morphology.distance_transform_edt(
border2 == 0, sampling=voxelsize_mm
)
dst_b1_to_b2 = border2 * b1dst
dst_b2_to_b1 = border1 * b2dst
dst_12 = dst_b1_to_b2[border2]
dst_21 = dst_b2_to_b1[border1]
dst_both = np.append(dst_12, dst_21)
# compute metrics
avgd = np.average(dst_both)
rmsd = np.sqrt(np.average(dst_both ** 2))  # root-mean-square so the value is in mm, as documented
maxd = max(np.max(dst_b1_to_b2), np.max(dst_b2_to_b1))
return avgd, rmsd, maxd
|
684db2e73cf861fcf657ccd6a979ee42187f5b47
| 35,765 |
def point_in_quadrilateral_2d(
point: np.ndarray, quadrilateral: np.ndarray
) -> bool:
"""Determines whether a point is inside a 2D quadrilateral.
Parameters
----------
point : np.ndarray
(2,) array containing coordinates of a point.
quadrilateral : np.ndarray
(4, 2) array containing the coordinates for the 4 corners
of a quadrilateral. The vertices should be in clockwise order
such that indexing with [0, 1, 2], and [0, 2, 3] results in
the two non-overlapping triangles that divide the
quadrilateral.
Returns
-------
bool
True if the point lies inside the quadrilateral, False otherwise.
"""
triangle_vertices = np.stack(
(quadrilateral[[0, 1, 2]], quadrilateral[[0, 2, 3]])
)
in_triangles = inside_triangles(triangle_vertices - point)
if in_triangles.sum() < 1:
return False
else:
return True
|
cde2c018d792f8ee406a8fe4ca3eaf336ccdd0b1
| 35,766 |
def pSEDIa( sedtype ):
""" returns the likelihood of observing
host galaxy with the given rest-frame
B-K color, assuming the SN is a Ia.
The SED type is from the GOODZ SED template
set (Dahlen et al 2010).
1=E, 2=Sbc, 3=Scd, 4=Irr, 5,6=Starburst
plus 4 interpolations between each.
RETURNS : P(sedtype|Ia), dPplus, dPminus
"""
if sedtype in [None, 'None'] : return( 1, 0., 0. ) # unknown
if not type(sedtype)==str :
if sedtype > 3.5 : sedtype = 'SB'
elif sedtype > 1.5 : sedtype = 'A'
elif sedtype <= 1.5: sedtype = 'P'
sedtype = sedtype.lower()
if sedtype in ['starburst','sb'] : return( 0.129, 0.05, -0.05 ) # starburst
elif sedtype in ['late','a'] : return( 0.521, 0.05, -0.05 ) # late
elif sedtype in ['early','p'] : return( 0.351, 0.05, -0.05 ) # Early
else : return( 1, 0., 0. )
|
493966a39f5a8d01390f4652643e861c7570963d
| 35,767 |
import re
def markov(bot, msg):
"""Return the best quote ever."""
if final_model:
# This tries to generate a sentence that doesn't "overlap", or
# share too much similarity with seeded text.
# Read more here: https://github.com/jsvine/markovify#basic-usage
sentence = final_model.make_sentence(tries=200)
if sentence:
# Put a zero width space in every word to prevent pings.
# This is also much simpler than using crazy IRC nick regex.
# Put it in the middle of the word since nicks are quoted
# using "<@keur>" syntax. Additionally, remove any -slack at
# the end of a nick, to avoid inserting a space like
# abcde|-slack (thus pinging abcde).
def insert_space(w):
halfway = len(re.sub(r'-slack([^A-Za-z0-9_\-\\\[\]{}^`|]|\Z)', r'\1', w)) // 2
return w[:halfway] + '\u2060' + w[halfway:]
msg.respond(
' '.join(map(insert_space, sentence.split())),
ping=False,
)
else:
# This has never happened, but just in case...
msg.respond(
'Could not generate sentence. Please try again.',
ping=True,
)
|
24ea657257880ec0755360eb64d9bbcd9dfaf62f
| 35,768 |
def two_body_mc_stress_stress_jit(
bond_array_1,
c1,
etypes1,
bond_array_2,
c2,
etypes2,
d1,
d2,
d3,
d4,
sig,
ls,
r_cut,
cutoff_func,
):
"""2-body multi-element kernel between two partial stress components
accelerated with Numba.
Args:
bond_array_1 (np.ndarray): 2-body bond array of the first local
environment.
c1 (int): Species of the central atom of the first local environment.
etypes1 (np.ndarray): Species of atoms in the first local
environment.
bond_array_2 (np.ndarray): 2-body bond array of the second local
environment.
c2 (int): Species of the central atom of the second local environment.
etypes2 (np.ndarray): Species of atoms in the second local
environment.
d1 (int): First stress component of the first environment (1=x, 2=y,
3=z).
d2 (int): Second stress component of the first environment (1=x, 2=y,
3=z).
d3 (int): First stress component of the second environment (1=x, 2=y,
3=z).
d4 (int): Second stress component of the second environment (1=x, 2=y,
3=z).
sig (float): 2-body signal variance hyperparameter.
ls (float): 2-body length scale hyperparameter.
r_cut (float): 2-body cutoff radius.
cutoff_func (Callable): Cutoff function.
Return:
float: Value of the 2-body kernel.
"""
kern = 0
ls1 = 1 / (2 * ls * ls)
ls2 = 1 / (ls * ls)
ls3 = ls2 * ls2
sig2 = sig * sig
for m in range(bond_array_1.shape[0]):
ri = bond_array_1[m, 0]
ci = bond_array_1[m, d1]
coordinate_1 = bond_array_1[m, d2] * ri
fi, fdi = cutoff_func(r_cut, ri, ci)
e1 = etypes1[m]
for n in range(bond_array_2.shape[0]):
e2 = etypes2[n]
# check if bonds agree
if (c1 == c2 and e1 == e2) or (c1 == e2 and c2 == e1):
rj = bond_array_2[n, 0]
cj = bond_array_2[n, d3]
coordinate_2 = bond_array_2[n, d4] * rj
fj, fdj = cutoff_func(r_cut, rj, cj)
r11 = ri - rj
A = ci * cj
B = r11 * ci
C = r11 * cj
D = r11 * r11
force_kern = force_helper(
A, B, C, D, fi, fj, fdi, fdj, ls1, ls2, ls3, sig2
)
kern += force_kern * coordinate_1 * coordinate_2 / 4
return kern
|
f5f702398f7bb3e8b498b60653bcc2a520e81f95
| 35,769 |
def new_world_news(cat):
"""[categories]\n\n
lore \n
general\n
updates\n
"""
return nww.news(cat)
|
696fc748cc8927ba75059fda9dd787afe6250471
| 35,771 |
import time
def aggregation_svg(es, query: Query):
""" Execute aggregation query and render as an SVG. """
is_internal = "/logs" in request.headers.get('Referer', '')
width = query.args.pop('width', '100%' if is_internal else '1800')
width_scale = None
if width != '100%':
width_scale = tinygraph.Scale(100, (0, 100), (0, int(width)))
height = int(query.args.pop('height', '125' if is_internal else '600'))
logs_url = query.as_url('/logs')
from_time = parse_timestamp(query.from_timestamp)
to_time = parse_timestamp(query.to_timestamp)
scale = tinygraph.Scale(100, (from_time * 1000, to_time * 1000), (0, 100))
interval = query.interval
if interval == "auto":
try:
interval_s = max(1, tinygraph.time_increment(from_time, to_time, 100))
interval = tinygraph.pretty_duration(interval_s)
except ValueError as ex:
raise ValueError("Could not guess interval: ", ex)
else:
interval_s = parse_offset(interval)
es_query = query.to_elasticsearch(query.from_timestamp)
es_query["aggs"] = query.aggregation("num_results", interval)
resp = es.search(index=query.index, body=es_query, request_timeout=query.timeout)
total_count = 0
max_count = 0
num_results_buckets = resp['aggregations']['num_results']['buckets']
for bucket in num_results_buckets:
total_count += bucket['doc_count']
max_count = max(max_count, bucket['doc_count'])
query_params = [('dc', query.datacenter), ('index', query.index)]
query_params += query.args.items()
query_params += [('from', query.from_timestamp), ('to', query.to_timestamp)]
query_str = ", ".join([f"{item[0]}={item[1]}" for item in query_params])
# num_hits = resp['hits']['total']['value']
avg_count = 0
if num_results_buckets:
avg_count = int(total_count / len(num_results_buckets))
bucket_width = scale.factor * interval_s * 1000
buckets = []
max_percentile = 0
percentile_lines = None
if query.percentiles_terms:
percentile_lines = {}
for bucket in num_results_buckets:
percentiles = bucket[query.percentiles_terms]['values']
for percentile in percentiles.keys():
percentile_lines[percentile] = ""
max_percentile = max(max_percentile or 0, percentiles[str(query.percentiles[-1])] or 0)
color_mapper = ColorMapper()
for idx, bucket in enumerate(num_results_buckets):
count = bucket['doc_count']
from_ts = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(bucket['key'] / 1000))
to_ts = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime((bucket['key'] + interval_s * 1000) / 1000))
label_align = "middle"
if idx / len(num_results_buckets) < 0.25:
label_align = "start"
elif idx / len(num_results_buckets) > (1 - 0.25):
label_align = "end"
bucket_data = {
"count": count,
"key": bucket['key_as_string'],
"label": f"(count: {count})",
"label_y": "15%" if is_internal else "50%",
"label_align": label_align,
"height": int((count / max_count) * 100),
"pos_x": scale.map(bucket['key']),
"from_ts": from_ts,
"to_ts": to_ts,
"logs_url": logs_url + f"&from={from_ts}&to={to_ts}",
"aggregation_terms": query.aggregation_terms,
}
if query.aggregation_terms:
offset_y = 100
sub_buckets = bucket[query.aggregation_terms]['buckets']
sub_buckets.sort(key=lambda bucket: bucket['key'])
bucket_data['sub_buckets'] = []
for sub_bucket in sub_buckets:
sub_count = sub_bucket['doc_count']
sub_height = max(0.25, int((sub_count / max_count) * 100))
offset_y -= sub_height
bucket_data['sub_buckets'].append({
'count': sub_count,
'height': sub_height,
'offset_y': offset_y,
'color': color_mapper.to_color(sub_bucket['key']),
})
bucket_data['label'] = "\n".join([f"{sub_bucket['key']}: {sub_bucket['doc_count']} ({(sub_bucket['doc_count'] / count) * 100 :.2f}%)" for sub_bucket in sub_buckets])
if query.percentiles_terms:
percentiles = bucket[query.percentiles_terms]['values']
bucket_data['label'] += "\n\n" + " ".join([f"p{int(float(val)) if float(val).is_integer() else val}: {key or 0:.2f}" for val, key in percentiles.items()])
bucket_data['percentiles'] = []
scale_percentile = tinygraph.Scale(1000, (0, max_percentile), (0, 95))
for percentile, value in percentiles.items():
if not value:
continue
pos_y = 100 - scale_percentile.map(value)
if width_scale:
percentile_lines[percentile] += f" {width_scale.map(bucket_data['pos_x']+bucket_width/2)},{pos_y/100 * height}"
percentile = float(percentile)
pretty_percentile = int(percentile) if percentile.is_integer() else percentile
bucket_data['percentiles'].append({
'pos_y': pos_y,
'name': pretty_percentile,
'value': value,
})
buckets.append(bucket_data)
query_title = ""
if not is_internal:
query_title += query_str + "\n"
query_title += f"count per {interval}: max: {max_count}, avg: {avg_count}"
if query.percentiles_terms:
percentiles = resp["aggregations"][query.percentiles_terms]["values"]
query_title += " ("
ps = []
for p, val in percentiles.items():
val = int(val) if val.is_integer() else '{:.2f}'.format(val)
ps.append(f"p{int(float(p)) if float(p).is_integer() else p}: {val}")
query_title += ", ".join(ps)
query_title += ")"
template = Template(r"""<?xml version="1.0" encoding="UTF-8"?>
<svg xmlns="http://www.w3.org/2000/svg" class="chart" width="{{ width }}" height="{{ height }}" xmlns:xlink="http://www.w3.org/1999/xlink">
<title id="title">Aggregation for query: {{ query_str | e }}</title>
<style>
svg {
font-family: monospace;
}
rect {
fill-opacity: 0.5;
stroke-width: 1px;
}
text {
white-space: pre;
}
g text {
display: none;
}
g:hover text {
display: block;
}
</style>
<text x="10" y="14">{{ query_title | e }}</text>
{% for bucket in buckets %}
{% if bucket.aggregation_terms %}
<g class="bucket">
<a target="_parent" alt="Logs from {{ bucket.from_ts }} to {{ bucket.to_ts }}" xlink:href="{{ bucket.logs_url | e }}">
{% for sub_bucket in bucket.sub_buckets %}
<rect fill="{{ sub_bucket.color }}" stroke="{{ sub_bucket.color }}" width="{{ bucket_width }}%" height="{{ sub_bucket.height }}%" y="{{ sub_bucket.offset_y }}%" x="{{ bucket.pos_x }}%"></rect>
{% endfor %}
</a>
<text y="{{ bucket.label_y }}" x="{{ bucket.pos_x }}%" text-anchor="{{ bucket.label_align }}">{{ bucket.key | e }}
{{ bucket.label | e }}</text>
{% for percentile in bucket.percentiles %}
<line stroke="black" x1="{{ bucket.pos_x }}%" x2="{{ bucket.pos_x + bucket_width }}%"
y1="{{ percentile.pos_y }}%" y2="{{ percentile.pos_y }}%" />
{% endfor %}
</g>
{% else %}
<g class="bucket">
<a target="_parent" alt="Logs from {{ bucket.from_ts }} to {{ bucket.to_ts }}" xlink:href="{{ bucket.logs_url | e }}">
<rect fill="#00b2a5" stroke="#00b2a5" width="{{ bucket_width }}%" height="{{ bucket.height }}%" y="{{ 100-bucket.height }}%" x="{{ bucket.pos_x }}%"></rect>
</a>
<text y="{{ bucket.label_y }}" x="{{ bucket.pos_x }}%" text-anchor="{{ bucket.label_align }}">{{ bucket.key | e }}
{{ bucket.label | e }}</text>
{% for percentile in bucket.percentiles %}
<line stroke="black" x1="{{ bucket.pos_x }}%" x2="{{ bucket.pos_x + bucket_width }}%"
y1="{{ percentile.pos_y }}%" y2="{{ percentile.pos_y }}%" />
{% endfor %}
</g>
{% endif %}
{% endfor %}
{% if percentile_lines %}
<polyline id="percentile" fill="none" stroke="rgba(100, 100, 100, 0.7)" points="{{ percentile_lines[list(percentile_lines.keys())[-1]] }}" />
{% endif %}
</svg>
""")
return Response(template.render(list=list, width=width, height=height, query_title=query_title, bucket_width=bucket_width, buckets=buckets, percentile_lines=percentile_lines), content_type="image/svg+xml")
|
3ef3e01232fcaeccdb810df80bc824494569b439
| 35,772 |
def transposem(inp):
"""Transpose multiple matrices."""
perm = list(range(len(inp.shape)))
perm[-2], perm[-1] = perm[-1], perm[-2]
return tf.transpose(inp, perm)
|
1851aef9afa0717953bf409ac9ecdb993f0dd674
| 35,773 |
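A short sketch of the batched transpose above, assuming TensorFlow is installed:

import tensorflow as tf

batch = tf.reshape(tf.range(2 * 3 * 4), (2, 3, 4))  # two 3x4 matrices
print(transposem(batch).shape)                      # (2, 4, 3): each matrix transposed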
from typing import Any
def list_api_tokens(
component_manager: ComponentManager = Depends(get_component_manager),
token: str = Depends(get_api_token),
) -> Any:
"""Returns list of created API tokens associated with the authenticated user."""
authorized_access = component_manager.verify_access(token)
# Check if the caller has admin access on the user resource
component_manager.verify_access(
token,
authorized_access.authorized_subject,
AccessLevel.ADMIN,
)
return component_manager.get_auth_manager().list_api_tokens(
authorized_access.authorized_subject
)
|
7b52a3fff5594cdf3be5eaf0a344c23ea0e320c4
| 35,775 |
def get_val_via_socket(key):
"""Retrieve value of key from redis over Unix socket file."""
set_redis_socket_pool()
global SOCKET_POOL
# retrieve value
r = StrictRedis(connection_pool=SOCKET_POOL)
res = r.get(key)
return res.decode() if hasattr(res, "decode") else res
|
3783d121ec16365edc81c8a538015598a100dd55
| 35,776 |
def euclidean_distance(goal_pose):
"""Euclidean distance between current pose and the goal."""
return sqrt(pow((goal_pose.pose.position.x - pose.pose.pose.position.x), 2) +
pow((goal_pose.pose.position.y - pose.pose.pose.position.y), 2))
|
bdcf6f005eb548f2fb8a2d3f530cf276f75725a2
| 35,777 |
def get_rel_join_parts(database, from_table, rel) -> QueryParts:
"""
:param Database database: The database being reviewed.
:param Table from_table:
:param Relationship rel: The relationship to get the parts from.
:return: New join parts for a join based on a relationship.
"""
to_table = database.get_table(rel.to_table)
######## DON'T DELETE - has logic for parsing generic relationship conditions.
# # get two columns from the key.
# # this assumes that the pairs are always separated by "and" or "or".
# parts = re.split(" and | AND | or | OR", rel.conditions)
# from_cols = []
# to_cols = []
#
# # break up each comparison to get the columns.
# # These don't have to be in any order, so have to check the table names.
# for part in parts:
# part = part.strip().strip("\"')(") # get rid of extraneous characters.
# subparts = re.split("=|>|<|!",part) # split on various comparison operators.
# for subpart in subparts:
# subpart.strip("=><!") # get rid of any remaining characters.
# if "." in subpart: # there are scenarios where the split gets non-column fields.
# table_and_column = subpart.split(".")
# table = table_and_column[0].strip("\"' ")
# column = table_and_column[1].strip("\"' ")
# if table == from_table.table_name:
# from_cols.append(column)
# elif table == to_table.table_name:
# to_cols.append(column)
# else:
# raise ValueError(f'Unexpected table name "{table}" in join from "'
# f'{from_table.table_name} to {to_table.table_name}.')
from_cols = from_table.primary_key
if not from_cols:
from_cols = [col for col in from_table.columns.keys()]
to_cols = to_table.primary_key
if not to_cols:
to_cols = [col for col in to_table.columns.keys()]
return QueryParts(
name=rel.name,
from_db=database.database_name,
from_schema=from_table.schema_name,
from_table=from_table.table_name,
to_db=database.database_name,
to_schema=to_table.schema_name,
to_table=to_table.table_name,
from_cols=from_cols,
to_cols=to_cols,
join_condition=rel.conditions
)
|
75346106c368f760eef36a7c0eb504d04eafe298
| 35,778 |
from lightgbm import LGBMRegressor
from entmoot.learning.tree_model import EntingRegressor, MisicRegressor
def cook_estimator(base_estimator, std_estimator=None, space=None, random_state=None,
base_estimator_params=None):
"""Cook a default estimator.
For the special base_estimator called "DUMMY" the return value is None.
This corresponds to sampling points at random, hence there is no need
for an estimator.
Parameters
----------
base_estimator : "GBRT", creates LightGBM tree model based on base_estimator
std_estimator : DistandBasedStd instance,
Estimates model uncertainty of base_estimator
space : Space instance
random_state : int, RandomState instance, or None (default)
Set random state to something other than None for reproducible
results.
base_estimator_params : dict
Extra parameters provided to the base_estimator at init time.
"""
# collect indices of categorical features
cat_idx = get_cat_idx(space)
if isinstance(base_estimator, str):
base_estimator = base_estimator.upper()
if base_estimator not in ["GBRT", "RF", "DUMMY"]:
raise ValueError("Valid strings for the base_estimator parameter "
"are: 'GBRT', 'RF', or 'DUMMY' not "
f"{base_estimator}.")
elif is_supported(base_estimator):
base_estimator = \
EntingRegressor(
base_estimator=base_estimator,
random_state=random_state,
cat_idx=cat_idx
)
else:
raise ValueError("base_estimator is not supported.")
if std_estimator.std_type == 'distance':
tree_reg = EntingRegressor
elif std_estimator.std_type == 'proximity':
tree_reg = MisicRegressor
if base_estimator == "GBRT":
gbrt = LGBMRegressor(boosting_type='gbdt',
objective='regression',
verbose=-1,
)
base_estimator = tree_reg(base_estimator=gbrt,
std_estimator=std_estimator,
random_state=random_state,
cat_idx=cat_idx)
elif base_estimator == "RF":
rf = LGBMRegressor(boosting_type='random_forest',
objective='regression',
verbose=0,
subsample_freq=1,
subsample=0.9,
bagging_seed= random_state
)
base_estimator = tree_reg(base_estimator=rf,
std_estimator=std_estimator,
random_state=random_state,
cat_idx=cat_idx)
elif base_estimator == "DUMMY":
return None
if base_estimator_params is not None:
base_estimator.set_params(**base_estimator_params)
return base_estimator
|
53fea2782062124d1814cc298a55a45195b145af
| 35,779 |
from pandas import Series
from typing import Optional
def _convert_and_box_cache(
arg: DatetimeScalarOrArrayConvertible,
cache_array: "Series",
name: Optional[str] = None,
) -> "Index":
"""
Convert array of dates with a cache and wrap the result in an Index.
Parameters
----------
arg : integer, float, string, datetime, list, tuple, 1-d array, Series
cache_array : Series
Cache of converted, unique dates
name : string, default None
Name for a DatetimeIndex
Returns
-------
result : Index-like of converted dates
"""
result = Series(arg).map(cache_array)
return _box_as_indexlike(result, utc=None, name=name)
|
60d2f5752b51aa16f2d5d2a67192158dc27f57d1
| 35,780 |
def generate_file_path(package_path, file_name):
"""
Dynamically generate full path to file, including filename and extension.
:param package_path: (array) ordered list of packages in path to test file
:param file_name: (string) name of the file w/ test, including the extension
:return: (string) full path to file, including filename and extension
"""
file_path = ""
for package in package_path:
file_path += package + "/"
return file_path + file_name
|
a6d2ac12cdc726c4727e23301971e921cab9455b
| 35,781 |
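For example, the path builder above joins the package list and filename with slashes:

print(generate_file_path(["tests", "unit", "models"], "test_user.py"))
# tests/unit/models/test_user.py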
from typing import Tuple
def pdread_2col(filename: str, noheader: bool = False) -> Tuple[ndarray, ndarray]:
"""Read in a 2 column file with pandas.
Parameters
----------
filename: str
Name of file to read.
noheader: bool
Flag indicating if there is no column names given in file.
Default = False.
Returns
-------
col1: ndarray
First column as float.
col2: ndarray
Second column as float.
"""
try:
if noheader:
data = pd.read_csv(
filename,
comment="#",
names=["col1", "col2"],
header=None,
dtype=float,
delim_whitespace=True,
)
else:
data = pd.read_csv(
filename,
comment="#",
names=["col1", "col2"],
dtype=float,
delim_whitespace=True,
)
except Exception as e:
print("There was an error trying to read in the file \n{}".format(filename))
raise e
return data["col1"].values, data["col2"].values
|
b1cd7282bf7122abf412351069d7f949be7928cc
| 35,782 |
async def infer_type_map_array(engine, fn, ary):
"""Infer the return type of map_array."""
fn_t = await fn['type']
ary_t = await ary['type']
if not isinstance(ary_t, Array):
raise MyiaTypeError('Expected array')
xref = engine.vref({'type': ary_t.elements})
return Array(await fn_t(xref))
|
c3bed37c4adfb0e1a1acfe86242bae9349edffa7
| 35,783 |
def _fullname(attr):
"""Fully qualified name of an attribute."""
fullname = ""
if hasattr(attr, "__module__"):
fullname += attr.__module__
if hasattr(attr, "__name__"):
if fullname:
fullname += "."
fullname += attr.__name__
if not fullname:
fullname = str(attr)
return fullname
|
672120f7b16175b9fed091fbdd93456ba5d89004
| 35,784 |
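Two illustrative calls to the helper above:

import json

print(_fullname(json.dumps))  # json.dumps
print(_fullname(int))         # builtins.int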
def example_positionfixes():
"""Positionfixes for tests."""
p1 = Point(8.5067847, 47.4)
p2 = Point(8.5067847, 47.5)
p3 = Point(8.5067847, 47.6)
t1 = pd.Timestamp("1971-01-01 00:00:00", tz="utc")
t2 = pd.Timestamp("1971-01-01 05:00:00", tz="utc")
t3 = pd.Timestamp("1971-01-02 07:00:00", tz="utc")
list_dict = [
{"user_id": 0, "tracked_at": t1, "geometry": p1},
{"user_id": 0, "tracked_at": t2, "geometry": p2},
{"user_id": 1, "tracked_at": t3, "geometry": p3},
]
pfs = gpd.GeoDataFrame(data=list_dict, geometry="geometry", crs="EPSG:4326")
pfs.index.name = "id"
# assert validity of positionfixes.
pfs.as_positionfixes
return pfs
|
496283dc4d73588e7c9891577bc8915a8ec58863
| 35,785 |
def do_histtestbasic() -> bool:
"""Run this unit test with hard coded, default parameters."""
file = "sample.txt"
par = [5, 0, 15]
test = HistTestBasic(txtfile=file, params=par)
return test.test(False)
|
b8f033ba62d073a44b9208f8233892774cc7b502
| 35,787 |
def static(**kwargs):
"""
Return a predefined ``dict`` when the given regex matches.
"""
return lambda values: kwargs
|
ac12595cc1b70dd5f9cccd8ae043f650f0ef59c5
| 35,789 |
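The factory above ignores the captured values and always returns the given keyword arguments:

handler = static(status="ok", code=200)
print(handler({"path": "/health"}))  # {'status': 'ok', 'code': 200}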
def get_user_profile(email): # GET
"""Get user profile
Fetches from the user collection by using the user's email as key.
Args:
User's email (str)
Returns:
User profile object (dict)
"""
# NOTE: This method previously called LCS with director credentials in order to retrieve the user's name
# We will update TeamRU to store names along with our user objects, saving the need to call LCS again
user_profile = coll("users").find_one({"_id": email})
if not user_profile:
return {"message": "User not found"}, 404
user_profile["user_id"] = user_profile.pop("_id")
return user_profile, 200
|
10fe0b3ff37d54d51d456392cb1984d92e10c10a
| 35,791 |
import hashlib
def sha1(string):
"""Compute the sha1 hexdigest of the string."""
return hashlib.sha1(string.encode('utf-8')).hexdigest()
|
b663fc501e24a2331f69847024756b97dabc0cd4
| 35,792 |
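A quick demonstration of the hashing helper above; the digest is a 40-character hex string and is deterministic for the same input:

digest = sha1("hello world")
print(len(digest), digest == sha1("hello world"))  # 40 True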
import cgi
import re
def generate_rss():
""" Generate the RSS feed pages on a daily basis """
def parse():
try:
form = cgi.FieldStorage()
form_play = form.getfirst('play', '')
form_date = form.getfirst('start', '')
# alphanumeric only for play
if not form_play in daily_bard_settings.ALLOWED_PLAYCODES:
return False
# 8 number only for date. Obviously not exhaustive
if not re.match('[0-9]{8}$', form_date):
return False
base_year = int(form_date[0:4])
base_month = int(form_date[4:6])
base_day = int(form_date[6:8])
base_date = datetime.date(base_year, base_month, base_day)
today = datetime.datetime.utcnow().date()
final = generate(form_play, base_date, today)
print final
except:
# Swallow any error
return False
return True
if not parse():
# Show empty page in failure case
print "Content-Type: text/xml"
print
print "<?xml version=\"1.0\" encoding=\"utf-8\"?><feed/>"
|
9b92b9cc6095ccbce0bfab204476c205c7e47569
| 35,793 |
import datetime
def current_weather():
"""
Get all stations
render template to client
"""
global last_updated_weather_time, last_updated_weather_data, first_run_weather
if (((last_updated_weather_time - datetime.datetime.now())).total_seconds() < -900 or first_run_weather):
first_run_weather = False
sql_get_weather = """SELECT *
FROM dublin_bikes.weather_current
ORDER BY last_update DESC
LIMIT 1;
"""
print("INSIDE 2")
rows = sql_query(sql_get_weather)
last_updated_weather_data = []
for row in rows:
last_updated_weather_data.append(dict(row)) # inset dict of data into list
print(row)
last_updated_weather_time = datetime.datetime.now()
return jsonify(weather=last_updated_weather_data)
|
25dfb78759903c1f089d397b548a79d36304cf74
| 35,794 |
import re
from datetime import timedelta
def parse_timedelta(time_str):
"""
Parse a time string e.g. (2h13m) into a timedelta object. Stolen on the web
"""
regex = re.compile(r'^((?P<days>[\.\d]+?)d)?((?P<hours>[\.\d]+?)h)?((?P<minutes>[\.\d]+?)m)?((?P<seconds>[\.\d]+?)s)?$')
time_str=replace(time_str,{
'sec':'s',
'second': 's',
'seconds': 's',
'minute':'m',
'minutes':'m',
'min':'m',
'mn':'m',
'days':'d',
'day':'d',
'hours':'h',
'hour':'h'})
parts = regex.match(time_str)
if parts is None: raise ValueError("Could not parse any time information from '{}'. Examples of valid strings: '8h', '2d8h5m20s', '2m4s'".format(time_str))
time_params = {name: float(param) for name, param in parts.groupdict().items() if param}
return timedelta(**time_params)
|
0b18f77197f8122cb92ccd7b7552baab800173f7
| 35,795 |
def gmtime(space, w_seconds=None):
"""gmtime([seconds]) -> (tm_year, tm_mon, tm_day, tm_hour, tm_min,
tm_sec, tm_wday, tm_yday, tm_isdst)
Convert seconds since the Epoch to a time tuple expressing UTC (a.k.a.
GMT). When 'seconds' is not passed in, convert the current time instead.
"""
# rpython does not support that a variable has two incompatible builtins
# as value so we have to duplicate the code. NOT GOOD! see localtime() too
seconds = _get_inttime(space, w_seconds)
t_ref = lltype.malloc(rffi.TIME_TP.TO, 1, flavor='raw')
t_ref[0] = seconds
p = c_gmtime(t_ref)
lltype.free(t_ref, flavor='raw')
if not p:
raise OperationError(space.w_ValueError, space.newtext(_get_error_msg()))
return _tm_to_tuple(space, p)
|
f4095b64c60c67454f7ef24e295438f3eff68a77
| 35,796 |
from collections import defaultdict
from functools import partial
from itertools import repeat
from typing import Callable
def nested_defaultdict(default_factory: Callable, depth: int = 1) -> defaultdict:
"""Creates a nested default dictionary of arbitrary depth with a specified callable as leaf."""
if not depth:
return default_factory()
result = partial(defaultdict, default_factory)
for _ in repeat(None, depth - 1):
result = partial(defaultdict, result)
return result()
|
58b162431d70f559dc58a256e78d201644cddde3
| 35,797 |
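A usage sketch for the nested factory above:

counts = nested_defaultdict(int, depth=2)
counts["2024"]["jan"] += 1
counts["2024"]["feb"] += 3
print(counts["2024"]["jan"], counts["2024"]["feb"])  # 1 3
print(counts["2025"]["mar"])                         # 0 - missing leaves default to int()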
def main(client_id, client_secret):
"""Console script for boxcast_python_sdk."""
client = BoxCastClient(client_id, client_secret)
account = client.get_account()
print(account)
return 0
|
ec6562052d7bf6cb8594356df16cbcf0d98314b2
| 35,798 |
import pytz
from datetime import datetime
def now(timezone):
"""Get the current time in the given timezone
Args:
timezone: The desired timezone as a string. eg 'US/Eastern'
"""
utc = pytz.timezone('UTC').localize(datetime.utcnow())
return utc.astimezone(pytz.timezone(timezone))
|
ebd89601ebcb945f01c3e68fbe0f5350e4fc2d0a
| 35,799 |
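For instance, the helper above returns a zone-aware timestamp for the requested timezone:

eastern = now('US/Eastern')
print(eastern.isoformat())  # e.g. 2024-03-01T09:15:30.123456-05:00 (offset depends on DST)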
def ReadBlackList(path):
    """Read a blacklist of forbidden directories and files.
    Ignore lines starting with a # so we can comment the datafile.
    Args:
      path: file to load the blacklist from.
    Returns:
      list of blacklisted path strings
    """
    blacklist_file = open(path, 'r')
    catalog = []
    for entry in blacklist_file:
        entry = entry.strip()
        if not entry or entry[:1] == '#':
            pass  # ignore comment and empty lines in blacklist file
        else:
            catalog.append(entry)
    blacklist_file.close()
    return catalog
|
694b9bd8c09385677d49e8563ac8f08b923cadb0
| 35,800 |
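A small usage sketch for ReadBlackList above; the file name and its contents are made up for illustration:

with open("blacklist.txt", "w") as f:
    f.write("# directories we never back up\n/tmp\n/var/cache\n\n")
print(ReadBlackList("blacklist.txt"))  # ['/tmp', '/var/cache']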
def height_water_critical(FlowRate, Width):
"""Return the critical local water height.
:param FlowRate: flow rate of water
:type FlowRate: u.m**3/u.s
    :param Width: width of channel
:type Width: u.m
:return: critical water height
:rtype: u.m
"""
ut.check_range([FlowRate.magnitude, ">0", "Flow rate"],
[Width.magnitude, ">0", "Width"])
return ((FlowRate / (Width * np.sqrt(1*u.gravity))) ** (2/3)).to(u.m)
|
39602709854f04e007533fe133a47edf722e6a5a
| 35,801 |
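As a rough plain-number check of the relation used above (dropping the pint unit registry), the critical depth is h_c = (Q / (W * sqrt(g)))**(2/3); the numbers here are purely illustrative:

Q, W, g = 0.05, 0.5, 9.81   # m^3/s, m, m/s^2
h_c = (Q / (W * g ** 0.5)) ** (2 / 3)
print(round(h_c, 4))        # ~0.1006 m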
from collections import defaultdict


def author_number_of_files_owned(results):
    """ Number of files owned by author.
    :param results: results from author_file_owned()
    :return: {author: number of files owned}
    :rtype: dict
    """
    authors = defaultdict(int)
    for item in results:
        authors[item.name] += 1
    return authors
|
7a4b07f58bc0f4898408a16353c7b5a7f2108904
| 35,803 |
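A quick usage sketch for author_number_of_files_owned above; the items only need a `.name` attribute, so a namedtuple stands in for whatever author_file_owned() actually returns:

from collections import namedtuple

Owned = namedtuple("Owned", ["name", "path"])
results = [Owned("alice", "a.py"), Owned("alice", "b.py"), Owned("bob", "c.py")]
print(dict(author_number_of_files_owned(results)))  # {'alice': 2, 'bob': 1}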
def updateBounds(bounds, pt, min=min, max=max):
    """Return the bounding rectangle of rectangle bounds and point pt."""
    x, y = pt
    xMin, yMin, xMax, yMax = bounds
    return min(xMin, x), min(yMin, y), max(xMax, x), max(yMax, y)
|
5e77fd6b422a252a5c8fc1b22a642ce05e5caf82
| 35,804 |
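A usage sketch for updateBounds above:

bounds = (0, 0, 100, 100)
bounds = updateBounds(bounds, (150, 40))
bounds = updateBounds(bounds, (-10, 120))
print(bounds)  # (-10, 0, 150, 120)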
def prepare_metadata(devkit_archive):
"""Extract dataset metadata required for HDF5 file setup.
Parameters
----------
devkit_archive : str or file-like object
The filename or file-handle for the gzipped TAR archive
containing the ILSVRC2012 development kit.
Returns
-------
n_train : int
The number of examples in the training set.
valid_groundtruth : ndarray, 1-dimensional
An ndarray containing the validation set groundtruth in terms of
0-based class indices.
n_test : int
The number of examples in the test set
wnid_map : dict
A dictionary that maps WordNet IDs to 0-based class indices.
"""
# Read what's necessary from the development kit.
synsets, raw_valid_groundtruth = read_devkit(devkit_archive)
# Mapping to take WordNet IDs to our internal 0-999 encoding.
wnid_map = dict(zip((s.decode('utf8') for s in synsets['WNID']),
xrange(1000)))
# Map the 'ILSVRC2012 ID' to our zero-based ID.
ilsvrc_id_to_zero_based = dict(zip(synsets['ILSVRC2012_ID'],
xrange(len(synsets))))
# Map the validation set groundtruth to 0-999 labels.
valid_groundtruth = [ilsvrc_id_to_zero_based[id_]
for id_ in raw_valid_groundtruth]
# Get number of test examples from the test archive
with tar_open(TEST_IMAGES_TAR) as f:
n_test = sum(1 for _ in f)
# Ascertain the number of filenames to prepare appropriate sized
# arrays.
n_train = int(synsets['num_train_images'].sum())
log.info('Training set: {} images'.format(n_train))
log.info('Validation set: {} images'.format(len(valid_groundtruth)))
log.info('Test set: {} images'.format(n_test))
n_total = n_train + len(valid_groundtruth) + n_test
    log.info('Total (train/valid/test): {} images'.format(n_total))
return n_train, valid_groundtruth, n_test, wnid_map
|
3d0e7cd536983c6f505bf5d62d5a8be4e50ab167
| 35,806 |
# NOTE: `io` and `plot` used below are the package's own helper modules (results I/O and plotting), not the standard library `io`.
def optimize_results(sample_names,
control_samples,
FPs_per_genome,
plot_roc=False,
plot_tuning_curve=False,
filtered_results_file=None,
output_dir=None,
mutations_dataframe=None):
"""
Optimizes the list of detected mutations according to the list of control samples and desired level of false positives set by the user.
Filtered results will be loaded to the mutations attribute of the MutationDetection object.
:param sample_names: The list of sample names included in the analysis. (list of str)
:param control_samples: List of sample names that should be used as control samples in the sense, that no unique mutations are expected in them. (The sample names listed here must match a subset of the sample names listed in bam_filename.) (list of str)
:param FPs_per_genome: The total number of false positives tolerated in a control sample. (int)
:param plot_roc: If True, ROC curves will be plotted as a visual representation of the optimization process. (default: False) (boolean)
:param plot_tuning_curve: If True, tuning curves displaying the number of mutations found in different samples with different score filters will be plotted as a visual representation of the optimization process. (default: False) (boolean)
:param filtered_results_file: The path to the file where filtered results should be saved. (default: [output_dir]/filtered_results.csv) (str)
:param output_dir: the path to the directory where raw mutation tables are located (default: None) (str)
:param mutations_dataframe: the pandas.DataFrame where mutations are located (default: None) (pandas.DataFrame)
:returns: (score_lim_dict, filtered_results)
- score_lim_dict: a dictionary containing the optimized score values for each ploidy separately
- filtered_results: a pandas.DataFrame containing the filtered mutations
"""
if (len(sample_names) < 2):
raise ValueError('Result optimization cannot be performed on less than 2 samples.')
if sum([1 for s in control_samples if s not in sample_names]) > 0:
raise ValueError('List of "control_samples" is not a subset of "sample_names".')
if filtered_results_file is None:
if output_dir is not None:
filtered_results_file = output_dir + '/filtered_results.csv'
else:
filtered_results_file = 'filtered_results.csv'
if not isinstance(mutations_dataframe, type(None)):
if not mutations_dataframe.__class__ == __pd.core.frame.DataFrame:
raise ValueError('Error: "mutations_dataframe" must be a pandas DataFrame.')
elif sorted(list(mutations_dataframe.columns)) != sorted(['sample_name', 'chr', 'pos', 'type', 'score',
'ref', 'mut', 'cov', 'mut_freq', 'cleanliness',
'ploidy']):
msg = 'Error: The DataFrame supplied in argument "mutations_dataframe" does not have the required columns.'
msg += '\n'
msg += 'Make sure to have the following columns: sample_name, chr, pos, type, score, ' \
'ref, mut, cov, mut_freq, cleanliness, ploidy'
raise ValueError(msg)
df_somatic = mutations_dataframe[~mutations_dataframe['sample_name'].str.contains(',')]
else:
if (not __os.path.isfile(output_dir + '/all_SNVs.isomut2')):
raise ValueError(
'SNV results (' + output_dir + '/all_SNVs.isomut2) could not be found, results cannot be optimized.')
if (not __os.path.isfile(output_dir + '/all_indels.isomut2')):
raise ValueError(
'Indel results (' + output_dir + '/all_indels.isomut2) could not be found, results cannot be optimized.')
__subprocess.check_call(
'cat ' + output_dir + '/all_SNVs.isomut2 | awk \'BEGIN{FS="\t"; OFS="\t";}{if($1 !~ /,/) print $0;}\' > ' + output_dir + '/unique_SNVs.isomut2',
shell=True)
__subprocess.check_call(
'cat ' + output_dir + '/all_indels.isomut2 | awk \'BEGIN{FS="\t"; OFS="\t";}{if($1 !~ /,/) print $0;}\' > ' + output_dir + '/unique_indels.isomut2',
shell=True)
df_SNV = __pd.read_csv(output_dir + '/unique_SNVs.isomut2', header=0,
names=['sample_name', 'chr', 'pos', 'type', 'score',
'ref', 'mut', 'cov', 'mut_freq', 'cleanliness', 'ploidy'],
sep='\t',
low_memory=False)
df_indel = __pd.read_csv(output_dir + '/unique_indels.isomut2', header=0,
names=['sample_name', 'chr', 'pos', 'type', 'score',
'ref', 'mut', 'cov', 'mut_freq', 'cleanliness', 'ploidy'],
sep='\t',
low_memory=False)
__subprocess.check_call('rm ' + output_dir + '/unique_SNVs.isomut2', shell=True)
__subprocess.check_call('rm ' + output_dir + '/unique_indels.isomut2', shell=True)
df_somatic = __pd.concat([df_SNV, df_indel])
df_somatic['ploidy'] = __pd.to_numeric(df_somatic['ploidy'], errors='ignore')
unique_ploidies = sorted(list(df_somatic['ploidy'].unique()))
if plot_tuning_curve:
plot.__plot_tuning_curve(control_samples=control_samples,
mutation_dataframe=df_somatic,
return_string=False,
unique_samples=sample_names)
score_lim_dict, f = plot.__plot_roc(mutation_dataframe=df_somatic,
control_samples=control_samples,
FPs_per_genome=FPs_per_genome,
plot_roc=plot_roc,
unique_samples=sample_names)
if mutations_dataframe is not None:
        def filter_mutation(row):  # renamed so it does not shadow the builtin `filter`
            if row['score'] < score_lim_dict[row['type']][unique_ploidies.index(row['ploidy'])]:
                return 'FILTER'
            else:
                return 'PASS'
        mutations_dataframe_filtered = __copy.deepcopy(mutations_dataframe)
        mutations_dataframe_filtered['FILTER'] = mutations_dataframe_filtered.apply(filter_mutation, axis=1)
return score_lim_dict, mutations_dataframe_filtered[mutations_dataframe_filtered['FILTER'] != 'FILTER'].drop(
'FILTER', axis=1)
else:
io.__print_filtered_results(output_dir=output_dir,
filename=filtered_results_file,
unique_ploidies=unique_ploidies,
score_lim_dict=score_lim_dict,
control_samples=control_samples,
FPs_per_genome=FPs_per_genome)
return score_lim_dict, io.load_mutations(output_dir=output_dir, filename=filtered_results_file)
|
add62fbf94e7f984d883957801b59dca022287a6
| 35,807 |
from datetime import datetime
def get_detections(
args,
config: DictConfig,
module: ModuleType,
model: nn.Module,
geoscreens_data: LightningDataModule,
video_id: str,
):
"""
Returns:
Dict: keys = frame index, value = a dict of detections that looks something like::
{
"frame_idx": 0,
"seconds": 0.00,
"time":: "00:00:00.0000",
"label_ids": [17, 39],
"scores": [0.5707356929779053, 0.5458141565322876],
"bboxes": [
{
"xmin": 522.35400390625,
"ymin": 177.13229370117188,
"xmax": 640.0,
"ymax": 362.1326599121094,
},
{
"xmin": 537.4188232421875,
"ymin": 139.51719665527344,
"xmax": 635.33642578125,
"ymax": 157.04588317871094,
},
],
}
"""
infer_tfms = tfms.A.Adapter(
[*tfms.A.resize_and_pad(config.dataset_config.img_size), tfms.A.Normalize()]
)
infer_ds = GeoscreensInferenceDataset(
args.video_frames_path, geoscreens_data.parser.class_map, video_id, infer_tfms
)
infer_dl = module.infer_dl(infer_ds, batch_size=8, shuffle=False, num_workers=16)
preds = module.predict_from_dl(model, infer_dl, detection_threshold=0.5)
detections = {}
frame_counter = 0
for frame_info, pred in zip(infer_ds.frames, preds):
detections[frame_counter] = {
"frame_idx": frame_info["frame_idx"],
"seconds": frame_info["seconds"],
"time": datetime.utcfromtimestamp(frame_info["seconds"]).strftime("%H:%M:%S:%f"),
"label_ids": [int(l) for l in pred.detection.label_ids],
"scores": pred.detection.scores.tolist(),
"bboxes": [
{
"xmin": float(box.xmin),
"ymin": float(box.ymin),
"xmax": float(box.xmax),
"ymax": float(box.ymax),
}
for box in pred.detection.bboxes
],
}
frame_counter += 1
return detections
|
3c28598b6b9bd465434932ee8809e73cecedf698
| 35,808 |
def gen_format_string(a: np.array):
"""
Generates a matrix format in the shape of a.
>>> a = np.array([[1,2,3],[4,5,6],[7,8,9]])
print(gen_format_string(a))
┌ ┐
|{:>2}{:>2}{:>2} |
|{:>2}{:>2}{:>2} |
|{:>2}{:>2}{:>2} |
└ ┘
The template can be rendered with the help of the unboxing operator, e.g.:
>>> gen_format_string(a).format(*a.flatten())
or
>>> gen_format_string(a).format(*[x for x in np.nditer(a)])
"""
str_arr = copy_as_str(a)
max_lens = max_len_in_col(str_arr) + 1
rows = a.shape[0]
inner_juice = "".join(map(lambda x: "{{:>{}}}".format(x), max_lens))
# bad readability :(, but apparently faster than invoking list constructor like with [map(lambda x: " ", max_lens)]
spacers = *map(lambda x: " ", max_lens),
header = "┌" + inner_juice.format(*spacers) + " ┐\n"
body = ""
for x in range(rows):
body += "|" + inner_juice + " |\n"
footer = "└" + inner_juice.format(*spacers) + " ┘"
ret = header + body + footer
return ret
|
c975ab9c99b75f14757f8fecc5c1215180af3f98
| 35,810 |
def create_quantum_model():
"""Create a QNN model circuit and readout operation to go along with it."""
data_qubits = cirq.GridQubit.rect(4, 4) # a 4x4 grid.
readout = cirq.GridQubit(-1, -1) # a single qubit at [-1,-1]
circuit = cirq.Circuit()
# Prepare the readout qubit.
circuit.append(cirq.X(readout))
circuit.append(cirq.H(readout))
builder = CircuitLayerBuilder(data_qubits=data_qubits, readout=readout)
# Then add layers (experiment by adding more).
builder.add_layer(circuit, cirq.XX, "xx1")
builder.add_layer(circuit, cirq.ZZ, "zz1")
# Finally, prepare the readout qubit.
circuit.append(cirq.H(readout))
return circuit, cirq.Z(readout)
|
c7dcc577f3ef7a5ced1dd4b9bf08be4e5065d315
| 35,812 |
def get_completeness_coef(iocs_per_feed: int, total_iocs: int) -> float:
"""
    This function wrapper calculates the feed completeness factor.
Parameters:
iocs_per_feed: int — Number of IoCs in the CTI feed
total_iocs: int — Number of IoCs across all CTI feeds
Returns:
Completeness coefficient (float, 0..1)
"""
return functions.completeness(iocs_per_feed, total_iocs)
|
9f677549f90b8e8d6eb2d328eb91068ea32920bf
| 35,813 |
def valid_model(model):
"""Check if the model is valid."""
if model is None:
raise ValueError("The SaraBotTagger model can not be None")
path_to_model = f'{absolute_path}/bot_model/{model}'
model = load(path_to_model)
return True
|
8e00d0907e31f1370bd52532ae06df6f7458737a
| 35,814 |
def getApi(token_path):
""" Логинится в вк и возвращает готовую к работе сессию """
with open(token_path) as f:
token = f.read().strip()
session = vk_api.VkApi(token=token, api_version="5.52")
return session.get_api()
|
143b11a9e2612bc10bf41b716d21adcf14bdc60d
| 35,815 |
import re
def projects():
"""
General purpose provider of paths to test projects with the conventional layout
"""
base_path = PROJECT_ROOT / "tests" / "fixtures"
projects = {
re.match(r"^([_\w]+)_project", path.name).groups()[0]: path.resolve()
for path in base_path.glob("*_project")
}
projects.update(
{
f"{project_key}/"
+ re.match(
fr".*?/{project_key}_project/([_\w\/]+?)(:?\/pyproject)?.toml$",
path.as_posix(),
).groups()[0]: path
for project_key, project_path in projects.items()
for path in project_path.glob("**/*.toml")
}
)
return projects
|
250473fbb89f65d8163e7d2f6085cc7217ea2860
| 35,816 |
def qair2rh(qair: xr.DataArray, temp: xr.DataArray, pres: xr.DataArray) -> xr.DataArray:
"""
Get the relative humdity from the specific humidity.
Args:
qair (xr.DataArray): The specific humidity (dimensionless).
temp (xr.DataArray): The temperature (kelvin).
pres (xr.DataArray): The pressure (pascal).
Returns:
xr.DataArray: The relative humidity.
"""
    t_0c = 273.15
    # Magnus-type saturation vapour pressure; note that 6.112 yields es in hPa,
    # while e below is in the same units as `pres` (Pa per the docstring), so the
    # two should be brought to consistent units before taking the ratio.
    es = 6.112 * np.exp((17.76 * (temp - t_0c)) / (temp - t_0c + 243.5))
    e = qair * pres / (0.378 * qair + 0.622)
rh = e / es
# rh[rh > 100] = 1
# rh[rh < 0] = 0
return rh
|
3e230a3c79d8106486f3eb3b67a1459705bbfb89
| 35,817 |
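A small worked example for qair2rh above; the values are illustrative scalars wrapped in DataArrays, and the unit caveat noted in the function body still applies:

import xarray as xr

qair = xr.DataArray(0.01)      # specific humidity (kg/kg)
temp = xr.DataArray(293.15)    # 20 degrees C in kelvin
pres = xr.DataArray(101325.0)  # standard pressure in Pa
print(float(qair2rh(qair, temp, pres)))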
import re
def read_trees(input_dir, features, signals, backgrounds, selection=None,
negative_weight_treatment="passthrough",
equalise_signal=True, branch_w="EvtWeight",
col_w="MVAWeight", col_target="Signal"):
"""
Read in Ttrees.
Files in the input directory should be named according to the schema
"histofile_$PROCESS.root". Within each file should be a Ttree named
"Ttree_$PROCESS" containing event data. A branch named "EvtWeight"
containing event weights is expected in each Ttree.
    Parameters
    ----------
    input_dir : string
        Directory containing input ROOT files for the classifier.
    features : list of strings
        Names of features to be used in classifier training. Should correspond
        to Ttree branches in the input files.
    signals : list of strings
        Names of processes to be considered signal.
    backgrounds : list of strings
        Names of processes to be considered background.
    selection : string, optional
        ROOT selection string specifying the cuts that should be made on
        read-in trees. If None, no cuts are made.
    negative_weight_treatment : "passthrough", "abs", "zero", or "reweight", optional
        How negative event weights should be treated
        "passthrough": negative weights are unaltered (default).
        "abs": the absolute value of all negative weights is taken.
        "zero": negative weights are clipped to zero.
        "reweight": The absolute value of all negative weights is taken.
                    The original normalisation for each process is then
                    restored by linearly scaling the resulting weights
                    down. This will fail if any processes have an overall
                    negative weight.
    equalise_signal : bool, optional
        If True (the default), the weights of the signal channels are linearly
        scaled so that the overall normalisation for both the signal and
        background channels is the same.
    branch_w : string, optional
        Name of branch in ROOT files containing event weights.
    col_w : string, optional
        Name of column in returned DataFrame containing "MVA Weights". These
        are the event weights after the transformations specified by the
        negative_weight_treatment and equalise_signal options have taken place.
    col_target : string, optional
        Name of column in returned DataFrame containing the target values for
        the classifier. This will be 1 for events in processes specified by
        signals and 0 otherwise.
    Returns
    -------
    df : DataFrame
        DataFrame containing the Ttree data, MVA weights (as "MVAWeight") and
        classification flag for each event ("Signal" == 1 for signal events,
        0 otherwise).
Notes
-----
Options for this function are handled entirely by the global configuration.
"""
def get_process_name(path):
"""
Given a path to a ROOT file, return the name of the process contained.
Parameters
----------
path : string
Path to ROOT file.
Returns
-------
string :
Name of process.
"""
return re.split(r"histofile_|\.", path)[-2]
sig_dfs = []
bkg_dfs = []
processes = signals + backgrounds
for process in processes:
df = read_tree(input_dir + "histofile_{}.root".format(process),
"Ttree_{}".format(process),
columns=features + [branch_w],
where=selection)
if df.empty:
continue
# Deal with weights
if negative_weight_treatment == "reweight":
df[col_w] = reweight(df[branch_w])
elif negative_weight_treatment == "abs":
df[col_w] = np.abs(df[branch_w])
elif negative_weight_treatment == "passthrough":
df[col_w] = df[branch_w]
elif negative_weight_treatment == "zero":
df[col_w] = np.clip(df[branch_w], a_min=0, a_max=None)
else:
raise ValueError("Bad value for option negative_weight_treatment:",
negative_weight_treatment)
# Count events
print("Process ", process, " contains ", len(df.index), " (",
df[branch_w].sum(), " ± ", df[branch_w].pow(2).sum() ** 0.5,
") events", sep='')
# Label process
df = df.assign(Process=process)
# Split into signal and background
if process in signals:
sig_dfs.append(df)
else:
bkg_dfs.append(df)
sig_df = pd.concat(sig_dfs)
bkg_df = pd.concat(bkg_dfs)
# Equalise signal and background weights if we were asked to
if equalise_signal:
sig_df[col_w], bkg_df[col_w] = balance_weights(sig_df[col_w],
bkg_df[col_w])
# Label signal and background
sig_df[col_target] = 1
bkg_df[col_target] = 0
df = pd.concat([sig_df, bkg_df]).reset_index(drop=True)
# Count events again
print("There are ", len(sig_df.index), " (", sig_df[branch_w].sum(), " ± ",
sig_df[branch_w].pow(2).sum() ** 0.5, ") signal events", sep='')
print("There are ", len(bkg_df.index), " (", bkg_df[branch_w].sum(), " ± ",
bkg_df[branch_w].pow(2).sum() ** 0.5, ") background events", sep='')
print("Making ", len(df.index), " (", df[branch_w].sum(), " ± ",
df[branch_w].pow(2).sum() ** 0.5, ") events in total", sep='')
    return df
|
88e27750eb84173f0505533da4c0e3e44d80a9cf
| 35,818 |
def package_form(request):
"""
Upload a new package
"""
return render(request, 'packages/package_form.html', {
'form': PackageForm(),
})
|
f57c92fcdbf67a4d410af6ca20f44e3d0bdb72a5
| 35,819 |
# NOTE: `List` below is the application's SQLAlchemy model, defined elsewhere.
def destory(list_id):
    """Delete a list."""
    list_obj = db_session.query(List).filter(List.id == list_id).first()
    if list_obj.user_id != login_session['user_id']:
        flash("This list does not belong to your account")
        return redirect(url_for('list.index'))
    db_session.delete(list_obj)
    db_session.commit()
    flash("List %s destroyed" % list_obj.name)
    return redirect(url_for('list.index'))
|
54899380c4a80d076815b9fe3f835fc44fb5d911
| 35,820 |
def categorize_os():
"""
Categorize operating system by its parent distribution.
Args:
None
Raises:
None
    Returns:
        str or None: "debian" if the OS belongs to the Debian family, otherwise None
"""
os_name = get_system_name()
if os_name in ["ubuntu", "kali", "backtrack", "debian"]:
return "debian"
# elif some other OS, add their name
else: # if OS not in list
return None
|
84df15c78e3e9c40294e6ad77afbac857df3e29e
| 35,823 |
def has_names_directive(block: FencedBlock) -> bool:
"""Does the code block have a share-names or clear-names directive."""
assert block.role == Role.CODE, "must be a Python code block."
return block.has_directive(Marker.SHARE_NAMES) or block.has_directive(
Marker.CLEAR_NAMES
)
|
cd70019e724c4f0370fa1c3c9db8d52c22c80e3f
| 35,824 |
import math
def make_reber_classification(n_samples, invalid_size=0.5,
return_indeces=False):
"""
Generate random dataset for Reber grammar classification.
    Invalid words contain the same letters as the Reber grammar, but
    they are built without following the grammar rules.
Parameters
----------
n_samples : int
Number of samples in dataset.
invalid_size : float
Proportion of invalid words in dataset, defaults to ``0.5``.
Value must be between ``0`` and ``1``.
return_indeces : bool
If ``True``, each word will be converted to array where each
letter converted to the index. Defaults to ``False``.
Returns
-------
tuple
Return two lists. First contains words and second - labels for them.
Examples
--------
>>> from neupy.datasets import make_reber_classification
>>>
>>> data, labels = make_reber_classification(10, invalid_size=0.5)
>>> data
array(['SXSXVSXXVX', 'VVPS', 'VVPSXTTS', 'VVS', 'VXVS', 'VVS',
'PPTTTXPSPTV', 'VTTSXVPTXVXT', 'VSSXSTX', 'TTXVS'],
dtype='<U12')
>>> labels
array([0, 1, 0, 1, 1, 1, 0, 0, 0, 1])
>>>
>>> data, labels = make_reber_classification(
... 4, invalid_size=0.5, return_indeces=True)
>>> data
array([array([1, 3, 1, 4]),
array([0, 3, 0, 3, 0, 4, 3, 0, 4, 4]),
array([1, 3, 1, 2, 3, 1, 2, 4]),
array([0, 3, 0, 0, 3, 0, 4, 2, 4, 1, 0, 4, 0])], dtype=object)
"""
if n_samples < 2:
raise ValueError("There are must be at least 2 samples")
if not 0 < invalid_size < 1:
raise ValueError("`invalid_size` argument value must be between "
"zero and one, got {}".format(invalid_size))
    n_invalid_words = int(math.ceil(n_samples * invalid_size))
    n_valid_words = n_samples - n_invalid_words
    valid_words = make_reber(n_valid_words)
    valid_labels = [1] * n_valid_words
    invalid_words = []
    invalid_labels = [0] * n_invalid_words
for i in range(n_invalid_words):
word_length = randint(3, 14)
word = [choice(avaliable_letters) for _ in range(word_length)]
invalid_words.append(''.join(word))
samples, labels = shuffle(np.array(valid_words + invalid_words),
np.array(valid_labels + invalid_labels))
if return_indeces:
samples = convert_letters_to_indeces(samples)
return samples, labels
|
c38627651ed2fb3f18bbebbefe3e06801c1accf5
| 35,825 |
def batchify(X, size):
"""
```
Splits X into separate batch sizes specified by size.
Args:
X(list): elements
size(int): batch size
Returns:
list of evenly sized batches with the last batch having the remaining elements
```
"""
return [X[x : x + size] for x in range(0, len(X), size)]
|
d3e4ad015eb3b8bb4cdbaa6bf87a2bc1989c4614
| 35,826 |
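A usage sketch for batchify above:

print(batchify(list(range(7)), 3))  # [[0, 1, 2], [3, 4, 5], [6]]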
def _single_entity_stmt(
start_day: dt,
end_day: dt,
event_types: tuple[str, ...],
entity_id: str,
entity_id_like: str,
) -> StatementLambdaElement:
"""Generate a logbook query for a single entity."""
stmt = lambda_stmt(
lambda: _select_events_without_states(start_day, end_day, event_types)
.where(
Events.event_data.like(entity_id_like)
| EventData.shared_data.like(entity_id_like)
)
.union_all(
_select_states(start_day, end_day).where(States.entity_id == entity_id),
_select_events_context_only().where(
Events.context_id.in_(
_select_entity_context_ids_sub_query(
start_day, end_day, event_types, entity_id, entity_id_like
)
)
),
)
.order_by(Events.time_fired)
)
return stmt
|
0cbc33f1715b65cfbd803ab9674c6264d73a096e
| 35,827 |
def listtoslides(data):
"""Checks if format is correct + adds img and durration elements"""
slides = []
for slide in data:
slide = slide[:2]
slide[0] = slide[0][:25]
slide[1] = slide[1][:180]
slide.append("imgpath")
slide.append(0)
slides.append(slide)
return slides
|
b4b7180fc5755eff6a32ff8b448f1dfd65ad6f75
| 35,828 |
def clean_data(sample):
    """
    Overall cleaning function, structured this way to make multithreaded use easier.
    Args:
        sample: a tuple containing the post content and the title content
    Returns:
        dict with the cleaned "title" and "content"
    """
    (content, title) = sample
    sample = dict()
    # clean the data
    sample["title"] = clean_weibo_title(title.strip())
    sample["content"] = clean_weibo_content(content.strip())
    return sample
|
268f98dfc8e8aaeb7f6a10887278964b6deb56db
| 35,829 |
def read_cat_as_dataframe(fichero_cat, tipo_registro, columns=None):
"""
    Returns a pandas.DataFrame with the records of the desired type.
    - fichero_cat: .cat file; it may be compressed.
    - tipo_registro: record type, e.g.: '11', '15'
    - columns: list with the names of the fields to load.
reg_def = FinCat(tipo_registro, columns)
cat_gen = line_generator(fichero_cat, encoding='latin1')
return pd.DataFrame.from_records(list(tuple_record_generator(reg_def, cat_gen)), columns=reg_def.columns)
|
47a25aa147bcf00ff3aa45abc4c5ef524a11761a
| 35,830 |
def init_cppn_from_img(image: np.array, color: bool = True, sim_threshold: float = 0.6, max_optim_iter: int = 10000,
init_stop_bound: int = 100) -> CPPN:
"""
Initializes a CPPN using the given image by optimizing the SSIM score between the CPPNs output and the image.
This is done by trying multiple net depths for the CPPN and pre-optimizing its weights using evolutionary strategies.
Args:
image (np.array): the image to be optimized for.
        color (bool): Initialize the CPPN for either RGB if True, or grayscale if False.
        sim_threshold: A similarity threshold to prevent the CPPN producing images too similar to the input image.
        max_optim_iter: Maximum number of weight pre-optimization iterations.
        init_stop_bound: Maximum number of consecutive non-improving CPPN initializations before stopping.
    Returns:
        CPPN: The initialized CPPN, which produces images somewhat similar to the input image
"""
print('Initializing CPPN ...', end='\r')
cppn = CPPN(color=color, img_size=image.shape[0])
gen_image = cppn.render_image()
sim_score = similarity(image, gen_image, color)
iteration = 0
not_improved_since = 0
curr_best = gen_image, cppn.get_weights(), cppn.net_depth, cppn.z
# print('finding initial CPPN config...')
# find an initial CPPN that can produce at least somewhat similar images
while sim_score < sim_threshold and not_improved_since < init_stop_bound:
new_gen_image = cppn.render_image()
new_sim_score = similarity(image, new_gen_image, color)
if new_sim_score > sim_score:
sim_score = new_sim_score
# print(f'iter:{iteration}, sim:{sim_score} depth:{cppn.net_depth}')
curr_best = new_gen_image, cppn.get_weights(), cppn.net_depth, cppn.z
not_improved_since = 0
else:
cppn.reset()
not_improved_since += 1
iteration += 1
# run pre-optimization on sim score up until threshold or max_iter
# print('pre-optimizing weights...')
cppn = CPPN(color=color, net_depth=curr_best[2])
cppn.set_weights(curr_best[1])
cppn.z = curr_best[3]
iteration = 0
optimization_steps = 0
while sim_score < sim_threshold and iteration < max_optim_iter:
weights = cppn.get_weights()
new_weights = mutate(weights, 0.3, 0.05)
cppn.set_weights(new_weights)
new_gen_image = cppn.render_image()
new_sim_score = similarity(image, new_gen_image, color)
if new_sim_score > sim_score:
sim_score = new_sim_score
optimization_steps += 1
# print(iteration, sim_score)
else:
cppn.set_weights(weights) # if no improvement, back to old state
iteration += 1
# print(f'{iteration} {sim_score} {new_sim_score}')
if optimization_steps < 1 and sim_score < 0.3:
return init_cppn_from_img(image, color)
# print(f'initialization finished. cppn net_depth:{cppn.net_depth}')
print('Initializing CPPN ... Done.')
return cppn
|
bb97754776dfceb05d144e7f125d5eda2008a5bd
| 35,831 |
def unwind_create_nodes_query(data, labels=None, keys=None):
""" Generate a parameterised ``UNWIND...CREATE`` query for bulk
loading nodes into Neo4j.
    :param data: iterable of node property records, one per node to create
    :param labels: labels to attach to the created nodes
    :param keys: optional property keys describing the values in each record
        of `data` (when records are value lists rather than mappings)
    :return: (query, parameters) tuple
"""
return cypher_join("UNWIND $data AS r",
_create_clause("_", (tuple(labels or ()),)),
_set_properties_clause("r", keys),
data=list(data))
|
3c0c444ecd1497399c27607bf0deb3026f017a03
| 35,832 |
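A hypothetical usage sketch for unwind_create_nodes_query above; the exact Cypher text depends on the internal _create_clause and _set_properties_clause helpers, so the shape shown in the comments is only indicative:

query, params = unwind_create_nodes_query(
    [{"name": "Alice"}, {"name": "Bob"}],
    labels=["Person"],
)
# query is expected to look roughly like:
#   UNWIND $data AS r
#   CREATE (_:Person)
#   SET _ = r
# params == {"data": [{"name": "Alice"}, {"name": "Bob"}]}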
def _scalePoints(points, scale=1, convertToInteger=True):
"""
Scale points and optionally convert them to integers.
"""
if convertToInteger:
points = [
(int(round(x * scale)), int(round(y * scale)))
for (x, y) in points
]
else:
points = [(x * scale, y * scale) for (x, y) in points]
return points
|
3ce3fedfbf7c428386af1571cc1a770bd9f66018
| 35,833 |
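A usage sketch for _scalePoints above:

pts = [(1.2, 3.7), (0.5, -0.5)]
print(_scalePoints(pts, scale=2))                          # [(2, 7), (1, -1)]
print(_scalePoints(pts, scale=2, convertToInteger=False))  # [(2.4, 7.4), (1.0, -1.0)]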
def get_customer_tax_rate(request, product):
"""Returns the specfic customer tax for the current customer and product.
"""
cache_key = 'cached_customer_tax_rate_%s' % product.pk
if request and hasattr(request, cache_key):
return getattr(request, cache_key)
customer_tax = get_first_valid(request, CustomerTax.objects.all(), product)
if customer_tax:
taxrate = customer_tax.rate
else:
taxrate = _calc_product_tax_rate(request, product)
if request:
setattr(request, cache_key, taxrate)
return taxrate
|
64491092626f0bb12c9a8a0522fc81acfe55846c
| 35,834 |
def iscoroutinepartial(coro):
"""
    Returns True if the callable is a coroutine function, possibly wrapped in one or more functools.partial objects. See additional information here_.
:param coro: Function
:return: bool
.. _here: https://goo.gl/C0S4sQ
"""
while True:
parent = coro
coro = getattr(parent, 'func', None)
if coro is None:
break
return gen.is_coroutine_function(parent)
|
dace8744f79475518a0c52f488c17d9b685fae07
| 35,835 |
def AppendFchunk(funcea, ea1, ea2):
"""
Append a function chunk to the function
@param funcea: any address in the function
@param ea1: start of function tail
@param ea2: end of function tail
@return: 0 if failed, 1 if success
@note: If a chunk exists at the specified addresses, it must have exactly
the specified boundaries
"""
func = idaapi.get_func(funcea)
if not func:
return 0
else:
return idaapi.append_func_tail(func, ea1, ea2)
|
709c1790d6dd1e472ab97efdab84eeef9a87ab8f
| 35,837 |
import soundfile as sf  # assumed: `sf` is the conventional alias for the soundfile package


def testing_audio():
    """Load data for the tests."""
    return sf.read(TEST_WAVEFILE_PATH, always_2d=True, dtype='float32')
|
c61f3429bcb1ed7745ae07282b65e0b0b0a6cfb8
| 35,838 |
def linr(xdata, ydata):
"""Return the linear regression coefficients a and b for (x,y) data.
Returns the y-intercept and slope of the straight line of the least-
squared regression line, that is, the line which minimises the sum of
the squares of the errors between the actual and calculated y values.
>>> xdata = [0.0, 0.25, 1.25, 1.75, 2.5, 2.75]
>>> ydata = [1.5*x + 0.25 for x in xdata]
>>> linr(xdata, ydata)
(0.25, 1.5)
"""
t = xysums(xdata, ydata)
if t.n < 2:
raise StatsError('regression line requires at least two points')
b = t.Sxy/t.Sxx
a = t.sumy/t.n - b*t.sumx/t.n
return (a, b)
|
0761004020d5a4b723e39f9988a4e379a64506cd
| 35,839 |
def album_sticker_get(client, album, sticker):
"""Gets a sticker associated with an album."""
# I am pretty sure that MPD only implements stickers for songs, so
# the sticker gets attached to the first song in the album.
tracks = client.find("album", album)
if len(tracks) == 0:
return
return client.sticker_get("song", tracks[0]["file"], "album_" + sticker)
|
4fd02292c1d7be672de9ccc926f5880c7b831503
| 35,840 |
def print_confusion_matrix(confusion_matrix, class_names, filename, normalize = True, figsize = (5,5), fontsize=16):
"""Prints a confusion matrix, as returned by sklearn.metrics.confusion_matrix, as a heatmap.
Arguments
---------
confusion_matrix: numpy.ndarray
The numpy.ndarray object returned from a call to sklearn.metrics.confusion_matrix.
Similarly constructed ndarrays can also be used.
class_names: list
An ordered list of class names, in the order they index the given confusion matrix.
    figsize: tuple
        A 2-long tuple, the first value determining the horizontal size of the outputted figure,
        the second determining the vertical size. Defaults to (5,5).
    fontsize: int
        Font size for axes labels. Defaults to 16.
Returns
-------
matplotlib.figure.Figure
The resulting confusion matrix figure
"""
if normalize:
confusion_matrix = (confusion_matrix.astype('float') / confusion_matrix.sum(axis=1)[:, np.newaxis])*100
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
df_cm = pd.DataFrame(
confusion_matrix, index=class_names, columns=class_names,
)
fig = plt.figure(figsize=figsize)
fmt = '.2f' if normalize else 'd'
#####set heatmap customization#####
try:
heatmap = sns.heatmap(df_cm, annot=True, fmt=fmt, cmap='GnBu', linewidths=.5, cbar=False, annot_kws={"size": 16})
except ValueError:
raise ValueError("Confusion matrix values must be integers.")
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(), rotation=0, ha='right', fontsize=fontsize)
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(), rotation=45, ha='right', fontsize=fontsize)
plt.ylabel('True label', fontsize=16, fontname='sans-serif')
plt.xlabel('Predicted label', fontsize=16, fontname='sans-serif')
if filename != None:
fig.savefig(filename + '.png', bbox_inches='tight') #store image as .png
return fig
|
bfb7cfd33a35d5e1b1a08255c2e777c26d45c566
| 35,841 |
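A usage sketch for print_confusion_matrix above; it assumes numpy, pandas, seaborn and matplotlib are already imported in the module as np, pd, sns and plt, and the class labels are illustrative:

from sklearn.metrics import confusion_matrix

y_true = [0, 0, 1, 1, 2, 2, 2]
y_pred = [0, 1, 1, 1, 2, 2, 0]
cm = confusion_matrix(y_true, y_pred)
fig = print_confusion_matrix(cm, class_names=["cat", "dog", "bird"],
                             filename=None, normalize=True)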
# NOTE: `T` below is the web2py/S3 translation helper provided by the framework environment, not something importable from `re`.
def group():
""" RESTful CRUD controller """
tablename = "pr_group"
table = s3db[tablename]
response.s3.filter = (table.system == False) # do not show system groups
s3mgr.configure("pr_group_membership",
list_fields=["id",
"person_id",
"group_head",
"description"
])
rheader = lambda r: s3db.pr_rheader(r, tabs = [(T("Group Details"), None),
(T("Address"), "address"),
(T("Contact Data"), "contact"),
(T("Members"), "group_membership")
])
output = s3_rest_controller(rheader=rheader)
return output
|
110e7c17dc8a4a3f129845f278e1a4d9760da8c3
| 35,843 |
import torch
def colorization_inference(model, img):
"""Inference image with the model.
Args:
model (nn.Module): The loaded model.
img (str): File path of input image.
Returns:
np.ndarray: The predicted colorization result.
"""
# cfg = model.cfg
device = next(model.parameters()).device # model device
# build the data pipeline
if model.cfg.get('demo_pipeline', None):
test_pipeline = model.cfg.demo_pipeline
elif model.cfg.get('test_pipeline', None):
test_pipeline = model.cfg.test_pipeline
else:
test_pipeline = model.cfg.val_pipeline
# remove gt from test_pipeline
    # To be completed: the block below may not be entirely correct
keys_to_remove = ['gt', 'gt_path']
for key in keys_to_remove:
for pipeline in list(test_pipeline):
if 'key' in pipeline and key == pipeline['key']:
test_pipeline.remove(pipeline)
if 'keys' in pipeline and key in pipeline['keys']:
pipeline['keys'].remove(key)
if len(pipeline['keys']) == 0:
test_pipeline.remove(pipeline)
if 'meta_keys' in pipeline and key in pipeline['meta_keys']:
pipeline['meta_keys'].remove(key)
# build the data pipeline
test_pipeline = Compose(test_pipeline)
# prepare data
data = None
if isinstance(img, str):
data = dict(img_gray_path=img)
if isinstance(img, np.ndarray):
data = dict(img_gray=img)
data = test_pipeline(data)
data = scatter(collate([data], samples_per_gpu=1), [device])[0]
# # forward the model
# model.eval()
# with torch.no_grad():
# results = model.forward(data['gray_img']).squeeze()
# forward the model
with torch.no_grad():
result = model(test_mode=True, **data)
final = post_process(result, img)
return final
|
8d47cfb2c5242e23ebd4e57952badb3d41bec450
| 35,844 |
from jinja2 import Environment, FileSystemLoader


def load_templates_from_dir(directory: str) -> Environment:
    """Produce an Environment targeted at a directory."""
    return Environment(loader=FileSystemLoader(directory))
|
8d2e8062164d4fa02be41c059253524e5da86b92
| 35,846 |
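A usage sketch for load_templates_from_dir above; the directory and template name are illustrative:

env = load_templates_from_dir("templates")
template = env.get_template("hello.html")  # e.g. a file containing "Hello {{ name }}!"
print(template.render(name="world"))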