content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M)
---|---|---
def get_hdf_filepaths(hdf_dir):
"""Get a list of downloaded HDF files which is be used for iterating through hdf file conversion."""
print "Building list of downloaded HDF files..."
hdf_filename_list = []
hdf_filepath_list = []
for dir in hdf_dir:
for dir_path, subdir, files in os.walk(dir):
for f in files:
if f.endswith(".hdf"):
hdf_filename_list.append(os.path.splitext(f)[0])
hdf_filepath_list.append(os.path.join(dir_path, f))
return hdf_filename_list, hdf_filepath_list | a64aa83d06b218a0faf3372e09c2ac337ce106aa | 14,306 |
def mask_depth_image(depth_image, min_depth, max_depth):
""" mask out-of-range pixel to zero """
ret, depth_image = cv2.threshold(
depth_image, min_depth, 100000, cv2.THRESH_TOZERO)
ret, depth_image = cv2.threshold(
depth_image, max_depth, 100000, cv2.THRESH_TOZERO_INV)
depth_image = np.expand_dims(depth_image, 2)
return depth_image | 39fde62083666a9bb4a546c29aa736a23724e25f | 14,307 |
def prompt_for_password(prompt=None):
"""Fake prompt function that just returns a constant string"""
return 'promptpass' | 49499970c7698b08f38078c557637907edef3223 | 14,309 |
def heading(start, end):
"""
Find how to get from the point on a planet specified as a tuple start
to a point specified in the tuple end
"""
start = ( radians(start[0]), radians(start[1]))
end = ( radians(end[0]), radians(end[1]))
delta_lon = end[1] - start[1]
delta_lat = log(tan(pi/4 + end[0]/2)/tan(pi/4 + start[0]/2))
return int(round((360 + degrees(atan2(delta_lon, delta_lat))) % 360)) | 97bd69fc308bdf1a484901ad25b415def20f25ee | 14,310 |
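A quick usage sketch for the rhumb-line bearing helper above, assuming the math names it relies on (radians, degrees, atan2, tan, log, pi) are available at module level as in the snippet:

from math import radians, degrees, atan2, tan, log, pi

# Bearings from a (lat, lon) start to a (lat, lon) end, rounded to whole degrees.
print(heading((0.0, 0.0), (10.0, 0.0)))   # 0   (due north)
print(heading((0.0, 0.0), (0.0, 10.0)))   # 90  (due east)
print(heading((10.0, 10.0), (0.0, 0.0)))  # ~225 (roughly south-west)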
def get_frame_list(video, jump_size = 6, **kwargs):
"""
Returns list of frame numbers including first and last frame.
"""
frame_numbers = list(range(0, video.frame_count, jump_size))
last_frame_number = video.frame_count - 1
if frame_numbers[-1] != last_frame_number:
frame_numbers.append(last_frame_number)
return frame_numbers | 786de04b4edf224045216de226ac61fdd42b0d7b | 14,311 |
def build_xlsx_response(wb, title="report"):
""" Take a workbook and return a xlsx file response """
title = generate_filename(title, '.xlsx')
myfile = BytesIO()
myfile.write(save_virtual_workbook(wb))
response = HttpResponse(
myfile.getvalue(),
content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
response['Content-Disposition'] = 'attachment; filename=%s' % title
response['Content-Length'] = myfile.tell()
return response | c9990c818b45fc68646ff28d39ea0b914e362db1 | 14,312 |
def top_k(*args, **kwargs):
""" See https://www.tensorflow.org/api_docs/python/tf/nn/top_k .
"""
return tensorflow.nn.top_k(*args, **kwargs) | 0b0a9f250a466f439301d840af2213f6b2758656 | 14,313 |
def determine_d_atoms_without_connectivity(zmat, coords, a_atoms, n):
"""
A helper function to determine d_atoms without connectivity information.
Args:
zmat (dict): The zmat.
coords (list, tuple): Just the 'coords' part of the xyz dict.
a_atoms (list): The determined a_atoms.
n (int): The 0-index of the atom in the zmat to be added.
Returns:
list: The d_atoms.
"""
d_atoms = [atom for atom in a_atoms]
for i in reversed(range(n)):
if i not in d_atoms and i in list(zmat['map'].keys()) and (i >= len(zmat['symbols']) or not is_dummy(zmat, i)):
angle = calculate_angle(coords=coords, atoms=[zmat['map'][z_index] for z_index in d_atoms[1:] + [i]])
if not is_angle_linear(angle):
d_atoms.append(i)
break
if len(d_atoms) < 4:
# try again and consider dummies
for i in reversed(range(n)):
if i not in d_atoms and i in list(zmat['map'].keys()):
angle = calculate_angle(coords=coords, atoms=[zmat['map'][z_index] for z_index in d_atoms[1:] + [i]])
if not is_angle_linear(angle):
d_atoms.append(i)
break
return d_atoms | 8b2a60f6092e24b9bc6f71e4ad8753e06c2e6373 | 14,314 |
def all_of_them():
"""
Return page with all products with given name from API.
"""
if 'username' in session:
return render_template('productsearch.html', username=escape(session['username']), vars=lyst)
else:
return "Your are not logged in" | cee22ba57e108edaedc9330c21caa9cb60968aa5 | 14,315 |
def is_blank(line):
"""Determines if a selected line consists entirely of whitespace."""
return whitespace_re.match(line) is not None | 1120d7a70ce5c08eb5f179cd3d2a258af5cd3bc2 | 14,316 |
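is_blank relies on a module-level whitespace_re that is not shown; a plausible, hypothetical definition plus a couple of checks:

import re

# Hypothetical regex assumed by is_blank(): a line containing only whitespace
# (the empty string also matches).
whitespace_re = re.compile(r"\s*$")

assert is_blank("   \t")
assert is_blank("")
assert not is_blank("  x  ")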
import re
import collections
import urllib.parse
import simplejson
def _gff_line_map(line, params):
"""Map part of Map-Reduce; parses a line of GFF into a dictionary.
Given an input line from a GFF file, this:
- decides if the file passes our filtering limits
- if so:
- breaks it into component elements
- determines the type of attribute (flat, parent, child or annotation)
- generates a dictionary of GFF info which can be serialized as JSON
"""
gff3_kw_pat = re.compile(r"\w+=")
def _split_keyvals(keyval_str):
"""Split key-value pairs in a GFF2, GTF and GFF3 compatible way.
GFF3 has key value pairs like:
count=9;gene=amx-2;sequence=SAGE:aacggagccg
GFF2 and GTF have:
Sequence "Y74C9A" ; Note "Clone Y74C9A; Genbank AC024206"
name "fgenesh1_pg.C_chr_1000003"; transcriptId 869
"""
quals = collections.defaultdict(list)
if keyval_str is None:
return quals
# ensembl GTF has a stray semi-colon at the end
if keyval_str[-1] == ';':
keyval_str = keyval_str[:-1]
# GFF2/GTF has a semi-colon with at least one space after it.
# It can have spaces on both sides; wormbase does this.
# GFF3 works with no spaces.
# Split at the first one we can recognize as working
parts = keyval_str.split(" ; ")
if len(parts) == 1:
parts = keyval_str.split("; ")
if len(parts) == 1:
parts = keyval_str.split(";")
# check if we have GFF3 style key-vals (with =)
is_gff2 = True
if gff3_kw_pat.match(parts[0]):
is_gff2 = False
key_vals = [p.split('=') for p in parts]
# otherwise, we are separated by a space with a key as the first item
else:
pieces = [p.strip().split(" ") for p in parts]
key_vals = [(p[0], " ".join(p[1:])) for p in pieces]
for key, val in key_vals:
val = (val[1:-1] if (len(val) > 0 and val[0] == '"'
and val[-1] == '"') else val)
if val:
quals[key].extend(val.split(','))
# if we don't have a value, make this a key=True/False style
# attribute
else:
quals[key].append('true')
for key, vals in quals.items():
quals[key] = [urllib.parse.unquote(v) for v in vals]
return quals, is_gff2
def _nest_gff2_features(gff_parts):
"""Provide nesting of GFF2 transcript parts with transcript IDs.
exons and coding sequences are mapped to a parent with a transcript_id
in GFF2. This is implemented differently at different genome centers
and this function attempts to resolve that and map things to the GFF3
way of doing them.
"""
# map protein or transcript ids to a parent
for transcript_id in ["transcript_id", "transcriptId", "proteinId"]:
try:
gff_parts["quals"]["Parent"] = \
gff_parts["quals"][transcript_id]
break
except KeyError:
pass
# case for WormBase GFF -- everything labelled as Transcript
if gff_parts["quals"].has_key("Transcript"):
# parent types
if gff_parts["type"] in ["Transcript"]:
if not gff_parts["id"]:
gff_parts["id"] = gff_parts["quals"]["Transcript"][0]
# children types
elif gff_parts["type"] in ["intron", "exon", "three_prime_UTR",
"coding_exon", "five_prime_UTR", "CDS", "stop_codon",
"start_codon"]:
gff_parts["quals"]["Parent"] = gff_parts["quals"]["Transcript"]
return gff_parts
strand_map = {'+' : 1, '-' : -1, '?' : None, None: None}
line = line.strip()
if line[:2] == "##":
return [('directive', line[2:])]
elif line[0] != "#":
parts = line.split('\t')
should_do = True
if params.limit_info:
for limit_name, limit_values in params.limit_info.items():
cur_id = tuple([parts[i] for i in
params.filter_info[limit_name]])
if cur_id not in limit_values:
should_do = False
break
if should_do:
assert len(parts) >= 9, line
gff_parts = [(None if p == '.' else p) for p in parts]
gff_info = dict()
# collect all of the base qualifiers for this item
quals, is_gff2 = _split_keyvals(gff_parts[8])
gff_info["is_gff2"] = is_gff2
if gff_parts[1]:
quals["source"].append(gff_parts[1])
if gff_parts[5]:
quals["score"].append(gff_parts[5])
if gff_parts[7]:
quals["phase"].append(gff_parts[7])
gff_info['quals'] = dict(quals)
gff_info['rec_id'] = gff_parts[0]
# if we are describing a location, then we are a feature
if gff_parts[3] and gff_parts[4]:
gff_info['location'] = [int(gff_parts[3]) - 1,
int(gff_parts[4])]
gff_info['type'] = gff_parts[2]
gff_info['id'] = quals.get('ID', [''])[0]
gff_info['strand'] = strand_map[gff_parts[6]]
if is_gff2:
gff_info = _nest_gff2_features(gff_info)
# features that have parents need to link so we can pick up
# the relationship
if 'Parent' in gff_info['quals']:
final_key = 'child'
elif gff_info['id']:
final_key = 'parent'
# Handle flat features
else:
final_key = 'feature'
# otherwise, associate these annotations with the full record
else:
final_key = 'annotation'
return [(final_key, (simplejson.dumps(gff_info) if params.jsonify
else gff_info))]
return [] | 555ca7d4ce455563e7230d4b85f4f4404fa839bc | 14,317 |
def smoothEvolve(problem, orig_point, first_ref, second_ref):
"""Evolves using RVEA with abrupt change of reference vectors."""
pop = Population(problem, assign_type="empty", plotting=False)
try:
pop.evolve(slowRVEA, {"generations_per_iteration": 200, "iterations": 15})
except IndexError:
return pop.archive
try:
pop.evolve(
slowRVEA,
{
"generations_per_iteration": 10,
"iterations": 20,
"old_point": orig_point,
"ref_point": first_ref,
},
)
except IndexError:
return pop.archive
try:
pop.evolve(
slowRVEA,
{
"generations_per_iteration": 10,
"iterations": 20,
"old_point": first_ref,
"ref_point": second_ref,
},
)
except IndexError:
return pop.archive
return pop.archive | fd7f7e82e8b029597affd63ec04da3d40c049c98 | 14,318 |
def combine_color_channels(discrete_rgb_images):
"""
Combine discrete r, g, b images into RGB images.
:param discrete_rgb_images:
:return:
"""
color_imgs = []
for r, g, b in zip(*discrete_rgb_images):
# pca output is float64, positive and negative. normalize the images to [0, 255] rgb
r = (255 * (r - np.max(r)) / -np.ptp(r)).astype(int)
g = (255 * (g - np.max(g)) / -np.ptp(g)).astype(int)
b = (255 * (b - np.max(b)) / -np.ptp(b)).astype(int)
color_imgs.append(cv2.merge((r, g, b)))
return color_imgs | 246653a698c997faffade25405b1dfafb8236510 | 14,319 |
def decohere_earlier_link(tA, tB, wA, wB, T_coh):
"""Applies decoherence to the earlier generated of the two links.
Parameters
----------
tA : float
Waiting time of one of the links.
wA : float
Corresponding fidelity
tB : float
Waiting time of the other link.
wB : float
Corresponding fidelity
t_both : float
Time both links experience decoherence (e.g. communication time)
T_coh : float
Memory coherence time. If set to 0, there is no decay.
Returns
-------
Tuple (float : tA, float : tB, float : wA, float : wB) after decoherence.
"""
delta_t = abs(tA - tB)
if(tA < tB):
wA = wern_after_memory_decoherence(wA, delta_t, T_coh)
elif(tB < tA):
wB = wern_after_memory_decoherence(wB, delta_t, T_coh)
return wA, wB | 0ec19f50d1673f69211ac7af21ab942612fe8a67 | 14,320 |
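The helper wern_after_memory_decoherence is not shown above; a common model is exponential decay of the Werner parameter over the extra storage time, so here is a hypothetical sketch (not necessarily the project's exact definition) together with a usage line:

import numpy as np

def wern_after_memory_decoherence(w, delta_t, T_coh):
    # Hypothetical helper: decay the Werner parameter w exponentially over
    # the extra storage time delta_t; T_coh == 0 means "no decay".
    if T_coh == 0:
        return w
    return w * np.exp(-delta_t / T_coh)

# The link generated earlier (smaller waiting time) is the one that decays:
wA, wB = decohere_earlier_link(tA=2.0, tB=5.0, wA=0.95, wB=0.90, T_coh=100.0)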
import torch
def train(
network: RNN,
data: np.ndarray,
epochs: int = 10,
_n_seqs: int = 10,
_n_steps: int = 50,
lr: float = 0.001,
clip: int = 5,
val_frac: float = 0.2,
cuda: bool = True,
print_every: int = 10,
):
"""Train RNN."""
network.train()
opt = torch.optim.Adam(network.parameters(), lr=lr)
criterion = nn.CrossEntropyLoss()
val_idx = int(len(data) * (1 - val_frac))
data, val_data = data[:val_idx], data[val_idx:]
if cuda:
network.cuda()
step = 0
train_loss = []
validation_loss = []
for i in range(epochs):
h = network.init_hidden(_n_seqs)
for x, y in get_batches(data, _n_seqs, _n_steps):
step += 1
# One-hot encode, make Torch tensors
x = one_hot_encode(x, network.vocab)
inputs, targets = torch.from_numpy(x), torch.from_numpy(y)
if cuda:
inputs, targets = inputs.cuda(), targets.cuda()
h = tuple([m.data for m in h])
network.zero_grad()
output, h = network.forward(inputs, h)
loss = criterion(output, targets.view(_n_seqs * _n_steps))
loss.backward()
# Avoid exploding gradients
nn.utils.clip_grad_norm_(network.parameters(), clip)
opt.step()
if step % print_every == 0:
# Validation loss
val_h = network.init_hidden(_n_seqs)
val_losses = []
for x, y in get_batches(val_data, _n_seqs, _n_steps):
x = one_hot_encode(x, network.vocab)
x, y = torch.from_numpy(x), torch.from_numpy(y)
val_h = tuple([m.data for m in val_h])
inputs, targets = x, y
if cuda:
inputs, targets = inputs.cuda(), targets.cuda()
output, val_h = network.forward(inputs, val_h)
val_loss = criterion(output, targets.view(_n_seqs * _n_steps))
val_losses.append(val_loss.item())
train_loss.append(loss.item())
validation_loss.append(np.mean(val_losses))
print(
f"Epoch: {i + 1} / {epochs},",
f"Step: {step},",
f"Loss: {loss.item():.4f},",
"Val Loss: {:.4f}".format(np.mean(val_losses)),
)
return train_loss, validation_loss | 97c642c0b2849cb530121d914c586bc6ee76a26b | 14,321 |
def simple_lunar_phase(jd):
"""
This just does a quick-and-dirty estimate of the Moon's phase given the date.
"""
lunations = (jd - 2451550.1) / LUNAR_PERIOD
percent = lunations - int(lunations)
phase_angle = percent * 360.
delta_t = phase_angle * LUNAR_PERIOD / 360.
moon_day = int(delta_t + 0.5)
phase = get_phase_description(phase_angle)
bgcolor = get_moon_color(delta_t)
return dict(
angle = phase_angle,
day = moon_day,
phase = phase,
days_since_new_moon = delta_t,
bgcolor = bgcolor,
) | 539d407241a0390b140fad4b67e2173ff1dee66c | 14,322 |
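simple_lunar_phase assumes a module-level LUNAR_PERIOD (about 29.53 days) plus two helpers that are not shown. A hypothetical sketch of get_phase_description, only to illustrate the mapping from phase angle to label:

def get_phase_description(phase_angle):
    # Hypothetical helper: map the phase angle in degrees since new moon
    # onto the usual eight phase names, 45 degrees per bin.
    names = ["New Moon", "Waxing Crescent", "First Quarter", "Waxing Gibbous",
             "Full Moon", "Waning Gibbous", "Last Quarter", "Waning Crescent"]
    return names[int(((phase_angle + 22.5) % 360) // 45)]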
def fetch_traj(data, sample_index, colum_index):
""" Returns the state sequence. It also deletes the middle index, which is
the transition point from history to future.
"""
# data shape: [sample_index, time, feature]
traj = np.delete(data[sample_index, :, colum_index:colum_index+1], history_len-1, axis=1)
return traj.flatten() | da373d3890f2c89754e36f78b78fb582b429109d | 14,323 |
from datetime import datetime
def validate_date(period: str, start: bool = False) -> pd.Timestamp:
"""Validate the format of date passed as a string.
:param period: Date in string. If None, date of today is assigned.
:type period: str
:param start: Whether argument passed is a starting date or an ending date,
defaults to False.
:type start: bool, optional
:raises IntegerDateInputError: If integer type object is passed.
:return: Date with format YYYY-MM-DD or YY-MM-DD.
:rtype: pandas.Timestamp
"""
if isinstance(period, int):
raise IntegerDateInputError('Input type of period should be in string.')
if period is None:
date = _convert_none_to_date(start)
else:
try:
date_format = '%y-%m-%d'
period = datetime.strptime(period, date_format)
except ValueError:
date_format = '%Y-%m-%d'
finally:
date = string_to_date(period, date_format)
return date | 99c76ee2e23beaff92a8ad67bf38b26c5f33f4bd | 14,324 |
import copy
def rec_module_mic(echograms, mic_specs):
"""
Apply microphone directivity gains to a set of given echograms.
Parameters
----------
echograms : ndarray, dtype = Echogram
Target echograms. Dimension = (nSrc, nRec)
mic_specs : ndarray
Microphone directions and directivity factor. Dimension = (nRec, 4)
Returns
-------
rec_echograms : ndarray, dtype = Echogram
Echograms subjected to microphone gains. Dimension = (nSrc, nRec)
Raises
-----
TypeError, ValueError: if method arguments mismatch in type, dimension or value.
Notes
-----
Each row of `mic_specs` is expected to be described as [x, y, z, alpha],
with (x, y, z) begin the unit vector of the mic orientation.
`alpha` must be contained in the range [0(dipole), 1(omni)],
so that directivity is expressed as: d(theta) = a + (1-a)*cos(theta).
"""
nSrc = echograms.shape[0]
nRec = echograms.shape[1]
_validate_echogram_array(echograms)
_validate_ndarray_2D('mic_specs', mic_specs, shape0=nRec, shape1=C+1)
mic_vecs = mic_specs[:,:C]
mic_coeffs = mic_specs[:,-1]
rec_echograms = copy.copy(echograms)
# Do nothing if all orders are zeros(omnis)
if not np.all(mic_coeffs == 1):
for ns in range(nSrc):
for nr in range(nRec):
nRefl = len(echograms[ns, nr].value)
# Get vectors from source to receiver
rec_vecs = echograms[ns, nr].coords
rec_vecs = rec_vecs / np.sqrt(np.sum(np.power(rec_vecs,2), axis=1))[:,np.newaxis]
mic_gains = mic_coeffs[nr] + (1 - mic_coeffs[nr]) * np.sum(rec_vecs * mic_vecs[nr,:], axis=1)
rec_echograms[ns, nr].value = echograms[ns, nr].value * mic_gains[:,np.newaxis]
_validate_echogram_array(rec_echograms)
return rec_echograms | 91abe86095cab7ccb7d3f2f001892df4a809106b | 14,325 |
def is_string_like(obj): # from John Hunter, types-free version
"""Check if obj is string."""
try:
obj + ''
except (TypeError, ValueError):
return False
return True | cb7682f91009794011c7c663f98e539d8543c8fd | 14,326 |
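A few checks for the duck-typed string test above:

# Anything that supports "+ ''" counts as a string; everything else does not.
assert is_string_like("abc")
assert is_string_like("päron")
assert not is_string_like(42)
assert not is_string_like(["a", "b"])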
def divide():
"""Handles division, returns a string of the answer"""
a = int(request.args["a"])
b = int(request.args["b"])
quotient = str(int(operations.div(a, b)))
return quotient | 50c48f9802b3f11e3322b88a45068cad7e354637 | 14,328 |
def ax2cu(ax):
"""Axis angle pair to cubochoric vector."""
return Rotation.ho2cu(Rotation.ax2ho(ax)) | 6f1c6e181d1e25a2bf28605f01d66fa6a8ffcd45 | 14,329 |
def define_genom_loc(current_loc, pstart, p_center, pend, hit_start, hit_end, hit_strand, ovl_range):
""" [Local] Returns location label to be given to the annotated peak, if upstream/downstream or overlapping one edge of feature."""
all_pos = ["start", "end"]
closest_pos, dmin = distance_to_peak_center(
p_center, hit_start, hit_end, hit_strand, all_pos)
if current_loc == "not.specified": # Not internal
if closest_pos == "end" and any(ovl_range):
return "overlapEnd"
elif closest_pos == "start" and any(ovl_range):
return "overlapStart"
elif not any(ovl_range):
# Check about direction :"upstream", "downstream"
current_loc = find_peak_dir(
hit_start, hit_end, hit_strand, pstart, p_center, pend)
return current_loc
return current_loc | 87f89a1d392935a008b7997930bf16dda96cf36b | 14,331 |
import torch
def actp(Gij, X0, jacobian=False):
""" action on point cloud """
X1 = Gij[:,:,None,None] * X0
if jacobian:
X, Y, Z, d = X1.unbind(dim=-1)
o = torch.zeros_like(d)
B, N, H, W = d.shape
if isinstance(Gij, SE3):
Ja = torch.stack([
d, o, o, o, Z, -Y,
o, d, o, -Z, o, X,
o, o, d, Y, -X, o,
o, o, o, o, o, o,
], dim=-1).view(B, N, H, W, 4, 6)
elif isinstance(Gij, Sim3):
Ja = torch.stack([
d, o, o, o, Z, -Y, X,
o, d, o, -Z, o, X, Y,
o, o, d, Y, -X, o, Z,
o, o, o, o, o, o, o
], dim=-1).view(B, N, H, W, 4, 7)
return X1, Ja
return X1, None | 87f85a713b72de65064224086d9c4715f704d800 | 14,333 |
def isPalindrome(s):
"""Assumes s is a str
Returns True if s is a palindrome; False otherwise.
Punctuation marks, blanks, and capitalization are ignored."""
def toChars(s):
s = s.lower()
letters = ''
for c in s:
if c in 'abcdefghijklmnopqrstuvwxyz':
letters = letters + c
return letters
def isPal(s):
print(' isPal called with', s)
if len(s) <= 1:
print(' About to return True from base case')
return True
else:
answer = s[0] == s[-1] and isPal(s[1:-1])
print(' About to return', answer, 'for', s)
return answer
return isPal(toChars(s)) | 496f5fa92088a5ecb1b99be811e68501513ee3a4 | 14,334 |
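Example calls for the palindrome checker above (the nested isPal prints its trace while recursing):

print(isPalindrome("Able was I, ere I saw Elba"))   # True
print(isPalindrome("Guttag"))                       # False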
import logging
def compute_irs(ground_truth_data,
representation_function,
random_state,
diff_quantile=0.99,
num_train=gin.REQUIRED,
batch_size=gin.REQUIRED):
"""Computes the Interventional Robustness Score.
Args:
ground_truth_data: GroundTruthData to be sampled from.
representation_function: Function that takes observations as input and
outputs a dim_representation sized representation for each observation.
random_state: Numpy random state used for randomness.
diff_quantile: Float value between 0 and 1 to decide what quantile of diffs
to select (use 1.0 for the version in the paper).
num_train: Number of points used for training.
batch_size: Batch size for sampling.
Returns:
Dict with IRS and number of active dimensions.
"""
logging.info("Generating training set.")
mus, ys = utils.generate_batch_factor_code(ground_truth_data,
representation_function, num_train,
random_state, batch_size)
assert mus.shape[1] == num_train
ys_discrete = utils.make_discretizer(ys)
active_mus = _drop_constant_dims(mus)
if not active_mus.any():
irs_score = 0.0
else:
irs_score = scalable_disentanglement_score(ys_discrete.T, active_mus.T,
diff_quantile)["avg_score"]
score_dict = {}
score_dict["IRS"] = irs_score
score_dict["num_active_dims"] = np.sum(active_mus)
return score_dict | 0ff37699367946fa099f4a5a48cbc66d89af8a29 | 14,336 |
def Grab_Pareto_Min_Max(ref_set_array, objective_values, num_objs, num_dec_vars, objectives_names=[],
create_txt_file='No'):
"""
Purposes: Identifies the operating policies producing the best and worst performance in each objective.
Gets called automatically by processing_reference_set.Reference_Set()
Required Args:
1. ref_set_array: an array of P arrays, P=number of points in the reference set. Each of the P
arrays contains N=num_objs+num_vars (number of objective values in optimization problem and number of
decision variables). Decision variables come first, followed by objective values.
2. objective_values: The objective value portion of the ref_set_array. It is also an array of P arrays,
where each of the P arrays is of length num_objs.
3. num_objs = integer number of objective values (e.g., 5)
4. num_dec_vars: integer number of decision variable values (e.g., 30)
Optional Args:
5. objectives_names: (list of names of objectives for objective_values returned from Reference_Set,
as defined above). Example: ['Sediment', 'Hydropower']. Used to name .txt file and provide output dictionary
keys.
6. create_txt_file: String of 'Yes' or 'No'. Indicates whether the user wants the function to produce text files of
operating policies.
Returns:
1. indices of ref_set_array that correspond to the points of the highest and lowest value in each of the
objectives.
2. Various text files that store the DPS parameters corresponding to the operating policy, if the user wishes
to create such files.
"""
# Find operating policy parameters corresponding to the largest objective
# List of index of (1) highest and (2) lowest values (column in objective_values array)
indices = [[0 for i in range(2)] for j in range(num_objs)]
indices_dict = {}
for obj in range(num_objs):
indices[obj][0] = np.argmin(objective_values[obj]) # MIN for each objective
indices[obj][1] = np.argmax(objective_values[obj]) # MAX for each objective
if create_txt_file == 'Yes':
# Save max and min policies so PySedSim can import them.
np.savetxt('RBF_Parameters_max_' + objectives_names[obj] + '.txt',
ref_set_array[indices[obj][1]][0:num_dec_vars], newline=' ')
np.savetxt('RBF_Parameters_min_' + objectives_names[obj] + '.txt',
ref_set_array[indices[obj][0]][0:num_dec_vars], newline=' ')
indices = np.asarray(indices) # cast list as array
indices_dict[objectives_names[obj]] = {'Min': indices[obj][0], 'Max': indices[obj][1]}
return indices_dict | 882ba260576550d2c9de20e950594d5369410187 | 14,337 |
def edist(x, y):
""" Compute the Euclidean distance between two samples x, y \in R^d."""
try:
dist = np.sqrt(np.sum((x-y)**2))
except ValueError:
print('Dimensionality of samples must match!')
else:
return dist | e0b0e586b49bf3e19eafa2cbf271fb3b1ddc2b99 | 14,338 |
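A quick check of the Euclidean distance helper above:

import numpy as np

x = np.array([0.0, 3.0, 4.0])
y = np.zeros(3)
print(edist(x, y))   # 5.0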
def allsec_preorder(h):
"""
Alternative to using h.allsec(). This returns all sections in order from
the root. Traverses the topology each neuron in "pre-order"
"""
#Iterate over all sections, find roots
roots = root_sections(h)
# Build list of all sections
sec_list = []
for r in roots:
add_pre(h,sec_list,r)
return sec_list | 0407bd37c3f975ba91a51b3058d6c619813f2526 | 14,340 |
def get_func_bytes(*args):
"""get_func_bytes(func_t pfn) -> int"""
return _idaapi.get_func_bytes(*args) | 56000602c9d1d98fb1b76c809cce6c3458a3f426 | 14,341 |
def balanced_accuracy(y_true, y_score):
"""Compute accuracy using one-hot representaitons."""
if isinstance(y_true, list) and isinstance(y_score, list):
# Online scenario
if y_true[0].ndim == 2 and y_score[0].ndim == 2:
# Flatten to single (very long prediction)
y_true = np.concatenate(y_true, axis=0)
y_score = np.concatenate(y_score, axis=0)
if y_score.ndim == 3 and y_score.shape[-1] == 1:
y_score = np.ravel(y_score)
y_true = np.ravel(y_true).astype(int)
y_score = np.around(y_score).astype(int)
if y_true.ndim == 2 and y_true.shape[-1] != 1:
y_true = np.argmax(y_true, axis=-1)
if y_true.ndim == 2 and y_true.shape[-1] == 1:
y_true = np.round(y_true).astype(int)
if y_score.ndim == 2 and y_score.shape[-1] != 1:
y_score = np.argmax(y_score, axis=-1)
if y_score.ndim == 2 and y_score.shape[-1] == 1:
y_score = np.round(y_score).astype(int)
return balanced_accuracy_score(y_true, y_score) | 9745769ac047863b902f0e74f17680f9ccee5a53 | 14,342 |
import json
from datetime import datetime
from requests import get
from pytz import timezone as tz, country_timezones as c_tz, country_names as c_n
async def get_weather(weather):
""" For .weather command, gets the current weather of a city. """
if not OWM_API:
await weather.reply(
f"`{JAVES_NNAME}:` **Get an API key from** https://openweathermap.org/ `first.`")
return
APPID = OWM_API
if not weather.pattern_match.group(1):
CITY = DEFCITY
if not CITY:
await weather.reply(
f"`{JAVES_NNAME}:` **Please specify a city or set one as default using the WEATHER_DEFCITY config variable.**"
)
return
else:
CITY = weather.pattern_match.group(1)
timezone_countries = {
timezone: country
for country, timezones in c_tz.items() for timezone in timezones
}
if "," in CITY:
newcity = CITY.split(",")
if len(newcity[1]) == 2:
CITY = newcity[0].strip() + "," + newcity[1].strip()
else:
country = await get_tz((newcity[1].strip()).title())
try:
countrycode = timezone_countries[f'{country}']
except KeyError:
await weather.reply("`Invalid country.`")
return
CITY = newcity[0].strip() + "," + countrycode.strip()
url = f'https://api.openweathermap.org/data/2.5/weather?q={CITY}&appid={APPID}'
request = get(url)
result = json.loads(request.text)
if request.status_code != 200:
await weather.reply(f"`Invalid country.`")
return
cityname = result['name']
curtemp = result['main']['temp']
humidity = result['main']['humidity']
min_temp = result['main']['temp_min']
max_temp = result['main']['temp_max']
desc = result['weather'][0]
desc = desc['main']
country = result['sys']['country']
sunrise = result['sys']['sunrise']
sunset = result['sys']['sunset']
wind = result['wind']['speed']
winddir = result['wind']['deg']
ctimezone = tz(c_tz[country][0])
time = datetime.now(ctimezone).strftime("%A, %I:%M %p")
fullc_n = c_n[f"{country}"]
dirs = ["N", "NE", "E", "SE", "S", "SW", "W", "NW"]
div = (360 / len(dirs))
funmath = int((winddir + (div / 2)) / div)
findir = dirs[funmath % len(dirs)]
kmph = str(wind * 3.6).split(".")
mph = str(wind * 2.237).split(".")
def fahrenheit(f):
temp = str(((f - 273.15) * 9 / 5 + 32)).split(".")
return temp[0]
def celsius(c):
temp = str((c - 273.15)).split(".")
return temp[0]
def sun(unix):
xx = datetime.fromtimestamp(unix, tz=ctimezone).strftime("%I:%M %p")
return xx
await weather.reply(
f"**Temperature:** `{celsius(curtemp)}°C | {fahrenheit(curtemp)}°F`\n"
+
f"**Min. Temp.:** `{celsius(min_temp)}°C | {fahrenheit(min_temp)}°F`\n"
+
f"**Max. Temp.:** `{celsius(max_temp)}°C | {fahrenheit(max_temp)}°F`\n"
+ f"**Humidity:** `{humidity}%`\n" +
f"**Wind:** `{kmph[0]} kmh | {mph[0]} mph, {findir}`\n" +
f"**Sunrise:** `{sun(sunrise)}`\n" +
f"**Sunset:** `{sun(sunset)}`\n\n" + f"**{desc}**\n" +
f"`{cityname}, {fullc_n}`\n" + f"`{time}`") | b93c21eadefa2b4504708ae2663bfdcbf0555668 | 14,343 |
def read_table(source, columns=None, nthreads=1, metadata=None,
use_pandas_metadata=False):
"""
Read a Table from Parquet format
Parameters
----------
source: str or pyarrow.io.NativeFile
Location of Parquet dataset. If a string passed, can be a single file
name or directory name. For passing Python file objects or byte
buffers, see pyarrow.io.PythonFileInterface or pyarrow.io.BufferReader.
columns: list
If not None, only these columns will be read from the file.
nthreads : int, default 1
Number of columns to read in parallel. Requires that the underlying
file source is threadsafe
metadata : FileMetaData
If separately computed
use_pandas_metadata : boolean, default False
If True and file has custom pandas schema metadata, ensure that
index columns are also loaded
Returns
-------
pyarrow.Table
Content of the file as a table (of columns)
"""
if is_string(source):
fs = LocalFileSystem.get_instance()
if fs.isdir(source):
return fs.read_parquet(source, columns=columns,
metadata=metadata)
pf = ParquetFile(source, metadata=metadata)
return pf.read(columns=columns, nthreads=nthreads,
use_pandas_metadata=use_pandas_metadata) | 0d0e4990e88c39920e380cca315332f7c36f6ee8 | 14,344 |
def _export_cert_from_task_keystore(
task, keystore_path, alias, password=KEYSTORE_PASS):
"""
Retrieves certificate from the keystore with given alias by executing
a keytool in context of running container and loads the certificate to
memory.
Args:
task (str): Task id of container that contains the keystore
keystore_path (str): Path inside container to keystore containing
the certificate
alias (str): Alias of the certificate in the keystore
Returns:
x509.Certificate object
"""
args = ['-rfc']
if password:
args.append('-storepass "{password}"'.format(password=password))
args_str = ' '.join(args)
cert_bytes = sdk_tasks.task_exec(
task, _keystore_export_command(keystore_path, alias, args_str)
)[1].encode('ascii')
return x509.load_pem_x509_certificate(
cert_bytes, DEFAULT_BACKEND) | 8ae8a0e1d46f121597d70aff35a26ce15c448399 | 14,346 |
def eta_expand(
path: qlast.Path,
stype: s_types.Type,
*,
ctx: context.ContextLevel,
) -> qlast.Expr:
"""η-expansion of an AST path"""
if not ALWAYS_EXPAND and not stype.contains_object(ctx.env.schema):
# This isn't strictly right from a "fully η expanding" perspective,
# but for our uses, we only need to make sure that objects are
# exposed to the output, so we can skip anything not containing one.
return path
if isinstance(stype, s_types.Array):
return eta_expand_array(path, stype, ctx=ctx)
elif isinstance(stype, s_types.Tuple):
return eta_expand_tuple(path, stype, ctx=ctx)
else:
return path | 294e9b0e2aa158dc4e8d57917031986f824fd55d | 14,347 |
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score
import timeit
def NBAccuracy(features_train, labels_train, features_test, labels_test):
""" compute the accuracy of your Naive Bayes classifier """
# create classifier
clf = GaussianNB()
# fit the classifier on the training features and labels
timeit(lambda: clf.fit(features_train, labels_train), "fit")
# use the trained classifier to predict labels for the test features
labels_pred = timeit(lambda: clf.predict(features_test), "predict")
# calculate and return the accuracy on the test data
# this is slightly different than the example,
# where we just print the accuracy
# you might need to import an sklearn module
accuracy = accuracy_score(labels_test, labels_pred)
return accuracy | c4c6d5a37341a811023bb0040616ebeb1c44be13 | 14,348 |
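Note that `import timeit` above brings in the standard-library module, which cannot be called as `timeit(callable, label)`; the snippet presumably relies on a small project-local helper with that signature. A hypothetical sketch (defining it would shadow the module import):

import time

def timeit(fn, label):
    # Hypothetical timing wrapper: run fn(), report how long it took under
    # the given label, and pass the result through.
    start = time.perf_counter()
    result = fn()
    print(f"{label} took {time.perf_counter() - start:.3f}s")
    return result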
import json
def verify(body): # noqa: E501
"""verify
Verifies user with given user id. # noqa: E501
:param body: User id that is required for verification.
:type body: dict | bytes
:rtype: UserVerificationResponse
"""
if connexion.request.is_json:
body = VerifyUser.from_dict(connexion.request.get_json()) # noqa: E501
user_id = body.user_id
user_json = store.value_of(user_id)
if user_json is None:
response = Error(code=400, message="Invalid user id.")
return response, 400
user_dict = json.loads(user_json)
user = User.from_dict(user_dict)
texts = get_texts(user_id)
if not texts:
response = Error(
code=400, message="Can not recognize characters from identity card."
)
return response, 400
language = body.language
doc_text_label = get_doc(texts, language=language)
user_text_label = create_user_text_label(user)
text_validation_point = validate_text_label(doc_text_label, user_text_label)
print("text_validation_point: " + str(text_validation_point))
names = recognize_face(user_id)
if not names:
response = Error(
code=400, message="Can not recognize face from identity card."
)
return response, 400
face_validation_point = point_on_recognition(names, user_id)
print("face_validation_point: " + str(face_validation_point))
verification_rate = text_validation_point + face_validation_point
response = UserVerificationResponse(
code=200, verification_rate=verification_rate
)
return response, 200
else:
error = Error(code=400, message="Provide a json payload that contains userId")
return error, 400 | 76b2ad631dffbf59b1c2ffeb12069983d0540b51 | 14,349 |
def load_interface(interface_name, data):
"""
Load an interface
:param interface_name: a string representing the name of the interface
:param data: a dictionary of arguments to be used for initializing the interface
:return: an Interface object of the appropriate type
"""
if interface_name not in _interfaces:
raise Exception('Unknown interface')
return load_class_from_data(_interfaces[interface_name], data) | e57e0710d5ad5080b2da1f734581d1b205aedc77 | 14,350 |
def get_full_word(*args):
"""get_full_word(ea_t ea) -> ulonglong"""
return _idaapi.get_full_word(*args) | 8eb45586888fc836146441bb00bc3c3096ea2c5e | 14,352 |
def full_setup(battery_chemistry):
"""This function gets the baseline vehicle and creates modifications for different
configurations, as well as the mission and analyses to go with those configurations."""
# Collect baseline vehicle data and changes when using different configuration settings
vehicle = vehicle_setup()
# Modify Battery
net = vehicle.networks.battery_propeller
bat = net.battery
if battery_chemistry == 'NMC':
bat = SUAVE.Components.Energy.Storages.Batteries.Constant_Mass.Lithium_Ion_LiNiMnCoO2_18650()
elif battery_chemistry == 'LFP':
bat = SUAVE.Components.Energy.Storages.Batteries.Constant_Mass.Lithium_Ion_LiFePO4_18650()
bat.mass_properties.mass = 500. * Units.kg
bat.max_voltage = 500.
initialize_from_mass(bat)
# Assume a battery pack module shape. This step is optional but
# required for thermal analysis of the pack
number_of_modules = 10
bat.module_config.total = int(np.ceil(bat.pack_config.total/number_of_modules))
bat.module_config.normal_count = int(np.ceil(bat.module_config.total/bat.pack_config.series))
bat.module_config.parallel_count = int(np.ceil(bat.module_config.total/bat.pack_config.parallel))
net.battery = bat
net.voltage = bat.max_voltage
configs = configs_setup(vehicle)
# Get the analyses to be used when different configurations are evaluated
configs_analyses = analyses_setup(configs)
# Create the mission that will be flown
mission = mission_setup(configs_analyses, vehicle)
missions_analyses = missions_setup(mission)
# Add the analyses to the proper containers
analyses = SUAVE.Analyses.Analysis.Container()
analyses.configs = configs_analyses
analyses.missions = missions_analyses
return configs, analyses | 9335521ad5a238ab0a566c80d1443cc5c052a37a | 14,353 |
def sph_yn_exact(n, z):
"""Return the value of y_n computed using the exact formula.
The expression used is http://dlmf.nist.gov/10.49.E4 .
"""
zm = mpmathify(z)
s1 = sum((-1)**k*_a(2*k, n)/zm**(2*k+1) for k in range(0, int(n/2) + 1))
s2 = sum((-1)**k*_a(2*k+1, n)/zm**(2*k+2) for k in range(0, int((n-1)/2) + 1))
return -cos(zm - n*pi/2)*s1 + sin(zm - n*pi/2)*s2 | 490c31a3311fb922f5d33337aaeb70f167b25772 | 14,354 |
def f_bis(n1 : float, n2 : float, n3 : float) -> str:
""" ... cf ci-dessus ...
"""
if n1 < n2:
if n2 < n3:
return 'cas 1'
elif n1 < n3:
return 'cas 2'
else:
return 'cas 5'
elif n1 < n3:
return 'cas 3'
elif n2 < n3:
return 'cas 4'
else:
return 'cas 6' | e46c147a5baef02878700e546b11b7ae44b8909a | 14,355 |
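The six branches above correspond to the six possible orderings of the three inputs:

print(f_bis(1, 2, 3))   # 'cas 1'
print(f_bis(1, 3, 2))   # 'cas 2'
print(f_bis(2, 1, 3))   # 'cas 3'
print(f_bis(3, 1, 2))   # 'cas 4'
print(f_bis(2, 3, 1))   # 'cas 5'
print(f_bis(3, 2, 1))   # 'cas 6'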
def calc_B_effective(*B_phasors):
"""It calculates the effective value of the magnetic induction field B
(microTesla) in a given point, considering the magnetic induction of
all the cables provided.
Firstly, the function computes the resulting real and imaginary parts
of the x and y magnetic induction field components considering all the
contributing cables given as input (typically three or six cables).
The 'B_components' 2x2 numpy matrix indicates this intermediate step.
Secondly, the module of the effective magnetic induction field B is
calculated as the squared root of the sum of the squares of the
components mentioned above.
Lastly, the result is transformed from Tesla units to micro Tesla units.
Parameters
-------------------
*B_phasors : numpy.ndarray
Respectively the real and imaginary part (columns) of the
x and y components (rows) of the magnetic induction field B
produced by a single cable in a given point
Returns
-------------------
B_effective_microT : float
Effective magnetic induction field B (microTesla) calculated in the given point
Notes
-------------------
The current function implements the calculations present both in
[1]_"Norma Italiana CEI 106-11" formulas (3-4) and [2]_"Norma Italiana
CEI 211-4" formulas (17).
References
-------------------
..[1] Norma Italiana CEI 106-11, "Guide for the determination of
the respect widths for power lines and substations according to
DPCM 8 July 2003 (Clause 6) - Part 1: Overhead lines and cables",
first edition, 2006-02.
..[2] Norma Italiana CEI 211-4, "Guide to calculation methods of
electric and magnetic fields generated by power-lines and electrical
substations", second edition, 2008-09.
"""
B_components = 0
for B_phasor in B_phasors:
B_components += B_phasor
B_effective_T = np.sqrt(np.sum(B_components**2))
B_effective_microT = B_effective_T*10**(6)
return B_effective_microT | a24ae9330018f9dc24ebbba66ccc4bc4bd79a7cb | 14,356 |
def elision_count(l):
"""Returns the number of elisions in a given line
Args:
l (a bs4 <line>): The line
Returns:
(int): The number of elisions
"""
return sum([(1 if _has_elision(w) else 0) for w in l("word")]) | c031765089a030328d68718577872a6d2f70b88d | 14,357 |
def get_final_df(model, data):
"""
This function takes the `model` and `data` dict to
construct a final dataframe that includes the features along
with true and predicted prices of the testing dataset
"""
# if predicted future price is higher than the current,
# then calculate the true future price minus the current price, to get the buy profit
buy_profit = lambda current, pred_future, true_future: true_future - current if pred_future > current else 0
# if the predicted future price is lower than the current price,
# then subtract the true future price from the current price
sell_profit = lambda current, pred_future, true_future: current - true_future if pred_future < current else 0
X_test = data["X_test"]
y_test = data["y_test"]
# perform prediction and get prices
y_pred = model.predict(X_test)
if SCALE:
y_test = np.squeeze(data["column_scaler"]["adjclose"].inverse_transform(np.expand_dims(y_test, axis=0)))
y_pred = np.squeeze(data["column_scaler"]["adjclose"].inverse_transform(y_pred))
test_df = data["test_df"]
# add predicted future prices to the dataframe
test_df[f"adjclose_{LOOKUP_STEP}"] = y_pred
# add true future prices to the dataframe
test_df[f"true_adjclose_{LOOKUP_STEP}"] = y_test
# sort the dataframe by date
test_df.sort_index(inplace=True)
final_df = test_df
# add the buy profit column
final_df["buy_profit"] = list(map(buy_profit,
final_df["adjclose"],
final_df[f"adjclose_{LOOKUP_STEP}"],
final_df[f"true_adjclose_{LOOKUP_STEP}"])
# since we don't have profit for last sequence, add 0's
)
# add the sell profit column
final_df["sell_profit"] = list(map(sell_profit,
final_df["adjclose"],
final_df[f"adjclose_{LOOKUP_STEP}"],
final_df[f"true_adjclose_{LOOKUP_STEP}"])
# since we don't have profit for last sequence, add 0's
)
return final_df | e28f19c5072693872bf17a89cf39cc6afa74517b | 14,358 |
def read_fragment_groups(input_string,natoms,num_channels):
""" read in the fragment groups for each channel
"""
inp_line = _get_integer_line(input_string,'FragmentGroups',natoms)
assert inp_line is not None
out=' '.join(inp_line)
return out | 81ae922add4d0c1680bbeb5223ad85a265ac3040 | 14,359 |
def pad_tile_on_edge(tile, tile_row, tile_col, tile_size, ROI):
""" add the padding to the tile on the edges. If the tile's center is
outside of ROI, move it back to the edge
Args:
tile: tile value
tile_row: row number of the tile relative to its ROI
tile_col: col number of the tile relative to its ROI
tile_size: default tile size which may be different from the input
tile
ROI: ROI value which contains the input tile
Return:
the padded tile
"""
tile_height, tile_width, tile_channel = tile.shape
tile_row_lower = tile_row
tile_row_upper = tile_row + tile_height
tile_col_lower = tile_col
tile_col_upper = tile_col + tile_width
# if the tile's center is outside of ROI, move it back to the edge,
# and then add the padding
if tile_height < tile_size / 2:
tile_row_lower = tile_row_upper - tile_size // 2
tile_height = tile_size // 2
if tile_width < tile_size / 2:
tile_col_lower = tile_col_upper - tile_size // 2
tile_width = tile_size // 2
tile = ROI[tile_row_lower: tile_row_upper, tile_col_lower: tile_col_upper, ]
padding = ((0, tile_size - tile_height), (0, tile_size - tile_width), (0, 0))
return np.pad(tile, padding, "reflect") | bf829ead79f6347423dae8f4c534352d648a3845 | 14,361 |
def calc_kfold_score(model, df, y, n_splits=3, shuffle=True):
"""
Calculate crossvalidation score for the given model and data. Uses sklearn's KFold with shuffle=True.
:param model: an instance of sklearn-model
:param df: the dataframe with training data
:param y: dependent value
:param n_splits: the amount of splits (i.e. K in K-fold)
:param shuffle: whether to shuffle or not
:return: mean, std
"""
kf = KFold(n_splits=n_splits, shuffle=shuffle)
scores = list(calc_kfold_score_helper(model, kf, df, y))
mean = np.mean(scores)
std = np.std(scores)
return mean, std | 09e646d40b245be543c5183b8c964fe8ee4f699f | 14,362 |
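calc_kfold_score delegates to calc_kfold_score_helper, which is not shown; a plausible, hypothetical sketch that yields one held-out score per fold, assuming pandas inputs (DataFrame df, Series y):

def calc_kfold_score_helper(model, kf, df, y):
    # Hypothetical companion generator: fit on each training split and
    # yield the model's score on the corresponding held-out split.
    for train_idx, test_idx in kf.split(df):
        model.fit(df.iloc[train_idx], y.iloc[train_idx])
        yield model.score(df.iloc[test_idx], y.iloc[test_idx])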
def obter_forca (unidade):
"""Esta funcao devolve a forca de ataque da unidade dada como argumento"""
return unidade[2] | 34fe4acac8e0e3f1964faf8e4b26fa31148cf2a6 | 14,363 |
from typing import Dict
def get_default_configuration(cookiecutter_json: CookiecutterJson) -> Dict[str, str]:
"""
Get the default values for the cookiecutter configuration.
"""
default_options = dict()
for key, value in cookiecutter_json.items():
if isinstance(value, str) and "{{" not in value: # ignore templated values
default_options[key] = value
elif isinstance(value, list):
assert len(value) > 0, "Option list must have at least one element"
default_options[key] = value[0]
return default_options | 8a8e3a9b80d3440f1e6031498c210b7b5577aa03 | 14,364 |
def random_nodes_generator(num_nodes, seed=20):
"""
:param int num_nodes: An Integer denoting the number of nodes
:param int seed: (Optional) Integer specifying the seed for controlled randomization.
:return: A dictionary containing the coordinates.
:rtype: dict
"""
np.random.seed(seed)
max_coord_val = num_nodes
num_coord_grid = max_coord_val * max_coord_val
index = np.arange(max_coord_val * max_coord_val)
np.random.shuffle(index)
random_slice_start = np.random.randint(0, num_coord_grid - num_nodes)
coord_index = index[random_slice_start:random_slice_start + num_nodes]
x_array = np.arange(max_coord_val).repeat(max_coord_val)
y_array = np.tile(np.arange(max_coord_val), max_coord_val)
node_coord = np.empty((num_nodes, 2), dtype=np.int32)
node_coord[:, 0] = x_array[coord_index]
node_coord[:, 1] = y_array[coord_index]
node_dict = {}
for i in range(num_nodes):
node_dict[i] = (x_array[coord_index[i]], y_array[coord_index[i]])
return node_dict | 5fa08ccc2cd3a4c34962a29ca9a7566ca5e64592 | 14,365 |
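Example usage of the node generator above (it assumes numpy is imported as np):

import numpy as np

nodes = random_nodes_generator(5, seed=42)
for node_id, (x, y) in nodes.items():
    print(node_id, x, y)   # five distinct coordinates on a 5x5 grid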
def OpenDocumentTextMaster():
""" Creates a text master document """
doc = OpenDocument('application/vnd.oasis.opendocument.text-master')
doc.text = Text()
doc.body.addElement(doc.text)
return doc | 6813f527ced0f1cd89e0824ac10aeb78d06799a4 | 14,366 |
def get_visible_desktops():
"""
Returns a list of visible desktops.
The first desktop is on Xinerama screen 0, the second is on Xinerama
screen 1, etc.
:return: A list of visible desktops.
:rtype: util.PropertyCookie (CARDINAL[]/32)
"""
return util.PropertyCookie(util.get_property(root, '_NET_VISIBLE_DESKTOPS')) | 9fba922a5c83ed4635391f6ed5319fcfe4fd424d | 14,367 |
def spherical_noise(
gridData=None, order_max=8, kind="complex", spherical_harmonic_bases=None
):
"""Returns order-limited random weights on a spherical surface.
Parameters
----------
gridData : io.SphericalGrid
SphericalGrid containing azimuth and colatitude
order_max : int, optional
Spherical order limit [Default: 8]
kind : {'complex', 'real'}, optional
Spherical harmonic coefficients data type [Default: 'complex']
spherical_harmonic_bases : array_like, optional
Spherical harmonic base coefficients (not yet weighted by spatial
sampling grid) [Default: None]
Returns
-------
noisy_weights : array_like, complex
Noisy weights
"""
if spherical_harmonic_bases is None:
if gridData is None:
raise TypeError(
"Either a grid or the spherical harmonic bases have to be provided."
)
gridData = SphericalGrid(*gridData)
spherical_harmonic_bases = sph_harm_all(
order_max, gridData.azimuth, gridData.colatitude, kind=kind
)
else:
order_max = int(_np.sqrt(spherical_harmonic_bases.shape[1]) - 1)
return _np.inner(
spherical_harmonic_bases,
_np.random.randn((order_max + 1) ** 2)
+ 1j * _np.random.randn((order_max + 1) ** 2),
) | 82ef238ef0d72100435306267bf8248f82b70fd8 | 14,369 |
def rollback_command():
"""Command to perform a rollback fo the repo."""
return Command().command(_rollback_command).require_clean().require_migration().with_database() | abfed20ff8cfa08af084fc8c956ebb8d312c985f | 14,370 |
import numpy
def circumcenter(vertices):
"""
Compute the circumcenter of a triangle (the center of the circle which passes through all the vertices of the
triangle).
:param vertices: The triangle vertices (3 by n matrix with the vertices as rows (where n is the dimension of the
space)).
:returns: The triangle circumcenter.
:rtype: n-dimensional vector
"""
# Compute trilinear coordinates
trilinear = numpy.zeros(3)
for i in range(3):
trilinear[i] = numpy.cos(angle(vertices, i))
bary = trilinear_to_barycentric(trilinear, vertices)
return barycentric_to_cartesian(bary, vertices) | 314e7650b6fec6c82880541de32c1378e720c8c8 | 14,371 |
def get_ROC_curve_naive(values, classes):
""" Naive implementation of a ROC curve generator that iterates over a number of thresholds.
"""
# get number of positives and negatives:
n_values = len(values)
totalP = len(np.where(classes > 0)[0])
totalN = n_values - totalP
min_val = np.min(values)
max_val = np.max(values)
thresholds = np.arange(min_val, max_val, 1.0)
n_thresholds = len(thresholds)
TP = np.zeros([n_thresholds, 1])
FP = np.zeros([n_thresholds, 1])
for t in range(n_thresholds):
inds = np.where(values >= thresholds[t])
P = np.sum(classes[inds[0]])
TP[t] = P / totalP
F = len(inds[0]) - P
FP[t] = F / totalN
return TP, FP | 5950c207746c39c4787b3a472c83fbca5599d654 | 14,373 |
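A small usage sketch for the naive ROC generator above:

import numpy as np

values = np.array([10.0, 20.0, 30.0, 40.0, 50.0, 60.0])   # classifier scores
classes = np.array([0, 0, 1, 0, 1, 1])                    # 1 = positive
TP, FP = get_ROC_curve_naive(values, classes)              # one (TP, FP) point per threshold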
import re
def worker(path, opt):
"""Worker for each process.
Args:
path (str): Image path.
opt (dict): Configuration dict. It contains:
crop_size (int): Crop size.
step (int): Step for overlapped sliding window.
thresh_size (int): Threshold size. Patches whose size is smaller
than thresh_size will be dropped.
save_folder (str): Path to save folder.
compression_level (int): for cv2.IMWRITE_PNG_COMPRESSION.
Returns:
process_info (str): Process information displayed in progress bar.
"""
crop_size = opt['crop_size']
step = opt['step']
thresh_size = opt['thresh_size']
img_name, extension = osp.splitext(osp.basename(path))
# remove the x2, x3, x4 and x8 in the filename for DIV2K
img_name = re.sub('x[2348]', '', img_name)
img = mmcv.imread(path, flag='unchanged')
if img.ndim == 2 or img.ndim == 3:
h, w = img.shape[:2]
else:
raise ValueError(f'Image ndim should be 2 or 3, but got {img.ndim}')
h_space = np.arange(0, h - crop_size + 1, step)
if h - (h_space[-1] + crop_size) > thresh_size:
h_space = np.append(h_space, h - crop_size)
w_space = np.arange(0, w - crop_size + 1, step)
if w - (w_space[-1] + crop_size) > thresh_size:
w_space = np.append(w_space, w - crop_size)
index = 0
for x in h_space:
for y in w_space:
index += 1
cropped_img = img[x:x + crop_size, y:y + crop_size, ...]
cv2.imwrite(
osp.join(opt['save_folder'],
f'{img_name}_s{index:03d}{extension}'), cropped_img,
[cv2.IMWRITE_PNG_COMPRESSION, opt['compression_level']])
process_info = f'Processing {img_name} ...'
return process_info | 68b43995cb147bedad7b94c6f4960cef2646ed87 | 14,374 |
def RobotNet(images,dropout):
"""
Build the model for Robot where it will be used as RobotNet.
Args:
images: 4-D tensor with shape [batch_size, height, width, channels].
dropout: A Python float. The probability that each element is kept.
Returns:
Output tensor with the computed classes.
"""
# _X = tf.reshape(images, shape=[-1, IMAGE_HEIGTH, IMAGE_WIDTH, IMAGE_CHANNAL])
# X = tf.cast(_X, tf.float32)
X = tf.cast(images, tf.float32)
weights1=tf.Variable(tf.random_normal([11, 11, 3, 96],stddev=0.01))
biases1=tf.Variable(tf.zeros([96]))
conv1 = conv2d('conv1', X, weights1, biases1,stride=[4,4],padding='SAME')
norm1 = norm('norm1', conv1, lsize=2)
pool1= max_pool('pool1', norm1, 3, 2)
weights2=tf.Variable(tf.random_normal([5, 5, 96, 256],stddev=0.01))
biases2=tf.Variable(tf.constant(0.1,shape=[256]))
conv2 = conv2d('conv2', pool1, weights2, biases2,stride=[1,1],padding='SAME')
norm2 = norm('norm2', conv2, lsize=2)
pool2= max_pool('pool2', norm2, 3, 2)
weights3=tf.Variable(tf.random_normal([3, 3, 256, 384],stddev=0.01))
biases3=tf.Variable(tf.zeros([384]))
conv3 = conv2d('conv3', pool2, weights3, biases3,stride=[1,1],padding='SAME')
weights4=tf.Variable(tf.random_normal([3, 3, 384, 384],stddev=0.01))
biases4=tf.Variable(tf.constant(0.1,shape=[384]))
conv4 = conv2d('conv4', conv3, weights4, biases4,stride=[1,1],padding='SAME')
weights5=tf.Variable(tf.random_normal([3, 3, 384, 256],stddev=0.01))
biases5=tf.Variable(tf.constant(0.1,shape=[256]))
conv5 = conv2d('conv5', conv4, weights5, biases5,stride=[1,1],padding='SAME')
pool5= max_pool('pool5', conv5, 3, 2)
p_h=pool5.get_shape().as_list()[1]
p_w=pool5.get_shape().as_list()[2]
print('p_h:',p_h)
print('p_w:',p_w)
weights6=tf.Variable(tf.random_normal([p_h*p_w*256, 4096],stddev=0.005))
biases6=tf.Variable(tf.constant(0.1,shape=[4096]))
dense1 = tf.reshape(pool5, [-1, weights6.get_shape().as_list()[0]])
fc6= tf.nn.relu(tf.matmul(dense1, weights6) + biases6, name='fc6')
drop6=tf.nn.dropout(fc6, dropout)
weights7=tf.Variable(tf.random_normal([4096, 4096],stddev=0.005))
biases7=tf.Variable(tf.constant(0.1,shape=[4096]))
fc7= tf.nn.relu(tf.matmul(drop6, weights7) + biases7, name='fc7')
drop7=tf.nn.dropout(fc7, dropout)
weights8=tf.Variable(tf.random_normal([4096, 2],stddev=0.01))
biases8=tf.Variable(tf.zeros([2]))
net_out= tf.matmul(drop7, weights8) + biases8
saver = tf.train.Saver({v.op.name: v for v in [weights1,biases1,weights2,biases2,weights3,biases3,
weights4,biases4,weights5,biases5,weights6,biases6,
weights7,biases7,weights8,biases8]})
return net_out,saver | 9bab69a9a0a01436c9b4225ec882231dc561c204 | 14,375 |
def cvReleaseMemStorage(*args):
"""cvReleaseMemStorage(PyObject obj)"""
return _cv.cvReleaseMemStorage(*args) | a11985f756672ab7b7c5ed58336daad1a975c0d2 | 14,376 |
import typing
def GetCommitsInOrder(
repo: git.Repo,
head_ref: str = "HEAD",
tail_ref: typing.Optional[str] = None) -> typing.List[git.Commit]:
"""Get a list of all commits, in chronological order from old to new.
Args:
repo: The repo to list the commits of.
head_ref: The starting point for iteration, e.g. the commit closest to
head.
tail_ref: The end point for iteration, e.g. the commit closest to tail.
This commit is NOT included in the returned values.
Returns:
A list of git.Commit objects.
"""
def TailCommitIterator():
stop_commit = repo.commit(tail_ref)
for commit in repo.iter_commits(head_ref):
if commit == stop_commit:
break
yield commit
if tail_ref:
commit_iter = TailCommitIterator()
else:
commit_iter = repo.iter_commits(head_ref)
try:
return list(reversed(list(commit_iter)))
except git.GitCommandError:
# If HEAD is not found, an exception is raised.
return [] | 57db975d44af80a5c6a8e251842182f6a0f572af | 14,378 |
def return_sw_checked(softwareversion, osversion):
"""
Check software existence; return the software version and whether it was verified.
:param softwareversion: Software release version.
:type softwareversion: str
:param osversion: OS version.
:type osversion: str
"""
if softwareversion is None:
serv = bbconstants.SERVERS["p"]
softwareversion = networkutils.sr_lookup(osversion, serv)
softwareversion, swchecked = sw_check_contingency(softwareversion)
else:
swchecked = True
return softwareversion, swchecked | 9f7efb06150468e553ac6a066b2c7750fd233d4c | 14,380 |
import typing as tp
from copy import copy
import jax
A = tp.TypeVar("A")
def apply(
f: tp.Callable[..., None],
obj: A,
*rest: A,
inplace: bool = False,
_top_inplace: tp.Optional[bool] = None,
_top_level: bool = True,
) -> A:
"""
Applies a function to all `to.Tree`s in a Pytree. Works very similar to `jax.tree_map`,
but its values are `to.Tree`s instead of leaves, also `f` should apply the changes inplace to Tree object.
Arguments:
f: The function to apply.
obj: a pytree possibly containing Trees.
*rest: additional pytrees.
inplace: If `True`, the input `obj` is mutated.
Returns:
A new pytree with the updated Trees or the same input `obj` if `inplace` is `True`.
"""
if _top_inplace is None:
_top_inplace = inplace
if _top_level:
rest = copy(rest)
if not inplace:
obj = copy(obj)
objs = (obj,) + rest
def nested_fn(obj, *rest):
if isinstance(obj, Tree):
apply(
f,
obj,
*rest,
inplace=True,
_top_inplace=_top_inplace,
_top_level=False,
)
jax.tree_map(
nested_fn,
*objs,
is_leaf=lambda x: isinstance(x, Tree) and not x in objs,
)
if isinstance(obj, Tree):
if _top_inplace or obj._mutable:
f(obj, *rest)
else:
with _make_mutable_toplevel(obj):
f(obj, *rest)
return obj | 9a4d29546eb47ba4838fccb58f78495411c99e1c | 14,381 |
import requests
from urllib import parse
from requests.auth import HTTPBasicAuth
def imageSearch(query, top=10):
"""Returns the decoded json response content
:param query: query for search
:param top: number of search result
"""
# set search url
query = '%27' + parse.quote_plus(query) + '%27'
# web result only base url
base_url = 'https://api.datamarket.azure.com/Bing/Search/v1/Image'
url = base_url + '?Query=' + query + '&$top=' + str(top) + '&$format=json&ImageFilters=%27Aspect%3ASquare%27'
# create credential for authentication
user_agent = "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.73 Safari/537.36"
# create auth object
auth = HTTPBasicAuth("", API_KEY)
# set headers
headers = {'User-Agent': user_agent}
# get response from search url
response_data = requests.get(url, headers=headers, auth=auth)
# decode json response content
json_result = response_data.json()
return json_result['d']['results'] | 4c731b0ead57e5ec4ab290c9afc67d6066cce093 | 14,382 |
def pack_inputs(inputs):
"""Pack a list of `inputs` tensors to a tuple.
Args:
inputs: a list of tensors.
Returns:
a tuple of tensors. if any input is None, replace it with a special constant
tensor.
"""
inputs = tf.nest.flatten(inputs)
outputs = []
for x in inputs:
if x is None:
outputs.append(tf.constant(0, shape=[], dtype=tf.int32))
else:
outputs.append(x)
return tuple(outputs) | 2801929a1109cd3c416d8b3229399a0a9b73a38f | 14,383 |
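Example of the packing behaviour above: None entries are swapped for scalar int32 placeholders so that every slot of the returned tuple is a tensor:

import tensorflow as tf

features = tf.constant([[1.0, 2.0]])
packed = pack_inputs([features, None, None])
# packed == (features, <scalar int32 0>, <scalar int32 0>)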
def entries_as_dict(month_index):
"""Convert index xml list to list of dictionaries."""
# Search path
findentrylist = etree.ETXPath("//section[@id='month-index']/ul/li")
# Extract data
entries_xml = findentrylist(month_index)
entries = [to_entry_dict(entry_index_xml)
for entry_index_xml in entries_xml]
return entries | 2fba6699457ca9726d4ce93b480e722bb6c8223d | 14,384 |
def resnet50():
"""Constructs a ResNet-50 model.
"""
return Bottleneck, [3, 4, 6, 3] | 197dc833c966146226721e56315d3f12d9c13398 | 14,385 |
def build_service_job_mapping(client, configured_jobs):
"""
:param client: A Chronos client used for getting the list of running jobs
:param configured_jobs: A list of jobs configured in Paasta, i.e. jobs we
expect to be able to find
:returns: A dict of {(service, instance): last_chronos_job}
where last_chronos_job is the latest job matching (service, instance)
or None if there is no such job
"""
service_job_mapping = {}
all_chronos_jobs = client.list()
for job in configured_jobs:
# find all the jobs belonging to each service
matching_jobs = chronos_tools.filter_chronos_jobs(
jobs=all_chronos_jobs,
service=job[0],
instance=job[1],
include_disabled=True,
include_temporary=True,
)
matching_jobs = chronos_tools.sort_jobs(matching_jobs)
# Only consider the most recent one
service_job_mapping[job] = matching_jobs[0] if len(matching_jobs) > 0 else None
return service_job_mapping | 58cdf0a7f7561d1383f8f4c4e4cdd0f46ccfad0c | 14,386 |
def _validate_labels(labels, lon=True):
"""
Convert labels argument to length-4 boolean array.
"""
if labels is None:
return [None] * 4
which = 'lon' if lon else 'lat'
if isinstance(labels, str):
labels = (labels,)
array = np.atleast_1d(labels).tolist()
if all(isinstance(_, str) for _ in array):
bool_ = [False] * 4
opts = ('left', 'right', 'bottom', 'top')
for string in array:
if string in opts:
string = string[0]
elif set(string) - set('lrbt'):
raise ValueError(
f'Invalid {which}label string {string!r}. Must be one of '
+ ', '.join(map(repr, opts))
+ " or a string of single-letter characters like 'lr'."
)
for char in string:
bool_['lrbt'.index(char)] = True
array = bool_
if len(array) == 1:
array.append(False) # default is to label bottom or left
if len(array) == 2:
if lon:
array = [False, False, *array]
else:
array = [*array, False, False]
if len(array) != 4 or any(isinstance(_, str) for _ in array):
raise ValueError(f'Invalid {which}label spec: {labels}.')
return array | 6b4f4870692b3c89f2c51f920892852eeecc418d | 14,387 |
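A usage sketch for _validate_labels (it relies on the numpy import above); side strings expand into the [left, right, bottom, top] boolean layout.

print(_validate_labels('lr'))                      # [True, True, False, False]
print(_validate_labels(['bottom'], lon=True))      # [False, False, True, False]
print(_validate_labels([True, False], lon=False))  # [True, False, False, False]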
def celsius_to_fahrenheit(temperature_C):
""" converts C -> F """
return temperature_C * 9.0 / 5.0 + 32.0 | 47c789c560c5b7d035252418bd7fb0819b7631a4 | 14,388 |
import re
import datetime
def _parse_date_time(date):
"""Parse time string.
This matches 17:29:43.
Args:
date (str): the date string to be parsed.
Returns:
A tuple of the format (date_time, nsec), where date_time is a
datetime.time object and nsec is 0.
Raises:
ValueError: if the date format does not match.
"""
pattern = re.compile(
r'^(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})$'
)
if not pattern.match(date):
raise ValueError('Wrong date format: {}'.format(date))
hour = pattern.search(date).group('hour')
minute = pattern.search(date).group('min')
sec = pattern.search(date).group('sec')
nsec = 0
time = datetime.time(int(hour), int(minute), int(sec))
return time, nsec | 2d7f4b067d1215623c2e8e4217c98591a1794481 | 14,389 |
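Usage sketch: a well-formed time string yields a datetime.time plus a zero nanosecond field, while anything else raises ValueError.

time_value, nsec = _parse_date_time('17:29:43')
print(time_value, nsec)  # 17:29:43 0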
def parsed_user(request, institute_obj):
"""Return user info"""
user_info = {
'email': '[email protected]',
'name': 'John Doe',
'location': 'here',
'institutes': [institute_obj['internal_id']],
'roles': ['admin']
}
return user_info | 773363dc41f599abe27a5913434f88d1a20c131d | 14,390 |
def lastFromUT1(ut1, longitude):
"""Convert from universal time (MJD)
to local apparent sidereal time (deg).
Inputs:
- ut1 UT1 MJD
- longitude longitude east (deg)
Returns:
- last local apparent sideral time (deg)
History:
2002-08-05 ROwen First version, loosely based on the TCC's tut_LAST.
2014-04-25 ROwen Add from __future__ import division, absolute_import and use relative import.
"""
# convert UT1 to local mean sidereal time, in degrees
lmst = lmstFromUT1(ut1, longitude)
# find apparent - mean sidereal time, in degrees
# note: this wants the TDB date, but UT1 is probably close enough
appMinusMean = llv.eqeqx(ut1) / opscore.RO.PhysConst.RadPerDeg
# find local apparent sideral time, in degrees, in range [0, 360)
return opscore.RO.MathUtil.wrapPos (lmst + appMinusMean) | 0ba587125c0c422349acf7bb9753b09956fd9bed | 14,391 |
import itertools
def strip_translations_header(translations: str) -> str:
"""
Strip header from translations generated by ``xgettext``.
Header consists of multiple lines separated from the body by an empty line.
"""
return "\n".join(itertools.dropwhile(len, translations.splitlines())) | b96c964502724008306d627d785224be08bddb86 | 14,393 |
import random
def sample_targets_and_primes(targets, primes, n_rounds,
already_sampled_targets=None, already_sampled_primes=None):
"""
Sample targets `targets` and primes `primes` for `n_rounds` number of rounds. Omit already sampled targets
or primes which can be passed as sets `already_sampled_targets` and `already_sampled_primes`.
`targets` and `primes` must be dicts with class -> [files] mapping (as delivered from `get_amp_images`.
Will return a sample of size "num. of target classes * number of targets per class // `n_rounds`" as list of
4-tuples with:
- target class
- target file
- prime class
- prime file
This function makes sure that you present N targets in random order split into R rounds, where each prime of each
prime class is matched with a random target *per target class* by calling it as such:
```
# round 1:
sample_round_1 = sample_targets_and_primes(targets, primes, Constants.num_rounds)
# round 2:
sample_round_2 = sample_targets_and_primes(targets, primes, Constants.num_rounds,
already_sampled_targets=<targets from sample_round_1>)
...
```
"""
# set defaults
if not already_sampled_targets:
already_sampled_targets = set()
if not already_sampled_primes:
already_sampled_primes = set()
# make sure we have sets
if not isinstance(already_sampled_targets, set):
raise ValueError('`already_sampled_targets` must be a set.')
if not isinstance(already_sampled_primes, set):
raise ValueError('`already_sampled_primes` must be a set.')
# get number of classes
n_prime_classes = len(primes)
n_target_classes = len(targets)
if not n_prime_classes:
raise ValueError('No target images found.')
if not n_target_classes:
raise ValueError('No prime images found.')
# create a list of primes with 2-tuples: (class, file)
# order of primes is random inside prime class
primes_list = []
for primes_classname, class_primes in primes.items():
class_primes = list(set(class_primes) - already_sampled_primes) # omit already sampled primes
# random order of primes inside class
random.shuffle(class_primes)
primes_list.extend(zip([primes_classname] * len(class_primes), class_primes))
n_primes = len(primes_list)
targets_round = [] # holds the output list with 4-tuples
# construct a sample of targets per target class
for target_classname, class_targets in targets.items():
n_targets = len(class_targets)
if n_targets % n_rounds != 0:
raise ValueError('Number of targets in class (%d in "%s") must be a multiple of'
' number of rounds (%d)'
% (n_targets, target_classname, n_rounds))
# omit already sampled targets
class_targets = set(class_targets) - already_sampled_targets
# get a sample of class targets as random sample without replacement of size "number of targets divided
# by number of rounds" so that you can split targets into several rounds
        # random.sample requires a sequence (not a set) on newer Python versions
        targets_sample = random.sample(list(class_targets), n_targets // n_rounds)
n_targets_sample = len(targets_sample)
if n_targets_sample % n_primes != 0:
raise ValueError('Number of sampled targets in class (%d in "%s") must be a multiple of'
' number of primes (%d)'
% (n_targets_sample, target_classname, n_primes))
# primes sample is the primes list repeated so that it matches the length of targets in this target class
# this makes sure that for each target class all primes will be shown
primes_sample = primes_list * (n_targets_sample // n_primes)
primes_sample_classes, primes_sample_prime = list(zip(*primes_sample))
assert len(primes_sample) == n_targets_sample
# add targets-primes combinations for this round
targets_round.extend(zip([target_classname] * n_targets_sample, targets_sample,
primes_sample_classes, primes_sample_prime))
# random order of targets-primes combinations
random.shuffle(targets_round)
return targets_round | 6b73cdadf3016f1dc248deb2625eae1dd620553b | 14,394 |
def getsign(num):
"""input the raw num string, return a tuple (sign_num, num_abs).
"""
sign_num = ''
if num.startswith('±'):
sign_num = plus_minus
num_abs = num.lstrip('±+-')
if not islegal(num_abs):
return sign_num, ''
else:
try:
temp = float(num)
if (temp < 0) and (sign_num == ''):
sign_num = sign_negative
elif (temp > 0) and (sign_num == ''):
if ('+' in num):
sign_num = sign_positive
else:
if num.startswith('-'):
sign_num = sign_negative
if num.startswith('+'):
sign_num = sign_positive
num_abs = num.lstrip('+-')
except ValueError:
raise
return sign_num, num_abs | 46fc5a7ac8e366479c40e2ccc1a33f57a736b343 | 14,395 |
from typing import Mapping
from typing import Union
def _convert_actions_to_commands(
subvol: Subvol,
build_appliance: Subvol,
action_to_names_or_rpms: Mapping[RpmAction, Union[str, _LocalRpm]],
) -> Mapping[YumDnfCommand, Union[str, _LocalRpm]]:
"""
Go through the list of RPMs to install and change the action to
downgrade if it is a local RPM with a lower version than what is
installed.
Also use `local_install` and `local_remove` for _LocalRpm.
See the docs in `YumDnfCommand` for the rationale.
"""
cmd_to_names_or_rpms = {}
for action, names_or_rpms in action_to_names_or_rpms.items():
for nor in names_or_rpms:
cmd, new_nor = _action_to_command(
subvol, build_appliance, action, nor
)
if cmd == YumDnfCommand.noop:
continue
if cmd is None: # pragma: no cover
raise AssertionError(f"Unsupported {action}, {nor}")
cmd_to_names_or_rpms.setdefault(cmd, set()).add(new_nor)
return cmd_to_names_or_rpms | 519d09754b18bcabef405f3d39a959b1296d3c6c | 14,396 |
import numpy as np
import pandas as pd
def fit_DBscan(image_X,
eps,
eps_grain_boundary,
min_sample,
min_sample_grain_boundary,
filter_boundary,
remove_large_clusters,
remove_small_clusters,
binarize_bdr_coord,
binarize_grain_coord,
):
""" Function to measure counts and average sizes of instances within an image
args:
        image_X: np array containing a preprocessed image
eps: float, parameter for the DBscan algorithm from sklearn
eps_grain_boundary:float, parameter for the DBscan algorithm from sklearn
min_sample: int, parameter for the DBscan algorithm from sklearn
min_sample_grain_boundary: int float, parameter for the DBscan algorithm from sklearn
filter_boundary:int, threshold to apply while finding the grain boundaries
remove_large_clusters: int indicating how many of the largest clusters
shall be removed
remove_small_clusters:int indicating how many of the smallest clusters
shall be removed
binarize_bdr_coord: int for the binarization of the grain boundaries
binarize_grain_coord:: int for the binarization of the grain interiors
returns:
m_CL: float, log10(mean (predicted cluster radius in pixels))
s_CL: float, log10(predicted cluster count)
"""
print('Finding grain boundaries')
bdr_coord=np.array(binarize_array_high(image_X, binarize_bdr_coord))
bdr_coord=find_grain_boundaries(bdr_coord, eps=eps_grain_boundary, min_sample=min_sample_grain_boundary, filter_boundary=filter_boundary)
bdr_coord_df=pd.DataFrame(bdr_coord)
bdr_coord_df.columns=['X','Y']
bdr_coord_df['Z']=255
df_grain=pd.pivot_table(bdr_coord_df, index='X', columns='Y', values='Z', fill_value=0)
df_grain=df_grain.to_numpy()
print('Measuring grains')
grain_coord = np.array(binarize_array(df_grain, binarize_grain_coord))
(m_CL, s_CL, clusters)=find_grains(grain_coord, eps, min_sample, remove_large_clusters, remove_small_clusters)
return (m_CL, s_CL, clusters) | b32218e6d45dc766d749af9247ee1d343236fef0 | 14,397 |
import pandas as pd
def get_today_timestring():
    """Return today's date as a 'YYYY-MM-DD' string."""
return pd.Timestamp.today().strftime('%Y-%m-%d') | f749a5a63f7c55053918eb3f95bb56c2325f4362 | 14,398 |
from pathlib import Path
import json
def load_json(filepath):
"""
Load a json file
:param filepath: path to json file
"""
fp = Path(filepath)
if not fp.exists():
raise ValueError("Unrecognized file path: {}".format(filepath))
with open(filepath) as f:
data = json.load(f)
return data | 657509a50961f7b9c83536a8973884eef5bbed5e | 14,399 |
import requests
import random
def handle(req):
"""handle a request to the function
Args:
req (str): request body
"""
r = requests.get("http://api.open-notify.org/astros.json")
result = r.json()
index = random.randint(0, len(result["people"]) - 1)
name = result["people"][index]["name"]
return "{} is in space".format(name) | 7d951443bc5b6f3db86602d635a8c9ce84b703fb | 14,400 |
import hashlib
def get_file_hashsum(file_name: str):
"""Generate a SHA-256 hashsum of the given file."""
hash_sha256 = hashlib.sha256()
with open(file_name, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_sha256.update(chunk)
return hash_sha256.hexdigest() | d515b6b7b743396240ada8d888b8bfbc4c316373 | 14,401 |
def generate_primes(n):
"""Generates a list of prime numbers up to `n`
"""
    # PRIMES is a module-level list of primes found so far (expected seed: [2, 3])
    global PRIMES
    k = PRIMES[-1] + 2
while k <= n:
primes_so_far = PRIMES[:]
divisible = False
for p in primes_so_far:
if k % p == 0:
divisible = True
break
if not divisible:
PRIMES.append(k)
k += 2
return PRIMES | 4ea29f8dc8dad11bf6c71fcb1dcc6b75b91bece5 | 14,402 |
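A usage sketch; generate_primes relies on a module-level PRIMES list that is assumed here to be seeded with [2, 3] and is extended in place.

PRIMES = [2, 3]  # assumed module-level seed
print(generate_primes(20))  # [2, 3, 5, 7, 11, 13, 17, 19]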
import cvxpy as cp
def Convex(loss, L2_reg):
"""
loss: src_number loss
[loss_1, loss_2, ... loss_src_number]
"""
src_number = len(loss)
lam = cp.Variable(src_number)
prob = cp.Problem(
cp.Minimize(lam @ loss + L2_reg * cp.norm(lam, 2)), [cp.sum(lam) == 1, lam >= 0]
)
# prob.solve()
prob.solve(solver="SCS")
lam_optimal = lam.value
return lam_optimal | f2a6ecc464e2f87684d1537775816d22dc30d837 | 14,403 |
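A usage sketch for Convex, assuming cvxpy and numpy are installed; the returned weights sum to one and favor the sources with smaller loss.

import numpy as np
loss = np.array([0.8, 0.1, 0.4])
weights = Convex(loss, L2_reg=0.5)
print(weights.round(3), round(weights.sum(), 3))  # weights sum to ~1, largest weight on the 0.1 loss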
from limitlessled.pipeline import Pipeline
def state(new_state):
"""State decorator.
Specify True (turn on) or False (turn off).
"""
def decorator(function):
"""Decorator function."""
# pylint: disable=no-member,protected-access
def wrapper(self, **kwargs):
"""Wrap a group state change."""
pipeline = Pipeline()
transition_time = DEFAULT_TRANSITION
# Stop any repeating pipeline.
if self.repeating:
self.repeating = False
self.group.stop()
# Not on and should be? Turn on.
if not self.is_on and new_state is True:
pipeline.on()
# Set transition time.
if ATTR_TRANSITION in kwargs:
transition_time = kwargs[ATTR_TRANSITION]
# Do group type-specific work.
function(self, transition_time, pipeline, **kwargs)
# Update state.
self._is_on = new_state
self.group.enqueue(pipeline)
self.schedule_update_ha_state()
return wrapper
return decorator | 156b7bbad0a943af6bb4280e4fdb1dde2b6e320a | 14,404 |
import numpy as N
def rotTransMatrixNOAD(axis, s, c, t):
"""
build a rotate * translate matrix - MUCH faster for derivatives
since we know there are a ton of zeros and can act accordingly
:param axis: x y or z as a character
:param s: sin of theta
:param c: cos of theta
:param t: translation (a 3 tuple)
:return:
"""
if axis == "Z" or axis == "z":
return N.array([[c, -s, 0, c * t[0] - s * t[1]],
[s, c, 0, s * t[0] + c * t[1]],
[0, 0, 1, t[2]],
[0, 0, 0, 1]])
elif axis == "Y" or axis == "y":
return N.array([[c, 0, s, c * t[0] + s * t[2]],
[0, 1, 0, t[1]],
[-s, 0, c, c * t[2] - s * -t[0]],
[0, 0, 0, 1]])
elif axis == "X" or axis == "x":
return N.array([[1, 0, 0, t[0]],
[0, c, -s, c * t[1] - s * t[2]],
[0, s, c, s * t[1] + c * t[2]],
[0, 0, 0, 1]])
else:
print "Unsupported Axis:", axis
raise NotImplementedError | f6e8d6474ba90e3a253229124e0571d67025c818 | 14,405 |
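Usage sketch: a 90-degree rotation about z combined with a unit translation along x (values shown approximately).

import math
theta = math.pi / 2
M = rotTransMatrixNOAD("z", math.sin(theta), math.cos(theta), (1.0, 0.0, 0.0))
print(M.round(6))
# [[ 0. -1.  0.  0.]
#  [ 1.  0.  0.  1.]
#  [ 0.  0.  1.  0.]
#  [ 0.  0.  0.  1.]]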
def angular_diameter_distance(z, cosmo=None):
""" Angular diameter distance in Mpc at a given redshift.
This gives the proper (sometimes called 'physical') transverse
distance corresponding to an angle of 1 radian for an object at
redshift `z`.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
angdist : astropy.units.Quantity
Angular diameter distance at each input redshift.
"""
if cosmo is None:
cosmo = _get_current()
return cosmo.angular_diameter_distance(z) | 694f9fcaa6ce2585315f63e69351ac47589e248c | 14,406 |
def main(stdin):
"""
Take sorted standard in from Hadoop and return lines.
Value is just a place holder.
"""
for line_num in stdin:
# Remove trailing newlines.
line_num = line_num.rstrip()
# Omit empty lines.
try:
(line, num) = line_num.rsplit('\t', 1)
print(("{line}\t{num}").format(line=line, num=num))
except ValueError:
pass
return None | 811e184d9425c1c76681c823b463b99ebde2c25c | 14,407 |
def _ignore_module_import_frames(file_name, name, line_number, line):
"""
Ignores import frames of extension loading.
Parameters
----------
file_name : `str`
The frame's respective file's name.
name : `str`
The frame's respective function's name.
line_number : `int`
The line's index where the exception occurred.
line : `str`
The frame's respective stripped line.
Returns
-------
should_show_frame : `bool`
Whether the frame should be shown.
"""
should_show_frame = True
if file_name.startswith('<') and file_name.endswith('>'):
should_show_frame = False
elif file_name == EXTENSION_LOADER_EXTENSION_FILE_PATH:
if name == '_load':
if line == 'loaded = self._load_module()':
should_show_frame = False
elif name == '_load_module':
if line == 'spec.loader.exec_module(module)':
should_show_frame = False
elif file_name == EXTENSION_LOADER_EXTENSION_LOADER_FILE_PATH:
if name == '_extension_loader_task':
if line in (
'module = await KOKORO.run_in_executor(extension._load)',
'await entry_point(module)',
'entry_point(module)',
):
should_show_frame = False
elif name == '_extension_unloader_task':
if line in (
'await exit_point(module)',
'exit_point(module)',
):
should_show_frame = False
return should_show_frame | 8f3e506e99b32d2945ea665367d5447ef1b05732 | 14,408 |
import tensorflow as tf
from tensorflow.core.framework import tensor_pb2
def get_tf_tensor_data(tensor):
"""Get data from tensor."""
assert isinstance(tensor, tensor_pb2.TensorProto)
is_raw = False
if tensor.tensor_content:
data = tensor.tensor_content
is_raw = True
elif tensor.float_val:
data = tensor.float_val
elif tensor.dcomplex_val:
data = tensor.dcomplex_val
elif tensor.int_val:
data = tensor.int_val
elif tensor.int64_val:
data = tensor.int64_val
elif tensor.bool_val:
data = tensor.bool_val
elif tensor.dtype == tf.int32:
data = [0]
elif tensor.dtype == tf.int64:
data = [0]
elif tensor.dtype == tf.float32:
data = [0.]
elif tensor.dtype == tf.float16:
data = [0]
elif tensor.string_val:
data = tensor.string_val
else:
raise ValueError('tensor data not supported')
return [is_raw, data] | f2d62a7ccba252d5c94cd9979eb01a0b44282d4a | 14,409 |
from math import tanh
def coth(x):
"""
Return the hyperbolic cotangent of x.
"""
return 1.0/tanh(x) | 92d490563c8595b8c11334cd38afbf9ef389dfe8 | 14,411 |
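A quick numerical check of coth against its cosh/sinh definition.

from math import cosh, sinh
print(abs(coth(1.2) - cosh(1.2) / sinh(1.2)) < 1e-12)  # True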
def param_is_numeric(p):
"""
Test whether any parameter is numeric; functionally, determines if any
parameter is convertible to a float.
:param p: An input parameter
:return:
"""
try:
float(p)
return True
    except (TypeError, ValueError):
return False | b92579ba019389cf21002b63ca6e2ebdfad7d86f | 14,412 |
import numpy as np
def convert_graph_to_angular_abstract_graph(graph: Graph, simple_graph=True, return_tripel_edges=False) -> Graph:
"""Converts a graph into an abstract angular graph
Can be used to calculate a path tsp
Arguments:
graph {Graph} -- Graph to be converted
simple_graph {bool} -- Indicates if graph is simple
return_tripel_edges {bool} -- Also return translation for original edges to abstract
Returns:
Graph -- Converted abstract graph
"""
# create a vertex for every edge in the original graph
# For geometric instances, only one direction of edges is needed
vertices = np.array([[u, v] for u, v in graph.edges if u < v])
edges = {}
tripel_edges = {}
for i, vertex in enumerate(vertices):
ran = range(i+1, len(vertices)) if simple_graph else range(len(vertices))
for j in ran:
if j == i:
continue
other = vertices[j]
if np.intersect1d(vertex, other).size > 0:
shared_vertex = np.intersect1d(vertex, other)
non_shared = np.setdiff1d(np.hstack([vertex, other]), shared_vertex)
edges[(i, j)] = get_angle(
graph.vertices[shared_vertex],
graph.vertices[non_shared[0]],
graph.vertices[non_shared[1]]
)
if return_tripel_edges:
from_vertex = np.intersect1d(vertex, non_shared)
to_vertex = np.intersect1d(other, non_shared)
edge = (*from_vertex, *to_vertex)
tripel_edges[(*shared_vertex, *edge)] = (i, j)
graph = Graph(vertices, edges.keys(), c=edges)
if return_tripel_edges:
return (tripel_edges, graph)
return graph | 2f81743824549d8e19f70f1843d6449eb12e7e5d | 14,413 |
import mechanize
def login_to_site(url, username, password, user_tag, pass_tag):
"""
:param url:
:param username:
:param password:
:param user_tag:
:param pass_tag:
:return: :raise:
"""
browser = mechanize.Browser(factory=mechanize.RobustFactory())
    browser.set_handle_robots(False)
    browser.set_handle_referer(True)
    browser.set_handle_refresh(True)
browser.open(url)
# noinspection PyCallingNonCallable,PyCallingNonCallable,PyCallingNonCallable,PyCallingNonCallable
browser.select_form(nr=0)
browser["USER"] = username
browser["password"] = password
# noinspection PyCallingNonCallable
browser.submit()
# noinspection PyCallingNonCallable
if "Case Search Login Error" in browser.response().get_data():
raise ValueError("Could not login to PACER Case Search. Check your "
"username and password")
print ("You are logged on to the Public Access to Court Electronic "
"Records (PACER) Case Search website as " + username + ". All costs "
"will be billed to this account.")
return browser | 6c906e037a619031eb45bb26f01da71833fa3e41 | 14,414 |
from fastapi import Depends
async def test_db(
    service: Service = Depends(Service)
) -> HTTPSuccess:
    """Test the API to determine if the database is connected."""
    if service.test() is not None:
        return {"message": "Database connected."}
    else:
        return {"message": "Database not connected."} | 67e4530af6959fef03f55879ee3781ebf993f11c | 14,415
import numpy as np
from tqdm import tqdm
from tensorflow.keras.callbacks import History  # assumed source of the History callback
def model_fit_predict():
"""
Training example was implemented according to machine-learning-mastery forum
The function takes data from the dictionary returned from splitWindows.create_windows function
https://machinelearningmastery.com/stateful-stateless-lstm-time-series-forecasting-python/
:return: np.array of predictions
"""
X, y, test_input = windows_dict['X'], windows_dict['y'], windows_dict['X_test']
# Predictions are stored in a list
predictions = []
with tqdm(total=X.shape[0], desc="Training the model, saving predictions") as progress_bar:
# Save model History in order to check error data
history = History()
# build model framework
current_model = model_builder(X)
# Make predictions for each window
for i in range(X.shape[0]):
# TRAIN (FIT) model for each epoch
# history = current_model.fit(
# input_X[i], target_X[i],
# epochs=_epochs, batch_size=batch,
# verbose=0, shuffle=False, validation_split=0.1,
# callbacks=[history]
# )
# print(X[i].shape, X[i].dtype, y[i].shape, y[i].dtype)
for e in range(epochs):
current_model.fit(
X[i], y[i],
epochs=1, batch_size=batch,
verbose=0, shuffle=False,
callbacks=[history]
)
current_model.reset_states()
# PREDICT and save results
predictions.append(
current_model.predict(test_input[i], batch_size=batch_test, verbose=0)
)
progress_bar.update(1)
return np.asarray(predictions) | 753ca4e90034864e709809cc1bd2f30640554f28 | 14,416 |
import pandas as pd
from functools import partial
import multiprocessing as mp
import gc
def mp_variant_annotations(df_mp, df_split_cols='', df_sampleid='all',
drop_hom_ref=True, n_cores=1):
"""
Multiprocessing variant annotations
see variantAnnotations.process_variant_annotations for description of annotations
This function coordinates the annotation of variants using the
multiprocessing library.
Parameters
---------------
df_mp: pandas df, required
VCF DataFrame
df_split_cols: dict, optional
key:FORMAT id value:#fields expected
e.g. {'AD':2} indicates Allelic Depth should be
split into 2 columns.
df_sampleid: list, required
list of sample_ids, can be 'all'
drop_hom_ref: bool, optional
specifies whether to drop all homozygous reference
variants from dataframe.
FALSE REQUIRES LARGE MEMORY FOOTPRINT
n_cores: int, optional
Number of multiprocessing jobs to start.
Be careful as memory is copied to each process, RAM intensive
"""
print('starting multiprocessing')
pool = mp.Pool(int(n_cores))
# tasks = np.array_split(df_mp.copy(), int(n_cores)) #breaks with older
# pandas/numpy
dfs = df_split(df_mp.copy(), int(n_cores))
mp_process = partial(process_variant_annotations, sample_id=df_sampleid,
split_columns=df_split_cols, drop_hom_ref=drop_hom_ref)
results = []
del df_mp
gc.collect()
r = pool.map_async(mp_process, \
dfs, callback=results.append)
r.wait()
pool.close()
pool.join()
pool.terminate()
print('multiprocessing complete')
res_df = pd.concat([df for df in results[0] if len(df) > 0])
cat_cols = ['vartype1', 'vartype2', 'a1', 'a2', \
'GT1', 'GT2', 'GT','sample_ids', 'zygosity']
res_df.loc[:, cat_cols] = res_df[cat_cols].astype('category')
return res_df | 8569a4eb82ff04db1bee46b00bbd9eedf8b4d094 | 14,417 |
def find_attachments(pattern, cursor):
"""Return a list of attachments that match the specified pattern.
Args:
pattern: The path to the attachment, as a SQLite pattern (to be
passed to a LIKE clause).
cursor: The Cursor object through which the SQLite queries are
sent to the Zotero database.
Returns:
A list of (parentItemID, path) pairs that match the specified
pattern. The returned list is empty if no matches are found.
"""
query = 'SELECT parentItemID, path FROM itemAttachments WHERE path LIKE ?'
cursor.execute(query, (pattern,))
return list(cursor) | 614649f6fd5972b026b191bb1a272e270dedffe5 | 14,418 |
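A usage sketch, assuming a local copy of a Zotero SQLite database; the file name and pattern are illustrative only.

import sqlite3
conn = sqlite3.connect('zotero.sqlite')  # assumed path to a Zotero database copy
cur = conn.cursor()
for parent_item_id, path in find_attachments('%.pdf', cur):
    print(parent_item_id, path)
conn.close()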
import numpy as np
from sklearn.utils import check_random_state
def generate_symmetric_matrix(n_unique_action: int, random_state: int) -> np.ndarray:
"""Generate symmetric matrix
Parameters
-----------
n_unique_action: int (>= len_list)
Number of actions.
random_state: int
Controls the random seed in sampling elements of matrix.
Returns
---------
symmetric_matrix: array-like, shape (n_unique_action, n_unique_action)
"""
random_ = check_random_state(random_state)
base_matrix = random_.normal(scale=5, size=(n_unique_action, n_unique_action))
symmetric_matrix = (
np.tril(base_matrix) + np.tril(base_matrix).T - np.diag(base_matrix.diagonal())
)
return symmetric_matrix | acb5d537762f2f306be8f300845dc6560c1dd121 | 14,419 |
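Usage sketch: the result is square and symmetric, so it can be used directly as a pairwise action-interaction matrix.

import numpy as np
m = generate_symmetric_matrix(n_unique_action=4, random_state=12345)
print(m.shape)              # (4, 4)
print(np.allclose(m, m.T))  # True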
def model_fields_map(model, fields=None, exclude=None, prefix='', prefixm='', attname=True, rename=None):
"""
    Based on the given model, returns a list of tuples containing the ORM path to each field
    and the name under which it should appear in the result.
    Only regular fields are processed; m2m and generic fields are not included.
    ARGUMENTS:
    :param model: a model or a model instance from which the list of fields is built
    :param None | collections.Container fields: list of fields to take from the model
    :param None | collections.Container exclude: list of fields that will not be taken
    :param str prefix: ORM path at which the model is located in the query
    :param str prefixm: prefix that will be added to the field name
    :param bool attname: use name (model) or attname (model_id); these differ for foreign keys
    :param dict rename: dictionary for renaming fields
:rtype: list[tuple[str]]
"""
data = []
rename = rename or {}
attribute = 'attname' if attname else 'name'
for f in model._meta.concrete_fields:
if fields and f.attname not in fields and f.name not in fields:
continue
if exclude and f.attname in exclude and f.name not in exclude:
continue
param_name = getattr(f, attribute)
new_param_name = rename[param_name] if param_name in rename else param_name
data.append(('{}{}'.format(prefix, param_name), '{}{}'.format(prefixm, new_param_name)))
return data | 812247543e5f714e0d2ef57cf018b0741679f83e | 14,420 |
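A usage sketch with a hypothetical Django model; Book, its fields, and the prefixes below are illustrative only.

# class Book(models.Model):                # hypothetical model
#     title = models.CharField(max_length=100)
#     author = models.ForeignKey('Author', on_delete=models.CASCADE)
fields = model_fields_map(Book, exclude=['id'], prefix='book__', prefixm='book_')
# e.g. [('book__title', 'book_title'), ('book__author_id', 'book_author_id')]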