content | sha1 | id
---|---|---|
def first_order_smoothness_loss(
image, flow,
edge_weighting_fn):
"""Computes a first-order smoothness loss.
Args:
image: Image used for the edge-aware weighting [batch, height, width, 2].
flow: Flow field for which to compute the smoothness loss [batch, height,
width, 2].
edge_weighting_fn: Function used for the edge-aware weighting.
Returns:
Average first-order smoothness loss.
"""
img_gx, img_gy = image_grads(image)
weights_x = edge_weighting_fn(img_gx)
weights_y = edge_weighting_fn(img_gy)
# Compute first derivatives (gradients) of the predicted flow.
flow_gx, flow_gy = image_grads(flow)
# Compute weighted smoothness
return ((tf.reduce_mean(input_tensor=weights_x * robust_l1(flow_gx)) +
tf.reduce_mean(input_tensor=weights_y * robust_l1(flow_gy))) / 2.) | 92e0eb047bb9d5d67a32c8ba7a601e4b7c0333b8 | 13,151 |
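# `image_grads` and `robust_l1` are not shown above; a minimal sketch of typical
# TensorFlow definitions (these exact forms are assumptions, not from the source):
import tensorflow as tf

def image_grads(image_batch, stride=1):
    # Finite differences along width (gx) and height (gy) of a [b, h, w, c] tensor.
    image_gx = image_batch[:, :, stride:] - image_batch[:, :, :-stride]
    image_gy = image_batch[:, stride:, :] - image_batch[:, :-stride, :]
    return image_gx, image_gy

def robust_l1(x):
    # Charbonnier-style robust L1 penalty: ~|x| for large x, smooth near zero.
    return (x ** 2 + 0.001 ** 2) ** 0.5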
def where_is_my_birthdate_in_powers_of_two(date: int) -> int:
"""
>>> where_is_my_birthdate_in_powers_of_two(160703)
<BLANKLINE>
Dans la suite des
<BLANKLINE>
0 1 3 765
2 , 2 , 2 , …, 2
<BLANKLINE>
Ta date de naissance apparaît ici!:
<BLANKLINE>
…5687200260623819378316070394980560315787
^~~~~~
<BLANKLINE>
À la position #88532
<BLANKLINE>
765
"""
date = str(date)
located = False
sequence = ""
sequence_index = 0
while not located:
sequence_index += 1
sequence += str(2 ** sequence_index)
found_at = sequence.find(date)
if found_at != -1:
print(f"""
Dans la suite des
0 1 3 {sequence_index}
2 , 2 , 2 , …, 2
Ta date de naissance apparaît ici!:
…{numbers_around(sequence, at=found_at, lookaround=20)}
{20 * ' '}^{(len(date)-1) * '~'}
À la position #{sequence.find(date)}
""")
return sequence_index | 656e7c76c849ba4348a5499a1c5cbd02574db011 | 13,152 |
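# `numbers_around` is not shown above; a plausible sketch inferred from the call
# site (the exact slicing behaviour is an assumption):
def numbers_around(sequence: str, at: int, lookaround: int = 20) -> str:
    # Return `lookaround` digits on each side of the match so the date can be
    # underlined on the following printed line.
    start = max(0, at - lookaround)
    return sequence[start:at + lookaround]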
import pandas as pd
def make_df_health_all(datadir):
"""
Returns full dataframe from health data at specified location
"""
df_health_all = pd.read_csv(str(datadir) + '/health_data_all.csv')
return df_health_all | c9fe0efe65f4455bc9770aa621bed5aaa54fb47f | 13,153 |
def lothars_in_cv2image(image, lothars_encoders,fc):
"""
Given an image opened with OpenCV, find
lothars in the photo and the corresponding names and encodings.
"""
# init an empty list for selfie and corresponding name
lothar_selfies=[]
names=[]
encodings=[]
# rgb image
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
#convert image to Greyscale for HaarCascade
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# cycle through angles until cv finds faces
found=False
angles=rotation_angles(5)
for angle in angles:
r_gray=rotate_image(gray,angle)
faces = fc.detectMultiScale(r_gray,
scaleFactor=1.3,
minNeighbors=6,
minSize=(30, 40),
flags=cv2.CASCADE_SCALE_IMAGE)
# cycle all faces found
for i,face in enumerate(faces):
# define the face rectangle
(x,y,w,h) = face
height, width = image.shape[:2]
extra_h=((1+2*extra)/ratio-1)/2
x=int(max(0,x-w*extra))
y=int(max(0,y-h*extra_h))
w=int(min(w+2*w*extra,width))
h=int(min(h+2*h*extra_h,height))
#print('w/h=',w/h)
# rotate colored image
rotated_image=rotate_image(image,angle)
# Save just the rectangle faces in SubRecFaces (no idea of meaning of 255)
#cv2.rectangle(rotated_image, (x,y), (x+w,y+h), (255,255,255))
sub_face = rotated_image[y:y+h, x:x+w]
index, name, encoding = lothars_in_selfies([sub_face], lothars_encoders,
[x,y,w,h],
num_jitters=2,keep_searching=False)
if (len(name)>0):
lothar_selfies.append(sub_face)
names.append(name)
encodings.append(encoding)
found=True
# break angle changes if a lothar was found
if (found):
break
return lothar_selfies, names, encodings | be869c922299178dd3f4835a6fd547c779c4e11a | 13,154 |
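# `rotation_angles` and `rotate_image` are not shown above; minimal OpenCV sketches
# (the symmetric angle fan and the borderless rotation are assumptions):
import cv2

def rotation_angles(step):
    # 0, +step, -step, +2*step, -2*step, ... up to +/-45 degrees.
    angles = [0]
    for a in range(step, 46, step):
        angles.extend([a, -a])
    return angles

def rotate_image(image, angle):
    # Rotate around the image centre, keeping the original frame size.
    h, w = image.shape[:2]
    matrix = cv2.getRotationMatrix2D((w / 2.0, h / 2.0), angle, 1.0)
    return cv2.warpAffine(image, matrix, (w, h))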
from math import ceil
def approx_nth_prime_upper(n):
""" approximate upper limit for the nth prime number. """
return ceil(1.2 * approx_nth_prime(n)) | 9cfebe3c1dbac176fe917f97664b287b8024c5d4 | 13,155 |
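# `approx_nth_prime` is not shown above; by the prime number theorem the nth prime
# is ~ n * (ln n + ln ln n), which suggests a sketch like this (an assumption):
from math import log

def approx_nth_prime(n):
    # Prime number theorem estimate of the nth prime; clamp n so ln(ln(n)) is defined.
    n = max(n, 6)
    return n * (log(n) + log(log(n)))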
def wavelength_to_velocity(wavelengths, input_units, center_wavelength=None,
center_wavelength_units=None, velocity_units='m/s',
convention='optical'):
"""
Conventions defined here:
http://www.gb.nrao.edu/~fghigo/gbtdoc/doppler.html
* Radio V = c (c/l0 - c/l)/(c/l0) f(V) = (c/l0) ( 1 - V/c )
* Optical V = c ((c/l0) - f)/f f(V) = (c/l0) ( 1 + V/c )^-1
* Redshift z = ((c/l0) - f)/f f(V) = (c/l0) ( 1 + z )^-1
* Relativistic V = c ((c/l0)^2 - f^2)/((c/l0)^2 + f^2) f(V) = (c/l0) ( 1 - (V/c)^2 )^(1/2) / (1 + V/c)
"""
if input_units in velocity_dict:
print "Already in velocity units (%s)" % input_units
return wavelengths
if center_wavelength is None:
raise ValueError("Cannot convert wavelength to velocity without specifying a central wavelength.")
if center_wavelength_units not in wavelength_dict:
raise ValueError("Bad wavelength units: %s" % (center_wavelength_units))
if velocity_units not in velocity_dict:
raise ValueError("Bad velocity units: %s" % (velocity_units))
wavelength_m = wavelengths / wavelength_dict['meters'] * wavelength_dict[input_units]
center_wavelength_m = center_wavelength / wavelength_dict['meters'] * wavelength_dict[center_wavelength_units]
frequency_hz = speedoflight_ms / wavelength_m
center_frequency_hz = speedoflight_ms / center_wavelength_m
# the order is very ugly because otherwise, if scalar, the spectroscopic axis attributes won't be inherited
if convention == 'radio':
velocity = ( frequency_hz - center_frequency_hz ) / center_frequency_hz * speedoflight_ms * -1
elif convention == 'optical':
velocity = ( frequency_hz - center_frequency_hz ) / frequency_hz * speedoflight_ms * -1
elif convention == 'relativistic':
velocity = ( frequency_hz**2 - center_frequency_hz**2 ) / ( center_frequency_hz**2 + frequency_hz**2 ) * speedoflight_ms * -1
else:
raise ValueError('Convention "%s" is not allowed.' % (convention))
velocities = velocity * velocity_dict['m/s'] / velocity_dict[velocity_units]
return velocities | ffa1c1bb69a7f6767efcd2f5494da43847a80d3b | 13,156 |
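# The function relies on module-level unit tables and a constant that are not shown.
# Illustrative values consistent with how they are used above (the real module may
# define more units; these exact entries are assumptions):
speedoflight_ms = 2.99792458e8  # speed of light in m/s

# multiplicative factors relative to 1 meter and 1 m/s
wavelength_dict = {'meters': 1.0, 'centimeters': 1e-2, 'millimeters': 1e-3,
                   'micrometers': 1e-6, 'nanometers': 1e-9, 'angstroms': 1e-10}
velocity_dict = {'m/s': 1.0, 'km/s': 1e3, 'cm/s': 1e-2}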
import json
def gen_api_json(api):
"""Apply the api literal object to the template."""
api = json.dumps(
api, cls=Encoder, sort_keys=True, indent=1, separators=(',', ': ')
)
return TEMPLATE_API_DEFINITION % (api) | d9acea9483199746a9c97d905f06b17f20bff18c | 13,157 |
import copy
from bs4 import BeautifulSoup
import re
def get_massage():
"""
Provide extra data massage to solve HTML problems in BeautifulSoup
"""
# Javascript code in this page generates HTML markup
# that isn't parsed correctly by BeautifulSoup.
# To avoid this problem, all document.write fragments are removed
my_massage = copy.copy(BeautifulSoup.MARKUP_MASSAGE)
my_massage.append((re.compile(u"document.write(.+);"), lambda match: ""))
my_massage.append((re.compile(u'alt=".+">'), lambda match: ">"))
return my_massage | b2a7555b48f4a208545ffb75a4cc36c8c43a1eb7 | 13,158 |
def generate_test_images():
"""Generate all test images.
Returns
-------
results: dict
A dictionary mapping test case name to xarray images.
"""
results = {}
for antialias, aa_descriptor in antialias_options:
for canvas, canvas_descriptor in canvas_options:
for func in (generate_test_001,
generate_test_002,
generate_test_003,
generate_test_004,
generate_test_005,
generate_test_007,
):
points, name = func()
aggregators = draw_lines(canvas, points, antialias)
img = shade(aggregators, cmap=cmap01)
description = "{}_{}_{}".format(
name, aa_descriptor, canvas_descriptor)
results[description] = img
for func in (generate_test_006, ):
points, name = func()
aggregator = draw_multi_segment_line(canvas, points, antialias)
img = shade(aggregator, cmap=cmap01)
description = "{}_{}_{}".format(
name, aa_descriptor, canvas_descriptor)
results[description] = img
return results | d4de85956dfae0cc7d5405b55c21a5063c4dc2c6 | 13,159 |
from typing import Dict
from typing import Text
from typing import Type
import copy
def registered_metrics() -> Dict[Text, Type[Metric]]:
"""Returns standard TFMA metrics."""
return copy.copy(_METRIC_OBJECTS) | 0311def576648d6e621d35e6ac89f8cda1302029 | 13,160 |
def text_dataset_construction(train_or_test, janossy_k, task, janossy_k2, sequence_len, all_data_size=0):
""" Data Generation """
janossy_k = 1
janossy_k2 = 1
args = parse_args()
task = str(args.task).lower()
X = np.load('../data_'+str(task)+str(sequence_len)+'.npy')
output_X = np.load('../label_'+str(task)+str(sequence_len)+'.npy')
output_X = np.reshape(output_X,(output_X.shape[0],1))
total_len = X.shape[0]
if (all_data_size > 0):
total_len = all_data_size
train_len = int(total_len*0.4)
valid_len = int(total_len*0.2)
NUM_TRAINING_EXAMPLES = train_len
NUM_VALIDATION_EXAMPLES = valid_len
NUM_TEST_EXAMPLES = total_len - train_len - valid_len
#pdb.set_trace()
if train_or_test == 1:
X = X[0:train_len]
output_X = output_X[0:train_len]
num_examples = NUM_TRAINING_EXAMPLES
elif train_or_test == 2:
X = X[train_len:train_len+valid_len]
output_X = output_X[train_len:train_len+valid_len]
num_examples = NUM_VALIDATION_EXAMPLES
elif train_or_test == 0:
X = X[train_len+valid_len:]
output_X = output_X[train_len+valid_len:]
num_examples = NUM_TEST_EXAMPLES
set_numbers = X.shape[1]
train_length = X.shape[0]
if janossy_k == 1 and janossy_k2 == 1:
return X, output_X
else:
X_janossy = janossy_text_input_construction(X, janossy_k,janossy_k2)
return X_janossy, output_X | d55cdade5e2b9bd8a4a2d3ee1e80f0e15f390fc8 | 13,162 |
def neo_vis(task_id):
"""
Args:
task_id: identifier of the task/project to visualize.
Returns:
A redirect to the neovis page with the project's port and password.
"""
project = get_project_detail(task_id, current_user.id)
return redirect(
url_for(
"main.neovis_page",
port=project["remark"]["port"],
pwd=project["remark"]["password"],
)
) | cb0af50364e857d8febb8771abd0222a6d993b2e | 13,163 |
def getfont(
fontname=None,
fontsize=None,
sysfontname=None,
bold=None,
italic=None,
underline=None):
"""Monkey-patch for ptext.getfont().
This will use our loader and therefore obey our case validation, caching
and so on.
"""
fontname = fontname or ptext.DEFAULT_FONT_NAME
fontsize = fontsize or ptext.DEFAULT_FONT_SIZE
key = (
fontname,
fontsize,
sysfontname,
bold,
italic,
underline
)
if key in ptext._font_cache:
return ptext._font_cache[key]
if fontname is None:
font = ptext._font_cache.get(key)
if font:
return font
font = pygame.font.Font(fontname, fontsize)
else:
font = fonts.load(fontname, fontsize)
if bold is not None:
font.set_bold(bold)
if italic is not None:
font.set_italic(italic)
if underline is not None:
font.set_underline(underline)
ptext._font_cache[key] = font
return font | 04f12244126efd8cf6f274991193a2d71f8797f5 | 13,164 |
def change_box(base_image,box,change_array):
"""
Assumption 1: Contents of box are as follows
[x1, y1, width, height]
"""
height, width, _ = base_image.shape
new_box = [0,0,0,0]
for i,value in enumerate(change_array):
if value != 0:
new_box[i] = box[i] + value
else:
new_box[i] = box[i]
assert new_box[0] >= 0
assert new_box[1] >= 0
assert new_box[0]+new_box[2] <= width
assert new_box[1]+new_box[3] <= height
return new_box | 960b9f2c3ab1b65e9c7a708eac700dfaf65c67ac | 13,165 |
def fetchRepositoryFilter(critic, filter_id):
"""Fetch a RepositoryFilter object with the given filter id"""
assert isinstance(critic, api.critic.Critic)
return api.impl.filters.fetchRepositoryFilter(critic, int(filter_id)) | 76aa247ddf63838ff16131d0d7f1a04092ef3c41 | 13,166 |
import pickle as pk
def load_it(file_path: str, verbose: bool = False) -> object:
"""Loads from the given file path a saved object.
Args:
file_path: String file path (with extension).
verbose: Whether to print info about loading successfully or not.
Returns:
The loaded object.
Raises:
None.
"""
obj = None
with open(file_path, 'rb') as handle:
obj = pk.load(handle)
if verbose:
print('{} is successfully loaded.'.format(file_path))
return obj | 59795488dffbc1a69556b2619e8502cfa23d6d63 | 13,167 |
def connect_contigs(contigs, align_net_file, fill_min, out_dir):
"""Connect contigs across genomes by forming a graph that includes
net format aligning regions and contigs. Compute contig components
as connected components of that graph."""
# construct align net graph and write net BEDs
if align_net_file is None:
graph_contigs_nets = nx.Graph()
else:
graph_contigs_nets = make_net_graph(align_net_file, fill_min, out_dir)
# add contig nodes
for ctg in contigs:
ctg_node = GraphSeq(ctg.genome, False, ctg.chr, ctg.start, ctg.end)
graph_contigs_nets.add_node(ctg_node)
# intersect contigs BED w/ nets BED, adding graph edges.
intersect_contigs_nets(graph_contigs_nets, 0, out_dir)
intersect_contigs_nets(graph_contigs_nets, 1, out_dir)
# find connected components
contig_components = []
for contig_net_component in nx.connected_components(graph_contigs_nets):
# extract only the contigs
cc_contigs = [contig_or_net for contig_or_net in contig_net_component if contig_or_net.net is False]
if cc_contigs:
# add to list
contig_components.append(cc_contigs)
# write summary stats
comp_out = open('%s/contig_components.txt' % out_dir, 'w')
for ctg_comp in contig_components:
ctg_comp0 = [ctg for ctg in ctg_comp if ctg.genome == 0]
ctg_comp1 = [ctg for ctg in ctg_comp if ctg.genome == 1]
ctg_comp0_nt = sum([ctg.end-ctg.start for ctg in ctg_comp0])
ctg_comp1_nt = sum([ctg.end-ctg.start for ctg in ctg_comp1])
ctg_comp_nt = ctg_comp0_nt + ctg_comp1_nt
cols = [len(ctg_comp), len(ctg_comp0), len(ctg_comp1)]
cols += [ctg_comp0_nt, ctg_comp1_nt, ctg_comp_nt]
cols = [str(c) for c in cols]
print('\t'.join(cols), file=comp_out)
comp_out.close()
return contig_components | dc262d7469f524d8b37eebc50787a6e687a1ff90 | 13,168 |
import torch
import time
def train(loader, model, crit, opt, epoch):
"""Training of the CNN.
Args:
loader (torch.utils.data.DataLoader): Data loader
model (nn.Module): CNN
crit (torch.nn): loss
opt (torch.optim.SGD): optimizer for every parameters with True
requires_grad in model except top layer
epoch (int)
"""
batch_time = AverageMeter()
losses = AverageMeter()
data_time = AverageMeter()
forward_time = AverageMeter()
backward_time = AverageMeter()
# switch to train mode
model.train()
# create an optimizer for the last fc layer
optimizer_tl = torch.optim.SGD(
model.top_layer.parameters(),
lr=args.lr,
weight_decay=10 ** args.wd,
)
end = time.time()
print(epoch)
for i, (input_tensor, target) in enumerate(loader):
data_time.update(time.time() - end)
# save checkpoint
n = len(loader) * epoch + i
input_var = torch.autograd.Variable(input_tensor.cuda())
target_var = torch.autograd.Variable(target.cuda())
output = model(input_var)
loss = crit(output, target_var)
# record loss (`loss.data[0]` is deprecated indexing; use .item())
losses.update(loss.item(), input_tensor.size(0))
# compute gradient and do SGD step
opt.zero_grad()
optimizer_tl.zero_grad()
loss.backward()
opt.step()
optimizer_tl.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# sava_params(epoch, model, opt, r'mobilenetv1_30')
if (epoch + 1) / 10 == 1:
save_net(model, epoch)
sava_params(epoch, model, opt, r'mobilenetv3_small_10')
if (epoch + 1) / 30 == 1:
save_net(model, epoch)
sava_params(epoch, model, opt, r'mobilenetv3_small_30')
if (epoch + 1) / 60 == 1:
save_net(model, epoch)
sava_params(epoch, model, opt, r'mobilenetv3_small_60')
if (epoch + 1) / 90 == 1:
save_net(model, epoch)
sava_params(epoch, model, opt, r'mobilenetv3_small_90')
if (epoch + 1) / 100 == 1:
save_net(model, epoch)
sava_params(epoch, model, opt, r'mobilenetv3_small_100')
return losses.avg | 64a8213d103f57b3305060b42f41c94a3d710759 | 13,169 |
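# `AverageMeter` is not included in this snippet; the usual PyTorch-examples helper
# looks like the following (a standard sketch, assumed rather than copied from here):
class AverageMeter:
    """Tracks the current value, running sum, count and average of a metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0.0
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count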
import numpy as np
def add_scatter(x, scatter, in_place=False):
"""
Add a Gaussian scatter to x.
Parameters
----------
x : array_like
Values to add scatter to.
scatter : float
Standard deviation (sigma) of the Gaussian.
in_place : bool, optional
Whether to add the scatter to x in place or return a
new array.
Returns
-------
x : array_like
x with the added scatter.
"""
if in_place:
x += np.random.randn(*x.shape)*float(scatter)
else:
x = np.asarray(x)
x = x + np.random.randn(*x.shape)*float(scatter)
return x | 27c1423441f7841284201afd873c2c6050812d5f | 13,171 |
def validate_job_state(state):
"""
Validates whether a returned Job State has all the required fields with the right format.
If all is well, returns True,
otherwise this prints out errors to the command line and returns False.
Can be just used with assert in tests, like "assert validate_job_state(state)"
"""
required_fields = {
"job_id": str,
"user": str,
"wsid": int,
"authstrat": str,
"job_input": dict,
"updated": int,
"created": int,
"status": str,
}
optional_fields = {
"estimating": int,
"queued": int,
"running": int,
"finished": int,
"error_code": int,
"terminated_code": int,
"errormsg": str,
}
timestamp_fields = [
"created",
"updated",
"estimating",
"queued",
"running",
"completed",
]
# fields that have to be present based on the context of different statuses
valid_statuses = vars(Status)["_member_names_"]
status_context = {
"estimating": ["estimating"],
"running": ["running"],
"completed": ["completed"],
"error": ["error_code", "errormsg"],
"terminated": ["terminated_code"],
}
# 1. Make sure required fields are present and of the correct type
missing_reqs = list()
wrong_reqs = list()
for req in required_fields.keys():
if req not in state:
missing_reqs.append(req)
elif not isinstance(state[req], required_fields[req]):
wrong_reqs.append(req)
if missing_reqs or wrong_reqs:
print(f"Job state is missing required fields: {missing_reqs}.")
for req in wrong_reqs:
print(
f"Job state has faulty req - {req} should be of type {required_fields[req]}, but had value {state[req]}."
)
return False
# 2. Make sure that context-specific fields are present and the right type
status = state["status"]
if status not in valid_statuses:
print(f"Job state has invalid status {status}.")
return False
if status in status_context:
context_fields = status_context[status]
missing_context = list()
wrong_context = list()
for field in context_fields:
if field not in state:
missing_context.append(field)
elif not isinstance(state[field], optional_fields[field]):
wrong_context.append(field)
if missing_context or wrong_context:
print(f"Job state is missing status context fields: {missing_context}.")
for field in wrong_context:
print(
f"Job state has faulty context field - {field} should be of type {optional_fields[field]}, but had value {state[field]}."
)
return False
# 3. Make sure timestamps are really timestamps
bad_ts = list()
for ts_type in timestamp_fields:
if ts_type in state:
is_second_ts = is_timestamp(state[ts_type])
if not is_second_ts:
print(state[ts_type], "is not a second ts")
is_ms_ts = is_timestamp(state[ts_type] / 1000)
if not is_ms_ts:
print(state[ts_type], "is not a millisecond ts")
if not is_second_ts and not is_ms_ts:
bad_ts.append(ts_type)
if bad_ts:
for ts_type in bad_ts:
print(
f"Job state has a malformatted timestamp: {ts_type} with value {state[ts_type]}"
)
raise MalformedTimestampException()
return True | f108b7a80dee7777931aae994384d47f4a474d67 | 13,172 |
def get_urls(session):
"""
Function to get all urls of article in a table.
:param session: session establishes all conversations with the database and represents a “holding zone”.
:type session: sqlalchemy.session
:returns: list of article URLs stored in the table
"""
url = session.query(Article.url)
return [u[0] for u in url] | ad5e4797c1a41c63ef225becaee1a9b8814a3ea2 | 13,173 |
def bboxes_protection(boxes, width, height):
"""
:param boxes: boxes as [x1, y1, x2, y2] rows (list or ndarray)
:param width: image width used to clip the x coordinates
:param height: image height used to clip the y coordinates
:return: boxes clipped to lie inside the image bounds
"""
if not isinstance(boxes, np.ndarray):
boxes = np.asarray(boxes)
if len(boxes) > 0:
boxes[:, [0, 2]] = np.clip(boxes[:, [0, 2]], 0, width - 1)
boxes[:, [1, 3]] = np.clip(boxes[:, [1, 3]], 0, height - 1)
return boxes | 8ab0c64788815f6ec66f42c90a4c2debc0627548 | 13,175 |
def mark_astroids(astroid_map):
"""
Mark all coordinates in the grid that contain an asteroid (# sign)
"""
astroids = []
for row, _ in enumerate(astroid_map):
for col, _ in enumerate(astroid_map[row]):
if astroid_map[row][col] == "#":
astroid_map[row][col] = ASTROID
astroids.append((row, col))
else:
astroid_map[row][col] = SPACE
return astroids | 36ac179f1cbc040142bea8381c4c85f90c81ecba | 13,176 |
def process_player_data(
prefix, season=CURRENT_SEASON, gameweek=NEXT_GAMEWEEK, dbsession=session
):
"""
transform the player dataframe, basically giving a list (for each player)
of lists of minutes (for each match, and a list (for each player) of
lists of ["goals","assists","neither"] (for each match)
"""
df = get_player_history_df(
prefix, season=season, gameweek=gameweek, dbsession=dbsession
)
df["neither"] = df["team_goals"] - df["goals"] - df["assists"]
df.loc[(df["neither"] < 0), ["neither", "team_goals", "goals", "assists"]] = [
0.0,
0.0,
0.0,
0.0,
]
alpha = get_empirical_bayes_estimates(df)
y = df.sort_values("player_id")[["goals", "assists", "neither"]].values.reshape(
(
df["player_id"].nunique(),
df.groupby("player_id").count().iloc[0]["player_name"],
3,
)
)
minutes = df.sort_values("player_id")["minutes"].values.reshape(
(
df["player_id"].nunique(),
df.groupby("player_id").count().iloc[0]["player_name"],
)
)
nplayer = df["player_id"].nunique()
nmatch = df.groupby("player_id").count().iloc[0]["player_name"]
player_ids = np.sort(df["player_id"].unique())
return (
dict(
nplayer=nplayer,
nmatch=nmatch,
minutes=minutes.astype("int64"),
y=y.astype("int64"),
alpha=alpha,
),
player_ids,
) | dcbf210242509fa3df1ae5ca35614d802c460381 | 13,177 |
def copy_to_table(_dal, _values, _field_names, _field_types, _table_name, _create_table=None, _drop_existing=None):
"""Copy a matrix of data into a table on the resource, return the table name.
:param _dal: An instance of DAL(qal.dal.DAL)
:param _values: The a list(rows) of lists(values) with values to be inserted
:param _field_names: The name of the fields(columns)
:param _field_types: The field types(qal.sql.types)
:param _table_name: The name of the destination tables
:param _create_table: Create the destination table based on _field_names, _field_types
:param _drop_existing: If a table with the same name as the destination table already exists, drop it
:return: The name of the destination table.
"""
if _drop_existing:
try:
_dal.execute(VerbDropTable(_table_name).as_sql(_dal.db_type))
_dal.commit()
except Exception as e:
print("copy_to_table - Ignoring error when dropping the table \"" + _table_name + "\": " + str(e))
if _create_table:
# Always create temporary table even if it ends up empty.
_create_table_sql = create_table_skeleton(_table_name, _field_names, _field_types).as_sql(_dal.db_type)
print("Creating " + _table_name + " table in "+ str(_dal) +"/" + str(_dal.connection) +", sql:\n" + _create_table_sql)
_dal.execute(_create_table_sql)
_dal.commit()
if len(_values) == 0:
print("copy_to_table: No source data, inserting no rows.")
else:
_insert_sql = make_insert_sql_with_parameters(_table_name, _field_names, _dal.db_type, _field_types)
print("Inserting " + str(len(_values)) + " rows (" + str(len(_values[0])) + " columns)")
_dal.executemany(_insert_sql, _values)
_dal.commit()
return _table_name | 765ae0310811fe64b063c88182726174411960a0 | 13,178 |
import numpy as np
from scipy.optimize import minimize
def Uni(A, b, x=None, maxQ=False, x0=None, tol=1e-12, maxiter=1e3):
"""
Compute the recognizing functional Uni.
If maxQ=True, the maximum of the functional is found.
Parameters:
A: Interval
Matrix of the interval linear system.
b: Interval
Right-hand side vector of the interval linear system.
Optional Parameters:
x: float, array_like
Point at which the recognizing functional is evaluated.
By default x is an array of zeros.
maxQ: bool
If True, the functional is maximized.
x0: float, array_like
Initial guess for the optimization.
tol: float
Tolerance for terminating the optimization process.
maxiter: int
Maximum number of iterations.
Returns:
out: float, tuple
The value of the recognizing functional at the point x.
If maxQ=True, a tuple is returned where
the first element is the success flag of the optimization,
the second element is the optimum point,
the third element is the value of the functional at that point.
"""
__uni = lambda x: min(b.rad - (b.mid - A @ x).mig)
__minus_uni = lambda x: -__uni(x)
if maxQ==False:
if x is None:
x = np.zeros(A.shape[1])
return __uni(x)
else:
if x0 is None:
x0 = np.zeros(A.shape[1])+1
maximize = minimize(__minus_uni, x0, method='Nelder-Mead', tol=tol,
options={'maxiter': maxiter})
return maximize.success, maximize.x, -maximize.fun | d81f8f38e4b2f196c79eaaa00c1b604cf119b1bb | 13,180 |
def boqa(alpha, beta, query, items_stat):
"""Implementation of the BOQA algorithm.
Args:
alpha (float): False positive rate.
beta (float): False negative rate.
query (dict): Dict of query terms (standard terms). Key: term name, value: presence value
items_stat (dict): Dictionary of items statistics. Key: disease, Value: list of items
Returns:
[dict]: Dictionary of diseases and their prediction probabilities.
"""
hidden = {}
p = {}
a = {}
a_init = 0
# For each disease
for disease in items_stat:
# We initialize the hidden layer with values from the stats
for term in query:
if term in items_stat[disease]["feature"].keys():
proba = items_stat[disease]["feature"][term]
hidden[term] = np.random.choice([1, 0], p=[proba, 1 - proba])
else:
hidden[term] = 0
# Cardinality calculation of terms between H and Q
m = matrix_m(query, hidden)
a[disease] = (
pow(beta, m[0, 1])
* pow(1 - beta, m[1, 1])
* pow(1 - alpha, m[0, 0])
* pow(alpha, m[1, 0])
)
a_init += a[disease]
for disease in items_stat:
p[disease] = a[disease] / a_init
return p | 37ea565fa05b4a9bceeeb50f31e79394e9534966 | 13,181 |
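# `matrix_m` is not shown above. From the exponents used in `boqa`, m[q, h] must be
# a 2x2 contingency count over query value q and hidden value h (an assumption):
import numpy as np

def matrix_m(query, hidden):
    # m[0, 1]: query off / hidden on  (false negatives, weighted by beta)
    # m[1, 0]: query on  / hidden off (false positives, weighted by alpha)
    m = np.zeros((2, 2), dtype=int)
    for term, q_value in query.items():
        m[int(q_value), int(hidden[term])] += 1
    return m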
from numpy.linalg import inv
def d2c(sys,method='zoh'):
"""Continous to discrete conversion with ZOH method
Call:
sysc=c2d(sys,method='log')
Parameters
----------
sys : System in statespace or Tf form
method: 'zoh' or 'bi'
Returns
-------
sysc: continous system ss or tf
"""
flag = 0
if isinstance(sys, TransferFunction):
sys=tf2ss(sys)
flag=1
a=sys.A
b=sys.B
c=sys.C
d=sys.D
Ts=sys.dt
n=shape(a)[0]
nb=shape(b)[1]
nc=shape(c)[0]
tol=1e-12
if method=='zoh':
if n==1:
if b[0,0]==1:
A=0
B=b/sys.dt
C=c
D=d
else:
tmp1=hstack((a,b))
tmp2=hstack((zeros((nb,n)),eye(nb)))
tmp=vstack((tmp1,tmp2))
s=logm(tmp)
s=s/Ts
if norm(imag(s),ord='inf') > sqrt(sp.finfo(float).eps):
print("Warning: accuracy may be poor")
s=real(s)
A=s[0:n,0:n]
B=s[0:n,n:n+nb]
C=c
D=d
elif method=='foh':
a=mat(a)
b=mat(b)
c=mat(c)
d=mat(d)
Id = mat(eye(n))
A = logm(a)/Ts
A = real(around(A,12))
Amat = mat(A)
B = (a-Id)**(-2)*Amat**2*b*Ts
B = real(around(B,12))
Bmat = mat(B)
C = c
D = d - C*(Amat**(-2)/Ts*(a-Id)-Amat**(-1))*Bmat
D = real(around(D,12))
elif method=='bi':
a=mat(a)
b=mat(b)
c=mat(c)
d=mat(d)
poles=eigvals(a)
if any(abs(poles-1)<200*sp.finfo(float).eps):
print("d2c: some poles very close to one. May get bad results.")
I=mat(eye(n,n))
tk = 2 / sqrt (Ts)
A = (2/Ts)*(a-I)*inv(a+I)
iab = inv(I+a)*b
B = tk*iab
C = tk*(c*inv(I+a))
D = d- (c*iab)
else:
print("Method not supported")
return
sysc=StateSpace(A,B,C,D)
#print("Teste ", sysc)
if flag==1:
sysc=ss2tf(sysc)
return sysc | 41bb37fcf5b8726b5f20f54f492a568f508725fc | 13,182 |
import ipaddress
def ipv4(value):
"""
Parses the value as an IPv4 address and returns it.
"""
try:
return ipaddress.IPv4Address(value)
except ValueError:
return None | 499918424fe6a94d555379b5fc907367666f1cde | 13,183 |
from typing import Callable
from typing import Mapping
from typing import cast
from typing import Any
def test_from(
fork: str,
) -> Callable[
[Callable[[], StateTest]], Callable[[str], Mapping[str, Fixture]]
]:
"""
Decorator that takes a test generator and fills it for all forks after the
specified fork.
"""
fork = fork.capitalize()
def decorator(
fn: Callable[[], StateTest]
) -> Callable[[str], Mapping[str, Fixture]]:
def inner(engine) -> Mapping[str, Fixture]:
return fill_state_test(fn(), forks_from(fork), engine)
cast(Any, inner).__filler_metadata__ = {
"fork": fork,
"name": fn.__name__.lstrip("test_"),
}
return inner
return decorator | 6c8704978c3ab37bb2ad8434b65359683bd76bbb | 13,184 |
import hashlib
def get_hash_name(feed_id):
"""
Generate a unique identifier for a user-submitted feed from its hash value.
"""
return hashlib.md5(feed_id.encode('utf8')).hexdigest() | edd1caf943635a091c79831cc6151ecfa840e435 | 13,185 |
from typing import Dict
from typing import Counter
def merge_lineages(counts: Dict[str, int], min_count: int) -> Dict[str, str]:
"""
Given a dict of lineage counts and a min_count, returns a mapping from all
lineages to merged lineages.
"""
assert isinstance(counts, dict)
assert isinstance(min_count, int)
assert min_count > 0
# Merge rare children into their parents.
counts: Dict[str, int] = Counter({decompress(k): v for k, v in counts.items()})
mapping = {}
for child in sorted(counts, key=lambda k: (-len(k), k)):
if counts[child] < min_count:
parent = get_parent(child)
if parent is None:
continue # at a root
counts[parent] += counts.pop(child)
mapping[child] = parent
# Transitively close.
for old, new in list(mapping.items()):
while new in mapping:
new = mapping[new]
mapping[old] = new
# Recompress.
mapping = {compress(k): compress(v) for k, v in mapping.items()}
return mapping | 3eea022d840e4e61d46041dffb188cbd73ed097b | 13,187 |
def str2bool(s):
"""特定の文字列をbool値にして返す。
s: bool値に変換する文字列(true, false, 1, 0など)。
"""
if isinstance(s, bool):
return s
else:
s = s.lower()
if s == "true":
return True
elif s == "false":
return False
elif s == "1":
return True
elif s == "0":
return False
else:
raise ValueError("%s is incorrect value!" % (s)) | 54b991e234896c0ad684ce5f0f2ccceeada65d8e | 13,188 |
def show_map_room(room_id=None):
"""Display a room on a map."""
return get_map_information(room_id=room_id) | 84c1e90ce5b0a210b75f104da429f03dff7b2ca1 | 13,191 |
def parse_line_regex(line):
"""Parse raw data line into list of floats using regex.
This regex approach works, but is very slow!! It also requires two helper functions to clean up
malformed data written by ls-dyna (done on purpose, probably to save space).
Args:
line (str): raw data line from nodout
Returns:
raw_data (list of floats): [nodeID, xdisp, ydisp, zdisp]
"""
try:
raw_data = line.split()
raw_data = [float(x) for x in raw_data]
except ValueError:
line = correct_neg(line)
line = correct_Enot(line)
raw_data = line.split()
raw_data = [float(x) for x in raw_data[0:4]]
return raw_data | b8b18a8d47f5f1c9a682ca86668bf311282b0439 | 13,193 |
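# `correct_neg` and `correct_Enot` are not shown above. ls-dyna writes fixed-width
# fields where a negative number can butt against the previous one ("1.23-4.56") and
# where the exponent's "E" is dropped ("1.234-004"). Illustrative sketches under
# those assumptions (the project's exact regexes may differ):
import re

def correct_neg(line):
    # Split run-together numbers, e.g. "1.23-4.56" -> "1.23 -4.56"; requiring a
    # decimal point after the minus leaves bare exponents like "-004" alone.
    return re.sub(r'(\d)(-\d+\.)', r'\1 \2', line)

def correct_Enot(line):
    # Restore a dropped exponent marker, e.g. "1.234-004" -> "1.234E-004".
    return re.sub(r'(\d)([+-]\d{3}\b)', r'\1E\2', line)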
def create_page_metadata(image_dir,
image_dir_path,
font_files,
text_dataset,
speech_bubble_files,
speech_bubble_tags):
"""
This function creates page metadata for a single page. It includes
transforms, background addition, random panel removal,
panel shrinking, and the populating of panels with
images and speech bubbles.
:param image_dir: List of images to pick from
:type image_dir: list
:param image_dir_path: Path of images dir to add to
panels
:type image_dir_path: str
:param font_files: list of font files for speech bubble
text
:type font_files: list
:param text_dataset: A dask dataframe of text to
pick to render within speech bubble
:type text_dataset: pandas.dataframe
:param speech_bubble_files: list of base speech bubble
template files
:type speech_bubble_files: list
:param speech_bubble_tags: a list of speech bubble
writing area tags by filename
:type speech_bubble_tags: list
:return: Created Page with all the bells and whistles
:rtype: Page
"""
# Select page type
page_type = np.random.choice(
list(cfg.vertical_horizontal_ratios.keys()),
p=list(cfg.vertical_horizontal_ratios.values())
)
# Select number of panels on the page
# between 1 and 8
number_of_panels = np.random.choice(
list(cfg.num_pages_ratios.keys()),
p=list(cfg.num_pages_ratios.values())
)
page = get_base_panels(number_of_panels, page_type)
if np.random.random() < cfg.panel_transform_chance:
page = add_transforms(page)
page = shrink_panels(page)
page = populate_panels(page,
image_dir,
image_dir_path,
font_files,
text_dataset,
speech_bubble_files,
speech_bubble_tags
)
if np.random.random() < cfg.panel_removal_chance:
page = remove_panel(page)
if number_of_panels == 1:
page = add_background(page, image_dir, image_dir_path)
else:
if np.random.random() < cfg.background_add_chance:
page = add_background(page, image_dir, image_dir_path)
return page | 45499abf5374c8eaaa55a03f8ef0bb7fca6e18f5 | 13,194 |
def __merge_results(
result_list: tp.List[tp.Dict[str, tp.Dict[str, tp.Set[tp.Union[CVE, CWE]]]]]
) -> tp.Dict[str, tp.Dict[str, tp.Set[tp.Union[CVE, CWE]]]]:
"""
Merge a list of results into one dictionary.
Args:
result_list: a list of ``commit -> cve`` maps to be merged
Return:
the merged dictionary with line number as key and commit hash, a list of
unique CVE's and a list of unique CWE's as values
"""
results: tp.Dict[str, tp.Dict[str, tp.Set[tp.Union[
CVE, CWE]]]] = defaultdict(lambda: defaultdict(set))
for unmerged in result_list:
for entry in unmerged.keys():
results[entry]['cve'].update(unmerged[entry]['cve'])
results[entry]['cwe'].update(unmerged[entry]['cwe'])
return results | 685ce8e22483fc1dfe423763367857db22065a3c | 13,195 |
import numpy as np
from scipy.spatial import ConvexHull
def compactness(xyz):
"""
Input: xyz
Output: compactness (V^2/SA^3) of convex hull of 3D points.
"""
xyz = np.array(xyz)
ch = ConvexHull(xyz, qhull_options="QJ")
return ch.volume**2/ch.area**3 | b1185e8aaec5962e39866594aeccdd5d5ae2807d | 13,196 |
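# Scale check (standard geometry, not from the source): the isoperimetric inequality
# gives A**3 >= 36*pi*V**2, so compactness is at most 1/(36*pi) ~ 8.84e-3, attained
# by a sphere. A quick numerical sanity check on points covering the unit sphere:
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    pts = rng.normal(size=(20000, 3))
    pts /= np.linalg.norm(pts, axis=1, keepdims=True)  # project onto the unit sphere
    print(compactness(pts), 1 / (36 * np.pi))          # close to the sphere bound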
def guide(batch, z_dim, hidden_dim, out_dim=None, num_obs_total=None):
"""Defines the probabilistic guide for z (variational approximation to posterior): q(z) ~ p(z|q)
:param batch: a batch of observations
:return: (named) sampled z from the variational (guide) distribution q(z)
"""
assert(jnp.ndim(batch) == 3)
batch_size = jnp.shape(batch)[0]
batch = jnp.reshape(batch, (batch_size, -1)) # squash each data item into a one-dimensional array (preserving only the batch size on the first axis)
out_dim = jnp.shape(batch)[1]
num_obs_total = batch_size if num_obs_total is None else num_obs_total
encode = numpyro.module('encoder', encoder(hidden_dim, z_dim), (batch_size, out_dim))
with plate('batch', num_obs_total, batch_size):
z_loc, z_std = encode(batch) # obtain mean and variance for q(z) ~ p(z|x) from encoder
z = sample('z', dist.Normal(z_loc, z_std).to_event(1)) # z follows q(z)
return z | 1198ee7b12bed9118d7bb865ed45ff10eef917d4 | 13,198 |
def trip2str(trip):
""" Pretty-printing. """
header = "{} {} {} - {}:".format(trip['departureTime'],
trip['departureDate'], trip['origin'],
trip['destination'])
output = [header]
for subtrip in trip['trip']:
originstr = u'{}....{}'.format(subtrip['departureTime'],
subtrip['origin'])
output.append(originstr)
for subsubtrip in subtrip['trip']:
t = subsubtrip['arrivalTime']
d = subsubtrip['stop']
intermediatestr = t+u'.'*8+d
output.append(intermediatestr)
destinationstr = u'{}....{}'.format(subtrip['arrivalTime'],
subtrip['destination'])
output.append(destinationstr)
return "\n".join(output) | 67daf3feb6b81d40d3102a8c610b20e68571b131 | 13,199 |
def station_suffix(station_type):
""" Simple switch, map specific types on to single letter. """
suffix = ' (No Dock)'
if 'Planetary' in station_type and station_type != 'Planetary Settlement':
suffix = ' (P)'
elif 'Starport' in station_type:
suffix = ' (L)'
elif 'Asteroid' in station_type:
suffix = ' (AB)'
elif 'Outpost' in station_type:
suffix = ' (M)'
elif 'Carrier' in station_type:
suffix = ' (C)'
return suffix | c28c4d3f0da8401ffc0721a984ec2b2e2cd50b24 | 13,200 |
def temperatures_equal(t1, t2):
"""Handle 'off' reported as 126.5, but must be set as 0."""
if t1 == settings.TEMPERATURE_OFF:
t1 = 0
if t2 == settings.TEMPERATURE_OFF:
t2 = 0
return t1 == t2 | 5c054d23317565e474f6c8d30b7d87c0b832fe9f | 13,201 |
import csv
def import_capitals_from_csv(path):
"""Imports a dictionary that maps country names to capital names.
@param string path: The path of the CSV file to import this data from.
@return dict: A dictionary of the format {"Germany": "Berlin", "Finland": "Helsinki", ...}
"""
capitals = {}
with open(path) as capitals_file:
reader = csv.reader(capitals_file)
for row in reader:
country, capital = row[0], row[1]
capitals[country] = capital
return capitals | 3c6a9c91df455cb8721371fe40b248fb7af8d866 | 13,203 |
def deg_to_qcm2(p, deg):
"""Return the center-of-momentum momentum transfer q squared, in MeV^2.
Parameters
----------
p : float
relative momentum given in MeV.
deg : number
angle measure given in degrees
"""
return (p * np.sqrt( 2 * (1 - np.cos(np.radians(deg))) ))**(2) | 4b1840a8c672b443ac1954c0bde0a81a01338862 | 13,205 |
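# The expression above is the standard elastic-scattering relation (not from the source):
#   q = 2 * p * sin(theta / 2)  =>  q**2 = 4 * p**2 * sin(theta/2)**2 = 2 * p**2 * (1 - cos(theta))
# which is exactly (p * sqrt(2 * (1 - cos(theta))))**2 with theta in radians; e.g. at
# 180 degrees (back-scattering) deg_to_qcm2(p, 180.0) == 4 * p**2, i.e. q = 2p.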
from django.conf import settings
def i18n(request):
"""
Set client language preference, lasts for one month
"""
next = request.META.get('HTTP_REFERER', None)
if not next:
next = settings.SITE_ROOT
lang = request.GET.get('lang', 'en')
res = HttpResponseRedirect(next)
res.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang, max_age=30*24*60*60)
return res | a56289c03b14b76719fc31bcee7e0e4e590f47ef | 13,206 |
import random
def normal222(startt,endt,money2,first,second,third,forth,fifth,sixth,seventh,zz1,zz2,bb1,bb2,bb3,aa1,aa2):
"""
for source and destination id generation
"""
"""
for type of banking work,label of fraud and type of fraud
"""
idvariz=random.choice(bb3)
idgirande=random.choice(zz2)
first.append("transfer")
second.append(idvariz)
third.append(idgirande)
sixth.append("0")
seventh.append("none")
"""
for amount of money generation
"""
numberofmoney=random.randrange(50000,money2)
forth.append(numberofmoney)
"""
for date and time generation randomly between two dates
"""
final=randomDate(startt,endt, random.random())
fifth.append(final)
return (first,second,third,forth,fifth,sixth,seventh) | 5d0a4cfbd5e7ef3223cc67c4759967e86c4081c8 | 13,208 |
def _convert_to_coreml(tf_model_path, mlmodel_path, input_name_shape_dict,
output_names):
""" Convert and return the coreml model from the Tensorflow
"""
model = tf_converter.convert(tf_model_path=tf_model_path,
mlmodel_path=mlmodel_path,
output_feature_names=output_names,
input_name_shape_dict=input_name_shape_dict)
return model | b7266a1afe0f03717a8e710f9099a0349cc5b085 | 13,209 |
def _tavella_randell_nonuniform_grid(x_min, x_max, x_star, num_grid_points,
alpha, dtype):
"""Creates non-uniform grid clustered around a specified point.
Args:
x_min: A real `Tensor` of shape `(dim,)` specifying the lower limit of the
grid.
x_max: A real `Tensor` of same shape and dtype as `x_min` specifying the
upper limit of the grid.
x_star: A real `Tensor` of same shape and dtype as `x_min` specifying the
location on the grid around which higher grid density is desired.
num_grid_points: A scalar integer `Tensor` specifying the number of points
on the grid.
alpha: A scalar parameter which controls the degree of non-uniformity of the
grid. The smaller values of `alpha` correspond to greater degree of
clustering around `x_star`.
dtype: The default dtype to use when converting values to `Tensor`s.
Returns:
A real `Tensor` of shape `(dim, num_grid_points+1)` containing the
non-uniform grid.
"""
c1 = tf.math.asinh((x_min - x_star) / alpha)
c2 = tf.math.asinh((x_max - x_star) / alpha)
i = tf.expand_dims(tf.range(0, num_grid_points + 1, 1, dtype=dtype), axis=-1)
grid = x_star + alpha * tf.math.sinh(c2 * i / num_grid_points + c1 *
(1 - i / num_grid_points))
# reshape from (num_grid_points+1, dim) to (dim, num_grid_points+1)
return tf.transpose(grid) | 6c209871b3a8aba291b00a513056d5e1ebe111f8 | 13,210 |
import uuid
def tourme_details():
""" Display Guides loan-details """
return render_template('tourme_details.html', id=str(uuid.uuid4())) | e332546257670bd26d08be8baf4122f93feab170 | 13,212 |
import json
def gen_dot_ok(notebook_path, endpoint):
"""
Generates .ok file and return its name
Args:
notebook_path (``pathlib.Path``): the path to the notebook
endpoint (``str``): an endpoint specification for https://okpy.org
Returns:
``str``: the name of the .ok file
"""
assert notebook_path.suffix == '.ipynb', notebook_path
ok_path = notebook_path.with_suffix('.ok')
name = notebook_path.stem
src = [notebook_path.name]
with open(ok_path, 'w') as out:
json.dump({
"name": name,
"endpoint": endpoint,
"src": src,
"tests": {
"tests/q*.py": "ok_test"
},
"protocols": [
"file_contents",
"grading",
"backup"
]
}, out)
return ok_path.name | 850827a3da476cc64bd50c40c2504d9765b25dfe | 13,213 |
def sarig_methods_wide(
df: pd.DataFrame, sample_id: str, element_id: str,
) -> pd.DataFrame:
"""Create a corresponding methods table to match the pivoted wide form data.
.. note::
This requires the input dataframe to already have had methods mapping applied
by running ``pygeochemtools.geochem.create_dataset.add_sarig_chem_method``
function.
Args:
df (pd.DataFrame): Dataframe containing long form data.
sample_id (str): Name of column containing sample ID's.
element_id (str): Name of column containing geochemical element names.
Returns:
pd.DataFrame: Dataframe with mapped geochemical methods converted to wide form
with one method per sample.
"""
...
df = df
# grab duplicate values
duplicate_df = df[df.duplicated(subset=[sample_id, element_id], keep="last")]
df = df.drop_duplicates(subset=[sample_id, element_id])
method_code = (
df.pivot(index=[sample_id], columns=element_id, values=["CHEM_METHOD_CODE"],)
.add_suffix("_METHOD_CODE")
.droplevel(0, axis=1)
)
determination = (
df.pivot(index=[sample_id], columns=element_id, values=["DETERMINATION"],)
.add_suffix("_DETERMINATION")
.droplevel(0, axis=1)
)
digestion = (
df.pivot(index=[sample_id], columns=element_id, values=["DIGESTION"],)
.add_suffix("_DIGESTION")
.droplevel(0, axis=1)
)
fusion = (
df.pivot(index=[sample_id], columns=element_id, values=["FUSION"],)
.add_suffix("_FUSION")
.droplevel(0, axis=1)
)
assert (
method_code.columns.size
== determination.columns.size # noqa: W503
== digestion.columns.size # noqa: W503
== fusion.columns.size # noqa: W503
), "pivoted column lengths aren't equal"
c = np.empty(
(
method_code.columns.size
+ determination.columns.size # noqa: W503
+ digestion.columns.size # noqa: W503
+ fusion.columns.size, # noqa: W503
),
dtype=object,
)
c[0::4], c[1::4], c[2::4], c[3::4] = (
method_code.columns,
determination.columns,
digestion.columns,
fusion.columns,
)
df_wide = pd.concat([method_code, determination, digestion, fusion], axis=1)[c]
if not duplicate_df.empty:
try:
dup_method_code = (
duplicate_df.pivot(
index=[sample_id], columns=element_id, values=["CHEM_METHOD_CODE"],
)
.add_suffix("_METHOD_CODE")
.droplevel(0, axis=1)
)
dup_determination = (
duplicate_df.pivot(
index=[sample_id], columns=element_id, values=["DETERMINATION"],
)
.add_suffix("_DETERMINATION")
.droplevel(0, axis=1)
)
dup_digestion = (
duplicate_df.pivot(
index=[sample_id], columns=element_id, values=["DIGESTION"],
)
.add_suffix("_DIGESTION")
.droplevel(0, axis=1)
)
dup_fusion = (
duplicate_df.pivot(
index=[sample_id], columns=element_id, values=["FUSION"],
)
.add_suffix("_FUSION")
.droplevel(0, axis=1)
)
except ValueError as e:
print(
"There were duplicate duplicates in the method list. \
So no duplicates have been included in the output",
e,
)
else:
assert (
dup_method_code.columns.size
== dup_determination.columns.size # noqa: W503
== dup_digestion.columns.size # noqa: W503
== dup_fusion.columns.size # noqa: W503
), "pivoted column lengths aren't equal"
d = np.empty(
(
dup_method_code.columns.size
+ dup_determination.columns.size # noqa: W503
+ dup_digestion.columns.size # noqa: W503
+ dup_fusion.columns.size, # noqa: W503
),
dtype=object,
)
d[0::4], d[1::4], d[2::4], d[3::4] = (
dup_method_code.columns,
dup_determination.columns,
dup_digestion.columns,
dup_fusion.columns,
)
dup_df_wide = pd.concat(
[dup_method_code, dup_determination, dup_digestion, dup_fusion], axis=1
)[d]
df_wide = df_wide.append(dup_df_wide).sort_values(by=sample_id)
return df_wide | 95d969213de0702f3a6666e8e13ae2d37e404e3a | 13,214 |
def vflip_box(box: TensorOrArray, image_center: TensorOrArray) -> TensorOrArray:
"""Flip boxes vertically, which are specified by their (cx, cy, w, h) norm
coordinates.
Reference:
https://blog.paperspace.com/data-augmentation-for-bounding-boxes/
Args:
box (TensorOrArray[B, 4]):
Boxes to be flipped.
image_center (TensorOrArray[4]):
Center of the image.
Returns:
box (TensorOrArray[B, 4]):
Flipped boxes.
"""
box[:, [1, 3]] += 2 * (image_center[[0, 2]] - box[:, [1, 3]])
box_h = abs(box[:, 1] - box[:, 3])
box[:, 1] -= box_h
box[:, 3] += box_h
return box | 99128e7b6d928c1b58457fc6d51b971c109cc77e | 13,216 |
import pathlib
def example_data():
"""Example data setup"""
tdata = (
pathlib.Path(__file__).parent.absolute() / "data" / "ident-example-support.txt"
)
return tdata | a8c9a88f8850fecc7cc05fb8c9c18e03778f3365 | 13,217 |
def add_ending_slash(directory: str) -> str:
"""add_ending_slash function
Args:
directory (str): directory that you want to add ending slash
Returns:
str: directory name with slash at the end
Examples:
>>> add_ending_slash("./data")
"./data/"
"""
if directory[-1] != "/":
directory = directory + "/"
return directory | 2062a55b59707dd48e5ae56d8d094c806d8a2c1d | 13,218 |
import scipy
def antenna_positions():
"""
Generate antenna positions for a regular rectangular array, then return
baseline lengths.
- Nx, Ny : No. of antennas in x and y directions
- Dmin : Separation between neighbouring antennas
"""
# Generate antenna positions on a regular grid
x = np.arange(Nx) * Dmin
y = np.arange(Ny) * Dmin
xx, yy = np.meshgrid(x, y)
# Calculate baseline separations
xy = np.column_stack( (xx.flatten(), yy.flatten()) )
d = scipy.spatial.distance.pdist(xy)
return d | f47a0c887489987d8fc95205816459db55bbaa19 | 13,219 |
from typing import Union
from typing import List
def folds_to_list(folds: Union[list, str, pd.Series]) -> List[int]:
"""
This function formats string or either list of numbers
into a list of unique int
Args:
folds (Union[list, str, pd.Series]): Either list of numbers or
one string with numbers separated by commas or
pandas series
Returns:
List[int]: list of unique ints
Examples:
>>> folds_to_list("1,2,1,3,4,2,4,6")
[1, 2, 3, 4, 6]
>>> folds_to_list([1, 2, 3.0, 5])
[1, 2, 3, 5]
Raises:
ValueError: if value in string or array cannot be casted to int
"""
if isinstance(folds, str):
folds = folds.split(",")
elif isinstance(folds, pd.Series):
folds = list(sorted(folds.unique()))
return list({int(x) for x in folds}) | f499cce7992b77867fc3a7d95c8dd6efc83c3c79 | 13,220 |
import gc
def predict_all(x, model, config, spline):
"""
Predict full scene using average predictions.
Args:
x (numpy.array): image array
model (tf h5): image target size
config (Config):
spline (numpy.array):
Return:
prediction scene array average probabilities
----------
Example
----------
predict_all(x, model, config, spline)
"""
for i in range(8):
if i == 0: # reverse first dimension
x_seg = predict_windowing(
x[::-1, :, :], model, config, spline=spline
).transpose([2, 0, 1])
elif i == 1: # reverse second dimension
temp = predict_windowing(
x[:, ::-1, :], model, config, spline=spline
).transpose([2, 0, 1])
x_seg = temp[:, ::-1, :] + x_seg
elif i == 2: # transpose(interchange) first and second dimensions
temp = predict_windowing(
x.transpose([1, 0, 2]), model, config, spline=spline
).transpose([2, 0, 1])
x_seg = temp.transpose(0, 2, 1) + x_seg
gc.collect()
elif i == 3:
temp = predict_windowing(
np.rot90(x, 1), model, config, spline=spline
)
x_seg = np.rot90(temp, -1).transpose([2, 0, 1]) + x_seg
gc.collect()
elif i == 4:
temp = predict_windowing(
np.rot90(x, 2), model, config, spline=spline
)
x_seg = np.rot90(temp, -2).transpose([2, 0, 1]) + x_seg
elif i == 5:
temp = predict_windowing(
np.rot90(x, 3), model, config, spline=spline
)
x_seg = np.rot90(temp, -3).transpose(2, 0, 1) + x_seg
elif i == 6:
temp = predict_windowing(
x, model, config, spline=spline
).transpose([2, 0, 1])
x_seg = temp + x_seg
elif i == 7:
temp = predict_sliding(
x, model, config, spline=spline
).transpose([2, 0, 1])
x_seg = temp + x_seg
gc.collect()
del x, temp # delete arrays
x_seg /= 8.0
return x_seg.argmax(axis=0) | 6e5888b1d97c3a0924a67500c4b731fd00654cb2 | 13,221 |
def pre_arrange_cols(dataframe):
"""
Move the original column header into the data as the first row and rename the
single column to 'all'.
:param dataframe: single-column dataframe whose header is actually a data value
:return: re-sorted dataframe with the header inserted as row 0 and the column renamed to 'all'
"""
col_name = dataframe.columns.values[0]
dataframe.loc[-1] = col_name
dataframe.index = dataframe.index + 1
dataframe = dataframe.sort_index()
dataframe = dataframe.rename(index=str, columns={col_name: 'all'})
return dataframe | 522c0f4ca29b10d4a736d27f07d8e9dc80cafba5 | 13,222 |
import numpy
def wDot(x,y,h):
"""
Compute the parallel weighted dot product of vectors x and y using
weight vector h.
The weighted dot product is defined for a weight vector
:math:`\mathbf{h}` as
.. math::
(\mathbf{x},\mathbf{y})_h = \sum_{i} h_{i} x_{i} y_{i}
All weight vector components should be positive.
:param x,y,h: numpy arrays for vectors and weight
:return: the weighted dot product
"""
return globalSum(numpy.sum(x*y*h)) | e9bcc295517060f95004aec581055950358c9521 | 13,223 |
import pandas as pd
from functools import wraps
def dataframify(transform):
"""
Decorator which transforms the output of scikit-learn feature normalizers from an array to a dataframe.
Enables preservation of column names.
Args:
transform: (function), a scikit-learn feature selector that has a transform method
Returns:
new_transform: (function), an amended version of the transform method that returns a dataframe
"""
@wraps(transform)
def new_transform(self, df):
arr = transform(self, df.values)
return pd.DataFrame(arr, columns=df.columns, index=df.index)
return new_transform | cf21bb7aea90e742c83fb5e1abb41c5d01cddf4e | 13,224 |
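# Usage sketch for the decorator above (the `DemeanScaler` class is hypothetical,
# purely to show where `@dataframify` sits and what it preserves):
class DemeanScaler:
    @dataframify
    def transform(self, X):
        # X arrives as a plain numpy array because the decorator passed df.values.
        return X - X.mean(axis=0)

example = pd.DataFrame({'a': [1.0, 2.0, 3.0], 'b': [4.0, 6.0, 8.0]})
print(DemeanScaler().transform(example))  # still a DataFrame with columns 'a', 'b'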
def plot_map(self, map, update=False):
"""
map plotting
Parameters
----------
map : ndarray
map to plot
update : Bool
updating the map or plotting from scratch
"""
if update:
empty=np.empty(np.shape(self.diagnostics[self.diagnostic]))
empty[:]=np.nan
self.map.set_data(empty)
return self.map_window.imshow(map, origin='lower', interpolation='nearest',cmap=self.cmap, vmin=self.vmin, vmax=self.vmax)
else:
return self.map_window.imshow(map, origin='lower', interpolation='nearest',cmap=self.cmap, vmin=self.vmin, vmax=self.vmax) | a4effd8c7e958b694f2c01afdaa861455c855a0b | 13,225 |
def definition_activate(connection, args):
"""Activate Business Service Definition"""
activator = sap.cli.wb.ObjectActivationWorker()
activated_items = ((name, sap.adt.ServiceDefinition(connection, name)) for name in args.name)
return sap.cli.object.activate_object_list(activator, activated_items, count=len(args.name)) | 61dd5de0d8e24339c67363e904628390cc79b1da | 13,226 |
import re
def extractCompositeFigureStrings(latexString):
"""
Returns a list of latex figures as strings stripping out captions.
"""
# extract figures
figureStrings = re.findall(r"\\begin{figure}.*?\\end{figure}", latexString, re.S)
# filter composite figures only and remove captions (preserving captions in subfigures)
figureStrings = [
re.findall(r"\\begin{figure}.*(?=\n.*\\caption)", figureString, re.S)[0] + "\n\\end{figure}"
for figureString in figureStrings if "\\begin{subfigure}" in figureString
]
return figureStrings | 83a80c91890d13a6a0247745835e1ffb97d579f7 | 13,227 |
import time
def osm_net_download(
polygon=None,
north=None,
south=None,
east=None,
west=None,
network_type="all_private",
timeout=180,
memory=None,
date="",
max_query_area_size=50 * 1000 * 50 * 1000,
infrastructure='way["highway"]',
):
"""
Download OSM ways and nodes within some bounding box from the Overpass API.
Parameters
----------
polygon : shapely Polygon or MultiPolygon
geographic shape to fetch the street network within
north : float
northern latitude of bounding box
south : float
southern latitude of bounding box
east : float
eastern longitude of bounding box
west : float
western longitude of bounding box
network_type : string
{'walk', 'bike', 'drive', 'drive_service', 'all', 'all_private'} what
type of street network to get
timeout : int
the timeout interval for requests and to pass to API
memory : int
server memory allocation size for the query, in bytes. If none, server
will use its default allocation size
date : string
query the database at a certain timestamp
max_query_area_size : float
max area for any part of the geometry, in the units the geometry is in:
any polygon bigger will get divided up for multiple queries to API
(default is 50,000 * 50,000 units [ie, 50km x 50km in area, if units are
meters])
infrastructure : string
download infrastructure of given type. default is streets, ie,
'way["highway"]') but other infrastructures may be selected like power
grids, ie, 'way["power"~"line"]'
Returns
-------
response_jsons : list
"""
# check if we're querying by polygon or by bounding box based on which
# argument(s) where passed into this function
by_poly = polygon is not None
by_bbox = not (
north is None or south is None or east is None or west is None
)
if not (by_poly or by_bbox):
raise ValueError(
"You must pass a polygon or north, south, east, and west"
)
# create a filter to exclude certain kinds of ways based on the requested
# network_type
osm_filter = ox.get_osm_filter(network_type)
response_jsons = []
# pass server memory allocation in bytes for the query to the API
# if None, pass nothing so the server will use its default allocation size
# otherwise, define the query's maxsize parameter value as whatever the
# caller passed in
if memory is None:
maxsize = ""
else:
maxsize = "[maxsize:{}]".format(memory)
# define the query to send the API
# specifying way["highway"] means that all ways returned must have a highway
# key. the {filters} then remove ways by key/value. the '>' makes it recurse
# so we get ways and way nodes. maxsize is in bytes.
if by_bbox:
# turn bbox into a polygon and project to local UTM
polygon = Polygon(
[(west, south), (east, south), (east, north), (west, north)]
)
geometry_proj, crs_proj = ox.project_geometry(polygon)
# subdivide it if it exceeds the max area size (in meters), then project
# back to lat-long
geometry_proj_consolidated_subdivided = ox.consolidate_subdivide_geometry(
geometry_proj, max_query_area_size=max_query_area_size
)
geometry, _ = ox.project_geometry(
geometry_proj_consolidated_subdivided,
crs=crs_proj,
to_latlong=True,
)
log(
"Requesting network data within bounding box from API in {:,} request(s)".format(
len(geometry)
)
)
start_time = time.time()
# loop through each polygon rectangle in the geometry (there will only
# be one if original bbox didn't exceed max area size)
for poly in geometry:
# represent bbox as south,west,north,east and round lat-longs to 8
# decimal places (ie, within 1 mm) so URL strings aren't different
# due to float rounding issues (for consistent caching)
west, south, east, north = poly.bounds
query_template = (
date
+ "[out:json][timeout:{timeout}]{maxsize};"
+ "({infrastructure}{filters}"
+ "({south:.8f},{west:.8f},{north:.8f},{east:.8f});>;);out;"
)
query_str = query_template.format(
north=north,
south=south,
east=east,
west=west,
infrastructure=infrastructure,
filters=osm_filter,
timeout=timeout,
maxsize=maxsize,
)
response_json = ox.overpass_request(
data={"data": query_str}, timeout=timeout
)
response_jsons.append(response_json)
log(
"Got all network data within bounding box from API in {:,} request(s) and {:,.2f} seconds".format(
len(geometry), time.time() - start_time
)
)
elif by_poly:
# project to utm, divide polygon up into sub-polygons if area exceeds a
# max size (in meters), project back to lat-long, then get a list of
# polygon(s) exterior coordinates
geometry_proj, crs_proj = ox.project_geometry(polygon)
geometry_proj_consolidated_subdivided = ox.consolidate_subdivide_geometry(
geometry_proj, max_query_area_size=max_query_area_size
)
geometry, _ = ox.project_geometry(
geometry_proj_consolidated_subdivided,
crs=crs_proj,
to_latlong=True,
)
polygon_coord_strs = ox.get_polygons_coordinates(geometry)
log(
"Requesting network data within polygon from API in {:,} request(s)".format(
len(polygon_coord_strs)
)
)
start_time = time.time()
# pass each polygon exterior coordinates in the list to the API, one at
# a time
for polygon_coord_str in polygon_coord_strs:
query_template = (
date
+ '[out:json][timeout:{timeout}]{maxsize};({infrastructure}{filters}(poly:"{polygon}");>;);out;'
)
query_str = query_template.format(
polygon=polygon_coord_str,
infrastructure=infrastructure,
filters=osm_filter,
timeout=timeout,
maxsize=maxsize,
)
response_json = ox.overpass_request(
data={"data": query_str}, timeout=timeout
)
response_jsons.append(response_json)
log(
"Got all network data within polygon from API in {:,} request(s) and {:,.2f} seconds".format(
len(polygon_coord_strs), time.time() - start_time
)
)
return response_jsons | faf949e015c365c3822131634f55c73e2e9fef0c | 13,228 |
from re import S
def rubi_integrate(expr, var, showsteps=False):
"""
Rule based algorithm for integration. Integrates the expression by applying
transformation rules to the expression.
Returns `Integrate` if an expression cannot be integrated.
Parameters
==========
expr : integrand expression
var : variable of integration
Returns Integral object if unable to integrate.
"""
rubi = LoadRubiReplacer().load()
expr = expr.replace(sym_exp, rubi_exp)
expr = process_trig(expr)
expr = rubi_powsimp(expr)
if isinstance(expr, (int, Integer, float, Float)):
return S(expr)*var
if isinstance(expr, Add):
results = 0
for ex in expr.args:
results += rubi.replace(Integral(ex, var))
return process_final_integral(results)
results = util_rubi_integrate(Integral(expr, var))
return process_final_integral(results) | 25eccde81fe0425fbf35c522b24e79195684f537 | 13,230 |
def daily_price_read(sheet_name):
"""
Read stock names and stock codes.
:param sheet_name: name of the Postgres table to read from
:return: dataframe with formatted trade_date and exchange-qualified code columns
"""
sql = "SELECT * FROM public.%s limit 50000" % sheet_name
resultdf = pd.read_sql(sql, engine_postgre)
resultdf['trade_date'] = resultdf['trade_date'].apply(lambda x: x.strftime('%Y-%m-%d'))
resultdf['code'] = resultdf[['code', 'exchangeCD']].apply(lambda x: str(x[0]).zfill(6) + '.'+x[1], axis=1)
return resultdf | d68c326604c21b6375e77fb012a9776d87be617f | 13,231 |
def _isfloat(string):
"""
Checks if a string can be converted into a float.
Parameters
----------
    string : str
Returns
-------
bool:
True/False if the string can/can not be converted into a float.
"""
try:
float(string)
return True
except ValueError:
return False | 74ae50761852d8b22ac86f6b6332bd70e42bf623 | 13,232 |
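# Minimal usage sketch, assuming the _isfloat helper above is in scope.
assert _isfloat("3.14") is True
assert _isfloat("-2e5") is True
assert _isfloat("abc") is False
assert _isfloat("") is False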
def get(url, **kwargs):
"""
get json data from API
:param url:
:param kwargs:
:return:
"""
try:
result = _get(url, **kwargs)
except (rq.ConnectionError, rq.ReadTimeout):
result = {}
return result | a3c17ce6ab383373e7215dd0e5b3e63130739126 | 13,233 |
def sample_indep(p, N, T, D):
"""Simulate an independent sampling mask."""
obs_ind = np.full((N, T, D), -1)
for n in range(N):
for t in range(T):
pi = np.random.binomial(n=1, p=p, size=D)
ind = np.where(pi == 1)[0]
count = ind.shape[0]
obs_ind[n, t, :count] = ind
return obs_ind | 69a469d45040ed1598f7d946b6706f70f23dc580 | 13,234 |
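# Minimal usage sketch, assuming sample_indep above and numpy (as np) are in scope:
# every (n, t) slot holds the sampled feature indices, padded with -1.
np.random.seed(0)
mask = sample_indep(p=0.5, N=2, T=3, D=4)
assert mask.shape == (2, 3, 4)
assert set(np.unique(mask)) <= set(range(-1, 4))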
def _relabel_targets(y, s, ranks, n_relabels):
"""Compute relabelled targets based on predicted ranks."""
demote_ranks = set(sorted(ranks[(s == 0) & (y == 1)])[:n_relabels])
promote_ranks = set(sorted(ranks[(s == 1) & (y == 0)])[-n_relabels:])
return np.array([
_relabel(_y, _s, _r, promote_ranks, demote_ranks)
for _y, _s, _r in zip(y, s, ranks)]) | e8c06364a717210da6c0c60c883fee05d61ed3eb | 13,235 |
def exp(x):
"""Take exponetial of input x.
Parameters
----------
x : Expr
Input argument.
Returns
-------
y : Expr
The result.
"""
return call_pure_intrin(x.dtype, "exp", x) | 9ec31c0b9928108680c2818d1fe110d36c81b08d | 13,236 |
def check_table(words_in_block, block_width, num_lines_in_block):
""" Check if a block is a block of tables or of text."""
# average_words_per_line=24
# total_num_words = 0
ratio_threshold = 0.50
actual_num_chars = 0
all_char_ws = []
cas = []
# total_num_words += len(line)
if num_lines_in_block > 0:
for word in words_in_block:
if word['word']:
actual_num_chars += len(word['word'])
char_w = float(word['r']-word['l'])/len(word['word'])
cas.append(round(char_w, 2))
all_char_ws.extend(cas)
average_char_width = np.mean(all_char_ws)
expected_num_chars = (float(block_width)/average_char_width)*num_lines_in_block
# expected_word_count = average_words_per_line*num_lines_in_block
ratio = actual_num_chars/expected_num_chars
if ratio < ratio_threshold:
return True
else: return False
else: return False | 46c785eefc7694a88d1814b9ca01f41fffa9d1f8 | 13,237 |
def defaultSampleFunction(xy1, xy2):
"""
The sample function compares how similar two curves are.
If they are exactly the same it will return a value of zero.
The default function returns the average error between each sample point in two arrays of x/y points,
xy1 and xy2.
Parameters
----------
xy1 : array
The first input 2D x/y array of points.
xy2 : array
The second input 2D x/y array of points.
Returns
-------
float
The average "distance" between each point on the curve. The output quantity is unitless.
"""
x1 = xy1[:,0]
x2 = xy2[:,0]
y1 = xy1[:,1]
y2 = xy2[:,1]
diff = ((x1 - x2)**2 + (y1 - y2)**2)**(0.5)
return np.sum(diff)/len(x1) | 5654145e9a2d3701d289094ababc94d9ed972def | 13,238 |
from sympy import S, expand, latex
def makesubs(formula,intervals,values=None,variables=None,numden=False):
"""Generates a new formula which satisfies this condition:
for all positive variables new formula is nonnegative iff
for all variables in corresponding intervals old formula is nonnegative.
>>> newproof()
>>> makesubs('1-x^2','[0,1]')
Substitute $x\to 1 - \frac{1}{a + 1}$ #depend on shiro.display
(2*a + 1)/(a**2 + 2*a + 1)
>>> makesubs('1-x^2','[0,1]',values='1/2')
Substitute $x\to 1 - \frac{1}{b + 1}$ #depend on shiro.display
((2*b + 1)/(b**2 + 2*b + 1), [1])
>>> makesubs('1-x^2','[0,1]',values='1/2',numden=True)
Substitute $x\to 1 - \frac{1}{c + 1}$ #depend on shiro.display
(2*c + 1, c**2 + 2*c + 1, [1])
"""
formula=S(formula)
addsymbols(formula)
intervals=_smakeiterable2(intervals)
if variables: variables=_smakeiterable(variables)
else: variables=sorted(formula.free_symbols,key=str)
if values!=None:
values=_smakeiterable(values)
equations=[var-value for var,value in zip(variables,values)]
else:
equations=[]
newvars=[]
warn=0
usedvars=set()
for var,interval in zip(variables,intervals):
end1,end2=interval
z=newvar()
newvars+=[z]
usedvars|={var}
if (end1.free_symbols|end2.free_symbols)&usedvars:
warn=1
if end1 in {S('-oo'),S('oo')}:
end1,end2=end2,end1
if {end1,end2}=={S('-oo'),S('oo')}:
sub1=sub2=(z-1/z)
elif end2==S('oo'):
sub1=sub2=(end1+z)
elif end2==S('-oo'):
sub1=sub2=end1-z
else:
sub1=end2+(end1-end2)/z
sub2=end2+(end1-end2)/(1+z)
formula=formula.subs(var,sub1)
shiro.display(shiro.translation['Substitute']+" $"+latex(var)+'\\to '+latex(sub2)+'$')
equations=[equation.subs(var,sub1) for equation in equations]
num,den=fractioncancel(formula)
for var,interval in zip(newvars,intervals):
if {interval[0],interval[1]} & {S('oo'),S('-oo')}==set():
num=num.subs(var,var+1)
den=den.subs(var,var+1)
equations=[equation.subs(var,var+1) for equation in equations]
if values:
values=ssolve(equations,newvars)
if len(values):
values=values[0]
num,den=expand(num),expand(den)
#shiro.display(shiro.translation["Formula after substitution:"],"$$",latex(num/den),'$$')
if warn:
shiro.warning(shiro.translation[
'Warning: intervals contain backwards dependencies. Consider changing order of variables and intervals.'])
if values and numden:
return num,den,values
elif values:
return num/den,values
elif numden:
return num,den
else:
return num/den | 238923ac5e6e9e577f90091b61b6182323fdee75 | 13,239 |
def generateKey(accountSwitchKey=None,keytype=None):
""" Generate Key"""
genKeyEndpoint = '/config-media-live/v2/msl-origin/generate-key'
if accountSwitchKey:
params = {'accountSwitchKey': accountSwitchKey}
params["type"] = keytype
key = prdHttpCaller.getResult(genKeyEndpoint, params)
else:
        params = {'type': keytype}
key = prdHttpCaller.getResult(genKeyEndpoint,params)
return key | 5da825d809fbb03b5929bcc14f0da0451eaf639a | 13,240 |
def positions_count_for_one_ballot_item_doc_view(request):
"""
Show documentation about positionsCountForOneBallotItem
"""
url_root = WE_VOTE_SERVER_ROOT_URL
template_values = positions_count_for_one_ballot_item_doc.positions_count_for_one_ballot_item_doc_template_values(
url_root)
template_values['voter_api_device_id'] = get_voter_api_device_id(request)
return render(request, 'apis_v1/api_doc_page.html', template_values) | 3c56d186560fa8fae6117b6b656ee0c8d40a8728 | 13,241 |
def atcab_sign_base(mode, key_id, signature):
"""
Executes the Sign command, which generates a signature using the ECDSA algorithm.
Args:
        mode                Mode determines the source of the message to be signed (int)
key_id Private key slot used to sign the message. (int)
signature Signature is returned here. Format is R and S integers in
big-endian format. 64 bytes for P256 curve (Expects bytearray)
Returns:
        Status code
"""
if not isinstance(signature, bytearray):
status = Status.ATCA_BAD_PARAM
else:
c_signature = create_string_buffer(64)
status = get_cryptoauthlib().atcab_sign_base(mode, key_id, byref(c_signature))
signature[0:] = bytes(c_signature.raw)
return status | 87c9770d6ba456947206ea1abb46b10a3f413811 | 13,242 |
from typing import List
import numpy as np
import pandas as pd
def load_gs(
gs_path: str,
src_species: str = None,
dst_species: str = None,
to_intersect: List[str] = None,
) -> dict:
"""Load the gene set file (.gs file).
Parameters
----------
gs_path : str
Path to the gene set file with the following two columns, separated by tab:
- 'TRAIT'
- 'GENESET':
(1) <gene1>,<gene2>,... each gene will be weighted uniformly or
(2) <gene1>:<weight1>,<gene2>:<weight2>,... each gene will be weighted by its weight.
src_species : str, default=None
Source species, must be either 'mmusculus' or 'hsapiens' if not None
dst_species : str, default=None
Destination species, must be either 'mmusculus' or 'hsapiens' if not None
to_intersect : List[str], default None.
Gene list to intersect with the input .gs file.
Returns
-------
dict_gs : dict
Dictionary of gene sets: {
trait1: (gene_list, gene_weight_list),
trait2: (gene_list, gene_weight_list),
...
}
"""
assert (src_species is None) == (
dst_species is None
), "src_species and dst_species must be both None or not None"
# Load homolog map dict_map; only needed when src_species and dst_species
# are not None and different.
if ((src_species is not None) & (dst_species is not None)) and (
src_species != dst_species
):
dict_map = load_homolog_mapping(src_species, dst_species) # type: ignore
else:
dict_map = None # type: ignore
# Load gene set file
dict_gs = {}
df_gs = pd.read_csv(gs_path, sep="\t")
for i, (trait, gs) in df_gs.iterrows():
gs_info = [g.split(":") for g in gs.split(",")]
if np.all([len(g) == 1 for g in gs_info]):
# if all genes are weighted uniformly
dict_weights = {g[0]: 1.0 for g in gs_info}
elif np.all([len(g) == 2 for g in gs_info]):
# if all genes are weighted by their weights
dict_weights = {g[0]: float(g[1]) for g in gs_info}
else:
raise ValueError(f"gene set {trait} contains genes with invalid format")
# Convert species if needed
# convert gene list to homologs, if gene can not be mapped, remove it
# in both gene list and gene weight
if dict_map is not None:
dict_weights = {
dict_map[g]: w for g, w in dict_weights.items() if g in dict_map
}
# Intersect with other gene sets
if to_intersect is not None:
to_intersect = set(to_intersect)
dict_weights = {g: w for g, w in dict_weights.items() if g in to_intersect}
gene_list = list(dict_weights.keys())
dict_gs[trait] = (
gene_list,
[dict_weights[g] for g in gene_list],
)
return dict_gs | 0e13c355b1a3bd7e88785844262a01c6963ef0ee | 13,243 |
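# Minimal usage sketch, assuming load_gs above is in scope; the trait and gene names
# below are made up for illustration and the file follows the layout from the docstring.
import os
import tempfile

gs_content = "TRAIT\tGENESET\ntrait1\tGENE_A:2.0,GENE_B:0.5\ntrait2\tGENE_C\n"
with tempfile.NamedTemporaryFile("w", suffix=".gs", delete=False) as fh:
    fh.write(gs_content)
    gs_path = fh.name
dict_gs = load_gs(gs_path)
# {'trait1': (['GENE_A', 'GENE_B'], [2.0, 0.5]), 'trait2': (['GENE_C'], [1.0])}
print(dict_gs)
os.remove(gs_path)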
from .points import remove_close
def sample_surface_even(mesh, count, radius=None):
"""
Sample the surface of a mesh, returning samples which are
VERY approximately evenly spaced.
This is accomplished by sampling and then rejecting pairs
that are too close together.
Parameters
---------
mesh : trimesh.Trimesh
Geometry to sample the surface of
count : int
Number of points to return
radius : None or float
Removes samples below this radius
Returns
---------
samples : (count, 3) float
Points in space on the surface of mesh
face_index : (count,) int
Indices of faces for each sampled point
"""
# guess radius from area
if radius is None:
radius = np.sqrt(mesh.area / (3 * count))
# get points on the surface
points, index = sample_surface(mesh, count * 3)
# remove the points closer than radius
points, mask = remove_close(points, radius)
# we got all the samples we expect
if len(points) >= count:
return points[:count], index[mask][:count]
# warn if we didn't get all the samples we expect
util.log.warning('only got {}/{} samples!'.format(
len(points), count))
return points, index[mask] | 9dd9c4aa27f4811511d81ef0c8dabe3097026061 | 13,244 |
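# Usage sketch, assuming the trimesh package is installed; the released version of this
# helper is exposed as trimesh.sample.sample_surface_even.
import trimesh

mesh = trimesh.creation.box(extents=(1.0, 1.0, 1.0))
points, face_index = trimesh.sample.sample_surface_even(mesh, count=500)
# points lie on the box surface; fewer than `count` samples may survive the radius filter
assert points.shape[1] == 3 and len(points) <= 500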
def allocate_probabilities(results, num_substations, probabilities):
"""
Allocate cumulative probabilities.
Parameters
----------
results : list of dicts
All iterations generated in the simulation function.
num_substations : list
The number of electricity substation nodes we wish to select for each scenario.
probabilities : list
Contains the cumulative probabilities we wish to use.
Returns
-------
output : list of dicts
Contains all generated results.
"""
output = []
for nodes in num_substations:
ranked_data = add_cp(results, nodes, probabilities)
for probability in probabilities:
scenario = min(
ranked_data,
key=lambda x: abs(float(x["cum_probability"]) - probability)
)
output.append(scenario)
return output | 5692517aa55776c94c7f4027f175eab985000fe1 | 13,245 |
def delete_page_groups(request_ctx, group_id, url, **request_kwargs):
"""
Delete a wiki page
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param group_id: (required) ID
:type group_id: string
:param url: (required) ID
:type url: string
:return: Delete page
:rtype: requests.Response (with Page data)
"""
path = '/v1/groups/{group_id}/pages/{url}'
url = request_ctx.base_api_url + path.format(group_id=group_id, url=url)
response = client.delete(request_ctx, url, **request_kwargs)
return response | d9802d66e87eb0ef9e9fe5bee98686caade3e79d | 13,247 |
def solver(f, p_e, mesh, degree=1):
"""
    Solve the Darcy flow equation on a unit-square medium with pressure boundary conditions.
"""
# Creating mesh and defining function space
V = FunctionSpace(mesh, 'P', degree)
# Defining Dirichlet boundary
p_L = Constant(1.0)
def boundary_L(x, on_boundary):
return on_boundary and near(x[0], 0)
bc_L = DirichletBC(V, p_L, boundary_L)
p_R = Constant(0.0)
def boundary_R(x, on_boundary):
return on_boundary and near(x[0], 1)
bc_R = DirichletBC(V, p_R, boundary_R)
bcs = [bc_L, bc_R]
# If p = p_e on the boundary, then use:-
#def boundary(x, on_boundary):
#return on_boundary
#bc = DirichletBC(V, p_e, boundary)
# Defining variational problem
p = TrialFunction(V)
v = TestFunction(V)
d = 2
I = Identity(d)
M = Expression('fmax(0.10, exp(-pow(10.0*x[1]-1.0*sin(10.0*x[0])-5.0, 2)))', degree=2, domain=mesh)
K = M*I
a = dot(K*grad(p), grad(v))*dx
L = inner(f, v)*dx
# Computing Numerical Pressure
p = Function(V)
solve(a == L, p, bcs)
return p | 0630b1199f064044976c24d825332aa2d879dab2 | 13,249 |
def set_stretchmatrix(coefX=1.0, coefY=1.0):
"""Stretching matrix
Args:
coefX:
coefY:coefficients (float) for the matrix
[coefX 0
0 coefY]
Returns:
strectching_matrix: matrix
"""
return np.array([[coefX, 0],[0, coefY]]) | cd11bf0351a205e52b1f99893fe43709636978d3 | 13,250 |
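# Minimal usage sketch, assuming set_stretchmatrix above and numpy (as np) are in scope:
# stretch x by 2 and compress y by half.
M = set_stretchmatrix(2.0, 0.5)
assert np.array_equal(M, np.array([[2.0, 0.0], [0.0, 0.5]]))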
def set_bit(v, index, x):
"""Set the index:th bit of v to 1 if x is truthy, else to 0, and return the new value."""
mask = 1 << index # Compute mask, an integer with just bit 'index' set.
v &= ~mask # Clear the bit indicated by the mask (if x is False)
if x:
v |= mask # If x was True, set the bit indicated by the mask.
return v | 627744c06709eecec18f0c5956f1af4c57a57b8a | 13,251 |
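# Minimal usage sketch, assuming set_bit above is in scope.
assert set_bit(0b1010, 0, True) == 0b1011   # set bit 0 -> 11
assert set_bit(0b1010, 1, False) == 0b1000  # clear bit 1 -> 8
assert set_bit(0, 3, 1) == 8                # any truthy x sets the bit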
def test_credentials() -> (str, str):
"""
Read ~/.synapseConfig and retrieve test username and password
:return: endpoint, username and api_key
"""
config = _get_config()
return config.get(DEFAULT_CONFIG_AUTH_SECTION, DEFAULT_CONFIG_USERNAME_OPT),\
config.get(DEFAULT_CONFIG_AUTH_SECTION, DEFAULT_CONFIG_PASSWORD_OPT) | 2ceb441b338b5ea8b1154c19406af9b1e21ed85e | 13,252 |
import re
def BCA_formula_from_str(BCA_str):
"""
Get chemical formula string from BCA string
Args:
BCA_str: BCA ratio string (e.g. 'B3C1A1')
"""
if len(BCA_str)==6 and BCA_str[:3]=='BCA':
# format: BCAxyz. suitable for single-digit integer x,y,z
funits = BCA_str[-3:]
else:
# format: BxCyAz. suitable for multi-digit or non-integer x,y,z
funits = re.split('[BCA]',BCA_str)
funits = [u for u in funits if len(u) > 0]
components = ['BaO','CaO','Al2O3']
formula = ''.join([f'({c}){n}' for c,n in zip(components, funits)])
return formula | 36375e62d70995628e253ba68ba8b777eb88d728 | 13,253 |
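# Minimal usage sketch, assuming BCA_formula_from_str above is in scope:
# both accepted input formats map to the same formula string.
assert BCA_formula_from_str('BCA311') == '(BaO)3(CaO)1(Al2O3)1'
assert BCA_formula_from_str('B3C1A1') == '(BaO)3(CaO)1(Al2O3)1'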
import numpy
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import connected_components
def get_strongly_connected_components(graph):
"""
Get strongly connected components for a directed graph
The returned list of components is in reverse topological order, i.e.,
such that the nodes in the first component have no dependencies on
other components.
"""
nodes = list(graph.keys())
node_index_by_node = {node: index for index, node in enumerate(nodes)}
row_indexes = []
col_indexes = []
for node, targets in graph.items():
row_indexes += [node_index_by_node[node]] * len(targets)
col_indexes += [node_index_by_node[target] for target in targets]
data = numpy.ones((len(row_indexes)), dtype=int)
n_nodes = len(nodes)
csgraph = csr_matrix((data, (row_indexes, col_indexes)), shape=(n_nodes, n_nodes))
n_components, labels = connected_components(csgraph, directed=True, connection='strong')
sccs = [[] for i in range(n_components)]
for index, label in enumerate(labels):
sccs[label] += [nodes[index]]
return [frozenset(scc) for scc in sccs] | 42783756d8fdada032e2d0ca8a306f10d4977e16 | 13,254 |
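# Minimal usage sketch, assuming the function above and its numpy/scipy imports are in scope:
# 'a' and 'b' form a cycle, while 'c' is a component of its own.
graph = {'a': ['b'], 'b': ['a', 'c'], 'c': []}
sccs = get_strongly_connected_components(graph)
assert set(sccs) == {frozenset({'a', 'b'}), frozenset({'c'})}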
def dot(a, b):
"""
Computes a @ b, for a, b of the same rank (both 2 or both 3).
If the rank is 2, then the innermost dimension of `a` must match the
outermost dimension of `b`.
If the rank is 3, the first dimension of `a` and `b` must be equal and the
function computes a batch matmul.
Supports both dense and sparse multiplication (including sparse-sparse).
:param a: Tensor or SparseTensor with rank 2 or 3.
:param b: Tensor or SparseTensor with same rank as b.
:return: Tensor or SparseTensor with rank 2 or 3.
"""
a_ndim = K.ndim(a)
b_ndim = K.ndim(b)
assert a_ndim == b_ndim, "Expected equal ranks, got {} and {}" "".format(
a_ndim, b_ndim
)
a_is_sparse = K.is_sparse(a)
b_is_sparse = K.is_sparse(b)
# Handle cases: rank 2 sparse-dense, rank 2 dense-sparse
# In these cases we can use the faster sparse-dense matmul of tf.sparse
if a_ndim == 2:
if a_is_sparse and not b_is_sparse:
return tf.sparse.sparse_dense_matmul(a, b)
if not a_is_sparse and b_is_sparse:
return ops.transpose(
tf.sparse.sparse_dense_matmul(ops.transpose(b), ops.transpose(a))
)
# Handle cases: rank 2 sparse-sparse, rank 3 sparse-dense,
# rank 3 dense-sparse, rank 3 sparse-sparse
# In these cases we can use the tfsp.CSRSparseMatrix implementation (slower,
# but saves memory)
if a_is_sparse:
a = tfsp.CSRSparseMatrix(a)
if b_is_sparse:
b = tfsp.CSRSparseMatrix(b)
if a_is_sparse or b_is_sparse:
out = tfsp.matmul(a, b)
if hasattr(out, "to_sparse_tensor"):
return out.to_sparse_tensor()
else:
return out
# Handle case: rank 2 dense-dense, rank 3 dense-dense
# Here we use the standard dense operation
return tf.matmul(a, b) | 789c9d045d82eb048ff5319d5a7ae99ffb02376d | 13,255 |
def unreshuffle_2d(x, i0, shape):
"""Undo the reshuffle_2d operation."""
x_flat = unreshuffle_1d(x, i0)
x_rev = np.reshape(x_flat, shape)
x_rev[1::2, :] = x_rev[1::2, ::-1] # reverse all odd rows
return x_rev | 72cf59b9e547cf2eb516fca33e9eea7d01c1702b | 13,256 |
def findNodeJustBefore(target, nodes):
"""
Find the node in C{nodes} which appeared immediately before C{target} in
the input document.
@type target: L{twisted.web.microdom.Element}
@type nodes: C{list} of L{twisted.web.microdom.Element}
@return: An element from C{nodes}
"""
result = None
for node in nodes:
if comparePosition(target, node) < 0:
return result
result = node
return result | 85d435a2c10dbaabb544c81d440e2110a6083dd7 | 13,257 |
def _format_line(submission, position, rank_change, total_hours):
"""
Formats info about a single post on the front page for logging/messaging. A single post will look like this:
Rank Change Duration Score Flair Id User Slug
13. +1 10h 188 [Episode](gkvlja) <AutoLovepon> <arte_episode_7_discussion>
"""
line = "{:3}".format(f"{position}.")
if rank_change is None:
line += " (new) "
elif rank_change != 0:
line += " {:7}".format(f"{rank_change:+d} {total_hours}h")
else:
line += " {:7}".format(f"-- {total_hours}h")
line += f" {submission.score:>5}"
line += " {:>24}".format(f"[{submission.link_flair_text}]({submission.id})")
line += f" <{submission.author.name}>"
line += f" <{reddit_utils.slug(submission)}>"
return line | 70840d0e57194b43fbaf0352ebecdfefa74bd4d7 | 13,258 |
from typing import List
from typing import Dict
from typing import Any
from typing import Tuple
def run_erasure( # pylint: disable = too-many-arguments
privacy_request: PrivacyRequest,
policy: Policy,
graph: DatasetGraph,
connection_configs: List[ConnectionConfig],
identity: Dict[str, Any],
access_request_data: Dict[str, List[Row]],
) -> Dict[str, int]:
"""Run an erasure request"""
traversal: Traversal = Traversal(graph, identity)
with TaskResources(privacy_request, policy, connection_configs) as resources:
def collect_tasks_fn(
tn: TraversalNode, data: Dict[CollectionAddress, GraphTask]
) -> None:
"""Run the traversal, as an action creating a GraphTask for each traversal_node."""
if not tn.is_root_node():
data[tn.address] = GraphTask(tn, resources)
env: Dict[CollectionAddress, Any] = {}
traversal.traverse(env, collect_tasks_fn)
def termination_fn(*dependent_values: int) -> Tuple[int, ...]:
"""The dependent_values here is an int output from each task feeding in, where
each task reports the output of 'task.rtf(access_request_data)', which is the number of
records updated.
The termination function just returns this tuple of ints."""
return dependent_values
dsk: Dict[CollectionAddress, Any] = {
k: (t.erasure_request, access_request_data[str(k)]) for k, t in env.items()
}
# terminator function waits for all keys
dsk[TERMINATOR_ADDRESS] = (termination_fn, *env.keys())
v = dask.delayed(get(dsk, TERMINATOR_ADDRESS))
update_cts: Tuple[int, ...] = v.compute()
# we combine the output of the termination function with the input keys to provide
# a map of {collection_name: records_updated}:
erasure_update_map: Dict[str, int] = dict(
zip([str(x) for x in env], update_cts)
)
return erasure_update_map | 2b9579ca1c4960da46ee7bb1388ab667dc808f40 | 13,259 |
def RichTextBuffer_FindHandlerByName(*args, **kwargs):
"""RichTextBuffer_FindHandlerByName(String name) -> RichTextFileHandler"""
return _richtext.RichTextBuffer_FindHandlerByName(*args, **kwargs) | 1122822db4885b6d745f4e893522fb01b988ee3f | 13,261 |
def likelihood(tec, phase, tec_conversion, lik_sigma, K = 2):
"""
    Get the likelihood of the tec given phase data and lik_sigma noise scale.
tec: tensor B, 1
phase: tensor B, Nf
tec_conversion: tensor Nf
lik_sigma: tensor B, 1 (Nf)
Returns:
log_prob: tensor (B,1)
"""
mu = wrap(tec*tec_conversion[None,:])# B, Nf
phase = wrap(phase)
#K, B, Nf
d = tf.stack([tf.distributions.Normal(mu + tf.convert_to_tensor(k*2*np.pi,float_type),
lik_sigma).log_prob(phase) for k in range(-K,K+1,1)], axis=0)
#B, Nf -> B
log_lik = tf.reduce_sum(tf.reduce_logsumexp(d, axis=0), axis=1)
# B, 1
# tec_mu = tf.gather(tec, neighbour)
# tec_std = 0.001 * tf.exp(-0.25*neighbour_dist**2)
# tec_prior = tf.distributions.Normal(tec_mu, tec_std).log_prob(tec)
# sigma_priors = log_normal_solve(0.2,0.1)
# #B, 1
# sigma_prior = tf.distributions.Normal(
# tf.convert_to_tensor(sigma_priors[0],dtype=float_type),
# tf.convert_to_tensor(sigma_priors[1],dtype=float_type)).log_prob(tf.log(lik_sigma)) - tf.log(lik_sigma)
#B, 1
log_prob = log_lik[:,None]# + tec_prior # + sigma_prior
return -log_prob | c5f566484ee8f8cbc48e5302365f0e06c81f49e3 | 13,262 |
def angle_between(v1, v2):
"""Returns the angle in radians between vectors 'v1' and 'v2'::
>>> angle_between((1, 0, 0), (0, 1, 0))
1.5707963267948966
>>> angle_between((1, 0, 0), (1, 0, 0))
0.0
>>> angle_between((1, 0, 0), (-1, 0, 0))
3.141592653589793
"""
# https://stackoverflow.com/a/13849249/782170
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
cos_theta = np.clip(np.dot(v1_u, v2_u), -1.0, 1.0)
    return np.arccos(cos_theta)
from aiida.engine import ExitCode
from ase.lattice.cubic import FaceCenteredCubic
from ase.lattice.cubic import BodyCenteredCubic
def create_substrate_bulk(wf_dict_node):
"""
Calcfunction to create a bulk structure of a substrate.
:params wf_dict: AiiDA dict node with at least keys lattice, host_symbol and latticeconstant
(If they are not there, raises KeyError)
Lattice key supports only fcc and bcc
raises ExitCode 380, ERROR_NOT_SUPPORTED_LATTICE
"""
wf_dict = wf_dict_node.get_dict()
lattice = wf_dict['lattice']
if lattice == 'fcc':
structure_factory = FaceCenteredCubic
elif lattice == 'bcc':
structure_factory = BodyCenteredCubic
else:
return ExitCode(380, 'ERROR_NOT_SUPPORTED_LATTICE', message='Specified substrate has to be bcc or fcc.')
directions = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
host_symbol = str(wf_dict['host_symbol'])
latticeconstant = float(wf_dict['latticeconstant'])
size = (1, 1, 1)
structure = structure_factory(directions=directions,
symbol=host_symbol,
pbc=(1, 1, 1),
latticeconstant=latticeconstant,
size=size)
return StructureData(ase=structure) | cb232093c3f6866bd14d2b19115ef988f279bf2f | 13,264 |
def run_Net_on_multiple(patchCreator, input_to_cnn_depth=1, cnn = None,
str_data_selection="all", save_file_prefix="",
apply_cc_filtering = False, output_filetype = 'h5', save_prob_map = False):
""" run runNetOnSlice() on neighbouring blocks of data.
if opt_cnn is not none, it should point to a CNN / Net that will be used.
if patchCreator contains a list of 3D data blocks (patchCreator.second_input_data) then it will be used as second input to cnn.output()
"""
assert str_data_selection in ["all", "train", "test"]
MIN = 0 if str_data_selection in ["all", "train"] else patchCreator.training_set_size
MAX = patchCreator.training_set_size if str_data_selection =="train" else len(patchCreator.data)
second_input_data = None
DATA = patchCreator.data
timings=[]
# if hasattr(patchCreator,"second_input_data"):
# second_input_data = patchCreator.second_input_data[opt_list_index]
for opt_list_index in range(MIN, MAX):
print "-"*30
print "@",opt_list_index+1,"of max.",len(patchCreator.data)
postfix = "" if opt_list_index==None else "_" + utilities.extract_filename(patchCreator.file_names[opt_list_index])[1] if isinstance(patchCreator.file_names[0], str) else str(patchCreator.file_names[opt_list_index]) if not isinstance(patchCreator.file_names[opt_list_index], tuple) else utilities.extract_filename(patchCreator.file_names[opt_list_index][0])[1]
if opt_list_index is not None:
is_training = "_train" if (opt_list_index < patchCreator.training_set_size) else "_test"
else:
is_training=""
this_save_name = save_file_prefix+"prediction"+postfix+"_"+is_training
t0 = time.clock()
sav = run_Net_on_Block(cnn, DATA[opt_list_index], patchCreator, bool_predicts_on_softmax=1,
second_input_data = second_input_data) #this one does all the work
t1 = time.clock()
timings.append(t1-t0)
if apply_cc_filtering:
sav = remove_small_conneceted_components(sav)
sav = 1 - remove_small_conneceted_components(1 - sav)
save_pred(sav, this_save_name, output_filetype, save_prob_map)
print 'timings (len',len(timings),')',np.mean(timings),'+-',np.std(timings)
return None | 9d4c9c2fa3248258299243e3d7585362f47776a2 | 13,265 |
def user_to_janrain_capture_dict(user):
"""Translate user fields into corresponding Janrain fields"""
field_map = getattr(settings, 'JANRAIN', {}).get('field_map', None)
if not field_map:
field_map = {
'first_name': {'name': 'givenName'},
'last_name': {'name': 'familyName'},
'email': {'name': 'email'},
'username': {'name': 'displayName'},
}
result = {}
for field in user._meta.fields:
if field.name in field_map:
fm = field_map[field.name]
value = getattr(user, field.name)
func = fm.get('function', None)
if func:
value = func(value)
# Plurals are dot delimited
parts = fm['name'].split('.')
key = parts[0]
if len(parts) == 1:
result[key] = value
else:
result.setdefault(key, {})
result[key][parts[1]] = value
return result | 767b7003a282e481c1d1753f4c469fec42a5002a | 13,266 |
from typing import Callable, Optional
import numpy as np
from numpy.random import Generator, default_rng
def weighted(generator: Callable, directed: bool = False, low: float = 0.0, high: float = 1.0,
rng: Optional[Generator] = None) -> Callable:
"""
Takes as input a graph generator and returns a new generator function that outputs weighted
graphs. If the generator is dense, the output will be the weighted adjacency matrix. If the
generator is sparse, the new function will return a tuple (adj_list, weights).
Parameters
----------
generator : Callable
A callable that generates graphs
directed: bool
Whether to generate weights for directed graphs
low : float, optional
Lower boundary of the sampling distribution interval,
i.e., interval in [low, high), by default 0.0
high : float, optional
Upper boundary of the sampling distribution interval,
i.e., interval in [low, high), by default 1.0
rng : Generator, optional
Numpy random number generator, by default None
Returns
-------
Callable
A callable that generates weighted graphs
Examples
--------
>> weighted(erdos_renyi)(num_nodes=100, prob=0.5)
"""
if rng is None:
rng = default_rng()
def weighted_generator(*args, **kwargs):
adj = generator(*args, **kwargs)
if adj.shape[0] == adj.shape[1]:
num_nodes = adj.shape[0]
weights = rng.uniform(low=low, high=high, size=(num_nodes, num_nodes))
if not directed:
weights = np.triu(weights)
weights = weights + weights.T
adj = adj.astype(float) * weights
return adj
weights = rng.uniform(low=low, high=high, size=(adj.shape[0], 1))
return adj, weights
return weighted_generator | d4c4d0b93784ca7bee22ecaffc3e8005315aa631 | 13,267 |
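# Minimal usage sketch, assuming weighted above and numpy (as np) are in scope; the
# lambda below is a made-up dense generator (complete graph on n nodes) standing in
# for a real one such as an Erdos-Renyi sampler.
complete_graph = lambda n: np.ones((n, n)) - np.eye(n)
adj = weighted(complete_graph)(4)
assert adj.shape == (4, 4)
assert np.allclose(adj, adj.T)         # undirected weights are symmetric
assert np.allclose(np.diag(adj), 0.0)  # input graph has no self-loops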
def generate_sampled_graph_and_labels(triplets, sample_size, split_size,
num_rels, adj_list, degrees,
negative_rate,tables_id, sampler="uniform"):
"""Get training graph and signals
First perform edge neighborhood sampling on graph, then perform negative
sampling to generate negative samples
"""
# perform edge neighbor sampling
if sampler == "uniform":
edges = sample_edge_uniform(adj_list, degrees, len(triplets), sample_size)
elif sampler == "neighbor":
edges = sample_edge_neighborhood(adj_list, degrees, len(triplets), sample_size,tables_id)
else:
raise ValueError("Sampler type must be either 'uniform' or 'neighbor'.")
# relabel nodes to have consecutive node ids
edges = triplets[edges]
src, rel, dst = edges.transpose()
# my_graph = nx.Graph()
# edges_to_draw = list(set(list(zip(dst, src, rel))))
# edges_to_draw = sorted(edges_to_draw)
# # my_graph.add_edges_from(edges_to_draw[:10])
#
# for item in edges_to_draw:
# my_graph.add_edge(item[1], item[0], weight=item[2]*10)
# pos = nx.spring_layout(my_graph)
# labels = nx.get_edge_attributes(my_graph, 'weight')
# plt.figure()
# nx.draw(my_graph, pos, edge_color='black', width=1, linewidths=1, arrows=True,
# node_size=100, node_color='red', alpha=0.9,
# labels={node: node for node in my_graph.nodes()})
# nx.draw_networkx_edge_labels(my_graph, pos, edge_labels=labels, font_color='red')
# plt.axis('off')
# plt.show()
uniq_v, edges = np.unique((src, dst), return_inverse=True)
src, dst = np.reshape(edges, (2, -1))
relabeled_edges = np.stack((src, rel, dst)).transpose()
# negative sampling
samples, labels = negative_sampling(relabeled_edges, len(uniq_v),
negative_rate)
#samples, labels = negative_relations(relabeled_edges, len(uniq_v),
# negative_rate)
# further split graph, only half of the edges will be used as graph
# structure, while the rest half is used as unseen positive samples
split_size = int(sample_size * split_size)
graph_split_ids = np.random.choice(np.arange(sample_size),
size=split_size, replace=False)
src = src[graph_split_ids]
dst = dst[graph_split_ids]
rel = rel[graph_split_ids]
# build DGL graph
print("# sampled nodes: {}".format(len(uniq_v)))
print("# sampled edges: {}".format(len(src) * 2))
#g, rel, norm,_ = build_graph_from_triplets_modified(len(uniq_v), num_rels,
# (src, rel, dst))
g, rel, norm=build_graph_directly(len(uniq_v), (src, rel, dst))
return g, uniq_v, rel, norm, samples, labels | 7386eada0e0aa70478c063aa4525c62cbc997b2e | 13,269 |