content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M) |
---|---|---|
def archived_attendance_overview():
"""
Show archived attendance view
"""
this_year = 2019
last_year = 2018
attendance = CommitteeMeetingAttendance.annual_attendance_trends(
period='historical')
# index by year and cte id
years = {
year: {
cte_id: list(cte_group)[0]
for cte_id, cte_group in groupby(group, lambda r: r.committee_id)
}
for year, group in groupby(attendance, lambda r: r.year)
}
attendance = {
'NA': [],
'NCOP': [],
}
for cte in Committee.list().all():
curr = years[this_year].get(cte.id)
prev = years[last_year].get(cte.id)
if cte.house.sphere != 'national':
continue
if not curr or cte.ad_hoc or cte.house.name_short == 'Joint':
continue
attendance[cte.house.name_short].append({
'committee':
cte.name,
'committee_id':
cte.id,
'n_meetings':
curr.n_meetings,
'avg_attendance':
curr.avg_attendance * 100,
'change':
(curr.avg_attendance - (prev.avg_attendance if prev else 0)) * 100,
})
# rank them
for att in attendance.itervalues():
att.sort(key=lambda a: a['avg_attendance'], reverse=True)
for i, item in enumerate(att):
att[i]['rank'] = len(att) - i
return render_template(
'archive_attendance_overview.html',
year=this_year,
attendance_na=attendance['NA'],
attendance_ncop=attendance['NCOP'])
|
6d895a63e9b248b135385fd316112e4b749792a3
| 26,811 |
def _get_medical_image_blob(roidb):
""" Builds an input blob from the medical image in the roidb
"""
num_images = len(roidb)
processed_ims = []
pre_ims = []
post_ims = []
abdo_masks = []
for i in range(num_images):
im = raw_reader(roidb[i]["image"], cfg.MET_TYPE, [roidb[i]["height"], roidb[i]["width"]])
if roidb[i]['flipped']:
im = im[:, ::-1]
processed_ims.append(im)
mask = abdominal_mask(im.copy())
abdo_masks.append(mask)
if cfg.THREE_SLICES:
# get pre-image
basename = osp.basename(roidb[i]["image"])
names = basename[:-4].split("_")
slice_num = int(names[-1])
if slice_num == 0:
pre_im = im
else:
slice_num -= 1
names[-1] = str(slice_num)
basename = "_".join(names) + ".raw"
pre_path = osp.join(osp.dirname(roidb[i]["image"]), basename)
pre_im = raw_reader(pre_path, cfg.MET_TYPE, [roidb[i]["height"], roidb[i]["width"]])
if roidb[i]['flipped']:
pre_im = pre_im[:, ::-1]
pre_ims.append(pre_im)
# get post-image
basename = osp.basename(roidb[i]["image"])
names = basename[:-4].split("_")
names[-1] = str(int(names[-1]) + 1)
basename = "_".join(names) + ".raw"
post_path = osp.join(osp.dirname(roidb[i]["image"]), basename)
try:
post_im = raw_reader(post_path, cfg.MET_TYPE, [roidb[i]["height"], roidb[i]["width"]])
if roidb[i]['flipped']:
post_im = post_im[:, ::-1]
except FileNotFoundError:
post_im = im
post_ims.append(post_im)
num_images = len(processed_ims)
blob = np.zeros((num_images, cfg.TRAIN.MAX_SIZE, cfg.TRAIN.MAX_SIZE, 3), dtype=np.float32)
abdo_mask = np.zeros((num_images, cfg.TRAIN.MAX_SIZE, cfg.TRAIN.MAX_SIZE), dtype=bool)
if cfg.THREE_SLICES:
for i in range(num_images):
blob[i,:,:,0] = pre_ims[i]
blob[i,:,:,1] = processed_ims[i]
blob[i,:,:,2] = post_ims[i]
abdo_mask[i,:,:] = abdo_masks[i]
else:
for i in range(num_images):
blob[i,:,:,0] = processed_ims[i]
blob[i,:,:,1] = processed_ims[i]
blob[i,:,:,2] = processed_ims[i]
abdo_mask[i,:,:] = abdo_masks[i]
if cfg.USE_WIDTH_LEVEL:
win, wind2, lev = cfg.WIDTH, cfg.WIDTH / 2, cfg.LEVEL
blob = (np.clip(blob, lev - wind2, lev + wind2) - (lev - wind2)) / 2**16 * win
else:
blob /= cfg.MED_IMG_UPPER
blob = np.clip(blob, -1., 1.)
return blob, abdo_mask
|
9b09d53fbf08ff785eeaadacf3733b8b5bb995b4
| 26,813 |
def is_weak(key: object) -> bool:
"""
Check whether key is weak or not.
Key is weak if it is a basic key - not an action and not a modifier.
"""
return not is_action(key) and not is_mod(key)
|
9493afd199fad665e97ae18da973c8a15671bfbb
| 26,814 |
def mock_cei_get_trades(mocker: MockerFixture) -> MockerFixture:
"""Fixture for mocking report_reader.get_trades."""
return mocker.patch("irpf_investidor.report_reader.get_trades")
|
fcceb00df6443a30544b92d001983c7bb243650a
| 26,815 |
def _normalize(op1, op2, prec = 0):
"""Normalizes op1, op2 to have the same exp and length of coefficient.
Done during addition.
"""
if op1.exp < op2.exp:
tmp = op2
other = op1
else:
tmp = op1
other = op2
# Let exp = min(tmp.exp - 1, tmp.adjusted() - precision - 1).
# Then adding 10**exp to tmp has the same effect (after rounding)
# as adding any positive quantity smaller than 10**exp; similarly
# for subtraction. So if other is smaller than 10**exp we replace
# it with 10**exp. This avoids tmp.exp - other.exp getting too large.
tmp_len = len(str(tmp.int))
other_len = len(str(other.int))
exp = tmp.exp + min(-1, tmp_len - prec - 2)
if other_len + other.exp - 1 < exp:
other.int = 1
other.exp = exp
tmp.int *= 10 ** (tmp.exp - other.exp)
tmp.exp = other.exp
return op1, op2
|
f0ed5a47e6fa466a8fb7aacf837c42f092ff031e
| 26,817 |
def change_tags(project_id):
"""Change the list of tags on a project."""
project = Project.query.get_or_404(project_id)
if project.lifetime_stage not in (LifetimeStage.ongoing, LifetimeStage.finalizing):
abort(400, {'message': 'Tags can only be modified in ongoing and finalizing stages.'})
if current_user != project.creator and not current_user.is_admin:
abort(403)
in_schema = ProjectSchema(only=('tags',))
try:
updated_project = in_schema.load({'tags': request.json}, instance=project)
except ValidationError as err:
abort(400, {'message': err.messages})
try:
db.session.add(updated_project)
db.session.commit()
except IntegrityError as err:
db.session.rollback()
log.exception(err)
abort(400, {'message': 'Data integrity violated.'})
return NO_PAYLOAD
|
0f723eb4816066f8f3a4353b1261c207c2754100
| 26,818 |
from typing import List
from typing import Dict
from typing import Any
import six
from typing import Tuple
from typing import TypeVar
T = TypeVar("T")
from typing import Union
def sort_db_results(results: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""Deterministically sort DB results.
Args:
results: List[Dict], results from a DB.
Returns:
List[Dict], sorted DB results.
"""
sort_order: List[str] = []
if len(results) > 0:
sort_order = sorted(six.iterkeys(results[0]))
def sort_key(result: Dict[str, Any]) -> Tuple[Tuple[bool, Any], ...]:
"""Convert None/Not None to avoid comparisons of None to a non-None type."""
return tuple((result[col] is not None, result[col]) for col in sort_order)
def sorted_value(value: T) -> Union[List[Any], T]:
"""Return a sorted version of a value, if it is a list."""
if isinstance(value, list):
return sorted(value)
return value
return sorted(
[{k: sorted_value(v) for k, v in six.iteritems(row)} for row in results], key=sort_key
)
|
27a8a206ee08810cf7391a1d4d6e0cf68d06fe14
| 26,819 |
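A minimal usage sketch, assuming `sort_db_results` above and its `six` import are available; the rows and column names are invented for illustration.

rows = [
    {"name": "bob", "age": None},
    {"name": "alice", "age": 31},
    {"name": "alice", "age": None},
]
# Rows come back in a deterministic order; None sorts before any non-None
# value because the per-column sort key is (value is not None, value).
for row in sort_db_results(rows):
    print(row)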
def classify_one(models, cooccurence_csv):
"""
Args:
cooccurence_csv (str): the CSV file path for the co-occurrence feautres
Output:
dx (int): the class for diagnosis
dx_name (str): the name for diagnosis class
"""
# Get the features from the CSV file
cooc_features = open(cooccurence_csv, "r").read().split(",")
freq_features = open(cooccurence_csv.replace("Cooccurrence", "Frequency"), "r").read().split(",")
cooc_features = np.array([float(_.strip().rstrip()) for _ in cooc_features])
freq_features = np.array([float(_.strip().rstrip()) for _ in freq_features])
# Get the results only for upper triangle matrix (including diagonal).
# Other parts will be all zero
cooc_features = np.triu(cooc_features.reshape(8, 8), k=0).reshape(-1)
# Normalize features
cooc_features = cooc_features / np.sum(cooc_features)
freq_features = freq_features / np.sum(freq_features)
features = np.array(cooc_features.tolist() + freq_features.tolist()).reshape(1, -1)
# First decision
for experiment in ["Invasive v.s. Noninvasive",
"Atypia and DCIS v.s. Benign",
"DCIS v.s. Atypia"]:
pca = models[experiment + " PCA"]
if pca is not None:
features = pca.transform(features).reshape(1, -1)
model = models[experiment + " model"]
rst = model.predict(features)[0]
if rst:
if experiment == "Invasive v.s. Noninvasive":
return 4, "Invasive"
if experiment == "Atypia and DCIS v.s. Benign":
return 1, "Benign"
if experiment == "DCIS v.s. Atypia":
return 3, "DCIS"
raise("programming error! unknown experiment")
if experiment == "DCIS v.s. Atypia" and not rst:
return 2, "Atypia"
raise("programming error 2! Unknown experiment and rst")
|
6761a01ab832df15f88ee9032e4b1423a2d235cd
| 26,820 |
import math
def discretize_q(q, tol=None, lat_range=(-math.pi / 2, math.pi / 2), points=None):
"""
simulate feature database by giving closest lat & roll with given tolerance
and set lon to zero as feature detectors are rotation invariant (in opengl coords)
"""
if tol is not None and points is not None or tol is None and points is None:
assert False, 'Give either tol or points'
elif tol is not None:
points = bf2_lat_lon(tol, lat_range=lat_range)
lat, lon, roll = q_to_ypr(q)
(nlat, nroll), idx = find_nearest_arr(
points,
np.array((lat, roll)),
ord=2,
fun=wrap_rads,
)
nq0 = ypr_to_q(nlat, 0, nroll)
return nq0, idx
|
a72653753d48e53bcebab94088828e9cecf19f34
| 26,821 |
from typing import Union
import scipy
def generate_testdata(
pars: dataclass,
ntrials: int = 1,
baseclass: Union[object, None] = None,
func: Union[object, None] = None,
exp_test_set: bool=True,
):
"""
meanrate is in Hz(events/second)
maxt is in seconds - duration of trace
bigevent is a dict {'t': delayinsec, 'I': amplitudeinA}
"""
# if baseclass is None and func is not None:
# raise ValueError("Need base class definition")
timebase = np.arange(0.0, pars.maxt, pars.dt) # in ms
t_psc = np.arange(
0.0, pars.tdur, pars.dt
) # time base for single event template in ms
if baseclass is None and func is None: # make double-exp event
print('Using our template - which may make a different psc than you want')
tau_1 = pars.taus[0] # ms
tau_2 = pars.taus[1] # ms
Aprime = (tau_2 / tau_1) ** (tau_1 / (tau_1 - tau_2))
g = Aprime * (-np.exp(-t_psc / tau_1) + np.exp((-t_psc / tau_2)))
gmax = np.max(g)
g = pars.sign * g * pars.amp / gmax
elif baseclass is not None: # use template from the class
baseclass._make_template()
gmax = np.max(pars.sign*baseclass.template)
g = pars.amp * baseclass.template / gmax
else:
raise ValueError("Need base class or func definition")
testpsc = np.zeros((ntrials, timebase.shape[0]))
testpscn = np.zeros((ntrials, timebase.shape[0]))
i_events = [None] * ntrials
t_events = [None] * ntrials
for i in range(ntrials):
if exp_test_set:
pars.expseed = i * 47 # starting seed for intervals
pars.noiseseed = i # starting seed for background noise
if pars.expseed is None:
eventintervals = np.random.exponential(
1.0 / pars.meanrate, int(pars.maxt * pars.meanrate)
)
else:
np.random.seed(pars.expseed + i)
eventintervals = np.random.exponential(
1.0 / pars.meanrate, int(pars.maxt * pars.meanrate)
)
eventintervals = eventintervals[eventintervals < 10.0]
events = np.cumsum(eventintervals)
if pars.bigevent is not None:
events = np.append(events, pars.bigevent["t"])
events = np.sort(events)
# time of events with exp distribution:
t_events[i] = events[events < pars.maxt]
i_events[i] = np.array([int(x / pars.dt) for x in t_events[i]])
testpsc[i][i_events[i]] = np.random.normal(
1.0, pars.ampvar / pars.amp, len(i_events[i])
)
if pars.bigevent is not None:
ipos = int(pars.bigevent["t"] / pars.dt) # position in array
testpsc[i][ipos] = pars.bigevent["I"]
testpsc[i] = scipy.signal.convolve(testpsc[i], g, mode="full")[
: timebase.shape[0]
]
if pars.noise > 0:
if pars.noiseseed is None:
testpscn[i] = testpsc[i] + np.random.normal(
0.0, pars.noise, testpsc.shape[1]
)
else:
np.random.seed(pars.noiseseed)
testpscn[i] = testpsc[i] + np.random.normal(
0.0, pars.noise, testpsc.shape[1]
)
else:
testpscn[i] = testpsc[i]
# print(t_events)
# print(i_events)
# print(testpscn[0][i_events])
# mpl.plot(timebase, testpscn[0])
# mpl.show()
# exit()
return timebase, testpsc, testpscn, i_events, t_events
|
2c2fdd81608ca1df1564e7b6a7121c164d06e8d9
| 26,823 |
def has_param(param):
"""
Generate function, which will check `param` is in html element.
This function can be used as parameter for .find() method in HTMLElement.
"""
def has_param_closure(element):
"""
Look for `param` in `element`.
"""
if element.params.get(param, "").strip():
return True
return False
return has_param_closure
|
6800725c378714b5161772f0a2f9ef89ae278400
| 26,824 |
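A small illustration of the returned closure, using a hypothetical stand-in class instead of the real HTMLElement (not shown here); only the `params` attribute that the closure reads is modeled.

class FakeElement:
    """Stand-in exposing the `params` dict that the closure inspects."""
    def __init__(self, params):
        self.params = params

has_href = has_param("href")
elements = [FakeElement({"href": "http://example.com"}),
            FakeElement({"href": "   "}),    # blank value -> filtered out
            FakeElement({"class": "nav"})]
print([has_href(el) for el in elements])     # [True, False, False]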
import numpy as np
import torch
def neg_log_likeDUN(output, target, sigma, cat):
"""
function to compute expected loss across different network depths
inputs:
- preds_list, list of predictions for different depths
- targets, single set of targets
- noise, aleatoric noise (length 12)
- cat, variational categorical distribution
"""
target_reshape = target.reshape(1,len(target),-1)
sigma_reshape = sigma.reshape(1, 1, len(sigma))
exponent = -0.5*torch.sum((target_reshape - output)**2/sigma_reshape**2, 2)
log_coeff = -torch.sum(torch.log(sigma)) - len(sigma) * torch.log(torch.sqrt(torch.tensor(2*np.pi)))
scale = 1 / (exponent.size()[1])
pre_expectation = - scale * torch.sum(log_coeff + exponent, 1)
expectation = (pre_expectation * cat).sum()
return expectation
|
8c2bcd9833e9a309270087331b67223b5e4ce74e
| 26,825 |
def shoot(C, D, b, maxiter=1000, abs_tol=1e-7):
"""Return random equality set of P that projects on a projection facet.
Returns randomly selected equality set E_0 of P such
that the projection of the equality set is a facet of the projection.
@param C: Matrix defining the polytope Cx+Dy <= b
@param D: Matrix defining the polytope Cx+Dy <= b
@param b: Vector defining the polytope Cx+Dy <= b
@return: `E_0,af,bf`: Equality set and affine hull
"""
d = C.shape[1]
k = D.shape[1]
iter = 0
while True:
if iter > maxiter:
raise Exception(
"shoot: could not find starting equality set")
gamma = np.random.rand(d) - 0.5
c = np.zeros(k + 1)
c[0] = -1
G = np.hstack([np.array([np.dot(C, gamma)]).T, D])
sol = solvers.lpsolve(c, G, b, solver='glpk')
opt_sol = np.array(sol['x']).flatten()
opt_dual = np.array(sol['z']).flatten()
r_opt = opt_sol[0]
y_opt = np.array(opt_sol[range(1, len(opt_sol))]).flatten()
x_opt = r_opt * gamma
E_0 = np.nonzero(
np.abs(np.dot(C, x_opt) + np.dot(D, y_opt) - b) < abs_tol)[0]
DE0 = D[E_0, :]
CE0 = C[E_0, :]
b0 = b[E_0]
if rank(np.dot(null_space(DE0.T).T, CE0)) == 1:
break
iter += 1
af, bf = proj_aff(CE0, DE0, b0, abs_tol=abs_tol)
if is_dual_degenerate(c, G, b, None, None, opt_sol,
opt_dual, abs_tol=abs_tol):
E_0 = unique_equalityset(C, D, b, af, bf, abs_tol=abs_tol)
af, bf = proj_aff(C[E_0, :], D[E_0, :], b[E_0])
if len(bf) > 1:
raise Exception("shoot: wrong dimension of affine hull")
return E_0, af.flatten(), bf
|
1cd85fbd4752ff859553d7ef907ee85b20a99210
| 26,826 |
def bwdist(
img,
method=cv2.DIST_L2,
dist_mask=cv2.DIST_MASK_5,
label_type=cv2.DIST_LABEL_CCOMP,
ravel=True,
):
"""Mimics Matlab's bwdist function, similar to OpenCV's distanceTransform()
but with different output.
https://www.mathworks.com/help/images/ref/bwdist.html
Available metrics:
https://docs.opencv.org/3.4/d7/d1b/group__imgproc__misc.html#gaa2bfbebbc5c320526897996aafa1d8eb
Available distance masks:
https://docs.opencv.org/3.4/d7/d1b/group__imgproc__misc.html#gaaa68392323ccf7fad87570e41259b497
Available label types:
https://docs.opencv.org/3.4/d7/d1b/group__imgproc__misc.html#ga3fe343d63844c40318ee627bd1c1c42f
"""
flip = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY_INV)[1]
dist, labeled = cv2.distanceTransformWithLabels(flip, method, dist_mask, labelType=label_type)
if ravel: # return linear indices if ravel == True (default)
idx = np.zeros(img.shape, dtype=np.intp)
idx_func = np.flatnonzero
else: # return two-channel indices if ravel == False
idx = np.zeros((*img.shape, 2), dtype=np.intp)
idx_func = lambda masked: np.dstack(np.where(masked))
for l in np.unique(labeled):
mask = labeled == l
idx[mask] = idx_func(img * mask)
return dist, idx
|
9889e731dab572846936f7cc79b88cb794cd9852
| 26,827 |
def calculate_cumulative_conf(areaP90: float=1., areaP10: float=10., pdP90: float=10., pdP10: float=24):
"""Calculate cumulative confidence level for expected development size in MW
Args:
areaP90 (float): pessimistic area in sqkm
areaP10 (float): optimistic area in sqkm
pdP90 (float): pessimistic power density in MWe/sqkm
pdP10 (float): optimistic power density in MWe/sqkm
Returns:
prob_df (pandas Dataframe): cumulative confidence curve in Reservoir Size
"""
# calculate area > 250 °C
area_mu = ((np.log(areaP90)+np.log(areaP10))/2)
area_sigma = (np.log(areaP10)-np.log(areaP90))/((norm.ppf(0.9)-(norm.ppf(0.1))))
# calculate powerdensity mean and standard dev
powerdens_mu = ((np.log(pdP90)+np.log(pdP10))/2)
powerdens_sigma = (np.log(pdP10)-np.log(pdP90))/((norm.ppf(0.9)-(norm.ppf(0.1))))
capacity_mu = area_mu + powerdens_mu
capacity_sigma = ((area_sigma**2)+(powerdens_sigma**2))**0.5
eds = [lognorm.ppf(x/100, capacity_sigma, loc=0, scale=np.exp(capacity_mu)) for x in range(0,100)]
indx = list(np.arange(0,100)[::-1])
edsepc_tups = list(zip(indx,eds))
prob_df = pd.DataFrame(edsepc_tups, columns = ['Cumulative confidence (%)', 'Expected development size (MWe)'])
return prob_df
|
9231e1d7942ad8c0e36e606c3a7ddd25439016fb
| 26,828 |
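A usage sketch, assuming `calculate_cumulative_conf` above is in scope together with the module-level imports it relies on (numpy as np, pandas as pd, scipy.stats norm and lognorm); the P90/P10 inputs are illustrative only.

import numpy as np
import pandas as pd
from scipy.stats import norm, lognorm

# Pessimistic/optimistic area (sqkm) and power density (MWe/sqkm)
prob_df = calculate_cumulative_conf(areaP90=1.0, areaP10=10.0, pdP90=10.0, pdP10=24.0)
print(prob_df.head())   # highest confidence levels pair with the smallest expected sizes
print(prob_df.tail())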
def get_all_keys(data):
"""Get all keys from json data file"""
all_keys = set(data[0].keys())
for row in data:
all_keys = set.union(all_keys, set(row.keys()))
return list(all_keys)
|
5532af993f87bf4e00c7bec13eb971e0114e736c
| 26,830 |
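A tiny check, assuming `get_all_keys` above is in scope; the records are made up.

data = [{"id": 1, "name": "a"}, {"id": 2, "email": "b@example.com"}]
print(sorted(get_all_keys(data)))   # ['email', 'id', 'name']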
def submitmq(request):
"""Message queue submit page"""
context = {
"type": "mq",
"choices": FRUIT_CHOICES,
}
if request.method == 'POST': # If the form has been submitted...
p = None
try:
# save preference
p = Preference()
p.name = request.POST['name']
p.email = request.POST['email']
p.fruit = request.POST['fruit']
p.save()
# Save the newly created preference in the session
# so it can be used again
request.session['preference_id'] = p.id
except Exception:
msg_error = u'Something went wrong saving the message queue preference task'
logger.error(msg_error)
messages.add_message(request, messages.ERROR, msg_error)
a = ActivityLog(pref=p, message=msg_error)
a.save()
return render(request, 'error.html', context)
# Add a task to the message queue to send a confirmation email
credentials = pika.credentials.PlainCredentials(
settings.AMQP_USERNAME,
settings.AMQP_PASSWORD
)
# credentials=credentials,
connection = pika.BlockingConnection(
pika.ConnectionParameters(
host=settings.AMQP_HOST,
)
)
try:
channel = connection.channel()
channel.queue_declare(queue=settings.AMQP_QUEUE, durable=True)
channel.basic_publish(exchange='',
routing_key=settings.AMQP_QUEUE,
body=str(p.id),
properties=pika.BasicProperties(
delivery_mode = 2, # make message persistent
), mandatory=True, immediate=False)
logger.info("Queued message task for {} {} {}".format(p.id, p.name, p.email))
except Exception:
msg_error = u'Something went wrong queuing the message queue preference task'
logger.error(msg_error)
messages.add_message(request, messages.ERROR, msg_error)
a = ActivityLog(pref=p, message=msg_error)
a.save()
return render(request, 'error.html', context)
channel.close()
connection.close()
return HttpResponseRedirect(reverse('thanks'))
return render(request, 'refactortoscale/choices.html', context)
|
2b84e7f8ee9a118d0989024dcd58f26d52f30351
| 26,831 |
def plot_projections(img: np.ndarray, spacing: int = 5, zoom: float = None) -> np.ndarray:
"""
Plot three maximum intensity projections of the given image side-by-side.
Parameters
----------
img : np.ndarray
Input 3D image.
spacing : int
Number of pixels to separate the projections in the output image.
Default: 5
zoom : float, optional
Scale factor to interpolate the image along z axis.
Default: None (no interpolation).
Returns
-------
np.ndarray
Output image with the three maximum projections
"""
m0 = np.max(img, axis=0)
m1 = np.max(img, axis=1)
m2 = np.max(img, axis=2)
if zoom is not None:
zoom_arr = [1.] * len(m1.shape)
zoom_arr[0] = zoom
m1 = ndimage.interpolation.zoom(m1 * 1., zoom_arr, order=1)
m2 = ndimage.interpolation.zoom(m2 * 1., zoom_arr, order=1)
maxproj = np.zeros((m0.shape[0] + m1.shape[0] + spacing,
m0.shape[1] + m2.shape[0] + spacing) +
img.shape[3:])
maxproj[:m0.shape[0], :m0.shape[1]] = m0
maxproj[m0.shape[0] + spacing:, :m0.shape[1]] = m1
maxproj[:m0.shape[0], m0.shape[1] + spacing:] = np.swapaxes(m2, 0, 1)
return maxproj
|
cc68f75bb10b249107b90ef6f8d664ff09aee4f2
| 26,832 |
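A quick shape check, assuming `plot_projections` above is in scope and numpy is available; with `zoom=None` the scipy.ndimage dependency is not exercised.

import numpy as np

img = np.random.rand(10, 20, 30)           # (z, y, x) volume
panel = plot_projections(img, spacing=5)   # no z interpolation
# m0 is (20, 30), m1 is (10, 30), m2 is (10, 20), so the montage is
# (20 + 10 + 5, 30 + 10 + 5) = (35, 45)
print(panel.shape)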
def _ngl_write_atom(
num,
species,
x,
y,
z,
group=None,
num2=None,
occupancy=1.0,
temperature_factor=0.0,
):
"""
Writes a PDB-formatted line to represent an atom.
Args:
num (int): Atomic index.
species (str): Elemental species.
x, y, z (float): Cartesian coordinates of the atom.
group (str): A...group name? (Default is None, repeat elemental species.)
num2 (int): An "alternate" index. (Don't ask me...) (Default is None, repeat first number.)
occupancy (float): PDB occupancy parameter. (Default is 1.)
temperature_factor (float): PDB temperature factor parameter. (Default is 0.)
Returns:
(str): The line defining an atom in PDB format
Warnings:
* The [PDB docs](https://www.cgl.ucsf.edu/chimera/docs/UsersGuide/tutorials/pdbintro.html) indicate that
the xyz coordinates might need to be in some sort of orthogonal basis. If you have weird behaviour,
this might be a good place to investigate.
"""
if group is None:
group = species
if num2 is None:
num2 = num
return "ATOM {:>6} {:>4} {:>4} {:>5} {:10.3f} {:7.3f} {:7.3f} {:5.2f} {:5.2f} {:>11} \n".format(
num, species, group, num2, x, y, z, occupancy, temperature_factor, species
)
|
92a5d62f3c4f6d927aa5a6010b217344d0d241d3
| 26,833 |
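A quick illustration of the produced record, assuming `_ngl_write_atom` above is in scope; the element and coordinates are arbitrary.

line = _ngl_write_atom(1, "Fe", 0.0, 0.0, 2.855)
print(line, end="")
# prints a fixed-width PDB ATOM record, something like:
# ATOM      1   Fe   Fe     1      0.000   0.000   2.855  1.00  0.00          Fe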
def get_insert_components(options):
""" Takes a list of 2-tuple in the form (option, value) and returns a
triplet (colnames, placeholders, values) that permits making a database
query as follows: c.execute('INSERT INTO Table ({colnames}) VALUES
{placeholders}', values). """
col_names = ','.join(opt[0] for opt in options)
placeholders = ','.join('?' for i in range(len(options)))
if len(col_names) > 0:
col_names = ',' + col_names
if len(placeholders) > 0:
placeholders = ',' + placeholders
values = tuple(opt[1] for opt in options)
return col_names, placeholders, values
|
3e1deecd39b0e519124278f47713d5b3a1571815
| 26,834 |
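A sketch of how the leading-comma convention is meant to compose with a column that is always present, using sqlite3; the table and column names are invented.

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE items (id INTEGER, name TEXT, qty INTEGER)")

options = [("name", "widget"), ("qty", 3)]
col_names, placeholders, values = get_insert_components(options)
# col_names == ',name,qty' and placeholders == ',?,?', so they append
# cleanly after the base column (here: id).
conn.execute(
    "INSERT INTO items (id{}) VALUES (?{})".format(col_names, placeholders),
    (1,) + values,
)
print(conn.execute("SELECT * FROM items").fetchall())   # [(1, 'widget', 3)]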
from typing import List
def send_recording_to_gist(table: str, results: List['FormattedResult'], assignment: str):
"""Publish a table/result pair to a private gist"""
# the "-" at the front is so that github sees it first and names the gist
# after the homework
table_filename = '-stograde report {} table.txt'.format(assignment)
files = {
table_filename: {'content': table},
}
for file in results:
filename = file.student + '.' + file.type.name.lower()
files[filename] = {
'content': file.content.strip()
}
return post_gist('log for ' + assignment, files)
|
41cd2dc428018893659a0aaa5c67b50e37375dc8
| 26,835 |
def append_composite_importance(composite_terms, ebm, X, composite_name=None, global_exp=None, global_exp_name=None, contributions=None):
"""Computes the importance of the composite_terms and appends it to a global explanation.
In case a global explanation is provided, the composite importance will be appended to it and returned.
Otherwise, a new global explanation will be created and returned.
The composite importance will only be displayed in the Summary graph.
Args:
composite_terms: A list of term names or term indices
ebm: A fitted EBM
X (numpy array): Samples used to compute the composite importance
composite_name (str, optional): User-defined composite Name
global_exp (EBMExplanation, optional): User-defined global explanation object
global_exp_name (str, optional): User-defined name when creating a new global explanation
contributions (numpy array, optional): Contributions of all terms per X's row
Returns:
EBMExplanation: A global explanation with the composite importance appended to it
"""
check_is_fitted(ebm, "has_fitted_")
if global_exp is not None:
if global_exp.explanation_type != "global":
raise ValueError("The provided explanation is {} but a global explanation is expected.".format(global_exp.explanation_type))
elif global_exp._internal_obj is None or global_exp._internal_obj["overall"] is None:
raise ValueError("The global explanation object is incomplete.")
else:
global_explanation = global_exp
else:
global_explanation = ebm.explain_global(global_exp_name)
if composite_name is None:
composite_name = _get_composite_name(composite_terms, ebm.term_names_)
composite_importance = compute_composite_importance(composite_terms, ebm, X, contributions)
global_explanation._internal_obj["overall"]["names"].append(composite_name)
global_explanation._internal_obj["overall"]["scores"].append(composite_importance)
return global_explanation
|
12c8eeb6ad9dd49c96163ed349236fbc8cdf8ed5
| 26,836 |
from collections import abc
from itertools import repeat
def apply_pair(main, other, *, fn):
"""`Apply` optimized for use with paired structured containers."""
# special sequence types must precede generic `Sequence` check
if isinstance(main, (str, bytes)):
return fn(main, other)
# delegate other types to the function (int, float, etc.)
elif not isinstance(main, (abc.Sequence, abc.Mapping)):
return fn(main, other)
# `objects` is Iterable[Union[Sequence, Mapping]], but not `str` or `bytes`
# check that the current level data are objects of the same type
if not (other is None or isinstance(other, type(main))):
raise TypeError(f'`{other}` does not match type `{type(main)}`')
# (dict, OrderedDict, etc) -> dict
if isinstance(main, abc.Mapping):
other = {} if other is None else other
# recurse and rebuild the mapping as dict (main left-joins with others)
return {k: apply_pair(main[k], other.get(k), fn=fn)
for k in main.keys()}
# (tuple, namedtuple, list, etc) -> list*
elif isinstance(main, abc.Sequence):
if other is None:
other = repeat(None)
else:
# check if sequences conform
assert len(main) == len(other) # TypeError
values = [apply_pair(m, o, fn=fn) for m, o in zip(main, other)]
# demote mutable sequences to lists
if isinstance(main, abc.MutableSequence):
return values
# ... and immutable to tuple, keeping in mind special namedtuples
return getattr(main, '_make', tuple)(values)
|
c547182fd33fe97136fccba635fa61d96ec73447
| 26,837 |
import requests
import json
def get_info(connector, host, key, datasetid):
"""Get basic dataset information from UUID.
Keyword arguments:
connector -- connector information, used to get missing parameters and send status updates
host -- the clowder host, including http and port, should end with a /
key -- the secret key to login to clowder
datasetid -- the dataset to get info of
"""
url = "%sapi/datasets/%s?key=%s" % (host, datasetid, key)
result = requests.get(url,
verify=connector.ssl_verify if connector else True)
result.raise_for_status()
return json.loads(result.text)
|
982e3b9304b3c2ca04348efb794b8b8d34b86b89
| 26,838 |
from typing import Union
from typing import Tuple
from typing import List
def solve_network(
NWN: nx.Graph,
source_node: Union[Tuple, List[Tuple]],
drain_node: Union[Tuple, List[Tuple]],
input: float,
type: str = "voltage",
solver: str = "spsolve",
**kwargs
) -> np.ndarray:
"""
Solve for the voltages of each node in a given NWN. Each drain node will
be grounded. If the type is "voltage", each source node will be at the
specified input voltage. If the type is "current", current will be sourced
from each source node.
Parameters
----------
NWN : Graph
Nanowire network.
source_node : tuple, or list of tuples
Voltage/current source nodes.
drain_node : tuple, or list of tuples
Grounded output nodes.
input : float
Supplied voltage (current) in units of v0 (i0).
type : {"voltage", "current"}, optional
Input type. Default: "voltage".
solver: str, optional
Name of sparse matrix solving algorithm to use. Default: "spsolve".
**kwargs
Keyword arguments passed to the solver.
Returns
-------
out : ndarray
Output array containing the voltages of each node. If the input type
is voltage, the current is also in this array as the last element.
"""
# Get lists of source and drain nodes
if isinstance(source_node, tuple):
source_node = [source_node]
if isinstance(drain_node, tuple):
drain_node = [drain_node]
# Pass to solvers
if type == "voltage":
out = _solve_voltage(NWN, input, source_node, drain_node, solver, **kwargs)
elif type == "current":
out = _solve_current(NWN, input, source_node, drain_node, solver, **kwargs)
else:
raise ValueError("Invalid source type.")
return out
|
c5678fe12cc9abdd9404694c513155e7a85f0713
| 26,839 |
def network_allocations_get_for_share_server(context, share_server_id,
session=None):
"""Get network allocation for share server."""
return IMPL.network_allocations_get_for_share_server(context,
share_server_id,
session=session)
|
7fd30c9249ed167ecf17b2902879de992a023db9
| 26,841 |
def round_corner(radius, fill):
"""Draw a round corner"""
corner = Image.new('L', (radius, radius), 0) # (0, 0, 0, 0))
draw = ImageDraw.Draw(corner)
draw.pieslice((0, 0, radius * 2, radius * 2), 180, 270, fill=fill)
return corner
|
b5f781123ed0dd45dcf61b5d130a9c38c4d3d38f
| 26,842 |
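A quick check of the helper, assuming PIL's Image and ImageDraw are the module-level imports it relies on.

from PIL import Image, ImageDraw

corner = round_corner(radius=25, fill=255)
print(corner.size, corner.mode)     # (25, 25) L
# Typical use: paste it into a mask's top-left corner to round a rectangle
mask = Image.new("L", (100, 60), 255)
mask.paste(corner, (0, 0))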
def hindcasts(hcsts, obsvs=None, hists=None, shade=False, ax=None, figsize=(15, 4)):
"""
Plot sets of hindcasts. Where multiple variables are provided, it is
assumed that all inputs contain the same variables.
Parameters
----------
hcsts : dict
Dictionary of hindcasts to plot with the format {"name": hcst}, where
hcst is an xarray.Dataset with dimensions "init" and "lead"
obsvs : dict, optional
Dictionary of observations to plot with the format {"name": obsv},
where obsv is an xarray.Dataset with dimension "time"
hists : dict, optional
Dictionary of historical runs to plot with the format {"name": hist},
where hist is an xarray.Dataset with dimension "time"
shade : bool, optional
If True, shade background according to change in bias correction in
CAFE60v1
"""
def _shading(ax):
trans = cftime.datetime(1992, 1, 1)
end = cftime.datetime(2040, 1, 1)
xlim = ax.get_xlim()
ylim = ax.get_ylim()
ax.fill_between(
[trans, end],
[ylim[1], ylim[1]],
[ylim[0], ylim[0]],
color=[0.9, 0.9, 0.9],
)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
n_vars = len(hcsts[list(hcsts.keys())[0]].data_vars)
if ax is None:
fig = plt.figure(figsize=(figsize[0], n_vars * figsize[1]))
axs = fig.subplots(n_vars, 1, sharex=True)
if n_vars == 1:
axs = [axs]
else:
axs = [ax]
# Plot the hindcasts
colormaps = ["autumn", "winter", "cool"]
colormapcycler = cycle(colormaps)
for name, hcst in hcsts.items():
if "time" in hcst:
hcst_time = "time"
elif "valid_time" in hcst:
hcst_time = "valid_time"
else:
raise ValueError("I can't work out the time variable in hcsts")
color = getattr(cm, next(colormapcycler))(np.linspace(0, 0.9, len(hcst.init)))
for a, var in enumerate(hcst.data_vars):
for idx, (i, c) in enumerate(zip(hcst[var].init, color)):
if idx == 0:
label = name
else:
label = "_nolabel_"
h = hcst[var].sel(init=i)
if "member" in h.dims:
h_mean = h.mean("member", keep_attrs=True)
for m in h.member:
axs[a].plot(
h[hcst_time],
h.sel(member=m),
color=[0.8, 0.8, 0.8],
linestyle="-",
label="_nolabel_",
zorder=-1,
)
else:
h_mean = h
axs[a].plot(
h_mean[hcst_time][0],
h_mean[0],
color=c,
marker="o",
label="_nolabel_",
)
axs[a].plot(
h_mean[hcst_time], h_mean, color=c, linestyle="-", label=label
)
xlim = (hcst[hcst_time].min().item(), hcst[hcst_time].max().item())
# Plot the observations
if obsvs is not None:
lines = ["-", "--", "-.", ":"]
linecycler = cycle(lines)
for name, obsv in obsvs.items():
line = next(linecycler)
for a, var in enumerate(hcst.data_vars):
axs[a].plot(
obsv.time, obsv[var], color="black", label=name, linestyle=line
)
# Plot the historical runs
if hists is not None:
for name, hist in hists.items():
for a, var in enumerate(hist.data_vars):
h_mean = (
hist[var].mean("member", keep_attrs=True)
if "member" in hist[var].dims
else hist[var]
)
axs[a].plot(h_mean.time, h_mean, label=name)
# Format plots
ticks = xr.cftime_range(start=xlim[0], end=xlim[-1], freq="5AS", calendar="julian")
years = xr.cftime_range(start=xlim[0], end=xlim[-1], freq="AS", calendar="julian")
xlim = (years.shift(-1, "AS")[0], years.shift(2, "AS")[-1])
for a, var in enumerate(hcst.data_vars):
axs[a].set_xticks(ticks.values)
axs[a].set_xticklabels(ticks.year, rotation=40)
axs[a].set_xlim(xlim)
axs[a].set_ylabel(hcst[var].attrs["long_name"])
axs[a].grid()
if a == 0:
axs[a].legend()
if a == (n_vars - 1):
axs[a].set_xlabel("year")
else:
axs[a].set_xlabel("")
if shade:
_shading(axs[a])
plt.tight_layout()
if ax is None:
fig.patch.set_facecolor("w")
return fig
else:
return ax
|
da483ad5f39ca200eb6191cf10e77e3dc2b589c2
| 26,843 |
def _create_integration_test_executable(
orig_target_name,
target_suffix,
executable):
"""Create _integration_test_executable rule and return its names.
Args:
orig_target_name: The name given to the sut_component or the
integration_test.
target_suffix: A suffix to append to the orig_target_name to make a unique
target name for the _integration_test_executable.
executable: Can be either a string, which is interpreted as a program,
or a dictionary that has a mandatory "program" field (whose value is a
string), and some optional fields. If a string is provided or if the
optional field 'timeout_seconds' is not provided, it will default to
DEFAULT_EXECUTABLE_TIMEOUT_SECONDS.
Returns:
The target name of the _integration_test_executable rule.
"""
# Create a target name for the _integration_test_executable rule.
target_name = "_%s_%s" % (orig_target_name, target_suffix)
# isinstance is not supported in skylark
# pylint: disable=unidiomatic-typecheck
if type(executable) == "string":
_integration_test_executable(
name = target_name,
program = executable,
)
return target_name
# Validate that executable is a valid dictionary.
# isinstance is not supported in skylark
# pylint: disable=unidiomatic-typecheck
if type(executable) != "dict":
fail("Error in target %s: %s is neither a string nor a dictionary." % (orig_target_name, target_suffix))
for key in executable:
if key not in ["program", "args", "input_files", "data", "deps", "output_properties", "output_files", "timeout_seconds"]:
fail("Error in target %s: %s has an invalid key %s." % (orig_target_name, target_suffix, key))
_integration_test_executable(
name = target_name,
timeout_seconds = executable.get("timeout_seconds"),
program = executable.get("program"),
args = executable.get("args"),
input_files = executable.get("input_files"),
data = executable.get("data"),
deps = executable.get("deps"),
output_properties = executable.get("output_properties"),
output_files = executable.get("output_files"),
)
return target_name
|
761a0c586784ce37c3cd22feafb755d42908bca1
| 26,845 |
import torch
import torch.nn as nn
def pgd(model, inp, label,
epsilon=0.3,
step_size=0.01,
num_steps=40,
random_start=True,
pixel_range=(-0.5, 0.5)):
"""Short summary.
Parameters
----------
model : nn.Module
Model to attack
inp : tensor
Input to perturb adversarially.
label : tensor
Target label to minimize score of.
epsilon : float
Magnitude of perturbation (the default is 0.3).
step_size : float
Size of PGD step (the default is 0.01).
num_steps : float
Number of PGD steps for one attack. Note that the model is called this
many times for each attack. (the default is 40).
random_start : float
Whether or not to add a uniform random (-epsilon to epsilon) perturbation
before performing PGD. (the default is True).
pixel_range : float
Range to clip the output. (the default is (-0.5, 0.5)).
Returns
-------
tensor
Adversarially perturbed input.
"""
adv_inp = inp.clone().detach()
if epsilon == 0:
return adv_inp
if random_start:
adv_inp += torch.rand(*adv_inp.shape, device=inp.device)*2*epsilon - epsilon
for i in range(num_steps):
inp_var = adv_inp.clone().requires_grad_(True)
output = model(inp_var)
loss = nn.CrossEntropyLoss()(output, label)
loss.backward()
adv_inp += inp_var.grad.sign()*step_size
adv_inp = torch.max(torch.min(adv_inp, inp+epsilon), inp-epsilon)
adv_inp = torch.clamp(adv_inp, *pixel_range)
return adv_inp.clone().detach()
|
3c59fdf7f4f9d6941a1622c9dc9a9a378c5120c9
| 26,846 |
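A toy end-to-end call, assuming the `torch.nn` import noted above; the model and data are random stand-ins, not from the original project.

import torch
import torch.nn as nn

model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))
inp = torch.rand(4, 1, 28, 28) - 0.5            # already inside pixel_range
label = torch.randint(0, 10, (4,))
adv = pgd(model, inp, label, epsilon=0.1, step_size=0.01, num_steps=5)
print((adv - inp).abs().max() <= 0.1 + 1e-6)    # perturbation stays within epsilon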
def enumerate_trials(perievents):
"""
adds an index to perievents_2D that counts the number of trials per session and event
starting with 1, removes FrameCounter index
:param perievents: perievents df, non-column based format
:return: perievents df with additional index Trial
"""
# unstack indices to make several counts for each event and session
perievents = perievents.reset_index('FrameCounter', drop=True)
idx = list(perievents.index.names)
perievents['Trial'] = perievents.groupby(idx).cumcount() + 1
return perievents.set_index('Trial', append=True)
|
d469a823b6af60d305dc37b1ce42176f16b21f8d
| 26,847 |
import torch
def add_dims_right(tensor, ndims, right_indent=0):
""" Add empty dimensions to the right of tensor shape
"""
assert right_indent >= 0
for i in range(ndims):
tensor = torch.unsqueeze(tensor, -1-right_indent)
return tensor
|
7d4c1b47eb659f0bcfc9dbcf7f7b04c1ccbafb80
| 26,848 |
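A shape-only sketch, assuming `add_dims_right` above is in scope.

import torch

t = torch.zeros(4, 3)
print(add_dims_right(t, 2).shape)                   # torch.Size([4, 3, 1, 1])
print(add_dims_right(t, 2, right_indent=1).shape)   # torch.Size([4, 1, 1, 3])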
def ciClim( imData, sigma = 2.5 ):
"""
Confidence interval color limits, for images. Most useful for highly spikey data.
"""
meanData = np.mean( imData )
stdData = np.std( imData )
return np.array( [meanData - sigma*stdData, meanData + sigma*stdData] )
|
64debb8fea07c60da0d9af825482959312ef09d5
| 26,849 |
def get_submodel_name(history = 60, lag = 365, num_neighbors = 20, margin_in_days = None, metric = "cos"):
"""Returns submodel name for a given setting of model parameters
"""
submodel_name = '{}-autoknn-hist{}-nbrs{}-margin{}-lag{}'.format(metric,
history,
num_neighbors,
margin_in_days,
lag)
return submodel_name
|
69fa276a86c39f342ffceba72408ab6970dd0a41
| 26,850 |
def create_db_scoped_session(connection_string=None):
"""Create scoped session."""
# we use NullPool, so that SQLAlchemy doesn't pool local connections
# and only really uses connections while writing results
return scoped_session(
sessionmaker(bind=create_engine(
connection_string or configuration.POSTGRES_CONNECTION,
poolclass=NullPool)))
|
da0583601147dbebb97c25d945f05fdb19859709
| 26,851 |
import torch
def sens_reduce_precise(x: torch.Tensor, sens_maps: torch.Tensor) -> torch.Tensor:
"""
Combine num_coils individual coil images into a single image using estimates of the sensitivity maps.
Then replace the magnitude with the RSS estimate to reduce inaccuracies caused by the sensitivity map estimates.
Args:
x: Tensor of shape (num_coils, H, W, 2) that contains the individual coil images.
sens_maps: Sensitivity maps of shape (num_coils, H, W, 2).
Returns:
The combined image of shape (H, W, 2).
"""
x_reduced = sens_reduce(x, sens_maps)
x_phi = to_polar(x_reduced)[...,1]
x_rss = fastmri.rss_complex(x)
x_polar = torch.stack((x_rss, x_phi), dim=-1)
return to_cartesian(x_polar)
|
5d40c20dc9dd276a734378602f8ce7a42f402087
| 26,852 |
from typing import Callable
def dict_to_object(item, callback: Callable = None):
"""This function convert a Python Dict into a Python object:
>>> data = {"name": "John Smith", "hometown": {"name": "New York", "id": 123}}type >>> c = json_to_object(data)
type>> c
<class 'automatic'>
>>> c.name
"John Smith"
>>> c.hometown.name
"New York"
typevars(c)
mappingproxy({'name': 'Jotypemith', 'hometown': <class 'automatic'>, '__dict__':typetribute '__dict__' of 'automatic' objects>, '__weakref__': <attribute '__weakref__' of 'automatic' objects>, '__doc__': None})
"""
def convert(item):
if isinstance(item, dict):
return type('automatic', (), {
k: convert(v) for k, v in item.items()
})
if isinstance(item, list):
def yield_convert(item):
for index, value in enumerate(item):
yield convert(value)
return list(yield_convert(item))
else:
return item
return convert(item)
|
6a44ccdf53e54609cac2954f60e080a148daa1ed
| 26,853 |
def show_vm(vm_id):
"""
Executes the onevm show command and returns the xml output.
:param int vm_id: vm id number
:return: XML output from onevm show command
"""
return _get_subp_out(["onevm", "show", "-x", str(vm_id)])
|
962b98bb061b5e647c2f11e2447b7b838994d3d4
| 26,854 |
def isragged(arr):
"""Test if an array is ragged (i.e. unequal-length records).
Parameters
----------
arr : array
Returns
-------
ragged : bool
Returns True of not all entries in arr are of same length
(i.e. arr is indeed ragged). False otherwise.
"""
ragged = not (np.unique([len(r) for r in arr]).size == 1)
return ragged
|
1fdc0c23be00a8b7535d7a85df05e5d1b323c298
| 26,855 |
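Two quick checks, assuming `isragged` above is in scope and numpy is imported as np in its module.

import numpy as np

print(isragged([[1, 2], [3, 4, 5]]))   # True  (unequal record lengths)
print(isragged([[1, 2], [3, 4]]))      # False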
def compress_public_key(public_key):
"""Compresses a given uncompressed public key.
:param public_key: the key to compress, as bytes
:return: the compressed key, as bytes
"""
if public_key[0] != 0x04 or len(public_key) != 65:
raise ValueError('invalid uncompressed public key')
# We take the y coordinate
y = int.from_bytes(public_key[33:], 'big')
# And check its parity, to add the appropriate byte
if y % 2:
return b'\x03' + public_key[1:33]
else:
return b'\x02' + public_key[1:33]
|
8ff7c609a216e29b9cc31584e3cd824f7ab2879d
| 26,856 |
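A round-trip of the encoding with a synthetic 65-byte key (0x04 prefix plus 32-byte x and 32-byte y); this is not a real curve point, only a demonstration of the byte layout.

x = bytes(range(1, 33))               # fake 32-byte x coordinate
y = bytes(31) + b"\x07"               # fake 32-byte y coordinate, odd last byte
uncompressed = b"\x04" + x + y
compressed = compress_public_key(uncompressed)
print(compressed[0])                  # 3, because y is odd
print(compressed[1:] == x)            # True: the x coordinate is kept verbatim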
import re
def sphinx_doc_parser(doc: str) -> DocParserRetType:
"""A doc parser handling Sphinx-style and epydoc-style docstrings
Args:
doc: The function doc string
Returns:
The parsed doc string and parsed parameter list
"""
main_doc, param_docs = plain_doc_parser(doc)
parts = re.split(r'(^[:@].*:)', main_doc, 1, re.M)
if len(parts) <= 1:
return main_doc, param_docs
main_doc = parts[0]
for group in indented_groups(''.join(parts[1:])):
match = re.match(r'[:@]param\s+([^:]+):\s*(.*)$', ' '.join(group))
if not match:
continue
name = match.group(1).strip('`').lstrip('*')
param_docs[name] = ParamInfo(match.group(2).strip())
return main_doc, param_docs
|
1179149b7215838a7cfaa720b1ba6262170031c1
| 26,857 |
def bilinear_interpolate(X, x, y):
"""
Estimates of the pixel values at the coordinates (x, y) in `X` via bilinear
interpolation.
Notes
-----
Assumes the current entries in X reflect equally-spaced
samples from a 2D integer grid.
Modified from https://bit.ly/2NMb1Dr
Parameters
----------
X : :py:class:`ndarray <numpy.ndarray>` of shape `(in_rows, in_cols, in_channels)`
An input image sampled along a grid of `in_rows` by `in_cols`.
x : list of length `k`
A list of x-coordinates for the samples we wish to generate
y : list of length `k`
A list of y-coordinates for the samples we wish to generate
Returns
-------
samples : :py:class:`ndarray <numpy.ndarray>` of shape `(k, in_channels)`
The samples for each (x,y) coordinate computed via bilinear
interpolation
"""
x0 = np.floor(x).astype(int)
y0 = np.floor(y).astype(int)
x1 = x0 + 1
y1 = y0 + 1
x0 = np.clip(x0, 0, X.shape[1] - 1)
y0 = np.clip(y0, 0, X.shape[0] - 1)
x1 = np.clip(x1, 0, X.shape[1] - 1)
y1 = np.clip(y1, 0, X.shape[0] - 1)
Ia = X[y0, x0, :].T
Ib = X[y1, x0, :].T
Ic = X[y0, x1, :].T
Id = X[y1, x1, :].T
wa = (x1 - x) * (y1 - y)
wb = (x1 - x) * (y - y0)
wc = (x - x0) * (y1 - y)
wd = (x - x0) * (y - y0)
return (Ia * wa).T + (Ib * wb).T + (Ic * wc).T + (Id * wd).T
|
76692e28fe68e6af5266de8672ee0b32754297c3
| 26,858 |
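A minimal check on a 3x3 single-channel grid, assuming `bilinear_interpolate` above is in scope and numpy is imported as np in its module.

import numpy as np

X = np.arange(9, dtype=float).reshape(3, 3, 1)      # rows are y, columns are x
print(bilinear_interpolate(X, x=[0.5], y=[0.5]))    # [[2.]] (average of 0, 1, 3, 4)
print(bilinear_interpolate(X, x=[1.0], y=[1.0]))    # [[4.]] (exact interior grid point)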
import time
def do_prediction(intbl, pbms, gene_names,
filteropt="p-value", filterval=0.0001, spec_ecutoff=0.4, nonspec_ecutoff=0.35):
"""
intbl: preprocessed table
filteropt: p-value or z-score
filterval: # TFs for opt z-score and p-val cutoff for p-value
"""
# intbl: #rowidx,seq,val,diff,t,pbmname,escore_seq
start_time = time.time()
# move the comment here for testing
predfiles = [config.PREDDIR + "/" + pbm for pbm in pbms] # os.listdir(preddir)
preds = utils.chunkify(predfiles,config.PCOUNT) # chunks the predfiles for each process
# need to use manager here
shared_ready_sum = mp.Manager().Value('i', 0)
pool = mp.Pool(processes=config.PCOUNT)
if filteropt == "p-value":
filterval = float(filterval)
else: #z-score
filterval = int(filterval)
async_pools = [pool.apply_async(predict, (preds[i], intbl, shared_ready_sum, filteropt, filterval, spec_ecutoff, nonspec_ecutoff)) for i in range(0,len(preds))]
total = len(predfiles)
while not all([p.ready() for p in async_pools]):
time.sleep(2)
res = [p.get() for p in async_pools]
pool.terminate()
colnames,datavalues = postprocess(res,gene_names,filteropt,filterval)
return colnames,datavalues
|
70bcd1ed8ffdaf1b97d64ab2afdb1ad48f4a2047
| 26,859 |
def fetch_res(n, k, dim, ampl_noise, type_mat, scaled, norm_laplacian, n_avrg,
save_res_dir, embedding_method):
"""
Get the results from a given experiment when they were saved to a file,
in order to use them for plotting in plot_from_res, or not to redo the same
computation twice in run_synthetic_exps.
"""
fn = "n_{}-k_{}-dim_{}-ampl_{}-type_mat_{}-embedding_{}" \
"-scaled_{}-norm_laplacian_{}-n_avrg_{}.res".format(
n, k, dim, ampl_noise, type_mat, embedding_method,
scaled, norm_laplacian, n_avrg)
fn = save_res_dir + "/" + fn
with open(fn, 'r') as f:
first_line = f.readlines()[0]
res_mean, res_std = [float(el) for el in first_line.split()]
return(res_mean, res_std)
|
ce8cac77d36b5c6bd2920426e054899681318508
| 26,860 |
def load_gramar_from_SYGUS_spec(spec):
"""Creates Grammar from the given SYGUS grammar specification (http://sygus.seas.upenn.edu/files/SyGuS-IF.pdf, last access 3.08.2016).
:param spec: SYGUS grammar specification.
:return: New grammar object.
"""
grammar = Grammar()
spec = spec.replace('(', '( ')
spec = spec.replace(')', ' )')
words = spec.split()
brackets_open = 0
grammar_rule = None
rule_symb = None
i = 0
def get_production_complex_body(start):
body = []
opened = 0
k = start
while k < len(words):
if words[k] == '(':
opened += 1
elif words[k] == ')':
opened -= 1
body.append(words[k])
if opened == 0:
return body, k
k += 1
return body, k
while i < len(words):
w = words[i]
# print('processing: ' + ' ('+str(brackets_open)+') ' + w )
if w == '(' and brackets_open < 3:
brackets_open += 1
i += 1
continue
elif w == ')':
brackets_open -= 1
i += 1
continue
if brackets_open == 2:
# On the level of 2 opened brackets there are always production names (symb) and their sorts.
rule_symb = words[i]
sort = words[i + 1]
if sort in {"(", ")"}:
raise Exception("Malformed grammar!")
grammar_rule = GrammarRule(rule_symb, sort, grammar)
i += 2
continue
elif brackets_open == 3:
# On the level of 3 opened brackets the concrete right-hand sides of the current production are defined.
j = i
while j < len(words):
if words[j] == '(':
body, j = get_production_complex_body(j)
grammar_rule.add(Production(rule_symb, body, grammar_rule))
elif words[j] == ')':
i = j
brackets_open -= 1
break
else:
# Simple symbol not requiring surrounding it with brackets.
body = [ words[j] ]
grammar_rule.add(Production(rule_symb, body, grammar_rule))
j += 1
i += 1
if i < len(words) and words[i] == ")":
grammar.add_rule(grammar_rule)
continue
else:
raise Exception("Malformed grammar!")
i += 1
if brackets_open != 0:
raise Exception("Malformed grammar!")
# print('Final grammar:\n' + str(grammar))
return grammar
|
b909fdb2df0fd005a6ddc1c3069e1ba7659619c2
| 26,861 |
def im_entropy(im):
"""
Calculate entropy of one single image
:param im: a greyscale image in numpy format
:return: entropy of the image
"""
h, w = im.shape
hist = np.histogram(im.reshape(-1), bins=256)[0]
probs = hist / (h * w)
probs = probs[probs > 0]
ent = np.sum(-probs * np.log(probs))
return ent
|
aeaddd3c087e2f621ce0b35c108d6d1de630d477
| 26,862 |
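Two sanity checks, assuming `im_entropy` above is in scope and numpy is imported as np in its module.

import numpy as np

flat = np.full((64, 64), 128, dtype=np.uint8)
noisy = np.random.randint(0, 256, (64, 64), dtype=np.uint8)
print(im_entropy(flat) == 0)   # True: a constant image carries no information
print(im_entropy(noisy))       # close to ln(256) ~= 5.55 for uniform noise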
def forward(layers, x):
"""
function for performing forward propagation in all the layers
Parameters:
layers : list
x : numpy array
Returns:
list : (contains output of all layers in the form of numpy arrays)
"""
conv = layers[0]
pool = layers[1]
dense = layers[2]
conv_output = conv.forward((x/255)- 0.5)
pool_output = pool.forward(conv_output)
dense_output = dense.forward(pool_output)
return [conv_output, pool_output, dense_output]
|
2e7ae3c8ab513ca5f138c77a868783dc942063ee
| 26,863 |
def gen_word_vec_matrix(doc, nrows=100, stop_min=4, return_df=False):
"""
"""
# filter stop words and non-alpha tokens
vecs = [token.vector for token in doc if token.is_alpha and
((not token.is_stop) or (token.is_stop and len(token)>stop_min))]
vec_df = pd.DataFrame(vecs)
# pad or downsample to desired numbers of rows
nvecs = vec_df.shape[0]
if nvecs < nrows:
vec_df = pd.concat([vec_df, vec_df.sample(nrows-nvecs)])
elif nvecs > nrows:
vec_df = vec_df.sample(nrows)
# return output in desired format
if return_df:
return vec_df
else:
return vec_df.values
|
98ddf6efc3ddadda8d72151a2d63860355aa6769
| 26,864 |
def __same_axes(x_axis, y_axis, xlim, ylim):
"""Check if two axes are the same, used to determine squared plots"""
axes_same_and_not_none = (x_axis == y_axis) and (x_axis is not None)
axes_same_lim = xlim == ylim
return axes_same_and_not_none and axes_same_lim
|
ce7538ffa17e15df0fc055809103bf69af88e7aa
| 26,865 |
import csv
def create_column_dicts(path_to_input):
"""Creates dictionaries: {column_index, column_name} and
{column_name, column_index}"""
cols = {}
with open(path_to_input, newline="") as csvfile:
inputreader = csv.reader(csvfile, delimiter=",")
row = next(inputreader)
for i, col in enumerate(row):
cols[i] = col
# reverse dictionary: {column_name, column_index}
cols_inds = {v: k for k, v in cols.items()}
return cols, cols_inds
|
13bf2e3c99fea9b1d2580b7bfc34e445af8b7e98
| 26,866 |
from functools import wraps
from types import FunctionType
from typing import Type
from typing import Any
def autocommit(session_cls: Type[Any], do_commit_at_outermost: bool = True) -> Any:
"""
Auto-commit decorator.
:param session_cls: SQLAlchemy Session class; it must be resolvable via `inject`.
:param do_commit_at_outermost: whether to commit when the outermost context exits.
:return:
"""
def decorator(func: FunctionType):
@wraps(func)
def wrap(*args, **kwargs):
session: session_cls = inject.instance(session_cls)
with CommitContext(session, do_commit_at_outermost=do_commit_at_outermost):
return func(*args, **kwargs)
return wrap
return decorator
|
f974ba9d28353ba7cf6f676d08b6b6bcbb751d0e
| 26,867 |
from operator import add
def multiply(multiplicand, multiplier):
"""Multiplies the multiplicand by the multiplier
This function multiplies the multiplicand by the multiplier only
when they are either integers or floats
Args:
multiplicand(int/float): The quantity that is to be multiplied
by another
multiplier(int/float): The quantity by which a given number is
to be multiplied
>>> multiply(6, 3)
18
Returns:
The product of the multiplicand multiplied by the multiplier
Raises:
TypeError if the multiplicand or multiplier is neither an
integer or float
"""
if not isinstance(multiplicand, (int, float)):
raise TypeError(f"{multiplicand} is not an integer or float")
if not isinstance(multiplier, (int, float)):
raise TypeError(f"{multiplier} is not an integer or float")
if multiplicand is None:
multiplicand = 0
if multiplier is None:
multiplier = 0
product = 0
list_multiplier = []
for num in range(multiplier):
list_multiplier.append(multiplicand)
while list_multiplier:
product = add(product, list_multiplier.pop())
return product
|
ad5da56004fd6ad6df327b892ea7cff2c09e9ac6
| 26,868 |
from typing import Optional
from typing import Container
def rolling_enveloped_dashboard(
channel_df_dict: dict, desired_num_points: int = 250, num_rows: Optional[int] = None,
num_cols: Optional[int] = 3, width_for_subplot_row: int = 400, height_for_subplot_row: int = 400,
subplot_colors: Optional[Container] = None, min_points_to_plot: int = 1, plot_as_bars: bool = False,
plot_full_single_channel: bool = False, opacity: float = 1, y_axis_bar_plot_padding: float = 0.06
) -> go.Figure:
"""
A function to create a Plotly Figure with sub-plots for each of the available data sub-channels, designed to reduce
the number of points/data being plotted without minimizing the insight available from the plots. It will plot
either an envelope for rolling windows of the data (plotting the max and the min as line plots), or a bar based
plot where the top of the bar (rectangle) is the highest value in the time window that bar spans, and the bottom of
the bar is the lowest point in that time window (choosing between them is done with the `plot_as_bars` parameter).
:param channel_df_dict: A dictionary mapping channel names to Pandas DataFrames of that channels data
:param desired_num_points: The desired number of points to be plotted in each subplot. The number of points
will be reduced from it's original sampling rate by applying metrics (e.g. min, max) over sliding windows
and then using that information to represent/visualize the data contained within the original data. If less than
the desired number of points are present, then a sliding window will NOT be used, and instead the points will be
plotted as they were originally recorded (also the subplot will NOT be plotted as a bar based plot even if
`plot_as_bars` was set to true).
:param num_rows: The number of columns of subplots to be created inside the Plotly figure. If None is given, (then
`num_cols` must not be None), then this number will automatically be determined by what's needed. If more rows
are specified than are needed, the number of rows will be reduced to the minimum needed to contain all the subplots
:param num_cols: The number of columns of subplots to be created inside the Plotly figure. See the description of
the `num_rows` parameter for more details on this parameter, and how the two interact. This also follows the same
approach to handling None when given
:param width_for_subplot_row: The width of the area used for a single subplot (in pixels).
:param height_for_subplot_row: The height of the area used for a single subplot (in pixels).
:param subplot_colors: An 'array-like' object of strings containing colors to be cycled through for the subplots.
If None is given (which is the default), then the `colorway` variable in Plotly's current theme/template will
be used to color the data on each of the subplots uniquely, repeating from the start of the `colorway` if
all colors have been used.
:param min_points_to_plot: The minimum number of data points required to be present to create a subplot for a
channel/subchannel (NOT including `NaN` values).
:param plot_as_bars: A boolean value indicating if the plot should be visualized as a set of rectangles, where a
shaded rectangle is used to represent the maximum and minimum values of the data during the time window
covered by the rectangle. These maximum and minimum values are visualized by the locations of the top and bottom
edges of the rectangle respectively, unless the height of the rectangle would be 0, in which case a line segment
will be displayed in it's place. If this parameter is `False`, two lines will be plotted for each
of the subplots in the figure being created, creating an 'envelope' around the data. An 'envelope' around the
data consists of a line plotted for the maximum values contained in each of the time windows, and another line
plotted for the minimum values. Together these lines create a boundary which contains all the data points
recorded in the originally recorded data.
:param plot_full_single_channel: If instead of a dashboard of subplots a single plot with multiple sub-channels
should be created. If this is True, only one (key, value) pair can be given for the `channel_df_dict` parameter
:param opacity: The opacity to use for plotting bars/lines
:param y_axis_bar_plot_padding: Due to some unknown reason the bar subplots aren't having their y axis ranges
automatically scaled so this is the ratio of the total y-axis data range to pad both the top and bottom of the
y axis with. The default value is the one it appears Plotly uses as well.
:return: The Plotly Figure containing the subplots of sensor data (the 'dashboard')
"""
if not (num_rows is None or isinstance(num_rows, (int, np.integer))):
raise TypeError(f"`num_rows` is of type `{type(num_rows)}`, which is not allowed. "
"`num_rows` can either be `None` or some type of integer.")
elif not (num_cols is None or isinstance(num_cols, (int, np.integer))):
raise TypeError(f"`num_cols` is of type `{type(num_cols)}`, which is not allowed. "
"`num_cols` can either be `None` or some type of integer.")
# I'm pretty sure the below is correct and it appears to work correctly, but it'd be nice if someone could
# double check this logic
# This removes any channels with less than `min_points_to_plot` data points per sub-channel, and removes any
# sub-channels which have less than `min_points_to_plot` non-NaN data points
channel_df_dict = {
k: v.drop(columns=v.columns[v.notna().sum(axis=0) < min_points_to_plot])
for (k, v) in channel_df_dict.items() if v.shape[0] >= min_points_to_plot}
subplot_titles = [' '.join((k, col)) for (k, v) in channel_df_dict.items() for col in v.columns]
if num_rows is None and num_cols is None:
raise TypeError("Both `num_rows` and `num_columns` were given as `None`! "
"A maximum of one of these two parameters may be given as None.")
elif num_rows is None:
num_rows = 1 + (len(subplot_titles) - 1) // num_cols
elif num_cols is None:
num_cols = 1 + (len(subplot_titles) - 1) // num_rows
elif len(subplot_titles) > num_rows * num_cols:
raise ValueError("The values given for `num_rows` and `num_columns` result in a maximum "
f"of {num_rows * num_cols} avaialable sub-plots, but {len(subplot_titles)} subplots need "
"to be plotted! Try setting one of these variables to `None`, it will then "
"automatically be set to the optimal number of rows/columns.")
else:
num_rows = 1 + (len(subplot_titles) - 1) // num_cols
num_cols = int(np.ceil(len(subplot_titles)/num_rows))
if subplot_colors is None:
colorway = pio.templates[pio.templates.default]['layout']['colorway']
else:
colorway = subplot_colors
if plot_full_single_channel:
if len(channel_df_dict) != 1:
raise ValueError("The 'channel_df_dict' parameter must be length 1 when "
"'plot_full_single_channel' is set to true!")
num_rows = 1
num_cols = 1
fig = go.Figure(layout_title_text=list(channel_df_dict.keys())[0])
else:
fig = make_subplots(
rows=num_rows,
cols=num_cols,
subplot_titles=subplot_titles,
figure=go.Figure(
layout_height=height_for_subplot_row * num_rows,
layout_width=width_for_subplot_row * num_cols,
),
)
# A counter to keep track of which subplot is currently being worked on
subplot_num = 0
# A dictionary to be used to modify the Plotly Figure layout all at once after all the elements have been added
layout_changes_to_make = {}
for channel_data in channel_df_dict.values():
window = int(np.around((channel_data.shape[0]-1) / desired_num_points, decimals=0))
# If a window size of 1 is determined, it sets the stride to 1 so we don't get an error as a result of the
# 0 length stride
stride = 1 if window == 1 else window - 1
rolling_n = channel_data.rolling(window)
min_max_tuple = (rolling_n.min()[::stride], rolling_n.max()[::stride])
min_max_equal = min_max_tuple[0] == min_max_tuple[1]
is_nan_min_max_mask = np.logical_not(
np.logical_and(
pd.isnull(min_max_tuple[0]),
pd.isnull(min_max_tuple[1])))
        # If it's going to be plotted as bars, force its time stamps to be uniformly spaced
# so that the bars don't have any discontinuities in the X-axis
if len(channel_data) >= desired_num_points and plot_as_bars:
if isinstance(channel_data.index, pd.core.indexes.datetimes.DatetimeIndex):
new_index = pd.date_range(
channel_data.index.values[0],
channel_data.index.values[-1],
periods=len(channel_data),
)
else:
new_index = np.linspace(
channel_data.index.values[0],
channel_data.index.values[-1],
num=len(channel_data),
)
            # `set_index` returns a new DataFrame rather than modifying in place, so re-assign it
            channel_data = channel_data.set_index(new_index)
# Loop through each of the sub-channels, and their respective '0-height rectangle mask'
for subchannel_name, cur_min_max_equal in min_max_equal[channel_data.columns].iteritems():
traces = []
cur_color = colorway[subplot_num % len(colorway)]
cur_subchannel_non_nan_mask = is_nan_min_max_mask[subchannel_name]
# If there are less data points than the desired number of points
# to be plotted, just plot the data as a line plot
if len(channel_data) < desired_num_points:
not_nan_mask = np.logical_not(pd.isnull(channel_data[subchannel_name]))
traces.append(
go.Scatter(
x=channel_data.index[not_nan_mask],
y=channel_data.loc[not_nan_mask, subchannel_name],
name=subchannel_name,
opacity=opacity,
line_color=cur_color,
showlegend=plot_full_single_channel,
)
)
# If it's going to plot the data with bars
elif plot_as_bars:
# If there are any 0-height rectangles
if np.any(cur_min_max_equal):
equal_data_df = min_max_tuple[0].loc[cur_min_max_equal.values, subchannel_name]
# Half of the sampling period
half_dt = np.diff(cur_min_max_equal.index[[0, -1]])[0] / (2 * (len(cur_min_max_equal) - 1))
# Initialize the arrays we'll use for creating line segments where
# rectangles would have 0 width so it will end up formatted as follows
# (duplicate values for y since line segements are horizontal):
# x = [x1, x2, None, x3, x4, None, ...]
# y = [y12, y12, None, y34, y34, None, ...]
x_patch_line_segs = np.repeat(equal_data_df.index.values, 3)
y_patch_line_segs = np.repeat(equal_data_df.values, 3)
                    # All X axis values are the same, but these values are supposed to represent pairs of start and end
                    # times for line segments, so the time stamp is shifted half its duration backwards for the start
                    # time, and half its duration forward for the end time
x_patch_line_segs[::3] -= half_dt
x_patch_line_segs[1::3] += half_dt
                    # This is done on every third value so that each pair of points is disconnected from the others,
                    # since the (None, None) point will connect to neither the point before it nor the point after it
x_patch_line_segs[2::3] = None
y_patch_line_segs[2::3] = None
traces.append(
go.Scatter(
x=x_patch_line_segs,
y=y_patch_line_segs,
name=subchannel_name,
opacity=opacity,
mode='lines',
line_color=cur_color,
showlegend=plot_full_single_channel,
)
)
min_data_point = np.min(min_max_tuple[0][subchannel_name])
max_data_point = np.max(min_max_tuple[1][subchannel_name])
y_padding = (max_data_point - min_data_point) * y_axis_bar_plot_padding
traces.append(
go.Bar(
x=min_max_tuple[0].index[cur_subchannel_non_nan_mask],
y=(min_max_tuple[1].loc[cur_subchannel_non_nan_mask, subchannel_name] -
min_max_tuple[0].loc[cur_subchannel_non_nan_mask, subchannel_name]),
marker_color=cur_color,
opacity=opacity,
marker_line_width=0,
base=min_max_tuple[0].loc[cur_subchannel_non_nan_mask, subchannel_name],
showlegend=plot_full_single_channel,
name=subchannel_name,
)
)
# Adds a (key, value) pair to the dict for setting this subplot's Y-axis display range (applied later)
min_y_range = min_data_point - y_padding
max_y_range = max_data_point + y_padding
y_axis_id = f'yaxis{1 + subplot_num}_range'
if plot_full_single_channel:
y_axis_id = 'yaxis_range'
if layout_changes_to_make:
min_y_range = min(min_y_range, layout_changes_to_make[y_axis_id][0])
max_y_range = max(max_y_range, layout_changes_to_make[y_axis_id][1])
layout_changes_to_make[y_axis_id] = [min_y_range, max_y_range]
else:
for cur_df in min_max_tuple:
traces.append(
go.Scatter(
x=cur_df.index[cur_subchannel_non_nan_mask],
y=cur_df.loc[cur_subchannel_non_nan_mask, subchannel_name],
name=subchannel_name,
opacity=opacity,
line_color=cur_color,
showlegend=plot_full_single_channel,
)
)
# Add the traces created for the current subchannel of data to the plotly figure
if plot_full_single_channel:
fig.add_traces(traces)
else:
fig.add_traces(
traces,
rows=1 + subplot_num // num_cols,
cols=1 + subplot_num % num_cols,
)
subplot_num += 1
fig.update_layout(
**layout_changes_to_make,
bargap=0,
barmode='overlay'
)
return fig
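
# A minimal standalone sketch (not from the original source) of the rolling min/max "envelope"
# downsampling step used above; the column name and sizes below are made up for illustration.
import numpy as np
import pandas as pd

_demo = pd.DataFrame({"accel_x": np.sin(np.linspace(0, 50, 10_000))})
_desired_num_points = 250
_window = int(np.around((_demo.shape[0] - 1) / _desired_num_points, decimals=0))
_stride = 1 if _window == 1 else _window - 1
_rolling = _demo.rolling(_window)
_envelope_min, _envelope_max = _rolling.min()[::_stride], _rolling.max()[::_stride]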
|
bd26531b9f941e340c03ce519f977d55f860d345
| 26,869 |
def compare_clusterers(clusterer_1, clusterer_2, ground_truth, reduction,
figsize=(15, 5), **kwargs):
"""Utility function for comparing two clustering approaches on a problem
for which we have a ground truth.
Args:
- clusterer_1: a dictionary where key is the name of the clustering
method and value are the predicted labels.
- clusterer_2: a dictionary where key is the name of the clustering
method and value are the predicted labels.
- ground_truth: a numpy array, the ground truth labels.
- reduction: a numpy array, a matrix holding the low dimensional
projection of the original input data.
- figsize: a tuple, specifying the size of the figure
- **kwargs: keyword arguments passed to plt.scatter.
Returns:
- None
"""
fig, axs = plt.subplots(1, 3, figsize=figsize)
score_1 = adjusted_rand_score(
labels_true=ground_truth,
labels_pred=next(iter(clusterer_1.values()))
)
score_2 = adjusted_rand_score(
labels_true=ground_truth,
labels_pred=next(iter(clusterer_2.values()))
)
axs[0].scatter(
reduction[:, 0],
reduction[:, 1],
c=ground_truth,
**kwargs
)
axs[0].set_title('Ground Truth')
axs[1].scatter(
reduction[:, 0],
reduction[:, 1],
c=next(iter(clusterer_1.values())),
**kwargs
)
axs[1].set_title(
f'Clust {list(clusterer_1.keys())[0]} - ARScore {round(score_1, 2)}'
)
axs[2].scatter(
reduction[:, 0],
reduction[:, 1],
c=next(iter(clusterer_2.values())),
**kwargs
)
axs[2].set_title(
f'Clust {list(clusterer_2.keys())[0]} - ARScore {round(score_2, 2)}'
)
plt.show()
return None
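
# Hypothetical usage sketch (not from the original source): the labels below are random stand-ins
# for real clusterer outputs and `_reduction` stands in for a 2-D embedding (e.g. PCA/UMAP); it
# assumes the module-level `plt`/`adjusted_rand_score` imports used above are available.
import numpy as np

_n = 100
_reduction = np.random.rand(_n, 2)
compare_clusterers(
    clusterer_1={"kmeans": np.random.randint(0, 3, _n)},
    clusterer_2={"dbscan": np.random.randint(0, 3, _n)},
    ground_truth=np.random.randint(0, 3, _n),
    reduction=_reduction,
    s=10,
)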
|
2406894a354255e48d5e8777b54eed2e50391a25
| 26,870 |
def generate_and_write_ed25519_keypair(password, filepath=None):
"""Generates ed25519 key pair and writes custom JSON-formatted keys to disk.
The private key is encrypted using AES-256 in CTR mode, with the passed
password strengthened in PBKDF2-HMAC-SHA256.
NOTE: The custom key format includes 'ed25519' as signing scheme.
Arguments:
password: An encryption password.
filepath (optional): The path to write the private key to. If not passed,
the key is written to CWD using the keyid as filename. The public key
is written to the same path as the private key using the suffix '.pub'.
Raises:
UnsupportedLibraryError: pyca/pynacl or pyca/cryptography is not available.
FormatError: Arguments are malformed.
ValueError: An empty string is passed as 'password'.
StorageError: Key files cannot be written.
Side Effects:
Writes key files to disk.
Returns:
The private key filepath.
"""
formats.PASSWORD_SCHEMA.check_match(password)
return _generate_and_write_ed25519_keypair(
filepath=filepath, password=password, prompt=False)
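
# Hypothetical usage sketch (not from the original source); the password and filepath are made up.
# This writes the encrypted private key to "/tmp/my_ed25519" and the public key to "/tmp/my_ed25519.pub".
private_key_path = generate_and_write_ed25519_keypair("correct horse battery", filepath="/tmp/my_ed25519")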
|
3c9249d6e72ce442562b9dcb1af8cea59111d369
| 26,871 |
def scale_bicubic(data, scale=(1., 1., 1.)):
"""
returns a interpolated, scaled version of data
the output shape is scaled too.
Parameters
----------
data: ndarray
3d input array
scale: float, tuple
scaling factor along each axis (x,y,z)
Returns
-------
scaled output
"""
if not (isinstance(data, np.ndarray) and data.ndim == 3):
raise ValueError("input data has to be a 3d array!")
options_types = {np.uint8: ["-D", "TYPENAME=uchar", "-D", "READ_IMAGE=read_imageui"],
np.uint16: ["-D", "TYPENAME=short", "-D", "READ_IMAGE=read_imageui"],
np.float32: ["-D", "TYPENAME=float", "-D", "READ_IMAGE=read_imagef"],
}
dtype = data.dtype.type
if not dtype in options_types:
raise ValueError("type %s not supported! Available: %s" % (dtype, str(list(options_types.keys()))))
if not isinstance(scale, (tuple, list, np.ndarray)):
scale = (scale,) * 3
if len(scale) != 3:
raise ValueError("scale = %s misformed" % scale)
d_im = OCLImage.from_array(data)
nshape = _scale_shape(data.shape, scale)
res_g = OCLArray.empty(nshape, dtype)
prog = OCLProgram(abspath("kernels/scale.cl"),
build_options=options_types[dtype])
prog.run_kernel("scale_bicubic",
res_g.shape[::-1], None,
d_im, res_g.data)
return res_g.get()
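
# Usage sketch (not from the original source): upscale a random float32 volume by 2x in x and y.
# It assumes a working OpenCL setup for the OCLImage/OCLArray machinery used above.
import numpy as np

_volume = np.random.rand(32, 64, 64).astype(np.float32)   # (z, y, x)
_scaled = scale_bicubic(_volume, scale=(2.0, 2.0, 1.0))    # roughly (32, 128, 128)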
|
95b400b5cd05fb8e476bf5f5936641bca159015f
| 26,872 |
def test_calc_functions_multiple_arguments():
"""Tests parse/eval handling functions with multiple arguments correctly"""
def h1(x): return x
def h2(x, y): return x * y
def h3(x, y, z): return x * y * z
assert evaluator("h(2)", {}, {"h": h1}, {})[0] == 2.0
assert evaluator("h(2, 3)", {}, {"h": h2}, {})[0] == 6.0
assert evaluator("h(2, 3, 4)", {}, {"h": h3}, {})[0] == 24.0
assert equal_as_arrays(evaluator("h(2, [1, 2, 3])", {}, {"h": h2})[0],
MathArray([2, 4, 6]))
with raises(ArgumentError):
evaluator("h(2, 1)", {}, {"h": h1}, {})
with raises(UnableToParse):
evaluator("h()", {}, {"h": h1}, {})
with raises(ArgumentError):
evaluator("h(1)", {}, {"h": h2}, {})
with raises(ArgumentError):
evaluator("h(1,2,3)", {}, {"h": h2}, {})
with raises(UnableToParse):
evaluator("h()", {}, {"h": h3}, {})
with raises(ArgumentError):
evaluator("h(1)", {}, {"h": h3}, {})
with raises(ArgumentError):
evaluator("h(1,2)", {}, {"h": h3}, {})
|
cce31c7386e954389a4d59402d881f602f16373a
| 26,873 |
import string
def get_sentiment(text, word_map):
"""
Identifies the overall sentiment of the text by taking the average
of each word.
    Note: Words not found in the word_map dict are given zero value.
"""
# remove all punctuation
text = text.translate(str.maketrans("", "", string.punctuation))
# split into tokens
text = text.split()
total_score, length = 0, 0
# get score for each word, put zero if not found
scores = (word_map.get(token.lower(), 0) for token in text)
# find average score
for score in scores:
total_score += score
length += 1
    # avoid a ZeroDivisionError when the text contains no tokens
    if length == 0:
        return 0
    return total_score / length
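
# Usage sketch (not from the original source); the tiny word map below is made up.
_word_map = {"good": 1.0, "great": 2.0, "bad": -1.0}
_score = get_sentiment("The plot was good, the acting was great!", _word_map)
# tokens: the, plot, was, good, the, acting, was, great -> (1 + 2) / 8 = 0.375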
|
ee9e57c999539c0126e5c0d38711a617e82dab10
| 26,874 |
def point_line_seg_distance(p1: tuple, p2: tuple, p: tuple, extend_line: bool = False):
"""
Find shortest distance between a line segment (p1, p2) and a given point p.
"""
assert isinstance(p1, tuple) and len(p1) == 2
assert isinstance(p2, tuple) and len(p2) == 2
assert isinstance(p, tuple) and len(p) == 2
# Code adapted from https://stackoverflow.com/questions/849211/shortest-distance-between-a-point-and-a-line-segment
x1, y1 = p1
x2, y2 = p2
x3, y3 = p
x1, x2, x3, y1, y2, y3 = float(x1), float(x2), float(x3), float(y1), float(y2), float(y3)
px = x2 - x1
py = y2 - y1
norm = px * px + py * py
if norm == 0:
dx = x1 - x3
dy = y1 - y3
dist = (dx * dx + dy * dy) ** .5
return dist, (x1, y1)
u = ((x3 - x1) * px + (y3 - y1) * py) / float(norm)
if not extend_line:
if u > 1:
u = 1
elif u < 0:
u = 0
x = x1 + u * px
y = y1 + u * py
dx = x - x3
dy = y - y3
dist = (dx * dx + dy * dy) ** .5
return dist, (x, y)
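
# Usage sketch (not from the original source): distance from (5, 3) to the segment (0, 0)-(10, 0).
_dist, _closest = point_line_seg_distance((0, 0), (10, 0), (5, 3))
# _dist == 3.0 and _closest == (5.0, 0.0)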
|
47c669bb5fc3db9d97cf2ac03179b2fc700ad0a5
| 26,875 |
def Tanh_Derivative(x):
"""
这是Tanh激活函数的导数计算公式
:param x: 需要进行计算的数据
:return: Tanh激活函数的导数的函数值
"""
f = Tanh(x)
derivative = 1.0-f*f
return derivative
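
# Quick numerical check (not from the original source), assuming `Tanh` is the module's tanh
# implementation called above: compare the analytic derivative with a central finite difference.
_x, _h = 0.5, 1e-5
_analytic = Tanh_Derivative(_x)
_numeric = (Tanh(_x + _h) - Tanh(_x - _h)) / (2 * _h)
# abs(_analytic - _numeric) should be on the order of 1e-10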
|
9142765acc9299ef3b35a16538956dfa3d61efc8
| 26,876 |
async def api_update_donation(
data: CreateDonation, donation_id=None, g: WalletTypeInfo = Depends(get_key_type)
):
"""Update a donation with the data given in the request"""
if donation_id:
donation = await get_donation(donation_id)
if not donation:
raise HTTPException(
status_code=HTTPStatus.NOT_FOUND, detail="Donation does not exist."
)
if donation.wallet != g.wallet.id:
raise HTTPException(
status_code=HTTPStatus.FORBIDDEN, detail="Not your donation."
)
donation = await update_donation(donation_id, **data.dict())
else:
raise HTTPException(
status_code=HTTPStatus.BAD_REQUEST, detail="No donation ID specified"
)
return donation.dict()
|
d60f59d50e2515a8e5402c04b13c51a53855fa80
| 26,877 |
def random_monoalpha_cipher(pool=None):
"""Generate a Monoalphabetic Cipher"""
if pool is None:
pool = letters + digits
original_pool = list(pool)
shuffled_pool = list(pool)
shuffle(shuffled_pool)
return dict(zip(original_pool, shuffled_pool))
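
# Usage sketch (not from the original source), assuming `letters`, `digits` and `shuffle` are
# imported at module level (e.g. from `string` and `random`) as the function above requires.
_cipher = random_monoalpha_cipher()
_plain = "attack4dawn"
_encrypted = ''.join(_cipher.get(ch, ch) for ch in _plain)
_decrypted = ''.join({v: k for k, v in _cipher.items()}.get(ch, ch) for ch in _encrypted)
# _decrypted == _plain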
|
a12976552fc57183e939bd04f2c61cb175d3adec
| 26,878 |
from typing import Union
def get_comment(entity: Union[Table, TableProtocol, Column, Constraint]) -> str:
"""Get comment on entity"""
if isinstance(entity, TableProtocol):
return to_table(entity).comment or ""
elif isinstance(entity, Table):
return entity.comment or ""
elif isinstance(entity, Column):
return entity.comment or ""
elif isinstance(entity, Constraint):
if hasattr(entity, "info"):
return getattr(entity, "info").get("comment") or ""
return ""
raise ValueError("invalid entity passed to get_comment")
|
4517d43980f596d1a31773a4322805331d871890
| 26,880 |
from typing import List
def merge_neighbor_spans(spans: List[Span], distance: int = 1) -> List[Span]:
"""Merge neighboring spans in a list of un-overlapped spans:
when the gaps between neighboring spans is not larger than the
specified distance, they are considered as the neighbors.
Args:
spans (List[Span]): The input list of spans.
distance (int, optional):
The upper bound of interval gaps between two neighboring spans.
Defaults to 1.
Returns:
List[Span]: A list of merged spans
"""
is_neighboring_spans = (
lambda span1, span2: min(
abs(span1.start - span2.end), abs(span1.end - span2.start)
)
<= distance
)
# It assumes non-overlapped intervals within the list
merge_neighboring_spans = lambda span1, span2: Span(
min(span1.start, span2.start), max(span1.end, span2.end)
)
spans = sorted(spans, key=lambda ele: ele.start)
# When sorted, only one iteration round is needed.
if len(spans) == 0:
return []
if len(spans) == 1:
return spans
cur_merged_spans = [spans[0]]
for cur_span in spans[1:]:
prev_span = cur_merged_spans.pop()
if is_neighboring_spans(cur_span, prev_span):
cur_merged_spans.append(merge_neighboring_spans(prev_span, cur_span))
else:
# In this case, the prev_span should be moved to the bottom of the stack
cur_merged_spans.extend([prev_span, cur_span])
return cur_merged_spans
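
# Usage sketch (not from the original source), assuming `Span` is a simple container with
# `start`/`end` attributes and a `Span(start, end)` constructor as used above.
_merged = merge_neighbor_spans([Span(0, 5), Span(6, 10), Span(20, 25)], distance=1)
# -> [Span(0, 10), Span(20, 25)]: the first two spans are within distance 1, the third is not.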
|
a059228aefefe52c7417d414ed517cb36ee8b753
| 26,881 |
def handle_all_serials(oid, *args):
"""Return dict of oid to serialno from store() and tpc_vote().
Raises an exception if one of the calls raised an exception.
The storage interface got complicated when ZEO was introduced.
Any individual store() call can return None or a sequence of
2-tuples where the 2-tuple is either oid, serialno or an
exception to be raised by the client.
The original interface just returned the serialno for the
object.
The updated multi-commit API returns nothing from store(), and
returns a sequence of resolved oids from tpc_vote.
NOTE: This function is removed entirely in ZODB 5.
"""
d = {}
for arg in args:
if isinstance(arg, bytes):
d[oid] = arg
elif arg:
for t in arg:
if isinstance(t, bytes):
# New protocol. The caller will use the tid
# returned from tpc_finish if we return a dict
# missing the oid.
pass
else:
oid, serial = t
if not isinstance(serial, bytes):
raise serial # error from ZEO server
d[oid] = serial
return d
|
f2ff56d43f40f4bad5a802acbbe7a8fa869831d3
| 26,882 |
def remove_null_fields(data):
"""Remove all keys with 'None' values"""
    # iterate over a copy of the items so keys can be deleted during iteration
    for k, v in list(data.items()):
if isinstance(v, dict):
remove_null_fields(v)
if isinstance(v, list):
for element in v:
remove_null_fields(element)
if not data[k]:
del data[k]
return data
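
# Usage sketch (not from the original source). Note that empty containers are dropped as well,
# since the check is on falsiness rather than `is None`.
_payload = {"id": 7, "name": None, "meta": {"tag": None}, "items": []}
remove_null_fields(_payload)
# _payload == {"id": 7}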
|
3dc90d215d899afb2316acb92b5755fd93da204f
| 26,883 |
from scipy.ndimage.filters import gaussian_filter1d
from math import log
import numpy
def epsautoconfeval(epsilon, plotTitle):
"""
investigate distance properties for clustering autoconfiguration
plots of k-nearest-neighbor distance histogram and "knee"
See SegmentedMessages#autoconfigureDBSCAN
:param plotTitle: Part of plot's filename and header
    :param epsilon: The manually determined "best" epsilon for comparison
:return:
"""
# # distribution of all distances in matrix
# hstplt = SingleMessagePlotter(specimens, tokenizer+'-distance-distribution-histo', args.interactive)
# hstplt.histogram(tril(sm.distances), bins=[x / 50 for x in range(50)])
# plt.axvline(epsilon, label="manually determined eps={:0.2f}".format(epsilon), c="red")
# hstplt.text('max {:.3f}, mean {:.3f}'.format(sm.distances.max(), sm.distances.mean()))
# hstplt.writeOrShowFigure()
# del hstplt
neighbors = tyl.sm.neighbors() # list of tuples: (index from sm.distances, distance) sorted by distance
    mmp = MultiMessagePlotter(specimens, "knn-distance-function_" + plotTitle, 1, 2,
isInteractive=False)
mmp.axes[0].axhline(epsilon, label="manually determined eps={:0.2f}".format(epsilon), c="red")
mmp.axes[1].axhline(epsilon, label="manually determined eps={:0.2f}".format(epsilon), c="red")
krange = (0, 16, 1)
for k in range(*krange):
knearest = sorted([nfori[k][1] for nfori in neighbors])
mmp.plotToSubfig(1, knearest, alpha=.4, label="k={}".format(k))
# # kneedle approach: yields unusable results. does not find a knee!
# smoothing approach
sigma = log(len(neighbors))
knearest = dict()
smoothknearest = dict()
seconddiff = dict() # type: Dict[int, numpy.ndarray]
seconddiffMax = (0, 0, 0)
# ksteepeststats = list()
# can we omit k = 0 ?
# --> No - recall and even more so precision deteriorates for dns and dhcp (1000s)
for k in range(0, len(neighbors) // 10): # round(2*log(len(neighbors)))
knearest[k] = sorted([nfori[k][1] for nfori in neighbors])
smoothknearest[k] = gaussian_filter1d(knearest[k], sigma)
# max of second difference (maximum upwards curvature) as knee
seconddiff[k] = numpy.diff(smoothknearest[k], 2)
seconddiffargmax = seconddiff[k].argmax()
# noinspection PyArgumentList
diffrelmax = seconddiff[k].max() / smoothknearest[k][seconddiffargmax]
if 2*sigma < seconddiffargmax < len(neighbors) - 2*sigma and diffrelmax > seconddiffMax[2]:
seconddiffMax = (k, seconddiffargmax, diffrelmax)
# ksteepeststats.append((k, seconddiff[k].max(), diffrelmax))
# print(tabulate(ksteepeststats, headers=("k", "max(f'')", "max(f'')/f")))
# prepare to plot the smoothed nearest neighbor distribution and its second derivative
k = seconddiffMax[0]
x = seconddiffMax[1] + 1
# # calc mean of first derivative to estimate the noisiness (closer to 1 is worse)
# firstdiff = numpy.diff(smoothknearest[k], 1)
# # alt: integral
# diag = numpy.empty_like(smoothknearest[k])
# for i in range(diag.shape[0]):
# diag[i] = smoothknearest[k][0] + i*(smoothknearest[k][-1] - smoothknearest[k][0])/smoothknearest[k][-1]
# belowdiag = diag - smoothknearest[k]
# print("f' median={:.2f}".format(numpy.median(firstdiff)))
# print("diag-f={:.2f}".format(sum(belowdiag)))
mmp.plotToSubfig(0, smoothknearest[k], label="smooth k={}, sigma={:.2f}".format(k, sigma), alpha=.4)
mmp.plotToSubfig(1, smoothknearest[k], label="smooth k={}, sigma={:.2f}".format(k, sigma), alpha=1, color='blue')
mmp.plotToSubfig(0, knearest[k], alpha=.4)
ax0twin = mmp.axes[0].twinx()
# mmp.plotToSubfig(ax0twin, seconddiff[k], linestyle='dotted', color='cyan', alpha=.4)
# noinspection PyTypeChecker
mmp.plotToSubfig(ax0twin, [None] + seconddiff[k].tolist(), linestyle='dotted',
color='magenta', alpha=.4)
# epsilon = knearest[k][x]
epsilon = smoothknearest[k][x]
mmp.axes[0].axhline(epsilon, linestyle='dashed', color='blue', alpha=.4,
label="curvature max {:.2f} of k={}".format(
epsilon, k))
mmp.axes[0].axvline(x, linestyle='dashed', color='blue', alpha=.4)
mmp.writeOrShowFigure(filechecker.reportFullPath)
del mmp
# if args.interactive:
# from tabulate import tabulate
# IPython.embed()
# exit(0)
return epsilon
|
4c23d8c50dd182c0d045b047e44c2d2484482eec
| 26,884 |
def equal_value_solution(par_dict1=params ,par_dict2=params1 , mean=-0.4, variance=0.35,N=1000):
""" This function sets average tax revenue equal under different sets of parameters by changing the base tax rate.
This is done by minizing the squared difference between old and new average revenue using the base tax rate tg
as the argument.
Args:
mean: Distribution mean
par_dict1: Original parameters - will be fixed after calculation
par_dict2: New parameters from which a new base tax rate is calculated
var: Distribution variance
N: Number of draws
tg: level of base housing tax - set separately from other parameters
Returns:
Tax rate required for different parameters to give the same average tax revenue.
"""
#Set seed
np.random.seed(1)
#Draw m
m_draw = np.random.lognormal(mean, variance, N)
#Set parameters
tg_set = par_dict1["tg"]
#Calculate original revenue
original_rev = calc_rev_5(tg_set,par_dict1, m_draw, N)
print(f'Tax revenue under the first set of parameters: {original_rev}')
#Define value objective as squared difference between old and new revenue
def equal_value_objective(tg_set):
value_of_choice=(calc_rev_5(tg_set, par_dict2, m_draw, N)-original_rev)**2
return value_of_choice
#Minimize squared distance
solution_g=optimize.minimize_scalar(equal_value_objective,method='bounded', bounds=(0,np.inf))
tg_final=solution_g.x
print(f'Tax revenue under new set of parameters with solution base tax rate {calc_rev_5(tg_final, par_dict2, m_draw, N)}')
print(f' Solution tax rate {tg_final}')
return tg_final
|
8a6d2b15c2b6b751647f62a3d0172cde45f23f94
| 26,885 |
def dist_l2(v, w):
"""
Euclidean (L2) distance between
two vectors.
"""
s = 0
for i in range(len(v)):
s = s + (v[i] - w[i]) ** 2
s = np.sqrt(s)
return s
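
# Usage sketch (not from the original source): the classic 3-4-5 right triangle.
_d = dist_l2((0, 0), (3, 4))
# _d == 5.0, equivalent to np.linalg.norm(np.subtract((0, 0), (3, 4)))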
|
c65dced806a0e121263f3a4e26e88bd88e43fa43
| 26,886 |
def convert_single_example_dual(ex_index, example, max_seq_length, tokenizer,
label_list):
"""Converts a single `InputExample` into one or more `DualInputFeatures`.
Args:
ex_index: index of the example
example: InputExample
max_seq_length: maximal sequence length of query + one passage in tokens
tokenizer: mapping a string to a list of tokens
    label_list: labels for all passage candidates
Returns:
DualInputFeatures representing the records
This flattens out the tokens of all candidates in a single sequence for the
passages
The query is only listed once and has form [CLS] q [SEP]
Passages are as many as candidates and have form [CLS] p [SEP]
"""
max_seq_length_passage = max_seq_length - FLAGS.max_query_length
label_map = {}
for (i, label) in enumerate(label_list):
label_map[label] = i
tokens_a = tokenizer.tokenize(example.text_a)
_truncate_seq(tokens_a, FLAGS.max_query_length - 2)
assert (len(example.text_b_list)) == FLAGS.num_candidates
tokens_b_list = []
for text_b in example.text_b_list:
tokens_b_list.append(tokenizer.tokenize(text_b))
for tokens_b in tokens_b_list:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
# _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 4)
_truncate_seq(tokens_b, max_seq_length_passage - 2)
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
    # used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
all_segment_ids = []
all_input_ids = []
all_input_mask = []
all_unique_ids = []
all_label_ids = []
all_tokens = []
query_tokens = []
query_input_ids = []
query_input_mask = []
query_segment_ids = []
# first process the query tokens
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
while len(input_ids) < FLAGS.max_query_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == FLAGS.max_query_length
assert len(input_mask) == FLAGS.max_query_length
assert len(segment_ids) == FLAGS.max_query_length
# copy to query variables
query_tokens.extend(tokens)
query_input_ids.extend(input_ids)
query_segment_ids.extend(segment_ids)
query_input_mask.extend(input_mask)
for cand_index, tokens_b in enumerate(tokens_b_list):
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(1)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length_passage:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(1)
assert len(input_ids) == max_seq_length_passage
assert len(input_mask) == max_seq_length_passage
assert len(segment_ids) == max_seq_length_passage
label_id = int(example.labels[cand_index])
all_input_ids.extend(input_ids)
all_input_mask.extend(input_mask)
all_unique_ids.append(example.guid + "-" + example.text_b_guids[cand_index])
all_label_ids.append(label_id)
all_segment_ids.extend(segment_ids)
all_tokens.extend(tokens)
if ex_index < 5:
tf.logging.info("*** Example ***")
tf.logging.info("guid: %s" % (example.guid))
# query info
tf.logging.info(
"query tokens: %s" %
" ".join([tokenization.printable_text(x) for x in query_tokens]))
tf.logging.info("query input_ids: %s" %
" ".join([str(x) for x in query_input_ids]))
tf.logging.info("query input_mask: %s" %
" ".join([str(x) for x in query_input_mask]))
tf.logging.info("query segment_ids: %s" %
" ".join([str(x) for x in query_segment_ids]))
tf.logging.info(
"tokens: %s" %
" ".join([tokenization.printable_text(x) for x in all_tokens]))
tf.logging.info("input_ids: %s" % " ".join([str(x) for x in all_input_ids]))
tf.logging.info("input_mask: %s" %
" ".join([str(x) for x in all_input_mask]))
tf.logging.info("segment_ids: %s" %
" ".join([str(x) for x in all_segment_ids]))
tf.logging.info("labels ids: %s" %
" ".join([str(x) for x in all_label_ids]))
tf.logging.info("prior scores: %s" %
" ".join([str(x) for x in example.scores]))
tf.logging.info("labels str: %s" %
" ".join([str(x) for x in example.labels]))
feature = DualInputFeatures(
input_ids_1=query_input_ids,
input_mask_1=query_input_mask,
segment_ids_1=query_segment_ids,
input_ids_2=all_input_ids,
input_masks_2=all_input_mask,
segment_ids_2=all_segment_ids,
label_ids=all_label_ids,
unique_ids=all_unique_ids,
scores=example.scores)
return feature
|
4c89637846ad9b79db1450bd1af4bfa38f80e08c
| 26,887 |
def find_recipes(rounds):
"""
Calculate the last ten recipes in the sequence.
:param rounds: the number of rounds
:return: a list of the last 10 recipes in the sequence
>>> find_recipes(5)
[0, 1, 2, 4, 5, 1, 5, 8, 9, 1]
>>> find_recipes(18)
[9, 2, 5, 1, 0, 7, 1, 0, 8, 5]
>>> find_recipes(2018)
[5, 9, 4, 1, 4, 2, 9, 8, 8, 2]
"""
recipes = [3, 7]
elf1 = 0
elf2 = 1
while len(recipes) < rounds + 10:
new_recipe = recipes[elf1] + recipes[elf2]
for i in str(new_recipe):
recipes.append(int(i))
elf1 = (elf1 + recipes[elf1] + 1) % len(recipes)
elf2 = (elf2 + recipes[elf2] + 1) % len(recipes)
return recipes[rounds:rounds+10]
|
281cd92094cf9c4be608de5a57075f7f38a531d0
| 26,888 |
def query_abs_over_wiki(abstract):
"""
query from es with the document is wiki_entity
"""
return {
"track_total_hits": "true",
"version": "true",
"size": 1000,
"sort": [{
"_score": {
"order": "desc"
}
}],
"_source": {
"includes": [
"entity",
"stem",
"description"
]
},
"stored_fields": [
"*"
],
"script_fields": {},
"docvalue_fields": [],
"query": {
"match": {
"entity": abstract
}
},
"highlight": {
"pre_tags": [
"@@"
],
"post_tags": [
"@@"
],
"fields": {
"*": {}
},
"fragment_size": 2147483647
}
}
|
7e35af5b210cd3485113d7cb5b72f7dd32745d94
| 26,889 |
def mitm_digest_preimage(tshash_params, digest, except_bitstrings=(), max_length=_MITM_DEFAULT_MAX_LENGTH):
""" Performs a meet-in-the-middle attack to find a preimage whose hash arrives at a certain digest. """
# We currently don't discard bits from the state to get a digest
# possible_truncations = ('0b01', '0b11')
possible_truncations = ('',)
end_states = tuple(digest + possible_truncation for possible_truncation in possible_truncations)
return mitm(tshash_params=tshash_params,
end_states=end_states,
except_bitstrings=except_bitstrings,
max_length=max_length)
|
7e5dc01a0f6f16d9cf064f7ba8070819c0543706
| 26,890 |
import re
def similarityWithResult(Address_1: Address, Address_2: Address):
"""
地址相似度计算
:param Address_1: 地址1, 由 Geocoding.normalizing 方法返回的 Address 类
:param Address_2: 地址2, 由 Geocoding.normalizing 方法返回的 Address 类
:return:
"""
geocoding = jpype.JClass('io.patamon.geocoding.Geocoding')
pattern = re.compile("similarity=(.*?)\n\)", re.S)
if type(Address_1) == type(Address_2) == Address:
return eval(re.findall(pattern,
str(geocoding.similarityWithResult(Address_1.__java__,
Address_2.__java__).toString()))[0])
else:
        raise TypeError(
            "Geocoding.similarityWithResult only supports computing the similarity between two "
            "Address objects returned by the Geocoding.normalizing method")
|
b4bf880eeb5b0666c06ef987d89beeb952ad6e1a
| 26,891 |
def validFilename(string):
""" return a valid filename from the input string
stripping characters that are not allowed
"""
out = ''
allowed = _allowedfilenamechars()
for s in string:
if s in allowed:
out += s
return out
|
14dd0cb10dd775f61035ec1a85c74f981c5432aa
| 26,893 |
def contains_source_code(http_response):
"""
:param http_response: The HTTP response object
:return: A tuple with:
- re.match object if the file_content matches a source code file
- A tuple containing the programming language names
"""
body = http_response.get_body()
for match, _, _, lang in _multi_re.query(body):
if is_false_positive(http_response, match, lang):
continue
return match, lang
return None, None
|
eadabdf1782e6785b34b7c65853e1cecfc81ab81
| 26,894 |
def wht(at1,at2,ints):
"""
Assign weights for Hessian matrix given 1-2,1-3,1-4 interactions
:param at1: first atom
:param at2: second atom
:param ints: tuples of 1-2,1-3,1-4 interacting pair
:return: 0 for diagonal, assigned weight constant for all other
"""
apair = [at1,at2]
int2, int3, int4 = ints
if at1 == at2:
return 0.0
elif apair in int2:
return co.WEIGHTS['h12']
elif apair in int3:
return co.WEIGHTS['h13']
elif apair in int4:
return co.WEIGHTS['h14']
else:
return co.WEIGHTS['h']
|
b9e5e01743a6f732c63fbfc71dc264293d890d1b
| 26,895 |
def _get_users_with_perms(obj, attach_perms=False, with_group_users=None):
"""
Returns a list of users with their permissions on an object obj.
"""
user_obj_perms = get_user_perms(obj)
if user_obj_perms is None:
return get_users_with_perms(
obj, attach_perms=attach_perms, with_group_users=with_group_users)
user_perms = {}
if attach_perms:
for perm in user_obj_perms:
if perm.user in user_perms:
user_perms[perm.user].add(perm.permission.codename)
else:
user_perms[perm.user] = set([perm.permission.codename])
else:
user_perms = [
perm.user for perm in user_obj_perms.only('user').distinct('user')
]
if with_group_users:
user_perms = _get_group_users_with_perms(obj, attach_perms, user_perms)
return user_perms
|
79e0f80d28133e2dc1aef69bcf727d572d259d5b
| 26,896 |
import base64
def get_file_data(request, oid, version=None, status=None):
"""Get document data"""
document = get_document(request, oid, version, status)
if request.content_type.startswith('text/xml'):
data = Binary(document.data.data)
else:
data = base64.b64encode(document.data.data).decode()
return {
'data': data
}
|
1195671fb7f428e624cccb1f7dc89e92b7ab5d13
| 26,897 |
def batch_get_all_movies():
"""Attempt to get all movies downloaded and serialized. Pickup where
the last attempt left off."""
goal = len(movie_id_df.movie_id)
pickup = check_row_files()
for i in range(5000, goal, 5000):
print(f"Batch serializing the next 5000 movies (to {i}).")
batch_serialize(pickup, i)
if goal % 5000 != 0:
remainder = goal % 5000
batch_serialize(goal - remainder, goal)
return True
|
0225c89dfa76223d1354df48c5cb441dbb171461
| 26,898 |
def dataset_from_datasource(dsdict, *, datasource_name, dataset_name=None, **dsrc_args):
"""Transformer: Create a Dataset from a DataSource object
This is just a thin wrapper around Dataset.from_datasource in order to
conform to the transformer API
Parameters
----------
dsdict: dict, ignored.
Because this is a source, this argument is unnecessary (except to
conform to the transformer function API) and is ignored
datasource_name: str, required
Name of datasource in DataSource catalog
dataset_name: str
Name of the generated Dataset. If None, this will be the `datasource_name`
dsrc_args: dict
Arguments are the same as the `Dataset.from_datasource()` constructor
Returns
-------
dict: {dataset_name: Dataset}
"""
if dataset_name is None:
dataset_name = datasource_name
ds = Dataset.from_datasource(dataset_name=dataset_name, datasource_name=datasource_name, **dsrc_args)
return {dataset_name: ds}
|
644bb001c874c0e3d4552363b5edb8ecb369ee05
| 26,899 |
def fix_saving_name(name):
"""Neutralizes backslashes in Arch-Vile frame names"""
return name.rstrip('\0').replace('\\', '`')
|
ba7063766f3397b955a427b4304605fa2add48fb
| 26,902 |
def compute_metrics(logits, labels, lengths):
"""Computes metrics and returns them."""
loss = cross_entropy_loss(logits, labels, lengths)
# Computes sequence accuracy, which is the same as the accuracy during
# inference, since teacher forcing is irrelevant when all output are correct.
token_accuracy = jnp.argmax(logits, -1) == jnp.argmax(labels, -1)
sequence_accuracy = (
jnp.sum(mask_sequences(token_accuracy, lengths), axis=-1) == lengths
)
accuracy = jnp.mean(sequence_accuracy)
metrics = {
'loss': loss,
'accuracy': accuracy,
}
return metrics
|
2fa08ad9c06e5860f4cb57ca891087b7d67e7806
| 26,905 |
def replace_prelu(input_graph_def: util.GraphDef) -> util.GraphDef:
"""
Replace all Prelu-activations in the graph with supported TF-operations.
Args:
input_graph_def: TF graph definition to examine
Returns:
Updated copy of the input graph with Prelu-nodes replaced by supported
TF operations
"""
def _predicate(node): return node.op == 'Prelu'
return util.replace_matching_nodes(input_graph_def, _predicate,
_split_prelu)
|
694d84f2661261a320a3d9cee360dd5980bb3a9d
| 26,906 |
def tf_parse_filename_classes(filename, normalization='None', normalization_factor=1, augmentation=False):
"""Take batch of filenames and create point cloud and label"""
idx_lookup = {'airplane': 0, 'bathtub': 1, 'bed': 2, 'bench': 3, 'bookshelf': 4,
'bottle': 5, 'bowl': 6, 'car': 7, 'chair': 8, 'cone': 9,
'cup': 10, 'curtain': 11, 'desk': 12, 'door': 13, 'dresser': 14,
'flower_pot': 15, 'glass_box': 16, 'guitar': 17, 'keyboard': 18,
'lamp': 19, 'laptop': 20, 'mantel': 21, 'monitor': 22, 'night_stand': 23,
'person': 24, 'piano': 25, 'plant': 26, 'radio': 27, 'range_hood': 28,
'sink': 29, 'sofa': 30, 'stairs': 31, 'stool': 32, 'table': 33,
'tent': 34, 'toilet': 35, 'tv_stand': 36, 'vase': 37, 'wardrobe': 38,
'xbox': 39, 'sockettt':0, 'sockettf': 1, 'can':2, 'tin_can':3, 'mug':4, 'jar':5, 'AC1':0, 'AC5_2':1,
'AC6_2':2, 'AC8_20200518':3, 'AC9':4, 'AC12':5}
def parse_filename_classes(filename_batch, normalization='None', normalization_factor=1, augmentation=False):
pt_clouds = []
labels = []
inds = []
pt_cloud_no_outliers = np.asarray([])
for filename in filename_batch:
inds = []
if tf.strings.split(filename, '.')[-1].numpy().decode() == 'npy':
# Read in point cloud
filename_str = filename.numpy().decode()
pt_cloud = np.load(filename_str)
else:
filename_str = filename.numpy().decode()
pc = o3d.io.read_point_cloud(filename_str)
pt_cloud = np.asarray(pc.points)
#inds.extend(abs(pt_cloud[:,2] - np.mean(pt_cloud[:,2])) > 0.008 * np.std(pt_cloud[:,2]))
#inds.extend(abs(pt_cloud[:,1] - np.mean(pt_cloud[:,1])) > 0.008 * np.std(pt_cloud[:,1]))
#inds.extend(abs(pt_cloud[:,0] - np.mean(pt_cloud[:,0])) > 0.008 * np.std(pt_cloud[:,0]))
#inds = np.unique(np.asarray(inds))
#print(len(inds))
#pt_cloud_no_outliers = np.asarray([pt_cloud[i] for i in range(len(pt_cloud)) if i not in inds])
#_, inds = o3d.geometry.PointCloud().remove_statistical_outlier(20, 0.2)
#o3d.visualization.draw_geometries([pc_no_outliers])
#pt_cloud_no_outliers = np.asarray(pc.points)[inds]
#center = [np.mean(pt_cloud_no_outliers[:,0]), np.mean(pt_cloud_no_outliers[:,1]), np.mean(pt_cloud_no_outliers[:,2])]
#pt_cloud = pt_cloud - np.asarray(center)
center = [np.mean(pt_cloud[:,0]), np.mean(pt_cloud[:,1]), np.mean(pt_cloud[:,2])]
pt_cloud = pt_cloud - np.asarray(center)
#inds.extend(np.argwhere(abs(pt_cloud[:,2] - np.mean(pt_cloud[:,2])) > 2 * np.std(pt_cloud[:,2])))
#inds.extend(np.argwhere(abs(pt_cloud[:,1] - np.mean(pt_cloud[:,1])) > 2 * np.std(pt_cloud[:,1])))
#inds.extend(np.argwhere(abs(pt_cloud[:,0] - np.mean(pt_cloud[:,0])) > 2 * np.std(pt_cloud[:,0])))
#inds = np.unique(np.asarray(inds))
#pt_cloud_no_outliers = np.asarray([pt_cloud[i] for i in range(len(pt_cloud)) if i not in inds])
#tf.print(inds.shape)
#dists = np.linalg.norm(pt_cloud, axis=1)
if normalization=='Single':
#old_range = (np.max(np.linalg.norm(pt_cloud, axis=1))- np.min(np.linalg.norm(pt_cloud, axis=1)))
#new_range = 1
#pt_cloud = ((pt_cloud - np.min(np.linalg.norm(pt_cloud, axis=1)))/old_range) + 0.5
pt_cloud = pt_cloud/np.max(np.linalg.norm(pt_cloud, axis=1))#pt_cloud_no_outliers, axis=1))
# Add rotation and jitter to point cloud
if augmentation:
theta = np.random.random() * 2*3.141
A = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
offsets = np.random.normal(0, 0.01, size=pt_cloud.shape)
pt_cloud = np.matmul(pt_cloud, A) + offsets
# Create classification label
obj_type = filename_str.split('/')[1]# e.g., airplane, bathtub
#label = np.zeros(40, dtype=np.float32)
#label[idx_lookup[obj_type]] = 1.0
label = idx_lookup[obj_type]
labels.append(label)
pt_clouds.append(pt_cloud)
#max_individual = np.asarray([np.max(np.linalg.norm(pc, axis=1)) for pc in pt_clouds])
#if normalization.numpy().decode()=='Single':
# pt_clouds = [pc/max_individual[i] for i,pc in enumerate(pt_clouds)]
#elif normalization.numpy().decode()=='Relative':
# pt_clouds = [pc/normalization_factor for i,pc in enumerate(pt_clouds)]
return np.stack(pt_clouds), np.stack(labels)
x,y = tf.py_function(parse_filename_classes, [filename, normalization, normalization_factor, augmentation], [tf.float32, tf.int32])
x.set_shape([None for _ in range(3)])
y.set_shape([None for _ in range(1)])
return x, y
|
75cc87faa46f25485e2c097c5243f0a44bda1554
| 26,907 |
def next_traj_points(dimension: int, last_point):
"""
:param dimension: dimension of our fake latent trajectory
:param last_point: the last point that was sent to the client as a numpy array
    :return: a bitstring encoding the next 3 points of a noisy Lorenz system, and the new last point
"""
#Euler step size
step = 0.001
#Lorenz parameters
beta = 2.666666
sigma = 10
rho = 28
point = np.zeros((4, dimension), dtype='float32')
point[0] = last_point
#compute the next few points
for i in range(1, 4):
x, y, z = point[i - 1, 0], point[i - 1, 1], point[i - 1, 2]
#Lorenz system
dx, dy, dz = sigma*(y - x), x*(rho - z) - y, x*y - beta*z
point[i, 0] = x + step*dx
point[i, 1] = y + step*dy
point[i, 2] = z + step*dz
#simple uniform noise
        point[i] += np.random.rand(dimension).astype('float32')
new_point = point[2]
#we will actually send a scaled down version to the server for visualization purposes
point *= 0.01
string = bs.BitArray(bits='')
for i in range(1, 4):
for j in range(dimension):
string = string + bs.Bits(point[i, j])
return string, new_point
|
6e33833b877cc8bc218d8c0a74357c6b4d8d2a9b
| 26,908 |
from six.moves import urllib
def has_internet():
"""
Test if Internet is available.
Failure of connecting to the site "http://www.sagemath.org" within a second
is regarded as internet being not available.
EXAMPLES::
sage: from sage.doctest.external import has_internet
sage: has_internet() # random
True
"""
try:
urllib.request.urlopen("http://www.sagemath.org",timeout=1)
return True
except urllib.error.URLError:
return False
|
d9cacc17a315abe85022e9a889d4a1da3c9b6a49
| 26,909 |
def read_warfle_text(path: str) -> str:
"""Returns text from *.warfle files"""
try:
with open(path, "r") as text:
return text.read()
except Exception as e:
raise Exception(e)
|
ba15fe6a62fbefe492054b0899dcdbff35462154
| 26,910 |
from typing import List
from typing import OrderedDict
def from_mido(midi: MidiFile, duplicate_note_mode: str = "fifo") -> Music:
"""Return a mido MidiFile object as a Music object.
Parameters
----------
midi : :class:`mido.MidiFile`
Mido MidiFile object to convert.
duplicate_note_mode : {'fifo', 'lifo', 'all'}, default: 'fifo'
Policy for dealing with duplicate notes. When a note off
message is presetned while there are multiple correspoding note
on messages that have not yet been closed, we need a policy to
decide which note on messages to close.
- 'fifo' (first in first out): close the earliest note on
        - 'lifo' (last in first out): close the latest note on
- 'all': close all note on messages
Returns
-------
:class:`muspy.Music`
Converted Music object.
"""
if duplicate_note_mode.lower() not in ("fifo", "lifo", "all"):
raise ValueError(
"`duplicate_note_mode` must be one of 'fifo', 'lifo' and " "'all'."
)
def _get_active_track(t_idx, program, channel):
"""Return the active track."""
key = (program, channel)
if key in tracks[t_idx]:
return tracks[t_idx][key]
tracks[t_idx][key] = Track(program, _is_drum(channel))
return tracks[t_idx][key]
# Raise MIDIError if the MIDI file is of Type 2 (i.e., asynchronous)
if midi.type == 2:
raise MIDIError("Type 2 MIDI file is not supported.")
# Raise MIDIError if ticks_per_beat is not positive
if midi.ticks_per_beat < 1:
raise MIDIError("`ticks_per_beat` must be positive.")
time = 0
song_title = None
tempos: List[Tempo] = []
key_signatures: List[KeySignature] = []
time_signatures: List[TimeSignature] = []
lyrics: List[Lyric] = []
annotations: List[Annotation] = []
copyrights = []
# Create a list to store converted tracks
tracks: List[OrderedDict] = [
OrderedDict() for _ in range(len(midi.tracks))
]
# Create a list to store track names
track_names = [None] * len(midi.tracks)
# Iterate over MIDI tracks
for track_idx, midi_track in enumerate(midi.tracks):
# Set current time to zero
time = 0
# Keep track of the program used in each channel
channel_programs = [0] * 16
# Keep track of active note on messages
active_notes = defaultdict(list)
# Iterate over MIDI messages
for msg in midi_track:
# Update current time (delta time is used in a MIDI message)
time += msg.time
# === Meta Data ===
# Tempo messages
if msg.type == "set_tempo":
tempos.append(
Tempo(time=int(time), qpm=float(tempo2bpm(msg.tempo)))
)
# Key signature messages
elif msg.type == "key_signature":
if msg.key.endswith("m"):
mode = "minor"
root = note_str_to_note_num(msg.key[:-1])
else:
mode = "major"
root = note_str_to_note_num(msg.key)
key_signatures.append(
KeySignature(time=int(time), root=root, mode=mode)
)
# Time signature messages
elif msg.type == "time_signature":
time_signatures.append(
TimeSignature(
time=int(time),
numerator=int(msg.numerator),
denominator=int(msg.denominator),
)
)
# Lyric messages
elif msg.type == "lyrics":
lyrics.append(Lyric(time=int(time), lyric=str(msg.text)))
# Marker messages
elif msg.type == "marker":
annotations.append(
Annotation(
time=int(time),
annotation=str(msg.text),
group="marker",
)
)
# Text messages
elif msg.type == "text":
annotations.append(
Annotation(
time=int(time), annotation=str(msg.text), group="text"
)
)
# Copyright messages
elif msg.type == "copyright":
copyrights.append(str(msg.text))
# === Track specific Data ===
# Track name messages
elif msg.type == "track_name":
if midi.type == 0 or track_idx == 0:
song_title = msg.name
else:
track_names[track_idx] = msg.name
# Program change messages
elif msg.type == "program_change":
# Change program of the channel
channel_programs[msg.channel] = msg.program
# Note on messages
elif msg.type == "note_on" and msg.velocity > 0:
# Will later be closed by a note off message
active_notes[(msg.channel, msg.note)].append(
(time, msg.velocity)
)
# Note off messages
# NOTE: A note on message with a zero velocity is also
# considered a note off message
elif msg.type == "note_off" or (
msg.type == "note_on" and msg.velocity == 0
):
# Skip it if there is no active notes
note_key = (msg.channel, msg.note)
if not active_notes[note_key]:
continue
# Get the active track
program = channel_programs[msg.channel]
track = _get_active_track(track_idx, program, msg.channel)
# NOTE: There is no way to disambiguate duplicate notes
# (of the same pitch on the same channel). Thus, we
# need a policy for handling duplicate notes.
# 'FIFO': (first in first out) close the earliest note
if duplicate_note_mode.lower() == "fifo":
onset, velocity = active_notes[note_key][0]
track.notes.append(
Note(
time=int(onset),
pitch=int(msg.note),
duration=int(time - onset),
velocity=int(velocity),
)
)
del active_notes[note_key][0]
# 'LIFO': (last in first out) close the latest note on
elif duplicate_note_mode.lower() == "lifo":
onset, velocity = active_notes[note_key][-1]
track.notes.append(
Note(
time=int(onset),
pitch=int(msg.note),
duration=int(time - onset),
velocity=int(velocity),
)
)
del active_notes[note_key][-1]
                # 'all': close all note on messages
                elif duplicate_note_mode.lower() == "all":
for onset, velocity in active_notes[note_key]:
track.notes.append(
Note(
time=int(onset),
pitch=int(msg.note),
duration=int(time - onset),
velocity=int(velocity),
)
)
del active_notes[note_key]
# Control change messages
elif msg.type == "control_change":
# Get the active track
program = channel_programs[msg.channel]
track = _get_active_track(track_idx, program, msg.channel)
# Append the control change message as an annotation
track.annotations.append(
Annotation(
time=int(time),
annotation={
"number": int(msg.control),
"value": int(msg.value),
},
group="control_changes",
)
)
# End of track message
elif msg.type == "end_of_track":
break
# Close all active notes
for (channel, note), note_ons in active_notes.items():
program = channel_programs[channel]
track = _get_active_track(track_idx, program, channel)
for onset, velocity in note_ons:
track.notes.append(
Note(
time=int(onset),
pitch=int(note),
duration=int(time - onset),
velocity=int(velocity),
)
)
music_tracks = []
for track, track_name in zip(tracks, track_names):
for sub_track in track.values():
sub_track.name = track_name
music_tracks.extend(track.values())
# Sort notes
for music_track in music_tracks:
music_track.notes.sort(
key=attrgetter("time", "pitch", "duration", "velocity")
)
# Meta data
metadata = Metadata(
title=str(song_title),
source_format="midi",
copyright=" ".join(copyrights) if copyrights else None,
)
return Music(
metadata=metadata,
resolution=int(midi.ticks_per_beat),
tempos=tempos,
key_signatures=key_signatures,
time_signatures=time_signatures,
lyrics=lyrics,
tracks=music_tracks,
)
|
795b13dcba41a7c10270e6cd799ef439a0ceb426
| 26,911 |
def iterable_validator(iterable_type, member_type):
# type: (ISINSTANCE, ISINSTANCE) -> Callable[[object, Attribute, Iterable[Any]], None]
"""``attrs`` validator to perform deep type checking of iterables."""
def _validate_iterable(instance, attribute, value):
# type: (object, Attribute, Iterable[Any]) -> None
# pylint: disable=unused-argument
"""Validate that a dictionary is structured as expected.
:raises TypeError: if ``value`` is not of ``iterable_type`` type
:raises TypeError: if ``value`` members are not all of ``member_type`` type
"""
if not isinstance(value, iterable_type):
raise TypeError('"{name}" must be a {type}'.format(name=attribute.name, type=iterable_type))
for member in value:
if not isinstance(member, member_type):
raise TypeError(
'"{name}" members must all be of type "{type}"'.format(name=attribute.name, type=member_type)
)
return _validate_iterable
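
# Usage sketch (not from the original source): wiring the validator into an attrs class.
import attr

@attr.s
class IntBag(object):
    items = attr.ib(validator=iterable_validator(list, int))

IntBag(items=[1, 2, 3])        # passes both the container and member checks
# IntBag(items=[1, "two"])     # would raise TypeError from the member-type check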
|
708738c7bc55e4bb4c4fa9ae93cf56ddb038ebda
| 26,912 |
def sample_filtering(df, metadata, filter_by):
"""Filter samples based on selected features and values."""
# Get the variable a values specified for sample filtering
filter_col = filter_by[0]
filter_values = filter_by[1].split(sep=',')
# Saving a new metadata file containing only the samples remaining after filtering
filt_metadata = pd.DataFrame()
for i in filter_values:
filt_metadata = filt_metadata.append(metadata[metadata[filter_col] == i])
filt_metadata_file = 'filtered_metadata_norm_test.csv'
filt_metadata.to_csv(filt_metadata_file, index=False)
# Saving a new input file containing only the samples remaining after filtering
from_formularity = [
'Mass', 'C', 'H', 'O', 'N', 'C13', 'S', 'P', 'Na', 'El_comp', 'Class',
'NeutralMass', 'Error_ppm', 'Candidates'
]
col_df = filt_metadata['SampleID'].to_list()
from_formularity.extend(col_df)
filt_df = df[from_formularity]
filt_df_file = 'filtered_input_norm_test.csv'
filt_df.to_csv(filt_df_file, index=False)
return filt_df_file, filt_metadata_file
|
437391b946ed61817292c160402b1c6e0b81fa94
| 26,913 |
def user_is_registered_or_more(user_id):
"""Check that user is registered, moderator, or admin."""
user = Users.query.filter_by(UserID=user_id).first()
user_map = UsersAccessMapping.query.filter_by(UserID=user_id).first()
if user is None:
return False
if user_map is None:
return False
if user_map.UsersAccessLevelID >= CONFIG.REGISTERED_LEVEL:
return True
return False
|
a405ee98673ffa87f260519ec8fc9ff88efa2089
| 26,915 |
from typing import Union
from typing import Sequence
from typing import Optional
from typing import Type
def run(
cmds: Union[str, Sequence[Union[str, Sequence[str]]]],
shell: Optional[Union[str, bool]] = None,
mode: Type[Mode] = str,
block: bool = True,
**kwargs
) -> Processes:
"""
Runs several commands that pipe to each other in a python-aware way.
Args:
cmds: Any number of commands (lists or strings) to pipe together. This may be
a string, in which case it will be split on the pipe ('|') character to
get the component commands.
shell: Can be a boolean specifying whether to execute the command
using the shell, or a string value specifying the shell executable to use
(which also implies shell=True). If None, the value is auto-detected - `True` if `cmds`
is a string otherwise `False. If `true` the command is executed via the default shell
(which, according to the `subprocess` docs, is `/bin/sh`).
mode: I/O mode; can be str (text) or bytes (raw).
block: Whether to block until all processes have completed.
kwargs: Additional keyword arguments to pass to :class:`Processes`
constructor.
Returns:
A :class:`subby.Processes` object.
Raises:
subprocess.CalledProcessError: if any subprocess in pipe returns exit
code not 0.
Examples:
Usage 1: Pipe multiple commands together and print output to file
example_cmd1 = ['dx', 'download', 'file-xxxx']
example_cmd2 = ['gunzip']
out_f = "somefilename.fasta"
chain([example_cmd1, example_cmd2], stdout=out_f)
This function will print and execute the following command:
'dx download file-xxxx | gunzip > somefilename.fasta'
Usage 2: Pipe multiple commands together and return output
example_cmd1 = ['gzip', 'file.txt']
example_cmd2 = ['dx', 'upload', '-', '--brief']
file_id = chain([example_cmd1, example_cmd2], block=True).output
This function will print and execute the following command:
'gzip file.txt | dx upload - --brief '
and return the output.
Usage 3: Run a single command with output to file
run('echo "hello world"', stdout='test2.txt')
Note: This calls the run function instead of chain.
Usage 4: A command failing mid-pipe should return CalledProcessedError
chain(
[['echo', 'hi:bye'], ['grep', 'blah'], ['cut', '-d', ':', '-f', '1']]
)
Traceback (most recent call last):
...
CalledProcessError: Command '['grep', 'blah']' returned non-zero
exit status 1
"""
if isinstance(cmds, str):
cmds = [c.strip() for c in cmds.split("|")]
if shell is None:
shell = True
else:
cmds = list(cmds)
if len(cmds) == 0:
raise ValueError("'cmds' cannot be an empty list")
if shell is None:
shell = False
if shell is False:
cmds = utils.command_strings_to_lists(cmds)
else:
cmds = utils.command_lists_to_strings(cmds)
if shell is True:
executable = DEFAULT_EXECUTABLE
elif isinstance(shell, str):
executable = shell
else:
executable = None
processes = Processes(
cmds, mode=mode, shell=(shell is not False), executable=executable, **kwargs
)
if block:
with processes:
processes.block()
else:
processes.run()
return processes
|
370f4867701fcda0683030749e0a9754bf2b518e
| 26,917 |
def _infer_labels(center_e):
"""Create labels based on center extrema."""
# Infer labels
if center_e == 'trough':
labels = ['Trough', 'Peak', 'Inflection']
keys = ['sample_trough', 'sample_next_peak', 'sample_end']
elif center_e == 'peak':
labels = ['Peak', 'Trough', 'Inflection']
keys = ['sample_peak', 'sample_next_trough', 'sample_end']
return labels, keys
|
854a6bbe1c45a806d3c6ecd15bee11f3ec9496a4
| 26,918 |
def MXXXtoMYYY(redshift = 0.3,
MXXX = 6E14,
CXXX = 3.0,
wrt = "crit",
new_wrt = "crit",
XXX = 500.0,
YYY = 500.0,
cosmo = cosmo):
"""
    It converts the (MXXX,CXXX) into (MYYY,CYYY) for a halo at a given redshift assuming spherical symmetry.
(1) XXX and YYY are float and negative value means virial estimation.
(2) it returns (the MXXX,CXXX,RXXX,MYYY,CYYY,RYYY,rhos,rs)
where MXXX, MYYY are in the Msun, RXXX and RYYY are in Mpc.
rhos is in Msun/Mpc^3 and rs is in Mpc.
---
a. It first solves the NFW model parameters - rhos, rs.
rhos = FactorO(redshift,XXX) * (CXXX**3 / 3) / (log(1+CXXX) - CXXX/(1+CXXX))
where FactorO(z,XXX) = OverDen(z) * rho_m(z) if XXX = VIR
XXX * rho_crit(z) if XXX != VIR
b. It solves for CYYY:
FactorO(redshift,YYY) * (CYYY**3 / 3) / (log(1+CYYY) - CYYY/(1+CYYY))
= FactorO(redshift,XXX) * (CXXX**3 / 3) / (log(1+CXXX) - CXXX/(1+CXXX))
c. Solve for rs:
rs**3 = MXXX / ( 4*pi*rhos*(log(1+CXXX) - CXXX/(1+CXXX)) )
d. MYYY = 4*pi*rhos*rs**3 * [log(1+CYYY) - CYYY/(1+CYYY)]
e. RYYY = CYYY * rs and RXXX = CXXX * rs
---
Parameters:
-`redshift`: float, the redshift of the halo.
-`MXXX`: float, the mass of the halo in the unit of Msun.
-`CXXX`: float, the concentration of the halo associating to the overdensity of the input mass.
-`wrt`: string. It has to be either 'crit' or 'mean'. It will be overwritten as 'vir' if XXX < 0.0.
-`new_wrt`: string. Same as above, but it will be overwritten as 'vir' if YYY < 0.0.
-`XXX`: float, the overdensity against the rho_crit for the given input halo mass.
Negative if it is with respect to the virial mass.
-`YYY`: float, the overdensity against the rho_crit for the desired output halo mass.
Negative if it is with respect to the virial mass.
-`cosmo`: dict. The cosmology parameter for this halo. It has to be compatible with
the format of the cosmolopy module.
Return:
-`MXXX`: float, the input halo mass in the unit of Msun.
-`CXXX`: float, the input concentration.
-`RXXX`: float, the radius for the given input halo mass in the unit of Mpc.
-`MYYY`: float, the output halo mass in the unit of Msun.
-`CYYY`: float, the output concentration.
-`RYYY`: float, the radius for the output halo mass in the unit of Mpc.
-`rhos`: float, the normalization of the NFW model (Msun/Mpc^3)
-`rs`: float, the core radius of the NFW model (Mpc).
"""
    # sanity check
if not ( (redshift >= 0.0) and (MXXX > 0.0) and (CXXX > 0.0) ):
raise ValueError("The input halo params are wrong, (redshift, MXXX, CXXX):", redshift, MXXX, CXXX, ".")
# sanity check on wrt
if wrt not in ["crit", "mean"]:
raise NameError("The input wrt", wrt, "has to be crit or mean.")
if new_wrt not in ["crit", "mean"]:
raise NameError("The input new_wrt", new_wrt, "has to be crit or mean.")
# set up wrt
if XXX < 0.0: wrt = "vir"
if YYY < 0.0: new_wrt = "vir"
# Define the function form for the convenience
def FactorCC(CXXX):
return (CXXX**3 / 3.0) / (log(1.0+CXXX) - CXXX / (1.0+CXXX))
# Define the function FactorO
def FactorO(redshift, XXX, wrt):
if wrt == "crit":
return XXX * \
cosdens.cosmo_densities(**cosmo)[0] * cosdist.e_z(redshift, **cosmo)**2
elif wrt == "mean":
return XXX * \
cosdens.cosmo_densities(**cosmo)[0] * cosdist.e_z(redshift, **cosmo)**2 * cosdens.omega_M_z(redshift, **cosmo)
elif wrt == "vir":
return calc_vir_overden(zd = redshift, cosmo = cosmo) * \
cosdens.cosmo_densities(**cosmo)[0] * cosdist.e_z(redshift, **cosmo)**2 * cosdens.omega_M_z(redshift, **cosmo)
# Solve rhos Msun/Mpc^3
rhos = FactorO(redshift = redshift, XXX = XXX, wrt = wrt) * FactorCC(CXXX = CXXX)
    # Define the function we solve for CYYY:
def Solve4CYYY_Func(CYYY):
return FactorO(redshift = redshift, XXX = YYY, wrt = new_wrt) * FactorCC(CXXX = CYYY) - \
FactorO(redshift = redshift, XXX = XXX, wrt = wrt) * FactorCC(CXXX = CXXX)
# Solve for CYYY
CYYY = optimize.newton(Solve4CYYY_Func, CXXX, fprime=None, args=(), tol=1.48e-08, maxiter=50)
    # Solve for rs [Mpc]
rs = ( MXXX / ( 4.0 * pi * rhos * ( log(1.0+CXXX) - CXXX/(1.0+CXXX) ) ) )**(1.0/3.0)
    # Solve for MYYY [Msun]
MYYY = 4.0 * pi * rhos * rs**3 * ( log(1.0+CYYY) - CYYY/(1.0+CYYY) )
    # Solve for RXXX and RYYY [Mpc]
RXXX = CXXX * rs
RYYY = CYYY * rs
return np.array([ MXXX, CXXX, RXXX, MYYY, CYYY, RYYY, rhos, rs ], dtype=float)
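# Hedged usage sketch (illustrative values, not from the original source): convert an
# M500c = 6e14 Msun halo with c500c = 3 at z = 0.3 into its virial-overdensity
# counterpart; the negative YYY selects the virial definition, as documented above.
# M500, C500, R500, Mvir, Cvir, Rvir, rhos, rs = MXXXtoMYYY(
#     redshift=0.3, MXXX=6E14, CXXX=3.0, wrt="crit", new_wrt="crit",
#     XXX=500.0, YYY=-1.0, cosmo=cosmo)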
|
f6c7e628ce79785c9d28939e6e3de8fe3ed3878a
| 26,919 |
import html
def gldas_to_cycles(
latitude,
longitude,
output_file,
start_date="2000-01-01",
end_date="2017-12-31",
gldas_path="/raw-data/GLDAS",
):
"""Transform GLDAS to Cycles."""
j = Job("gldas_to_cycles")
    j.addProfile(Profile(Namespace.CONDOR, key="+SingularityImage", value=html.unescape("&quot;/cvmfs/singularity.opensciencegrid.org/mintproject/cycles:0.9.4-alpha&quot;")))
j.addArguments("--start-date", start_date)
j.addArguments("--end-date", end_date)
j.addArguments("--latitude", latitude)
j.addArguments("--longitude", longitude)
j.addArguments("--gldas-path", gldas_path)
j.addArguments("--output", output_file)
j.uses(File(output_file), link=Link.OUTPUT, transfer=True)
return j
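# Hedged usage sketch (assumes the Pegasus DAX3 API that Job/Profile/File above come
# from; the workflow name and output file name are illustrative):
# from Pegasus.DAX3 import ADAG
# dax = ADAG("cycles-workflow")
# weather_job = gldas_to_cycles(latitude=34.05, longitude=-118.24,
#                               output_file="met34.05Nx118.24W.weather")
# dax.addJob(weather_job)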
|
1ce787266d5b232f0b8c76d639328a0dc4384e2f
| 26,920 |
def max_pool_2x2(x):
"""max_pool_2x2 downsamples a feature map by 2X."""
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='VALID')
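# Hedged usage sketch (TF 1.x graph mode, matching the tf.nn API used above): with
# 2x2 windows, stride 2 and VALID padding, the spatial dimensions are halved, so a
# [batch, 28, 28, 32] feature map comes out as [batch, 14, 14, 32].
# x = tf.placeholder(tf.float32, [None, 28, 28, 32])
# pooled = max_pool_2x2(x)   # pooled.shape -> (?, 14, 14, 32)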
|
45141be3715fd821c8712bc81c644498871d7b8c
| 26,921 |
def set_bit_value(val, offs, value):
"""Set bit at offset 'offs' to a specific value in 'val'."""
if value:
return set_bit(val, offs)
else:
return clear_bit(val, offs)
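# Hedged sketch of the helpers set_bit_value relies on but which are not shown in this
# snippet; these are the conventional one-liners and may differ from the project's own.
# def set_bit(val, offs):
#     return val | (1 << offs)
#
# def clear_bit(val, offs):
#     return val & ~(1 << offs)
#
# With those definitions: set_bit_value(0b1010, 0, 1) -> 0b1011 (11),
# and set_bit_value(0b1010, 1, 0) -> 0b1000 (8).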
|
793165cc53adc140b60521b6fc772efa80b69ebb
| 26,923 |
def gradientFunction(theta, X, y):
"""
Compute cost and gradient for logistic regression with regularization
computes the cost of using theta as the parameter for regularized logistic
regression and the gradient of the cost w.r.t. to the parameters.
"""
# Initialize some useful values
# number of training examples
m = X.shape[0]
# number of parameters
n = X.shape[1]
theta = theta.reshape((n, 1)) # due to the use of fmin_tnc
# gradient variable
grad = 0.
# ====================== YOUR CODE HERE ======================
# Instructions: Compute the gradient of a particular choice of theta.
# Compute the partial derivatives and set grad to the partial
# derivatives of the cost w.r.t. each parameter in theta
# h_teta = sigmoid(X @ theta)
grad = (1 / m) * X.T @ (sigmoid(X @ theta) - y)
# inner = (h_teta - y) * X
# grad = sum(inner) / m
# =============================================================
return grad
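# Hedged usage sketch (synthetic data, illustrative only; assumes the module-level
# sigmoid used above is available): the returned gradient is an (n, 1) column vector.
# import numpy as np
# X = np.hstack([np.ones((5, 1)), np.random.randn(5, 2)])  # intercept + 2 features
# y = np.array([[0], [1], [0], [1], [1]])
# theta0 = np.zeros(3)
# gradientFunction(theta0, X, y).shape   # -> (3, 1)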
|
5ca8c355474c9cab10b2b255b71b943b2b6b0aa1
| 26,924 |
def splice(tree, rep, tag):
"""Splice in a tree into another tree.
Walk ``tree``, replacing the first occurrence of a ``Name(id=tag)`` with
the tree ``rep``.
This is convenient for first building a skeleton with a marker such as
``q[name["_here_"]]``, and then splicing in ``rep`` later. See ``forall``
and ``envify`` for usage examples.
"""
@Walker
def doit(tree, *, stop, **kw):
if type(tree) is Name and tree.id == tag:
stop()
return rep
return tree
return doit.recurse(tree)
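# Hedged usage sketch (assumes the macropy-style q/name quasiquotes the docstring
# refers to; identifiers are illustrative):
# skeleton = q[lambda x: _here_ + 1]        # placeholder is a bare Name(id='_here_')
# filled = splice(skeleton, rep=q[x * 2], tag="_here_")
# # `filled` is now the AST of `lambda x: (x * 2) + 1`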
|
b42c5300b7ad9d5d04ba0233c94c735686e1300a
| 26,925 |