content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M)
---|---|---|
def record(location):
"""Creates an empty record."""
draft = RDMDraft.create({})
record = RDMRecord.publish(draft)
return record | 77c8069e4fd894f2ed1d760fc983a9a2094c0f6d | 18,717 |
import numpy as np
from scipy.signal import firwin2, filtfilt
def firfls(x, f_range, fs=1000, w=3, tw=.15):
"""
Filter signal with an FIR filter
    *Like firls in MATLAB
    Parameters
    ----------
    x : array-like, 1d
Time series to filter
f_range : (low, high), Hz
Cutoff frequencies of bandpass filter
fs : float, Hz
Sampling rate
w : float
Length of the filter in terms of the number of cycles
of the oscillation whose frequency is the low cutoff of the
bandpass filter
tw : float
Transition width of the filter in normalized frequency space
Returns
-------
x_filt : array-like, 1d
Filtered time series
"""
if w <= 0:
raise ValueError(
'Number of cycles in a filter must be a positive number.')
if np.logical_or(tw < 0, tw > 1):
raise ValueError('Transition width must be between 0 and 1.')
nyq = fs / 2
if np.any(np.array(f_range) > nyq):
raise ValueError('Filter frequencies must be below nyquist rate.')
if np.any(np.array(f_range) < 0):
raise ValueError('Filter frequencies must be positive.')
    Ntaps = int(np.floor(w * fs / f_range[0]))
if len(x) < Ntaps:
        raise RuntimeError(
            'Length of filter is longer than data. '
            'Provide more data or a shorter filter.')
# Characterize desired filter
f = [0, (1 - tw) * f_range[0] / nyq, f_range[0] / nyq,
f_range[1] / nyq, (1 + tw) * f_range[1] / nyq, 1]
m = [0, 0, 1, 1, 0, 0]
if any(np.diff(f) < 0):
        raise RuntimeError(
            'Invalid FIR filter parameters. '
            'Please decrease the transition width parameter.')
# Perform filtering
taps = firwin2(Ntaps, f, m)
x_filt = filtfilt(taps, [1], x)
if any(np.isnan(x_filt)):
raise RuntimeError(
'Filtered signal contains nans. Adjust filter parameters.')
# Remove edge artifacts
return _remove_edge(x_filt, Ntaps) | 71c7c3fc229ce8745f5940593a6e5a2f9bf12490 | 18,718 |
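# Usage sketch for the filter design above: the same firwin2 + filtfilt recipe applied
# to a synthetic 10 Hz oscillation. The band, sampling rate and signal are illustrative
# assumptions, and the edge-artifact trimming done by _remove_edge is omitted here.
import numpy as np
from scipy.signal import firwin2, filtfilt

fs, f_range, w, tw = 1000, (8, 12), 3, 0.15
nyq = fs / 2
ntaps = int(np.floor(w * fs / f_range[0]))            # filter length in samples
freqs = [0, (1 - tw) * f_range[0] / nyq, f_range[0] / nyq,
         f_range[1] / nyq, (1 + tw) * f_range[1] / nyq, 1]
gains = [0, 0, 1, 1, 0, 0]                             # bandpass shape with transition bands
taps = firwin2(ntaps, freqs, gains)

t = np.arange(0, 5, 1 / fs)
x = np.sin(2 * np.pi * 10 * t) + 0.5 * np.random.randn(t.size)
x_filt = filtfilt(taps, [1], x)                        # zero-phase bandpass-filtered copy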
def extract_and_coadd(ra, dec, pm_ra, pm_dec, match_radius=4./3600.,
search_radius=25./60, sigma_clip=None, query_timeout=60.,
upper_limits=True, return_exps=False):
"""
    The top-level function of this module. extract_and_coadd finds sources in
    the GALEX archive matching the target while accounting for its proper motion
    between observing visits, then coadds the fluxes from each visit.
Parameters
----------
ra : float
        Right ascension of target in decimal degrees.
dec : float
Declination of target in decimal degrees.
pm_ra : float
        Right ascension proper motion of target in mas/yr.
pm_dec : float
Declination proper motion of target in mas/yr.
match_radius : float
Radius within which to consider a GALEX source a match to the target
in degrees. For reference, the 1-sigma astrometric uncertainty is 0.4
arcseconds for GALEX.
search_radius : float
Radius in which to query the MCAT in degrees. If upper limits are
desired, this should be large enough for the MCAT to return results
whenever exposures were taken near enough that the target could have
been in the aperture.
sigma_clip : float
Exclude fluxes > this many sigma from median flux relative to their
measurement error. Careful with this. Stars show real variability
that is often well beyond measurement errors, so it is probably
unwise to sigma clip in most cases.
query_timeout : float
Seconds to wait for server to respond before giving up.
upper_limits : bool
Estimate upper limits for exposures where there is no match for the
source.
return_exps : bool
If True, return all the data provided by extract_source.
Returns
-------
nuv_coadd : tuple
Coadded flux and error in counts s-1 and, optionally, exposure info returned by
extract_source. Upper limits show up as -999 for the flux with a positive error.
fuv_coadd : tuple
As above, for fuv.
"""
data = extract_source(ra, dec, pm_ra, pm_dec, match_radius,
search_radius, query_timeout, upper_limits)
nuv_data, fuv_data = data
nuv = list(coadd_fluxes(*nuv_data[:3], sigma_clip=sigma_clip))
fuv = list(coadd_fluxes(*fuv_data[:3], sigma_clip=sigma_clip))
if return_exps:
nuv.append(nuv_data)
fuv.append(fuv_data)
nuv, fuv = map(tuple, (nuv, fuv))
return (nuv, fuv) | d28d62877ba0c8a99aba1596128dc764bc3e19b7 | 18,719 |
import aiohttp
from aiohttp import ClientSession
async def create_payout(
session: ClientSession, data: CreatePayoutRequest
) -> CreatePayoutResponse:
"""
Create a payout.
"""
url = RAZORPAY_BASE_URL + "/payouts"
async with session.post(
url,
json=data.__dict__,
auth=aiohttp.BasicAuth(RAZORPAY_KEY_ID, RAZORPAY_KEY_SECRET),
) as resp:
response = await resp.json()
print(response, resp.status)
return from_dict(data_class=CreatePayoutResponse, data=response) | c4b9dae09111c83efb1d5a9c5fd88050f11b5510 | 18,720 |
def eval_ocr_metric(pred_texts, gt_texts):
"""Evaluate the text recognition performance with metric: word accuracy and
1-N.E.D. See https://rrc.cvc.uab.es/?ch=14&com=tasks for details.
Args:
pred_texts (list[str]): Text strings of prediction.
gt_texts (list[str]): Text strings of ground truth.
Returns:
eval_res (dict[str: float]): Metric dict for text recognition, include:
- word_acc: Accuracy in word level.
- word_acc_ignore_case: Accuracy in word level, ignore letter case.
- word_acc_ignore_case_symbol: Accuracy in word level, ignore
letter case and symbol. (default metric for
academic evaluation)
- char_recall: Recall in character level, ignore
letter case and symbol.
- char_precision: Precision in character level, ignore
letter case and symbol.
            - 1-N.E.D: 1 - normalized_edit_distance.
            - BLEU: BLEU score between prediction and ground truth.
"""
assert isinstance(pred_texts, list)
assert isinstance(gt_texts, list)
assert len(pred_texts) == len(gt_texts)
match_res = count_matches(pred_texts, gt_texts)
eps = 1e-8
char_recall = 1.0 * match_res['true_positive_char_num'] / (
eps + match_res['gt_char_num'])
char_precision = 1.0 * match_res['true_positive_char_num'] / (
eps + match_res['pred_char_num'])
word_acc = 1.0 * match_res['match_word_num'] / (
eps + match_res['gt_word_num'])
word_acc_ignore_case = 1.0 * match_res['match_word_ignore_case'] / (
eps + match_res['gt_word_num'])
word_acc_ignore_case_symbol = 1.0 * match_res[
'match_word_ignore_case_symbol'] / (
eps + match_res['gt_word_num'])
eval_res = {}
eval_res['word_acc'] = word_acc
eval_res['word_acc_ignore_case'] = word_acc_ignore_case
eval_res['word_acc_ignore_case_symbol'] = word_acc_ignore_case_symbol
eval_res['char_recall'] = char_recall
eval_res['char_precision'] = char_precision
eval_res['1-N.E.D'] = 1.0 - match_res['ned']
eval_res['BLEU'] = match_res['bleu']
for key, value in eval_res.items():
eval_res[key] = float('{:.4f}'.format(value))
return eval_res | 0ec92be231d93abf9db8247369ba5ea546bd1b17 | 18,721 |
def get_all_zones():
"""Return a list of all available zones."""
cf = CloudFlare.CloudFlare(raw=True)
page_number = 0
total_pages = 1
all_zones = []
while page_number < total_pages:
page_number += 1
raw_results = cf.zones.get(params={'per_page':100, 'page':page_number})
zones = raw_results['result']
all_zones += zones
total_pages = raw_results['result_info']['total_pages']
return all_zones | 0b9f6bf9b7b8fe274f7c6f856abf1d9397384c3c | 18,722 |
def entry_id(e):
"""entry identifier which is not the bibtex key
"""
authortitle = ''.join([author_id(e),title_id(e)])
return (e.get('doi','').lower(), authortitle) | 7c663d6c2bbdfcef8168c11a78e176e634cf644b | 18,723 |
def AskNumber(text="unknown task"):
"""
Asks the user to interactively input a number (float or int) at any point in the script, and returns the input number.
| __option__ | __description__
| --- | ---
| *text | an optional string to identify for what purpose the chosen number will be used.
"""
    def ValidateNumber(text):
        instring = input("\n\nwrite a comma or integer number to use for "+str(text)+" (example: 15 or 15.83)\nnumber = ")
        try:
            # input() returns a string in Python 3, so parse it explicitly
            innumber = int(instring)
        except ValueError:
            try:
                innumber = float(instring)
            except ValueError:
                print("""\n---> error: the number must be either a floating point comma or integer number""")
                return ValidateNumber(text)
        return innumber
return ValidateNumber(text) | 41949d0a2e2d87b5cdb26d2db9bff9a64fbeeb1d | 18,724 |
def TokenEmphasis(character="_"):
"""
Italic (`<i>`, `<em>`) text is rendered with one asterisk or underscore
"""
assert character in ("_", "*")
return {
"type": "Characters",
"data": character,
"_md_type": mdTokenTypes["TokenEmphasis"],
} | 2012fdeb9ca4d9483b4cc403010f9900dcc1230f | 18,725 |
import random
def generate_plan(suite, node):
"""Randomly generates a plan, completely ignoring norms. This is mainly for testing the norm driven algorithm"""
plan = [node]
    actions = next_actions(suite, node)  # local name avoids shadowing the next_actions function
    # print "Next actions ", actions
    while (actions != []):
        a = random.sample(actions, 1)[0]
        node = a.path[1:]
        plan[len(plan):] = node
        node = node[-1]  # if we have a sequence of actions
        actions = next_actions(suite, node)
return plan | 30d967986d1c4237b4b312470d47d1ecce06ecbc | 18,727 |
def update_y(pred_coords, ypart_tracker, history=1500):
"""
    Update the y-tracker, keeping only the most recent `history` detections.
    :param pred_coords: predicted keypoint coordinates
    :param ypart_tracker: dict of y-coordinate histories keyed by body part
    :param history: number of most recent detections to keep
    :return: updated y-tracker
"""
anks_val = (pred_coords[15] + pred_coords[16]) * 0.5
shdr_val = (pred_coords[5] + pred_coords[6]) * 0.5
ypart_tracker['anks'] = np.append(ypart_tracker['anks'], [anks_val], axis=0)
ypart_tracker['shdr'] = np.append(ypart_tracker['shdr'], [shdr_val], axis=0)
ypart_tracker['anks-shdr'] = np.append(ypart_tracker['anks-shdr'], [anks_val - shdr_val], axis=0)
ypart_tracker = {k: v[-history:] for k, v in ypart_tracker.items()}
return ypart_tracker | ee1880e27b121dae4661a93286bf07117bc7bb34 | 18,728 |
def augmentData(features, labels):
"""
    Augment the data by appending horizontally flipped copies of the features
    (flipped along the last axis) with negated labels.
    :param features: array of input samples
    :param labels: array of labels
    :return: augmented (features, labels)
"""
features = np.append(features, features[:, :, ::-1], axis=0)
labels = np.append(labels, -labels, axis=0)
return features, labels | ef684ae2bf9eb4fca9a0636d3b0089020805f4be | 18,729 |
def sigmoid(x):
""" Implement 1 / ( 1 + exp( -x ) ) in terms of tanh."""
return 0.5 * (np.tanh(x / 2.) + 1) | 95d6dd0cd62db2c43df419358ef368609ede42c8 | 18,731 |
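# Quick numerical check of the identity used above: 1/(1+exp(-x)) == 0.5*(tanh(x/2)+1).
import numpy as np

x = np.linspace(-10, 10, 101)
assert np.allclose(1.0 / (1.0 + np.exp(-x)), 0.5 * (np.tanh(x / 2.0) + 1))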
def get_unique_output_values(signals):
"""
Based on segment length, determine how many of the possible four
uniquely identifiable digits are in the set of signals.
"""
unique_digit_count = 0
for signal in signals:
for digit in signal["output"]:
if len(digit) in (2, 3, 4, 7):
unique_digit_count += 1
return unique_digit_count | 84098d4d294bfdd1b983ea70d51da1453b17245a | 18,732 |
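# The four uniquely identifiable seven-segment digits are 1, 7, 4 and 8 (segment
# counts 2, 3, 4 and 7); a tiny made-up signal set illustrating the count.
signals = [{"output": ["ab", "abc", "abcd", "abcdefg", "abcde"]}]
assert get_unique_output_values(signals) == 4   # the 5-segment entry is not counted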
import itertools
def split_and_pad(s, sep, nsplit, pad=None):
""" Splits string s on sep, up to nsplit times.
        Returns the results of the split, potentially padded with
        additional items, up to a total of nsplit+1 items.
"""
l = s.split(sep, nsplit)
return itertools.chain(l, itertools.repeat(None, nsplit+1-len(l))) | 6c439301df7109d9b01a06a87bd7d6adafb8ee1e | 18,733 |
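# Illustration of split_and_pad: with nsplit=3 the result always has four items,
# missing positions padded with None.
parts = list(split_and_pad("a:b", ":", 3))
assert parts == ["a", "b", None, None]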
def transpose_report(report):
"""Transposes the report. Columns into rows"""
return list(map(list, zip(*report))) | bc59f9106496b0b830fdc9ac0266f3b774a8f759 | 18,734 |
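# Example of the zip(*rows) transpose above (assumes a rectangular report).
report = [[1, 2, 3],
          [4, 5, 6]]
assert transpose_report(report) == [[1, 4], [2, 5], [3, 6]]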
def _shape_from_resolution(resolution):
"""
Calculate the shape of the global Earth relief grid given a resolution.
Parameters
----------
resolution : str
Same as the input for load_earth_relief
Returns
-------
shape : (nlat, nlon)
The calculated shape.
Examples
--------
>>> _shape_from_resolution('60m')
(181, 361)
>>> _shape_from_resolution('30m')
(361, 721)
>>> _shape_from_resolution('10m')
(1081, 2161)
"""
minutes = int(resolution[:2])
nlat = 180*60//minutes + 1
nlon = 360*60//minutes + 1
return (nlat, nlon) | c726d599696cee2259bc450606e63480b0991451 | 18,735 |
def __virtual__():
"""
Load module only if cx_Oracle installed
"""
if HAS_CX_ORACLE:
return __virtualname__
return (
False,
"The oracle execution module not loaded: python oracle library not found.",
) | a64eddc8b78e5d7b3c8e0588a72a0c238b4c12d0 | 18,736 |
def get_fuel_from(mass: int) -> int:
"""Gets fuel from mass.
Args:
mass (int): mass for the fuel
Returns:
int: fuel necessary for the mass
"""
return mass // 3 - 2 | 37390c8cb9ba7e84c7b5c14841528d6c38f1589e | 18,739 |
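# Worked examples of the mass // 3 - 2 rule:
#   12   -> 12 // 3 - 2  = 2
#   1969 -> 656 - 2      = 654
assert get_fuel_from(12) == 2
assert get_fuel_from(1969) == 654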
def test_energy_density_function():
"""
Compute the Zeeman energy density over the entire mesh, integrate it, and
compare it to the expected result.
"""
mesh = df.RectangleMesh(df.Point(-50, -50), df.Point(50, 50), 10, 10)
unit_length = 1e-9
H = 1e6
# Create simulation object.
sim = finmag.Simulation(mesh, 1e5, unit_length=unit_length)
# Set uniform magnetisation.
def m_ferromagnetic(pos):
return np.array([0., 0., 1.])
sim.set_m(m_ferromagnetic)
# Assign zeeman object to simulation
sim.add(Zeeman(H * np.array([0., 0., 1.])))
# Get energy density function
edf = sim.get_interaction('Zeeman').energy_density_function()
# Integrate it over the mesh and compare to expected result.
total_energy = df.assemble(edf * df.dx) * unit_length
expected_energy = -mu0 * H
assert (total_energy + expected_energy) < 1e-6 | 10a4da043554a93c3d6c90f32c554741b2fe2c7b | 18,740 |
def my_Bayes_model_mse(params):
""" Function fits the Bayesian model from Tutorial 4
Args :
params (list of positive floats): parameters used by the model (params[0] = posterior scaling)
Returns :
      (scalar) negative log-likelihood: the negative sum of log probabilities
"""
trial_ll = np.zeros_like(true_stim)
## Create the prior Matrix outside of trial loop
alpha=params[0]
prior_mean = 0
prior_sigma1 = 0.5
prior_sigma2 = 3
prior1 = my_gaussian(x, prior_mean, prior_sigma1)
prior2 = my_gaussian(x, prior_mean, prior_sigma2)
prior_combined = (1-alpha) * prior1 + (alpha * prior2)
prior_combined = prior_combined / np.sum(prior_combined)
prior_matrix = np.tile(prior_combined, hypothetical_stim.shape[0]).reshape((hypothetical_stim.shape[0],-1))
## Create posterior matrix outside of trial loop
posterior_matrix = np.zeros_like(likelihood_matrix)
for i_posterior in np.arange(posterior_matrix.shape[0]):
posterior_matrix[i_posterior,:] = np.multiply(prior_matrix[i_posterior,:], likelihood_matrix[i_posterior,:])
posterior_matrix[i_posterior,:] = posterior_matrix[i_posterior,:] / np.sum(posterior_matrix[i_posterior,:])
## Create Binary decision matrix outside of trial loop
binary_decision_matrix = np.zeros_like(posterior_matrix)
for i_posterior in np.arange(posterior_matrix.shape[0]):
mean, _, _ = moments_myfunc(x, posterior_matrix[i_posterior,:])
idx = np.argmin(np.abs(x - mean))
binary_decision_matrix[i_posterior,idx] = 1
# Loop over stimuli
for i_stim in np.arange(true_stim.shape[0]):
input_matrix = np.zeros_like(posterior_matrix)
for i in np.arange(x.shape[0]):
input_matrix[:, i] = my_gaussian(hypothetical_stim, true_stim[i_stim], 1)
input_matrix[:, i] = input_matrix[:, i] / np.sum(input_matrix[:, i])
marginalization_matrix = input_matrix * binary_decision_matrix
marginal = np.sum(marginalization_matrix, axis=0)
marginal = marginal / np.sum(marginal)
action = behaviour[i_stim]
idx = np.argmin(np.abs(x - action))
trial_ll[i_stim] = np.log(marginal[idx] + np.finfo(float).eps)
neg_ll = -np.sum(trial_ll)
return neg_ll | b4a03e3edd0c9894b518ecd2589949aed8337479 | 18,741 |
def validate_request_tween_factory(handler, registry):
"""
Updates request.environ's REQUEST_METHOD to be X_REQUEST_METHOD if present.
    Asserts that POST (and similar) requests are in application/json format,
    with an exception for /metadata/* endpoints.
Apache config:
SetEnvIf Request_Method HEAD X_REQUEST_METHOD=HEAD
"""
def validate_request_tween(request):
# Fix Request method changed by mod_wsgi.
# See: https://github.com/GrahamDumpleton/mod_wsgi/issues/2
environ = request.environ
if 'X_REQUEST_METHOD' in environ:
environ['REQUEST_METHOD'] = environ['X_REQUEST_METHOD']
if request.method in ('GET', 'HEAD'):
# If GET request, don't need to check `request.content_type`
# Includes page text/html requests.
return handler(request)
elif request.content_type != 'application/json':
if request.content_type == 'application/x-www-form-urlencoded' and request.path[0:10] == '/metadata/':
# Special case to allow us to POST to metadata TSV requests via form submission
return handler(request)
detail = "Request content type %s is not 'application/json'" % request.content_type
raise HTTPUnsupportedMediaType(detail)
return handler(request)
return validate_request_tween | 909e7d67044e31c1b3c0a97774d398f7d64d40bb | 18,742 |
async def get_rank(display_number: int, minimal_msg_number: int,
display_total_number: int, group_id: int) -> str:
""" 获取排行榜 """
repeat_list = recorder_obj.repeat_list(group_id)
msg_number_list = recorder_obj.msg_number_list(group_id)
ranking = Ranking(group_id, display_number, minimal_msg_number,
display_total_number, repeat_list, msg_number_list)
str_data = await ranking.ranking()
if not str_data:
        str_data = '暂时还没有满足条件的数据~>_<~'  # user-facing message: "No qualifying data yet ~>_<~"
return str_data | f1ca183890e33b15b77d7693771b37c33af9535e | 18,743 |
def feedback(request):
"""FeedbackForm"""
if (request.method == 'POST'):
form = forms.FeedbackForm(request.POST)
# pdb.set_trace()
if form.is_valid():
form.save()
type = form.cleaned_data['type']
type = dict(form.fields['type'].choices)[type]
settings.EMAIL_HOST_USER += '[email protected]'
send_mail(
'[' + type + '] ' + form.cleaned_data['title'],
'A new feedback was posted on JobPort' + '\n\n' +
form.cleaned_data['body'], ['[email protected]']
)
settings.EMAIL_HOST_USER += ''
messages.success(
request, 'Thanks for filling your precious feedback! :) ')
return HttpResponseRedirect('/')
else:
context = {'form': form}
return render(request, 'jobport/feedback.html', context)
else:
form = forms.FeedbackForm()
context = {'form': form}
return render(request, 'jobport/feedback.html', context) | 188dfa77d7e72555062e25acc15518f90c252b33 | 18,744 |
def get_convolutional_args(call, include_buffers=False, remove_constants=False):
"""A method to extract the arguments from conv2d or depthwise_conv2d extern call."""
args = call.args
conv_args = []
remove_indices = [0]
if remove_constants:
remove_indices += [41, 42, 44, 45]
for i, arg in enumerate(args):
if i in remove_indices:
continue
elif isinstance(arg, tvm.tir.expr.IntImm) or isinstance(arg, tvm.tir.expr.FloatImm):
conv_args.append(arg.value)
elif isinstance(arg, tvm.tir.expr.Load) and not include_buffers:
conv_args.append(arg.index)
else:
conv_args.append(arg)
return conv_args | 01db4d4e025bb9212bcb20a8852a7d4f1250e4b2 | 18,745 |
def view_party(party_id):
"""View dashboard for that party."""
party = party_service.find_party(party_id)
if party is None:
abort(404)
days = party_service.get_party_days(party)
days_until_party = (party.starts_at.date() - date.today()).days
orga_count = orga_team_service.count_memberships_for_party(party.id)
orga_team_count = orga_team_service.count_teams_for_party(party.id)
seating_area_count = seating_area_service.count_areas_for_party(party.id)
seat_count = seat_service.count_seats_for_party(party.id)
ticket_sale_stats = ticket_service.get_ticket_sale_stats(party.id)
tickets_checked_in = ticket_service.count_tickets_checked_in_for_party(
party.id
)
seat_utilization = seat_service.get_seat_utilization(party.id)
guest_servers = guest_server_service.get_all_servers_for_party(party.id)
return {
'party': party,
'days': days,
'days_until_party': days_until_party,
'orga_count': orga_count,
'orga_team_count': orga_team_count,
'seating_area_count': seating_area_count,
'seat_count': seat_count,
'ticket_sale_stats': ticket_sale_stats,
'tickets_checked_in': tickets_checked_in,
'seat_utilization': seat_utilization,
'guest_servers': guest_servers,
} | de02cf21c9afe35a1dc5ef7a896d99d40a9bd43f | 18,746 |
def subcat_add():
"""
    Add a sub-category
"""
if request.method == 'POST':
cat_name = request.form['cat_name']
super_cat_id = request.form['super_cat_id']
        # Check whether the category name already exists
subcat = SubCat.query.filter_by(cat_name=cat_name).count()
        if subcat:
            # the alert text translates to "This sub-category already exists"
            return "<script>alert('该小分类已经存在');history.go(-1);</script>"
        # Assemble the new record
data = SubCat(
super_cat_id = super_cat_id,
cat_name = cat_name,
)
db.session.add(data)
db.session.commit()
return redirect(url_for("admin.subcat_list"))
    supercat = SuperCat.query.all()  # get all top-level (super) categories
return render_template("admin/subcat_add.html",supercat=supercat) | 046011d15be00557b28f9300d813ffc6e23d43e0 | 18,748 |
async def async_setup_entry(hass, config_entry, async_add_devices):
"""Set up the Alexa sensor platform by config_entry."""
return await async_setup_platform(
hass,
config_entry.data,
async_add_devices,
discovery_info=None) | b7a4a8a4573ecc008f43a4a9c3e9dcfa21fb0d78 | 18,749 |
def parse_duration(dur: str) -> int:
"""Generates seconds from a human readable duration."""
if not DURATION_REGEX.match(dur):
raise ValueError('Time passed does not match required format: `XX:XX` or `XX:XX:XX`')
parts = dur.split(':')
seconds = 0
if len(parts) == 3:
seconds += int(parts[0]) * 60 * 60
seconds += int(parts[1]) * 60
seconds += int(parts[2])
else:
seconds += int(parts[0]) * 60
seconds += int(parts[1])
return seconds | ec60b2362d8dc2e898e278b4e1dbf0aca764bc87 | 18,750 |
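# Expected conversions, assuming DURATION_REGEX accepts "MM:SS" and "HH:MM:SS" forms.
assert parse_duration("01:30") == 90        # 1 minute 30 seconds
assert parse_duration("01:00:05") == 3605   # 1 hour, 0 minutes, 5 seconds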
import random
def shorter_uuid(length=7, starter=None, with_original=False):
"""
Generate an even shorter short UUID generated by the shortuuid library.
:param length: Length of trimmed ID.
:param starter: Whether to begin with an already-created ShortUUID.
Useful when using recursively.
:param with_original: Also return initially-generated ShortUUID
:return:
"""
original_id = str(shortuuid.uuid()) if starter is None else starter
n = len(original_id)
dx = min(length, len(original_id)) # ID length
if starter is not None and len(starter) < dx * 2:
original_id = str(shortuuid.uuid())
start_point = random.randint(0, n - dx)
shorter_id = original_id[start_point:(start_point + dx)]
return shorter_id if not with_original else [shorter_id, original_id] | 80eca9d14ff3ebeccd77a4b989dde52e4786a042 | 18,751 |
from typing import Iterable
from typing import Callable
from typing import Optional
from typing import List
from typing import Dict
def group_by(s: Iterable[_ElementType],
key: Callable[[_ElementType], _GroupType],
gfunc: Optional[Callable[[List[_ElementType]], _ResultType]] = None) -> Dict[_GroupType, _ResultType]:
"""
Overview:
Divide the elements into groups.
:param s: Elements.
:param key: Group key, should be a callable object.
:param gfunc: Post-process function for groups, should be a callable object. Default is ``None`` which means \
no post-processing will be performed.
:return: Grouping result.
Examples::
>>> from hbutils.collection import group_by
>>>
>>> foods = [
... 'apple', 'orange', 'pear',
... 'banana', 'fish', 'pork', 'milk',
... ]
>>> group_by(foods, len) # group by length
{5: ['apple'], 6: ['orange', 'banana'], 4: ['pear', 'fish', 'pork', 'milk']}
>>> group_by(foods, len, len) # group and get length
{5: 1, 6: 2, 4: 4}
>>> group_by(foods, lambda x: x[0]) # group by first letter
{'a': ['apple'], 'o': ['orange'], 'p': ['pear', 'pork'], 'b': ['banana'], 'f': ['fish'], 'm': ['milk']}
>>> group_by(foods, lambda x: x[0], len) # group and get length
{'a': 1, 'o': 1, 'p': 2, 'b': 1, 'f': 1, 'm': 1}
"""
gfunc = gfunc or (lambda x: x)
_result_dict: Dict[_GroupType, List[_ElementType]] = {}
for item in s:
_item_key = key(item)
if _item_key not in _result_dict:
_result_dict[_item_key] = []
_result_dict[_item_key].append(item)
return {
key: gfunc(grps)
for key, grps in _result_dict.items()
} | b515e0b3a3467b47ad29552bed39b14eca2d2978 | 18,752 |
def init_templateflow_wf(
bids_dir,
output_dir,
participant_label,
mov_template,
ref_template='MNI152NLin2009cAsym',
use_float=True,
omp_nthreads=None,
mem_gb=3.0,
modality='T1w',
normalization_quality='precise',
name='templateflow_wf',
fs_subjects_dir=None,
):
"""
A Nipype workflow to perform image registration between two templates
*R* and *M*. *R* is the *reference template*, selected by a templateflow
identifier such as ``MNI152NLin2009cAsym``, and *M* is the *moving
    template* (e.g., ``MNI152Lin``). This workflow maps data defined on
template-*M* space onto template-*R* space.
    1. Run the surrogate images through ``antsBrainExtraction``.
2. Recompute :abbr:`INU (intensity non-uniformity)` correction using
the mask obtained in 1).
3. Independently, run spatial normalization of every
:abbr:`INU (intensity non-uniformity)` corrected image
(supplied via ``in_files``) to both templates.
4. Calculate an initialization between both templates, using them directly.
5. Run multi-channel image registration of the images resulting from
3). Both sets of images (one registered to *R* and another to *M*)
are then used as reference and moving images in the registration
framework.
**Parameters**
in_files: list of files
a list of paths pointing to the images that will be used as surrogates
mov_template: str
a templateflow identifier for template-*M*
ref_template: str
a templateflow identifier for template-*R* (default: ``MNI152NLin2009cAsym``).
"""
# number of participants
ninputs = len(participant_label)
ants_env = {
'NSLOTS': '%d' % omp_nthreads,
'ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS': '%d' % omp_nthreads,
'OMP_NUM_THREADS': '%d' % omp_nthreads,
}
# Get path to templates
tpl_ref = str(get_template(ref_template, suffix=modality, desc=None, resolution=1))
tpl_ref_mask = str(get_template(ref_template, suffix='mask',
desc='brain', resolution=1))
tpl_mov = str(get_template(mov_template, suffix=modality, desc=None, resolution=1))
tpl_mov_mask = str(get_template(mov_template, suffix='mask',
desc='brain', resolution=1))
wf = pe.Workflow(name)
inputnode = pe.Node(niu.IdentityInterface(fields=['participant_label']),
name='inputnode')
inputnode.iterables = ('participant_label', sorted(list(participant_label)))
pick_file = pe.Node(niu.Function(function=_bids_pick),
name='pick_file', run_without_submitting=True)
pick_file.inputs.bids_root = bids_dir
ref_bex = init_brain_extraction_wf(
in_template=ref_template,
omp_nthreads=omp_nthreads,
mem_gb=mem_gb,
bids_suffix=modality,
name='reference_bex',
)
mov_bex = init_brain_extraction_wf(
in_template=mov_template,
omp_nthreads=omp_nthreads,
mem_gb=mem_gb,
bids_suffix=modality,
name='moving_bex',
)
ref_norm = pe.Node(
Registration(
from_file=pkgr.resource_filename(
'niworkflows.data', 't1w-mni_registration_%s_000.json' % normalization_quality)),
name='ref_norm', n_procs=omp_nthreads)
ref_norm.inputs.fixed_image = tpl_ref
ref_norm.inputs.fixed_image_masks = tpl_ref_mask
ref_norm.inputs.environ = ants_env
# Register the INU-corrected image to the other template
mov_norm = pe.Node(
Registration(
from_file=pkgr.resource_filename(
'niworkflows.data', 't1w-mni_registration_%s_000.json' % normalization_quality)),
name='mov_norm', n_procs=omp_nthreads)
mov_norm.inputs.fixed_image = tpl_mov
mov_norm.inputs.fixed_image_masks = tpl_mov_mask
mov_norm.inputs.environ = ants_env
# Initialize between-templates transform with antsAI
init_aff = pe.Node(AI(
metric=('Mattes', 32, 'Regular', 0.2),
transform=('Affine', 0.1),
search_factor=(20, 0.12),
principal_axes=False,
convergence=(10, 1e-6, 10),
verbose=True,
fixed_image=tpl_ref,
fixed_image_mask=tpl_ref_mask,
moving_image=tpl_mov,
moving_image_mask=tpl_mov_mask,
environ=ants_env,
), name='init_aff', n_procs=omp_nthreads)
ref_buffer = pe.JoinNode(niu.IdentityInterface(
fields=['fixed_image']),
joinsource='inputnode', joinfield='fixed_image', name='ref_buffer')
mov_buffer = pe.JoinNode(niu.IdentityInterface(
fields=['moving_image']),
joinsource='inputnode', joinfield='moving_image', name='mov_buffer')
flow = pe.Node(
Registration(
from_file=pkgr.resource_filename(
'niworkflows.data', 't1w-mni_registration_%s_000.json' % normalization_quality)),
name='flow_norm', n_procs=omp_nthreads,
)
flow.inputs.fixed_image_masks = tpl_ref_mask
flow.inputs.moving_image_masks = tpl_mov_mask
flow.inputs.metric = [[v] * ninputs for v in flow.inputs.metric]
flow.inputs.metric_weight = [[1 / ninputs] * ninputs
for _ in flow.inputs.metric_weight]
flow.inputs.radius_or_number_of_bins = [
[v] * ninputs for v in flow.inputs.radius_or_number_of_bins]
flow.inputs.sampling_percentage = [
[v] * ninputs for v in flow.inputs.sampling_percentage]
flow.inputs.sampling_strategy = [
[v] * ninputs for v in flow.inputs.sampling_strategy]
flow.inputs.environ = ants_env
# Datasinking
ref_norm_ds = pe.Node(
DerivativesDataSink(base_directory=str(output_dir.parent),
out_path_base=output_dir.name, space=ref_template,
desc='preproc', keep_dtype=True),
name='ref_norm_ds', run_without_submitting=True
)
mov_norm_ds = pe.Node(
DerivativesDataSink(base_directory=str(output_dir.parent),
out_path_base=output_dir.name, space=mov_template,
desc='preproc', keep_dtype=True),
name='mov_norm_ds', run_without_submitting=True
)
xfm_ds = pe.Node(DerivativesDataSink(
base_directory=str(output_dir.parent), out_path_base=output_dir.name,
allowed_entities=['from', 'mode'], mode='image', suffix='xfm',
source_file='group/tpl-{0}_T1w.nii.gz'.format(ref_template),
**{'from': mov_template}),
name='xfm_ds', run_without_submitting=True)
wf.connect([
(inputnode, pick_file, [('participant_label', 'participant_label')]),
(pick_file, ref_bex, [('out', 'inputnode.in_files')]),
(pick_file, mov_bex, [('out', 'inputnode.in_files')]),
(ref_bex, ref_norm, [('outputnode.bias_corrected', 'moving_image'),
('outputnode.out_mask', 'moving_image_masks'),
('norm.forward_transforms', 'initial_moving_transform')]),
(ref_bex, mov_norm, [('outputnode.bias_corrected', 'moving_image')]),
(mov_bex, mov_norm, [('outputnode.out_mask', 'moving_image_masks'),
('norm.forward_transforms', 'initial_moving_transform')]),
(init_aff, flow, [('output_transform', 'initial_moving_transform')]),
(ref_norm, ref_buffer, [('warped_image', 'fixed_image')]),
(mov_norm, mov_buffer, [('warped_image', 'moving_image')]),
(ref_buffer, flow, [('fixed_image', 'fixed_image')]),
(mov_buffer, flow, [('moving_image', 'moving_image')]),
(pick_file, ref_norm_ds, [('out', 'source_file')]),
(ref_norm, ref_norm_ds, [('warped_image', 'in_file')]),
(pick_file, mov_norm_ds, [('out', 'source_file')]),
(mov_norm, mov_norm_ds, [('warped_image', 'in_file')]),
(flow, xfm_ds, [('composite_transform', 'in_file')]),
])
if fs_subjects_dir:
fssource = pe.Node(
FreeSurferSource(subjects_dir=str(fs_subjects_dir)),
name='fssource', run_without_submitting=True)
tonative = pe.Node(fs.Label2Vol(subjects_dir=str(fs_subjects_dir)),
name='tonative')
tonii = pe.Node(
fs.MRIConvert(out_type='niigz', resample_type='nearest'),
name='tonii')
ref_aparc = pe.Node(
ApplyTransforms(interpolation='MultiLabel', float=True,
reference_image=tpl_ref, environ=ants_env),
name='ref_aparc', mem_gb=1, n_procs=omp_nthreads
)
mov_aparc = pe.Node(
ApplyTransforms(interpolation='MultiLabel', float=True,
reference_image=tpl_mov, environ=ants_env),
name='mov_aparc', mem_gb=1, n_procs=omp_nthreads
)
ref_aparc_buffer = pe.JoinNode(
niu.IdentityInterface(fields=['aparc']),
joinsource='inputnode', joinfield='aparc', name='ref_aparc_buffer')
ref_join_labels = pe.Node(
AntsJointFusion(
target_image=[tpl_ref],
out_label_fusion='merged_aparc.nii.gz',
out_intensity_fusion_name_format='merged_aparc_intensity_%d.nii.gz',
out_label_post_prob_name_format='merged_aparc_posterior_%d.nii.gz',
out_atlas_voting_weight_name_format='merged_aparc_weight_%d.nii.gz',
environ=ants_env,
),
name='ref_join_labels', n_procs=omp_nthreads)
ref_join_labels_ds = pe.Node(
DerivativesDataSink(
base_directory=str(output_dir.parent),
out_path_base=output_dir.name,
suffix='dtissue', desc='aparc', keep_dtype=False,
source_file='group/tpl-{0}_T1w.nii.gz'.format(ref_template)),
name='ref_join_labels_ds', run_without_submitting=True)
ref_join_probs_ds = pe.Node(
DerivativesDataSink(
base_directory=str(output_dir.parent),
out_path_base=output_dir.name,
suffix='probtissue', desc='aparc', keep_dtype=False,
source_file='group/tpl-{0}_T1w.nii.gz'.format(ref_template)),
name='ref_join_probs_ds', run_without_submitting=True)
# ref_join_voting_ds = pe.Node(
# DerivativesDataSink(
# base_directory=str(output_dir.parent),
# out_path_base=output_dir.name, space=ref_template,
# suffix='probtissue', desc='aparcvoting', keep_dtype=False,
# source_file='group/tpl-{0}_T1w.nii.gz'.format(ref_template)),
# name='ref_join_voting_ds', run_without_submitting=True)
mov_aparc_buffer = pe.JoinNode(
niu.IdentityInterface(fields=['aparc']),
joinsource='inputnode', joinfield='aparc', name='mov_aparc_buffer')
mov_join_labels = pe.Node(
AntsJointFusion(
target_image=[tpl_mov],
out_label_fusion='merged_aparc.nii.gz',
out_intensity_fusion_name_format='merged_aparc_intensity_%d.nii.gz',
out_label_post_prob_name_format='merged_aparc_posterior_%d.nii.gz',
out_atlas_voting_weight_name_format='merged_aparc_weight_%d.nii.gz',
environ=ants_env,
),
name='mov_join_labels', n_procs=omp_nthreads)
mov_join_labels_ds = pe.Node(
DerivativesDataSink(
base_directory=str(output_dir.parent),
out_path_base=output_dir.name,
suffix='dtissue', desc='aparc', keep_dtype=False,
source_file='group/tpl-{0}_T1w.nii.gz'.format(mov_template)),
name='mov_join_labels_ds', run_without_submitting=True)
mov_join_probs_ds = pe.Node(
DerivativesDataSink(
base_directory=str(output_dir.parent),
out_path_base=output_dir.name,
suffix='probtissue', desc='aparc', keep_dtype=False,
source_file='group/tpl-{0}_T1w.nii.gz'.format(mov_template)),
name='mov_join_probs_ds', run_without_submitting=True)
ref_aparc_ds = pe.Node(
DerivativesDataSink(base_directory=str(output_dir.parent),
out_path_base=output_dir.name, space=ref_template,
suffix='dtissue', desc='aparc', keep_dtype=False),
name='ref_aparc_ds', run_without_submitting=True
)
mov_aparc_ds = pe.Node(
DerivativesDataSink(base_directory=str(output_dir.parent),
out_path_base=output_dir.name, space=mov_template,
suffix='dtissue', desc='aparc', keep_dtype=False),
name='mov_aparc_ds', run_without_submitting=True
)
# Extract surfaces
cifti_wf = init_gifti_surface_wf(
name='cifti_surfaces',
subjects_dir=str(fs_subjects_dir))
# Move surfaces to template spaces
gii2csv = pe.MapNode(GiftiToCSV(itk_lps=True),
iterfield=['in_file'], name='gii2csv')
ref_map_surf = pe.MapNode(
ApplyTransformsToPoints(dimension=3, environ=ants_env),
n_procs=omp_nthreads, name='ref_map_surf', iterfield=['input_file'])
ref_csv2gii = pe.MapNode(
CSVToGifti(itk_lps=True),
name='ref_csv2gii', iterfield=['in_file', 'gii_file'])
ref_surfs_buffer = pe.JoinNode(
niu.IdentityInterface(fields=['surfaces']),
joinsource='inputnode', joinfield='surfaces', name='ref_surfs_buffer')
ref_surfs_unzip = pe.Node(UnzipJoinedSurfaces(), name='ref_surfs_unzip',
run_without_submitting=True)
ref_ply = pe.MapNode(SurfacesToPointCloud(), name='ref_ply',
iterfield=['in_files'])
ref_recon = pe.MapNode(PoissonRecon(), name='ref_recon',
iterfield=['in_file'])
ref_avggii = pe.MapNode(PLYtoGifti(), name='ref_avggii',
iterfield=['in_file', 'surf_key'])
ref_smooth = pe.MapNode(fs.SmoothTessellation(), name='ref_smooth',
iterfield=['in_file'])
ref_surfs_ds = pe.Node(
DerivativesDataSink(
base_directory=str(output_dir.parent),
out_path_base=output_dir.name, space=ref_template,
keep_dtype=False, compress=False),
name='ref_surfs_ds', run_without_submitting=True)
ref_avg_ds = pe.Node(
DerivativesDataSink(
base_directory=str(output_dir.parent),
out_path_base=output_dir.name, space=ref_template,
keep_dtype=False, compress=False,
source_file='group/tpl-{0}_T1w.nii.gz'.format(ref_template)),
name='ref_avg_ds', run_without_submitting=True)
mov_map_surf = pe.MapNode(
ApplyTransformsToPoints(dimension=3, environ=ants_env),
n_procs=omp_nthreads, name='mov_map_surf', iterfield=['input_file'])
mov_csv2gii = pe.MapNode(
CSVToGifti(itk_lps=True),
name='mov_csv2gii', iterfield=['in_file', 'gii_file'])
mov_surfs_buffer = pe.JoinNode(
niu.IdentityInterface(fields=['surfaces']),
joinsource='inputnode', joinfield='surfaces', name='mov_surfs_buffer')
mov_surfs_unzip = pe.Node(UnzipJoinedSurfaces(), name='mov_surfs_unzip',
run_without_submitting=True)
mov_ply = pe.MapNode(SurfacesToPointCloud(), name='mov_ply',
iterfield=['in_files'])
mov_recon = pe.MapNode(PoissonRecon(), name='mov_recon',
iterfield=['in_file'])
mov_avggii = pe.MapNode(PLYtoGifti(), name='mov_avggii',
iterfield=['in_file', 'surf_key'])
mov_smooth = pe.MapNode(fs.SmoothTessellation(), name='mov_smooth',
iterfield=['in_file'])
mov_surfs_ds = pe.Node(
DerivativesDataSink(
base_directory=str(output_dir.parent),
out_path_base=output_dir.name, space=mov_template,
keep_dtype=False, compress=False),
name='mov_surfs_ds', run_without_submitting=True)
mov_avg_ds = pe.Node(
DerivativesDataSink(
base_directory=str(output_dir.parent),
out_path_base=output_dir.name, space=mov_template,
keep_dtype=False, compress=False,
source_file='group/tpl-{0}_T1w.nii.gz'.format(mov_template)),
name='mov_avg_ds', run_without_submitting=True)
wf.connect([
(inputnode, fssource, [(('participant_label', _sub_decorate), 'subject_id')]),
(inputnode, cifti_wf, [
(('participant_label', _sub_decorate), 'inputnode.subject_id')]),
(pick_file, cifti_wf, [('out', 'inputnode.in_t1w')]),
(pick_file, tonii, [('out', 'reslice_like')]),
# Select DKT aparc
(fssource, tonative, [(('aparc_aseg', _last), 'seg_file'),
('rawavg', 'template_file'),
('aseg', 'reg_header')]),
(tonative, tonii, [('vol_label_file', 'in_file')]),
(tonii, ref_aparc, [('out_file', 'input_image')]),
(tonii, mov_aparc, [('out_file', 'input_image')]),
(ref_norm, ref_aparc, [('composite_transform', 'transforms')]),
(mov_norm, mov_aparc, [('composite_transform', 'transforms')]),
(ref_buffer, ref_join_labels, [
('fixed_image', 'atlas_image')]),
(ref_aparc, ref_aparc_buffer, [('output_image', 'aparc')]),
(ref_aparc_buffer, ref_join_labels, [
('aparc', 'atlas_segmentation_image')]),
(mov_buffer, mov_join_labels, [
('moving_image', 'atlas_image')]),
(mov_aparc, mov_aparc_buffer, [('output_image', 'aparc')]),
(mov_aparc_buffer, mov_join_labels, [
('aparc', 'atlas_segmentation_image')]),
# Datasinks
(ref_join_labels, ref_join_labels_ds, [('out_label_fusion', 'in_file')]),
(ref_join_labels, ref_join_probs_ds, [
('out_label_post_prob', 'in_file'),
(('out_label_post_prob', _get_extra), 'extra_values')]),
# (ref_join_labels, ref_join_voting_ds, [
# ('out_atlas_voting_weight_name_format', 'in_file')]),
(mov_join_labels, mov_join_labels_ds, [('out_label_fusion', 'in_file')]),
(mov_join_labels, mov_join_probs_ds, [
('out_label_post_prob', 'in_file'),
(('out_label_post_prob', _get_extra), 'extra_values')]),
(pick_file, ref_aparc_ds, [('out', 'source_file')]),
(ref_aparc, ref_aparc_ds, [('output_image', 'in_file')]),
(pick_file, mov_aparc_ds, [('out', 'source_file')]),
(mov_aparc, mov_aparc_ds, [('output_image', 'in_file')]),
# Mapping ref surfaces
(cifti_wf, gii2csv, [
(('outputnode.surf_norm', _discard_inflated), 'in_file')]),
(gii2csv, ref_map_surf, [('out_file', 'input_file')]),
(ref_norm, ref_map_surf, [
(('inverse_composite_transform', _ensure_list), 'transforms')]),
(ref_map_surf, ref_csv2gii, [('output_file', 'in_file')]),
(cifti_wf, ref_csv2gii, [
(('outputnode.surf_norm', _discard_inflated), 'gii_file')]),
(pick_file, ref_surfs_ds, [('out', 'source_file')]),
(ref_csv2gii, ref_surfs_ds, [
('out_file', 'in_file'),
(('out_file', _get_surf_extra), 'extra_values')]),
(ref_csv2gii, ref_surfs_buffer, [('out_file', 'surfaces')]),
(ref_surfs_buffer, ref_surfs_unzip, [('surfaces', 'in_files')]),
(ref_surfs_unzip, ref_ply, [('out_files', 'in_files')]),
(ref_ply, ref_recon, [('out_file', 'in_file')]),
(ref_recon, ref_avggii, [('out_file', 'in_file')]),
(ref_surfs_unzip, ref_avggii, [('surf_keys', 'surf_key')]),
(ref_avggii, ref_smooth, [('out_file', 'in_file')]),
(ref_smooth, ref_avg_ds, [
('surface', 'in_file'),
(('surface', _get_surf_extra), 'extra_values')]),
# Mapping mov surfaces
(gii2csv, mov_map_surf, [('out_file', 'input_file')]),
(mov_norm, mov_map_surf, [
(('inverse_composite_transform', _ensure_list), 'transforms')]),
(mov_map_surf, mov_csv2gii, [('output_file', 'in_file')]),
(cifti_wf, mov_csv2gii, [
(('outputnode.surf_norm', _discard_inflated), 'gii_file')]),
(pick_file, mov_surfs_ds, [('out', 'source_file')]),
(mov_csv2gii, mov_surfs_ds, [
('out_file', 'in_file'),
(('out_file', _get_surf_extra), 'extra_values')]),
(mov_csv2gii, mov_surfs_buffer, [('out_file', 'surfaces')]),
(mov_surfs_buffer, mov_surfs_unzip, [('surfaces', 'in_files')]),
(mov_surfs_unzip, mov_ply, [('out_files', 'in_files')]),
(mov_ply, mov_recon, [('out_file', 'in_file')]),
(mov_recon, mov_avggii, [('out_file', 'in_file')]),
(mov_surfs_unzip, mov_avggii, [('surf_keys', 'surf_key')]),
(mov_avggii, mov_smooth, [('out_file', 'in_file')]),
(mov_smooth, mov_avg_ds, [
('surface', 'in_file'),
(('surface', _get_surf_extra), 'extra_values')]),
])
return wf | 677218b13cbfc48881440523d87667eaed7ea2e8 | 18,753 |
def Qest(ICobj, r=None):
"""
Estimate Toomre Q at r (optional) for ICs, assuming omega=epicyclic
frequency. Ignores disk self-gravity
"""
if not hasattr(ICobj, 'sigma'):
        raise ValueError('Could not find surface density profile (sigma)')
G = SimArray(1.0, 'G')
kB = SimArray(1.0, 'k')
if r is None:
r = ICobj.sigma.r_bins
sigma = ICobj.sigma(r)
T = ICobj.T(r)
M = ICobj.settings.physical.M
m = ICobj.settings.physical.m
M = match_units(M, 'Msol')[0]
m = match_units(m, 'm_p')[0]
gamma = ICobj.settings.physical.gamma_cs()
Q = np.sqrt(M*kB*T*gamma/(G*m*r**3))/(np.pi*sigma)
Q.convert_units('1')
return Q | f262c0f68683dc069dd983981b2cbd1d9a9e608a | 18,754 |
def get_projector_csr_file(config_name: str) -> str:
"""Returns full path to projector server crt file"""
return join(get_run_configs_dir(), config_name, f'{PROJECTOR_JKS_NAME}.csr') | 159bc798d28bf23ce06d356591d8c41bcea40356 | 18,755 |
def make_epsilon_greedy_policy(Q: defaultdict, epsilon: float, nA: int) -> callable:
"""
Creates an epsilon-greedy policy based on a given Q-function and epsilon.
I.e. create weight vector from which actions get sampled.
:param Q: tabular state-action lookup function
:param epsilon: exploration factor
:param nA: size of action space to consider for this policy
"""
def policy_fn(observation):
policy = np.ones(nA) * epsilon / nA
best_action = np.random.choice(np.flatnonzero( # random choice for tie-breaking only
Q[observation] == Q[observation].max()
))
policy[best_action] += (1 - epsilon)
return policy
return policy_fn | f0fe733b18b416939db44acd830ee605bc41e18f | 18,758 |
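# Minimal sketch of using the epsilon-greedy factory above; the Q table, state name
# and action count are made up for illustration.
import numpy as np
from collections import defaultdict

nA = 4
Q = defaultdict(lambda: np.zeros(nA))
Q["s0"] = np.array([0.1, 0.5, 0.2, 0.2])

policy = make_epsilon_greedy_policy(Q, epsilon=0.1, nA=nA)
probs = policy("s0")                        # -> [0.025, 0.925, 0.025, 0.025]
action = np.random.choice(nA, p=probs)      # sample an action from those weights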
def write_init(proxy_parameters=None, exception=None):
"""Encodes and returns an MPI ('Metadata Init') response."""
return _write_init(Method.MPI, MetadataProviderError, proxy_parameters,
exception) | 0a73c4949796a93549e208da523e805894170193 | 18,759 |
def pfam_to_pubmed(family):
"""get a list of associated pubmed ids for given pfam access key.
:param family: pfam accession key of family
:type family: str
:return: List of associated Pubmed ids
:rettype:list"""
url='https://pfam.xfam.org/family/'+family
pattern='http://www.ncbi.nlm.nih.gov/pubmed/'
return _xfam_to(url,pattern) | e3d63050d7e2e782ccd9d376fb4cd2d33c177be6 | 18,760 |
def cvConvexHull2(input, hull_storage=None, orientation=CV_CLOCKWISE, return_points=0):
"""CvSeq_or_CvMat cvConvexHull2(list_or_tuple_of_CvPointXYZ input, void* hull_storage=NULL, int orientation=CV_CLOCKWISE, int return_points=0)
Finds convex hull of point set
[ctypes-opencv] OpenCV's note: a vertex of the detected convex hull can be represented by:
a point of the same type with every point in 'input', if return_points==1
an index to a point in 'input', if return_points==0 and hull_storage is a CvMat
a pointer to a point in 'input', if return_points==0 and hull_storage is a CvStorage
[ctypes-opencv] If input is a (subclass of) CvSeq, 'hull_storage' can be:
None: detected vertices are stored in input's storage
an instance of CvStorage or CvMat: detected vertices are stored here
[ctypes-opencv] If input is 1d CvMat of 2D 32-bit points, 'hull_storage' can be:
None: 'hull_storage' is internally created as a 1d CvMat of 2D 32-bit points.
an instance of CvStorage or CvMat: detected vertices are stored here
[ctypes-opencv] In any case, the function returns a sequence (CvSeq) of detected vertices if 'hull_storage' is an instance CvStorage, or 'hull_storage' itself if otherwise.
"""
if isinstance(input, _CvSeqStructure): # a sequence
return pointee(_cvConvexHull2(input, hull_storage, orientation, return_points), input if hull_storage is None else hull_storage)
if hull_storage is None:
hull_storage = cvCreateMat(1, input.rows*input.cols, CV_MAT_TYPE(input) if return_points else CV_32SC1)
_cvConvexHull2(input, hull_storage, orientation, return_points)
return hull_storage | 3def10577e29e6b9bcf2611ad194dca2f6e2feb7 | 18,761 |
from typing import Union
from typing import Optional
from typing import Dict
def init_classifier(config: Union[str, mmcv.Config],
checkpoint: Optional[str] = None,
device: str = 'cuda:0',
options: Optional[Dict] = None) -> nn.Module:
"""Prepare a few shot classifier from config file.
Args:
config (str or :obj:`mmcv.Config`): Config file path or the config
object.
checkpoint (str | None): Checkpoint path. If left as None, the model
will not load any weights. Default: None.
device (str): Runtime device. Default: 'cuda:0'.
options (dict | None): Options to override some settings in the
used config. Default: None.
Returns:
nn.Module: The constructed classifier.
"""
if isinstance(config, str):
config = mmcv.Config.fromfile(config)
elif not isinstance(config, mmcv.Config):
raise TypeError('config must be a filename or Config object, '
f'but got {type(config)}')
if options is not None:
config.merge_from_dict(options)
model = build_classifier(config.model)
if checkpoint is not None:
map_loc = 'cpu' if device == 'cpu' else None
load_checkpoint(model, checkpoint, map_location=map_loc)
# save the config in the model for convenience in later use
model.cfg = config
model.to(device)
model.eval()
return model | 20f819892295f6bfeb9c01f4e6d558731a2f8e68 | 18,762 |
def Newton_method(f, df, start:float=0.0, max_step:int=32, sign_dig:int=6)->float:
"""
    Newton's method: iterates x_{n+1} = x_n - f(x_n)/f'(x_n) via the fixed-point solver.
    ---------------------------
    Args:
        f: function whose root is sought.
        df: derivative of f.
        start: initial guess.
        max_step: maximum number of iterations.
        sign_dig: number of significant digits used as the convergence criterion.
    Returns:
        Approximate root of f (float).
    Raises:
        None.
"""
fun = lambda x: x - f(x)/df(x)
return fixed_point(fun, start, max_step, sign_dig) | d3a803a1a10b6c6d34831efeccd6fb7bae43689a | 18,764 |
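# The wrapped update is x_{n+1} = x_n - f(x_n)/f'(x_n); a standalone illustration
# (not using the fixed_point helper) finding sqrt(2) as the root of x**2 - 2.
f = lambda x: x ** 2 - 2
df = lambda x: 2 * x
x = 1.0
for _ in range(6):
    x = x - f(x) / df(x)
assert abs(x - 2 ** 0.5) < 1e-10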
def get_all(isamAppliance, count=None, start=None, filter=None, check_mode=False, force=False):
"""
Retrieve a list of federations
"""
return isamAppliance.invoke_get("Retrieve a list of federations",
"{0}/{1}".format(uri, tools.create_query_string(count=count, start=start,
filter=filter)),
requires_modules=requires_modules,
requires_version=requires_version) | d65529bfc953976247fd44cb50051d5efddf10ea | 18,765 |
def roty(theta):
"""
Rotation about Y-axis
@type theta: number
@param theta: the rotation angle
@rtype: 3x3 orthonormal matrix
@return: rotation about Y-axis
@see: L{rotx}, L{rotz}, L{rotvec}
"""
ct = cos(theta)
st = sin(theta)
return mat([[ct, 0, st],
[0, 1, 0],
[-st, 0, ct]]) | 702051efbd9f0999e04d5d7faca207c53520d712 | 18,766 |
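# Sanity check (assuming numpy's mat, pi and allclose, matching the module's imports):
# rotating the z-axis by +90 degrees about Y maps it onto the x-axis.
from numpy import mat, pi, allclose

v = roty(pi / 2) * mat([[0], [0], [1]])
assert allclose(v, mat([[1], [0], [0]]))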
def flush(name, family="ipv4", ignore_absence=False, **kwargs):
"""
.. versionadded:: 2014.7.0
.. versionchanged:: Magnesium
Flush current nftables state
family
Networking family, either ipv4 or ipv6
ignore_absence
If set to True, attempts to flush a non-existent table will not
result in a failed state.
.. versionadded:: Magnesium
"""
ret = {"name": name, "changes": {}, "result": None, "comment": ""}
if __opts__["test"]:
ret["comment"] = "nftables flush not performed in test mode."
return ret
for ignore in _STATE_INTERNAL_KEYWORDS:
if ignore in kwargs:
del kwargs[ignore]
if "table" not in kwargs:
kwargs["table"] = "filter"
check_table = __salt__["nftables.check_table"](kwargs["table"], family=family)
if not ignore_absence and not check_table["result"]:
ret["result"] = False
ret[
"comment"
] = "Failed to flush table {} in family {}, table does not exist.".format(
kwargs["table"], family
)
return ret
if "chain" not in kwargs:
kwargs["chain"] = ""
else:
check_chain = __salt__["nftables.check_chain"](
kwargs["table"], kwargs["chain"], family=family
)
if not ignore_absence and not check_chain["result"]:
ret["result"] = False
ret[
"comment"
] = "Failed to flush chain {} in table {} in family {}, chain does not exist.".format(
kwargs["chain"], kwargs["table"], family
)
return ret
res = __salt__["nftables.flush"](kwargs["table"], kwargs["chain"], family)
if res["result"] or (
ignore_absence and (not check_table["result"] or not check_chain["result"])
):
ret["changes"] = {"locale": name}
ret["result"] = True
ret["comment"] = "Flush nftables rules in {} table {} chain {} family".format(
kwargs["table"], kwargs["chain"], family
)
return ret
else:
ret["result"] = False
ret["comment"] = "Failed to flush nftables rules"
return ret | 67ab6d2f7e337ff5a68704be14d605298a1447aa | 18,767 |
def solution(s):
"""
Check if a string has properly matching brackets
:param s: String to verify if it is well-formed
:return: 1 if the brackets are properly matching, 0 otherwise
"""
return check_matching_brackets(s, opening="(", closing=")") | 4ba1bb92e0a1db05557980420f2fac3a88b93086 | 18,768 |
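# A minimal sketch of the check_matching_brackets helper assumed above: a running
# depth counter that must never go negative and must end at zero (hypothetical
# implementation, shown only to make the snippet self-contained).
def check_matching_brackets(s, opening="(", closing=")"):
    depth = 0
    for ch in s:
        if ch == opening:
            depth += 1
        elif ch == closing:
            depth -= 1
            if depth < 0:      # closing bracket with no matching opener
                return 0
    return 1 if depth == 0 else 0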
def process_to_annotation_data(df, class_names, video_fps, min_len):
"""
This function cleans the output data, so that there are
no jumping frames.
"""
j = 1 # Helper
# Minimum qty of frames of the same task in order to
# consider it a whole task
min_frames = int(float(min_len) * float(video_fps) * float(0.6))
# Initialize variables
df["subgroup"] = (df.iloc[:, -1] != df.iloc[:, -1].shift(1)).cumsum()
added = (
df["subgroup"]
.value_counts()[df["subgroup"].value_counts() < (j + 1)]
.index.tolist()
)
    # Modify jumping frames by considering the surrounding frames:
    # groups of identical labels shorter than min_frames are merged into their neighbours
    for jj in range(min_frames):
j = jj + 1
df["subgroup"] = (df.iloc[:, -2] != df.iloc[:, -2].shift(1)).cumsum()
added = (
df["subgroup"]
.value_counts()[df["subgroup"].value_counts() < (j + 1)]
.index.tolist()
)
cnt = 0
i_prev = 0
i_prev_cnt = 0
while len(added) > 0:
added.sort()
i = added[0]
k = 1 # Helper
prev = []
after = []
prev_yes = 0
after_yes = 0
if (i - k) > 0:
prev = [df[df["subgroup"] == (i - k)].iloc[0, -2]] * len(
df[df["subgroup"] == (i - k)]
)
prev_yes = 1
if (i + k) < max(df["subgroup"]) + 1:
after = [df[df["subgroup"] == (i + k)].iloc[0, -2]] * len(
df[df["subgroup"] == (i + k)]
)
after_yes = 1
check_loop = True
if (prev_yes + after_yes) == 2:
if mode(prev).mode[0] == mode(after).mode[0]:
check_loop = False
if check_loop:
k = 1 # Helper
while len(prev) < j + 2 - i_prev_cnt:
k += 1
if (i - k) > 0:
prev_i = [df[df["subgroup"] == (i - k)].iloc[0, -2]] * len(
df[df["subgroup"] == (i - k)]
)
prev.extend(prev_i)
else:
break
k = 1 # Helper
while len(after) < j + 2 - i_prev_cnt:
k += 1
if (i + k) < max(df["subgroup"]) + 1:
prev_i = [df[df["subgroup"] == (i + k)].iloc[0, -2]] * len(
df[df["subgroup"] == (i + k)]
)
after.extend(prev_i)
else:
break
changeTo = prev
changeTo.extend(after)
changeTo = mode(changeTo).mode[0]
else:
changeTo = mode(prev).mode[0]
change_idx = df.index[df["subgroup"] == i].tolist()
df.iloc[change_idx, -2] = changeTo
df["subgroup"] = (df.iloc[:, -2] != df.iloc[:, -2].shift(1)).cumsum()
added = (
df["subgroup"]
.value_counts()[df["subgroup"].value_counts() < (j + 1)]
.index.tolist()
)
added.sort()
if i == i_prev:
i_prev_cnt += 1
else:
i_prev_cnt = 0
i_prev = i
cnt += 1
if cnt > max(df["subgroup"]) * (j + 2):
break
# Modify the output shape so that for each task we have start frame and end frame
output_df = pd.DataFrame(columns=["task", "startTime", "endTime"])
for i in range(max(df["subgroup"])):
df_i = df[df["subgroup"] == (i + 1)]
task_str = str(class_names[int(df_i.iloc[0]["task_label"])])
start_frame = int(min(df_i["frame"]))
start_frame = frame_to_time(start_frame, video_fps)
end_frame = int(max(df_i["frame"]))
end_frame = frame_to_time(end_frame, video_fps)
output_df = output_df.append(
pd.DataFrame(
[[task_str] + [start_frame] + [end_frame]],
columns=["task", "startTime", "endTime"],
)
)
return output_df | eaa0537b217030664562489a2ceeec63cf7b32c0 | 18,769 |
def reduce_memmap(a):
"""Pickle the descriptors of a memmap instance to reopen on same file."""
m = _get_backing_memmap(a)
if m is not None:
# m is a real mmap backed memmap instance, reduce a preserving striding
# information
return _reduce_memmap_backed(a, m)
else:
# This memmap instance is actually backed by a regular in-memory
# buffer: this can happen when using binary operators on numpy.memmap
# instances
return (loads, (dumps(np.asarray(a), protocol=HIGHEST_PROTOCOL),)) | 4e4caf6bb5f1be1f62537c45671010232665ec0c | 18,770 |
def load_sparse_csr(filename):
"""Load a saved sparse matrix in csr format. Stolen from above source."""
loader = np.load(filename)
return sparse.csr_matrix((loader['data'], loader['indices'],
loader['indptr']), shape=loader['shape']) | 9654e8baedf5ada8b626d86860aef2335a04b565 | 18,771 |
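# A matching saver for the loader above, sketched under the assumption that the
# file was produced with np.savez using these exact key names.
import numpy as np

def save_sparse_csr(filename, matrix):
    """Save a scipy.sparse CSR matrix so load_sparse_csr can reopen it."""
    np.savez(filename, data=matrix.data, indices=matrix.indices,
             indptr=matrix.indptr, shape=matrix.shape)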
from typing import Optional
def blank(name: Optional[str]) -> Output:
"""Generate a blank `Output` instance."""
return Output(file_suffix=name or _DEFAULT_SUFFIX, variables=dict()) | 0811dd8875983b89a8ae82a204243419effddcb4 | 18,772 |
def estatistica_regras(regras_pt, regras_lgp):
"""
Contagem das regras morfossintáticas no corpus.
:param regras_pt: Lado português das regras (lista)
:param regras_lgp: Lado LGP das regras (lista)
:return: Dicionário com a frequência de cada regra. Ex: {"(0, 'INT')": 1, "(1, 'CAN')": 1, "(2, 'INT')": 1}
"""
estatistica = {}
repetido = set()
for i in range(len(regras_pt)):
tipo = regras_pt[i][1]
if i in repetido:
continue
if tipo == "":
tipo = "CAN"
if str((i,tipo)) not in estatistica.keys():
estatistica[str((i, tipo))]= 1
for j in range(len(regras_pt)):
a = regras_pt[i]
b = regras_lgp[i]
c = regras_pt[j]
d = regras_lgp[j]
if i >= j:
continue
if j in repetido:
continue
if compara_regra(a,b,c,d):
repetido.add(j)
tipo = regras_pt[j][1]
if tipo == "":
tipo = "CAN"
estatistica[str((i,tipo))] +=1
if str((j, tipo)) in estatistica.keys():
del estatistica[str((j,tipo))]
else:
tipo = regras_pt[j][1]
if tipo == "":
tipo = "CAN"
if str((j, tipo)) not in estatistica.keys():
estatistica.setdefault(str((j,tipo)),0)
estatistica[str((j, tipo))] += 1
return estatistica | 67e1edae2d0418e1a36eefbb16ea4795b04728d6 | 18,773 |
def bq_create_dataset(bq_client):
"""Creates the BigQuery dataset.
If the dataset already exists, the existing dataset will be returned.
Dataset will be create in the location specified by DATASET_LOCATION.
Args:
bq_client: BigQuery client
Returns:
BigQuery dataset that will be used to store data.
"""
dataset_id = "{}.{}".format(bq_client.project, DATASET_NAME)
dataset = bigquery.Dataset(dataset_id)
dataset.location = DATASET_LOCATION
dataset = bq_client.create_dataset(dataset, exists_ok=True)
return dataset | ff2c0072210541261aff58ef8c590e94260e046d | 18,775 |
def root_node():
"""
Returns DCC scene root node
:return: str
"""
return scene.get_root_node() | 3a632bc0887a5c3a5696ec10f4387c917d12bfe5 | 18,776 |
def firsts(things):
"""
FIRSTS list
outputs a list containing the FIRST of each member of the input
list. It is an error if any member of the input list is empty.
(The input itself may be empty, in which case the output is also
empty.) This could be written as::
to firsts :list
output map \"first :list
end
but is provided as a primitive in order to speed up the iteration
tools MAP, MAP.SE, and FOREACH::
to transpose :matrix
if emptyp first :matrix [op []]
op fput firsts :matrix transpose bfs :matrix
end
"""
return [first(thing) for thing in things] | 72141a7409cb17ac6785eabde91b45d5e9e0869f | 18,777 |
def inf_set_mark_code(*args):
"""
inf_set_mark_code(_v=True) -> bool
"""
return _ida_ida.inf_set_mark_code(*args) | 0395ab40bac5f036210802fdf548534c83c78951 | 18,778 |
def get_letter(xml):
"""
:param xml:
:return: everything between <bank> tag
"""
try:
left, right = xml.index('<bank '), xml.index('</bank>') + _BANK_OFFSET
return xml[left:right]
except ValueError:
return None | 3c7a601b2a25969902d530e3e17a48ddcf0819c1 | 18,779 |
def PowercycleNode(opts, args):
"""Remove a node from the cluster.
@param opts: the command line options selected by the user
@type args: list
@param args: should contain only one element, the name of
the node to be removed
@rtype: int
@return: the desired exit code
"""
node = args[0]
if (not opts.confirm and
not AskUser("Are you sure you want to hard powercycle node %s?" % node)):
return 2
op = opcodes.OpNodePowercycle(node_name=node, force=opts.force)
result = SubmitOrSend(op, opts)
if result:
ToStderr(result)
return 0 | 557b48309bb897da29cc1c1f6f724cd6d3959e23 | 18,781 |
def collect_data(bids_dir, participant_label, queries, filters=None, bids_validate=True):
"""
Uses pybids to retrieve the input data for a given participant
"""
if isinstance(bids_dir, BIDSLayout):
layout = bids_dir
else:
layout = BIDSLayout(str(bids_dir), validate=bids_validate)
bids_filters = filters or {}
for acq, entities in bids_filters.items():
queries[acq].update(entities)
subj_data = {
dtype: sorted(
layout.get(
return_type="file",
subject=participant_label,
extension=["nii", "nii.gz"],
**query
)
)
for dtype, query in queries.items()
}
return subj_data, layout | eaa15a3b3dacbae7c16b03f0c6347d71d939b57d | 18,782 |
def minimaldescriptives(inlist):
"""this function takes a clean list of data and returns the N, sum, mean
and sum of squares. """
N = 0
sum = 0.0
SS = 0.0
for i in range(len(inlist)):
N = N + 1
sum = sum + inlist[i]
SS = SS + (inlist[i] ** 2)
mean = sum / float(N)
return N, sum, mean, SS | ca1d821ef64b93218bdb22268bfdde737f2d731c | 18,783 |
def gen_filelist(infiles, tmpd) :
"""Write all audio files to a temporary text document for ffmpeg
Returns the path of that text document."""
filename = tmpd/"files.txt"
with open(filename, "w") as f:
for file in infiles:
# This part ensures that any apostrophes are escaped
file = str(file).split("'")
if len(file) > 1:
file = "'\\''".join(file)
else:
file = file[0]
# Write the file line
f.write("file '"+file+"'\n")
return filename | c7d21c62de34fea98725a39fec735836e0cfd3d9 | 18,784 |
def VMMemoryLower() -> tvm.ir.transform.Pass:
"""Perform memory lowering. Lowers the relax.builtin.alloc_tensor intrinsic to VM intrinsics.
Returns
-------
ret: tvm.ir.transform.Pass
"""
return _ffi_api.VMMemoryLower() | f636ea5f854e42413395669d1a0da3c2e439fb1e | 18,786 |
def _is_disk_larger_than_max_size(device, node_uuid):
"""Check if total disk size exceeds 2TB msdos limit
:param device: device path.
:param node_uuid: node's uuid. Used for logging.
:raises: InstanceDeployFailure, if any disk partitioning related
commands fail.
:returns: True if total disk size exceeds 2TB. Returns False otherwise.
"""
try:
disksize_bytes, err = utils.execute('blockdev', '--getsize64',
device,
use_standard_locale=True,
run_as_root=True)
except (processutils.UnknownArgumentError,
processutils.ProcessExecutionError, OSError) as e:
msg = (_('Failed to get size of disk %(disk)s for node %(node)s. '
'Error: %(error)s') %
{'disk': device, 'node': node_uuid, 'error': e})
LOG.error(msg)
raise exception.InstanceDeployFailure(msg)
disksize_mb = int(disksize_bytes.strip()) // 1024 // 1024
return disksize_mb > MAX_DISK_SIZE_MB_SUPPORTED_BY_MBR | ed39e885825cec7c2fba895121741b43e3661a58 | 18,787 |
def getLines(filename):
"""Return list of lines from file"""
with open(filename, 'r', errors='ignore') as ff:
return ff.readlines() | 36e515decaa3876eed3b5db8363fb81a5db89c84 | 18,788 |
import torch
def bbox_next_frame_v3(F_first, F_pre, seg_pre, seg_first, F_tar, bbox_first, bbox_pre, temp, name):
"""
METHOD: combining tracking & direct recognition, calculate bbox in target frame
using both first frame and previous frame.
"""
F_first, F_pre, seg_pre, seg_first, F_tar = squeeze_all(F_first, F_pre, seg_pre, seg_first, F_tar)
c, h, w = F_first.size()
coords_pre_tar = match_ref_tar(F_pre, F_tar, seg_pre, temp)
coords_first_tar = match_ref_tar(F_first, F_tar, seg_first, temp)
coords_tar = {}
for cnt, coord_first in coords_first_tar.items():
coord_pre = coords_pre_tar[cnt]
# fall-back schema
if(coord_pre is None):
coord_tar_ = coord_first
else:
coord_tar_ = coord_pre
coords_tar[cnt] = coord_tar_
_, seg_pre_idx = torch.max(seg_pre, dim = 0)
coords_tar = clean_coords(coords_tar, bbox_pre, threshold=4)
bbox_tar = bbox_in_tar(coords_tar, bbox_first, h, w)
    # recognition: predict the target-frame segmentation directly from the first frame
seg_pred = recoginition(F_first, F_tar, bbox_first, bbox_tar, seg_first, temp)
seg_cleaned = clean_seg(seg_pred, bbox_tar, threshold=1)
# move bbox w.r.t cleaned seg
bbox_tar = shift_bbox(seg_cleaned, bbox_tar)
seg_post = post_process_seg(seg_pred.unsqueeze(0))
return seg_pred, seg_post, bbox_tar | 62d782a04b5d7c114fe0096fec50d6cd2d9db7bf | 18,789 |
def hough_lines(img, rho=2, theta=np.pi / 180, threshold=20, min_line_len=5, max_line_gap=25, thickness=3):
"""Perform a Hough transform on img
Args:
img (numpy.ndarray): input image
rho (float, optional): distance resolution in pixels of the Hough grid
theta (float, optional): angular resolution in radians of the Hough grid
threshold (float, optional): minimum number of votes (intersections in Hough grid cell)
min_line_len (int, optional): minimum number of pixels making up a line
max_line_gap (int, optional): maximum gap in pixels between connectable line segments
thickness (int, optional): thickness of lines drawn on resulting image
Returns:
numpy.ndarray: result image
"""
# Hough transform
lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]),
minLineLength=min_line_len, maxLineGap=max_line_gap)
line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
# Line extrapolation
extrapolated_lines = extrapolate_lines(lines, line_img.shape)
# Image display
draw_lines(line_img, extrapolated_lines, thickness=thickness)
return line_img | f797e8b24255225d4c2beb41677da3fa6c6e42d6 | 18,790 |
import packaging
def verify_package_version(ctx, config, remote):
"""
Ensures that the version of package installed is what
was asked for in the config.
For most cases this is for ceph, but we also install samba
for example.
"""
# Do not verify the version if the ceph-deploy task is being used to
# install ceph. Verifying the ceph installed by ceph-deploy should work,
# but the qa suites will need reorganized first to run ceph-deploy
# before the install task.
# see: http://tracker.ceph.com/issues/11248
if config.get("extras"):
log.info("Skipping version verification...")
return True
if 'repos' in config and config.get('repos'):
log.info("Skipping version verification because we have custom repos...")
return True
builder = _get_builder_project(ctx, remote, config)
version = builder.version
pkg_to_check = builder.project
installed_ver = packaging.get_package_version(remote, pkg_to_check)
if installed_ver and version in installed_ver:
msg = "The correct {pkg} version {ver} is installed.".format(
ver=version,
pkg=pkg_to_check
)
log.info(msg)
else:
raise RuntimeError(
"{pkg} version {ver} was not installed, found {installed}.".format(
ver=version,
installed=installed_ver,
pkg=pkg_to_check
)
) | 5ab0177738ccec3879c0383a13038515b2d6b6e9 | 18,791 |
def decrypt(v1: int, v2: int):
"""funcao desencriptadora"""
palavra_encriptada = int(v1) ^ int(v2)
desencriptada = palavra_encriptada.to_bytes((palavra_encriptada.bit_length() + 7) // 8, 'big')
return desencriptada.decode() | 19887e070f0c6ae7e68f677d69c2c5eb24761bdb | 18,793 |
def enforce_mixture_consistency_time_domain(mixture_waveforms,
separated_waveforms,
mix_weights=None,
mix_weights_type=''):
"""Projection implementing mixture consistency in time domain.
This projection makes the sum across sources of separated_waveforms equal
mixture_waveforms and minimizes the unweighted mean-squared error between the
sum across sources of separated_waveforms and mixture_waveforms. See
https://arxiv.org/abs/1811.08521 for the derivation.
Args:
mixture_waveforms: Tensor of mixture waveforms in waveform format.
separated_waveforms: Tensor of separated waveforms in source image format.
mix_weights: None or Tensor of weights used for mixture consistency, shape
should broadcast with denoised_waveforms. Overrides mix_weights_type.
mix_weights_type: Type of weights used for mixture consistency. Options are:
`` - No weighting.
`magsq` - Mix weights are magnitude-squared of the separated signal.
Returns:
Projected separated_waveforms as a Tensor in source image format.
"""
# Modify the source estimates such that they sum up to the mixture, where
# the mixture is defined as the sum across sources of the true source
# targets. Uses the least-squares solution under the constraint that the
# resulting source estimates add up to the mixture.
num_sources = tf.shape(separated_waveforms)[1]
# Add a sources axis to mixture_spectrograms.
mix = tf.expand_dims(mixture_waveforms, axis=1)
# mix is now of shape:
# (batch_size, 1, num_mics, samples).
mix_estimate = tf.reduce_sum(separated_waveforms, axis=1, keepdims=True)
# mix_estimate is of shape:
# (batch_size, 1, num_mics, samples).
if mix_weights is None:
if mix_weights_type == 'magsq':
mix_weights = tf.reduce_mean(tf.square(separated_waveforms), axis=[2, 3],
keepdims=True)
mix_weights /= tf.reduce_sum(mix_weights, axis=1, keepdims=True)
else:
mix_weights = (1.0 / num_sources)
mix_weights = tf.cast(mix_weights, mix.dtype)
correction = mix_weights * (mix - mix_estimate)
separated_waveforms = separated_waveforms + correction
return separated_waveforms | 294b1d927394ef388967de529ac5f24382ceec2c | 18,794 |
def MatchNormsLoss(anchor_tensors, paired_tensors):
"""A norm on the difference between the norms of paired tensors.
Gradients are only applied to the paired_tensor.
Args:
anchor_tensors: batch of embeddings deemed to have a "correct" norm.
paired_tensors: batch of embeddings that will be pushed to the norm of
anchor_tensors.
Returns:
A scalar loss
"""
anchor_norms = tf.stop_gradient(tf.norm(anchor_tensors, axis=1))
paired_norms = tf.norm(paired_tensors, axis=1)
  tf.summary.histogram('norms_difference',
                       tf.nn.l2_loss(anchor_norms - paired_norms))
  loss = tf.reduce_mean(tf.nn.l2_loss(anchor_norms - paired_norms))
return loss | 02f62a90cf51547b4af6063ad13be1bb712dfe5a | 18,795 |
def authenticate_begin(username, **_):
"""
Begin authentication procedure
Variables:
username user name of the user you want to login with
Arguments:
None
Data Block:
None
Result example:
<WEBAUTHN_AUTHENTICATION_DATA>
"""
user = STORAGE.user.get(username, as_obj=False)
if not user:
return make_api_response({'success': False}, err="Bad Request", status_code=400)
session.pop('state', None)
security_tokens = user.get('security_tokens', {}) or {}
credentials = [AttestedCredentialData(websafe_decode(x)) for x in security_tokens.values()]
auth_data, state = server.authenticate_begin(credentials)
session['state'] = state
return make_api_response(list(cbor.encode(auth_data))) | 6a42bfd2aba2f17f7ee7ec892bf23ef2f0221ee0 | 18,797 |
def tempSHT31():
"""Read temp and humidity from SHT31"""
return sht31sensor.get_temp_humi() | 1e934ad3a467d48019ec91e18ffab4e8d4c473ae | 18,798 |
import requests
def dog(argv, params):
"""Returns a slack attachment with a picture of a dog from thedogapi"""
# Print prints logs to cloudwatch
# Send response to response url
dogurl = 'https://api.thedogapi.com/v1/images/search?mime_types=jpg,png'
dogr = requests.get(dogurl)
url = dogr.json()[0].get('url')
payload = {
'statusCode': '200',
"attachments": [
{
"author_name": '@{} /catops dog'.format(
params.get('user_name', ['CatOps'])[0]),
"fallback": "Woof woof.",
"title": "Woof!",
"text": "Evil doggo.",
"image_url": url,
"color": "#764FA5"
}
],
'response_type': 'in_channel',
'headers': {'Content-Type': 'application/json'}
}
return payload | cb80426e6cab0aa2fc58b78baa0ff225d654f04a | 18,799 |
def get_public_suffix (domain):
""" get_public_suffix("www.example.com") -> "example.com"
Calling this function with a DNS name will return the
public suffix for that name.
Note that if the input does not contain a valid TLD,
e.g. "xxx.residential.fw" in which "fw" is not a valid TLD,
the returned public suffix will be "fw", and TLD will be empty
Note that for internationalized domains the list at
http://publicsuffix.org uses decoded names, so it is
up to the caller to decode any Punycode-encoded names.
"""
global Root, Domain_to_t2ld_cache
try:
return Domain_to_t2ld_cache [domain]
except KeyError:
parts = domain.lower().lstrip('.').split('.')
hits = [None] * len(parts)
_lookup_node (hits, 1, Root, parts)
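        # hits[i] == 0 marks the first label of the public suffix; joining the remaining labels yields it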
for i, what in enumerate(hits):
if what is not None and what == 0:
t2ld = '.'.join(parts[i:])
Domain_to_t2ld_cache [domain] = t2ld
return t2ld | 8982df4677f1a1853fa328973cfc00c17796e3d8 | 18,800 |
from scipy.interpolate import interp1d
def interpol(data,x):
"""
Resamples data by given factor with interpolation
"""
# Resamples data by given factor by interpolation
x0 = np.linspace(0, len(data)-1, len(data))
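    # x1 inserts (x - 1) extra evenly spaced points between each pair of original samples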
x1 = np.linspace(0, len(data)-1, len(data)*x-(x-1))
f = interp1d(x0, data)
return f(x1) | 85cb9c9d776abc8317edbf1df5935e78ac774c02 | 18,801 |
import torch
def convert_to_torch_tensors(X_train, y_train, X_test, y_test):
""" Function to quickly convert datasets to pytorch tensors """
# convert training data
_X_train = torch.LongTensor(X_train)
_y_train = torch.FloatTensor(y_train)
# convert test data
_X_test = torch.LongTensor(X_test)
_y_test = torch.FloatTensor(y_test)
# return the tensors
return _X_train, _y_train, _X_test, _y_test | 0d40fe19c977b25e3a2571adc98790d7058a77d9 | 18,802 |
def api_auth(func):
"""
If the user is not logged in, this decorator looks for basic HTTP auth
data in the request header.
"""
@wraps(func)
def _decorator(request, *args, **kwargs):
authentication = APIAuthentication(request)
if authentication.authenticate():
return func(request, *args, **kwargs)
raise Http404
return _decorator | 624c997dae9da9b698b1dccbd5293027d54d0fc8 | 18,803 |
def object_miou(y_true, y_pred, num_classes=cfg.num_classes):
"""
衡量图中目标的iou
:param y_true: 标签
:param y_pred: 预测
:param num_classes: 分类数量
:return: miou
"""
confusion_matrix = get_confusion_matrix(y_true, y_pred, num_classes)
# Intersection = TP Union = TP + FP + FN
# IoU = TP / (TP + FP + FN)
# 取对角元素的值,对角线上的值可认为是TP或是交集
intersection = tf.linalg.diag_part(confusion_matrix)
# axis = 1表示混淆矩阵行的值;axis = 0表示取混淆矩阵列的值,都是返回一个一维列表,需要求和
union = tf.reduce_sum(confusion_matrix, axis=1) + tf.reduce_sum(confusion_matrix, axis=0) - intersection
intersection = intersection
union = union
iou = intersection / union # 其值为各个类别的IoU
# 避免nan
iou = tf.where(tf.math.is_nan(iou), tf.zeros_like(iou), iou)
# 不求包含背景部分的iou
miou = tf.reduce_mean(iou[1:])
return miou | 1051fe488fb4b16760bb256265d11b33aa613743 | 18,804 |
import datetime
def post_discussion(title: str, content: str, path: str, top: bool, private: bool = False):
"""
发送讨论
参数:
title:str 讨论题目
content:str 内容
path:str 路径
top:bool 是否置顶
返回
{
"code":-1,//是否成功执行
"discussion_id":"成功执行时的讨论ID",
"message":"错误信息"
}
"""
    if not session.get("uid"):
        return make_response(-1, message="Please log in")
    user: User = User.by_id(int(session.get("uid")))
    if not permission_manager.has_permission(user.id, "discussion.manage") and top:
        return make_response(-1, message="Only administrators may post pinned discussions")
    if not can_post_at(user, path):
        return make_response(-1, message="You are not allowed to post here")
    if not title:
        return make_response(-1, message="The title must not be empty")
discussion = Discussion()
discussion.content = content
discussion.title = title
discussion.path = path
discussion.time = datetime.datetime.now()
discussion.top = top
discussion.uid = user.id
discussion.private = private
db.session.add(discussion)
db.session.commit()
return make_response(0, discussion_id=discussion.id) | b633da456a8ca05d592efb3e1dd16c8a7a465e23 | 18,805 |
from operator import and_
def _get_fields_usage_data(session):
"""
Obtaining metrics of field usage in lingvodoc,
the metrics are quantity of all/deleted dictionary perspectives using this field
(also with URLs) and quantity of lexical entries in such dictionary perspectives
Result:
dict {
(client_id, object_id): dict {
'URLs': list['url_string', ...],
'metrics': dict {
'dp': dict {
'sum': quantity of all parent dictionary perspectives,
'deleted': quantity of deleted parent dictionary perspectives
},
'le': dict {
'sum': quantity of lexical entries of all parent dictionary perspectives,
'deleted': quantity of lexical entries of deleted parent dictionary perspectives
}
}
}
}
"""
f_client_id = Field.client_id.label('field_client_id')
f_object_id = Field.object_id.label('field_object_id')
dp_client_id = DictionaryPerspective.client_id.label('dictionary_perspective_client_id')
dp_object_id = DictionaryPerspective.object_id.label('dictionary_perspective_object_id')
dp_marked_for_deletion = \
DictionaryPerspective.marked_for_deletion.label('dictionary_perspective_marked_for_deletion')
subquery = session.query(f_client_id, f_object_id, dp_client_id, dp_object_id, dp_marked_for_deletion)
subquery = subquery.select_from(Field).join(DictionaryPerspectiveToField,
and_(DictionaryPerspectiveToField.field_client_id == Field.client_id,
DictionaryPerspectiveToField.field_object_id == Field.object_id))
subquery = subquery.filter(DictionaryPerspective.marked_for_deletion == False,
Field.marked_for_deletion == False)
subquery = subquery.join(DictionaryPerspective,
and_(DictionaryPerspectiveToField.parent_client_id == DictionaryPerspective.client_id,
DictionaryPerspectiveToField.parent_object_id == DictionaryPerspective.object_id))
subquery = subquery.distinct(Field.client_id, Field.object_id,
DictionaryPerspective.client_id, DictionaryPerspective.object_id)
subquery = subquery.order_by(Field.client_id, Field.object_id,
DictionaryPerspective.client_id, DictionaryPerspective.object_id)
log.info(subquery)
fields_usage = dict()
try:
for data in subquery.all():
field_id = (data.field_client_id, data.field_object_id)
if not fields_usage.get(field_id, None):
fields_usage[field_id] = {
'URLs': list(),
'metrics': {
'dp': {
'sum': 0,
'deleted': 0
},
'le': {
'sum': 0,
'deleted': 0
}
}
}
fields_usage[field_id]['URLs'].append(
_dictionary_perspective_url(
data.dictionary_perspective_client_id, data.dictionary_perspective_object_id
)
)
except exc.SQLAlchemyError as ex:
log.warning('Failed to obtain fields usage URLs at ' + __name__)
log.warning(ex)
raise
subquery = subquery.subquery('subquery')
query = session.query('subquery.field_client_id', 'subquery.field_object_id',
func.count('*'), 'subquery.dictionary_perspective_marked_for_deletion')
query = query.select_from(subquery).group_by('subquery.field_client_id',
'subquery.field_object_id',
'subquery.dictionary_perspective_marked_for_deletion')
query = query.order_by('subquery.field_client_id', 'subquery.field_object_id')
log.info(query)
try:
for data in query.all():
usage = fields_usage.get((data[0], data[1]), None)
if usage:
if data[3]:
usage['metrics']['dp']['deleted'] += data[2]
usage['metrics']['dp']['sum'] += data[2]
except exc.SQLAlchemyError as ex:
log.warning('Failed to obtain fields dictionary perspective metrics at ' + __name__)
log.warning(ex)
raise
query = session.query('subquery.field_client_id', 'subquery.field_object_id',
func.count('*'), 'subquery.dictionary_perspective_marked_for_deletion')
query = query.select_from(LexicalEntry)
query = query.join(subquery, and_('subquery.dictionary_perspective_client_id = lexicalentry.parent_client_id',
'subquery.dictionary_perspective_object_id = lexicalentry.parent_object_id'))
query = query.filter('lexicalentry.marked_for_deletion = false')
query = query.group_by('subquery.field_client_id', 'subquery.field_object_id',
'subquery.dictionary_perspective_marked_for_deletion')
log.info(query)
try:
for data in query.all():
usage = fields_usage.get((data[0], data[1]), None)
if usage:
if data[3]:
usage['metrics']['le']['deleted'] += data[2]
usage['metrics']['le']['sum'] += data[2]
except exc.SQLAlchemyError as ex:
log.warning('Failed to obtain fields lexical entry metrics at ' + __name__)
log.warning(ex)
raise
return fields_usage | d57fb6fd0c07e22ac62ba63e5ba5b72189481aed | 18,807 |
def test_merge_batch_grad_transforms_same_key_same_trafo():
"""Test merging multiple ``BatchGradTransforms`` with same key and same trafo."""
def func(t):
return t
bgt1 = BatchGradTransformsHook({"x": func})
bgt2 = BatchGradTransformsHook({"x": func})
merged = Cockpit._merge_batch_grad_transform_hooks([bgt1, bgt2])
assert len(merged._transforms.keys()) == 1
assert id(merged._transforms["x"]) == id(func) | 10aade423092d39e6a7d754c0ceecfdb53226b53 | 18,809 |
import atexit
import time
def main(selected_ssids, sample_interval, no_header, args=None):
"""
Repeatedly check internet connection status (connected or disconnected) for given WiFi SSIDs.
    Output is written as .csv to stdout.
"""
wireless_connections = [
c for c in NetworkManager.Settings.Connections
if '802-11-wireless' in c.GetSettings().keys()
]
known_ssids = [
c.GetSettings()['802-11-wireless']['ssid']
for c in wireless_connections
]
# confirm selected ssids are available as network manager connections
for ssid in selected_ssids:
assert ssid in known_ssids, f"SSID '{ssid}' not found in network manager connections. Available SSIDs: {sorted(known_ssids)}"
# get the network manager connection objects for the selected ssids
connections = {
ssid: connection
for connection in wireless_connections for ssid in selected_ssids
if connection.GetSettings()['802-11-wireless']['ssid'] == ssid
}
# get the wireless device
wireless_devs = [
d for d in NetworkManager.NetworkManager.GetDevices()
if d.DeviceType == NetworkManager.NM_DEVICE_TYPE_WIFI
]
assert len(wireless_devs) > 0, "No wifi device found. Aborting"
wireless_dev = wireless_devs[0]
# save the current active connection, to restore once this script exits
initial_connection = wireless_dev.ActiveConnection.Connection if wireless_dev.ActiveConnection else None
def restore_initial_connection():
if initial_connection:
NetworkManager.NetworkManager.ActivateConnection(
initial_connection, wireless_dev, "/")
atexit.register(restore_initial_connection)
# write the csv header
if not no_header:
print("timestamp,ssid,device_connected,ping_successful", flush=True)
# begin logging loop.
next_log_time = time.time()
while True:
# wait for the next logging iteration
restore_initial_connection(
) # leave initial connection active while waiting
time.sleep(max(next_log_time - time.time(), 0))
next_log_time += sample_interval * 60
for ssid in selected_ssids:
# activate the connection
if wireless_dev.State == NetworkManager.NM_DEVICE_STATE_ACTIVATED:
wireless_dev.Disconnect()
NetworkManager.NetworkManager.ActivateConnection(
connections[ssid], wireless_dev, "/")
connected = wait_for_connection(wireless_dev)
if connected:
# now test internet (by pinging google)
ping_successful = ping("www.google.com")
else:
ping_successful = False
# write out result
print(
f"{time.time()},{ssid},{int(connected)},{int(ping_successful)}",
flush=True)
return 0 | f47e13bdb994e450b8bb77e26e0da3d25014032f | 18,810 |
def getNarrowBandULAMIMOChannel(azimuths_tx, azimuths_rx, p_gainsdB, number_Tx_antennas, number_Rx_antennas,
normalizedAntDistance=0.5, angleWithArrayNormal=0, pathPhases=None):
"""This .m file uses ULAs at both TX and RX.
- assumes one beam per antenna element
the first column will be the elevation angle, and the second column is the azimuth angle correspondingly.
p_gain will be a matrix size of (L, 1)
departure angle/arrival angle will be a matrix as size of (L, 2), where L is the number of paths
t1 will be a matrix of size (nt, nr), each
element of index (i,j) will be the received
power with the i-th precoder and the j-th
combiner in the departing and arrival codebooks
respectively
:param departure_angles: ((elevation angle, azimuth angle),) (L, 2) where L is the number of paths
:param arrival_angles: ((elevation angle, azimuth angle),) (L, 2) where L is the number of paths
:param p_gaindB: path gain (L, 1) in dB where L is the number of paths
:param number_Rx_antennas, number_Tx_antennas: number of antennas at Rx and Tx, respectively
:param pathPhases: in degrees, same dimension as p_gaindB
:return:
"""
azimuths_tx = np.deg2rad(azimuths_tx)
azimuths_rx = np.deg2rad(azimuths_rx)
# nt = number_Rx_antennas * number_Tx_antennas #np.power(antenna_number, 2)
m = np.shape(azimuths_tx)[0] # number of rays
H = np.matrix(np.zeros((number_Rx_antennas, number_Tx_antennas)))
gain_dB = p_gainsdB
path_gain = np.power(10, gain_dB / 10)
path_gain = np.sqrt(path_gain)
#generate uniformly distributed random phase in radians
if pathPhases is None:
pathPhases = 2*np.pi * np.random.rand(len(path_gain))
else:
#convert from degrees to radians
pathPhases = np.deg2rad(pathPhases)
#include phase information, converting gains in complex-values
path_complexGains = path_gain * np.exp(-1j * pathPhases)
# recall that in the narrowband case, the time-domain H is the same as the
# frequency-domain H
for i in range(m):
# at and ar are row vectors (using Python's matrix)
at = np.matrix(arrayFactorGivenAngleForULA(number_Tx_antennas, azimuths_tx[i], normalizedAntDistance,
angleWithArrayNormal))
ar = np.matrix(arrayFactorGivenAngleForULA(number_Rx_antennas, azimuths_rx[i], normalizedAntDistance,
angleWithArrayNormal))
H = H + path_complexGains[i] * ar.conj().T * at # outer product of ar Hermitian and at
#factor = (np.linalg.norm(path_complexGains) / np.sum(path_complexGains)) * np.sqrt(
# number_Rx_antennas * number_Tx_antennas) # scale channel matrix
#H *= factor # normalize for compatibility with Anum's Matlab code
return H | bb201abaca60e2855e86a41d9c581599b9ab0c22 | 18,811 |
def get_pybricks_reset_vector():
"""Gets the boot vector of the pybricks firmware."""
# Extract reset vector from dual boot firmware.
with open("_pybricks/firmware-dual-boot-base.bin", "rb") as pybricks_bin_file:
pybricks_bin_file.seek(4)
return pybricks_bin_file.read(4) | 7d504e7e6e6ca444932fd61abb701a010a259254 | 18,812 |
def nSideCurve(sides=6, radius=1.0):
"""
nSideCurve( sides=6, radius=1.0 )
Create n-sided curve
Parameters:
sides - number of sides
(type=int)
radius - radius
(type=float)
Returns:
a list with lists of x,y,z coordinates for curve points, [[x,y,z],[x,y,z],...n]
(type=list)
"""
newpoints = []
step = 2.0 / sides
i = 0
while i < sides:
t = i * step
x = sin(t * pi) * radius
y = cos(t * pi) * radius
newpoints.append([x, y, 0])
i += 1
return newpoints | d64668ae2fdbd2dc06b36fb2523e09a8cc380d6f | 18,813 |
def _get_corr_mat(corr_transform, n_dim):
""" Input check for the arguments passed to DirectionalSimulator"""
if corr_transform is None:
return np.eye(n_dim)
if not isinstance(corr_transform, np.ndarray) or corr_transform.ndim < 2:
err_msg = "corr_transform must be a 2-D numpy array"
raise ValueError(err_msg)
if corr_transform.shape[0] != n_dim:
err_msg = "Inconsistent number of marginal distributions and "
err_msg += "corr_transform shape"
raise ValueError(err_msg)
if corr_transform.shape[0] != corr_transform.shape[1]:
err_msg = "corr_transform must be square"
raise ValueError(err_msg)
if not (corr_transform == corr_transform.T).all():
err_msg = "corr_transform must be symmetrical"
raise ValueError(err_msg)
return corr_transform | 94909cc43322e8eebf14942cd39817d10bd744fa | 18,814 |
def get_flowline_routing(NHDPlus_paths=None, PlusFlow=None, mask=None,
mask_crs=None, nhdplus_crs=4269):
"""Read a collection of NHDPlus version 2 PlusFlow (routing)
tables from one or more drainage basins and consolidate into a
single pandas DataFrame, returning the `FROMCOMID` and `TOCOMID`
columns.
Parameters
----------
NHDPlus_paths : sequence
Sequence of paths to the top level folder for each drainage basin.
For example:
.. code-block:: python
['NHDPlus/NHDPlusGL/NHDPlus04',
'NHDPlus/NHDPlusMS/NHDPlus07']
by default None
PlusFlow : string or sequence
Single path to a PlusFlow table or sequence of PlusFlow table
filepaths, by default None
Returns
-------
flowline_routing : DataFrame
        DataFrame with `FROMCOMID` and `TOCOMID` columns describing the routing connections.
Raises
------
ValueError
        If neither `NHDPlus_paths` nor `PlusFlow` is provided.
"""
if NHDPlus_paths is not None:
flowlines_files, pfvaa_files, pf_files, elevslope_files = \
get_nhdplus_v2_filepaths(NHDPlus_paths, raise_not_exist_error=False)
pf = shp2df(pf_files)
if mask is not None:
if isinstance(mask, tuple):
extent_poly_nhd_crs = box(*mask)
filter = mask
elif mask is not None:
extent_poly_nhd_crs = read_polygon_feature(mask,
feature_crs=mask_crs,
dest_crs=nhdplus_crs)
# ensure that filter bbox is in same crs as flowlines
# get filters from shapefiles, shapley Polygons or GeoJSON polygons
filter = get_bbox(extent_poly_nhd_crs, dest_crs=nhdplus_crs)
else:
filter = None
flowlines = shp2df(flowlines_files, filter=filter)
keep_comids = pf['FROMCOMID'].isin(flowlines['COMID']) | \
pf['TOCOMID'].isin(flowlines['COMID'])
pf = pf.loc[keep_comids]
elif PlusFlow is not None:
pf = shp2df(PlusFlow)
else:
raise ValueError(("get_flowline_routing: Must provide one of more"
" NHDPlus_path or PlusFlow table."))
pf = pf.loc[pf['FROMCOMID'] != 0]
return pf[['FROMCOMID', 'TOCOMID']] | c79d943b35f236f9d2bddbc6c9e2f470ac6ba0fc | 18,815 |
from typing import Callable
from typing import Optional
from datetime import datetime
import pytz
def df_wxyz(
time_slot_sensor: Sensor, test_source_a: BeliefSource, test_source_b: BeliefSource
) -> Callable[[int, int, int, int, Optional[datetime]], BeliefsDataFrame]:
"""Convenient BeliefsDataFrame to run tests on.
For a single sensor, it contains w events, for each of which x beliefs by y sources each (max 2),
described by z probabilistic values (max 3).
Note that the event resolution of the sensor is 15 minutes.
"""
sources = [test_source_a, test_source_b] # expand to increase max y
cps = [0.1587, 0.5, 0.8413] # expand to increase max z
def f(w: int, x: int, y: int, z: int, start: Optional[datetime] = None):
if start is None:
start = datetime(2000, 1, 3, 9, tzinfo=pytz.utc)
# Build up a BeliefsDataFrame with various events, beliefs, sources and probabilistic accuracy (for a single sensor)
beliefs = [
TimedBelief(
source=sources[s],
sensor=time_slot_sensor,
value=1000 * e + 100 * b + 10 * s + p,
belief_time=datetime(2000, 1, 1, tzinfo=pytz.utc) + timedelta(hours=b),
event_start=start + timedelta(hours=e),
cumulative_probability=cps[p],
)
for e in range(w) # w events
for b in range(x) # x beliefs
for s in range(y) # y sources
for p in range(z) # z cumulative probabilities
]
return BeliefsDataFrame(sensor=time_slot_sensor, beliefs=beliefs)
return f | 64928090a7fa58cc1f6a6e4928025c426c17e799 | 18,816 |
def not_posted(child, conn) -> bool:
"""Check if a post has been already tooted."""
child_data = child["data"]
child_id = child_data["id"]
last_posts = fetch_last_posts(conn)
return child_id not in last_posts | 5be321bf838a22cfcd742c0ddf48eb00ec1e35bf | 18,817 |
def parse_img_name(path):
"""parse image by frame name
:param name [str]
:output img_lists
"""
code = path.split('\\')[-1].split('.')[0]
vid_id = path.split('\\')[-2]
rcp_id = path.split('\\')[-3]
seg_id = int(code[:4])
frm_id = int(code[4:])
return rcp_id, vid_id, seg_id, frm_id | 6e0a140934c584400365f12feb8a86cfea3bbb2b | 18,818 |
def get_bspline_kernel(x, channels, transpose=False, dtype=tf.float32, order=4):
"""Creates a 5x5x5 b-spline kernel.
Args:
num_channels: The number of channels of the image to filter.
dtype: The type of an element in the kernel.
Returns:
A tensor of shape `[5, 5, 5, num_channels, num_channels]`.
"""
mesh = x.mesh
in_dim = x.shape[-1]
num_channels = channels.size
if order == 8:
kernel = np.array(( 1., 8., 28., 56., 70., 56., 28., 8., 1.), dtype=dtype.as_numpy_dtype())
elif order == 6:
kernel = np.array(( 1., 6., 15., 20., 15., 6., 1.), dtype=dtype.as_numpy_dtype())
elif order==2:
kernel = np.array(( 1., 2., 1.), dtype=dtype.as_numpy_dtype())
else:
kernel = np.array(( 1., 4., 6., 4., 1.), dtype=dtype.as_numpy_dtype())
size = len(kernel)
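  # build a separable 3-D kernel as the outer product of the 1-D b-spline weights, then normalize it to sum to 1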
kernel = np.einsum('ij,k->ijk', np.outer(kernel, kernel), kernel)
kernel /= np.sum(kernel)
kernel = kernel[:, :, :, np.newaxis, np.newaxis]
kernel = tf.constant(kernel, dtype=dtype) * tf.eye(num_channels, dtype=dtype)
fd_dim = mtf.Dimension("fd", size)
fh_dim = mtf.Dimension("fh", size)
fw_dim = mtf.Dimension("fw", size)
if transpose:
return mtf.import_tf_tensor(mesh, kernel, shape=[fd_dim, fh_dim, fw_dim, channels, in_dim])
else:
return mtf.import_tf_tensor(mesh, kernel, shape=[fd_dim, fh_dim, fw_dim, in_dim, channels]) | 1696e3a9077c672becda474de98750f45d1fe3d4 | 18,819 |
from .protocols import Stock_solution, MonoDispensing_type1, MonoDispensing_type2, MultiBase, SMTransfer, ReactionQC, QCSolubilise, DMATransfer, \
    PostWorkupTransfer, Workup, PostWorkupQCAndTransfer, PostWorkupDMSOAddition, BaseT3PMulti, PoisedReactor
def gen_prot_dict():
    """
    Map each protocol's string representation to its class.
    :return: dict of {str(protocol_instance): protocol_class}
    """
    input_list = [Stock_solution, MonoDispensing_type1, MonoDispensing_type2, MultiBase, SMTransfer, ReactionQC,
                  QCSolubilise, DMATransfer, PostWorkupTransfer, Workup, PostWorkupQCAndTransfer,
                  PostWorkupDMSOAddition, BaseT3PMulti, PoisedReactor]
    out_dict = {}
    for protocol in input_list:
        out_dict[str(protocol())] = protocol
return out_dict | 64c4c88f684297ea7e658015d225481005315527 | 18,820 |
def f(x):
"""
    The target function to be predicted.
"""
return np.matmul(x * np.absolute(np.sin(x)), np.array([[2], [1]])) | 228e8f431f7c071ad1587b76c73495296e1331f3 | 18,821 |
def create_frame_coords_list(coords_path):
"""
:param coords_path: [int]
:type coords_path: list
:return: int, [int]
:rtype: tuple
"""
id_number = coords_path[0]
fr_coordinates = [None]*int((len(coords_path) - 1) / 3) # excluding the index 0 (which is the id) the number of triples is the length of this array
index = 0
for i in range(1, len(coords_path), 3):
x = coords_path[i]
y = coords_path[i + 1]
frame_number = coords_path[i + 2]
fr_coordinates[index] = FrameCoord(x, y, frame_number)
index += 1
return id_number, fr_coordinates | ca835c04b67789903a74e6882434c570f33647ab | 18,822 |
def arcToolReport(function=None, arcToolMessageBool=False, arcProgressorBool=False):
"""This decorator function is designed to be used as a wrapper with other GIS functions to enable basic try and except
reporting (if function fails it will report the name of the function that failed and its arguments. If a report
boolean is true the function will report inputs and outputs of a function.-David Wasserman"""
def arcToolReport_Decorator(function):
def funcWrapper(*args, **kwargs):
try:
funcResult = function(*args, **kwargs)
if arcToolMessageBool:
arcpy.AddMessage("Function:{0}".format(str(function.__name__)))
arcpy.AddMessage(" Input(s):{0}".format(str(args)))
arcpy.AddMessage(" Ouput(s):{0}".format(str(funcResult)))
if arcProgressorBool:
arcpy.SetProgressorLabel("Function:{0}".format(str(function.__name__)))
arcpy.SetProgressorLabel(" Input(s):{0}".format(str(args)))
arcpy.SetProgressorLabel(" Ouput(s):{0}".format(str(funcResult)))
return funcResult
except Exception as e:
arcpy.AddMessage(
"{0} - function failed -|- Function arguments were:{1}.".format(str(function.__name__),
str(args)))
print(
"{0} - function failed -|- Function arguments were:{1}.".format(str(function.__name__), str(args)))
print(e.args[0])
return funcWrapper
if not function: # User passed in a bool argument
def waiting_for_function(function):
return arcToolReport_Decorator(function)
return waiting_for_function
else:
return arcToolReport_Decorator(function) | 673dd42bd96a0f5aede5ca0593efaa02d630e2e5 | 18,824 |
def check_for_pattern(input_string):
""" Check a string for a recurring pattern. If no pattern,
return False. If pattern present, return smallest integer
length of pattern.
Warning: equal_divisions discards the remainder, so if it doesn't
    fit the pattern, you will get a false positive.
The specific use is to check recurring decimal patterns, so it doesn't
matter for that use.
"""
if len(input_string) < 2:
return False
length_of_division = 1
limit = len(input_string)//2
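    # try every candidate pattern length up to half the string; a single distinct chunk means the string repeats with that period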
while length_of_division < limit + 1:
divisions = equal_division(input_string, length_of_division)
divisions = set(divisions)
if len(divisions) == 1:
return length_of_division
else:
length_of_division += 1
return False | 6d6e32c7228ef3cec4107a3354fe53b90ef69e04 | 18,825 |
import logging
def get_xml_namespace(file_name,pkg_type):
"""Get xml's namespace.
Args:
file_name: The path of xml file.
Returns:
xml_namespace: The namespace of xml.
for example:
xml file content:
...
<config xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
<interfaces xmlns="urn:ietf:params:xml:ns:yang:ietf-interfaces"
xmlns:ni="urn:ietf:params:xml:ns:yang:ietf-network-instance">
...
</interfaces>
</config>
xml_namespace: 'urn:ietf:params:xml:ns:yang:ietf-interfaces'
Raises:
Exception: Capture execution exception.
"""
feature_namespaces = []
try:
doc = parse(file_name)
        root = doc.documentElement
        child_nodes = []  # fallback so the loop below is skipped when no matching filter/config element exists
if pkg_type in ['get','get-config']:
if root.getElementsByTagNameNS("urn:ietf:params:xml:ns:netconf:base:1.0", "filter"):
child_nodes = root.getElementsByTagNameNS("urn:ietf:params:xml:ns:netconf:base:1.0", "filter")[
0].childNodes
elif pkg_type == 'config':
if root.getElementsByTagNameNS("urn:ietf:params:xml:ns:netconf:base:1.0", "config"):
child_nodes = root.getElementsByTagNameNS("urn:ietf:params:xml:ns:netconf:base:1.0", "config")[
0].childNodes
else:
child_nodes = root.childNodes
logging.info("This is rpc-xml:" + file_name)
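        # collect the namespace URI of every element child found under the filter/config wrapper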
for child_node in child_nodes:
if child_node.nodeType == 1 and hasattr(child_node, 'namespaceURI'):
feature_namespaces.append(child_node.namespaceURI)
except ExpatError as expat_exception:
xml_structure_except(expat_exception, file_name)
except Exception as error_str:
error_write(error_str)
return feature_namespaces | 401bf5c321b5626d7b7171e2270df04863a01d61 | 18,826 |
def build_successors_table(tokens):
"""Return a dictionary: keys are words; values are lists of
successors.
>>> text = ['We', 'came', 'to', 'investigate', ',', 'catch', 'bad', 'guys', 'and', 'to', 'eat', 'pie', '.']
>>> table = build_successors_table(text)
>>> sorted(table)
[',', '.', 'We', 'and', 'bad', 'came', 'catch', 'eat', 'guys', 'investigate', 'pie', 'to']
>>> table['to']
['investigate', 'eat']
>>> table['pie']
['.']
>>> table['.']
['We']
"""
table = {}
prev = '.'
for word in tokens:
if prev not in table:
table[str(prev)] = [str(word)]
else: # if already in table then add this word to the list of successors
table[str(prev)] += [str(word)]
prev = word
return table | 92206bf3dd40518c23c6fb98e22dc818912c5bcc | 18,827 |
import math
from scipy.ndimage import minimum_filter1d
def _rolling_nanmin_1d(a, w=None):
"""
Compute the rolling min for 1-D while ignoring NaNs.
This essentially replaces:
`np.nanmin(rolling_window(T[..., start:stop], m), axis=T.ndim)`
Parameters
----------
a : numpy.ndarray
The input array
w : numpy.ndarray, default None
The rolling window size
Returns
-------
output : numpy.ndarray
Rolling window nanmin.
"""
if w is None:
w = a.shape[0]
half_window_size = int(math.ceil((w - 1) / 2))
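    # minimum_filter1d gives a centered rolling minimum; trim the edges so only full windows remain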
return minimum_filter1d(a, size=w)[
half_window_size : half_window_size + a.shape[0] - w + 1
] | 37229440ba632d1ddadc55a811f9abca1c8e3132 | 18,828 |
def get_model_init_fn(train_logdir,
tf_initial_checkpoint,
initialize_last_layer,
last_layers,
ignore_missing_vars=False):
"""Gets the function initializing model variables from a checkpoint.
Args:
train_logdir: Log directory for training.
tf_initial_checkpoint: TensorFlow checkpoint for initialization.
initialize_last_layer: Initialize last layer or not.
last_layers: Last layers of the model.
ignore_missing_vars: Ignore missing variables in the checkpoint.
Returns:
Initialization function.
"""
if tf_initial_checkpoint is None:
tf.logging.info('Not initializing the model from a checkpoint.')
return None
if tf.train.latest_checkpoint(train_logdir):
tf.logging.info('Ignoring initialization; other checkpoint exists')
return None
tf.logging.info('Initializing model from path: %s', tf_initial_checkpoint)
# Variables that will not be restored.
exclude_list = ['global_step']
if not initialize_last_layer:
exclude_list.extend(last_layers)
variables_to_restore = contrib_framework.get_variables_to_restore(exclude=exclude_list)
if variables_to_restore:
init_op, init_feed_dict = contrib_framework.assign_from_checkpoint(
tf_initial_checkpoint,
variables_to_restore,
ignore_missing_vars=ignore_missing_vars)
global_step = tf.train.get_or_create_global_step()
def restore_fn(sess):
sess.run(init_op, init_feed_dict)
sess.run([global_step])
return restore_fn
return None | 7fdd1bcff59fc01dff2f1ef49eda4bd29b162ea2 | 18,829 |
import time
def tokenize_protein(text):
"""
    Tokenizes a protein string into a list of single-letter residue strings.
"""
aa = ['A','C','D','E','F','G','H','I','K','L',
'M','N','P','Q','R','S','T','V','W','Y']
N = len(text)
n = len(aa)
i=0
seq = list()
timeout = time.time()+5
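    # copy standard residues as-is and map anything else to 'X'; the 5-second timeout guards against very long inputs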
for i in range(N):
symbol = text[i]
if (symbol in aa):
seq.append(symbol)
else:
seq.append('X')
if time.time() > timeout:
break
return seq | 7dba531023aef97dcbfb37af75a9a1459a1e94d2 | 18,830 |
from typing import Callable
def read_xml_string() -> Callable[[int, int, str], str]:
"""Read an XML file to a string. Subsection string needs to include a prepending '-'."""
def _read_xml_string(number: int, year: int, subsection: str) -> str:
xmlfile = f"tests/data/xmls/session-{number:03}-{year}{subsection}.xml"
with open(xmlfile, "r", encoding="utf-8") as infile:
lines = infile.readlines()
return " ".join([line.strip() for line in lines])
return _read_xml_string | 2b4e4c3585e26138e5fecf820699e97e1011a842 | 18,831 |
def compute_mean_std_data(filelist):
"""
Compute mean and standard deviation of a dataset.
:param filelist: list of str
:return: tuple of floats
"""
tensor_list = []
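    # flatten every image to 1-D and pool all pixel values before computing the global mean and std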
for file in filelist:
img = Image.open(file)
img_np = np.array(img).ravel()
tensor_list.append(img_np.ravel())
pixels = np.concatenate(tensor_list, axis=0)
return np.mean(pixels), np.std(pixels) | 57c8d5e9294e291e9897ac0e865a661319123965 | 18,832 |