content | sha1 | id
---|---|---
def get_config_type(service_name):
"""
Get the config type for the given service name.
"""
if service_name == "HDFS":
tmp_type = "hdfs-site"
elif service_name == "HDFS":
tmp_type = "core-site"
elif service_name == "MAPREDUCE":
tmp_type = "mapred-site"
elif service_name == "HBASE":
tmp_type = "hbase-site"
elif service_name == "OOZIE":
tmp_type = "oozie-site"
elif service_name == "HIVE":
tmp_type = "hive-site"
elif service_name == "WEBHCAT":
tmp_type = "webhcat-site"
else:
tmp_type = "global"
return tmp_type
|
2fec790e67bdba757f8dffe058fae1d508b7d237
| 18,700 |
import requests
import pandas as pd
from io import StringIO
from datetime import datetime
# NOTE: NOMIS is assumed to be a module-level URL template for the NOMIS API,
# defined elsewhere in the original module.
def batch_request(config, dataset_id, geographies, date_format,
record_offset=0, max_api_calls=10):
"""Fetch a NOMIS dataset from the API, in batches,
based on a configuration object.
Args:
config (dict): Configuration object, from which a get
request is formed.
dataset_id (str): NOMIS dataset ID
geographies (list): Return object from :obj:`discovery_iter`.
date_format (str): Formatting string for dates in the dataset
record_offset (int): Record to start from
max_api_calls (int): Number of requests allowed
Returns:
(pd.DataFrame, bool, int): Concatenated batch results, a flag indicating
whether the final batch was reached, and the next record offset.
"""
config["geography"] = ",".join(str(row["nomis_id"])
for row in geographies)
config["RecordOffset"] = record_offset
date_parser = lambda x: datetime.strptime(x, date_format)
# Build a list of dfs in chunks from the NOMIS API
dfs = []
offset = 25000
icalls = 0
done = False
while (not done) and icalls < max_api_calls:
#logging.debug(f"\t\t {offset}")
# Build the request payload
params = "&".join(f"{k}={v}" for k,v in config.items())
# Hit the API
r = requests.get(NOMIS.format(f"{dataset_id}.data.csv"), params=params)
# Read the data
with StringIO(r.text) as sio:
_df = pd.read_csv(sio, parse_dates=["DATE"], date_parser=date_parser)
done = len(_df) < offset
# Increment the offset
config["RecordOffset"] += offset
# Ignore empty fields
dfs.append(_df.loc[_df.OBS_VALUE > 0])
icalls += 1
# Combine and return
df = pd.concat(dfs)
df.columns = [c.lower() for c in df.columns]
return df, done, config["RecordOffset"]
|
1f615ccd60464641a554bc3bf2bc39fe917d4df4
| 18,701 |
from typing import Dict
from typing import List
from typing import Tuple
import os
from random import shuffle
from tqdm import tqdm
def get_val(in_root: str, wnid2idx: Dict[str, int]) -> List[Tuple[str, int]]:
"""Get validation split sample pairs.
Args:
in_root (str): Input dataset root directory.
wnid2idx (dict): Mapping of WordNet ID to class ID.
Returns:
List of pairs of (image filename, class ID).
"""
pairs = []
filename = os.path.join(in_root, 'val', 'val_annotations.txt')
lines = open(filename).read().strip().split('\n')
for line in tqdm(lines, leave=False):
basename, wnid = line.split()[:2]
filename = os.path.join(in_root, 'val', 'images', basename)
wnid_idx = wnid2idx[wnid]
pairs.append((filename, wnid_idx))
shuffle(pairs)
return pairs
|
4f4cf7d1ec62dea5d128ef4a89fe9ca2ac02f317
| 18,702 |
def is_music(file: File) -> bool:
"""See if the ext is a Music type."""
return file.ext in {
"aac",
"m4a",
"mp3",
"ogg",
"wma",
"mka",
"opus",
"alac",
"ape",
"flac",
"wav",
}
|
7e35a4f63c656d61d534a1a0116c84c6fc30fefd
| 18,703 |
import numpy as np
import torch
def sqeuclidean_pdist(x, y=None):
"""Fast and efficient implementation of ||X - Y||^2 = ||X||^2 + ||Y||^2 - 2 X^T Y
Input: x is a Nxd matrix
y is an optional Mxd matrix
Output: dist is a NxM matrix where dist[i,j] is the square norm between x[i,:] and y[j,:]
if y is not given then use 'y=x'.
i.e. dist[i,j] = ||x[i,:]-y[j,:]||^2
"""
x_norm = (x**2).sum(1).unsqueeze(1)
if y is not None:
y_t = torch.transpose(y, 0, 1)
y_norm = (y**2).sum(1).unsqueeze(0)
else:
y_t = torch.transpose(x, 0, 1)
y_norm = x_norm.squeeze().unsqueeze(0)
dist = x_norm + y_norm - 2.0 * torch.mm(x, y_t)
# get rid of NaNs
dist[torch.isnan(dist)] = 0.
# clamp negative stuff to 0
dist = torch.clamp(dist, 0., np.inf)
# ensure diagonal is 0
if y is None:
dist.fill_diagonal_(0.)
return dist
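# Illustrative usage (not part of the original snippet): pairwise squared
# distances between two small random batches.
x = torch.randn(4, 3)
y = torch.randn(5, 3)
print(sqeuclidean_pdist(x, y).shape)  # torch.Size([4, 5])
print(sqeuclidean_pdist(x).shape)     # torch.Size([4, 4]); diagonal entries are 0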
|
7b9077ed847847bd1030b6f850259717ccc586fe
| 18,704 |
import argparse
def parse_options() -> argparse.Namespace:
"""Parse command line arguments"""
parser: argparse.ArgumentParser = argparse.ArgumentParser(
"Arguments for pretraining")
parser.add_argument('--sample_size', type=int, default=3200,
help='sample size for training')
parser.add_argument('--batch_size', type=int, default=128,
help='input batch size for training')
parser.add_argument('--num_epochs', type=int, default=100,
help='Number of epochs for training')
parser.add_argument('--contrastive_train_epochs', type=int, default=10,
help='Number of epochs for contrastive training')
parser.add_argument('--model_id', type=int, default=0,
help='Model ID for training')
parser.add_argument('--mode', default='train',
help='train | test | debug')
opt: argparse.Namespace = parser.parse_args()
return opt
|
e746865259d024b733ede7f1c353f37a93a3f9f2
| 18,705 |
import os
import shutil
from collections import defaultdict
# NOTE: log, makedirsp, force_create_symlink, copy_tree, _smart_copytree,
# _smart_copyfile and ignore_copy are assumed to be defined elsewhere in the
# original module.
def commit(dir_info):
"""
Moves files from the temp directory to the final directory based
on the input given. Returns list of all files
Keyword arguments:
dir_info -- dictionary of service to dir_info hash
"""
def walk_file_list(base_dir, srcdir, resultdir, done_files=set()):
""" Gets files that haven't been seen yet """
result = []
if not base_dir.endswith(os.sep):
# Ensure base_dir ends with a separator so it can be stripped from paths below
base_dir = base_dir + os.sep
for root, dirnames, filenames in os.walk(srcdir):
after_base = root[len(base_dir):] #strip absolute
if after_base not in done_files:
for filename in filenames:
if os.path.join(after_base, filename) not in done_files:
result.append(os.path.join(resultdir, filename))
return result
result = defaultdict(list)
for service in dir_info:
# copy the directory
serv_dir = dir_info[service]['dir']
base_dir = dir_info[service]['base_dir']
log.info("Deploying %s to %s", service, base_dir)
files = set(os.listdir(serv_dir))
done_files = set()
for dirname, atomic in dir_info[service]['atomic'].items():
srcdir = os.path.join(serv_dir, dirname)
destdir = os.path.join(base_dir, dirname)
# Delete existing dir
if atomic:
if not os.path.islink(destdir):
shutil.rmtree(destdir, ignore_errors=True)
stripped = destdir.rstrip(os.sep)
makedirsp(os.path.dirname(stripped))
force_create_symlink(srcdir, stripped)
else:
# Copy
copy_tree(srcdir, destdir)
result[service].extend(walk_file_list(serv_dir, srcdir, dirname))
done_files.add(dirname.rstrip(os.sep))
# Do the remaining files
for name in files.difference(done_files):
src = os.path.join(serv_dir, name)
dst = os.path.join(base_dir, name)
if os.path.isdir(src):
if os.path.basename(os.path.normpath(src)) == '.git':
continue
_smart_copytree(src, dst, ignore=ignore_copy)
result[service].extend(walk_file_list(serv_dir, src, name, done_files))
else:
_smart_copyfile(src, dst)
result[service].append(name)
return result
|
7d9068e5203cdf961fa8abbcb76ac22ee41e5e34
| 18,706 |
import h5py
def len(file, path):
"""获取dataset第一维长度。
Args:
file: 文件路径。
path: dataset路径。
Returns:
返回长度。
"""
with h5py.File(file, mode='r') as h5_file:
length = h5_file[path].len()
return length
|
d64d4ca0076a2bf76c5cb11dbba68adec15e34fa
| 18,707 |
# NOTE: the original "from re import T" cannot be right; theano's tensor module
# is assumed here, since T.TensorVariable and T.stack are used below.
from theano import tensor as T
def loop(step_fn, n_steps,
sequences=None, outputs_info=None, non_sequences=None,
go_backwards=False):
"""
Helper function to unroll for loops. Can be used to unroll theano.scan.
The parameter names are identical to theano.scan, please refer to here
for more information.
Note that this function does not support the truncate_gradient
setting from theano.scan.
Parameters
----------
step_fn : function
Function that defines calculations at each step.
sequences : TensorVariable or list of TensorVariables
List of TensorVariable with sequence data. The function iterates
over the first dimension of each TensorVariable.
outputs_info : list of TensorVariables
List of tensors specifying the initial values for each recurrent
value. Set an entry to None for outputs that are not fed back
into the step function.
non_sequences: list of TensorVariables
List of theano.shared variables that are used in the step function.
n_steps: int
Number of steps to unroll.
go_backwards: bool
If true the recursion starts at sequences[-1] and iterates
backwards.
Returns
-------
List of TensorVariables. Each element in the list gives the recurrent
values at each time step.
"""
if not isinstance(sequences, (list, tuple)):
sequences = [] if sequences is None else [sequences]
# When backwards reverse the recursion direction
counter = range(n_steps)
if go_backwards:
counter = counter[::-1]
output = []
# ====== check if outputs_info is None ====== #
if outputs_info is not None:
prev_vals = outputs_info
else:
prev_vals = []
output_idx = [i for i in range(len(prev_vals)) if prev_vals[i] is not None]
# ====== check if non_sequences is None ====== #
if non_sequences is None:
non_sequences = []
# ====== Main loop ====== #
for i in counter:
step_input = [s[i] for s in sequences] + \
[prev_vals[idx] for idx in output_idx] + \
non_sequences
out_ = step_fn(*step_input)
# The returned values from step can be either a TensorVariable,
# a list, or a tuple. Below, we force it to always be a list.
if isinstance(out_, T.TensorVariable):
out_ = [out_]
if isinstance(out_, tuple):
out_ = list(out_)
output.append(out_)
prev_vals = output[-1]
# iterate over each scan output and convert it to same format as scan:
# [[output11, output12,...output1n],
# [output21, output22,...output2n],...]
output_scan = []
for i in range(len(output[0])):
l = map(lambda x: x[i], output)
output_scan.append(T.stack(*l))
return output_scan
|
8abd7c0ccfcabd3e44eca10c6d30a7d9a7add627
| 18,708 |
import torch
def get_model(hidden_size=20, n_hidden=5, in_dim=2, out_dim=1, penultimate=False, use_cuda=True, bn=False):
"""
Initialize the model and send to gpu
"""
# Net is assumed to be a model class defined elsewhere in the original module.
model = Net(in_dim, out_dim, n_hidden=n_hidden, hidden_size=hidden_size,
activation=torch.nn.ReLU(), bias=True, penultimate=penultimate, bn=bn)
if use_cuda:
model=model.cuda()
return model
|
fd7169276a2a420ce59733ac9687a289e7c3b0af
| 18,709 |
import numpy as np
def convert_sweep(sweep,sweep_loc,new_sweep_loc,AR,taper):
"""This converts arbitrary sweep into a desired sweep given
wing geometry.
Assumptions:
None
Source:
N/A
Inputs:
sweep [degrees]
sweep_loc [unitless]
new_sweep_loc [unitless]
AR [unitless]
taper [unitless]
Outputs:
quarter chord sweep
Properties Used:
N/A
"""
sweep_LE = np.arctan(np.tan(sweep)+4*sweep_loc*
(1-taper)/(AR*(1+taper)))
new_sweep = np.arctan(np.tan(sweep_LE)-4*new_sweep_loc*
(1-taper)/(AR*(1+taper)))
return new_sweep
|
17b52460f8a9d32cc2e6f407ccad2851c3edb1f8
| 18,710 |
def is_circular(linked_list):
"""
Determine whether the Linked List is circular or not
Args:
linked_list(obj): Linked List to be checked
Returns:
bool: Return True if the linked list is circular, return False otherwise
The way we'll do this is by having two pointers, called "runners", moving
through the list at different rates. Typically we have a "slow" runner
which moves at one node per step and a "fast" runner that moves at two
nodes per step.
If a loop exists in the list, the fast runner will eventually move behind
the slow runner as it moves to the beginning of the loop. Eventually it will
catch up to the slow runner and both runners will be pointing to the same
node at the same time. If this happens then you know there is a loop in
the linked list. Below is an example where we have a slow runner
and a fast runner (the red arrow).
"""
slow = linked_list.head
fast = linked_list.head
#as fast runner will reach end first if there is no loop so
#adding a None check on just fast should be enough
while fast and fast.next:
slow = slow.next
#move fast runner 2 times to make it fast as compared to slow runner
fast = fast.next.next
if fast == slow:
return True
# If we get to a node where fast doesn't have a next node or doesn't exist itself,
# the list has an end and isn't circular
return False
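# Illustrative usage (not part of the original snippet). The Node and
# LinkedList classes below are minimal hypothetical stand-ins for whatever
# list implementation the original code uses.
class Node:
    def __init__(self, value):
        self.value = value
        self.next = None

class LinkedList:
    def __init__(self, head=None):
        self.head = head

a, b, c = Node(1), Node(2), Node(3)
a.next, b.next = b, c
print(is_circular(LinkedList(a)))  # False: the list terminates at c
c.next = a                         # introduce a cycle back to the head
print(is_circular(LinkedList(a)))  # True: the fast and slow runners meet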
|
5a641df602f983de78c9c74b825847412aa54c21
| 18,711 |
from typing import List
from typing import Any
import pandas as pd
from sklearn.model_selection import KFold
# NOTE: bm and cv_split are assumed to be helper functions defined elsewhere in
# the original module.
def bm_cv(
X_train: pd.DataFrame,
y_train: pd.Series,
cv: int,
metrics: List[Any],
metrics_proba: List[Any],
metric_kwargs: dict,
model_dict: dict,
):
"""
Perform cross validation benchmark with all models specified under model_dictionary, using the metrics defined.
Args:
X_train: Array of features, used to train the model
y_train: Array of label, used to train the model
cv: Number of cross-validation fold
metrics: List of metrics that we will use to score our validation performance
metrics_proba : List of metrics that we will use to score our validation performance.
This is only applicable for classification problem. The metrics under `metrics_proba` uses the predicted
probability instead of predicted class
metric_kwargs: Dictionary containing the extra arguments needed for specific metrics,
listed in metrics and metrics_proba
model_dict: Model_dictionary, containing the model_name as the key and catalyst.ml.model object as value.
Returns:
DataFrame, which contains all of the metrics value for each of the model specified under model_dictionary,
as well as the cross-validation index.
"""
result_cv_df = pd.DataFrame()
kf = KFold(n_splits=cv, shuffle=True, random_state=42)
for cv_idx, (dev_idx, val_idx) in enumerate(kf.split(X_train)):
X_dev, X_val, y_dev, y_val = cv_split(X_train, y_train, dev_idx, val_idx)
df = bm(
X_dev,
y_dev,
X_val,
y_val,
metrics,
metrics_proba,
metric_kwargs,
model_dict,
)
df["cv_idx"] = cv_idx
result_cv_df = pd.concat([result_cv_df, df])
return result_cv_df
|
d1d1944fd4802f196ca7a1a78cf0f55222f6886c
| 18,712 |
def index_get(array, *argv):
"""
checks if a index is available in the array and returns it
:param array: the data array
:param argv: index integers
:return: None if not available or the return value
"""
try:
for index in argv:
array = array[index]
return array
# there is either no info available or no popular times
# TypeError: rating/rating_n/populartimes wrong or not available
except (IndexError, TypeError):
return None
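# Illustrative usage (not part of the original snippet): walk nested
# containers safely, getting None instead of an exception.
place = {"result": [{"rating": 4.5}]}
print(index_get(place, "result", 0, "rating"))  # 4.5
print(index_get(place, "result", 3, "rating"))  # None (index out of range)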
|
d7fbf0011fd14da905d167735e6900b1bbaf1a8f
| 18,713 |
def _env_vars_available() -> bool:
"""
Returns: `True` if all required environment variables for the Postgres connection are set, `False` otherwise
"""
return all(env_var in environ for env_var in DBConfigProviderEnvVarBasedImpl.required_env_vars)
|
8fcc9c06115056bbe8b3b691d192186c0313aeef
| 18,714 |
def precisionatk_implementation(y_true, y_pred, k):
"""Fujnction to calculate precision at k for a given sample
Arguments:
y_true {list} -- list of actual classes for the given sample
y_pred {list} -- list of predicted classes for the given sample
k {[int]} -- top k predictions we are interested in
"""
# if k = 0 return 0 as we should never have k=0
# as k is always >=1
if k == 0:
return 0
# as we are interested in top k predictions
y_pred = y_pred[:k]
# convert predictions to set
pred_set = set(y_pred)
# convert actual values to set
true_set = set(y_true)
# find common values in both
common_values = pred_set.intersection(true_set)
# return length of common values over k
return len(common_values) / len(y_pred[:k])
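# Illustrative usage (not part of the original snippet): two of the top-3
# predictions appear among the true labels, so precision@3 is 2/3.
y_true = [1, 2, 3]
y_pred = [2, 3, 4, 1]
print(precisionatk_implementation(y_true, y_pred, 3))  # 0.666...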
|
945caa95b32681939569ca675475e2527dbdee78
| 18,715 |
import pandas
def add_plane_data(
data_frame: pandas.DataFrame,
file_path: str,
target_col: str = const.DF_PLANE_COL_NAME
) -> pandas.DataFrame:
"""Merges DataFrame with information about the flight planes
Args:
data_frame (pandas.DataFrame): Source DataFrame
file_path (str): Source file path
target_col (str): Target column to merge
Returns:
pandas.DataFrame: Source DataFrame with additional information
"""
planes = df_fileloader.load_agenda(file_path)
data_frame[target_col] = data_frame[target_col].astype(str)
planes[target_col] = planes[target_col].astype(str)
data_frame = pandas.merge(data_frame, planes, how='outer', on=[target_col], indicator=True)
unmatched = data_frame.query('_merge == "left_only"').groupby([target_col]).size().reset_index(name='count')
if not unmatched.empty:
err_msg = 'There\'s missing information about the following planes:'
for index, row in unmatched.iterrows():
err_msg += '\n {} with {} occurrences.'.format(row[target_col], row['count'])
utility.eprint(err_msg)
return
return data_frame.query('_merge == "both"').drop(columns=['_merge'])
|
0dbe3987cb4ee26f0ae6670173c65a3622ca9b5d
| 18,716 |
def record(location):
"""Creates an empty record."""
draft = RDMDraft.create({})
record = RDMRecord.publish(draft)
return record
|
77c8069e4fd894f2ed1d760fc983a9a2094c0f6d
| 18,717 |
import numpy as np
from scipy.signal import firwin2, filtfilt
# NOTE: _remove_edge is assumed to be a helper defined elsewhere in the module.
def firfls(x, f_range, fs=1000, w=3, tw=.15):
"""
Filter signal with an FIR filter
*Like firls in MATLAB
Parameters
----------
x : array-like, 1d
Time series to filter
f_range : (low, high), Hz
Cutoff frequencies of bandpass filter
fs : float, Hz
Sampling rate
w : float
Length of the filter in terms of the number of cycles
of the oscillation whose frequency is the low cutoff of the
bandpass filter
tw : float
Transition width of the filter in normalized frequency space
Returns
-------
x_filt : array-like, 1d
Filtered time series
"""
if w <= 0:
raise ValueError(
'Number of cycles in a filter must be a positive number.')
if np.logical_or(tw < 0, tw > 1):
raise ValueError('Transition width must be between 0 and 1.')
nyq = fs / 2
if np.any(np.array(f_range) > nyq):
raise ValueError('Filter frequencies must be below nyquist rate.')
if np.any(np.array(f_range) < 0):
raise ValueError('Filter frequencies must be positive.')
Ntaps = int(np.floor(w * fs / f_range[0]))
if len(x) < Ntaps:
raise RuntimeError(
'Length of filter is longer than data. '
'Provide more data or a shorter filter.')
# Characterize desired filter
f = [0, (1 - tw) * f_range[0] / nyq, f_range[0] / nyq,
f_range[1] / nyq, (1 + tw) * f_range[1] / nyq, 1]
m = [0, 0, 1, 1, 0, 0]
if any(np.diff(f) < 0):
raise RuntimeError(
'Invalid FIR filter parameters.'
'Please decrease the transition width parameter.')
# Perform filtering
taps = firwin2(Ntaps, f, m)
x_filt = filtfilt(taps, [1], x)
if any(np.isnan(x_filt)):
raise RuntimeError(
'Filtered signal contains nans. Adjust filter parameters.')
# Remove edge artifacts
return _remove_edge(x_filt, Ntaps)
|
71c7c3fc229ce8745f5940593a6e5a2f9bf12490
| 18,718 |
def extract_and_coadd(ra, dec, pm_ra, pm_dec, match_radius=4./3600.,
search_radius=25./60, sigma_clip=None, query_timeout=60.,
upper_limits=True, return_exps=False):
"""
The top-level function of this module, extract_and_coadd finds sources in
GALEX archive matching the target while accounting for its proper motion
between observing visits, then coadds the fluxes from each visit.
Parameters
----------
ra : float
Right ascension of target in decimal degrees.
dec : float
Declination of target in decimal degrees.
pm_ra : float
Right ascension proper motion of target in mas/yr.
pm_dec : float
Declination proper motion of target in mas/yr.
match_radius : float
Radius within which to consider a GALEX source a match to the target
in degrees. For reference, the 1-sigma astrometric uncertainty is 0.4
arcseconds for GALEX.
search_radius : float
Radius in which to query the MCAT in degrees. If upper limits are
desired, this should be large enough for the MCAT to return results
whenever exposures were taken near enough that the target could have
been in the aperture.
sigma_clip : float
Exclude fluxes > this many sigma from median flux relative to their
measurement error. Careful with this. Stars show real variability
that is often well beyond measurement errors, so it is probably
unwise to sigma clip in most cases.
query_timeout : float
Seconds to wait for server to respond before giving up.
upper_limits : bool
Estimate upper limits for exposures where there is no match for the
source.
return_exps : bool
If True, return all the data provided by extract_source.
Returns
-------
nuv_coadd : tuple
Coadded flux and error in counts s-1 and, optionally, exposure info returned by
extract_source. Upper limits show up as -999 for the flux with a positive error.
fuv_coadd : tuple
As above, for fuv.
"""
data = extract_source(ra, dec, pm_ra, pm_dec, match_radius,
search_radius, query_timeout, upper_limits)
nuv_data, fuv_data = data
nuv = list(coadd_fluxes(*nuv_data[:3], sigma_clip=sigma_clip))
fuv = list(coadd_fluxes(*fuv_data[:3], sigma_clip=sigma_clip))
if return_exps:
nuv.append(nuv_data)
fuv.append(fuv_data)
nuv, fuv = map(tuple, (nuv, fuv))
return (nuv, fuv)
|
d28d62877ba0c8a99aba1596128dc764bc3e19b7
| 18,719 |
import aiohttp
from aiohttp import ClientSession
# NOTE: from_dict (e.g. dacite.from_dict), CreatePayoutRequest/Response and the
# RAZORPAY_* constants are assumed to be defined or imported elsewhere in the
# original module.
async def create_payout(
session: ClientSession, data: CreatePayoutRequest
) -> CreatePayoutResponse:
"""
Create a payout.
"""
url = RAZORPAY_BASE_URL + "/payouts"
async with session.post(
url,
json=data.__dict__,
auth=aiohttp.BasicAuth(RAZORPAY_KEY_ID, RAZORPAY_KEY_SECRET),
) as resp:
response = await resp.json()
print(response, resp.status)
return from_dict(data_class=CreatePayoutResponse, data=response)
|
c4b9dae09111c83efb1d5a9c5fd88050f11b5510
| 18,720 |
def eval_ocr_metric(pred_texts, gt_texts):
"""Evaluate the text recognition performance with metric: word accuracy and
1-N.E.D. See https://rrc.cvc.uab.es/?ch=14&com=tasks for details.
Args:
pred_texts (list[str]): Text strings of prediction.
gt_texts (list[str]): Text strings of ground truth.
Returns:
eval_res (dict[str: float]): Metric dict for text recognition, include:
- word_acc: Accuracy in word level.
- word_acc_ignore_case: Accuracy in word level, ignore letter case.
- word_acc_ignore_case_symbol: Accuracy in word level, ignore
letter case and symbol. (default metric for
academic evaluation)
- char_recall: Recall in character level, ignore
letter case and symbol.
- char_precision: Precision in character level, ignore
letter case and symbol.
- 1-N.E.D: 1 - normalized_edit_distance.
"""
assert isinstance(pred_texts, list)
assert isinstance(gt_texts, list)
assert len(pred_texts) == len(gt_texts)
match_res = count_matches(pred_texts, gt_texts)
eps = 1e-8
char_recall = 1.0 * match_res['true_positive_char_num'] / (
eps + match_res['gt_char_num'])
char_precision = 1.0 * match_res['true_positive_char_num'] / (
eps + match_res['pred_char_num'])
word_acc = 1.0 * match_res['match_word_num'] / (
eps + match_res['gt_word_num'])
word_acc_ignore_case = 1.0 * match_res['match_word_ignore_case'] / (
eps + match_res['gt_word_num'])
word_acc_ignore_case_symbol = 1.0 * match_res[
'match_word_ignore_case_symbol'] / (
eps + match_res['gt_word_num'])
eval_res = {}
eval_res['word_acc'] = word_acc
eval_res['word_acc_ignore_case'] = word_acc_ignore_case
eval_res['word_acc_ignore_case_symbol'] = word_acc_ignore_case_symbol
eval_res['char_recall'] = char_recall
eval_res['char_precision'] = char_precision
eval_res['1-N.E.D'] = 1.0 - match_res['ned']
eval_res['BLEU'] = match_res['bleu']
for key, value in eval_res.items():
eval_res[key] = float('{:.4f}'.format(value))
return eval_res
|
0ec92be231d93abf9db8247369ba5ea546bd1b17
| 18,721 |
import CloudFlare
def get_all_zones():
"""Return a list of all available zones."""
cf = CloudFlare.CloudFlare(raw=True)
page_number = 0
total_pages = 1
all_zones = []
while page_number < total_pages:
page_number += 1
raw_results = cf.zones.get(params={'per_page':100, 'page':page_number})
zones = raw_results['result']
all_zones += zones
total_pages = raw_results['result_info']['total_pages']
return all_zones
|
0b9f6bf9b7b8fe274f7c6f856abf1d9397384c3c
| 18,722 |
def entry_id(e):
"""entry identifier which is not the bibtex key
"""
authortitle = ''.join([author_id(e),title_id(e)])
return (e.get('doi','').lower(), authortitle)
|
7c663d6c2bbdfcef8168c11a78e176e634cf644b
| 18,723 |
def AskNumber(text="unknown task"):
"""
Asks the user to interactively input a number (float or int) at any point in the script, and returns the input number.
| __option__ | __description__
| --- | ---
| *text | an optional string to identify for what purpose the chosen number will be used.
"""
def ValidateNumber(text):
try:
innumber = float(input("\n\nwrite a comma or integer number to use for "+str(text)+" (example: 15 or 15.83)\nnumber = "))
except ValueError:
print("""\n---> error: the number must be either a floating point comma or integer number""")
return ValidateNumber(text)
return innumber
return ValidateNumber(text)
|
41949d0a2e2d87b5cdb26d2db9bff9a64fbeeb1d
| 18,724 |
def TokenEmphasis(character="_"):
"""
Italic (`<i>`, `<em>`) text is rendered with one asterisk or underscore
"""
assert character in ("_", "*")
return {
"type": "Characters",
"data": character,
"_md_type": mdTokenTypes["TokenEmphasis"],
}
|
2012fdeb9ca4d9483b4cc403010f9900dcc1230f
| 18,725 |
import os
import numpy as np
# NOTE: ``data`` is assumed to be a package (providing data.__path__) imported
# elsewhere in the original module.
def get_data(filename, **kwargs):
"""
get selected data file
"""
filepath = os.path.join(data.__path__[0], filename)
return np.genfromtxt(filepath, **kwargs)
|
dcf95489b95c3900819f10cad2a503b27fc5d88c
| 18,726 |
import random
def generate_plan(suite, node):
"""Randomly generates a plan, completely ignoring norms. This is mainly for testing the norm driven algorithm"""
plan = [node]
# Rebinding the name "next_actions" would shadow the helper function and raise
# UnboundLocalError, so keep the local result under a different name.
actions = next_actions(suite, node)
# print "Next actions ", actions
while actions != []:
a = random.sample(actions, 1)[0]
node = a.path[1:]
plan[len(plan):] = node
node = node[-1] # if we have a sequence of actions
actions = next_actions(suite, node)
return plan
|
30d967986d1c4237b4b312470d47d1ecce06ecbc
| 18,727 |
import numpy as np
def update_y(pred_coords, ypart_tracker, history=1500):
"""
Update the y-tracker, keeping only the last `history` detections.
:param pred_coords: predicted keypoint coordinates (indexable by keypoint id)
:param ypart_tracker: dict of tracked y-coordinate histories to update
:return: updated y-tracker
"""
anks_val = (pred_coords[15] + pred_coords[16]) * 0.5
shdr_val = (pred_coords[5] + pred_coords[6]) * 0.5
ypart_tracker['anks'] = np.append(ypart_tracker['anks'], [anks_val], axis=0)
ypart_tracker['shdr'] = np.append(ypart_tracker['shdr'], [shdr_val], axis=0)
ypart_tracker['anks-shdr'] = np.append(ypart_tracker['anks-shdr'], [anks_val - shdr_val], axis=0)
ypart_tracker = {k: v[-history:] for k, v in ypart_tracker.items()}
return ypart_tracker
|
ee1880e27b121dae4661a93286bf07117bc7bb34
| 18,728 |
import numpy as np
def augmentData(features, labels):
"""
Augment the data by appending mirrored copies: features are flipped along
axis 2 and the corresponding labels are negated.
:param features: array of input samples
:param labels: array of labels
:return: the augmented (features, labels)
"""
features = np.append(features, features[:, :, ::-1], axis=0)
labels = np.append(labels, -labels, axis=0)
return features, labels
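# Illustrative usage (not part of the original snippet) with toy arrays: the
# augmented set contains the originals plus mirrored copies with negated labels.
feats = np.arange(8).reshape(1, 2, 4)  # one 2x4 sample
labs = np.array([0.5])
aug_feats, aug_labs = augmentData(feats, labs)
print(aug_feats.shape)  # (2, 2, 4)
print(aug_labs)         # [ 0.5 -0.5]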
|
ef684ae2bf9eb4fca9a0636d3b0089020805f4be
| 18,729 |
import os
import numpy as np
import tqdm
import torch
from torch.autograd import Variable
# NOTE: opt, filter, generator, adversarial_loss, LongTensor, the optimizers,
# dataloaders, classifiers, utils and artifacts_path are assumed to be defined
# in the surrounding training script.
def train_adversary():
""" Trains an adversary on data from the data censoring process. """
def accuracy(pred, true):
u = true.cpu().numpy().flatten()
p = np.argmax(pred.cpu().detach().numpy(), axis=1)
acc = np.sum(u == p)/len(u)
return acc
tmp_secret_classifier.train()
adv_secret_classifier.train()
for i_epoch in range(5):
for i_batch, batch in tqdm.tqdm(enumerate(train_dataloader, 0)):
imgs = batch['image'].cuda()
utility = batch['utility'].float().cuda()
secret = batch['secret'].float().cuda()
secret = secret.view(secret.size(0))
utility = utility.view(utility.size(0))
batch_size = imgs.shape[0]
z1 = torch.randn(batch_size, opt.latent_dim).cuda()
if opt.use_filter:
filter_imgs = filter(imgs, z1, secret.long())
else:
filter_imgs = imgs
if opt.use_real_fake:
z2 = torch.randn(batch_size, opt.latent_dim).cuda()
gen_secret = Variable(LongTensor(np.random.choice([0.0, 1.0], batch_size)))
filter_imgs = generator(filter_imgs, z2, gen_secret)
# train tmp
optimizer_tmp.zero_grad()
secret_pred = tmp_secret_classifier(imgs)
loss = adversarial_loss(secret_pred, secret.long())
loss.backward()
optimizer_tmp.step()
if i_batch % 50 == 0:
acc = accuracy(secret_pred, secret)
print("secret_tmp_acc: ", acc)
# train adversary
optimizer_adv.zero_grad()
secret_pred = adv_secret_classifier(filter_imgs.detach())
loss = adversarial_loss(secret_pred, secret.long())
loss.backward()
optimizer_adv.step()
if i_batch % 50 == 0:
acc = accuracy(secret_pred, secret)
print("secret_adv_acc: ", acc)
utils.save_model(adv_secret_classifier, os.path.join(artifacts_path, "adv_secret_classifier.hdf5"))
accs1 = []
accs2 = []
tmp_secret_classifier.eval()
adv_secret_classifier.eval()
for i_batch, batch in tqdm.tqdm(enumerate(valid_dataloader, 0)):
imgs = batch['image'].cuda()
secret = batch['secret'].float().cuda()
secret = secret.view(secret.size(0))
batch_size = imgs.shape[0]
z1 = torch.randn(batch_size, opt.latent_dim).cuda()
if opt.use_filter:
filter_imgs = filter(imgs, z1, secret.long())
else:
filter_imgs = imgs
if opt.use_real_fake:
z2 = torch.randn(batch_size, opt.latent_dim).cuda()
gen_secret = Variable(LongTensor(np.random.choice([0.0, 1.0], batch_size)))
filter_imgs = generator(filter_imgs, z2, gen_secret)
secret_pred = adv_secret_classifier(filter_imgs.detach())
acc = accuracy(secret_pred, secret)
accs1.append(acc)
secret_pred = tmp_secret_classifier(imgs)
acc = accuracy(secret_pred, secret)
accs2.append(acc)
acc1 = np.mean(accs1)
acc2 = np.mean(accs2)
print("test_secret_adv_acc: ", acc1)
print("test_secret_tmp_acc: ", acc2)
return acc1
|
33236a2260aa6df11a0b99bd58b5601fcd9ef052
| 18,730 |
import numpy as np
def sigmoid(x):
""" Implement 1 / ( 1 + exp( -x ) ) in terms of tanh."""
return 0.5 * (np.tanh(x / 2.) + 1)
|
95d6dd0cd62db2c43df419358ef368609ede42c8
| 18,731 |
def get_unique_output_values(signals):
"""
Based on segment length, determine how many of the possible four
uniquely identifiable digits are in the set of signals.
"""
unique_digit_count = 0
for signal in signals:
for digit in signal["output"]:
if len(digit) in (2, 3, 4, 7):
unique_digit_count += 1
return unique_digit_count
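# Illustrative usage (not part of the original snippet): each signal is assumed
# to be a dict with an "output" list of segment strings; only 1, 4, 7 and 8
# use a unique number of segments (2, 4, 3 and 7 respectively).
signals = [{"output": ["fdgacbe", "cefdb", "cefbgd", "gcbe"]}]
print(get_unique_output_values(signals))  # 2 ("fdgacbe" and "gcbe")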
|
84098d4d294bfdd1b983ea70d51da1453b17245a
| 18,732 |
import itertools
def split_and_pad(s, sep, nsplit, pad=None):
""" Splits string s on sep, up to nsplit times.
Returns the results of the split, potentially padded with
additional items, up to a total of nsplit items.
"""
l = s.split(sep, nsplit)
return itertools.chain(l, itertools.repeat(pad, nsplit+1-len(l)))
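# Illustrative usage (not part of the original snippet): the result always has
# nsplit+1 items, padded with `pad` when the split yields fewer.
print(list(split_and_pad("a:b:c:d", ":", 2)))    # ['a', 'b', 'c:d']
print(list(split_and_pad("a", ":", 2)))          # ['a', None, None]
print(list(split_and_pad("a", ":", 2, pad="")))  # ['a', '', '']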
|
6c439301df7109d9b01a06a87bd7d6adafb8ee1e
| 18,733 |
def transpose_report(report):
"""Transposes the report. Columns into rows"""
return list(map(list, zip(*report)))
|
bc59f9106496b0b830fdc9ac0266f3b774a8f759
| 18,734 |
def _shape_from_resolution(resolution):
"""
Calculate the shape of the global Earth relief grid given a resolution.
Parameters
----------
resolution : str
Same as the input for load_earth_relief
Returns
-------
shape : (nlat, nlon)
The calculated shape.
Examples
--------
>>> _shape_from_resolution('60m')
(181, 361)
>>> _shape_from_resolution('30m')
(361, 721)
>>> _shape_from_resolution('10m')
(1081, 2161)
"""
minutes = int(resolution[:2])
nlat = 180*60//minutes + 1
nlon = 360*60//minutes + 1
return (nlat, nlon)
|
c726d599696cee2259bc450606e63480b0991451
| 18,735 |
def __virtual__():
"""
Load module only if cx_Oracle installed
"""
if HAS_CX_ORACLE:
return __virtualname__
return (
False,
"The oracle execution module not loaded: python oracle library not found.",
)
|
a64eddc8b78e5d7b3c8e0588a72a0c238b4c12d0
| 18,736 |
import pkgutil
import os
# NOTE: ``plugins`` is assumed to be the port.plugins package, imported
# elsewhere in the original module.
def find_plugins():
""" Finds all Python packages inside the port.plugins directory """
return [os.path.join(importer.path, name) for importer, name, ispkg in pkgutil.iter_modules(plugins.__path__) if ispkg]
|
06b1217840686a4f4ad90b1f9b7dd32e8377df05
| 18,737 |
import os
import cv2
# NOTE: params, fatal_error and _debug are assumed to come from the surrounding
# PlantCV module.
def saturation(rgb_img, threshold=255, channel="any"):
"""Return a mask filtering out saturated pixels.
Inputs:
rgb_img = RGB image
threshold = value for threshold, above which is considered saturated
channel = how many channels must be saturated for the pixel to be masked out ("any", "all")
Returns:
masked_img = A binary image with the saturated regions blacked out.
:param rgb_img: np.ndarray
:param threshold: int
:param channel: str
:return masked_img: np.ndarray
"""
# Mask red, green, and blue saturation separately
b, g, r = cv2.split(rgb_img)
b_saturated = cv2.inRange(b, threshold, 255)
g_saturated = cv2.inRange(g, threshold, 255)
r_saturated = cv2.inRange(r, threshold, 255)
# Combine channel masks
if channel.lower() == "any":
# Consider a pixel saturated if any channel is saturated
saturated = cv2.bitwise_or(b_saturated, g_saturated)
saturated = cv2.bitwise_or(saturated, r_saturated)
elif channel.lower() == "all":
# Consider a pixel saturated only if all channels are saturated
saturated = cv2.bitwise_and(b_saturated, g_saturated)
saturated = cv2.bitwise_and(saturated, r_saturated)
else:
fatal_error(str(channel) + " is not a valid option. Channel must be either 'any', or 'all'.")
# Invert "saturated" before returning, so saturated = black
bin_img = cv2.bitwise_not(saturated)
_debug(visual=bin_img, filename=os.path.join(params.debug_outdir, str(params.device), '_saturation_threshold.png'))
return bin_img
|
588bdc18f87af48b322b653170126516942594f5
| 18,738 |
def get_fuel_from(mass: int) -> int:
"""Gets fuel from mass.
Args:
mass (int): mass for the fuel
Returns:
int: fuel necessary for the mass
"""
return mass // 3 - 2
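# Quick check (not part of the original snippet) against the well-known
# Advent of Code 2019 day 1 examples.
print(get_fuel_from(12))      # 2
print(get_fuel_from(1969))    # 654
print(get_fuel_from(100756))  # 33583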
|
37390c8cb9ba7e84c7b5c14841528d6c38f1589e
| 18,739 |
import numpy as np
import dolfin as df
import finmag
# NOTE: Zeeman and mu0 are assumed to be imported from finmag elsewhere in the
# original test module.
def test_energy_density_function():
"""
Compute the Zeeman energy density over the entire mesh, integrate it, and
compare it to the expected result.
"""
mesh = df.RectangleMesh(df.Point(-50, -50), df.Point(50, 50), 10, 10)
unit_length = 1e-9
H = 1e6
# Create simulation object.
sim = finmag.Simulation(mesh, 1e5, unit_length=unit_length)
# Set uniform magnetisation.
def m_ferromagnetic(pos):
return np.array([0., 0., 1.])
sim.set_m(m_ferromagnetic)
# Assign zeeman object to simulation
sim.add(Zeeman(H * np.array([0., 0., 1.])))
# Get energy density function
edf = sim.get_interaction('Zeeman').energy_density_function()
# Integrate it over the mesh and compare to expected result.
total_energy = df.assemble(edf * df.dx) * unit_length
expected_energy = -mu0 * H
assert (total_energy + expected_energy) < 1e-6
|
10a4da043554a93c3d6c90f32c554741b2fe2c7b
| 18,740 |
import numpy as np
# NOTE: x, true_stim, hypothetical_stim, likelihood_matrix, behaviour,
# my_gaussian and moments_myfunc are assumed to be defined in the surrounding
# tutorial code.
def my_Bayes_model_mse(params):
""" Function fits the Bayesian model from Tutorial 4
Args :
params (list of positive floats): parameters used by the model (params[0] = posterior scaling)
Returns :
(scalar) negative log-likelihood :sum of log probabilities
"""
trial_ll = np.zeros_like(true_stim)
## Create the prior Matrix outside of trial loop
alpha=params[0]
prior_mean = 0
prior_sigma1 = 0.5
prior_sigma2 = 3
prior1 = my_gaussian(x, prior_mean, prior_sigma1)
prior2 = my_gaussian(x, prior_mean, prior_sigma2)
prior_combined = (1-alpha) * prior1 + (alpha * prior2)
prior_combined = prior_combined / np.sum(prior_combined)
prior_matrix = np.tile(prior_combined, hypothetical_stim.shape[0]).reshape((hypothetical_stim.shape[0],-1))
## Create posterior matrix outside of trial loop
posterior_matrix = np.zeros_like(likelihood_matrix)
for i_posterior in np.arange(posterior_matrix.shape[0]):
posterior_matrix[i_posterior,:] = np.multiply(prior_matrix[i_posterior,:], likelihood_matrix[i_posterior,:])
posterior_matrix[i_posterior,:] = posterior_matrix[i_posterior,:] / np.sum(posterior_matrix[i_posterior,:])
## Create Binary decision matrix outside of trial loop
binary_decision_matrix = np.zeros_like(posterior_matrix)
for i_posterior in np.arange(posterior_matrix.shape[0]):
mean, _, _ = moments_myfunc(x, posterior_matrix[i_posterior,:])
idx = np.argmin(np.abs(x - mean))
binary_decision_matrix[i_posterior,idx] = 1
# Loop over stimuli
for i_stim in np.arange(true_stim.shape[0]):
input_matrix = np.zeros_like(posterior_matrix)
for i in np.arange(x.shape[0]):
input_matrix[:, i] = my_gaussian(hypothetical_stim, true_stim[i_stim], 1)
input_matrix[:, i] = input_matrix[:, i] / np.sum(input_matrix[:, i])
marginalization_matrix = input_matrix * binary_decision_matrix
marginal = np.sum(marginalization_matrix, axis=0)
marginal = marginal / np.sum(marginal)
action = behaviour[i_stim]
idx = np.argmin(np.abs(x - action))
trial_ll[i_stim] = np.log(marginal[idx] + np.finfo(float).eps)
neg_ll = -np.sum(trial_ll)
return neg_ll
|
b4a03e3edd0c9894b518ecd2589949aed8337479
| 18,741 |
from pyramid.httpexceptions import HTTPUnsupportedMediaType
def validate_request_tween_factory(handler, registry):
"""
Updates request.environ's REQUEST_METHOD to be X_REQUEST_METHOD if present.
Asserts that POST (and similar) requests use the application/json content
type, with an exception for /metadata/* endpoints.
Apache config:
SetEnvIf Request_Method HEAD X_REQUEST_METHOD=HEAD
"""
def validate_request_tween(request):
# Fix Request method changed by mod_wsgi.
# See: https://github.com/GrahamDumpleton/mod_wsgi/issues/2
environ = request.environ
if 'X_REQUEST_METHOD' in environ:
environ['REQUEST_METHOD'] = environ['X_REQUEST_METHOD']
if request.method in ('GET', 'HEAD'):
# If GET request, don't need to check `request.content_type`
# Includes page text/html requests.
return handler(request)
elif request.content_type != 'application/json':
if request.content_type == 'application/x-www-form-urlencoded' and request.path[0:10] == '/metadata/':
# Special case to allow us to POST to metadata TSV requests via form submission
return handler(request)
detail = "Request content type %s is not 'application/json'" % request.content_type
raise HTTPUnsupportedMediaType(detail)
return handler(request)
return validate_request_tween
|
909e7d67044e31c1b3c0a97774d398f7d64d40bb
| 18,742 |
async def get_rank(display_number: int, minimal_msg_number: int,
display_total_number: int, group_id: int) -> str:
""" 获取排行榜 """
repeat_list = recorder_obj.repeat_list(group_id)
msg_number_list = recorder_obj.msg_number_list(group_id)
ranking = Ranking(group_id, display_number, minimal_msg_number,
display_total_number, repeat_list, msg_number_list)
str_data = await ranking.ranking()
if not str_data:
str_data = '暂时还没有满足条件的数据~>_<~'
return str_data
|
f1ca183890e33b15b77d7693771b37c33af9535e
| 18,743 |
def feedback(request):
"""FeedbackForm"""
if (request.method == 'POST'):
form = forms.FeedbackForm(request.POST)
# pdb.set_trace()
if form.is_valid():
form.save()
type = form.cleaned_data['type']
type = dict(form.fields['type'].choices)[type]
settings.EMAIL_HOST_USER += '[email protected]'
send_mail(
'[' + type + '] ' + form.cleaned_data['title'],
'A new feedback was posted on JobPort' + '\n\n' +
form.cleaned_data['body'], ['[email protected]']
)
settings.EMAIL_HOST_USER += ''
messages.success(
request, 'Thanks for filling your precious feedback! :) ')
return HttpResponseRedirect('/')
else:
context = {'form': form}
return render(request, 'jobport/feedback.html', context)
else:
form = forms.FeedbackForm()
context = {'form': form}
return render(request, 'jobport/feedback.html', context)
|
188dfa77d7e72555062e25acc15518f90c252b33
| 18,744 |
def get_convolutional_args(call, include_buffers=False, remove_constants=False):
"""A method to extract the arguments from conv2d or depthwise_conv2d extern call."""
args = call.args
conv_args = []
remove_indices = [0]
if remove_constants:
remove_indices += [41, 42, 44, 45]
for i, arg in enumerate(args):
if i in remove_indices:
continue
elif isinstance(arg, tvm.tir.expr.IntImm) or isinstance(arg, tvm.tir.expr.FloatImm):
conv_args.append(arg.value)
elif isinstance(arg, tvm.tir.expr.Load) and not include_buffers:
conv_args.append(arg.index)
else:
conv_args.append(arg)
return conv_args
|
01db4d4e025bb9212bcb20a8852a7d4f1250e4b2
| 18,745 |
def view_party(party_id):
"""View dashboard for that party."""
party = party_service.find_party(party_id)
if party is None:
abort(404)
days = party_service.get_party_days(party)
days_until_party = (party.starts_at.date() - date.today()).days
orga_count = orga_team_service.count_memberships_for_party(party.id)
orga_team_count = orga_team_service.count_teams_for_party(party.id)
seating_area_count = seating_area_service.count_areas_for_party(party.id)
seat_count = seat_service.count_seats_for_party(party.id)
ticket_sale_stats = ticket_service.get_ticket_sale_stats(party.id)
tickets_checked_in = ticket_service.count_tickets_checked_in_for_party(
party.id
)
seat_utilization = seat_service.get_seat_utilization(party.id)
guest_servers = guest_server_service.get_all_servers_for_party(party.id)
return {
'party': party,
'days': days,
'days_until_party': days_until_party,
'orga_count': orga_count,
'orga_team_count': orga_team_count,
'seating_area_count': seating_area_count,
'seat_count': seat_count,
'ticket_sale_stats': ticket_sale_stats,
'tickets_checked_in': tickets_checked_in,
'seat_utilization': seat_utilization,
'guest_servers': guest_servers,
}
|
de02cf21c9afe35a1dc5ef7a896d99d40a9bd43f
| 18,746 |
import _collections
# NOTE: _six, _wrapt, tf_logging and the various _is_* helper predicates are
# assumed to be defined elsewhere in the original TensorFlow module.
def _sequence_like(instance, args):
"""Converts the sequence `args` to the same type as `instance`.
Args:
instance: an instance of `tuple`, `list`, `namedtuple`, `dict`,
`collections.OrderedDict`, or `composite_tensor.Composite_Tensor`
or `type_spec.TypeSpec`.
args: elements to be converted to the `instance` type.
Returns:
`args` with the type of `instance`.
"""
if _is_mutable_mapping(instance):
# Pack dictionaries in a deterministic order by sorting the keys.
# Notice this means that we ignore the original order of `OrderedDict`
# instances. This is intentional, to avoid potential bugs caused by mixing
# ordered and plain dicts (e.g., flattening a dict but using a
# corresponding `OrderedDict` to pack it back).
result = dict(zip(_sorted(instance), args))
instance_type = type(instance)
if instance_type == _collections.defaultdict:
d = _collections.defaultdict(instance.default_factory)
else:
d = instance_type()
for key in instance:
d[key] = result[key]
return d
elif _is_mapping(instance):
result = dict(zip(_sorted(instance), args))
instance_type = type(instance)
tf_logging.log_first_n(
tf_logging.WARN, "Mapping types may not work well with tf.nest. Prefer"
" using MutableMapping for {}".format(instance_type), 1)
try:
return instance_type((key, result[key]) for key in instance)
except TypeError as err:
raise TypeError("Error creating an object of type {} like {}. Note that "
"it must accept a single positional argument "
"representing an iterable of key-value pairs, in "
"addition to self. Cause: {}".format(
type(instance), instance, err))
elif _is_mapping_view(instance):
# We can't directly construct mapping views, so we create a list instead
return list(args)
elif _is_namedtuple(instance) or _is_attrs(instance):
if isinstance(instance, _wrapt.ObjectProxy):
instance_type = type(instance.__wrapped__)
else:
instance_type = type(instance)
return instance_type(*args)
elif _is_composite_tensor(instance):
assert len(args) == 1
spec = instance._type_spec # pylint: disable=protected-access
return spec._from_components(args[0]) # pylint: disable=protected-access
elif _is_type_spec(instance):
# Pack a CompositeTensor's components according to a TypeSpec.
assert len(args) == 1
return instance._from_components(args[0]) # pylint: disable=protected-access
elif isinstance(instance, _six.moves.range):
return _sequence_like(list(instance), args)
elif isinstance(instance, _wrapt.ObjectProxy):
# For object proxies, first create the underlying type and then re-wrap it
# in the proxy type.
return type(instance)(_sequence_like(instance.__wrapped__, args))
else:
# Not a namedtuple
return type(instance)(args)
|
576e2c0ff6baeda2f0ff0a89773181ea021e725d
| 18,747 |
def subcat_add():
"""
Add a sub-category.
"""
if request.method == 'POST':
cat_name = request.form['cat_name']
super_cat_id = request.form['super_cat_id']
# Check whether the name already exists
subcat = SubCat.query.filter_by(cat_name=cat_name).count()
if subcat :
return "<script>alert('该小分类已经存在');history.go(-1);</script>"
# Assemble the data
data = SubCat(
super_cat_id = super_cat_id,
cat_name = cat_name,
)
db.session.add(data)
db.session.commit()
return redirect(url_for("admin.subcat_list"))
supercat = SuperCat.query.all() # fetch the top-level category info
return render_template("admin/subcat_add.html",supercat=supercat)
|
046011d15be00557b28f9300d813ffc6e23d43e0
| 18,748 |
async def async_setup_entry(hass, config_entry, async_add_devices):
"""Set up the Alexa sensor platform by config_entry."""
return await async_setup_platform(
hass,
config_entry.data,
async_add_devices,
discovery_info=None)
|
b7a4a8a4573ecc008f43a4a9c3e9dcfa21fb0d78
| 18,749 |
def parse_duration(dur: str) -> int:
"""Generates seconds from a human readable duration."""
if not DURATION_REGEX.match(dur):
raise ValueError('Time passed does not match required format: `XX:XX` or `XX:XX:XX`')
parts = dur.split(':')
seconds = 0
if len(parts) == 3:
seconds += int(parts[0]) * 60 * 60
seconds += int(parts[1]) * 60
seconds += int(parts[2])
else:
seconds += int(parts[0]) * 60
seconds += int(parts[1])
return seconds
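# Illustrative usage (not part of the original snippet). DURATION_REGEX is not
# shown above, so a plausible stand-in is defined here purely for the example.
import re
DURATION_REGEX = re.compile(r'^\d{1,2}:\d{2}(:\d{2})?$')  # assumed format: XX:XX or XX:XX:XX
print(parse_duration('02:30'))     # 150  (2 minutes 30 seconds)
print(parse_duration('01:02:03'))  # 3723 (1 hour, 2 minutes, 3 seconds)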
|
ec60b2362d8dc2e898e278b4e1dbf0aca764bc87
| 18,750 |
import random
import shortuuid
def shorter_uuid(length=7, starter=None, with_original=False):
"""
Generate an even shorter short UUID generated by the shortuuid library.
:param length: Length of trimmed ID.
:param starter: An already-created ShortUUID string to slice from.
Useful when calling recursively.
:param with_original: Also return initially-generated ShortUUID
:return:
"""
original_id = str(shortuuid.uuid()) if starter is None else starter
n = len(original_id)
dx = min(length, len(original_id)) # ID length
if starter is not None and len(starter) < dx * 2:
original_id = str(shortuuid.uuid())
start_point = random.randint(0, n - dx)
shorter_id = original_id[start_point:(start_point + dx)]
return shorter_id if not with_original else [shorter_id, original_id]
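# Illustrative usage (not part of the original snippet); requires the
# shortuuid package. Outputs shown are examples only, since IDs are random.
print(shorter_uuid())             # e.g. 'Kb3qFhT' (7 characters)
print(shorter_uuid(length=5))     # e.g. 'p9XzQ'
short_id, full_id = shorter_uuid(with_original=True)
print(short_id, full_id)          # trimmed ID alongside the full ShortUUID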
|
80eca9d14ff3ebeccd77a4b989dde52e4786a042
| 18,751 |
from typing import Iterable
from typing import Callable
from typing import Optional
from typing import List
from typing import Dict
def group_by(s: Iterable[_ElementType],
key: Callable[[_ElementType], _GroupType],
gfunc: Optional[Callable[[List[_ElementType]], _ResultType]] = None) -> Dict[_GroupType, _ResultType]:
"""
Overview:
Divide the elements into groups.
:param s: Elements.
:param key: Group key, should be a callable object.
:param gfunc: Post-process function for groups, should be a callable object. Default is ``None`` which means \
no post-processing will be performed.
:return: Grouping result.
Examples::
>>> from hbutils.collection import group_by
>>>
>>> foods = [
... 'apple', 'orange', 'pear',
... 'banana', 'fish', 'pork', 'milk',
... ]
>>> group_by(foods, len) # group by length
{5: ['apple'], 6: ['orange', 'banana'], 4: ['pear', 'fish', 'pork', 'milk']}
>>> group_by(foods, len, len) # group and get length
{5: 1, 6: 2, 4: 4}
>>> group_by(foods, lambda x: x[0]) # group by first letter
{'a': ['apple'], 'o': ['orange'], 'p': ['pear', 'pork'], 'b': ['banana'], 'f': ['fish'], 'm': ['milk']}
>>> group_by(foods, lambda x: x[0], len) # group and get length
{'a': 1, 'o': 1, 'p': 2, 'b': 1, 'f': 1, 'm': 1}
"""
gfunc = gfunc or (lambda x: x)
_result_dict: Dict[_GroupType, List[_ElementType]] = {}
for item in s:
_item_key = key(item)
if _item_key not in _result_dict:
_result_dict[_item_key] = []
_result_dict[_item_key].append(item)
return {
key: gfunc(grps)
for key, grps in _result_dict.items()
}
|
b515e0b3a3467b47ad29552bed39b14eca2d2978
| 18,752 |
# NOTE: this workflow snippet assumes the usual nipype/niworkflows imports
# (pe, niu, Registration, AI, ApplyTransforms, ApplyTransformsToPoints,
# AntsJointFusion, DerivativesDataSink, FreeSurferSource, fs, pkgr,
# get_template, init_brain_extraction_wf, init_gifti_surface_wf, etc.) are
# available in the original module.
def init_templateflow_wf(
bids_dir,
output_dir,
participant_label,
mov_template,
ref_template='MNI152NLin2009cAsym',
use_float=True,
omp_nthreads=None,
mem_gb=3.0,
modality='T1w',
normalization_quality='precise',
name='templateflow_wf',
fs_subjects_dir=None,
):
"""
A Nipype workflow to perform image registration between two templates
*R* and *M*. *R* is the *reference template*, selected by a templateflow
identifier such as ``MNI152NLin2009cAsym``, and *M* is the *moving
template* (e.g., ``MNI152Lin``). This workflows maps data defined on
template-*M* space onto template-*R* space.
1. Run the subrogate images through ``antsBrainExtraction``.
2. Recompute :abbr:`INU (intensity non-uniformity)` correction using
the mask obtained in 1).
3. Independently, run spatial normalization of every
:abbr:`INU (intensity non-uniformity)` corrected image
(supplied via ``in_files``) to both templates.
4. Calculate an initialization between both templates, using them directly.
5. Run multi-channel image registration of the images resulting from
3). Both sets of images (one registered to *R* and another to *M*)
are then used as reference and moving images in the registration
framework.
**Parameters**
in_files: list of files
a list of paths pointing to the images that will be used as surrogates
mov_template: str
a templateflow identifier for template-*M*
ref_template: str
a templateflow identifier for template-*R* (default: ``MNI152NLin2009cAsym``).
"""
# number of participants
ninputs = len(participant_label)
ants_env = {
'NSLOTS': '%d' % omp_nthreads,
'ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS': '%d' % omp_nthreads,
'OMP_NUM_THREADS': '%d' % omp_nthreads,
}
# Get path to templates
tpl_ref = str(get_template(ref_template, suffix=modality, desc=None, resolution=1))
tpl_ref_mask = str(get_template(ref_template, suffix='mask',
desc='brain', resolution=1))
tpl_mov = str(get_template(mov_template, suffix=modality, desc=None, resolution=1))
tpl_mov_mask = str(get_template(mov_template, suffix='mask',
desc='brain', resolution=1))
wf = pe.Workflow(name)
inputnode = pe.Node(niu.IdentityInterface(fields=['participant_label']),
name='inputnode')
inputnode.iterables = ('participant_label', sorted(list(participant_label)))
pick_file = pe.Node(niu.Function(function=_bids_pick),
name='pick_file', run_without_submitting=True)
pick_file.inputs.bids_root = bids_dir
ref_bex = init_brain_extraction_wf(
in_template=ref_template,
omp_nthreads=omp_nthreads,
mem_gb=mem_gb,
bids_suffix=modality,
name='reference_bex',
)
mov_bex = init_brain_extraction_wf(
in_template=mov_template,
omp_nthreads=omp_nthreads,
mem_gb=mem_gb,
bids_suffix=modality,
name='moving_bex',
)
ref_norm = pe.Node(
Registration(
from_file=pkgr.resource_filename(
'niworkflows.data', 't1w-mni_registration_%s_000.json' % normalization_quality)),
name='ref_norm', n_procs=omp_nthreads)
ref_norm.inputs.fixed_image = tpl_ref
ref_norm.inputs.fixed_image_masks = tpl_ref_mask
ref_norm.inputs.environ = ants_env
# Register the INU-corrected image to the other template
mov_norm = pe.Node(
Registration(
from_file=pkgr.resource_filename(
'niworkflows.data', 't1w-mni_registration_%s_000.json' % normalization_quality)),
name='mov_norm', n_procs=omp_nthreads)
mov_norm.inputs.fixed_image = tpl_mov
mov_norm.inputs.fixed_image_masks = tpl_mov_mask
mov_norm.inputs.environ = ants_env
# Initialize between-templates transform with antsAI
init_aff = pe.Node(AI(
metric=('Mattes', 32, 'Regular', 0.2),
transform=('Affine', 0.1),
search_factor=(20, 0.12),
principal_axes=False,
convergence=(10, 1e-6, 10),
verbose=True,
fixed_image=tpl_ref,
fixed_image_mask=tpl_ref_mask,
moving_image=tpl_mov,
moving_image_mask=tpl_mov_mask,
environ=ants_env,
), name='init_aff', n_procs=omp_nthreads)
ref_buffer = pe.JoinNode(niu.IdentityInterface(
fields=['fixed_image']),
joinsource='inputnode', joinfield='fixed_image', name='ref_buffer')
mov_buffer = pe.JoinNode(niu.IdentityInterface(
fields=['moving_image']),
joinsource='inputnode', joinfield='moving_image', name='mov_buffer')
flow = pe.Node(
Registration(
from_file=pkgr.resource_filename(
'niworkflows.data', 't1w-mni_registration_%s_000.json' % normalization_quality)),
name='flow_norm', n_procs=omp_nthreads,
)
flow.inputs.fixed_image_masks = tpl_ref_mask
flow.inputs.moving_image_masks = tpl_mov_mask
flow.inputs.metric = [[v] * ninputs for v in flow.inputs.metric]
flow.inputs.metric_weight = [[1 / ninputs] * ninputs
for _ in flow.inputs.metric_weight]
flow.inputs.radius_or_number_of_bins = [
[v] * ninputs for v in flow.inputs.radius_or_number_of_bins]
flow.inputs.sampling_percentage = [
[v] * ninputs for v in flow.inputs.sampling_percentage]
flow.inputs.sampling_strategy = [
[v] * ninputs for v in flow.inputs.sampling_strategy]
flow.inputs.environ = ants_env
# Datasinking
ref_norm_ds = pe.Node(
DerivativesDataSink(base_directory=str(output_dir.parent),
out_path_base=output_dir.name, space=ref_template,
desc='preproc', keep_dtype=True),
name='ref_norm_ds', run_without_submitting=True
)
mov_norm_ds = pe.Node(
DerivativesDataSink(base_directory=str(output_dir.parent),
out_path_base=output_dir.name, space=mov_template,
desc='preproc', keep_dtype=True),
name='mov_norm_ds', run_without_submitting=True
)
xfm_ds = pe.Node(DerivativesDataSink(
base_directory=str(output_dir.parent), out_path_base=output_dir.name,
allowed_entities=['from', 'mode'], mode='image', suffix='xfm',
source_file='group/tpl-{0}_T1w.nii.gz'.format(ref_template),
**{'from': mov_template}),
name='xfm_ds', run_without_submitting=True)
wf.connect([
(inputnode, pick_file, [('participant_label', 'participant_label')]),
(pick_file, ref_bex, [('out', 'inputnode.in_files')]),
(pick_file, mov_bex, [('out', 'inputnode.in_files')]),
(ref_bex, ref_norm, [('outputnode.bias_corrected', 'moving_image'),
('outputnode.out_mask', 'moving_image_masks'),
('norm.forward_transforms', 'initial_moving_transform')]),
(ref_bex, mov_norm, [('outputnode.bias_corrected', 'moving_image')]),
(mov_bex, mov_norm, [('outputnode.out_mask', 'moving_image_masks'),
('norm.forward_transforms', 'initial_moving_transform')]),
(init_aff, flow, [('output_transform', 'initial_moving_transform')]),
(ref_norm, ref_buffer, [('warped_image', 'fixed_image')]),
(mov_norm, mov_buffer, [('warped_image', 'moving_image')]),
(ref_buffer, flow, [('fixed_image', 'fixed_image')]),
(mov_buffer, flow, [('moving_image', 'moving_image')]),
(pick_file, ref_norm_ds, [('out', 'source_file')]),
(ref_norm, ref_norm_ds, [('warped_image', 'in_file')]),
(pick_file, mov_norm_ds, [('out', 'source_file')]),
(mov_norm, mov_norm_ds, [('warped_image', 'in_file')]),
(flow, xfm_ds, [('composite_transform', 'in_file')]),
])
if fs_subjects_dir:
fssource = pe.Node(
FreeSurferSource(subjects_dir=str(fs_subjects_dir)),
name='fssource', run_without_submitting=True)
tonative = pe.Node(fs.Label2Vol(subjects_dir=str(fs_subjects_dir)),
name='tonative')
tonii = pe.Node(
fs.MRIConvert(out_type='niigz', resample_type='nearest'),
name='tonii')
ref_aparc = pe.Node(
ApplyTransforms(interpolation='MultiLabel', float=True,
reference_image=tpl_ref, environ=ants_env),
name='ref_aparc', mem_gb=1, n_procs=omp_nthreads
)
mov_aparc = pe.Node(
ApplyTransforms(interpolation='MultiLabel', float=True,
reference_image=tpl_mov, environ=ants_env),
name='mov_aparc', mem_gb=1, n_procs=omp_nthreads
)
ref_aparc_buffer = pe.JoinNode(
niu.IdentityInterface(fields=['aparc']),
joinsource='inputnode', joinfield='aparc', name='ref_aparc_buffer')
ref_join_labels = pe.Node(
AntsJointFusion(
target_image=[tpl_ref],
out_label_fusion='merged_aparc.nii.gz',
out_intensity_fusion_name_format='merged_aparc_intensity_%d.nii.gz',
out_label_post_prob_name_format='merged_aparc_posterior_%d.nii.gz',
out_atlas_voting_weight_name_format='merged_aparc_weight_%d.nii.gz',
environ=ants_env,
),
name='ref_join_labels', n_procs=omp_nthreads)
ref_join_labels_ds = pe.Node(
DerivativesDataSink(
base_directory=str(output_dir.parent),
out_path_base=output_dir.name,
suffix='dtissue', desc='aparc', keep_dtype=False,
source_file='group/tpl-{0}_T1w.nii.gz'.format(ref_template)),
name='ref_join_labels_ds', run_without_submitting=True)
ref_join_probs_ds = pe.Node(
DerivativesDataSink(
base_directory=str(output_dir.parent),
out_path_base=output_dir.name,
suffix='probtissue', desc='aparc', keep_dtype=False,
source_file='group/tpl-{0}_T1w.nii.gz'.format(ref_template)),
name='ref_join_probs_ds', run_without_submitting=True)
# ref_join_voting_ds = pe.Node(
# DerivativesDataSink(
# base_directory=str(output_dir.parent),
# out_path_base=output_dir.name, space=ref_template,
# suffix='probtissue', desc='aparcvoting', keep_dtype=False,
# source_file='group/tpl-{0}_T1w.nii.gz'.format(ref_template)),
# name='ref_join_voting_ds', run_without_submitting=True)
mov_aparc_buffer = pe.JoinNode(
niu.IdentityInterface(fields=['aparc']),
joinsource='inputnode', joinfield='aparc', name='mov_aparc_buffer')
mov_join_labels = pe.Node(
AntsJointFusion(
target_image=[tpl_mov],
out_label_fusion='merged_aparc.nii.gz',
out_intensity_fusion_name_format='merged_aparc_intensity_%d.nii.gz',
out_label_post_prob_name_format='merged_aparc_posterior_%d.nii.gz',
out_atlas_voting_weight_name_format='merged_aparc_weight_%d.nii.gz',
environ=ants_env,
),
name='mov_join_labels', n_procs=omp_nthreads)
mov_join_labels_ds = pe.Node(
DerivativesDataSink(
base_directory=str(output_dir.parent),
out_path_base=output_dir.name,
suffix='dtissue', desc='aparc', keep_dtype=False,
source_file='group/tpl-{0}_T1w.nii.gz'.format(mov_template)),
name='mov_join_labels_ds', run_without_submitting=True)
mov_join_probs_ds = pe.Node(
DerivativesDataSink(
base_directory=str(output_dir.parent),
out_path_base=output_dir.name,
suffix='probtissue', desc='aparc', keep_dtype=False,
source_file='group/tpl-{0}_T1w.nii.gz'.format(mov_template)),
name='mov_join_probs_ds', run_without_submitting=True)
ref_aparc_ds = pe.Node(
DerivativesDataSink(base_directory=str(output_dir.parent),
out_path_base=output_dir.name, space=ref_template,
suffix='dtissue', desc='aparc', keep_dtype=False),
name='ref_aparc_ds', run_without_submitting=True
)
mov_aparc_ds = pe.Node(
DerivativesDataSink(base_directory=str(output_dir.parent),
out_path_base=output_dir.name, space=mov_template,
suffix='dtissue', desc='aparc', keep_dtype=False),
name='mov_aparc_ds', run_without_submitting=True
)
# Extract surfaces
cifti_wf = init_gifti_surface_wf(
name='cifti_surfaces',
subjects_dir=str(fs_subjects_dir))
# Move surfaces to template spaces
gii2csv = pe.MapNode(GiftiToCSV(itk_lps=True),
iterfield=['in_file'], name='gii2csv')
ref_map_surf = pe.MapNode(
ApplyTransformsToPoints(dimension=3, environ=ants_env),
n_procs=omp_nthreads, name='ref_map_surf', iterfield=['input_file'])
ref_csv2gii = pe.MapNode(
CSVToGifti(itk_lps=True),
name='ref_csv2gii', iterfield=['in_file', 'gii_file'])
ref_surfs_buffer = pe.JoinNode(
niu.IdentityInterface(fields=['surfaces']),
joinsource='inputnode', joinfield='surfaces', name='ref_surfs_buffer')
ref_surfs_unzip = pe.Node(UnzipJoinedSurfaces(), name='ref_surfs_unzip',
run_without_submitting=True)
ref_ply = pe.MapNode(SurfacesToPointCloud(), name='ref_ply',
iterfield=['in_files'])
ref_recon = pe.MapNode(PoissonRecon(), name='ref_recon',
iterfield=['in_file'])
ref_avggii = pe.MapNode(PLYtoGifti(), name='ref_avggii',
iterfield=['in_file', 'surf_key'])
ref_smooth = pe.MapNode(fs.SmoothTessellation(), name='ref_smooth',
iterfield=['in_file'])
ref_surfs_ds = pe.Node(
DerivativesDataSink(
base_directory=str(output_dir.parent),
out_path_base=output_dir.name, space=ref_template,
keep_dtype=False, compress=False),
name='ref_surfs_ds', run_without_submitting=True)
ref_avg_ds = pe.Node(
DerivativesDataSink(
base_directory=str(output_dir.parent),
out_path_base=output_dir.name, space=ref_template,
keep_dtype=False, compress=False,
source_file='group/tpl-{0}_T1w.nii.gz'.format(ref_template)),
name='ref_avg_ds', run_without_submitting=True)
mov_map_surf = pe.MapNode(
ApplyTransformsToPoints(dimension=3, environ=ants_env),
n_procs=omp_nthreads, name='mov_map_surf', iterfield=['input_file'])
mov_csv2gii = pe.MapNode(
CSVToGifti(itk_lps=True),
name='mov_csv2gii', iterfield=['in_file', 'gii_file'])
mov_surfs_buffer = pe.JoinNode(
niu.IdentityInterface(fields=['surfaces']),
joinsource='inputnode', joinfield='surfaces', name='mov_surfs_buffer')
mov_surfs_unzip = pe.Node(UnzipJoinedSurfaces(), name='mov_surfs_unzip',
run_without_submitting=True)
mov_ply = pe.MapNode(SurfacesToPointCloud(), name='mov_ply',
iterfield=['in_files'])
mov_recon = pe.MapNode(PoissonRecon(), name='mov_recon',
iterfield=['in_file'])
mov_avggii = pe.MapNode(PLYtoGifti(), name='mov_avggii',
iterfield=['in_file', 'surf_key'])
mov_smooth = pe.MapNode(fs.SmoothTessellation(), name='mov_smooth',
iterfield=['in_file'])
mov_surfs_ds = pe.Node(
DerivativesDataSink(
base_directory=str(output_dir.parent),
out_path_base=output_dir.name, space=mov_template,
keep_dtype=False, compress=False),
name='mov_surfs_ds', run_without_submitting=True)
mov_avg_ds = pe.Node(
DerivativesDataSink(
base_directory=str(output_dir.parent),
out_path_base=output_dir.name, space=mov_template,
keep_dtype=False, compress=False,
source_file='group/tpl-{0}_T1w.nii.gz'.format(mov_template)),
name='mov_avg_ds', run_without_submitting=True)
wf.connect([
(inputnode, fssource, [(('participant_label', _sub_decorate), 'subject_id')]),
(inputnode, cifti_wf, [
(('participant_label', _sub_decorate), 'inputnode.subject_id')]),
(pick_file, cifti_wf, [('out', 'inputnode.in_t1w')]),
(pick_file, tonii, [('out', 'reslice_like')]),
# Select DKT aparc
(fssource, tonative, [(('aparc_aseg', _last), 'seg_file'),
('rawavg', 'template_file'),
('aseg', 'reg_header')]),
(tonative, tonii, [('vol_label_file', 'in_file')]),
(tonii, ref_aparc, [('out_file', 'input_image')]),
(tonii, mov_aparc, [('out_file', 'input_image')]),
(ref_norm, ref_aparc, [('composite_transform', 'transforms')]),
(mov_norm, mov_aparc, [('composite_transform', 'transforms')]),
(ref_buffer, ref_join_labels, [
('fixed_image', 'atlas_image')]),
(ref_aparc, ref_aparc_buffer, [('output_image', 'aparc')]),
(ref_aparc_buffer, ref_join_labels, [
('aparc', 'atlas_segmentation_image')]),
(mov_buffer, mov_join_labels, [
('moving_image', 'atlas_image')]),
(mov_aparc, mov_aparc_buffer, [('output_image', 'aparc')]),
(mov_aparc_buffer, mov_join_labels, [
('aparc', 'atlas_segmentation_image')]),
# Datasinks
(ref_join_labels, ref_join_labels_ds, [('out_label_fusion', 'in_file')]),
(ref_join_labels, ref_join_probs_ds, [
('out_label_post_prob', 'in_file'),
(('out_label_post_prob', _get_extra), 'extra_values')]),
# (ref_join_labels, ref_join_voting_ds, [
# ('out_atlas_voting_weight_name_format', 'in_file')]),
(mov_join_labels, mov_join_labels_ds, [('out_label_fusion', 'in_file')]),
(mov_join_labels, mov_join_probs_ds, [
('out_label_post_prob', 'in_file'),
(('out_label_post_prob', _get_extra), 'extra_values')]),
(pick_file, ref_aparc_ds, [('out', 'source_file')]),
(ref_aparc, ref_aparc_ds, [('output_image', 'in_file')]),
(pick_file, mov_aparc_ds, [('out', 'source_file')]),
(mov_aparc, mov_aparc_ds, [('output_image', 'in_file')]),
# Mapping ref surfaces
(cifti_wf, gii2csv, [
(('outputnode.surf_norm', _discard_inflated), 'in_file')]),
(gii2csv, ref_map_surf, [('out_file', 'input_file')]),
(ref_norm, ref_map_surf, [
(('inverse_composite_transform', _ensure_list), 'transforms')]),
(ref_map_surf, ref_csv2gii, [('output_file', 'in_file')]),
(cifti_wf, ref_csv2gii, [
(('outputnode.surf_norm', _discard_inflated), 'gii_file')]),
(pick_file, ref_surfs_ds, [('out', 'source_file')]),
(ref_csv2gii, ref_surfs_ds, [
('out_file', 'in_file'),
(('out_file', _get_surf_extra), 'extra_values')]),
(ref_csv2gii, ref_surfs_buffer, [('out_file', 'surfaces')]),
(ref_surfs_buffer, ref_surfs_unzip, [('surfaces', 'in_files')]),
(ref_surfs_unzip, ref_ply, [('out_files', 'in_files')]),
(ref_ply, ref_recon, [('out_file', 'in_file')]),
(ref_recon, ref_avggii, [('out_file', 'in_file')]),
(ref_surfs_unzip, ref_avggii, [('surf_keys', 'surf_key')]),
(ref_avggii, ref_smooth, [('out_file', 'in_file')]),
(ref_smooth, ref_avg_ds, [
('surface', 'in_file'),
(('surface', _get_surf_extra), 'extra_values')]),
# Mapping mov surfaces
(gii2csv, mov_map_surf, [('out_file', 'input_file')]),
(mov_norm, mov_map_surf, [
(('inverse_composite_transform', _ensure_list), 'transforms')]),
(mov_map_surf, mov_csv2gii, [('output_file', 'in_file')]),
(cifti_wf, mov_csv2gii, [
(('outputnode.surf_norm', _discard_inflated), 'gii_file')]),
(pick_file, mov_surfs_ds, [('out', 'source_file')]),
(mov_csv2gii, mov_surfs_ds, [
('out_file', 'in_file'),
(('out_file', _get_surf_extra), 'extra_values')]),
(mov_csv2gii, mov_surfs_buffer, [('out_file', 'surfaces')]),
(mov_surfs_buffer, mov_surfs_unzip, [('surfaces', 'in_files')]),
(mov_surfs_unzip, mov_ply, [('out_files', 'in_files')]),
(mov_ply, mov_recon, [('out_file', 'in_file')]),
(mov_recon, mov_avggii, [('out_file', 'in_file')]),
(mov_surfs_unzip, mov_avggii, [('surf_keys', 'surf_key')]),
(mov_avggii, mov_smooth, [('out_file', 'in_file')]),
(mov_smooth, mov_avg_ds, [
('surface', 'in_file'),
(('surface', _get_surf_extra), 'extra_values')]),
])
return wf
|
677218b13cbfc48881440523d87667eaed7ea2e8
| 18,753 |
def Qest(ICobj, r=None):
"""
Estimate Toomre Q at r (optional) for ICs, assuming omega=epicyclic
frequency. Ignores disk self-gravity
"""
if not hasattr(ICobj, 'sigma'):
        raise ValueError('Could not find surface density profile (sigma)')
G = SimArray(1.0, 'G')
kB = SimArray(1.0, 'k')
if r is None:
r = ICobj.sigma.r_bins
sigma = ICobj.sigma(r)
T = ICobj.T(r)
M = ICobj.settings.physical.M
m = ICobj.settings.physical.m
M = match_units(M, 'Msol')[0]
m = match_units(m, 'm_p')[0]
gamma = ICobj.settings.physical.gamma_cs()
Q = np.sqrt(M*kB*T*gamma/(G*m*r**3))/(np.pi*sigma)
Q.convert_units('1')
return Q
|
f262c0f68683dc069dd983981b2cbd1d9a9e608a
| 18,754 |
def get_projector_csr_file(config_name: str) -> str:
"""Returns full path to projector server crt file"""
return join(get_run_configs_dir(), config_name, f'{PROJECTOR_JKS_NAME}.csr')
|
159bc798d28bf23ce06d356591d8c41bcea40356
| 18,755 |
import os
import cv2
import numpy as np
def test_xiaomi():
"""
    KITTI disparity map -> depth map -> point cloud
"""
def disp2depth(b, f, disp):
"""
"""
disp = disp.astype(np.float32)
non_zero_inds = np.where(disp)
depth = np.zeros_like(disp, dtype=np.float32)
depth[non_zero_inds] = b * f / disp[non_zero_inds]
return depth
disp_f_path = './disp_2.png' # TestDisparity2DepthAndPC
img_f_path = './left_2.png'
if not (os.path.isfile(disp_f_path) or os.path.isfile(img_f_path)):
print('[Err]: invalid disparity/image file path.')
return
    # # KITTI dataset parameters
    # f = 721 # pixel
    # b = 0.54 # m
    # xiaomi camera parameters
# fx = 998.72290039062500
# fy = 1000.0239868164063
f = (998.72290039062500 + 1000.0239868164063) * 0.5 # 1000.0
cx = 671.15643310546875
cy = 384.32458496093750
b = 0.12 # m
    # Read the disparity map
    disp = cv2.imread(disp_f_path, cv2.IMREAD_ANYDEPTH)
    print('Disparity image data type: ', disp.dtype)
    # Read the BGR image
bgr = cv2.imread(img_f_path, cv2.IMREAD_COLOR)
print('BGR image data type: ', bgr.dtype)
assert (bgr.shape[:2] == disp.shape[:2])
H, W = disp.shape[:2]
print('W×H: {:d}×{:d}'.format(W, H))
c, r = np.meshgrid(np.arange(W), np.arange(H))
# print(c, '\n', r)
# x, y = np.arange(W), np.arange(H)
cx, cy = W * 0.5, H * 0.5
    # ---------- Disparity map (uint16) -> depth map (float32)
    depth = disp2depth(b, f, disp)
    # --------- Depth map -> point cloud (x, y, z)
points = np.zeros((H, W, 3), dtype=np.float32)
colors = np.zeros((H, W, 3), dtype=np.uint8)
points[r, c, 0] = (c - cx) * depth / f # x
points[r, c, 1] = (r - cy) * depth / f # y
points[r, c, 2] = depth # z
# bgr ——> rgb
colors = bgr[:, :, ::-1]
    # ----- Filter out points whose x, y and z are all 0
inds = np.where((points[:, :, 0] != 0.0) |
(points[:, :, 1] != 0.0) |
(points[:, :, 2] != 0.0))
points = points[inds]
colors = colors[inds]
    # # # ----- Filtering
# inds = np.where(
# (points[:, 1] > -1.0)
# & (points[:, 1] < 1.0)
# )
# points = points[inds]
# colors = colors[inds]
# print('{:d} 3D points left.'.format(inds[0].size))
# view_points_cloud(points)
    # Save the PCD point cloud file
    points2pcd(points, './pc_2.pcd')
    print('PCD point cloud saved.')
    # Save the PLY point cloud file
    points2ply(points, colors, './ply_2.ply')
    print('Ply point cloud saved.')
    # ---------- Save the depth map
depth *= 1000.0 # m ——> mm
depth = depth.astype(np.uint16)
cv2.imwrite('./depth_2.png', depth)
print('Depth image written.')
|
b95d4094e38ce17957b2cba9441f960c1022f653
| 18,756 |
import os
import numpy as np
import PIL.Image
def get_img(shape, path, dtype, should_scale=True):
"""Get image as input."""
resize_to = shape[1:3]
path = os.path.join(os.path.dirname(os.path.abspath(__file__)), path)
img = PIL.Image.open(path)
img = img.resize(resize_to, PIL.Image.ANTIALIAS)
img_np = np.array(img).astype(dtype)
img_np = np.stack([img_np] * shape[0], axis=0).reshape(shape)
if should_scale:
img_np = img_np / 255
return img_np
|
829ebfcdab258d5a5b57694c5209fe848300420c
| 18,757 |
import numpy as np
from collections import defaultdict
def make_epsilon_greedy_policy(Q: defaultdict, epsilon: float, nA: int) -> callable:
"""
Creates an epsilon-greedy policy based on a given Q-function and epsilon.
I.e. create weight vector from which actions get sampled.
:param Q: tabular state-action lookup function
:param epsilon: exploration factor
:param nA: size of action space to consider for this policy
"""
def policy_fn(observation):
policy = np.ones(nA) * epsilon / nA
best_action = np.random.choice(np.flatnonzero( # random choice for tie-breaking only
Q[observation] == Q[observation].max()
))
policy[best_action] += (1 - epsilon)
return policy
return policy_fn
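# Usage sketch: a two-action Q-table with illustrative values ("s0" and 0.1/0.9 are
# made up here); an action is then sampled from the epsilon-greedy weights.
if __name__ == "__main__":
    Q = defaultdict(lambda: np.zeros(2))
    Q["s0"] = np.array([0.1, 0.9])
    policy = make_epsilon_greedy_policy(Q, epsilon=0.1, nA=2)
    probs = policy("s0")                          # -> array([0.05, 0.95])
    action = np.random.choice(len(probs), p=probs)
    print(probs, action)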
|
f0fe733b18b416939db44acd830ee605bc41e18f
| 18,758 |
def write_init(proxy_parameters=None, exception=None):
"""Encodes and returns an MPI ('Metadata Init') response."""
return _write_init(Method.MPI, MetadataProviderError, proxy_parameters,
exception)
|
0a73c4949796a93549e208da523e805894170193
| 18,759 |
def pfam_to_pubmed(family):
"""get a list of associated pubmed ids for given pfam access key.
:param family: pfam accession key of family
:type family: str
:return: List of associated Pubmed ids
:rettype:list"""
url='https://pfam.xfam.org/family/'+family
pattern='http://www.ncbi.nlm.nih.gov/pubmed/'
return _xfam_to(url,pattern)
|
e3d63050d7e2e782ccd9d376fb4cd2d33c177be6
| 18,760 |
def cvConvexHull2(input, hull_storage=None, orientation=CV_CLOCKWISE, return_points=0):
"""CvSeq_or_CvMat cvConvexHull2(list_or_tuple_of_CvPointXYZ input, void* hull_storage=NULL, int orientation=CV_CLOCKWISE, int return_points=0)
Finds convex hull of point set
[ctypes-opencv] OpenCV's note: a vertex of the detected convex hull can be represented by:
a point of the same type with every point in 'input', if return_points==1
an index to a point in 'input', if return_points==0 and hull_storage is a CvMat
a pointer to a point in 'input', if return_points==0 and hull_storage is a CvStorage
[ctypes-opencv] If input is a (subclass of) CvSeq, 'hull_storage' can be:
None: detected vertices are stored in input's storage
an instance of CvStorage or CvMat: detected vertices are stored here
[ctypes-opencv] If input is 1d CvMat of 2D 32-bit points, 'hull_storage' can be:
None: 'hull_storage' is internally created as a 1d CvMat of 2D 32-bit points.
an instance of CvStorage or CvMat: detected vertices are stored here
[ctypes-opencv] In any case, the function returns a sequence (CvSeq) of detected vertices if 'hull_storage' is an instance CvStorage, or 'hull_storage' itself if otherwise.
"""
if isinstance(input, _CvSeqStructure): # a sequence
return pointee(_cvConvexHull2(input, hull_storage, orientation, return_points), input if hull_storage is None else hull_storage)
if hull_storage is None:
hull_storage = cvCreateMat(1, input.rows*input.cols, CV_MAT_TYPE(input) if return_points else CV_32SC1)
_cvConvexHull2(input, hull_storage, orientation, return_points)
return hull_storage
|
3def10577e29e6b9bcf2611ad194dca2f6e2feb7
| 18,761 |
from typing import Union
from typing import Optional
from typing import Dict
def init_classifier(config: Union[str, mmcv.Config],
checkpoint: Optional[str] = None,
device: str = 'cuda:0',
options: Optional[Dict] = None) -> nn.Module:
"""Prepare a few shot classifier from config file.
Args:
config (str or :obj:`mmcv.Config`): Config file path or the config
object.
checkpoint (str | None): Checkpoint path. If left as None, the model
will not load any weights. Default: None.
device (str): Runtime device. Default: 'cuda:0'.
options (dict | None): Options to override some settings in the
used config. Default: None.
Returns:
nn.Module: The constructed classifier.
"""
if isinstance(config, str):
config = mmcv.Config.fromfile(config)
elif not isinstance(config, mmcv.Config):
raise TypeError('config must be a filename or Config object, '
f'but got {type(config)}')
if options is not None:
config.merge_from_dict(options)
model = build_classifier(config.model)
if checkpoint is not None:
map_loc = 'cpu' if device == 'cpu' else None
load_checkpoint(model, checkpoint, map_location=map_loc)
# save the config in the model for convenience in later use
model.cfg = config
model.to(device)
model.eval()
return model
|
20f819892295f6bfeb9c01f4e6d558731a2f8e68
| 18,762 |
import sys
def get_twitter_auth():
"""Setup Twitter authentication.
Return: tweepy.OAuthHandler object
"""
try:
credentials = read_credentials()
consumer_key = credentials.get('consumer_key')
consumer_secret = credentials.get('consumer_secret')
access_token = credentials.get('access_token')
access_secret = credentials.get('access_secret')
except KeyError:
sys.stderr.write("TWITTER_* not found\n")
sys.exit(1)
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
return auth
|
e84d24159bf4a8aeb4667547ef1f6324de3b8351
| 18,763 |
def Newton_method(f, df, start:float=0.0, max_step:int=32, sign_dig:int=6)->float:
"""
    Newton's method: find a root of f via fixed-point iteration on x - f(x)/df(x).
    ---------------------------
    Args:
        f: target function.
        df: derivative of f.
        start: initial guess.
        max_step: maximum number of iterations.
        sign_dig: number of significant digits used as the stopping criterion.
    Returns:
        Approximate root of f (float).
    Raises:
        None.
"""
fun = lambda x: x - f(x)/df(x)
return fixed_point(fun, start, max_step, sign_dig)
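# Usage sketch, assuming the fixed_point() helper used above is available in scope:
# approximate sqrt(2) as the root of f(x) = x^2 - 2, starting away from df(x) = 0.
if __name__ == "__main__":
    root = Newton_method(lambda x: x ** 2 - 2, lambda x: 2 * x, start=1.0)
    print(root)  # expected to be close to 1.4142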
|
d3a803a1a10b6c6d34831efeccd6fb7bae43689a
| 18,764 |
def get_all(isamAppliance, count=None, start=None, filter=None, check_mode=False, force=False):
"""
Retrieve a list of federations
"""
return isamAppliance.invoke_get("Retrieve a list of federations",
"{0}/{1}".format(uri, tools.create_query_string(count=count, start=start,
filter=filter)),
requires_modules=requires_modules,
requires_version=requires_version)
|
d65529bfc953976247fd44cb50051d5efddf10ea
| 18,765 |
def roty(theta):
"""
Rotation about Y-axis
@type theta: number
@param theta: the rotation angle
@rtype: 3x3 orthonormal matrix
@return: rotation about Y-axis
@see: L{rotx}, L{rotz}, L{rotvec}
"""
ct = cos(theta)
st = sin(theta)
return mat([[ct, 0, st],
[0, 1, 0],
[-st, 0, ct]])
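# Usage sketch, assuming this module's numpy imports (mat, cos, sin) are in scope:
# rotate the x-axis unit vector by 90 degrees about Y.
if __name__ == "__main__":
    from numpy import pi
    v = mat([[1.0], [0.0], [0.0]])
    print(roty(pi / 2) * v)  # approximately [[0.], [0.], [-1.]]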
|
702051efbd9f0999e04d5d7faca207c53520d712
| 18,766 |
def flush(name, family="ipv4", ignore_absence=False, **kwargs):
"""
.. versionadded:: 2014.7.0
.. versionchanged:: Magnesium
Flush current nftables state
family
Networking family, either ipv4 or ipv6
ignore_absence
If set to True, attempts to flush a non-existent table will not
result in a failed state.
.. versionadded:: Magnesium
"""
ret = {"name": name, "changes": {}, "result": None, "comment": ""}
if __opts__["test"]:
ret["comment"] = "nftables flush not performed in test mode."
return ret
for ignore in _STATE_INTERNAL_KEYWORDS:
if ignore in kwargs:
del kwargs[ignore]
if "table" not in kwargs:
kwargs["table"] = "filter"
check_table = __salt__["nftables.check_table"](kwargs["table"], family=family)
if not ignore_absence and not check_table["result"]:
ret["result"] = False
ret[
"comment"
] = "Failed to flush table {} in family {}, table does not exist.".format(
kwargs["table"], family
)
return ret
if "chain" not in kwargs:
kwargs["chain"] = ""
else:
check_chain = __salt__["nftables.check_chain"](
kwargs["table"], kwargs["chain"], family=family
)
if not ignore_absence and not check_chain["result"]:
ret["result"] = False
ret[
"comment"
] = "Failed to flush chain {} in table {} in family {}, chain does not exist.".format(
kwargs["chain"], kwargs["table"], family
)
return ret
res = __salt__["nftables.flush"](kwargs["table"], kwargs["chain"], family)
if res["result"] or (
ignore_absence and (not check_table["result"] or not check_chain["result"])
):
ret["changes"] = {"locale": name}
ret["result"] = True
ret["comment"] = "Flush nftables rules in {} table {} chain {} family".format(
kwargs["table"], kwargs["chain"], family
)
return ret
else:
ret["result"] = False
ret["comment"] = "Failed to flush nftables rules"
return ret
|
67ab6d2f7e337ff5a68704be14d605298a1447aa
| 18,767 |
def solution(s):
"""
Check if a string has properly matching brackets
:param s: String to verify if it is well-formed
:return: 1 if the brackets are properly matching, 0 otherwise
"""
return check_matching_brackets(s, opening="(", closing=")")
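# Usage sketch, assuming the check_matching_brackets() helper referenced above is
# importable in scope:
if __name__ == "__main__":
    print(solution("(a(b)c)"))  # expected 1: brackets match
    print(solution("(a(b)c"))   # expected 0: unbalanced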
|
4ba1bb92e0a1db05557980420f2fac3a88b93086
| 18,768 |
def process_to_annotation_data(df, class_names, video_fps, min_len):
"""
This function cleans the output data, so that there are
no jumping frames.
"""
j = 1 # Helper
# Minimum qty of frames of the same task in order to
# consider it a whole task
min_frames = int(float(min_len) * float(video_fps) * float(0.6))
# Initialize variables
df["subgroup"] = (df.iloc[:, -1] != df.iloc[:, -1].shift(1)).cumsum()
added = (
df["subgroup"]
.value_counts()[df["subgroup"].value_counts() < (j + 1)]
.index.tolist()
)
    # Modify jumping frames by considering the surrounding frames
# check for frames that jump (the total group of those frames are of a max of 7)
for jj in range(min_frames):
j = jj + 1
df["subgroup"] = (df.iloc[:, -2] != df.iloc[:, -2].shift(1)).cumsum()
added = (
df["subgroup"]
.value_counts()[df["subgroup"].value_counts() < (j + 1)]
.index.tolist()
)
cnt = 0
i_prev = 0
i_prev_cnt = 0
while len(added) > 0:
added.sort()
i = added[0]
k = 1 # Helper
prev = []
after = []
prev_yes = 0
after_yes = 0
if (i - k) > 0:
prev = [df[df["subgroup"] == (i - k)].iloc[0, -2]] * len(
df[df["subgroup"] == (i - k)]
)
prev_yes = 1
if (i + k) < max(df["subgroup"]) + 1:
after = [df[df["subgroup"] == (i + k)].iloc[0, -2]] * len(
df[df["subgroup"] == (i + k)]
)
after_yes = 1
check_loop = True
if (prev_yes + after_yes) == 2:
if mode(prev).mode[0] == mode(after).mode[0]:
check_loop = False
if check_loop:
k = 1 # Helper
while len(prev) < j + 2 - i_prev_cnt:
k += 1
if (i - k) > 0:
prev_i = [df[df["subgroup"] == (i - k)].iloc[0, -2]] * len(
df[df["subgroup"] == (i - k)]
)
prev.extend(prev_i)
else:
break
k = 1 # Helper
while len(after) < j + 2 - i_prev_cnt:
k += 1
if (i + k) < max(df["subgroup"]) + 1:
prev_i = [df[df["subgroup"] == (i + k)].iloc[0, -2]] * len(
df[df["subgroup"] == (i + k)]
)
after.extend(prev_i)
else:
break
changeTo = prev
changeTo.extend(after)
changeTo = mode(changeTo).mode[0]
else:
changeTo = mode(prev).mode[0]
change_idx = df.index[df["subgroup"] == i].tolist()
df.iloc[change_idx, -2] = changeTo
df["subgroup"] = (df.iloc[:, -2] != df.iloc[:, -2].shift(1)).cumsum()
added = (
df["subgroup"]
.value_counts()[df["subgroup"].value_counts() < (j + 1)]
.index.tolist()
)
added.sort()
if i == i_prev:
i_prev_cnt += 1
else:
i_prev_cnt = 0
i_prev = i
cnt += 1
if cnt > max(df["subgroup"]) * (j + 2):
break
# Modify the output shape so that for each task we have start frame and end frame
output_df = pd.DataFrame(columns=["task", "startTime", "endTime"])
for i in range(max(df["subgroup"])):
df_i = df[df["subgroup"] == (i + 1)]
task_str = str(class_names[int(df_i.iloc[0]["task_label"])])
start_frame = int(min(df_i["frame"]))
start_frame = frame_to_time(start_frame, video_fps)
end_frame = int(max(df_i["frame"]))
end_frame = frame_to_time(end_frame, video_fps)
output_df = output_df.append(
pd.DataFrame(
[[task_str] + [start_frame] + [end_frame]],
columns=["task", "startTime", "endTime"],
)
)
return output_df
|
eaa0537b217030664562489a2ceeec63cf7b32c0
| 18,769 |
def reduce_memmap(a):
"""Pickle the descriptors of a memmap instance to reopen on same file."""
m = _get_backing_memmap(a)
if m is not None:
        # m is a real mmap-backed memmap instance; reduce a while preserving
        # striding information
return _reduce_memmap_backed(a, m)
else:
# This memmap instance is actually backed by a regular in-memory
# buffer: this can happen when using binary operators on numpy.memmap
# instances
return (loads, (dumps(np.asarray(a), protocol=HIGHEST_PROTOCOL),))
|
4e4caf6bb5f1be1f62537c45671010232665ec0c
| 18,770 |
import numpy as np
from scipy import sparse
def load_sparse_csr(filename):
"""Load a saved sparse matrix in csr format. Stolen from above source."""
loader = np.load(filename)
return sparse.csr_matrix((loader['data'], loader['indices'],
loader['indptr']), shape=loader['shape'])
|
9654e8baedf5ada8b626d86860aef2335a04b565
| 18,771 |
from typing import Optional
def blank(name: Optional[str]) -> Output:
"""Generate a blank `Output` instance."""
return Output(file_suffix=name or _DEFAULT_SUFFIX, variables=dict())
|
0811dd8875983b89a8ae82a204243419effddcb4
| 18,772 |
def estatistica_regras(regras_pt, regras_lgp):
"""
    Count the frequency of morphosyntactic rules in the corpus.
    :param regras_pt: Portuguese side of the rules (list)
    :param regras_lgp: LGP side of the rules (list)
    :return: Dictionary with the frequency of each rule. E.g.: {"(0, 'INT')": 1, "(1, 'CAN')": 1, "(2, 'INT')": 1}
"""
estatistica = {}
repetido = set()
for i in range(len(regras_pt)):
tipo = regras_pt[i][1]
if i in repetido:
continue
if tipo == "":
tipo = "CAN"
if str((i,tipo)) not in estatistica.keys():
estatistica[str((i, tipo))]= 1
for j in range(len(regras_pt)):
a = regras_pt[i]
b = regras_lgp[i]
c = regras_pt[j]
d = regras_lgp[j]
if i >= j:
continue
if j in repetido:
continue
if compara_regra(a,b,c,d):
repetido.add(j)
tipo = regras_pt[j][1]
if tipo == "":
tipo = "CAN"
estatistica[str((i,tipo))] +=1
if str((j, tipo)) in estatistica.keys():
del estatistica[str((j,tipo))]
else:
tipo = regras_pt[j][1]
if tipo == "":
tipo = "CAN"
if str((j, tipo)) not in estatistica.keys():
estatistica.setdefault(str((j,tipo)),0)
estatistica[str((j, tipo))] += 1
return estatistica
|
67e1edae2d0418e1a36eefbb16ea4795b04728d6
| 18,773 |
import json
import sys
def read_config():
"""
Reads the configuration info into the cfg dictionary.
:return: A dictionary with the SSH-IPS configuration variables.
"""
CONFIG_FILE = '/etc/ssh-ips/config.json'
try:
with open(CONFIG_FILE, "r") as f:
cfg = json.load(f)
except ValueError as e:
print(str(e))
sys.exit()
return cfg
|
325e2e63fdf47c892ab432930de3e835faf1831d
| 18,774 |
def bq_create_dataset(bq_client):
"""Creates the BigQuery dataset.
If the dataset already exists, the existing dataset will be returned.
Dataset will be create in the location specified by DATASET_LOCATION.
Args:
bq_client: BigQuery client
Returns:
BigQuery dataset that will be used to store data.
"""
dataset_id = "{}.{}".format(bq_client.project, DATASET_NAME)
dataset = bigquery.Dataset(dataset_id)
dataset.location = DATASET_LOCATION
dataset = bq_client.create_dataset(dataset, exists_ok=True)
return dataset
|
ff2c0072210541261aff58ef8c590e94260e046d
| 18,775 |
def root_node():
"""
Returns DCC scene root node
:return: str
"""
return scene.get_root_node()
|
3a632bc0887a5c3a5696ec10f4387c917d12bfe5
| 18,776 |
def firsts(things):
"""
FIRSTS list
outputs a list containing the FIRST of each member of the input
list. It is an error if any member of the input list is empty.
(The input itself may be empty, in which case the output is also
empty.) This could be written as::
to firsts :list
output map \"first :list
end
but is provided as a primitive in order to speed up the iteration
tools MAP, MAP.SE, and FOREACH::
to transpose :matrix
if emptyp first :matrix [op []]
op fput firsts :matrix transpose bfs :matrix
end
"""
return [first(thing) for thing in things]
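# Usage sketch, assuming first() returns the first element of its argument:
if __name__ == "__main__":
    print(firsts([[1, 2, 3], [4, 5, 6]]))  # expected [1, 4]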
|
72141a7409cb17ac6785eabde91b45d5e9e0869f
| 18,777 |
def inf_set_mark_code(*args):
"""
inf_set_mark_code(_v=True) -> bool
"""
return _ida_ida.inf_set_mark_code(*args)
|
0395ab40bac5f036210802fdf548534c83c78951
| 18,778 |
def get_letter(xml):
"""
:param xml:
:return: everything between <bank> tag
"""
try:
left, right = xml.index('<bank '), xml.index('</bank>') + _BANK_OFFSET
return xml[left:right]
except ValueError:
return None
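# Usage sketch on a tiny XML fragment, assuming _BANK_OFFSET equals len('</bank>')
# so the closing tag is included in the slice:
if __name__ == "__main__":
    xml = "<root><bank id='1'>data</bank></root>"
    print(get_letter(xml))  # -> "<bank id='1'>data</bank>"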
|
3c7a601b2a25969902d530e3e17a48ddcf0819c1
| 18,779 |
def stim_align_all_cells(traces, time, new_start):
"""
Make stim-aligned PSTHs from trialwise data (eg. trial x cell x time array). The
advantage of doing it this way (trialwise) is the trace for each cell gets rolled around
to the other side of the array, thus eliminating the need for nan padding.
Args:
        traces (array-like): trial x cell x time array of trace data, typically from make_trialwise
        time (array-like): list of stim times for each trial, must match exactly, not sure how it
handles nans yet...
new_start (int): frame number where the psths will be aligned to
"""
    # FIXME: URGENT see above, list or single stim time??? depends on how this is working... for a
# single trial an int is fine, but for multiple trials you'd want to give a list
psth = np.zeros_like(traces)
for i in range(traces.shape[0]):
psth[i,:,:] = np.roll(traces[i,:,:], -int(time[i])+new_start, axis=1)
return psth
|
18861b67b68f37c0c76caad40bd5d583868b1e70
| 18,780 |
def PowercycleNode(opts, args):
"""Remove a node from the cluster.
@param opts: the command line options selected by the user
@type args: list
@param args: should contain only one element, the name of
the node to be removed
@rtype: int
@return: the desired exit code
"""
node = args[0]
if (not opts.confirm and
not AskUser("Are you sure you want to hard powercycle node %s?" % node)):
return 2
op = opcodes.OpNodePowercycle(node_name=node, force=opts.force)
result = SubmitOrSend(op, opts)
if result:
ToStderr(result)
return 0
|
557b48309bb897da29cc1c1f6f724cd6d3959e23
| 18,781 |
def collect_data(bids_dir, participant_label, queries, filters=None, bids_validate=True):
"""
Uses pybids to retrieve the input data for a given participant
"""
if isinstance(bids_dir, BIDSLayout):
layout = bids_dir
else:
layout = BIDSLayout(str(bids_dir), validate=bids_validate)
bids_filters = filters or {}
for acq, entities in bids_filters.items():
queries[acq].update(entities)
subj_data = {
dtype: sorted(
layout.get(
return_type="file",
subject=participant_label,
extension=["nii", "nii.gz"],
**query
)
)
for dtype, query in queries.items()
}
return subj_data, layout
|
eaa15a3b3dacbae7c16b03f0c6347d71d939b57d
| 18,782 |
def minimaldescriptives(inlist):
"""this function takes a clean list of data and returns the N, sum, mean
and sum of squares. """
N = 0
sum = 0.0
SS = 0.0
for i in range(len(inlist)):
N = N + 1
sum = sum + inlist[i]
SS = SS + (inlist[i] ** 2)
mean = sum / float(N)
return N, sum, mean, SS
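# Usage sketch on a small sample:
if __name__ == "__main__":
    n, total, mean, ss = minimaldescriptives([1.0, 2.0, 3.0, 4.0])
    print(n, total, mean, ss)  # 4 10.0 2.5 30.0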
|
ca1d821ef64b93218bdb22268bfdde737f2d731c
| 18,783 |
def gen_filelist(infiles, tmpd) :
"""Write all audio files to a temporary text document for ffmpeg
Returns the path of that text document."""
filename = tmpd/"files.txt"
with open(filename, "w") as f:
for file in infiles:
# This part ensures that any apostrophes are escaped
file = str(file).split("'")
if len(file) > 1:
file = "'\\''".join(file)
else:
file = file[0]
# Write the file line
f.write("file '"+file+"'\n")
return filename
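# Usage sketch: write a concat list for two hypothetical audio files (one with an
# apostrophe) into a temporary directory, assuming pathlib-style paths.
if __name__ == "__main__":
    from pathlib import Path
    import tempfile
    tmpd = Path(tempfile.mkdtemp())
    listing = gen_filelist([Path("a.mp3"), Path("it's.mp3")], tmpd)
    print(listing.read_text())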
|
c7d21c62de34fea98725a39fec735836e0cfd3d9
| 18,784 |
import os
def check_envs():
"""Checks environment variables.
The MONGODB_PWD is a needed variable to enable mongodb connection.
Returns:
bool: If all needed environment variables are set.
"""
if not os.environ.get('MONGODB_PWD', False):
return False
return True
|
22ac892b0eee827f63f7d8258ca49f0e462452fd
| 18,785 |
def VMMemoryLower() -> tvm.ir.transform.Pass:
"""Perform memory lowering. Lowers the relax.builtin.alloc_tensor intrinsic to VM intrinsics.
Returns
-------
ret: tvm.ir.transform.Pass
"""
return _ffi_api.VMMemoryLower()
|
f636ea5f854e42413395669d1a0da3c2e439fb1e
| 18,786 |
def _is_disk_larger_than_max_size(device, node_uuid):
"""Check if total disk size exceeds 2TB msdos limit
:param device: device path.
:param node_uuid: node's uuid. Used for logging.
:raises: InstanceDeployFailure, if any disk partitioning related
commands fail.
:returns: True if total disk size exceeds 2TB. Returns False otherwise.
"""
try:
disksize_bytes, err = utils.execute('blockdev', '--getsize64',
device,
use_standard_locale=True,
run_as_root=True)
except (processutils.UnknownArgumentError,
processutils.ProcessExecutionError, OSError) as e:
msg = (_('Failed to get size of disk %(disk)s for node %(node)s. '
'Error: %(error)s') %
{'disk': device, 'node': node_uuid, 'error': e})
LOG.error(msg)
raise exception.InstanceDeployFailure(msg)
disksize_mb = int(disksize_bytes.strip()) // 1024 // 1024
return disksize_mb > MAX_DISK_SIZE_MB_SUPPORTED_BY_MBR
|
ed39e885825cec7c2fba895121741b43e3661a58
| 18,787 |
def getLines(filename):
"""Return list of lines from file"""
with open(filename, 'r', errors='ignore') as ff:
return ff.readlines()
|
36e515decaa3876eed3b5db8363fb81a5db89c84
| 18,788 |
import torch
def bbox_next_frame_v3(F_first, F_pre, seg_pre, seg_first, F_tar, bbox_first, bbox_pre, temp, name):
"""
METHOD: combining tracking & direct recognition, calculate bbox in target frame
using both first frame and previous frame.
"""
F_first, F_pre, seg_pre, seg_first, F_tar = squeeze_all(F_first, F_pre, seg_pre, seg_first, F_tar)
c, h, w = F_first.size()
coords_pre_tar = match_ref_tar(F_pre, F_tar, seg_pre, temp)
coords_first_tar = match_ref_tar(F_first, F_tar, seg_first, temp)
coords_tar = {}
for cnt, coord_first in coords_first_tar.items():
coord_pre = coords_pre_tar[cnt]
# fall-back schema
if(coord_pre is None):
coord_tar_ = coord_first
else:
coord_tar_ = coord_pre
coords_tar[cnt] = coord_tar_
_, seg_pre_idx = torch.max(seg_pre, dim = 0)
coords_tar = clean_coords(coords_tar, bbox_pre, threshold=4)
bbox_tar = bbox_in_tar(coords_tar, bbox_first, h, w)
    # recognition
seg_pred = recoginition(F_first, F_tar, bbox_first, bbox_tar, seg_first, temp)
seg_cleaned = clean_seg(seg_pred, bbox_tar, threshold=1)
# move bbox w.r.t cleaned seg
bbox_tar = shift_bbox(seg_cleaned, bbox_tar)
seg_post = post_process_seg(seg_pred.unsqueeze(0))
return seg_pred, seg_post, bbox_tar
|
62d782a04b5d7c114fe0096fec50d6cd2d9db7bf
| 18,789 |
def hough_lines(img, rho=2, theta=np.pi / 180, threshold=20, min_line_len=5, max_line_gap=25, thickness=3):
"""Perform a Hough transform on img
Args:
img (numpy.ndarray): input image
rho (float, optional): distance resolution in pixels of the Hough grid
theta (float, optional): angular resolution in radians of the Hough grid
threshold (float, optional): minimum number of votes (intersections in Hough grid cell)
min_line_len (int, optional): minimum number of pixels making up a line
max_line_gap (int, optional): maximum gap in pixels between connectable line segments
thickness (int, optional): thickness of lines drawn on resulting image
Returns:
numpy.ndarray: result image
"""
# Hough transform
lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]),
minLineLength=min_line_len, maxLineGap=max_line_gap)
line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
# Line extrapolation
extrapolated_lines = extrapolate_lines(lines, line_img.shape)
# Image display
draw_lines(line_img, extrapolated_lines, thickness=thickness)
return line_img
|
f797e8b24255225d4c2beb41677da3fa6c6e42d6
| 18,790 |
import packaging
def verify_package_version(ctx, config, remote):
"""
Ensures that the version of package installed is what
was asked for in the config.
For most cases this is for ceph, but we also install samba
for example.
"""
# Do not verify the version if the ceph-deploy task is being used to
# install ceph. Verifying the ceph installed by ceph-deploy should work,
# but the qa suites will need reorganized first to run ceph-deploy
# before the install task.
# see: http://tracker.ceph.com/issues/11248
if config.get("extras"):
log.info("Skipping version verification...")
return True
if 'repos' in config and config.get('repos'):
log.info("Skipping version verification because we have custom repos...")
return True
builder = _get_builder_project(ctx, remote, config)
version = builder.version
pkg_to_check = builder.project
installed_ver = packaging.get_package_version(remote, pkg_to_check)
if installed_ver and version in installed_ver:
msg = "The correct {pkg} version {ver} is installed.".format(
ver=version,
pkg=pkg_to_check
)
log.info(msg)
else:
raise RuntimeError(
"{pkg} version {ver} was not installed, found {installed}.".format(
ver=version,
installed=installed_ver,
pkg=pkg_to_check
)
)
|
5ab0177738ccec3879c0383a13038515b2d6b6e9
| 18,791 |
import os
import glob
def convert_file():
"""Setup code to create a groceries cart object with 6 items in it"""
local_path = os.path.join('data', '2017-07-31_072433.txt')
fixture_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
local_path,
)
files = glob.glob(fixture_path)
for file_ in files:
convert_sitl(file_)
converted_path = generate_output_path(fixture_path)
converted_files = glob.glob(converted_path)
return converted_files
|
1767d5ee584ef1a45f3ebfab92d2c461df6ff965
| 18,792 |
def decrypt(v1: int, v2: int):
"""funcao desencriptadora"""
palavra_encriptada = int(v1) ^ int(v2)
desencriptada = palavra_encriptada.to_bytes((palavra_encriptada.bit_length() + 7) // 8, 'big')
return desencriptada.decode()
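# Usage sketch: the key and ciphertext below are illustrative; their XOR is 0x6869,
# which decodes to "hi" as big-endian bytes.
if __name__ == "__main__":
    key = 0x1234
    cipher = key ^ 0x6869
    print(decrypt(cipher, key))  # -> "hi"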
|
19887e070f0c6ae7e68f677d69c2c5eb24761bdb
| 18,793 |
import tensorflow as tf
def enforce_mixture_consistency_time_domain(mixture_waveforms,
separated_waveforms,
mix_weights=None,
mix_weights_type=''):
"""Projection implementing mixture consistency in time domain.
This projection makes the sum across sources of separated_waveforms equal
mixture_waveforms and minimizes the unweighted mean-squared error between the
sum across sources of separated_waveforms and mixture_waveforms. See
https://arxiv.org/abs/1811.08521 for the derivation.
Args:
mixture_waveforms: Tensor of mixture waveforms in waveform format.
separated_waveforms: Tensor of separated waveforms in source image format.
mix_weights: None or Tensor of weights used for mixture consistency, shape
should broadcast with denoised_waveforms. Overrides mix_weights_type.
mix_weights_type: Type of weights used for mixture consistency. Options are:
`` - No weighting.
`magsq` - Mix weights are magnitude-squared of the separated signal.
Returns:
Projected separated_waveforms as a Tensor in source image format.
"""
# Modify the source estimates such that they sum up to the mixture, where
# the mixture is defined as the sum across sources of the true source
# targets. Uses the least-squares solution under the constraint that the
# resulting source estimates add up to the mixture.
num_sources = tf.shape(separated_waveforms)[1]
# Add a sources axis to mixture_spectrograms.
mix = tf.expand_dims(mixture_waveforms, axis=1)
# mix is now of shape:
# (batch_size, 1, num_mics, samples).
mix_estimate = tf.reduce_sum(separated_waveforms, axis=1, keepdims=True)
# mix_estimate is of shape:
# (batch_size, 1, num_mics, samples).
if mix_weights is None:
if mix_weights_type == 'magsq':
mix_weights = tf.reduce_mean(tf.square(separated_waveforms), axis=[2, 3],
keepdims=True)
mix_weights /= tf.reduce_sum(mix_weights, axis=1, keepdims=True)
else:
mix_weights = (1.0 / num_sources)
mix_weights = tf.cast(mix_weights, mix.dtype)
correction = mix_weights * (mix - mix_estimate)
separated_waveforms = separated_waveforms + correction
return separated_waveforms
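# Usage sketch (TensorFlow eager): two flat source estimates that under-explain a
# mixture of ones; after the projection the estimates sum exactly to the mixture.
if __name__ == "__main__":
    mix = tf.ones([1, 1, 8])  # (batch, mics, samples)
    est = tf.stack([0.25 * tf.ones([1, 1, 8]),
                    0.25 * tf.ones([1, 1, 8])], axis=1)  # (batch, sources, mics, samples)
    out = enforce_mixture_consistency_time_domain(mix, est)
    print(tf.reduce_sum(out, axis=1))  # equals the mixture (all ones)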
|
294b1d927394ef388967de529ac5f24382ceec2c
| 18,794 |
def MatchNormsLoss(anchor_tensors, paired_tensors):
"""A norm on the difference between the norms of paired tensors.
Gradients are only applied to the paired_tensor.
Args:
anchor_tensors: batch of embeddings deemed to have a "correct" norm.
paired_tensors: batch of embeddings that will be pushed to the norm of
anchor_tensors.
Returns:
A scalar loss
"""
anchor_norms = tf.stop_gradient(tf.norm(anchor_tensors, axis=1))
paired_norms = tf.norm(paired_tensors, axis=1)
tf.summary.histogram('norms_difference', tf.nn.l2_loss(anchor_norms
-paired_norms))
loss = tf.reduce_mean(tf.nn.l2_loss(anchor_norms-paired_norms))
return loss
|
02f62a90cf51547b4af6063ad13be1bb712dfe5a
| 18,795 |
def get_ei(xx_tf, yn_tf, gp):
"""
:param xx_tf: A tensor giving the new point to evaluate at.
:param yn_tf: A tensor giving all previously observed responses.
:param gp: A gp used to predict. GP should be trained on the locations yn_tf was observed.
"""
N, P = gp.index_points.numpy().shape
k = gp.kernel
kxx = tf.reshape(k.apply(xx_tf, gp.index_points), [N,1])
K = tf.squeeze(gp.covariance())
Kl = tf.squeeze(tf.linalg.cholesky(gp.covariance()))
alpha = tf.linalg.solve(tf.cast(tf.transpose(Kl), tf.float64), tf.linalg.solve(tf.cast(Kl, tf.float64), yn_tf))
v = tf.linalg.solve(Kl, kxx)
zpred_mean = tf.squeeze(tf.matmul(tf.transpose(kxx), alpha))
#TODO: Made a small change right here.
kkxx = k.apply(xx_tf, xx_tf)
zpred_vars = tf.squeeze(kkxx - tf.matmul(tf.transpose(v),v))
miny = tf.reduce_min(yn_tf)
pdist = tfp.distributions.Normal(tf.squeeze(zpred_mean), tf.squeeze(tf.sqrt(zpred_vars)))
#pdist = tfp.distributions.Normal(tf.squeeze(zpred_mean), tf.squeeze((zpred_vars)))
ei = (miny - zpred_mean) * pdist.cdf(miny) + \
zpred_vars * pdist.prob(miny)
return(ei)
|
af1b41e80a74d505161a5c9edc2334a6d4e691bf
| 18,796 |
def authenticate_begin(username, **_):
"""
Begin authentication procedure
Variables:
username user name of the user you want to login with
Arguments:
None
Data Block:
None
Result example:
<WEBAUTHN_AUTHENTICATION_DATA>
"""
user = STORAGE.user.get(username, as_obj=False)
if not user:
return make_api_response({'success': False}, err="Bad Request", status_code=400)
session.pop('state', None)
security_tokens = user.get('security_tokens', {}) or {}
credentials = [AttestedCredentialData(websafe_decode(x)) for x in security_tokens.values()]
auth_data, state = server.authenticate_begin(credentials)
session['state'] = state
return make_api_response(list(cbor.encode(auth_data)))
|
6a42bfd2aba2f17f7ee7ec892bf23ef2f0221ee0
| 18,797 |
def tempSHT31():
"""Read temp and humidity from SHT31"""
return sht31sensor.get_temp_humi()
|
1e934ad3a467d48019ec91e18ffab4e8d4c473ae
| 18,798 |
import requests
def dog(argv, params):
"""Returns a slack attachment with a picture of a dog from thedogapi"""
# Print prints logs to cloudwatch
# Send response to response url
dogurl = 'https://api.thedogapi.com/v1/images/search?mime_types=jpg,png'
dogr = requests.get(dogurl)
url = dogr.json()[0].get('url')
payload = {
'statusCode': '200',
"attachments": [
{
"author_name": '@{} /catops dog'.format(
params.get('user_name', ['CatOps'])[0]),
"fallback": "Woof woof.",
"title": "Woof!",
"text": "Evil doggo.",
"image_url": url,
"color": "#764FA5"
}
],
'response_type': 'in_channel',
'headers': {'Content-Type': 'application/json'}
}
return payload
|
cb80426e6cab0aa2fc58b78baa0ff225d654f04a
| 18,799 |