content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M)
---|---|---
import inspect
def GetUniqueClassMembers(Class, Ignore = [], AllowedOverrides = []):
"""
Args:
- Class {object}: reference to the class
    - Ignore {List[str]}: member names to always exclude from the result
    - AllowedOverrides {List[str]}: always allowed member names, kept even if they exist in the parent class
    Returns: list of (Name, Reference) tuples
"""
Members = inspect.getmembers(Class)
ParentClass = GetClassParents(Class)[0]
    UniqueMembers = [x for x in Members if (not hasattr(ParentClass, x[0]) and x[0] not in Ignore) or x[0] in AllowedOverrides] # and not x[0].startswith("__")
    return UniqueMembers | bdd22e6ce7eaa12d2285f7ca7747a0210d3b98c9 | 3,651,973 |
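A minimal usage sketch for GetUniqueClassMembers above; the GetClassParents helper is not shown in this snippet, so a hypothetical stand-in returning the direct base classes is assumed.
def GetClassParents(Class):
    # hypothetical stand-in for the helper used above; assumed to return the direct base classes
    return Class.__bases__

class Base:
    def shared(self):
        pass

class Child(Base):
    def only_in_child(self):
        pass

# Expected: only members defined on Child and absent from Base, e.g. [('only_in_child', <function ...>)]
print(GetUniqueClassMembers(Child))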
def get_condition_keys_available_to_raw_arn(db_session, raw_arn):
"""
Get a list of condition keys available to a RAW ARN
:param db_session: SQLAlchemy database session object
    :param raw_arn: The value in the database, like arn:${Partition}:s3:::${BucketName}/${ObjectName}
    :return: list of condition key names, or False if the ARN has none
    """
rows = db_session.query(ArnTable).filter(ArnTable.raw_arn.like(raw_arn))
result = rows.first()
    if result and result.condition_keys:
condition_keys = result.condition_keys.split(",")
return condition_keys
else:
return False | 8f8025ffe1fd6f6fa750f826b0a3c5b8a4f655eb | 3,651,974 |
def get_reviewer(form):
""" Gets reviewer info, or adds if necessary
"""
reviewer = Reviewer.query.filter_by(email=form.get("reviewer-email")).first()
if reviewer:
reviewer_id = reviewer.reviewer_id
else:
reviewer_id = add_reviewer(form)
return reviewer_id | 641bb81e73bad7f0eeac8a5cbd5efde499535b77 | 3,651,975 |
def read_xyz(using):
"""Reads coordinates of an xyz file and return a list of |Atom| objects, one for each atom"""
coords = []
with open(using, "r") as f:
for coord in f.readlines()[2:]:
line = coord.split()
for val in PT.ptable.values():
if line[0] == val[0]:
coords.append(
Atom(line[0],
coords=tuple(float(i) for i in line[1:4])))
return coords | 9ed1b0de9fe4bd7bbabe63a2d808b08e44315113 | 3,651,976 |
def initialize_classification(model_name: str,
num_classes: int,
use_pretrained: bool =True
) -> (Module, int):
""" Initialize these variables which will be set in this if statement. Each of these
variables is model specific. The final fully-connected layer will fit the new number
of classes. The weights are initialized with the Xavier algorithm. All biases are
initialized to 0.
Args:
model_name (str): Classification network name in ['vgg', 'alexnet', 'resnet', 'googlenet'].
num_classes (int): The number of classes in dataset.
use_pretrain (bool): If true, load pretrained model on ImageNet.
Return:
model (Module): Modified classification network fitting given class number.
input_size (int): input image size for the classification network.
"""
model = None
input_size = None
# VGG-16
if "vgg" in model_name.lower():
model = models.vgg16(pretrained=use_pretrained)
set_parameter_requires_grad(model, True)
num_ftrs = model.classifier[6].in_features
model.classifier[6] = nn.Linear(num_ftrs, num_classes)
nn.init.xavier_uniform_(model.classifier[6].weight)
nn.init.zeros_(model.classifier[6].bias)
input_size = 224
# Alexnet
elif "alexnet" in model_name.lower():
model = models.alexnet(pretrained=use_pretrained)
set_parameter_requires_grad(model, True)
num_ftrs = model.classifier[6].in_features
model.classifier[6] = nn.Linear(num_ftrs, num_classes)
nn.init.xavier_uniform_(model.classifier[6].weight)
nn.init.zeros_(model.classifier[6].bias)
input_size = 224
    # ResNet-18 / ResNet-50
elif "resnet" in model_name.lower():
if '18' in model_name.lower():
model = models.resnet18(pretrained=use_pretrained)
else:
model = models.resnet50(pretrained=use_pretrained)
set_parameter_requires_grad(model, True)
num_ftrs = model.fc.in_features
model.fc = nn.Linear(num_ftrs, num_classes)
nn.init.xavier_uniform_(model.fc.weight)
nn.init.zeros_(model.fc.bias)
input_size = 224
# GoogLeNet
elif "googlenet" in model_name.lower():
model = models.googlenet(pretrained=use_pretrained, aux_logits=True)
set_parameter_requires_grad(model, True)
        # Handle the auxiliary networks
num_ftrs = model.aux1.fc2.in_features
model.aux1.fc2 = nn.Linear(num_ftrs, num_classes)
nn.init.xavier_uniform_(model.aux1.fc2.weight)
nn.init.zeros_(model.aux1.fc2.bias)
num_ftrs = model.aux2.fc2.in_features
model.aux2.fc2 = nn.Linear(num_ftrs, num_classes)
nn.init.xavier_uniform_(model.aux2.fc2.weight)
nn.init.zeros_(model.aux2.fc2.bias)
# Handle the primary network
num_ftrs = model.fc.in_features
model.fc = nn.Linear(num_ftrs, num_classes)
nn.init.xavier_uniform_(model.fc.weight)
nn.init.zeros_(model.fc.bias)
input_size = 224
else:
raise ValueError("Invalid classification network name.")
return model, input_size | 23958e7970022b2c0ed77353fa8b927510873bb7 | 3,651,977 |
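A minimal usage sketch, assuming the module-level imports used above (torchvision's models, torch.nn as nn, Module) are in place; set_parameter_requires_grad is referenced but not shown, so a hypothetical stand-in that freezes all parameters is included.
import torch
import torch.nn as nn
from torchvision import models

def set_parameter_requires_grad(model, feature_extracting):
    # hypothetical stand-in for the helper referenced above: freeze the backbone when feature extracting
    if feature_extracting:
        for param in model.parameters():
            param.requires_grad = False

model, input_size = initialize_classification("resnet18", num_classes=10, use_pretrained=False)
logits = model(torch.randn(1, 3, input_size, input_size))
print(logits.shape)  # torch.Size([1, 10])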
def get_csc():
"""get Configuration Client"""
config_host = enstore_functions2.default_host()
config_port = enstore_functions2.default_port()
return configuration_client.ConfigurationClient((config_host,config_port)) | 49c2740ac9a654e700079d15f32421e32f8568c3 | 3,651,978 |
def findx(mu, lnum):
"""Obtains the Hill sphere and x-coordinate for a mu-value and lnum."""
hill = (mu/3)**(1.0/3.0)
if lnum == 1: #lnum is used to request one of the collinear Lagrange points.
guess = 1 - mu - hill * (1 - (1.0/3.0) * hill - (1.0/9.0) * hill ** 2)
elif lnum == 2:
guess = 1 - mu + hill * (1 + (1.0/3.0) * hill - (1.0/9.0) * hill ** 2)
elif lnum == 3:
guess = -1 #I know this isn't the formal guess the Mission Handbook might prescribe, but it should suffice
#as the L3 Lagrange point is the only collinear point with x < 0
else:
return "Invalid"
return optimize.fsolve(xroot, guess, mu, xtol = 0.0)[0], hill | 260f9dda3b5a494df15d3c0bbe7ce0ebd0351c9b | 3,651,979 |
def _f1_div_ ( self , other ) :
"""Operator for ``1D-function / other''"""
return _f1_op_ ( self , other , Ostap.MoreRooFit.Division , "Divide_" ) | 5278ec2036724f0bb263487b8880c16b161d8145 | 3,651,980 |
def test_interrupted_late_wait():
"""Test we can interrupt the wait during the timeout period.
"""
called = 0
def cond():
nonlocal called
called += 1
if called == 3:
return True
job = InstrJob(cond, 0)
assert not job.wait_for_completion(lambda: True, refresh_time=0.1)
assert called == 2 | 737df84c71efdaf0e52be5f42c0ae856f9fb1018 | 3,651,981 |
def set_prior_6(para):
"""
    set the prior before the first data point arrives: return empty per-shape lists for
    log_prob, delta_mean, delta_var and time_since_last_cp
"""
n_shape = para['n_shape']
log_prob = [ [] for i_shape in range(n_shape) ]
delta_mean = [ [] for i_shape in range(n_shape) ]
delta_var = [ [] for i_shape in range(n_shape) ]
time_since_last_cp = [ [] for i_shape in range(n_shape) ]
return log_prob, delta_mean, delta_var, time_since_last_cp | e97944e1c48ca6def16308584dfe04eaebae6259 | 3,651,982 |
def inf_set_af2(*args):
"""
inf_set_af2(_v) -> bool
"""
return _ida_ida.inf_set_af2(*args) | c9fa149ca8595d053db4eb4d4113e2493b8665de | 3,651,983 |
import pandas as pd
def json_find_matches_dataframe(df, filter_path, reverse_selectivity=False):
"""Iteratively filters a pandas.DataFrame df using the same sort of
filter_path used by json_extract.
Because of the tabular nature of pandas DataFrames, filters are treated as
being either 'down' or 'check'; a filter either refines both the rows and
columns returned (essentially a 'down' action) or refines only the rows
returned (essentially a 'check' action)."""
for layer in filter_path:
if isinstance(layer, str):
if layer == "!!":
reverse_selectivity = not reverse_selectivity
continue
        rows = pd.Series([True] * df.shape[0], index=df.index)
for filt in layer:
new_rows, new_cols = filt.filter_dataframe(df)
rows &= new_rows
if filt.action != "check":
cols = new_cols
else:
cols = df.columns
df = df.loc[rows, cols]
return df | 33f3de47ffbe774d22e2dc9fb7c07f132272452f | 3,651,985 |
import numpy as np
def contrast(arr, amount=0.2, split=0.5, normalize=True):
"""
General contrast booster or diffuser of normalized array-like data.
Parameters
----------
arr : ndarray
Input array (of floats on range [0, 1] if ``normalize=False``). If
values exist outside this range, with ``normalize=True`` the image
will be normalized for calculation.
amount : float or length-2 iterable of floats
Controls the exponential contrast mechanism for values above and below
``split`` in ``I``. If positive, the curve provides added contrast;
if negative, the curve provides reduced contrast.
        If provided as a length-2 iterable of floats, they control the regions
(below, above) ``split`` separately.
split : float
Positive scalar, on range [0, 1], determining the midpoint of the
exponential contrast. Default of 0.5 is reasonable for well-exposed
images.
normalize : bool, default True
Controls normalization to the range [0, 1].
Returns
-------
focused : ndarray
Contrast adjusted, normalized, floating-point image on range [0, 1].
Notes
-----
The result of this algorithm is like applying a Curves adjustment in the
GIMP or Photoshop.
Algorithm for curves adjustment at a given pixel, x, is given by::
| split * (x/split)^below, 0 <= x <= split
y(x) = |
| 1 - (1-split) * ((1-x) / (1-split))^above, split < x <= 1.0
See Also
--------
skfuzzy.fuzzymath.sigmoid
"""
# Ensure scalars are floats, to avoid truncating division in Python 2.x
split = float(split)
im = arr.astype(float)
amount_ = np.asarray(amount, dtype=np.float64).ravel()
if len(amount_) == 1:
# One argument -> Equal amount applied on either side of `split`
above = below = amount_[0]
else:
# Two arguments -> Control contrast separately in light/dark regions
below = amount_[0]
above = amount_[1]
# Normalize if required
if im.max() > 1. and normalize is True:
ma = float(im.max())
im /= float(im.max())
else:
ma = 1.
focused = np.zeros_like(im, dtype=np.float64)
# Simplified array-wise algorithm using fancy indexing rather than looping
focused[im <= split] = split * (im[im <= split] / split) ** below
focused[im > split] = (1 - (1. - split) *
((1 - im[im > split]) / (1. - split)) ** above)
# Reapply multiplicative factor
return focused * ma | 94542fd4df7c65c98f818b652c733ad5a319f449 | 3,651,986 |
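A short usage sketch for contrast above, on a synthetic gradient that is already normalized to [0, 1].
img = np.linspace(0.0, 1.0, 256).reshape(16, 16)
adjusted = contrast(img, amount=2.0, split=0.5)  # exponent of 2 applied on both sides of split
print(adjusted.min(), adjusted.max())            # output stays on [0, 1]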
def get_group(items, total_groups, group_id):
"""
Get the items from the passed in group based on group size.
"""
if not 0 < group_id <= total_groups:
raise ValueError("Invalid test-group argument")
start, size = get_group_size_and_start(len(items), total_groups, group_id)
selected = items[start : start + size]
deselected = items[:start] + items[start + size :]
assert len(selected) + len(deselected) == len(items)
return selected, deselected | f236c9f26adfa5da5507e7ae91feb8858ac13c6c | 3,651,987 |
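A runnable sketch of get_group above; get_group_size_and_start is not shown in this snippet, so a hypothetical stand-in that spreads the remainder over the first groups is assumed.
def get_group_size_and_start(total_items, total_groups, group_id):
    # hypothetical stand-in: near-equal group sizes, remainder given to the first groups
    base, extra = divmod(total_items, total_groups)
    size = base + (1 if group_id <= extra else 0)
    start = (group_id - 1) * base + min(group_id - 1, extra)
    return start, size

selected, deselected = get_group(list(range(10)), total_groups=3, group_id=1)
print(selected)    # [0, 1, 2, 3]
print(deselected)  # [4, 5, 6, 7, 8, 9]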
def read_nq_entry(entry, is_training):
"""
Converts a NQ entry into a list of NqExamples.
:param entry: dict
:param is_training: bool
:return: list[NqExample]
"""
def is_whitespace(c):
return c in " \t\r\n" or ord(c) == 0x202F
examples = []
contexts_id = entry["id"]
contexts = entry["contexts"]
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
for c in contexts:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
questions = []
for i, question in enumerate(entry["questions"]):
qas_id = "{}".format(contexts_id)
question_text = question["input_text"]
start_position = None
end_position = None
answer = None
if is_training:
answer_dict = entry["answers"][i]
answer = make_nq_answer(contexts, answer_dict)
# For now, only handle extractive, yes, and no.
if answer is None or answer.offset is None:
continue
start_position = char_to_word_offset[answer.offset]
end_position = char_to_word_offset[answer.offset + len(answer.text) - 1]
# Only add answers where the text can be exactly recovered from the
# document. If this CAN'T happen it's likely due to weird Unicode
# stuff so we will just skip the example.
#
# Note that this means for training mode, every example is NOT
# guaranteed to be preserved.
actual_text = " ".join(doc_tokens[start_position:(end_position + 1)])
cleaned_answer_text = " ".join(
tokenization.whitespace_tokenize(answer.text))
if actual_text.find(cleaned_answer_text) == -1:
logger.warning("Could not find answer: '%s' vs. '%s'", actual_text,
cleaned_answer_text)
continue
questions.append(question_text)
example = NqExample(
example_id=int(contexts_id),
qas_id=qas_id,
questions=questions[:],
doc_tokens=doc_tokens,
doc_tokens_map=entry.get("contexts_map", None),
answer=answer,
start_position=start_position,
end_position=end_position)
examples.append(example)
return examples | a712ff6a2714798ee49fd90741f387d8cb3b4695 | 3,651,988 |
def calc_atoms(psi, vol_elem=1.0):
"""Calculate the total number of atoms.
Parameters
----------
psi : :obj:`list` of 2D NumPy :obj:`array` or PyTorch :obj:`Tensor`
The input spinor wavefunction.
vol_elem : :obj:`float`
2D volume element of the space.
Returns
-------
atom_num : :obj:`float`
The total atom number in both spin components.
"""
pops = calc_pops(psi, vol_elem=vol_elem)
atom_num = sum(pops)
return atom_num | 9e9d87c9445a6a03fe245b66c2ce1c104a276e7a | 3,651,989 |
def get_tcp_packet_payload_len(pkt: dpkt.ethernet.Ethernet) -> int:
"""
    Return the length of the TCP payload only, excluding IP/TCP headers and options
    :param pkt: dpkt.ethernet.Ethernet packet containing a TCP header
    :return: int, or None if the packet is neither Ethernet nor IP
"""
if isinstance(pkt, dpkt.ethernet.Ethernet):
ip = pkt.data
elif isinstance(pkt, dpkt.ip.IP):
ip = pkt
else:
return None
return ip.len - (ip.hl * 4 + ip.data.off * 4) | 410ec3f76085647def33572cc35f951462dd9324 | 3,651,990 |
def overviewUsage(err=''):
""" default overview information highlighting active scripts"""
m = '%s\n' %err
m += ' The following scripts allow you to manage Team Branches (TmB) on SalesForce.\n'
m += ' Use one of the scripts below to meet your needs.\n'
m += ' \n'
m += ' 1. First link Task Branches to Team Branches \n'
m += ' teamaddbranch -s4.1 -n<RTL|SI|Timing> -t<Team_branch> -b<branch_Name> \n'
m += ' \n'
m += ' 2. List Task Branches linked to a Team Branches \n'
m += ' teamaddbranch -s4.1 -n<RTL|SI|Timing> -t<Team_branch> -b<branch_Name> -d \n'
m += ' \n'
m += ' 3. First link Task Branches to Team Branches \n'
m += ' teamaddbranch -s4.1 -n<RTL|SI|Timing> -t<Team_branch> -b<branch_Name> -p <low|medium|high|urgent|critical> \n'
m += ' \n'
return m | ba62773dd8be21d17c44e8e295c8228d568512a0 | 3,651,991 |
from math import fabs
def min_distance(z_i, z_j, sc_size):
    """Calculates the minimum distance between the particle at
    ``z_i`` and all of the images of the particle at ``z_j``,
    including this one. The minimum distance is always less than
    half of the size of the simulation supercell ``sc_size``.
    :param z_i: coordinate of the first particle
    :param z_j: coordinate of the second particle
    :param sc_size: size of the simulation supercell
    :return: the signed minimum-image separation z_i - z_j
    """
sc_half = 0.5 * sc_size
z_ij = z_i - z_j
if fabs(z_ij) > sc_half:
# Take the image.
return -sc_half + (z_ij + sc_half) % sc_size
return z_ij | b585eb8e813ca852c4538eea7a9a6f9028a969d7 | 3,651,992 |
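For example, with a supercell of size 10, the nearest image of a particle at 9.5 relative to one at 1.0 sits at -0.5, so the signed separation is 1.5 rather than -8.5:
print(min_distance(1.0, 9.5, 10.0))  # 1.5 (via the periodic image at -0.5)
print(min_distance(1.0, 3.0, 10.0))  # -2.0 (direct separation is already minimal)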
import re
def prf(gold: str, pred: str, dic) -> tuple:
"""
计算P、R、F1
:param gold: 标准答案文件,比如“商品 和 服务”
:param pred: 分词结果文件,比如“商品 和服 务”
:param dic: 词典
:return: (P, R, F1, OOV_R, IV_R)
"""
A_size, B_size, A_cap_B_size, OOV, IV, OOV_R, IV_R = 0, 0, 0, 0, 0, 0, 0
with open(gold,encoding='utf8') as gd, open(pred,encoding='utf8') as pd:
for g, p in zip(gd, pd):
A, B = set(to_region(g)), set(to_region(p))
A_size += len(A)
B_size += len(B)
A_cap_B_size += len(A & B)
text = re.sub("\\s+", "", g)
for (start, end) in A:
word = text[start: end]
if dic.containsKey(word):
IV += 1
else:
OOV += 1
for (start, end) in A & B:
word = text[start: end]
if dic.containsKey(word):
IV_R += 1
else:
OOV_R += 1
p, r = A_cap_B_size / B_size * 100, A_cap_B_size / A_size * 100
return p, r, 2 * p * r / (p + r), OOV_R / OOV * 100, IV_R / IV * 100 | a8767bbe4c60eea2433d2c8023a9d7a1af74a4bf | 3,651,993 |
def lorem():
"""Returns some sample latin text to use for prototyping."""
return """
Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do
eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut
enim ad minim veniam, quis nostrud exercitation ullamco laboris
nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor
in reprehenderit in voluptate velit esse cillum dolore eu fugiat
nulla pariatur. Excepteur sint occaecat cupidatat non proident,
sunt in culpa qui officia deserunt mollit anim id est laborum.
""" | 6ddacdb23b7c62cf930e622a7fd801b514a419ae | 3,651,994 |
def read_gene2species(* filenames):
"""
Reads a gene2species file
Returns a function that will map gene names to species names.
"""
    maps = []
    for filename in filenames:
        maps.extend(util.read_delim(util.skip_comments(
            util.open_stream(filename))))
return make_gene2species(maps) | 90e58b2089f2561642ac1ba6648256888b931080 | 3,651,995 |
def make_list_table(headers, data, title='', columns=None):
"""Build a list-table directive.
:param headers: List of header values.
:param data: Iterable of row data, yielding lists or tuples with rows.
:param title: Optional text to show as the table title.
:param columns: Optional widths for the columns.
"""
results = []
add = results.append
add('.. list-table:: %s' % title)
add(' :header-rows: 1')
if columns:
add(' :widths: %s' % (','.join(str(c) for c in columns)))
add('')
add(' - * %s' % headers[0])
for h in headers[1:]:
add(' * %s' % h)
for row in data:
add(' - * %s' % row[0])
for r in row[1:]:
add(' * %s' % r)
add('')
return '\n'.join(results) | 569370b8359ad25bf255f940b5a89d93d896804d | 3,651,997 |
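A small usage example of make_list_table above, printing the generated two-level bullet list that the Sphinx list-table directive accepts.
print(make_list_table(
    ["Name", "Role"],
    [("alice", "admin"), ("bob", "user")],
    title="Accounts",
    columns=[20, 10],
))  # emits '.. list-table:: Accounts' followed by the header row and two data rows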
def toss_unbiased():
    """
    Toss the biased coin twice:
        assign 0-1 -> 0
        assign 1-0 -> 1
        discard 0-0 and 1-1 and toss again
    """
while True:
first, second = toss_biased(), toss_biased()
if first == 0 and second == 1:
return 0
if first == 1 and second == 0:
return 1 | 971f3365fbc4f0de34cd51c8060aab5c5037c7b2 | 3,651,998 |
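A runnable sketch of the von Neumann debiasing trick above; toss_biased is not shown in this snippet, so a hypothetical biased source is assumed.
import random

def toss_biased():
    # hypothetical stand-in: returns 1 with probability 0.7, any fixed bias works
    return 1 if random.random() < 0.7 else 0

samples = [toss_unbiased() for _ in range(10000)]
print(sum(samples) / len(samples))  # close to 0.5 regardless of the bias above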
def split_val_condition(input_string):
"""
Split and return a {'value': v, 'condition': c} dict for the value and the condition.
Condition is empty if no condition was found.
@param input A string of the form XXX @ YYYY
"""
try:
(value, condition) = [x.strip() for x in input_string.split('@')]
return {'value': value, 'condition': condition}
except ValueError:
# no condition was found
return {'value': input_string.strip(), 'condition': None} | 97c5733a80b3348928b95e2430bf3630867b2050 | 3,651,999 |
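A quick usage example of split_val_condition above:
print(split_val_condition("3.3V @ 25C"))  # {'value': '3.3V', 'condition': '25C'}
print(split_val_condition("100 ohm"))     # {'value': '100 ohm', 'condition': None}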
def shimenreservoir_operation_rule_lower_limit():
"""
Real Name: ShiMenReservoir Operation Rule Lower Limit
    Original Eqn: WITH LOOKUP ( Date, ([(1,190)-(366,250)],(1,240),(32,240),(152,220),(182,220),(244,225),(335,240),(365,240) ))
Units: m
Limits: (None, None)
Type: component
"""
return functions.lookup(date(), [1, 32, 152, 182, 244, 335, 365],
[240, 240, 220, 220, 225, 240, 240]) | 72830cd13bb411afe67398750f33c75a3a5bfba3 | 3,652,000 |
def pre_process(dd, df, dataset_len, batch_size):
"""Partition one dataframe to multiple small dataframes based on a given batch size."""
df = dd.str2ascii(df, dataset_len)
prev_chunk_offset = 0
partitioned_dfs = []
while prev_chunk_offset < dataset_len:
curr_chunk_offset = prev_chunk_offset + batch_size
chunk = df.iloc[prev_chunk_offset:curr_chunk_offset:1]
partitioned_dfs.append(chunk)
prev_chunk_offset = curr_chunk_offset
return partitioned_dfs | a0a19916d60476430bdaf27f85f31620f2b5ae2a | 3,652,001 |
from datetime import datetime
import re
def fromisoformat(s):
"""
Hacky way to recover a datetime from an isoformat() string
Python 3.7 implements datetime.fromisoformat() which is the proper way
There are many other 3rd party modules out there, but should be good enough for testing
"""
    return datetime(*map(int, re.findall(r'\d+', s))) | 7db362222f9da28f43eab5363336e0ca09b65960 | 3,652,002 |
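A quick check of the helper above:
print(fromisoformat("2021-03-05T14:30:02"))         # 2021-03-05 14:30:02
print(fromisoformat("2021-03-05T14:30:02.000123"))  # 2021-03-05 14:30:02.000123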
import numpy as np
def non_repeat(a, decimals=12):
    """
    Returns the matrix A with only its distinct rows kept (duplicates removed after rounding).
    """
a = np.ascontiguousarray(a)
a = np.around(a, decimals = int(decimals))
_, index = np.unique(a.view([('', a.dtype)]*a.shape[1]), return_index=True)
index = sorted(index)
return a[index] | 312ce49fe275649c745ee22c79a08c0a2c1b798b | 3,652,003 |
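A short example of non_repeat above: rows that differ by less than the rounding precision collapse into one.
a = np.array([[1.0, 2.0],
              [1.0, 2.0 + 1e-15],
              [3.0, 4.0]])
print(non_repeat(a))  # [[1. 2.] [3. 4.]], the first two rows are identical after rounding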
import numpy as np
def softmax_with_cross_entropy(predictions, target_index):
"""
Computes softmax and cross-entropy loss for model predictions,
including the gradient
Arguments:
predictions, np array, shape is either (N) or (batch_size, N) -
classifier output
target_index: np array of int, shape is (1) or (batch_size) -
index of the true class for given sample(s)
Returns:
loss, single value - cross-entropy loss
dprediction, np array same shape as predictions - gradient of predictions by loss value
"""
is_batch = predictions.ndim == 2
probs = softmax(predictions)
loss = cross_entropy_loss(probs, target_index)
dprediction = probs
if is_batch:
batch_size = target_index.size
i = np.arange(batch_size)
dprediction[i, target_index] -= 1
dprediction /= batch_size
else:
dprediction[target_index] -= 1
return loss, dprediction | 9683da852dae92a5dec1f4353f4d93b2243fd30d | 3,652,004 |
import re
def scraper_main_olx(url):
""" Reads pages with offers from OLX and provides URLS to said offers. """
def __create_url_olx(offs_ids, prefix="https://www.olx.pl"):
""" Method creates an olx offer link from parts read from a main page. """
return [
"/".join([
prefix,
"oferta",
"CID3-ID" + o_id + ".html"
])
for o_id in offs_ids
]
# Loading the page
page = get_page(url)
# Reading the offers' ids
offers_ids = [
re.search("[^_]*$", off.attrib["class"]).group()[2:]
for off in page.element("table[id=offers_table] table[summary=Ogłoszenie]")
]
return {
"url": url,
"offers_urls": __create_url_olx(offers_ids)
} | 4f209dd800124c3b59db31029141e4d37f98e7d8 | 3,652,005 |
import torch
from typing import Tuple
from typing import List
def accuracy(
output: torch.Tensor,
        target: torch.Tensor,
topk: Tuple[int] = (
1,
)) -> List[float]:
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res | 73dd8a03e729fa89cea7abf535779dd45897113d | 3,652,007 |
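A small sanity check for accuracy above:
import torch

logits = torch.tensor([[0.1, 0.9, 0.0],
                       [0.8, 0.1, 0.1]])
labels = torch.tensor([1, 2])
top1, top2 = accuracy(logits, labels, topk=(1, 2))
print(top1.item(), top2.item())  # 50.0 100.0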
def _make_unique(key, val):
"""
Make a tuple of key, value that is guaranteed hashable and should be unique per value
:param key: Key of tuple
:param val: Value of tuple
:return: Unique key tuple
"""
if type(val).__hash__ is None:
val = str(val)
return key, val | 65d746276f635c129aa0a5aeb9b9f467453c0b2a | 3,652,008 |
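For example, unhashable values are stringified so the pair can be used as a dictionary key:
print(_make_unique("ids", [1, 2, 3]))  # ('ids', '[1, 2, 3]')
print(_make_unique("name", "alice"))   # ('name', 'alice')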
def replace_caps(x):
"""Replace all Capitalized tokens in `x` by their lower version and add `TK_MAJ` before."""
res = []
for t in x:
if t == '': continue
if t[0].isupper():
if len(t) == 1 and t[0] == 'I':
res.append(TK_MAJ)
if len(t) > 1 and (t[1:].islower() or (t[1] == "’" or t[1] == "'")):
res.append(TK_MAJ)
res.append(t.lower())
return res | 72519c264a97b60b05d430fc86dce1069e3718a7 | 3,652,009 |
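A minimal sketch of replace_caps above; the TK_MAJ token is defined elsewhere (fastai uses 'xxmaj'), so that value is assumed here.
TK_MAJ = 'xxmaj'  # assumed value of the capitalization token used above

print(replace_caps(["I", "Like", "Python", "a", "lot"]))
# ['xxmaj', 'i', 'xxmaj', 'like', 'xxmaj', 'python', 'a', 'lot']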
def computes_ts_coverage(k, outputs, two_symbols):
""" Computes the input coverage by Two Symbol schematas.
Args:
k (int): the number of inputs.
        outputs (list): the list of transition outputs.
two_symbols (list): The final list of Two Symbol permutable schematas. This is returned by `find_two_symbols`.
Returns:
coverage (dict): a dictionary of coverage where keys are inputs states and values are lists of the Two Symbols covering that input.
"""
ts_coverage = {}
for statenum in range(2**k):
binstate = statenum_to_binstate(statenum, base=k)
ts_coverage[binstate] = covering_twosymbols = []
output = int(outputs[statenum])
if output == 2:
output = [0, 1]
else:
output = [int(outputs[statenum])]
for t in output:
for implicant, permut_indxs, same_symbols_indxs in two_symbols[t]:
if __ts_covers(implicant, permut_indxs, binstate):
covering_twosymbols.append((implicant, permut_indxs, same_symbols_indxs))
#
return ts_coverage | 741718bb78ffc6840bb004eb80f096dc30d4df79 | 3,652,010 |
def create_measurements(nh, nv, offset, measurement_type):
"""Creates necessary measurement details for a given type on a given lattice.
Given the lattice size, whether odd or even pairs are being measured,
and the measurement type, this function returns a namedtuple
with the pairs of qubits to be measured, the circuit preparation
function and the measurement_type to be passed to the analysis
function.
The measurement_type can be:
"onsite", "horiz", "vert", "vert0", "vert1"
Args:
nh -- number of horizontal sites
nv -- number of vertical sites
offset -- offset taking care of odd vs even pairing
measurement_type -- onsite, horizontal or vertical measurement
Returns:
Measurements namedtuple with measurement
(pairs, preparation circuit, analysis type)
"""
n = nh * nv
if measurement_type == "onsite":
pairs = [(i, i+n) for i in range(n)]
prep = None
if measurement_type == "horiz":
pairs = [(i+j, i+j+1) for i in range(0, 2*n, nh) for j in range(offset,nh-1,2)]
prep = prepH
if measurement_type == "vert":
pairst = [(i*nh+j, (i+1)*nh+j) for i in range(offset, nv-1, 2) for j in range(nh)]
pairst += [(i*nh+j+n, (i+1)*nh+j+n) for i in range(offset, nv-1, 2) for j in range(0, nh)]
pairs = [ (map_site_to_JW(nh, nv, site1), map_site_to_JW(nh, nv, site2)) for (site1, site2) in pairst]
prep = prepV
if measurement_type == "vert0":
pairs = [(i+j, i+j+1) for i in range(0, 2*n, n) for j in range(1,n-1,2)]
prep = prepV
if measurement_type == "vert1":
pairs = [(i+j, i+j+1) for i in range(0, 2*n, n) for j in range(1,n-1,2)]
prep = prepV2wrap(nh, nv)
print(f"Prepped {measurement_type}, pairs={pairs}")
return Measurements(pairs=pairs, prep=prep, analysis=measurement_type) | ff4dbe1ee49a0db41c30fc9ba8fc6ab94c314c48 | 3,652,011 |
def headline(
in_string,
surround = False,
width = 72,
nr_spaces = 2,
spacesym = ' ',
char = '=',
border = None,
uppercase = True,
):
"""return in_string capitalized, spaced and sandwiched:
============================== T E S T ===============================
Parameters are the following:
* char (one-letter string, default='='):
changes the character the title is put between.
* surround (boolean, default=False):
adds additional lines above and under in_string:
====================================================
==================== T E S T =====================
====================================================
* width (int, default=72):
defines the width of each line.
* nr_spaces (int, default=2):
defines number of nr_spaces between in_string and the
char as indicated in ..====__T I T L E__====.. .
* spacesym (one-letter string, default=' '):
instead of using a whitespace to seperate the 'title' letters,
one can use every other character, e.g. '_'.
* border (either string or list/tuple of two strings; defaults to char):
If this is a single character string, it will be used at the left
and right end of the headline.
If this is multiple character string, it will be used at the left
and mirrored at the right. This way you can easily introduce additional
space if you prefer and use, for example c style like inline comments
with border="/*".
If this is not enough for you, the left and right borders can be given
seperately, like in border=("<!--", "-->")
* uppercase (boolean, default=True):
if True, headline will capitalize the letters given by in_string.
if False, in_string will be used as it is given.
"""
if isinstance(border, tuple) or isinstance(border, list):
left_border = border[0]
right_border = border[1]
else:
if border is None:
border = char
left_border = border
right_border = border[::-1]
nr_sym_spaces = len(left_border + right_border)
headline_text = spacesym.join(
l.upper() if uppercase else l for l in in_string
)
headline_text_sandwiched = '{:{}^{}}'.format(
headline_text,
spacesym,
2 * (len(in_string) + nr_spaces) - 1
)
headline_without_sym = '{:{}^{}}'.format(
headline_text_sandwiched,
char,
width - nr_sym_spaces
)
headline_full = '{1}{0}{2}'.format(
headline_without_sym,
left_border,
right_border
)
if surround:
line = '{1}{0}{2}'.format(
(width - nr_sym_spaces) * char,
left_border,
right_border
)
output = line + '\n' + headline_full + '\n' + line
else:
output = headline_full
return output | 1848d91bbf6c9d2216338f35433a26bcd3854664 | 3,652,012 |
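A couple of quick calls to headline above:
print(headline("test", width=40))                                    # 40-character '=' banner with 'T E S T' centred
print(headline("note", char='-', border="/*", width=40, surround=True))  # three '/* ... */' lines with 'N O T E' centred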
import itertools
import unicodedata
def rainbow_cmd(bot, trigger):
"""Make text colored. Options are "rainbow", "usa", "commie", and "spooky"."""
text = clean(trigger.group(2) or '')
scheme = trigger.group(1).lower()
if not text:
try:
msg = SCHEME_ERRORS[scheme]
except KeyError:
msg = "How did you do that?!"
bot.reply(msg)
return module.NOLIMIT
try:
colors = COLOR_SCHEMES[scheme]
except KeyError:
# not possible to reach this at time of writing, but who knows?
# mistakes happen when updating stuff that needs to be changed in parallel
bot.reply("I don't know what color sequence to use for '{}'!".format(scheme))
return module.NOLIMIT
color_cycle = itertools.cycle(colors)
bot.say(
''.join(
char if unicodedata.category(char) == 'Zs'
else formatting.color(char, next(color_cycle))
for char in text
)
) | 292e55511b40c3c265e7ba87164cf179e54c16a6 | 3,652,013 |
def url_decode(s, charset='utf-8', decode_keys=False, include_empty=True,
errors='ignore', separator='&', cls=None):
"""Parse a querystring and return it as :class:`MultiDict`. Per default
only values are decoded into unicode strings. If `decode_keys` is set to
`True` the same will happen for keys.
Per default a missing value for a key will default to an empty key. If
you don't want that behavior you can set `include_empty` to `False`.
Per default encoding errors are ignored. If you want a different behavior
you can set `errors` to ``'replace'`` or ``'strict'``. In strict mode a
`HTTPUnicodeError` is raised.
.. versionchanged:: 0.5
In previous versions ";" and "&" could be used for url decoding.
This changed in 0.5 where only "&" is supported. If you want to
use ";" instead a different `separator` can be provided.
The `cls` parameter was added.
:param s: a string with the query string to decode.
:param charset: the charset of the query string.
:param decode_keys: set to `True` if you want the keys to be decoded
as well.
:param include_empty: Set to `False` if you don't want empty values to
appear in the dict.
:param errors: the decoding error behavior.
:param separator: the pair separator to be used, defaults to ``&``
:param cls: an optional dict class to use. If this is not specified
or `None` the default :class:`MultiDict` is used.
"""
if cls is None:
cls = MultiDict
result = []
for pair in str(s).split(separator):
if not pair:
continue
if '=' in pair:
key, value = pair.split('=', 1)
else:
key = pair
value = ''
key = _unquote_plus(key)
if decode_keys:
key = _decode_unicode(key, charset, errors)
result.append((key, url_unquote_plus(value, charset, errors)))
return cls(result) | 2b5b9598639ef600900dd1cb50c8ec6de892feff | 3,652,014 |
from typing import Type
def special_loader(as_type: type) -> Type[FullLoader]:
"""Construct new loader class supporting current class structure"""
class TypedLoader(FullLoader): # pylint: disable=too-many-ancestors
"""Custom loader with typed resolver"""
...
_add_path_resolvers(as_type, TypedLoader) # we need to add resolver only to the root typed item
return TypedLoader | e60c96284334fc57cc32af557a86433bb5302526 | 3,652,015 |
def try_(func, *args, **kwargs):
"""Try to call a function and return `_default` if it fails
Note: be careful that in order to have a fallback, you can supply
the keyword argument `_default`. If you supply anything other
than a keyword arg, it will result in it being passed to the wrapped
function and could cause unexpected behavior including always failing
with default value of None.
"""
_default_val = kwargs.pop("_default", None)
try:
return func(*args, **kwargs)
except Exception: # pylint: disable=broad-except
return _default_val | 206b25bd2e345d9cd6423e2cbc2706c274f36c89 | 3,652,016 |
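For example:
print(try_(int, "42"))                        # 42
print(try_(int, "not a number", _default=0))  # 0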
def _create_course_and_cohort_with_user_role(course_is_cohorted, user, role_name):
"""
Creates a course with the value of `course_is_cohorted`, plus `always_cohort_inline_discussions`
set to True (which is no longer the default value). Then 1) enrolls the user in that course,
2) creates a cohort that the user is placed in, and 3) adds the user to the given role.
Returns: a tuple of the created course and the created cohort
"""
cohort_course = CourseFactory.create(
cohort_config={"cohorted": course_is_cohorted, "always_cohort_inline_discussions": True}
)
CourseEnrollmentFactory.create(user=user, course_id=cohort_course.id)
cohort = CohortFactory.create(course_id=cohort_course.id, users=[user])
_assign_role_to_user(user=user, course_id=cohort_course.id, role=role_name)
return [cohort_course, cohort] | 6f55d10d4b1dfa27c067298862e89a558c5618a1 | 3,652,017 |
def relative_vorticity(
u, v, wrap=None, one_sided_at_boundary=False, radius=6371229.0, cyclic=None
):
"""Calculate the relative vorticity using centred finite
differences.
The relative vorticity of wind defined on a Cartesian domain (such
as a plane projection) is defined as
ζcartesian = δv/δx − δu/δy
where x and y are points on along the 'X' and 'Y' Cartesian
dimensions respectively; and u and v denote the 'X' and 'Y'
components of the horizontal winds.
    If the wind field is defined on a spherical
latitude-longitude domain then a correction factor is included:
ζspherical = δv/δx − δu/δy + (u/a)tan(ϕ)
where u and v denote the longitudinal and latitudinal components
of the horizontal wind field; a is the radius of the Earth; and ϕ
is the latitude at each point.
The relative vorticity is calculated using centred finite
differences (see the *one_sided_at_boundary* parameter).
The grid may be global or limited area. If missing values are
present then missing values will be returned at points where the
centred finite difference could not be calculated. The boundary
conditions may be cyclic in longitude. The non-cyclic boundaries
may either be filled with missing values or calculated with
off-centre finite differences.
Reference: H.B. Bluestein, Synoptic-Dynamic Meteorology in
Midlatitudes, 1992, Oxford Univ. Press p113-114
:Parameters:
u: `Field`
A field containing the x-wind. Must be on the same grid as
the y-wind.
v: `Field`
A field containing the y-wind. Must be on the same grid as
the x-wind.
radius: optional
The radius of the sphere when the winds are on a spherical
polar coordinate domain. May be any numeric scalar object
that can be converted to a `Data` object (which includes
numpy array and `Data` objects). By default *radius* has a
value of 6371229.0 metres, representing the Earth's
radius. If units are not specified then units of metres
are assumed.
*Parameter example:*
Five equivalent ways to set a radius of 6371200 metres:
``radius=6371200``, ``radius=numpy.array(6371200)``,
``radius=cf.Data(6371200)``, ``radius=cf.Data(6371200,
'm')``, ``radius=cf.Data(6371.2, 'km')``.
wrap: `bool`, optional
Whether the longitude is cyclic or not. By default this is
autodetected.
one_sided_at_boundary: `bool`, optional
If True then if the field is not cyclic off-centre finite
differences are calculated at the boundaries, otherwise
missing values are used at the boundaries.
:Returns:
`Field`
The relative vorticity calculated with centred finite
differences.
"""
if cyclic:
_DEPRECATION_ERROR_FUNCTION_KWARGS(
"relative_vorticity",
{"cyclic": cyclic},
"Use the 'wrap' keyword instead",
) # pragma: no cover
# Get the standard names of u and v
u_std_name = u.get_property("standard_name", None)
v_std_name = v.get_property("standard_name", None)
# Copy u and v
u = u.copy()
v = v.copy()
# Get the X and Y coordinates
(u_x_key, u_y_key), (u_x, u_y) = get_cartesian_coords(u, "u", ("X", "Y"))
(v_x_key, v_y_key), (v_x, v_y) = get_cartesian_coords(v, "v", ("X", "Y"))
if not u_x.equals(v_x) or not u_y.equals(v_y):
raise ValueError("u and v must be on the same grid.")
# Check for lat/long
is_latlong = (u_x.Units.islongitude and u_y.Units.islatitude) or (
u_x.units == "degrees" and u_y.units == "degrees"
)
# Check for cyclicity
if wrap is None:
if is_latlong:
wrap = u.iscyclic(u_x_key)
else:
wrap = False
# Find the relative vorticity
if is_latlong:
# Save the units of the X and Y coordinates
x_units = u_x.Units
y_units = u_y.Units
# Change the units of the lat/longs to radians
radians = Units("radians")
u_x.Units = radians
u_y.Units = radians
v_x.Units = radians
v_y.Units = radians
# Find cos and tan of latitude
cos_lat = u_y.cos()
tan_lat = u_y.tan()
# Reshape for broadcasting
u_shape = [1] * u.ndim
u_y_index = u.get_data_axes().index(u_y_key)
u_shape[u_y_index] = u_y.size
v_shape = [1] * v.ndim
v_y_index = v.get_data_axes().index(v_y_key)
v_shape[v_y_index] = v_y.size
# Calculate the correction term
corr = u.copy()
corr *= tan_lat.array.reshape(u_shape)
# Calculate the derivatives
v.derivative(
v_x_key,
wrap=wrap,
one_sided_at_boundary=one_sided_at_boundary,
inplace=True,
)
v.data /= cos_lat.array.reshape(v_shape)
u.derivative(
u_y_key, one_sided_at_boundary=one_sided_at_boundary, inplace=True
)
radius = Data.asdata(radius).squeeze()
radius.dtype = float
if radius.size != 1:
raise ValueError(f"Multiple radii: radius={radius!r}")
if not radius.Units:
radius.override_units(Units("metres"), inplace=True)
elif not radius.Units.equivalent(Units("metres")):
raise ValueError(f"Invalid units for radius: {radius.Units!r}")
# Calculate the relative vorticity. Do v-(u-corr) rather than
# v-u+corr to be nice with coordinate reference corner cases.
rv = v - (u - corr)
rv.data /= radius
# Convert the units of latitude and longitude to canonical units
rv.dimension_coordinate("X").Units = x_units
rv.dimension_coordinate("Y").Units = y_units
else:
v.derivative(
v_x_key, one_sided_at_boundary=one_sided_at_boundary, inplace=True
)
u.derivative(
u_y_key, one_sided_at_boundary=one_sided_at_boundary, inplace=True
)
rv = v - u
# Convert the units of relative vorticity to canonical units
rv.Units = Units("s-1")
# Set the standard name if appropriate and delete the long_name
if (u_std_name == "eastward_wind" and v_std_name == "northward_wind") or (
u_std_name == "x_wind" and v_std_name == "y_wind"
):
rv.standard_name = "atmosphere_relative_vorticity"
else:
rv.del_property("standard_name", None)
rv.del_property("long_name", None)
return rv | 6134a44594cd84174f44f00a57df2f7284c4a7e5 | 3,652,018 |
import torch_geometric
import torch
def coalesce(
edge_index: torch.Tensor,
edge_attr: _typing.Union[
torch.Tensor, _typing.Iterable[torch.Tensor], None
] = None,
num_nodes: _typing.Optional[int] = ...,
is_sorted: bool = False,
sort_by_row: bool = True
) -> _typing.Union[
torch.Tensor, _typing.Tuple[torch.Tensor, torch.Tensor],
_typing.Tuple[torch.Tensor, _typing.Iterable[torch.Tensor]]
]:
"""
Row-wise sorts :obj:`edge_index` and removes its duplicated entries.
Duplicate entries in :obj:`edge_attr` are directly removed, instead of merged.
Args:
edge_index (LongTensor): The edge indices.
edge_attr (Tensor or List[Tensor], optional): Edge weights or multi-
dimensional edge features.
If given as a list, will re-shuffle and remove duplicates for all
its entries. (default: :obj:`None`)
num_nodes (int, optional): The number of nodes, *i.e.*
:obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`)
is_sorted (bool, optional): If set to :obj:`True`, will expect
:obj:`edge_index` to be already sorted row-wise.
sort_by_row (bool, optional): If set to :obj:`False`, will sort
:obj:`edge_index` column-wise.
:rtype: :class:`LongTensor` if :attr:`edge_attr` is :obj:`None`, else
(:class:`LongTensor`, :obj:`Tensor` or :obj:`Iterable[Tensor]]`)
"""
if not isinstance(num_nodes, int):
num_nodes = None
try:
return torch_geometric.utils.coalesce(
edge_index, edge_attr, num_nodes,
is_sorted=is_sorted,
sort_by_row=sort_by_row
)
except ModuleNotFoundError:
return __coalesce(
edge_index, edge_attr, num_nodes,
is_sorted=is_sorted,
sort_by_row=sort_by_row
) | 00006971c06fc599edb6b3ff12b2e0a7700dd136 | 3,652,019 |
def get_label_names(l_json):
"""
Get names of all the labels in given json
:param l_json: list of labels jsons
:type l_json: list
:returns: list of labels names
:rtype: list
"""
llist = []
for j in l_json:
llist.append(j['name'])
return llist | bab12bedc8b5001b94d6c5f02264b1ebf4ab0e99 | 3,652,020 |
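For example:
labels_json = [{"name": "bug", "color": "d73a4a"}, {"name": "feature", "color": "a2eeef"}]
print(get_label_names(labels_json))  # ['bug', 'feature']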
from ...niworkflows.engine.workflows import LiterateWorkflow as Workflow
from ...niworkflows.interfaces.utility import KeySelect
from ...smriprep.workflows.outputs import _bids_relative
from ...niworkflows.interfaces.space import SpaceDataSource
def init_asl_derivatives_wf(
bids_root,
metadata,
output_dir,
spaces,
scorescrub=False,
basil=False,
name='asl_derivatives_wf',
):
"""
Set up a battery of datasinks to store derivatives in the right location.
Parameters
----------
bids_root : :obj:`str`
Original BIDS dataset path.
metadata : :obj:`dict`
Metadata dictionary associated to the ASL run.
output_dir : :obj:`str`
Where derivatives should be written out to.
spaces : :py:class:`~niworkflows.utils.spaces.SpatialReferences`
A container for storing, organizing, and parsing spatial normalizations. Composed of
:py:class:`~niworkflows.utils.spaces.Reference` objects representing spatial references.
Each ``Reference`` contains a space, which is a string of either TemplateFlow template IDs
(e.g., ``MNI152Lin``, ``MNI152NLin6Asym``, ``MNIPediatricAsym``), nonstandard references
(e.g., ``T1w`` or ``anat``, ``sbref``, ``run``, etc.), or a custom template located in
the TemplateFlow root directory. Each ``Reference`` may also contain a spec, which is a
dictionary with template specifications (e.g., a specification of ``{'resolution': 2}``
would lead to resampling on a 2mm resolution of the space).
name : :obj:`str`
This workflow's identifier (default: ``func_derivatives_wf``).
"""
nonstd_spaces = set(spaces.get_nonstandard())
workflow = Workflow(name=name)
inputnode = pe.Node(niu.IdentityInterface(fields=[
'asl_mask_std', 'asl_mask_t1', 'asl_std',
'asl_std_ref', 'asl_t1', 'asl_t1_ref', 'asl_native', 'asl_native_ref',
'asl_mask_native','confounds', 'confounds_metadata', 'source_file',
'template', 'spatial_reference', 'cbf', 'meancbf', 'score', 'avgscore',
'scrub', 'basil', 'pv', 'cbf_t1', 'meancbf_t1', 'att_t1', 'score_t1', 'avgscore_t1',
'scrub_t1', 'basil_t1', 'pv_t1', 'cbf_std', 'meancbf_std', 'score_std',
'avgscore_std', 'scrub_std', 'basil_std', 'pv_std','att','att_std','qc_file',
'cbf_hvoxf', 'score_hvoxf', 'scrub_hvoxf', 'basil_hvoxf', 'pvc_hvoxf',
'cbf_sc207', 'score_sc207', 'scrub_sc207', 'basil_sc207', 'pvc_sc207',
'cbf_sc217', 'score_sc217', 'scrub_sc217', 'basil_sc217', 'pvc_sc217',
'cbf_sc407', 'score_sc407', 'scrub_sc407', 'basil_sc407', 'pvc_sc407',
'cbf_sc417', 'score_sc417', 'scrub_sc417', 'basil_sc417', 'pvc_sc417'
]),
name='inputnode')
raw_sources = pe.Node(niu.Function(function=_bids_relative), name='raw_sources')
raw_sources.inputs.bids_root = bids_root
ds_confounds = pe.Node(DerivativesDataSink(
base_directory=output_dir, desc='confounds', suffix='regressors'),
name="ds_confounds", run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB)
workflow.connect([
(inputnode, raw_sources, [('source_file', 'in_files')]),
(inputnode, ds_confounds, [('source_file', 'source_file'),
('confounds', 'in_file'),
('confounds_metadata', 'meta_dict')]),
])
qcfile = pe.Node(
DerivativesDataSink(base_directory=output_dir,
desc='quality_control',
suffix='cbf', compress=False),
name='qcfile', run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB)
workflow.connect([
(inputnode, qcfile, [('source_file', 'source_file'),
('qc_file', 'in_file')]),
])
cbf_hvoxf = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='HavardOxford', suffix='mean_cbf',
compress=False),
name='cbf_hvoxf', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
cbf_sc207 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='schaefer200x7',
suffix='mean_cbf', compress=False),
name='cbf_sc207', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
cbf_sc217 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='schaefer200x17',
suffix='mean_cbf', compress=False),
name='cbf_sc217', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
cbf_sc407 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='schaefer400x7', suffix='mean_cbf',
compress=False),
name='cbf_sc407', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
cbf_sc417 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='schaefer400x17',
suffix='mean_cbf', compress=False),
name='cbf_sc417', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
workflow.connect([
(inputnode, cbf_hvoxf, [('source_file', 'source_file'),
('cbf_hvoxf', 'in_file')]),
(inputnode, cbf_sc207, [('source_file', 'source_file'),
('cbf_sc207', 'in_file')]),
(inputnode, cbf_sc217, [('source_file', 'source_file'),
('cbf_sc217', 'in_file')]),
(inputnode, cbf_sc407, [('source_file', 'source_file'),
('cbf_sc407', 'in_file')]),
(inputnode, cbf_sc417, [('source_file', 'source_file'),
('cbf_sc417', 'in_file')]),
])
if scorescrub:
score_hvoxf = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='HavardOxford',
suffix='mean_score', compress=False),
name='score_hvoxf', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
scrub_hvoxf = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='HavardOxford',
suffix='mean_scrub', compress=False),
name='scrub_hvoxf', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
score_sc207 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='schaefer200x7',
suffix='mean_score', compress=False),
name='score_sc207', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
scrub_sc207 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='schaefer200x7',
suffix='mean_scrub', compress=False),
name='scrub_sc207', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
score_sc217 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='schaefer200x17',
suffix='mean_score', compress=False),
name='score_sc217', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
scrub_sc217 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='schaefer200x17',
suffix='mean_scrub', compress=False),
name='scrub_sc217', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
score_sc407 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='schaefer400x7',
suffix='mean_score', compress=False),
name='score_sc407', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
scrub_sc407 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='schaefer400x7',
suffix='mean_scrub', compress=False),
name='scrub_sc407', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
score_sc417 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='schaefer400x17',
suffix='mean_score', compress=False),
name='score_sc417', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
scrub_sc417 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='schaefer400x17',
suffix='mean_scrub', compress=False),
name='scrub_sc417', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
workflow.connect([ (inputnode, score_hvoxf, [('source_file', 'source_file'),
('score_hvoxf', 'in_file')]),
(inputnode, scrub_hvoxf, [('source_file', 'source_file'),
('scrub_hvoxf', 'in_file')]),
(inputnode, score_sc217, [('source_file', 'source_file'),
('score_sc217', 'in_file')]),
(inputnode, score_sc207, [('source_file', 'source_file'),
('score_sc207', 'in_file')]),
(inputnode, scrub_sc207, [('source_file', 'source_file'),
('scrub_sc207', 'in_file')]),
(inputnode, scrub_sc217, [('source_file', 'source_file'),
('scrub_sc217', 'in_file')]),
(inputnode, score_sc417, [('source_file', 'source_file'),
('score_sc417', 'in_file')]),
(inputnode, scrub_sc417, [('source_file', 'source_file'),
('scrub_sc417', 'in_file')]),
(inputnode, score_sc407, [('source_file', 'source_file'),
('score_sc407', 'in_file')]),
(inputnode, scrub_sc407, [('source_file', 'source_file'),
('scrub_sc407', 'in_file')]),
])
if basil:
basil_hvoxf = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='HavardOxford',
suffix='mean_basil', compress=False),
name='basil_hvoxf', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
pvc_hvoxf = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='HavardOxford', suffix='mean_pvc',
compress=False),
name='pvc_hvoxf', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
basil_sc207 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='schaefer200x7',
suffix='mean_basil', compress=False),
name='basil_sc207', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
pvc_sc207 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='schaefer200x7', suffix='mean_pvc',
compress=False),
name='pvc_sc207', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
basil_sc217 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='schaefer200x17',
suffix='mean_basil', compress=False),
name='basil_sc217', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
pvc_sc217 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='schaefer200x17',
suffix='mean_pvc', compress=False),
name='pvc_sc217', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
basil_sc407 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='schaefer400x7',
suffix='mean_basil', compress=False),
name='basil_sc407', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
pvc_sc407 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='schaefer400x7', suffix='mean_pvc',
compress=False),
name='pvc_sc407', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
basil_sc417 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='schaefer400x17',
suffix='mean_basil', compress=False),
name='basil_sc417', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
pvc_sc417 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='schaefer400x17',
suffix='mean_pvc', compress=False),
name='pvc_sc417', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
workflow.connect([
(inputnode, basil_hvoxf, [('source_file', 'source_file'),
('basil_hvoxf', 'in_file')]),
(inputnode, pvc_hvoxf, [('source_file', 'source_file'),
('pvc_hvoxf', 'in_file')]),
(inputnode, basil_sc207, [('source_file', 'source_file'),
('basil_sc207', 'in_file')]),
(inputnode, pvc_sc207, [('source_file', 'source_file'),
('pvc_sc207', 'in_file')]),
(inputnode, basil_sc217, [('source_file', 'source_file'),
('basil_sc217', 'in_file')]),
(inputnode, pvc_sc217, [('source_file', 'source_file'),
('pvc_sc217', 'in_file')]),
(inputnode, basil_sc407, [('source_file', 'source_file'),
('basil_sc407', 'in_file')]),
            (inputnode, pvc_sc407, [('source_file', 'source_file'),
                                    ('pvc_sc407', 'in_file')]),
(inputnode, basil_sc417, [('source_file', 'source_file'),
('basil_sc417', 'in_file')]),
(inputnode, pvc_sc417, [('source_file', 'source_file'),
('pvc_sc417', 'in_file')]),
])
if nonstd_spaces.intersection(('func', 'run', 'asl','sbref')):
ds_asl_native = pe.Node(
DerivativesDataSink(
base_directory=output_dir, desc='preproc', compress=True, SkullStripped=False,
RepetitionTime=metadata.get('RepetitionTime'), TaskName=metadata.get('TaskName')),
name='ds_asl_native', run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB)
ds_asl_native_ref = pe.Node(
DerivativesDataSink(base_directory=output_dir, suffix='aslref', compress=True,
dismiss_entities=("echo",)),
name='ds_asl_native_ref', run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB)
ds_asl_mask_native = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='brain', suffix='mask',
compress=True, dismiss_entities=("echo",)),
name='ds_asl_mask_native', run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB)
cbfnative = pe.Node(
DerivativesDataSink(base_directory=output_dir, suffix='cbf', compress=True),
name='cbfnative', run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB)
meancbfnative = pe.Node(
DerivativesDataSink(base_directory=output_dir, suffix='mean_cbf', compress=True),
name='meancbfnative', run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB)
workflow.connect([
(inputnode, ds_asl_native, [('source_file', 'source_file'),
('asl_native', 'in_file')]),
(inputnode, ds_asl_native_ref, [('source_file', 'source_file'),
('asl_native_ref', 'in_file')]),
(inputnode, ds_asl_mask_native, [('source_file', 'source_file'),
('asl_mask_native', 'in_file')]),
(inputnode, cbfnative, [('source_file', 'source_file'),
('cbf', 'in_file')]),
(inputnode, meancbfnative, [('source_file', 'source_file'),
('meancbf', 'in_file')]),
])
if scorescrub:
scorenative = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='score', suffix='cbf',
compress=True),
name='scorenative', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
meanscorenative = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='score', suffix='mean_cbf',
compress=True),
name='meanscorenative', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
scrubnative = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='scrub', suffix='cbf',
compress=True),
name='scrubnative', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
workflow.connect([
(inputnode, scorenative, [('source_file', 'source_file'),
('score', 'in_file')]),
(inputnode, meanscorenative, [('source_file', 'source_file'),
('avgscore', 'in_file')]),
(inputnode, scrubnative, [('source_file', 'source_file'),
('scrub', 'in_file')]),
])
if basil:
basilnative = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='basil', suffix='cbf',
compress=True),
name='basilnative', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
pvnative = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='pvc', suffix='cbf',
compress=True),
name='pvcnative', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
attnative = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='bat', suffix='cbf',
compress=True),
name='attcnative', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
workflow.connect([
(inputnode, basilnative, [('source_file', 'source_file'),
('basil', 'in_file')]),
(inputnode, pvnative, [('source_file', 'source_file'),
('pv', 'in_file')]),
(inputnode, attnative, [('source_file', 'source_file'),
('att', 'in_file')]),
(raw_sources, ds_asl_mask_native, [('out', 'RawSources')]),
])
# Resample to T1w space
if nonstd_spaces.intersection(('T1w', 'anat')):
ds_asl_t1 = pe.Node(
DerivativesDataSink(
base_directory=output_dir, space='T1w', desc='preproc', compress=True,
SkullStripped=False, RepetitionTime=metadata.get('RepetitionTime'),
TaskName=metadata.get('TaskName'), dismiss_entities=("echo",)),
name='ds_asl_t1', run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB)
ds_asl_t1_ref = pe.Node(
DerivativesDataSink(base_directory=output_dir, space='T1w', suffix='aslref',
compress=True, dismiss_entities=("echo",)),
name='ds_asl_t1_ref', run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB)
ds_asl_mask_t1 = pe.Node(
DerivativesDataSink(base_directory=output_dir, space='T1w', desc='brain',
suffix='mask', compress=True, dismiss_entities=("echo",)),
name='ds_asl_mask_t1', run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB)
cbfnativet1 = pe.Node(
DerivativesDataSink(base_directory=output_dir, suffix='cbf', space='T1w',
compress=True),
name='cbfnativet1', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
meancbfnativet1 = pe.Node(
DerivativesDataSink(base_directory=output_dir, suffix='mean_cbf', space='T1w',
compress=True),
name='meancbfnativet1', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
workflow.connect([
(inputnode, ds_asl_t1, [('source_file', 'source_file'),
('asl_t1', 'in_file')]),
(inputnode, ds_asl_t1_ref, [('source_file', 'source_file'),
('asl_t1_ref', 'in_file')]),
(inputnode, ds_asl_mask_t1, [('source_file', 'source_file'),
('asl_mask_t1', 'in_file')]),
(inputnode, cbfnativet1, [('source_file', 'source_file'),
('cbf_t1', 'in_file')]),
(inputnode, meancbfnativet1, [('source_file', 'source_file'),
('meancbf_t1', 'in_file')]),
])
if scorescrub:
scorenativet1 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='score', suffix='cbf',
space='T1w', compress=True),
name='scorenativet1', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
meanscorenativet1 = pe.Node(
DerivativesDataSink(base_directory=output_dir, suffix='mean_cbf', desc='score',
space='T1w', compress=True),
name='meanscorenativet1', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
scrubnativet1 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='scrub', suffix='cbf',
space='T1w', compress=True),
name='scrubnativet1', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
workflow.connect([
(inputnode, scorenativet1, [('source_file', 'source_file'),
('score_t1', 'in_file')]),
(inputnode, meanscorenativet1, [('source_file', 'source_file'),
('avgscore_t1', 'in_file')]),
(inputnode, scrubnativet1, [('source_file', 'source_file'),
('scrub_t1', 'in_file')]),
])
if basil:
basilnativet1 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='basil', suffix='cbf',
space='T1w', compress=True),
name='basilnativet1', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
pvnativet1 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='pvc', suffix='cbf',
space='T1w', compress=True),
name='pvcnativet1', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
attnativet1 = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='bat', suffix='cbf',
space='T1w', compress=True),
name='attnativet1', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
workflow.connect([
(inputnode, basilnativet1, [('source_file', 'source_file'),
('basil_t1', 'in_file')]),
(inputnode, pvnativet1, [('source_file', 'source_file'),
('pv_t1', 'in_file')]),
(inputnode, attnativet1, [('source_file', 'source_file'),
('att_t1', 'in_file')]),
])
workflow.connect([
(raw_sources, ds_asl_mask_t1, [('out', 'RawSources')]),
])
if getattr(spaces, '_cached') is None:
return workflow
# Store resamplings in standard spaces when listed in --output-spaces
if spaces.cached.references:
spacesource = pe.Node(SpaceDataSource(),
name='spacesource', run_without_submitting=True)
spacesource.iterables = ('in_tuple', [
(s.fullname, s.spec) for s in spaces.cached.get_standard(dim=(3,))
])
out_names = ['template', 'asl_std', 'asl_std_ref', 'asl_mask_std',
'cbf_std', 'meancbf_std']
if scorescrub:
out_names = out_names + ['score_std', 'avgscore_std', 'scrub_std']
if basil:
            out_names = out_names + ['basil_std', 'pv_std', 'att_std']
select_std = pe.Node(KeySelect(
fields=out_names),
name='select_std', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
ds_asl_std = pe.Node(
DerivativesDataSink(
base_directory=output_dir, desc='preproc', compress=True, SkullStripped=False,
RepetitionTime=metadata.get('RepetitionTime'), TaskName=metadata.get('TaskName'),
dismiss_entities=("echo",)),
name='ds_asl_std', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
ds_asl_std_ref = pe.Node(
DerivativesDataSink(base_directory=output_dir, suffix='aslref', compress=True,
dismiss_entities=("echo",)),
name='ds_asl_std_ref', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
ds_asl_mask_std = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='brain', suffix='mask',
compress=True, dismiss_entities=("echo",)),
name='ds_asl_mask_std', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
cbfstd = pe.Node(
DerivativesDataSink(base_directory=output_dir, suffix='cbf', compress=True),
name='cbfstd', run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB)
meancbfstd = pe.Node(
DerivativesDataSink(base_directory=output_dir, suffix='mean_cbf', compress=True),
name='meancbfstd', run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB)
workflow.connect([
(inputnode, ds_asl_std, [('source_file', 'source_file')]),
(inputnode, ds_asl_std_ref, [('source_file', 'source_file')]),
(inputnode, ds_asl_mask_std, [('source_file', 'source_file')]),
(inputnode, cbfstd, [('source_file', 'source_file')]),
(inputnode, meancbfstd, [('source_file', 'source_file')]),
(inputnode, select_std, [('asl_std', 'asl_std'),
('asl_std_ref', 'asl_std_ref'),
('asl_mask_std', 'asl_mask_std'),
('cbf_std', 'cbf_std'),
('meancbf_std', 'meancbf_std'),
('template', 'template'),
('spatial_reference', 'keys')]),
(spacesource, select_std, [('uid', 'key')]),
(select_std, ds_asl_std, [('asl_std', 'in_file')]),
(spacesource, ds_asl_std, [('space', 'space'),
('cohort', 'cohort'),
('resolution', 'resolution'),
('density', 'density')]),
(select_std, ds_asl_std_ref, [('asl_std_ref', 'in_file')]),
(spacesource, ds_asl_std_ref, [('space', 'space'),
('cohort', 'cohort'),
('resolution', 'resolution'),
('density', 'density')]),
(select_std, ds_asl_mask_std, [('asl_mask_std', 'in_file')]),
(spacesource, ds_asl_mask_std, [('space', 'space'),
('cohort', 'cohort'),
('resolution', 'resolution'),
('density', 'density')]),
(select_std, cbfstd, [('cbf_std', 'in_file')]),
(spacesource, cbfstd, [('space', 'space'),
('cohort', 'cohort'),
('resolution', 'resolution'),
('density', 'density')]),
(select_std, meancbfstd, [('meancbf_std', 'in_file')]),
(spacesource, meancbfstd, [('space', 'space'),
('cohort', 'cohort'),
('resolution', 'resolution'),
('density', 'density')]),
(raw_sources, ds_asl_mask_std, [('out', 'RawSources')]),
])
if scorescrub:
scorestd = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='score', suffix='cbf',
compress=True),
name='scorestd', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
meanscorestd = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='score', suffix='mean_cbf',
compress=True),
name='meanscorestd', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
scrubstd = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='scrub', suffix='cbf',
compress=True),
name='scrubstd', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
workflow.connect([
(inputnode, scorestd, [('source_file', 'source_file')]),
(inputnode, meanscorestd, [('source_file', 'source_file')]),
(inputnode, scrubstd, [('source_file', 'source_file')]),
(inputnode, select_std, [
('score_std', 'score_std'),
('avgscore_std', 'avgscore_std'),
('scrub_std', 'scrub_std')]),
(select_std, scorestd, [('score_std', 'in_file')]),
(spacesource, scorestd, [('space', 'space'),
('cohort', 'cohort'),
('resolution', 'resolution'),
('density', 'density')]),
(select_std, meanscorestd, [('avgscore_std', 'in_file')]),
(spacesource, meanscorestd, [('space', 'space'),
('cohort', 'cohort'),
('resolution', 'resolution'),
('density', 'density')]),
(select_std, scrubstd, [('scrub_std', 'in_file')]),
(spacesource, scrubstd, [('space', 'space'),
('cohort', 'cohort'),
('resolution', 'resolution'),
('density', 'density')]),
])
if basil:
basilstd = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='basil', suffix='cbf',
compress=True),
name='basilstd', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
pvstd = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='pvc', suffix='cbf',
compress=True),
name='pvcstd', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
attstd = pe.Node(
DerivativesDataSink(base_directory=output_dir, desc='bat', suffix='cbf',
compress=True),
name='attstd', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB)
workflow.connect([
(inputnode, basilstd, [('source_file', 'source_file')]),
(inputnode, pvstd, [('source_file', 'source_file')]),
(inputnode, attstd, [('source_file', 'source_file')]),
(inputnode, select_std, [
('basil_std', 'basil_std'),
('pv_std', 'pv_std'),
('att_std', 'att_std')]),
(select_std, basilstd, [('basil_std', 'in_file')]),
(spacesource, basilstd, [('space', 'space'),
('cohort', 'cohort'),
('resolution', 'resolution'),
('density', 'density')]),
(select_std, pvstd, [('pv_std', 'in_file')]),
(spacesource, pvstd, [('space', 'space'),
('cohort', 'cohort'),
('resolution', 'resolution'),
('density', 'density')]),
(select_std, attstd, [('att_std', 'in_file')]),
(spacesource, attstd, [('space', 'space'),
('cohort', 'cohort'),
('resolution', 'resolution'),
('density', 'density')]),
])
return workflow | c5a5425dd38fd1b451b41a687a44c8edbb3d24b0 | 3,652,021 |
from collections import defaultdict
def makehash(w=dict):
    """Autovivification, like hashes in Perl.
    http://stackoverflow.com/questions/651794/whats-the-best-way-to-initialize-a-dict-of-dicts-in-python
    Call it like h = makehash(), then assign directly:
    h[1][2] = 3
    Useful ONLY for a 2-level hash.
    """
# return defaultdict(makehash)
return defaultdict(w) | 5c772c07de9231c40053b29c545e25b611dd3b6e | 3,652,022 |
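# A minimal usage sketch for makehash() above; only the standard library is
# assumed, matching the 2-level autovivification described in its docstring.
h = makehash()
h[1][2] = 3                   # the inner dict for key 1 is created automatically
h["a"]["b"] = "c"
print(h[1][2], h["a"]["b"])   # -> 3 c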
def sample_parameters(kmodel,
tmodel,
individual,
param_sampler,
scaling_parameters,
only_stable=True,
):
"""
Run sampling on first order model
"""
solution_raw = individual.data.data
# Load fluxes and concentrations
fluxes = load_fluxes(solution_raw, tmodel, kmodel,
density=scaling_parameters.DENSITY,
ratio_gdw_gww=scaling_parameters.GDW_GWW_RATIO,
concentration_scaling=scaling_parameters.CONCENTRATION_SCALING,
time_scaling=scaling_parameters.TIME_SCALING)
concentrations = load_concentrations(solution_raw, tmodel, kmodel,
concentration_scaling=scaling_parameters.CONCENTRATION_SCALING)
# Fetch equilibrium constants
load_equilibrium_constants(solution_raw, tmodel, kmodel,
concentration_scaling=scaling_parameters.CONCENTRATION_SCALING,
in_place=True)
parameter_population_lam_mu,\
lamda_max, lamda_min = param_sampler.sample(kmodel,
fluxes,
concentrations,
only_stable = only_stable,
min_max_eigenvalues=True)
return parameter_population_lam_mu, lamda_max, lamda_min | cc48f170c58c090844dbbf0e72aa2bc9f2a1598b | 3,652,023 |
import yaml
def load_yaml(fpath):
""" load settings from a yaml file and return them as a dictionary """
with open(fpath, 'r') as f:
        settings = yaml.safe_load(f)  # safe_load avoids constructing arbitrary Python objects
return settings | bd9c19407c39e190f2d7fd734d118dbb4e9378ab | 3,652,024 |
import statistics
def recommendation(agent, other_agent, resource_id, scale, logger, discovery, recency_limit):
"""
Get recommendations on other agent of third agents and average them to one recommendation value.
:param agent: The agent which calculates the popularity.
:type agent: str
:param other_agent: The other agent for which the popularity value is calculated.
:type other_agent: str
:param resource_id: The URI of the evaluated resource.
:type resource_id: str
:param scale: The Scale object to be used by the agent.
:type scale: Scale
:param logger: The logger object to be used by the agent.
:type logger: BasicLogger
:param discovery: Addresses of all agents within the scenario.
:type discovery: dict
:param recency_limit: A datetime object which is used for "forgetting" old history entries
:type recency_limit: datetime
:return: The Recommendation trust value.
:rtype: float or int
"""
agents_to_ask = []
for third_agent in discovery:
if third_agent != agent and third_agent != other_agent:
combined = get_combined_direct_experience_for_agent(
agent, third_agent, logger, recency_limit, scale)
            if combined is not None and combined >= scale.minimum_to_trust_others():
agents_to_ask.append(third_agent)
recommendations = ask_for_recommendations(
agent, resource_id, agents_to_ask, scale, logger, discovery, recency_limit)
return statistics.median(recommendations) if len(recommendations) > 0 else None | 0670ec3d388dc008f2c5315907fac11f80aa7ebe | 3,652,025 |
import time
import numpy
import pandas
import numpy.testing
import mhctools
def do_predictions_mhctools(work_item_dicts, constant_data=None):
"""
    Each work item is a dict with the keys:
        work_item_num, peptides, and alleles.
"""
# This may run on the cluster in a way that misses all top level imports,
# so we have to re-import everything here.
if constant_data is None:
constant_data = GLOBAL_DATA
cols = constant_data['cols']
predictor_name = constant_data['args'].predictor
results = []
for (i, d) in enumerate(work_item_dicts):
work_item_num = d['work_item_num']
peptides = d['peptides']
alleles = d['alleles']
print("Processing work item", i + 1, "of", len(work_item_dicts))
result = {}
results.append((work_item_num, result))
if predictor_name == "netmhcpan4-ba":
predictor = mhctools.NetMHCpan4(
alleles=alleles,
program_name="netMHCpan-4.0",
mode="binding_affinity")
elif predictor_name == "netmhcpan4-el":
predictor = mhctools.NetMHCpan4(
alleles=alleles,
program_name="netMHCpan-4.0",
mode="elution_score")
elif predictor_name == "mixmhcpred":
# Empirically determine supported alleles.
mixmhcpred_usable_alleles = []
unusable_alleles = []
for allele in alleles:
predictor = mhctools.MixMHCpred(alleles=[allele])
# We use inf not nan to indicate unsupported alleles since
# we use nan to indicate incomplete results that still need
# to execute.
empty_results = pandas.Series(index=peptides,
dtype=numpy.float16)
empty_results[:] = float('-inf')
try:
predictor.predict_peptides_dataframe(["PEPTIDESS"])
mixmhcpred_usable_alleles.append(allele)
except ValueError:
unusable_alleles.append(allele)
for col in cols:
result["%s %s" % (allele, col)] = empty_results.values
print("MixMHCpred usable alleles: ", *mixmhcpred_usable_alleles)
print("MixMHCpred unusable alleles: ", *unusable_alleles)
predictor = mhctools.MixMHCpred(alleles=mixmhcpred_usable_alleles)
assert mixmhcpred_usable_alleles, mixmhcpred_usable_alleles
else:
raise ValueError("Unsupported", predictor_name)
start = time.time()
df = predictor.predict_peptides_dataframe(peptides)
print("Predicted for %d peptides x %d alleles in %0.2f sec." % (
len(peptides), len(alleles), (time.time() - start)))
for (allele, sub_df) in df.groupby("allele"):
for col in cols:
result["%s %s" % (allele, col)] = (
sub_df[col].values.astype(
constant_data['args'].result_dtype))
return results | c42270f3b31b984973e9e668902ac2018f38b25f | 3,652,026 |
def inceptionresnetv2(**kwargs):
"""
InceptionResNetV2 model from 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
https://arxiv.org/abs/1602.07261.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_inceptionresnetv2(model_name="inceptionresnetv2", bn_epsilon=1e-3, **kwargs) | 7c11c147d01b6551fa1b65cb5d24497efc2a3d3b | 3,652,027 |
def _n_pow_i(a, b, n):
"""
    Return (a + b*i)**n as the tuple (real, imag).
"""
x = a
y = b
for i in range(1, n):
x1 = (x*a) - (y*b)
y1 = (y*a) + (x*b)
x = x1
y = y1
return x, y | 35b00c7bc76aaf19a5acdf012e63c9c0c50e5d1d | 3,652,029 |
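# Quick self-check of _n_pow_i() above against Python's built-in complex
# arithmetic; illustrative only, nothing beyond the standard library is assumed.
re_part, im_part = _n_pow_i(1, 1, 4)     # (1 + 1j)**4 == -4 + 0j
z = (1 + 1j) ** 4
assert (re_part, im_part) == (z.real, z.imag)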
def IsNameBased(link):
"""Finds whether the link is name based or not
:param str link:
:return:
True if link is name-based; otherwise, False.
:rtype: boolean
"""
if not link:
return False
# trimming the leading "/"
if link.startswith("/") and len(link) > 1:
link = link[1:]
# Splitting the link(separated by "/") into parts
parts = link.split("/")
# First part should be "dbs"
if not (parts and parts[0].lower() == "dbs"):
return False
# The second part is the database id(ResourceID or Name) and cannot be empty
if len(parts) < 2 or not parts[1]:
return False
# Either ResourceID or database name
databaseID = parts[1]
# Length of databaseID(in case of ResourceID) is always 8
if len(databaseID) != 8:
return True
return not IsValidBase64String(str(databaseID)) | e887fd6cd02c7ef71cbafa825014e1fca2c9d4d1 | 3,652,030 |
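# Illustrative calls for IsNameBased() above. These links avoid the 8-character
# resource-id case, so the IsValidBase64String() helper (assumed to live elsewhere
# in this module) is never exercised.
assert IsNameBased("dbs/mydatabase/colls/mycollection") is True
assert IsNameBased("/dbs/mydatabase") is True
assert IsNameBased("colls/mycollection") is False   # does not start with "dbs"
assert IsNameBased("") is False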
def register_submit(class_name, fire) -> None:
"""
    Register a submit handler on a form.
    :param class_name: class name of the form
    :param fire: function that will be fired on form submit
:return: None
"""
def submit_handler(event) -> None:
"""
Handle form submit and fire handler
:param event: Default html form object
:return: None
"""
event.preventDefault()
fire()
if window.jQuery('.' + class_name).length == 1:
return window.jQuery('.' + class_name).on('submit', submit_handler) | f2f8b2b067a282b073d6cc13825aedc3509c8077 | 3,652,031 |
from typing import Any
def compile(obj: Any) -> Definition:
"""Extract a definition from a JSON-like object representation."""
return ConcreteValue(obj) | 5e82471be599e77739e485468571bee296bfca71 | 3,652,032 |
def policy_network(vocab_embed_variable, document_placeholder, label_placeholder):
"""Build the policy core network.
Args:
vocab_embed_variable: [vocab_size, FLAGS.wordembed_size], embeddings without PAD and UNK
document_placeholder: [None,(FLAGS.max_doc_length + FLAGS.max_title_length + FLAGS.max_image_length +
FLAGS.max_firstsentences_length + FLAGS.max_randomsentences_length), FLAGS.max_sent_length]
label_placeholder: Gold label [None, FLAGS.max_doc_length, FLAGS.target_label_size], only used during cross entropy training of JP's model.
Returns:
Outputs of sentence extractor and logits without softmax
"""
with tf.variable_scope('PolicyNetwork') as scope:
### Full Word embedding Lookup Variable
# PADDING embedding non-trainable
pad_embed_variable = variable_on_cpu("pad_embed", [1, FLAGS.wordembed_size], tf.constant_initializer(0), trainable=False)
# UNK embedding trainable
unk_embed_variable = variable_on_cpu("unk_embed", [1, FLAGS.wordembed_size], tf.constant_initializer(0), trainable=True)
# Get fullvocab_embed_variable
fullvocab_embed_variable = tf.concat(0, [pad_embed_variable, unk_embed_variable, vocab_embed_variable])
# print(fullvocab_embed_variable)
### Lookup layer
with tf.variable_scope('Lookup') as scope:
document_placeholder_flat = tf.reshape(document_placeholder, [-1])
document_word_embedding = tf.nn.embedding_lookup(fullvocab_embed_variable, document_placeholder_flat, name="Lookup")
document_word_embedding = tf.reshape(document_word_embedding, [-1, (FLAGS.max_doc_length + FLAGS.max_title_length + FLAGS.max_image_length +
FLAGS.max_firstsentences_length + FLAGS.max_randomsentences_length),
FLAGS.max_sent_length, FLAGS.wordembed_size])
# print(document_word_embedding)
### Convolution Layer
with tf.variable_scope('ConvLayer') as scope:
document_word_embedding = tf.reshape(document_word_embedding, [-1, FLAGS.max_sent_length, FLAGS.wordembed_size])
document_sent_embedding = conv1d_layer_sentence_representation(document_word_embedding) # [None, sentembed_size]
document_sent_embedding = tf.reshape(document_sent_embedding, [-1, (FLAGS.max_doc_length + FLAGS.max_title_length + FLAGS.max_image_length +
FLAGS.max_firstsentences_length + FLAGS.max_randomsentences_length), FLAGS.sentembed_size])
# print(document_sent_embedding)
### Reshape Tensor to List [-1, (max_doc_length+max_title_length+max_image_length), sentembed_size] -> List of [-1, sentembed_size]
with variable_scope.variable_scope("ReshapeDoc_TensorToList"):
document_sent_embedding = reshape_tensor2list(document_sent_embedding, (FLAGS.max_doc_length + FLAGS.max_title_length + FLAGS.max_image_length +
FLAGS.max_firstsentences_length + FLAGS.max_randomsentences_length), FLAGS.sentembed_size)
# print(document_sent_embedding)
# document_sents_enc
document_sents_enc = document_sent_embedding[:FLAGS.max_doc_length]
if FLAGS.doc_encoder_reverse:
document_sents_enc = document_sents_enc[::-1]
# document_sents_ext
document_sents_ext = document_sent_embedding[:FLAGS.max_doc_length]
# document_sents_titimg
document_sents_titimg = document_sent_embedding[FLAGS.max_doc_length:]
### Document Encoder
with tf.variable_scope('DocEnc') as scope:
encoder_outputs, encoder_state = simple_rnn(document_sents_enc)
### Sentence Label Extractor
with tf.variable_scope('SentExt') as scope:
if (FLAGS.attend_encoder) and (len(document_sents_titimg) != 0):
# Multiple decoder
print("Multiple decoder is not implement yet.")
exit(0)
# # Decoder to attend captions
# attendtitimg_extractor_output, _ = simple_attentional_rnn(document_sents_ext, document_sents_titimg, initial_state=encoder_state)
# # Attend previous decoder
# logits = sentence_extractor_seqrnn_docatt(document_sents_ext, attendtitimg_extractor_output, encoder_state, label_placeholder)
elif (not FLAGS.attend_encoder) and (len(document_sents_titimg) != 0):
# Attend only titimages during decoding
extractor_output, logits = sentence_extractor_nonseqrnn_titimgatt(document_sents_ext, encoder_state, document_sents_titimg)
elif (FLAGS.attend_encoder) and (len(document_sents_titimg) == 0):
# JP model: attend encoder
            extractor_output, logits = sentence_extractor_seqrnn_docatt(document_sents_ext, encoder_outputs, encoder_state, label_placeholder)
else:
# Attend nothing
extractor_output, logits = sentence_extractor_nonseqrnn_noatt(document_sents_ext, encoder_state)
# print(extractor_output)
# print(logits)
return extractor_output, logits | d59cf6d1d99fca7c654087d8fc720b64e419bced | 3,652,033 |
def get_feature(file_path: str):
""" Read and parse given feature file"""
print('Reading feature file ', file_path)
    with open(file_path, "r") as file_obj:
        stream = file_obj.read()
    parser = Parser()
    return parser.parse(TokenScanner(stream)) | e30e78afdb205aa2c26e3831ca7b0091579866a3 | 3,652,034
def hough_lines_draw(img, outfile, peaks, rhos, thetas):
"""
Returns the image with hough lines drawn.
Args
- img: Image on which lines will be drawn
- outfile: The output file. The file will be saved.
- peaks: peaks returned by hough_peaks
- rhos: array of rhos used in Hough Space
- thetas: array of thetas used in Hough Space
Returns
    - img: after drawing lines on it.
"""
for peak in peaks:
rho = rhos[peak[0]]
theta = thetas[peak[1]] * np.pi / 180.0
a = np.cos(theta)
b = np.sin(theta)
x0 = a*rho
y0 = b*rho
x1 = int(x0 + 1000*(-b))
y1 = int(y0 + 1000*(a))
x2 = int(x0 - 1000*(-b))
y2 = int(y0 - 1000*(a))
cv2.line(img, (x1,y1),(x2,y2),(0,0,255),2)
cv2.imwrite(outfile, img)
return img | f1731adb7d90a69dc50721c03f9e2ab01b7e2078 | 3,652,035 |
def cg_file_h(tmpdir):
"""Get render config."""
return {
'cg_file': str(tmpdir.join('muti_layer_test.hip'))
} | caedb2324953e4ca90ebffdf80be60fed1b8026d | 3,652,036 |
from ._groupbyuntil import group_by_until_
from typing import Optional
from typing import Callable
from typing import Any
def group_by_until(
key_mapper: Mapper[_T, _TKey],
element_mapper: Optional[Mapper[_T, _TValue]],
duration_mapper: Callable[[GroupedObservable[_TKey, _TValue]], Observable[Any]],
subject_mapper: Optional[Callable[[], Subject[_TValue]]] = None,
) -> Callable[[Observable[_T]], Observable[GroupedObservable[_TKey, _TValue]]]:
"""Groups the elements of an observable sequence according to a
specified key mapper function. A duration mapper function is used
to control the lifetime of groups. When a group expires, it
receives an OnCompleted notification. When a new element with the
same key value as a reclaimed group occurs, the group will be
reborn with a new lifetime request.
.. marble::
:alt: group_by_until
--1--2--a--3--b--c-|
[ group_by_until() ]
-+-----+-----------|
+a-----b--c-|
+1--2-----3-------|
Examples:
>>> group_by_until(lambda x: x.id, None, lambda : reactivex.never())
>>> group_by_until(
lambda x: x.id, lambda x: x.name, lambda grp: reactivex.never()
)
>>> group_by_until(
lambda x: x.id,
lambda x: x.name,
lambda grp: reactivex.never(),
lambda: ReplaySubject()
)
Args:
key_mapper: A function to extract the key for each element.
element_mapper: A function to map each source element to an element in
an observable group.
duration_mapper: A function to signal the expiration of a group.
subject_mapper: A function that returns a subject used to initiate
a grouped observable. Default mapper returns a Subject object.
Returns:
An operator function that takes an observable source and
returns a sequence of observable groups, each of which
corresponds to a unique key value, containing all elements that
share that same key value. If a group's lifetime expires, a new
group with the same key value can be created once an element
with such a key value is encountered.
"""
return group_by_until_(key_mapper, element_mapper, duration_mapper, subject_mapper) | c4f54140dadbd0d043400a35f9be9f978460ae3c | 3,652,037 |
def GetFilesystemSize(options, image_type, layout_filename, num):
"""Returns the filesystem size of a given partition for a given layout type.
If no filesystem size is specified, returns the partition size.
Args:
options: Flags passed to the script
image_type: Type of image eg base/test/dev/factory_install
layout_filename: Path to partition configuration file
num: Number of the partition you want to read from
Returns:
Size of selected partition filesystem in bytes
"""
partitions = GetPartitionTableFromConfig(options, layout_filename, image_type)
partition = GetPartitionByNumber(partitions, num)
if 'fs_bytes' in partition:
return partition['fs_bytes']
else:
return partition['bytes'] | 1ea542366a11f9a00b648b5158282a4b5e39f633 | 3,652,038 |
def match_pairs(obj_match, params):
""" Matches objects into pairs given a disparity matrix and removes
bad matches. Bad matches have a disparity greater than the maximum
threshold. """
# Create a list of sets, where the i-th set will store the objects
# from image1 that have merged with objects in image2
# Maybe faster to use a 2D array?
obj_merge = np.zeros(obj_match.shape, dtype=bool)
# Determine optimal pairs
pairs = optimize.linear_sum_assignment(obj_match)
for id1 in pairs[0]:
if obj_match[id1, pairs[1][id1]] > params['MAX_DISPARITY']:
# Set to -1 if object has died (or merged)
pairs[1][id1] = -1
# Find the closest object in image2 to object with id1
id2 = np.argmin(obj_match[id1])
# If this object was in the search radius of object id1,
            # mark the merge candidate in obj_merge[id1, id2].
if obj_match[id1, id2] < LARGE_NUM:
obj_merge[id1, id2] = True
pairs = pairs[1] + 1 # ids in current_objects are 1-indexed
return pairs, obj_merge | 42939faca3cc2a61e8dde1b00818da593aa89c7a | 3,652,039 |
def spike_train_convolution(spike_times, interval, dt, sigma):
"""
Needed for Schreiber reliability measure
"""
N = int(np.floor((interval[1]-interval[0])/dt)+1)
x = np.linspace(interval[0], interval[1], N)
s = np.zeros(N)
for spike in spike_times:
s = s + gaussian(x, spike, sigma)
return s | 0dbd2ac6a3cc016ecb0ab7209256d1544b6acfd1 | 3,652,040 |
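# A brief usage sketch for spike_train_convolution() above: three spikes smoothed
# with a 10 ms Gaussian on a 1 s interval. It relies on the same gaussian(x, mu, sigma)
# helper and numpy import that the function itself assumes.
smoothed = spike_train_convolution([0.1, 0.4, 0.45], interval=(0.0, 1.0), dt=0.001, sigma=0.01)
print(smoothed.shape)   # (1001,)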
def interpolate_peak(spectrum: list, peak: int) -> float:
""" Uses quadratic interpolation of spectral peaks to get a better estimate of the peak.
Args:
- spectrum: the frequency bin to analyze.
- peak: the location of the estimated peak in the spectrum list.
Based off: https://ccrma.stanford.edu/~jos/sasp/Quadratic_Interpolation_Spectral_Peaks.html
"""
prev_neighbour = spectrum[peak-1]
next_neighbour = spectrum[peak+1]
peak_value = spectrum[peak]
    # Quadratic (parabolic) interpolation: the vertex offset from the peak bin is
    # 0.5 * (next - prev) / (2*peak - prev - next).
    estimated_peak = 0.5 * (next_neighbour
                            - prev_neighbour) / (2 * peak_value - prev_neighbour - next_neighbour) + peak
    return abs(estimated_peak) | 0e74057908e7839438325da9adafdf385012ce17 | 3,652,042
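# Worked example for interpolate_peak() above: sample the parabola
# y = 10 - (x - 2.3)**2 at integer bins; the true maximum sits at x = 2.3 and
# quadratic interpolation recovers it exactly for a parabola.
spec = [10 - (x - 2.3) ** 2 for x in range(6)]
peak_bin = spec.index(max(spec))            # coarse peak at bin 2
print(interpolate_peak(spec, peak_bin))     # ~2.3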
def _check_trunk_switchport(
dut, check, expd_status: SwitchportTrunkExpectation, msrd_status: dict
) -> tr.CheckResultsCollection:
"""
This function validates a trunk switchport against the expected values.
These checks include matching on the native-vlan and trunk-allowed-vlans.
"""
results = list()
device = dut.device
e_nvl_id = expd_status.native_vlan.vlan_id if expd_status.native_vlan else None
m_nvl_id = msrd_status["trunkingNativeVlanId"]
if e_nvl_id and (e_nvl_id != m_nvl_id):
results.append(
tr.CheckFailFieldMismatch(
device=device,
check=check,
field="native_vlan",
expected=e_nvl_id,
measurement=m_nvl_id,
)
)
# EOS stores this as a CSV string, with ranges, for example:
# 14,16,25-26,29
e_tr_allowed_vids = sorted(
[vlan.vlan_id for vlan in expd_status.trunk_allowed_vlans]
)
    # convert the list of vlan-ids to a range string for string comparison
    # purposes.
e_tr_alwd_vstr = range_string(e_tr_allowed_vids)
m_tr_alwd_vstr = msrd_status["trunkAllowedVlans"]
    # if there are no expected allowed vlans on this trunk, then set the expected
    # value to "NONE" since that is what EOS reports in this case.
if not e_tr_alwd_vstr:
e_tr_alwd_vstr = "NONE"
if e_tr_alwd_vstr != m_tr_alwd_vstr:
results.append(
tr.CheckFailFieldMismatch(
device=device,
check=check,
field="trunk_allowed_vlans",
expected=e_tr_alwd_vstr,
measurement=m_tr_alwd_vstr,
)
)
return results | a739ae5897c4627ea78d27a07f831e528318f052 | 3,652,043 |
def is_valid_compressed(file):
"""Check tar gz or zip is valid."""
try:
archive = ZipFile(file, 'r')
try:
corrupt = archive.testzip()
except zlib_error:
corrupt = True
archive.close()
except BadZipfile:
corrupt = True
return not corrupt | 261a4fcdfa1117aa749b00805e323f21a04d0f57 | 3,652,044 |
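# A small round-trip check for is_valid_compressed() above using an in-memory
# archive; ZipFile is assumed to be imported already, as the function itself uses it.
import io
buf = io.BytesIO()
with ZipFile(buf, 'w') as archive:
    archive.writestr('hello.txt', 'hello world')
buf.seek(0)
print(is_valid_compressed(buf))   # True for a well-formed archive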
def Krsol_SP_pt(SP,pt):
"""
Krsol_SP_pt solubility of Kr in seawater
==========================================================================
USAGE:
Krsol = sol.Krsol_SP_pt(SP,pt)
DESCRIPTION:
Calculates the krypton, Kr, concentration expected at equilibrium with
air at an Absolute Pressure of 101325 Pa (sea pressure of 0 dbar)
including saturated water vapor. This function uses the solubility
coefficients derived from the data of Weiss (1971).
Note that this algorithm has not been approved by IOC and is not work
from SCOR/IAPSO Working Group 127. It is included in the GSW
Oceanographic Toolbox as it seems to be oceanographic best practice.
INPUT:
SP = Practical Salinity (PSS-78) [ unitless ]
pt = potential temperature (ITS-90) referenced [ deg C ]
to one standard atmosphere (0 dbar).
SP & pt need to have the same dimensions.
OUTPUT:
Krsol = solubility of krypton in micro-moles per kg [ umol/kg ]
AUTHOR: Roberta Hamme, Paul Barker and Trevor McDougall
[ [email protected] ]
REFERENCES:
IOC, SCOR and IAPSO, 2010: The international thermodynamic equation of
seawater - 2010: Calculation and use of thermodynamic properties.
Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
UNESCO (English), 196 pp. Available from http://www.TEOS-10.org
Weiss, R.F. and T.K. Kyser, 1978: Solubility of Krypton in Water and
Seawater. J. Chem. Thermodynamics, 23, 69-72.
The software is available from http://www.TEOS-10.org
==========================================================================
"""
x = SP # Note that salinity argument is Practical Salinity, this is
    # because the major ionic components of seawater related to Cl
# are what affect the solubility of non-electrolytes in seawater.
    pt68 = pt * 1.00024 # pt68 is the potential temperature in degrees C on
# the 1968 International Practical Temperature Scale IPTS-68.
y = pt68 + K0
y_100 = y * 1e-2
# Table 2 (Weiss and Kyser, 1978)
a = (-112.6840, 153.5817, 74.4690, -10.0189)
b = (-0.011213, -0.001844, 0.0011201)
Krsol_mL = np.exp(a[0] + a[1] * 100/y + a[2] * np.log(y_100) + a[3] * \
y_100 + x * (b[0] + y_100 * (b[1] + b[2] * y_100)))
# mL/kg to umol/kg for Kr (1/22.3511e-3)
#Molar volume at STP (Dymond and Smith, 1980).
Krsol = Krsol_mL * 4.474052731185490e1
return Krsol | 3402fdf5756ca9a54938211e67a57de1326bcc7f | 3,652,045 |
def find_title(item):
"""Title of the video"""
title = item['snippet']['title']
return title | 9c6f64e02d959d46cfd1e4536f5faf7ec0c281bd | 3,652,047 |
import hashlib
def calc_fingerprint(text):
"""Return a hex string that fingerprints `text`."""
return hashlib.sha1(text).hexdigest() | 8be154e4e32ae9412a73e73397f0e0198ae9c862 | 3,652,048 |
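# Illustrative call for calc_fingerprint() above; a SHA-1 digest is always
# 40 hexadecimal characters, whatever the input length.
fp = calc_fingerprint("hello world")
print(fp, len(fp) == 40)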
from typing import List
from typing import Any
from typing import Tuple
import torch
from torch import Tensor
def yolo_collate_fn(
batch: List[Any],
) -> Tuple[Tensor, Tuple[Tensor, Tensor, List[Tuple[Tensor, Tensor]]]]:
"""
Collate function to be used for creating a DataLoader with values for Yolo model
input.
:param batch: a batch of data points and annotations transformed by
bounding_box_and_labels_to_yolo_fmt
:return: the batch stacked as tensors for all values except for the
original annotations
"""
images = []
targets = []
annotations = []
for idx, (image, (target, annotation)) in enumerate(batch):
images.append(image.unsqueeze(0))
img_label = torch.ones(target.size(0), 1) * idx
targets.append(torch.cat((img_label, target), 1))
annotations.append(annotation)
images = torch.cat(images, 0)
targets = torch.cat(targets, 0)
return images, (targets, annotations) | 599d4e9bbb91cf6d79225024cbcd9690cb55f8e6 | 3,652,049 |
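# A small shape check for yolo_collate_fn() above with a hand-built batch of two
# items. Each item is (image, (target, annotation)); the 5-column target layout
# used here is an illustrative assumption.
batch = [
    (torch.zeros(3, 416, 416), (torch.rand(2, 5), ("img0_ann",))),
    (torch.zeros(3, 416, 416), (torch.rand(3, 5), ("img1_ann",))),
]
imgs, (tgts, anns) = yolo_collate_fn(batch)
print(imgs.shape, tgts.shape, len(anns))   # [2, 3, 416, 416], [5, 6], 2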
def delete_category(category_id):
"""Delete a category."""
category = session.query(Category).filter_by(id=category_id).first()
if 'username' not in login_session:
flash("Please log in to continue.")
return redirect(url_for('login'))
if not exists_category(category_id):
flash("We are unable to process your request right now.")
return redirect(url_for('home'))
# If the logged in user does not have authorisation to
# edit the category, redirect to homepage.
if login_session['user_id'] != category.user_id:
flash("We are unable to process your request right now.")
return redirect(url_for('home'))
if request.method == 'POST':
session.delete(category)
session.commit()
flash("Category successfully deleted!")
return redirect(url_for('home'))
else:
return render_template("delete_category.html", category=category) | 979aabe5b6d7730c9f75a714266d6aad61e1cd41 | 3,652,050 |
from typing import List
def get_all_users_of(fx_module: GraphModule, index: int) -> List[int]:
"""Given the graph(fx_module) and an index, return a list of all node indexes that use this node"""
graph = fx_module.graph
current_node = graph.nodes[index]
user_indexes: List[int] = []
"""if the node A is in node B's args, then B is the user of A
go through all the nodes, if the input node in any node's args,
then that node is the input node's user
"""
for i, n in enumerate(graph.nodes):
if find_use(n.args, current_node) or find_use(n.kwargs, current_node):
user_indexes.append(i)
return user_indexes | e3fc32aa7baf549bbfe4a2fb7558aa7bfb3d84b0 | 3,652,051 |
from operator import and_
def insert_from(
table_name, into_table_name, column_names=None, join_columns=None, create_if_not_exists=False, engine=None
):
"""
Inserts records from one table into another
:param table_name: the name of the table from which to insert records
:param into_table_name: the name of the table into which the records will go
:param column_names: an optional reduced list of column names to specify for insertion
:param join_columns: one or more column names that constitute unique records, not to be inserted
:param create_if_not_exists: if True, create into_table_name if it doesn't exist, otherwise exit with warning
:param engine: an optional sqlalchemy.engine to use in the UPDATE query
"""
both_tables = get_tables(engine=engine)
from_table = both_tables.get(table_name)
into_table = both_tables.get(into_table_name)
validate_table_name(from_table, table_name)
if not table_exists(into_table):
if not create_if_not_exists:
raise ValueError(f"No table named {into_table_name} to insert into")
return select_from(table_name, into_table_name, column_names, engine=engine)
# Validate parameters for excluding unique records
if isinstance(join_columns, str):
join_columns = [c.strip() for c in join_columns.split(",")]
if join_columns:
validate_columns_in(
from_table, join_columns,
empty_table=table_name,
message=f"Join columns missing in source table {table_name}"
)
validate_columns_in(
into_table, join_columns,
empty_table=into_table_name,
message=f"Join columns missing in target table {into_table_name}"
)
# Prepare column names to be inserted
log_message = f"insert_from: populating {into_table_name} from {table_name}"
from_cols = from_table.columns
into_cols = into_table.columns
if isinstance(column_names, str):
column_names = column_names.split(",")
if column_names is None or "*" in column_names:
log_message += f", with all columns in {table_name}"
insert_cols = from_cols
else:
log_message += f", with specified columns in {table_name}"
insert_cols = [c for c in from_cols if c.name in column_names]
if not insert_cols:
logger.warning("insert_from: no columns to insert")
return
elif column_names and len(column_names) > len(insert_cols):
target_cols = set(c.name for c in insert_cols)
ignore_cols = ", ".join(set(column_names).difference(target_cols))
logger.warning(f"insert_from: ignoring columns: {ignore_cols}")
# Prepare query with specified columns and filtering
if not join_columns:
insert_vals = Select(insert_cols).select_from(from_table)
else:
log_message += f", excluding those matching: {join_columns}"
# Exclude records matching specified columns via outer join
insert_from = from_table.outerjoin(
into_table, and_(*[from_cols[col] == into_cols[col] for col in join_columns])
)
insert_vals = (
Select(insert_cols)
.select_from(insert_from)
.where(and_(*[into_cols[col].is_(None) for col in join_columns]))
)
logger.info(log_message)
insert_from = Insert(into_table).from_select(names=[c.name for c in insert_cols], select=insert_vals)
with from_table.bind.connect() as conn:
conn.execute(insert_from.execution_options(autocommit=True)) | 8c013bdaeb1c16e1a487c4a90c0554e9b673f4d9 | 3,652,052 |
def format(color, style=''):
"""Return a QTextCharFormat with the given attributes.
"""
_color = QColor()
_color.setNamedColor(color)
_format = QTextCharFormat()
_format.setForeground(_color)
if 'bold' in style:
_format.setFontWeight(QFont.Bold)
if 'italic' in style:
_format.setFontItalic(True)
return _format | bd526cab85bd8909904af0c6e32b22d29c1de561 | 3,652,053 |
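# A sketch of how format() above is typically used when building a
# QSyntaxHighlighter: create the QTextCharFormat objects once and reuse them.
# The style names below are illustrative assumptions, not a fixed API.
STYLES = {
    'keyword': format('blue', 'bold'),
    'string': format('magenta'),
    'comment': format('darkGreen', 'italic'),
}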
def array_to_mincvolume(filename, array, like,
volumeType=None, dtype=None, labels=None,
write=True, close=False):
"""
Create a mincVolume from a data array.
Create a mincVolume from a data array, using coordinate system information from another volume.
Parameters
----------
    filename : str
A path to the new MINC volume.
array : array_like
Input array to convert to mincVolume.
like : mincVolume or str
Either an existing mincVolume object, or a path to one on disk.
volumeType : str, optional
MINC type. The default is None.
If no value is given (default), then volumeType will be set as ushort if the dtype
is a subtype of np.integer, otherwise volumeType will be set as double.
dtype : np.dtype, optional
Datatype for the mincVolume data array. The default is None.
If no value is given (default), the dtype of array is used.
labels : bool, optional
Does the output mincVolume represent integer labels? The default is None.
If no value is given (default), then labels will be set as True if the dtype
is a subtype of np.integer, otherwise labels will be set as False.
write : bool, optional
Should the mincVolume be written to disk? Default is True.
close : bool, optional
Should the mincVolume be closed? Default is False.
Returns
-------
outvol : mincVolume
An object of mincVolume type.
"""
if dtype is None:
dtype = array.dtype
if labels is None:
if np.issubdtype(array.dtype, np.integer):
labels = True
else:
labels = False
if volumeType is None:
if np.issubdtype(array.dtype, np.integer):
volumeType='ushort'
else:
volumeType='double'
if like.__class__ == mincVolume:
outvol = volumeFromData(outputFilename=filename,
data=array,
dimnames=like.getDimensionNames(),
starts=like.getStarts(),
steps=like.getSeparations(),
volumeType=volumeType,
dtype=dtype,
labels=labels,
x_dir_cosines=[i for i in like._x_direction_cosines],
y_dir_cosines=[i for i in like._y_direction_cosines],
z_dir_cosines=[i for i in like._z_direction_cosines],
)
# Set dimnames and starts
outvol.starts = like.getStarts()
outvol.dimnames = like.getDimensionNames()
else:
outvol = volumeLikeFile(likeFilename=like, outputFilename=filename,
dtype=dtype, volumeType=volumeType, labels=labels)
outvol.data = array
# Finish
if write:
outvol.writeFile()
if close:
outvol.closeVolume()
return(outvol) | 16074668c1143091322969a501b23203378ca169 | 3,652,054 |
import random
def getRandomPipe():
"""returns a randomly generated pipe"""
# y of gap between upper and lower pipe
gapY = random.randrange(0, int(BASEY * 0.6 - PIPEGAPSIZE))
gapY += int(BASEY * 0.2)
pipeHeight = IMAGES['pipe'][0].get_height()
pipeX = SCREENWIDTH + 10
return [
{'x': pipeX, 'y': gapY - pipeHeight}, # upper pipe
{'x': pipeX, 'y': gapY + PIPEGAPSIZE}, # lower pipe
] | a5789a090ff7ab88b5cf6cbf4ad8e0943ea9ccdf | 3,652,055 |
from typing import Optional
def get_spot_market_price(facility: Optional[str] = None,
plan: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSpotMarketPriceResult:
"""
Use this data source to get Packet Spot Market Price.
## Example Usage
```python
import pulumi
import pulumi_packet as packet
example = packet.get_spot_market_price(facility="ewr1",
plan="c1.small.x86")
```
:param str facility: Name of the facility.
:param str plan: Name of the plan.
"""
__args__ = dict()
__args__['facility'] = facility
__args__['plan'] = plan
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('packet:index/getSpotMarketPrice:getSpotMarketPrice', __args__, opts=opts, typ=GetSpotMarketPriceResult).value
return AwaitableGetSpotMarketPriceResult(
facility=__ret__.facility,
id=__ret__.id,
plan=__ret__.plan,
price=__ret__.price) | 912f07ced8a12ba4df7992c8cbba2576673a893f | 3,652,056 |
def _get_cluster_id(emr: "boto3.client", clusterName: str) -> str:
"""
Returns the id of a running cluster with given cluster name.
"""
clusters = emr.list_clusters()["Clusters"]
# choose the correct cluster
clusters = [c for c in clusters if c["Name"] == clusterName and c["Status"]["State"] in ["WAITING", "RUNNING"]]
if not clusters:
logger.info("No valid clusters")
raise Exception("cannot find running cluster: " + clusterName)
# take the first relevant cluster
return clusters[0]["Id"] | 9fba31d05157411d8fddde7502e174433859898f | 3,652,057 |
def seed_student(request, i):
"""Returns the properties for a new student entity.
"""
gsoc2009 = Program.get_by_key_name('google/gsoc2009')
user = User.get_by_key_name('user_%d' % i)
if not gsoc2009:
raise Error('Run seed_db first')
if not user:
raise Error('Run seed_many for at least %d users first.' % i)
properties = {
'key_name':'google/gsoc2009/student_%d' % i,
'link_id': 'student_%d' % i,
'scope_path': 'google/gsoc2009',
'scope': gsoc2009,
'user' : user,
'given_name': 'Student %d' % i,
'surname': 'Last Name',
'name_on_documents': 'Test Example',
'email': '[email protected]',
'res_street': 'Some Street',
'res_city': 'Some City',
'res_state': 'Some State',
'res_country': 'United States',
'res_postalcode': '12345',
'phone': '1-555-BANANA',
'birth_date': db.DateProperty.now(),
'agreed_to_tos': True,
'school_name': 'School %d' % i,
'school_country': 'United States',
'major': 'Computer Science',
'degree': 'Undergraduate',
'expected_graduation': 2012,
'program_knowledge': 'Knowledge %d' % i,
'school': None,
'can_we_contact_you': True,
}
return properties | 01f1923b4d1e5af74c6bbad2649f04be62f29c6f | 3,652,058 |
from typing import List
def apply(effect: List[float], signal: List[float]):
"""Given effect interpolated to length of given signal.
Args:
effect: effect to interpolate to signal length.
signal: length of which effect is interpolated to.
"""
max_len = max(len(effect), len(signal))
# Signal indices to effect indices.
i = interp1d(
np.linspace(0, len(signal) - 1, max_len),
np.linspace(0, len(effect) - 1, max_len),
)(np.arange(len(signal)))
# print(
# f"i[0:10] = {i[0:10]}, np.arange(len(effect))[0:10] = {np.arange(len(effect))[0:10]}, effect[0:10] = {effect[0:10]}"
# )
# Effect indices to effect.
return interp1d(np.arange(len(effect)), effect)(i) | 11bd4938c997cbef445493274fa3ee7447f1821e | 3,652,059 |
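# A tiny worked example for apply() above: a two-point "effect" resampled onto a
# five-sample signal. numpy (as np) and scipy's interp1d are assumed to be
# imported at module level, since the function itself relies on both.
ramp = apply([0.0, 1.0], [0.0] * 5)
print(ramp)   # evenly spaced ramp: 0.0, 0.25, 0.5, 0.75, 1.0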
from typing import List
def evaluate_features(features: np.ndarray, labels: np.ndarray, train_frac: float = 0.8) -> List[int]:
"""
Evaluates the marginal impact of each feature in the given array (by retraining).
Args:
features: A [N, T, D] array of input features for each sequence element
labels: A [N] array of labels per instance
Returns:
An (ordered) list of feature indices
"""
# For feasibility purposes, we start with the first feature
result: List[int] = [0]
remaining_idx = list(range(1, features.shape[1]))
split_point = int(features.shape[0] * train_frac)
train_features = features[0:split_point, :, :]
test_features = features[split_point:, :, :]
train_labels = labels[0:split_point]
test_labels = labels[split_point:]
train_samples = train_features.shape[0]
test_samples = test_features.shape[0]
while len(remaining_idx) > 0:
best_accuracy = 0.0
best_idx = None
for feature_idx in remaining_idx:
feature_indices = result + [feature_idx]
X_train = train_features[:, feature_indices, :].reshape(train_samples, -1)
X_test = test_features[:, feature_indices, :].reshape(test_samples, -1)
clf = LogisticRegression(max_iter=500)
clf.fit(X_train, train_labels)
accuracy = clf.score(X_test, test_labels)
if accuracy > best_accuracy:
best_accuracy = accuracy
best_idx = feature_idx
result.append(best_idx)
remaining_idx.pop(remaining_idx.index(best_idx))
print(best_accuracy)
print(result)
return result | 88b9d7cab4723934f16ab59c43e41d5a4140daa5 | 3,652,061 |
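# A minimal usage sketch for evaluate_features() above on synthetic data, with
# shapes matching the docstring ([N, T, D] features, [N] labels). numpy (as np)
# and scikit-learn's LogisticRegression are assumed imported, as in the function
# itself; on random inputs the printed accuracies hover around chance.
rng = np.random.default_rng(0)
feats = rng.normal(size=(200, 4, 3))
labs = rng.integers(0, 2, size=200)
print("feature order:", evaluate_features(feats, labs))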
import six
def pad_for_tpu(shapes_dict, hparams, max_length):
"""Pads unknown features' dimensions for TPU."""
padded_shapes = {}
def get_filler(specified_max_length):
if not specified_max_length:
return max_length
return min(specified_max_length, max_length)
inputs_none_filler = get_filler(hparams.max_input_seq_length)
targets_none_filler = get_filler(hparams.max_target_seq_length)
def pad_one_shape(shape, none_filler):
return [
(dim if dim is not None else none_filler) for dim in shape.as_list()
]
for key, shape in six.iteritems(shapes_dict):
if key == "inputs":
padded_shapes[key] = pad_one_shape(shape, inputs_none_filler)
elif key == "targets":
padded_shapes[key] = pad_one_shape(shape, targets_none_filler)
else:
padded_shapes[key] = pad_one_shape(shape, max_length)
return padded_shapes | b72e1463fad9740c8a265b795c4b3c5a45e42a9a | 3,652,062 |
from typing import Union
from typing import Tuple
from typing import List
def _get_child_query_node_and_out_name(
ast: Union[FieldNode, InlineFragmentNode],
child_type_name: str,
child_field_name: str,
name_assigner: IntermediateOutNameAssigner,
) -> Tuple[SubQueryNode, str]:
"""Create a query node out of ast, return node and unique out_name on field with input name.
Create a new document out of the input AST, that has the same structure as the input. For
instance, if the input AST can be represented by
out_Human {
name
}
where out_Human is a vertex field going to type Human, the resulting document will be
{
Human {
name
}
}
If the input AST starts with a type coercion, the resulting document will start with the
coerced type, rather than the original union or interface type.
The output child_node will be wrapped around this new DocumentNode. In addition, if no field
of child_field_name currently exists, such a field will be added. If there is no @output
directive on this field, a new @output directive will be added.
Args:
ast: Representing the AST that we're using to build a child node.
It is not modified by this function.
child_type_name: Name of the type to which this cross schema field leads.
child_field_name: str. If no field of this name currently exists as a part of the root
selections of the input AST, a new field will be created in the AST
contained in the output child query node
name_assigner: Object used to generate and keep track of names of newly created
@output directives.
Returns:
Tuple containing:
- The child sub query node wrapping around the input AST.
- The out_name of the @output directive uniquely identifying the field used for
stitching in this sub query node.
"""
# Get type and selections of child AST, taking into account type coercions
child_selection_set = ast.selection_set
if child_selection_set is None:
raise AssertionError("Invalid AST. child_selection_set cannot be None.")
type_coercion = try_get_inline_fragment(child_selection_set.selections)
if type_coercion is not None:
child_type_name = type_coercion.type_condition.name.value
child_selection_set = type_coercion.selection_set
child_selections: List[SelectionNode] = []
for child_selection in child_selection_set.selections:
if not isinstance(child_selection, FieldNode):
raise AssertionError(
"Expected child_selection to be of type FieldNode, but was of "
f"type {type(child_selection)}."
)
child_selections.append(child_selection)
# Get existing field with name in child
existing_child_property_field = try_get_ast_by_name_and_type(
child_selections, child_field_name, FieldNode
)
# Validate that existing_child_property_field is None or FieldNode.
# It should be impossible for this to *not* be the case, but check so that mypy is happy.
if not (
existing_child_property_field is None
or isinstance(existing_child_property_field, FieldNode)
):
raise AssertionError(
"Unreachable code reached! existing_child_property_field should be None or of type "
f"FieldNode, but was type {type(existing_child_property_field)}."
)
child_property_field = _get_property_field(
existing_child_property_field, child_field_name, None
)
# Add @output if needed, record out_name
child_property_field, child_output_name = _get_out_name_optionally_add_output(
child_property_field, name_assigner
)
# Get new child_selections by replacing or adding in new property field
child_property_fields_map, child_vertex_fields = _split_selections_property_and_vertex(
child_selections
)
child_property_fields_map[child_field_name] = child_property_field
child_selections = _get_selections_from_property_and_vertex_fields(
child_property_fields_map, child_vertex_fields
)
# Wrap around
# NOTE: if child_type_name does not actually exist as a root field (not all types are
# required to have a corresponding root vertex field), then this query will be invalid.
child_query_ast = _get_query_document(child_type_name, child_selections)
child_query_node = SubQueryNode(child_query_ast)
return child_query_node, child_output_name | c99e2a1aa7ea56600203e1550dca6a0a59eed094 | 3,652,063 |
def has_balanced_parens(exp: str) -> bool:
"""
Checks if the parentheses in the given expression `exp` are balanced,
that is, if each opening parenthesis is matched by a corresponding
closing parenthesis.
**Example:**
::
>>> has_balanced_parens("(((a * b) + c)")
False
:param exp: The expression to check.
:return: `True` if the parentheses are balanced, `False` otherwise.
"""
# Use a stack to determine if the expression is balanced.
# Ref: https://youtu.be/HJOnJU77EUs?t=75 [1:15 - 2:47]
paren_stack = []
for e in exp:
if e == '(':
paren_stack.append(e)
elif e == ')':
try:
paren_stack.pop()
except IndexError:
return False
return len(paren_stack) == 0 | f76c7cafcf6aadd0c2cb947f0c49d23835a9f6e4 | 3,652,064 |
def _is_binary(c):
"""Ensures character is a binary digit."""
return c in '01' | b763a5a8ba591b100fea64a589dcb0aea9fbcf53 | 3,652,065 |
def read_frame_positions(lmp_trj):
""" Read stream positions in trajectory file corresponding to
time-step and atom-data.
"""
ts_pos, data_pos = [], []
with open(lmp_trj, 'r') as fid:
while True:
line = fid.readline()
if not line:
break
if line.startswith('ITEM: TIMESTEP'):
ts_pos.append(fid.tell())
elif line.startswith('ITEM: ATOMS id'):
data_pos.append(fid.tell())
return ts_pos, data_pos | c168f08577e38758bf3d9d42bae8379125d7fc33 | 3,652,070 |
async def async_setup_entry(hass, config_entry):
"""Set up Enedis as config entry."""
hass.data.setdefault(DOMAIN, {})
pdl = config_entry.data.get(CONF_PDL)
token = config_entry.data.get(CONF_TOKEN)
session = async_create_clientsession(hass)
enedis = EnedisGateway(pdl=pdl, token=token, session=session)
coordinator = EnedisDataUpdateCoordinator(hass, config_entry, enedis)
await coordinator.async_config_entry_first_refresh()
if coordinator.data is None:
return False
undo_listener = config_entry.add_update_listener(_async_update_listener)
hass.data[DOMAIN][config_entry.entry_id] = {
COORDINATOR: coordinator,
CONF_PDL: pdl,
UNDO_LISTENER: undo_listener,
}
hass.config_entries.async_setup_platforms(config_entry, PLATFORMS)
async def async_reload_history(call) -> None:
await coordinator.async_load_datas_history(call)
hass.services.async_register(
DOMAIN, "reload_history", async_reload_history, schema=vol.Schema({})
)
return True | 93ee0360c509088b75935f6b94bf7d918658e86b | 3,652,071 |
import requests
def get_file_list(prefix):
""" Get file list from http prefix """
print("Fetching file list from", prefix)
k = requests.get(prefix)
if not k.ok:
raise Exception("Unable to get http directory listing")
parser = HRefParser()
parser.feed(k.content.decode())
k.close()
return parser.href_list | ca559a20e6f35f31a07e25f7f2a9dbc5db450cc0 | 3,652,072 |
def train_model(model: nn.Module, trainDataLoader: DataLoader, testDataLoader: DataLoader, epochs: int, optimizer, lossFuction, metric, device) -> dict:
"""
Training model function: it will train the model for a number of epochs, with the corresponding optimizer.
It will return the corresponding losses and metrics in a dictionary.
"""
# Send model to the corresponding device
model.to(device)
# Creating loss dictionary
losses = {
'training_batchs': [],
'training_average': [],
'testing_average': [],
'metric_average': []
}
# Iterating over number of epochs
for epoch in range(epochs):
print(f'Starting epoch {epoch + 1}')
# Training
epoch_loss = training_epoch(
model, trainDataLoader, testDataLoader, lossFuction, optimizer, metric, device)
# Updating loss dictionary
for key, loss in epoch_loss.items():
            try:
                losses[key].extend(loss)
            except TypeError:
                # loss is a scalar, not an iterable, so append it instead
                losses[key].append(loss)
# print training stats after epoch
print(f'Results for epoch {epoch + 1}')
print('------------------------------')
print(f'Training loss average: {epoch_loss["training_average"]}')
print(f'Test loss average: {epoch_loss["testing_average"]}')
print(f'Metric average: {epoch_loss["metric_average"]}')
return losses | bd971d4d5063ad83188e3093f46a6dba86ac995b | 3,652,073 |
import builtins
def _has_profile():
"""Check whether we have kernprof & kernprof has given us global 'profile'
object."""
return kernprof is not None and hasattr(builtins, 'profile') | 3cbb4a0539efbadcea22e2d39ee520e14d7c6da3 | 3,652,074 |
from typing import OrderedDict
def routing_tree_to_tables(routes, net_keys):
"""Convert a set of
:py:class:`~rig.place_and_route.routing_tree.RoutingTree` s into a per-chip
set of routing tables.
.. warning::
A :py:exc:`rig.routing_table.MultisourceRouteError` will
be raised if entries with identical keys and masks but with differing
routes are generated. This is not a perfect test, entries which would
otherwise collide are not spotted.
.. warning::
The routing trees provided are assumed to be correct and continuous
(not missing any hops). If this is not the case, the output is
undefined.
.. note::
If a routing tree has a terminating vertex whose route is set to None,
that vertex is ignored.
Parameters
----------
routes : {net: :py:class:`~rig.place_and_route.routing_tree.RoutingTree`, \
...}
The complete set of RoutingTrees representing all routes in the system.
(Note: this is the same data structure produced by routers in the
:py:mod:`~rig.place_and_route` module.)
net_keys : {net: (key, mask), ...}
The key and mask associated with each net.
Returns
-------
{(x, y): [:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
"""
# Pairs of inbound and outbound routes.
InOutPair = namedtuple("InOutPair", "ins, outs")
# {(x, y): {(key, mask): _InOutPair}}
route_sets = defaultdict(OrderedDict)
for net, routing_tree in iteritems(routes):
key, mask = net_keys[net]
# The direction is the Links entry which describes the direction in
# which we last moved to reach the node (or None for the root).
for direction, (x, y), out_directions in routing_tree.traverse():
# Determine the in_direction
in_direction = direction
if in_direction is not None:
in_direction = direction.opposite
# Add a routing entry
if (key, mask) in route_sets[(x, y)]:
# If there is an existing route set raise an error if the out
# directions are not equivalent.
if route_sets[(x, y)][(key, mask)].outs != out_directions:
raise MultisourceRouteError(key, mask, (x, y))
# Otherwise, add the input directions as this represents a
# merge of the routes.
route_sets[(x, y)][(key, mask)].ins.add(in_direction)
else:
# Otherwise create a new route set
route_sets[(x, y)][(key, mask)] = \
InOutPair({in_direction}, set(out_directions))
# Construct the routing tables from the route sets
routing_tables = defaultdict(list)
for (x, y), routes in iteritems(route_sets):
for (key, mask), route in iteritems(routes):
# Add the route
routing_tables[(x, y)].append(
RoutingTableEntry(route.outs, key, mask, route.ins)
)
return routing_tables | 50384fa0f834f6311cea3b2901b6723ca3fab3c7 | 3,652,076 |
def extract_response_objects(image_file, mask_file, stim_file, input_dict):
"""inputs are file names for aligned images, binary mask, and unprocessed stimulus file
outputs a list of response objects"""
# read files
I = read_tifs(image_file)
mask = read_tifs(mask_file)
labels = segment_ROIs(mask)
print('number of ROIs = ' + str(np.max(labels)))
# process stimulus file
stim_data, stim_data_OG, header = count_frames(stim_file)
if (len(I)) != int(stim_data[-1][-1]):
print("number of images does not match stimulus file")
print('stimulus frames = ' + str(int(stim_data[-1][-1])))
print('image frames = ' + str(len(I)))
# stim_data = fix_dropped_frames(len(I),float(input_dict['time_interval']),stim_data,stim_data_OG,int(input_dict['gt_index']))
# get frames, relative time, stimuulus type, and stimulus state from stim data
fr, rt, st = parse_stim_file(stim_data,
rt_index=int(input_dict['rt_index']),
st_index=input_dict['st_index'])
ss = define_stim_state(rt, float(input_dict['on_time']),
float(input_dict['off_time']))
    # measure fluorescence intensities in each ROI
responses, num, labels = measure_multiple_ROIs(I, mask)
# load response objects
response_objects = []
for r, n in zip(responses, num):
ro = ResponseClassSimple.Response(F=r, stim_time=rt, stim_state=ss,
ROI_num=n, stim_type=st)
ro.sample_name = input_dict['sample_name']
ro.reporter_name = input_dict['reporter_name']
ro.driver_name = input_dict['driver_name']
ro.stimulus_name = input_dict['stimulus_name']
ro.time_interval = float(input_dict['time_interval'])
response_objects.append(ro)
return response_objects, stim_data, header, labels | 95b7a5e831d9ab0703c51d41966b36babf52b24d | 3,652,077 |
import torch
def get_top_diff_loc(imgs, ref_imgs, crop_size, grid_size, device, topk=10):
"""Randomly get a crop bounding box."""
assert imgs.shape == ref_imgs.shape
batches = imgs.size(0)
img_size = imgs.shape[2:]
crop_size = _pair(crop_size)
grid_size = _pair(grid_size)
stride_h = (img_size[0] - crop_size[0]) // (grid_size[0] - 1)
stride_w = (img_size[1] - crop_size[1]) // (grid_size[1] - 1)
diff_imgs = imgs - ref_imgs
diff_list = []
for i in range(grid_size[0]):
for j in range(grid_size[1]):
crop_diff = diff_imgs[:, :,
i * stride_h:i * stride_h + crop_size[0],
j * stride_w:j * stride_w + crop_size[1]]
diff_list.append(crop_diff.abs().sum(dim=(1, 2, 3)))
# [batches, grid_size**2]
diff_sum = torch.stack(diff_list, dim=1)
diff_topk_idx = torch.argsort(diff_sum, dim=1, descending=True)[:, :topk]
select_idx = diff_topk_idx
idx_i = select_idx // grid_size[1]
idx_j = select_idx % grid_size[1]
crop_y1, crop_y2 = idx_i * stride_h, idx_i * stride_h + crop_size[0]
crop_x1, crop_x2 = idx_j * stride_w, idx_j * stride_w + crop_size[1]
center = torch.stack([(crop_x1 + crop_x2) * 0.5,
(crop_y1 + crop_y2) * 0.5],
dim=-1).float()
return center | 2e35cc56a484432dd1c1ef05f38e01079414eecb | 3,652,078 |
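A quick usage sketch with random tensors; shapes and sizes are illustrative.
imgs = torch.rand(2, 3, 224, 224)      # two frames, three channels
ref_imgs = torch.rand(2, 3, 224, 224)  # reference frames of the same shape
centers = get_top_diff_loc(imgs, ref_imgs, crop_size=64, grid_size=7,
                           device='cpu', topk=10)
print(centers.shape)  # torch.Size([2, 10, 2]): (x, y) centers of the 10 most-changed crops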
import json
def decode(file):
"""
    This function creates a dictionary out of a given file using the standard json module.
    :param file: The file to decode.
    :return: A tuple (problem dictionary, problem type), or None if something went wrong (i.e. the given file \
    is invalid).
    """
    # JSON to dictionary
tmp_res = None
try:
with open(file, "r") as f:
tmp_res = json.load(f)
except Exception as e:
print(e)
return None
# Gets the type of problem handled here
problem_type = ProblemType.identify_problem(tmp_res)
res = {}
# Gets the field's limits + the bottom left and top right points of the field
res["field_limits"] = tmp_res["field_limits"]
res["bottom_left"] = Point(res["field_limits"][0][0], res["field_limits"][1][0])
res["top_right"] = Point(res["field_limits"][0][1], res["field_limits"][1][1])
# Gets the list of goals
res["goals"] = []
for goal in tmp_res["goals"]:
posts = goal["posts"]
direction = goal["direction"]
post1 = Point(posts[0][0], posts[0][1])
post2 = Point(posts[1][0], posts[1][1])
direction = Vector(direction[0], -direction[1])
goal = Goal(post1, post2, direction)
res["goals"].append(goal)
# Gets the list of opponents
res["opponents"] = []
for opponent in tmp_res["opponents"]:
res["opponents"].append(Opponent(Point(opponent[0], opponent[1])))
# Gets the radius of the robots
res["radius"] = tmp_res["robot_radius"]
    # Gets theta and pos steps for opponents' shots and defenders' positions respectively
res["theta_step"] = tmp_res["theta_step"]
res["pos_step"] = tmp_res["pos_step"]
# Gets the list of defenders if the problem is initial positions
if problem_type == ProblemType.INITIAL_POS:
res["defenders"] = []
for defender in tmp_res["defenders"]:
res["defenders"].append(Defender(Point(defender[0], defender[1]), res["radius"]))
# Gets the min dist if the problem is min dist
if problem_type == ProblemType.MIN_DIST:
res["min_dist"] = tmp_res["min_dist"]
# Gets the goalkeeper area if the problem is goal keeper
if problem_type == ProblemType.GOAL_KEEPER:
res["goalkeeper_area"] = tmp_res["goalkeeper_area"]
res["gk_bottom_left"] = Point(res["goalkeeper_area"][0][0], res["goalkeeper_area"][1][0])
res["gk_top_right"] = Point(res["goalkeeper_area"][0][1], res["goalkeeper_area"][1][1])
if problem_type == ProblemType.MAX_SPEED:
res["ball_max_speed"] = tmp_res["ball_max_speed"]
res["robot_max_speed"] = tmp_res["robot_max_speed"]
return (res, problem_type) | bfd0671f9e6bb06faa02a3179c1a5e18a607882c | 3,652,079 |
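For illustration, a minimal input file that the keys read above suggest decode() could handle (an initial-positions style problem with a "defenders" entry); the exact schema and the extra per-problem fields are assumptions.
example_problem = """{
    "field_limits": [[-4.5, 4.5], [-3.0, 3.0]],
    "goals": [{"posts": [[-4.5, -0.5], [-4.5, 0.5]], "direction": [1, 0]}],
    "opponents": [[2.0, 1.0], [3.0, -1.0]],
    "robot_radius": 0.09,
    "theta_step": 0.05,
    "pos_step": 0.1,
    "defenders": [[-3.0, 0.0]]
}"""
# After writing the text above to problem.json:
# res, problem_type = decode("problem.json")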
import numpy as _np
def kron_compact(x):
"""Calculate the unique terms of the Kronecker product x ⊗ x.
Parameters
----------
x : (n,) or (n,k) ndarray
If two-dimensional, the product is computed column-wise (Khatri-Rao).
Returns
-------
x ⊗ x : (n(n+1)/2,) or (n(n+1)/2,k) ndarray
The "compact" Kronecker product of x with itself.
"""
if x.ndim not in (1,2):
raise ValueError("x must be one- or two-dimensional")
return _np.concatenate([x[i]*x[:i+1] for i in range(x.shape[0])], axis=0) | 55c2c89fa7eb9f7c2c1a3a296798b022c158c399 | 3,652,080 |
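A worked example for a length-3 vector: the result holds the n(n+1)/2 = 6 unique pairwise products, grouped row by row.
import numpy as _np
x = _np.array([1., 2., 3.])
print(kron_compact(x))  # [1. 2. 4. 3. 6. 9.]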
def record_speech_sequentially(min_sound_lvl=0.01, speech_timeout_secs=1.):
"""Records audio in sequential audio files.
Args:
min_sound_lvl: The minimum sound level as measured by root mean square
        speech_timeout_secs: Stop recording a snippet after this many seconds of silence, as measured against min_sound_lvl
Returns:
The recorded audio samples.
"""
samples = []
i = 0
while True:
cmd = input("> ").encode()
if cmd == KeyInput.QUIT.value:
return samples
elif cmd == KeyInput.REDO.value:
print("Index now at {}.".format(i))
i = max(i - 1, 0)
try:
samples.pop()
except IndexError:
pass
continue
with AudioSnippetGenerator() as generator:
timeout_len = int(speech_timeout_secs * generator.sr / generator.chunk_size)
active_count = timeout_len
curr_snippet = None
for audio in generator:
            if curr_snippet is not None:
curr_snippet.append(audio)
else:
curr_snippet = audio
if audio.amplitude_rms() < min_sound_lvl:
active_count -= 1
else:
active_count = timeout_len
print("Time left: {:<10}".format(active_count), end="\r")
if active_count == 0:
i += 1
samples.append(curr_snippet)
print("Recorded #{:<10}".format(i))
break | f726f90575cf49a7de0608473f16a12f2a80d3cf | 3,652,081 |
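The loop above compares the encoded keyboard input against KeyInput values; a minimal sketch of what that enum might look like — the actual key bindings are an assumption, not taken from the source.
from enum import Enum

class KeyInput(Enum):
    QUIT = b"q"  # hypothetical binding: stop and return the recorded samples
    REDO = b"r"  # hypothetical binding: discard the previous snippet and re-record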
def home():
"""
    Display 'Hello World' on the locally hosted website's home page.
"""
return 'Hello World' | f65a035d679878cfd897c9ea9c79fc41cf76db95 | 3,652,082 |
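The snippet omits the application object and route decorator; a minimal sketch of how such a view function is typically wired up, assuming Flask.
from flask import Flask

app = Flask(__name__)

@app.route('/')
def home():
    """Display 'Hello World' on the locally hosted website's home page."""
    return 'Hello World'

if __name__ == '__main__':
    app.run()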
import numpy as np
def selecaoEscalar(Mcorr, criterios, N=0, a1=0.5, a2=0.5):
""" Performs a scalar feature selection which orders all features individually,
from the best to the worst to separate the classes.
INPUTS
- Mcorr: Correlation matrix of all features.
    - criterios: Separability criterion value for each individual feature.
    - N: Number of best features to be returned.
    - a1: Weight for criterios.
- a2: Weight for Mcorr.
OUTPUTS
- ordem: Tuple with the order of features.
- M: Tuple with criteria for each feature.
"""
L = Mcorr.shape[0]
if len(criterios.shape) != 1:
criterios = criterios[0]
if N==0 or N > len(criterios):
N = len(criterios)
        print('You either did not specify N or you gave a number greater than the number of characteristics.')
print('Function will return all {} characteristics.'.format(N))
Mcorr = abs(Mcorr)
ordem = []
M = []
    ordem.append(int(np.argmax(criterios)))
M.append(criterios[int(ordem[0])])
Mcorr[:, int(ordem[0])] = 1
fator = np.zeros(N)
for n in range(1, N):
index = np.linspace(0, L-1, L)
fator = np.sum(Mcorr[tuple(ordem), :], axis=0)
MK = a1*criterios - a2*fator/n
MK = np.delete(MK, ordem)
index = np.delete(index, ordem)
M.append(max(MK))
        ordem.append(int(index[int(np.argmax(MK))]))
ordem = tuple(ordem)
M = tuple(M)
return ordem, M | 713a7c8543cefdef8f4a35dd970d326fb49229a1 | 3,652,084 |
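A small usage sketch with synthetic data; the criterion values are random placeholders where a real class-separability measure would normally go.
import numpy as np
rng = np.random.default_rng(0)
X = rng.standard_normal((200, 6))        # 200 samples, 6 features (synthetic)
Mcorr = np.corrcoef(X, rowvar=False)     # 6x6 feature correlation matrix
criterios = rng.random(6)                # placeholder separability scores
ordem, M = selecaoEscalar(Mcorr, criterios, N=3)
print(ordem)  # indices of the 3 selected features, best first
print(M)      # criterion value achieved at each selection step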
def sum_by_letter(list_of_dicts, letter):
"""
    :param list_of_dicts: A list of dictionaries, each with 'letter' and 'number' keys.
    :param letter: The letter to match against each dictionary's 'letter' value.
    :return: The sum of the 'number' values for entries whose 'letter' matches.
    """
total = 0
for d in list_of_dicts:
if d['letter'] == letter:
total += d['number']
return total | bffc5990eaa9e352d60d86d40b8a8b7070fd00c0 | 3,652,085 |
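For example:
dicts = [{'letter': 'a', 'number': 2},
         {'letter': 'b', 'number': 5},
         {'letter': 'a', 'number': 3}]
print(sum_by_letter(dicts, 'a'))  # 5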