content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---|
import random
import multiprocessing
import concurrent.futures
import networkx as nx
import numpy as np
from tqdm import tqdm
def solve_lineage_instance(
_target_nodes,
prior_probabilities=None,
method="hybrid",
threads=8,
hybrid_cell_cutoff=200,
hybrid_lca_cutoff=None,
time_limit=1800,
max_neighborhood_size=10000,
seed=None,
num_iter=-1,
weighted_ilp=False,
fuzzy=False,
probabilistic=False,
plot_diagnostics=True,
maximum_alt_solutions=100,
greedy_minimum_allele_rep=1.0,
n_neighbors=10,
missing_data_mode="lookahead",
lookahead_depth=3,
):
"""
Aggregated lineage solving method, which given a set of target nodes, will find the maximum parsimony tree
accounting the given target nodes
:param target_nodes:
A list of target nodes, where each node is in the form 'Ch1|Ch2|....|Chn'
:param prior_probabilities:
A nested dictionary containing prior probabilities for [character][state] mappings
where characters are in the form of integers, and states are in the form of strings,
and values are the probability of mutation from the '0' state.
:param method:
The method used for solving the problem ['ilp, 'hybrid', 'greedy']
- ilp: Attempts to solve the problem based on steiner tree on the potential graph
(Recommended for instances with several hundred samples at most)
- greedy: Runs a greedy algorithm to find the maximum parsimony tree based on choosing the most occurring split in a
top down fasion (Algorithm scales to any number of samples)
- hybrid: Runs the greedy algorithm until there are less than hybrid_subset_cutoff samples left in each leaf of the
tree, and then returns a series of small instance ilp is then run on these smaller instances, and the
resulting graph is created by merging the smaller instances with the greedy top-down tree
:param threads:
The number of threads to use in parallel for the hybrid algorithm
:param hybrid_subset_cutoff:
The maximum number of nodes allowed before the greedy algorithm terminates for a given leaf node
:return:
A reconstructed subgraph representing the nodes
"""
if method == "hybrid":
assert (
hybrid_cell_cutoff is None or hybrid_lca_cutoff is None
), "You can only use one type of cutoff in Hybrid"
target_nodes = [n.get_character_string() + "_" + n.name for n in _target_nodes]
node_name_dict = dict(
zip(
[n.split("_")[0] for n in target_nodes],
[n + "_target" for n in target_nodes],
)
)
if seed is not None:
np.random.seed(seed)
random.seed(seed)
# clip identifier for now, but make sure to add later
target_nodes = [n.split("_")[0] for n in target_nodes]
# target_nodes = list(set(target_nodes))
master_root = root_finder(target_nodes)
if method == "ilp":
subgraphs, r, pid, graph_sizes = find_good_gurobi_subgraph(
master_root,
target_nodes,
node_name_dict,
prior_probabilities,
time_limit,
1,
max_neighborhood_size,
seed=seed,
num_iter=num_iter,
weighted=weighted_ilp,
n_neighbors=n_neighbors,
)
subgraph = subgraphs[0]
rdict = {}
target_seen = []
for n in subgraph:
spl = n.split("_")
nn = Node(n, spl[0].split("|"), is_target=False)
if len(spl) == 2:
if "target" in n and nn.char_string not in target_seen:
nn.is_target = True
if len(spl) > 2:
if "target" in n and nn.char_string not in target_seen:
nn.is_target = True
nn.pid = spl[-1]
if nn.is_target:
target_seen.append(nn.char_string)
rdict[n] = nn
state_tree = nx.relabel_nodes(subgraph, rdict)
return (
Cassiopeia_Tree(
method="ilp", network=state_tree, name="Cassiopeia_state_tree"
),
graph_sizes,
)
if method == "hybrid":
neighbors, distances = None, None
if missing_data_mode == "knn":
print("Computing neighbors for imputing missing values...")
neighbors, distances = find_neighbors(target_nodes, n_neighbors=n_neighbors)
network, target_sets = greedy_build(
target_nodes,
neighbors,
distances,
priors=prior_probabilities,
cell_cutoff=hybrid_cell_cutoff,
lca_cutoff=hybrid_lca_cutoff,
fuzzy=fuzzy,
probabilistic=probabilistic,
minimum_allele_rep=greedy_minimum_allele_rep,
missing_data_mode=missing_data_mode,
lookahead_depth=lookahead_depth,
)
print(
"Using "
+ str(min(multiprocessing.cpu_count(), threads))
+ " threads, "
+ str(multiprocessing.cpu_count())
+ " available.",
flush=True,
)
executor = concurrent.futures.ProcessPoolExecutor(
min(multiprocessing.cpu_count(), threads)
)
print("Sending off Target Sets: " + str(len(target_sets)), flush=True)
# just in case you've hit a target node during the greedy reconstruction, append name at this stage
# so the composition step doesn't get confused when trying to join to the root.
network = nx.relabel_nodes(network, node_name_dict)
futures = [
executor.submit(
find_good_gurobi_subgraph,
root,
targets,
node_name_dict,
prior_probabilities,
time_limit,
1,
max_neighborhood_size,
seed,
num_iter,
weighted_ilp,
n_neighbors,
)
for root, targets in target_sets
]
concurrent.futures.wait(futures)
base_network = network.copy()
base_rdict = {}
for n in base_network:
spl = n.split("_")
nn = Node(n, spl[0].split("|"), is_target=False)
if len(spl) > 1:
nn.pid = spl[1]
if spl[0] in node_name_dict:
nn.is_target = True
base_rdict[n] = nn
base_network = nx.relabel_nodes(base_network, base_rdict)
num_solutions = 1 # keep track of number of possible solutions
potential_graph_sizes = []
all_res = []
alt_solutions = {}
for future in futures:
results, r, pid, graph_sizes = future.result()
potential_graph_sizes.append(graph_sizes)
subproblem_solutions = []
for res in results:
new_names = {}
for n in res:
if res.in_degree(n) == 0 or n == r:
new_names[n] = n
else:
new_names[n] = n + "_" + str(pid)
res = nx.relabel_nodes(res, new_names)
subproblem_solutions.append(res)
num_solutions *= len(subproblem_solutions)
all_res.append(subproblem_solutions)
rt = [
n
for n in subproblem_solutions[0]
if subproblem_solutions[0].in_degree(n) == 0
][0]
alt_solutions[base_rdict[rt]] = subproblem_solutions
network = nx.compose(network, subproblem_solutions[0])
rdict = {}
target_seen = []
for n in network:
spl = n.split("_")
nn = Node(n, spl[0].split("|"), is_target=False)
if len(spl) == 2:
if "target" in n and nn.char_string not in target_seen:
nn.is_target = True
if len(spl) > 2:
if "target" in n and nn.char_string not in target_seen:
nn.is_target = True
nn.pid = spl[-1]
if nn.is_target:
target_seen.append(nn.char_string)
rdict[n] = nn
state_tree = nx.relabel_nodes(network, rdict)
# create alternative solutions
pbar = tqdm(
total=len(alt_solutions.keys()), desc="Enumerating alternative solutions"
)
for r in alt_solutions.keys():
soln_list = []
# get original target char strings
# sub_targets = [n.char_string for n in state_tree.successors(r) if n.is_target]
for res in alt_solutions[r]:
rdict = {}
for n in res:
spl = n.split("_")
nn = Node(n, spl[0].split("|"), is_target=False)
if len(spl) > 2:
nn.pid = spl[-1]
rdict[n] = nn
res = nx.relabel_nodes(res, rdict)
soln_list.append(res)
alt_solutions[r] = soln_list
pbar.update(1) # update progress bar
# iterate through all possible solutions
# alt_solutions = []
# if num_solutions > 1:
# num_considered_solutions = 0
# sol_identifiers = [] # keep track of solutions already sampled
# # we'll sample maximum_alt_solutions from the set of possible solutions
# pbar = tqdm(
# total=maximum_alt_solutions, desc="Enumerating alternative solutions"
# )
# while num_considered_solutions < min(num_solutions, maximum_alt_solutions):
# current_sol = []
# for res_list in all_res:
# current_sol.append(np.random.choice(len(res_list)))
# if tuple(current_sol) not in sol_identifiers:
# new_network = base_network.copy()
# for i in range(len(current_sol)):
# res_list = all_res[i]
# net = res_list[current_sol[i]]
# new_network = nx.compose(new_network, net)
# rdict = {}
# target_seen = []
# for n in new_network:
# spl = n.split("_")
# nn = Node("state-node", spl[0].split("|"), is_target=False)
# if len(spl) == 2:
# if "target" in n and n not in target_seen:
# nn.is_target = True
# if len(spl) > 2:
# if 'target' in n and n not in target_seen:
# nn.is_target = True
# nn.pid = spl[-1]
# if nn.is_target:
# target_seen.append(nn.char_string)
# rdict[n] = nn
# new_network = nx.relabel_nodes(new_network, rdict)
# alt_solutions.append(new_network)
# sol_identifiers.append(tuple(current_sol))
# num_considered_solutions += 1
# pbar.update(1) # update progress bar
return (
Cassiopeia_Tree(
method="hybrid",
network=state_tree,
name="Cassiopeia_state_tree",
alternative_solutions=alt_solutions,
base_network=base_network,
),
potential_graph_sizes,
)
if method == "greedy":
neighbors, distances = None, None
if missing_data_mode == "knn":
print("Computing neighbors for imputing missing values...")
neighbors, distances = find_neighbors(target_nodes, n_neighbors=n_neighbors)
graph = greedy_build(
target_nodes,
neighbors,
distances,
priors=prior_probabilities,
cell_cutoff=-1,
lca_cutoff=None,
fuzzy=fuzzy,
probabilistic=probabilistic,
minimum_allele_rep=greedy_minimum_allele_rep,
missing_data_mode=missing_data_mode,
lookahead_depth=lookahead_depth,
)[0]
rdict = {}
for n in graph:
spl = n.split("_")
nn = Node(n, spl[0].split("|"), is_target=False)
if len(spl) > 1:
nn.pid = spl[1]
if spl[0] in node_name_dict and len(spl) == 1:
nn.is_target = True
rdict[n] = nn
state_tree = nx.relabel_nodes(graph, rdict)
return (
Cassiopeia_Tree(
method="greedy", network=state_tree, name="Cassiopeia_state_tree"
),
None,
)
else:
raise Exception(
"Please specify one of the following methods: ilp, hybrid, greedy"
)
|
6babbb6c071a17f9a5bfe2fbe402d1cb72a6c330
| 33,523 |
def split_in_chunks(
data,
chunk_size,
train_size=None,
val_size=None,
test_size=None,
shuffle=True,
seed=None,
):
"""Split data into train-test, where chunks of data are held together
Assume `data` is a list of numpy arrays
"""
chunks = []
for rollout in data:
for idx in range(0, len(rollout), chunk_size):
chunk = rollout[idx : idx + chunk_size]
chunks.append(chunk)
return split(chunks, train_size, val_size, test_size, shuffle, seed)
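# A minimal usage sketch. The `split` helper called above is not part of this snippet,
# so the stub below is a hypothetical stand-in that just partitions the chunk list by
# train_size; it exists only to make the example runnable.
import numpy as np
def split(chunks, train_size, val_size, test_size, shuffle, seed):  # hypothetical stub
    n_train = int(len(chunks) * (train_size or 0.8))
    return chunks[:n_train], chunks[n_train:]
rollouts = [np.arange(10), np.arange(7)]
train, test = split_in_chunks(rollouts, chunk_size=4, train_size=0.6, shuffle=False)
# chunks kept together: [0..3], [4..7], [8, 9] from the first rollout and [0..3], [4, 5, 6] from the second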
|
fad3ebdc99731ad95ff0200e70e2e64d4a9acdc8
| 33,524 |
import re
def check_id(id):
"""
Check whether a id is valid
:param id: The id
:return: The result
"""
return bool(re.match(r"^[a-f0-9]{24}$", id))
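# Usage sketch: a valid id is a 24-character lowercase hex string (e.g. a MongoDB ObjectId).
assert check_id("5f2b6c1e9d3a4b0012345678") is True
assert check_id("not-a-valid-id") is False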
|
f336d34de12f4f5520d4c88a838ebdb396857d2b
| 33,526 |
def totaled_tbr_no_lgtm(cc, sql_time_specification):
"""Counts the number of commits with a TBR that have not been lgtm'ed
in a given timeframe
Args:
cc(cursor)
sql_time_specification(str): a sql command to limit the dates of the
returned results
Return:
count(int): a count of all commits with a TBR and no lgtm
results(list): a list of lists with all tbr'ed commits with no lgtm in the
format [rietveld_url, git_timestamp, git_subject, git_hash]
"""
cc.execute("""SELECT git_commit.review_url, git_commit.timestamp,
git_commit.subject, git_commit.hash
FROM review
INNER JOIN git_commit
ON review.review_url = git_commit.review_url
INNER JOIN commit_people
ON commit_people.git_commit_hash = git_commit.hash
LEFT JOIN (
SELECT review_url, COUNT(*) AS c
FROM review_people
WHERE type = 'lgtm'
GROUP BY review_url) lgtm_count
ON review.review_url = lgtm_count.review_url
                  WHERE (lgtm_count.c = 0 OR lgtm_count.c IS NULL)
                  AND commit_people.type = 'author' AND %s""" % sql_time_specification)
result = cc.fetchall()
count = len(result)
formatted_data = []
for data in result:
subject = data[2]
formatted_data.append([data[0], data[1].strftime("%Y-%m-%d %H:%M:%S"),
subject.replace('-', ' '), data[3]])
results = sorted(formatted_data, key=lambda x: x[1], reverse=True)
return count, results
|
25666c4f741f6bdd2c8358468a54c6e261cc57e3
| 33,527 |
import unicodedata
def normalize_text(text: str) -> str:
"""Normalize the text to remove accents
and ensure all the characters are valid
ascii symbols.
Args:
text : Input text
Returns:
Output text
"""
nfkd_form = unicodedata.normalize("NFKD", text)
only_ascii = nfkd_form.encode("ASCII", "ignore")
return only_ascii.decode()
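# Usage sketch: accented characters are decomposed and the non-ASCII marks are dropped.
assert normalize_text("Café déjà vu") == "Cafe deja vu"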
|
fa1c5362caa9946e79152f9e14ccf2131754f258
| 33,528 |
def rotate_y(x, z, cosangle, sinangle):
"""3D rotaion around *y* (roll). *x* and *z* are values or arrays.
Positive rotation is for positive *sinangle*. Returns *xNew, zNew*."""
return cosangle*x + sinangle*z, -sinangle*x + cosangle*z
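# Usage sketch: rotating the point (x, z) = (1, 0) by 90 degrees about y.
import math
x_new, z_new = rotate_y(1.0, 0.0, math.cos(math.pi / 2), math.sin(math.pi / 2))
# x_new is ~0.0 and z_new is ~-1.0 (up to floating-point error)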
|
0a1b28548f771b9ca8cec29ba4060be7b0919182
| 33,529 |
import base64
import html
def json_file_upload(contents, file_names, dates):
"""
Reads a JSON file from disk and saves it in the default path
The goal is to keep that file over there for the Draw JSON
Function to read and draw it.
Args:
        contents, file_names, dates: inputs from the Dash UI
"""
global arm, dp
global default_JSON_file_Path
if contents is not None:
msg = "{} is loaded".format(file_names)
print (msg)
        try:
            _, content_string = contents.split(',')
            decoded = base64.b64decode(content_string)
            data = str(decoded.decode('utf-8'))
        except Exception:
            print("Couldn't read the JSON file")
            return "Couldn't read the JSON file"
        dp.write_dic_to_json_file(data, default_JSON_file_Path)
fig = quick_draw_graph(default_JSON_file_Path)
return (html.Div([html.Pre(data)]), {'visibility': 'visible'}, fig)
return "No file is loaded yet"
|
88d06880fb4e102260ef7df6f931ee541d8c98fc
| 33,530 |
import getpass
def ask_credential(login=None, password=None):
""" Ask for a login and a password when not specified.
Parameters
----------
login: str, default None
a login.
    password: str, default None
a password.
Returns
-------
login: str, default None
a login.
    password: str, default None
a password.
"""
if login is None:
        login = input("Login:")
if password is None:
password = getpass.getpass("Password for " + login + ":")
return login, password
|
55ba7425cd2016212c897345c21b6d24ce3d8578
| 33,532 |
import numpy
def zeros_like(a, dtype=None, bohrium=None):
"""
Return an array of zeros with the same shape and type as a given array.
With default parameters, is equivalent to ``a.copy().fill(0)``.
Parameters
----------
a : array_like
The shape and data-type of `a` define these same attributes of
the returned array.
dtype : data-type, optional
Overrides the data type of the result.
bohrium : boolean, optional
Determines whether it is a Bohrium-enabled array or a regular NumPy array
Returns
-------
out : ndarray
Array of zeros with the same shape and type as `a`.
See Also
--------
ones_like : Return an array of ones with shape and type of input.
empty_like : Return an empty array with shape and type of input.
zeros : Return a new array setting values to zero.
ones : Return a new array setting values to one.
empty : Return a new uninitialized array.
Notes
-----
The order of the data in memory is always row-major (C-style).
Examples
--------
>>> x = np.arange(6)
>>> x = x.reshape((2, 3))
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> np.zeros_like(x)
array([[0, 0, 0],
[0, 0, 0]])
>>> y = np.arange(3, dtype=np.float)
>>> y
array([ 0., 1., 2.])
>>> np.zeros_like(y)
array([ 0., 0., 0.])
"""
if dtype is None:
dtype = a.dtype
if bohrium is None:
bohrium = bhary.check(a)
if bohrium and not dtype_support(dtype):
_warn_dtype(dtype, 3)
return numpy.zeros_like(a, dtype=dtype)
b = empty_like(a, dtype=dtype, bohrium=bohrium)
b[...] = b.dtype.type(0)
return b
|
6d831ead77b55229121f85d005a426e86565353a
| 33,533 |
import random
def rand():
"""
Returns a random number.
"""
return random.random()
|
efad85c9f169d39358ed034a29f166df175d0ea7
| 33,534 |
def read_counters():
"""read_counters()
Get current counts and reset counters.
:rtype: list(int)
:raises PapiInvalidValueError: One or more of the arguments is invalid
(this error should not happen with PyPAPI).
:raises PapiSystemError: A system or C library call failed inside PAPI.
"""
values = ffi.new("long long[]", _counter_count)
rcode = lib.PAPI_read_counters(values, _counter_count)
return rcode, ffi.unpack(values, _counter_count)
|
d82c3b186aadc0b61e848a838549cf8393fa1010
| 33,535 |
def is_monotonic_decreasing(series_or_index):
"""
Return boolean scalar if values in the object are
monotonic_decreasing.
Returns
-------
Scalar
"""
return check_monotonic(series_or_index, decreasing=True, strict=False)
|
3b5c2f989d53cc96fc6e9131cb2b3a78945befaa
| 33,536 |
def mprint(m,fmt):
"""
This function will print a VSIPL matrix or vector suitable for pasting into Octave or Matlab.
usage: mprint(<vsip matrix/vector>, fmt)
fmt is a string corresponding to a simple fmt statement.
For instance '%6.5f' prints as 6 characters wide with 5 decimal digits.
Note format converts this statement to '% 6.5f' or '%+6.5f' so keep
the input simple.
"""
def _fmt1(c):
if c != '%':
return c
else:
return '% '
def _fmt2(c):
if c != '%':
return c
else:
return '%+'
def _fmtfunc(fmt1,fmt2,y):
x = vsip.cscalarToComplex(y)
if type(x) == complex:
return fmt1 % x.real + fmt2 % x.imag + "i"
else:
return fmt % x
tm=['mview_d','mview_f','cmview_d','cmview_f','mview_i','mview_uc','mview_si','mview_bl']
tv=['vview_d','vview_f','cvview_d','cvview_f','vview_i','vview_uc','vview_si','vview_bl','vview_vi','vview_mi']
t=vsip.getType(m)[1]
tfmt=[_fmt1(c) for c in fmt]
fmt1 = "".join(tfmt)
tfmt=[_fmt2(c) for c in fmt]
fmt2 = "".join(tfmt)
if t in tm:
cl=vsip.getcollength(m)
rl=vsip.getrowlength(m)
for i in range(cl):
M=[]
for j in range(rl):
M.append(_fmtfunc(fmt1,fmt2,vsip.get(m,(i,j))))
if i == 0:
print("["+" ".join(M) + ";")
elif i < cl-1:
print(" "+" ".join(M) + ";")
else:
print(" "+" ".join(M) + "]")
elif t in tv:
l=vsip.getlength(m)
V=[_fmtfunc(fmt1,fmt2,vsip.get(m,i)) for i in range(l)]
print("[" + " ".join(V) + "]")
else:
print('Object not VSIP vector or matrix')
|
36b555720a8908f2552dbbec7f03c648e34f168a
| 33,537 |
import requests
def fetch_reviews(app_id, page_num):
"""Fetch a single page of reviews for a given app_id.
:param app_id: The ID of the app in the app store.
:param page_num: The page of reviews to fetch.
:return: A list of Review objects.
"""
# page=1 is the same as if the "page" param was left off.
url = "https://itunes.apple.com/us/rss/customerreviews/page={page_num}/id={app_id}/sortBy=mostRecent/json".format(
page_num=page_num,
app_id=app_id,
)
r = requests.get(url)
feed_json = r.json()["feed"]
return [Review(entry) for entry in feed_json["entry"] if Review.is_review(entry)]
|
99b71a41ae7bc96e40d6d1798dd676c6a889f80b
| 33,539 |
from flask import jsonify, request
def graph_push_point(graph_id):
"""
Push points to graph
"""
request_data, error = parseHTTPRequest(request, ["value"])
if error:
response = jsonify({
"error": error
})
return response, 422
try:
# data = {"value":10, "date": datetime.datetime.utcnow()}
operation = {
"$push": {
"points": {
"value": request_data["value"],
}
}
}
if "date" in request_data:
operation["$push"]["points"].update({
"date": request_data["date"]
})
deform_response = token_client.documents.update(
collection=schemas.GRAPH_COLLECTION_ID,
filter={
"_id": graph_id
},
operation=operation
)
response = jsonify({
"point": operation["$push"]["points"]
})
return response, 201
except Exception as e:
response = jsonify({
"error": str(e)
})
return response, 422
|
cf6522e294887370a3f8ad76ff66e96fb9b79291
| 33,540 |
def or_operator():
"""|: Bitwise "or" operator."""
class _Operand:
def __or__(self, other):
return " ~or~ ".join(('east coast', other))
return _Operand() | 'dirty south'
|
9ccfc124dd6c7aae8035b336788cc07cdff983d1
| 33,541 |
import numpy as np
from tqdm import tqdm
from Bio.pairwise2 import align  # assumed source of `align.globalds`
def computeAlignmentScores(vocabulary, sub_mat, gap_open, gap_extend):
"""
Pre-computes the alignment scores between all the k-mer pairs
Args:
vocabulary (list [str]): The list of all possible k-mers
sub_mat (dict[tuple[str,str],int]): Substitution matrix as represented in Bio.SubsMat.MatrixInfo.blosum objects
gap_open (int): gap open loss (negative number to use Biopython)
gap_extend (int): gap extend loss (negative number to use Biopython)
Returns:
2D numpy array [int,int] : a matrix[len(vocabulary)xlen(vocabulary)] where the value of (i,j) cell presents
the alignment score between ith and jth k-mer in the vocabulary
"""
alignment_scores = np.zeros((len(vocabulary),len(vocabulary)))
for i in tqdm(range(len(vocabulary)), desc='Computing Alignment Score'):
for j in range(i,len(vocabulary)):
alignment_score = align.globalds(vocabulary[i], vocabulary[j], sub_mat, gap_open, gap_extend, score_only=True)
alignment_scores[i][j] = alignment_score
alignment_scores[j][i] = alignment_score
return alignment_scores
|
0b7aeb6b592586b827e19969a418146800805687
| 33,542 |
import re
def _get_version(basename):
"""Returns the _get_next_version of a file."""
match = re.search(r"\(\d*\)", basename)
if match:
v = int(match.group(0)
.replace('(', '')
.replace(')', ''))
return v
return 0
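# Usage sketch: the version is the number written as "(n)" in the basename.
assert _get_version("report(3).txt") == 3
assert _get_version("report.txt") == 0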
|
7340b74dca04ecb5520c03b046ec650c34527b4c
| 33,544 |
def list_tags():
"""Show all the known tags."""
streets_with_tags = (
db.session.query(Street).filter(Street.tags != None).all() # noqa
)
all_tags = set()
for street in streets_with_tags:
all_tags.update(set(street.tags))
all_tags = sorted(list((all_tags)))
return render_template(
"streets/tags_list.html",
all_tags=all_tags,
)
|
737e65e7088357407c71eb19b9b3a4ddc948e26a
| 33,545 |
def get_records(field_id):
"""Return TOP 10 records for selected field"""
if not request.is_xhr:
abort(403)
if field_id == 0:
field_id = session.get('current_field_id', 2)
field = Field.query.get(field_id)
records = field.records.limit(10)
top_10 = []
for record in records:
is_you = False
current_player = session.get('player', '')
if current_player == record.player.name:
is_you = True
top_10.append(
{
'size': field.name,
'player': record.player.name,
'shot_count': record.shot_count,
'seconds': record.seconds,
'isYou': is_you,
}
)
if not top_10:
top_10 = [{'size': field.name},]
return jsonify(top_10)
|
c9fcf236ee765d2a09148367293ea882093ab001
| 33,546 |
def self_play_iterator_creator(hparams, num_workers, jobid):
"""create a self play iterator. There are iterators that will be created here.
A supervised training iterator used for supervised learning. A full text
iterator and structured iterator used for reinforcement learning self play.
Full text iterators feeds data from text files while structured iterators
are initialized directly from objects. The former one is used for traiing.
The later one is used for self play dialogue generation to eliminate the
need of serializing them into actual text
files.
"""
vocab_table = vocab_utils.create_vocab_tables(hparams.vocab_file)[0]
data_dataset = tf.data.TextLineDataset(hparams.train_data)
kb_dataset = tf.data.TextLineDataset(hparams.train_kb)
skip_count_placeholder = tf.placeholder(shape=(), dtype=tf.int64)
# this is the actual iterator for supervised training
train_iterator = iterator_utils.get_iterator(
data_dataset,
kb_dataset,
vocab_table,
batch_size=hparams.batch_size,
t1=hparams.t1.encode(),
t2=hparams.t2.encode(),
eod=hparams.eod,
len_action=hparams.len_action,
random_seed=hparams.random_seed,
num_buckets=hparams.num_buckets,
max_dialogue_len=hparams.max_dialogue_len,
skip_count=skip_count_placeholder,
num_shards=num_workers,
shard_index=jobid)
# this is the actual iterator for self_play_fulltext_iterator
data_placeholder = tf.placeholder(
shape=[None], dtype=tf.string, name="src_ph")
kb_placeholder = tf.placeholder(shape=[None], dtype=tf.string, name="kb_ph")
batch_size_placeholder = tf.placeholder(
shape=[], dtype=tf.int64, name="bs_ph")
dataset_data = tf.data.Dataset.from_tensor_slices(data_placeholder)
kb_dataset = tf.data.Dataset.from_tensor_slices(kb_placeholder)
self_play_fulltext_iterator = iterator_utils.get_infer_iterator(
dataset_data,
kb_dataset,
vocab_table,
batch_size=batch_size_placeholder,
eod=hparams.eod,
len_action=hparams.len_action,
self_play=True)
# this is the actual iterator for self_play_structured_iterator
self_play_structured_iterator = tf.data.Iterator.from_structure(
tf.data.get_output_types(self_play_fulltext_iterator),
tf.data.get_output_shapes(self_play_fulltext_iterator))
iterators = [
train_iterator, self_play_fulltext_iterator, self_play_structured_iterator
]
# this is the list of placeholders
placeholders = [
data_placeholder, kb_placeholder, batch_size_placeholder,
skip_count_placeholder
]
return iterators, placeholders
|
618179a8694a2df0edbd1401a6caf18e607b2652
| 33,547 |
def utf8_product_page():
"""
Single product page with utf8 content.
"""
    with open('data/product_utf8.html', encoding='utf-8') as f:
return ''.join(f)
|
56a70e463cebdaef632ebd2997be4a523289da02
| 33,548 |
def detect_Nir2011(dat_orig, s_freq, time, opts):
"""Spindle detection based on Nir et al. 2011
Parameters
----------
dat_orig : ndarray (dtype='float')
vector with the data for one channel
s_freq : float
sampling frequency
time : ndarray (dtype='float')
vector with the time points for each sample
opts : instance of 'DetectSpindle'
'det_butter' : dict
parameters for 'butter',
'smooth' : dict
parameters for 'gaussian'
        'det_thresh_lo' : float
            detection threshold
'sel_thresh' : float
selection threshold
'min_interval' : float
minimum interval between consecutive events
'duration' : tuple of float
min and max duration of spindles
Returns
-------
list of dict
list of detected spindles
dict
'det_value_lo' with detection value, 'det_value_hi' with nan,
'sel_value' with selection value
float
spindle density, per 30-s epoch
Notes
-----
This paper also selects channels carefully:
'First, the channels with spindle activity in NREM sleep were
chosen for further analysis.'
'Third, those channels, in which an increase in spectral power
within the detected events was restricted to the spindle-frequency
range (10-16 Hz) rather than broadband.'
References
----------
Nir, Y. et al. Neuron 70, 153-69 (2011).
"""
dat_det = transform_signal(dat_orig, s_freq, 'butter', opts.det_butter)
dat_det = transform_signal(dat_det, s_freq, 'hilbert')
dat_det = transform_signal(dat_det, s_freq, 'abs')
dat_det = transform_signal(dat_det, s_freq, 'gaussian', opts.smooth)
det_value = define_threshold(dat_det, s_freq, 'mean+std',
opts.det_thresh_lo)
sel_value = define_threshold(dat_det, s_freq, 'mean+std', opts.sel_thresh)
events = detect_events(dat_det, 'above_thresh', det_value)
if events is not None:
events = _merge_close(dat_det, events, time, opts.min_interval)
events = select_events(dat_det, events, 'above_thresh', sel_value)
events = within_duration(events, time, opts.duration)
power_peaks = peak_in_power(events, dat_orig, s_freq, opts.power_peaks)
power_avgs = avg_power(events, dat_orig, s_freq, opts.frequency)
sp_in_chan = make_spindles(events, power_peaks, power_avgs, dat_det,
dat_orig, time, s_freq)
else:
# print('No spindle found')
sp_in_chan = []
values = {'det_value_lo': det_value, 'det_value_hi': nan,
'sel_value': sel_value}
density = len(sp_in_chan) * s_freq * 30 / len(dat_orig)
return sp_in_chan, values, density
|
f4e4c1cd22944fd09121c0f43bb28a4dd3e7d629
| 33,549 |
def is_indel(variant):
"""Is variant an indel?
An indel event is simply one where the size of at least one of the alleles
is > 1.
Args:
variant: third_party.nucleus.protos.Variant.
Returns:
True if the alleles in variant indicate an insertion/deletion event
occurs at this site.
"""
# redacted
# redacted
return (not is_ref(variant) and
(len(variant.reference_bases) > 1 or
any(len(alt) > 1 for alt in variant.alternate_bases)))
|
dc44642a011ac292c73a163b29bb1f3c4cb36624
| 33,550 |
import functools
import flask
def project_access_required(f):
"""Decorator function to verify the users access to the project."""
@functools.wraps(f)
def verify_project_access(current_user, project, *args, **kwargs):
"""Verifies that the user has been granted access to the project."""
if project["id"] is None:
return flask.make_response("Project ID missing. Cannot proceed", 401)
if not project["verified"]:
return flask.make_response(
f"Access to project {project['id']} not yet verified. " "Checkout token settings.",
401,
)
return f(current_user, project, *args, **kwargs)
return verify_project_access
|
4689e05ae330e360ca0e1ac221b571545608d5b4
| 33,551 |
def downloadRangesHelper(imageCount : int, downloadRange, downloadIndex):
"""Helper function for calculating download ranges and/or download index"""
# expected output should be [x,y], where x = zero-based image index start,
# and y = the amount of images in a post.
# check if downloadRange and downloadIndex are set
# then return an error message
if downloadRange is not None and downloadIndex is not None:
print("Range and index parameters cannot be combined at the same time.")
return None
# check if there is only one image in a post
if imageCount == 1:
return [0, 1]
elif imageCount > 1 and (downloadRange is None and downloadIndex is None):
return [0, imageCount]
# checks when download range is set
if downloadRange is not None:
return _calculateRange(imageCount, downloadRange)
elif downloadIndex is not None:
return _calculateIndex(imageCount, downloadIndex)
|
2fa98a1111059d5c2f2126f19a8eccab9b23a596
| 33,552 |
import sqlite3
def removeProduct(product, connection=None):
"""
Remove a product tuple from the database.
Args:
        product (str): The product to be removed from the database.
connection (sqlite3.Connection, optional): A connection to the database.
Returns:
bool: True if successful, false otherwise.
"""
# Convert to a valid string by adding or removing characters.
product = "'" + product + "'"
# Open the master database if it is not supplied.
flag = False
if connection is None:
connection = sqlite3.connect(MASTER)
flag = True
# Create a cursor from the connection.
cursor = connection.cursor()
# Execute the command to remove the row of the given product.
cursor.execute("""DELETE FROM information WHERE product={p}""".\
format(p=product))
# Close the cursor.
cursor.close()
# Commit the change to the database and close the connection.
if flag:
connection.commit()
connection.close()
return True
|
131c013a4ee1569b91dd7f52b3c961877e4a9124
| 33,553 |
def health():
"""Return information about the health of the queue in a format that
can be turned into JSON.
"""
output = {'queue': {}, 'errors': {}}
output['queue']['all-jobs'] = Job.objects.all().count()
output['queue']['not-executed'] = Job.objects.filter(executed=None).count()
output['queue']['executed'] = Job.objects.exclude(executed=None).count()
output['errors']['number'] = Error.objects.all().count()
return output
|
f65ac55808d52ece96b6168137e78f8ad42c7d7f
| 33,554 |
def build_confirmation_msg(message_template: str, variables_arr: list, record):
"""Returns the full confirmation email as a string
Note: some email services also support sending HTML for future purposes.
"""
inserts = []
for variable in variables_arr:
inserts.append(record['fields'][variable.strip()])
message = message_template.format(*inserts)
# print(message)
return message
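# Usage sketch with a hypothetical record; the template text and field names below
# are made up for illustration.
template = "Hi {}, we will contact you at {}."
record = {"fields": {"Name": "Ada", "Email": "ada@example.com"}}
msg = build_confirmation_msg(template, ["Name", "Email "], record)
# msg == "Hi Ada, we will contact you at ada@example.com."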
|
6d84b19f2bcce3fab298fe7ece2a011b423f9e90
| 33,557 |
import re
import pandas as pd
def _get_freq_label_by_month(date_value: str) -> str:
"""Gets frequency label for the date value which is aggregated by month.
Args:
date_value (str): The date value.
Returns:
str: The date value aggregated by month.
"""
if bool(re.match(r"^\d{4}M\d{1,2}$", date_value)):
return date_value
if not bool(re.match(r"^\d{4}-\d{1,2}(-\d{1,2})?$", date_value)):
raise ValueError("Date needs to be in yyyy-mm-dd or yyyy-mm format when freq is M")
ts = pd.Timestamp(date_value)
return "{}M{}".format(ts.year, ts.month)
|
a789d993bf390fba42dc5451302f3fe89623cb3d
| 33,558 |
import torch
def get_depth_metrics(pred, gt, mask=None):
"""
params:
pred: [N,1,H,W]. torch.Tensor
gt: [N,1,H,W]. torch.Tensor
"""
if mask is not None:
num = torch.sum(mask) # the number of non-zeros
pred = pred[mask]
gt = gt[mask]
else:
num = pred.numel()
num = num * 1.0
diff_i = gt - pred
abs_diff = torch.sum(torch.abs(diff_i)) / num
abs_rel = torch.sum(torch.abs(diff_i) / gt) / num
sq_rel = torch.sum(diff_i ** 2 / gt) / num
rmse = torch.sqrt(torch.sum(diff_i ** 2) / num)
rmse_log = torch.sqrt(torch.sum((torch.log(gt) -
torch.log(pred)) ** 2) / num)
return abs_diff, abs_rel, sq_rel, rmse, rmse_log
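# Usage sketch with small random tensors; gt is kept strictly positive so the
# relative and log terms stay finite.
pred = torch.rand(2, 1, 4, 4) + 0.5
gt = torch.rand(2, 1, 4, 4) + 0.5
abs_diff, abs_rel, sq_rel, rmse, rmse_log = get_depth_metrics(pred, gt)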
|
2d4e617bbbf3823ee60dbf1480aed624fd7ba57b
| 33,559 |
def run_ica(raw, n_components, max_pca_components=100,
n_pca_components=64, noise_cov=None, random_state=None,
algorithm='parallel', fun='logcosh', fun_args=None,
verbose=None, picks=None, start=None, stop=None, start_find=None,
stop_find=None, ecg_ch=None, ecg_score_func='pearsonr',
ecg_criterion=0.1, eog_ch=None, eog_score_func='pearsonr',
eog_criterion=0.1, skew_criterion=-1, kurt_criterion=-1,
var_criterion=0, add_nodes=None):
"""Run ICA decomposition on raw data and identify artifact sources
This function implements an automated artifact removal work flow.
Hints and caveats:
- It is highly recommended to bandpass filter ECG and EOG
data and pass them instead of the channel names as ecg_ch and eog_ch
arguments.
- Please check your results. Detection by kurtosis and variance
can be powerful but misclassification of brain signals as
noise cannot be precluded. If you are not sure set those to None.
- Consider using shorter times for start_find and stop_find than
for start and stop. It can save you much time.
Example invocation (taking advantage of defaults):
ica = run_ica(raw, n_components=.9, start_find=10000, stop_find=12000,
ecg_ch='MEG 1531', eog_ch='EOG 061')
Parameters
----------
raw : instance of Raw
The raw data to decompose.
n_components : int | float | None
        The number of components used for ICA decomposition. If int, it must be
        smaller than max_pca_components. If None, all PCA components will be
        used. If float between 0 and 1, components will be selected by the
        cumulative percentage of explained variance.
n_pca_components
The number of PCA components used after ICA recomposition. The ensuing
attribute allows to balance noise reduction against potential loss of
features due to dimensionality reduction. If greater than
self.n_components_, the next 'n_pca_components' minus
'n_components_' PCA components will be added before restoring the
sensor space data. The attribute gets updated each time the according
parameter for in .pick_sources_raw or .pick_sources_epochs is changed.
max_pca_components : int | None
The number of components used for PCA decomposition. If None, no
dimension reduction will be applied and max_pca_components will equal
the number of channels supplied on decomposing data.
noise_cov : None | instance of mne.cov.Covariance
Noise covariance used for whitening. If None, channels are just
z-scored.
random_state : None | int | instance of np.random.RandomState
np.random.RandomState to initialize the FastICA estimation.
As the estimation is non-deterministic it can be useful to
fix the seed to have reproducible results.
algorithm : {'parallel', 'deflation'}
Apply parallel or deflational algorithm for FastICA
fun : string or function, optional. Default: 'logcosh'
The functional form of the G function used in the
approximation to neg-entropy. Could be either 'logcosh', 'exp',
or 'cube'.
You can also provide your own function. It should return a tuple
containing the value of the function, and of its derivative, in the
point.
fun_args: dictionary, optional
Arguments to send to the functional form.
If empty and if fun='logcosh', fun_args will take value
{'alpha' : 1.0}
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
picks : array-like
Channels to be included. This selection remains throughout the
initialized ICA session. If None only good data channels are used.
start : int | float | None
First sample to include for decomposition. If float, data will be
interpreted as time in seconds. If None, data will be used from the
first sample.
stop : int | float | None
Last sample to not include for decomposition. If float, data will be
interpreted as time in seconds. If None, data will be used to the
last sample.
start_find : int | float | None
First sample to include for artifact search. If float, data will be
interpreted as time in seconds. If None, data will be used from the
first sample.
stop_find : int | float | None
Last sample to not include for artifact search. If float, data will be
interpreted as time in seconds. If None, data will be used to the last
sample.
ecg_ch : str | ndarray | None
The `target` argument passed to ica.find_sources_raw. Either the
name of the ECG channel or the ECG time series. If None, this step
will be skipped.
ecg_score_func : str | callable
The `score_func` argument passed to ica.find_sources_raw. Either
the name of function supported by ICA or a custom function.
ecg_criterion : float | int | list-like | slice
The indices of the sorted skewness scores. If float, sources with
scores smaller than the criterion will be dropped. Else, the scores
sorted in descending order will be indexed accordingly.
E.g. range(2) would return the two sources with the highest score.
If None, this step will be skipped.
eog_ch : list | str | ndarray | None
The `target` argument or the list of target arguments subsequently
passed to ica.find_sources_raw. Either the name of the vertical EOG
channel or the corresponding EOG time series. If None, this step
will be skipped.
eog_score_func : str | callable
The `score_func` argument passed to ica.find_sources_raw. Either
the name of function supported by ICA or a custom function.
eog_criterion : float | int | list-like | slice
The indices of the sorted skewness scores. If float, sources with
scores smaller than the criterion will be dropped. Else, the scores
sorted in descending order will be indexed accordingly.
E.g. range(2) would return the two sources with the highest score.
If None, this step will be skipped.
skew_criterion : float | int | list-like | slice
The indices of the sorted skewness scores. If float, sources with
scores smaller than the criterion will be dropped. Else, the scores
sorted in descending order will be indexed accordingly.
E.g. range(2) would return the two sources with the highest score.
If None, this step will be skipped.
kurt_criterion : float | int | list-like | slice
The indices of the sorted skewness scores. If float, sources with
scores smaller than the criterion will be dropped. Else, the scores
sorted in descending order will be indexed accordingly.
E.g. range(2) would return the two sources with the highest score.
If None, this step will be skipped.
var_criterion : float | int | list-like | slice
The indices of the sorted skewness scores. If float, sources with
scores smaller than the criterion will be dropped. Else, the scores
sorted in descending order will be indexed accordingly.
E.g. range(2) would return the two sources with the highest score.
If None, this step will be skipped.
add_nodes : list of ica_nodes
        Additional list of tuples carrying the following parameters:
(name : str, target : str | array, score_func : callable,
criterion : float | int | list-like | slice). This parameter is a
generalization of the artifact specific parameters above and has
the same structure. Example:
        add_nodes=('ECG phase lock', 'ECG 01', my_phase_lock_function, 0.5)
Returns
-------
ica : instance of ICA
The ica object with detected artifact sources marked for exclusion
"""
ica = ICA(n_components=n_components, max_pca_components=max_pca_components,
n_pca_components=n_pca_components, noise_cov=noise_cov,
random_state=random_state, algorithm=algorithm, fun=fun,
fun_args=fun_args, verbose=verbose)
ica.decompose_raw(raw, start=start, stop=stop, picks=picks)
logger.info('%s' % ica)
logger.info(' Now searching for artifacts...')
_detect_artifacts(ica=ica, raw=raw, start_find=start_find,
stop_find=stop_find, ecg_ch=ecg_ch,
ecg_score_func=ecg_score_func,
ecg_criterion=ecg_criterion, eog_ch=eog_ch,
eog_score_func=eog_score_func,
                      eog_criterion=eog_criterion,
skew_criterion=skew_criterion,
kurt_criterion=kurt_criterion,
var_criterion=var_criterion,
add_nodes=add_nodes)
return ica
|
56b36d33211123839ed61f6cc19e7c7ee4c2e336
| 33,560 |
from pyramid.httpexceptions import HTTPBadRequest, HTTPNoContent
def remove_member(context, request):
"""Remove a member from the given group."""
# Currently, we only support removing the requesting user
if request.matchdict.get("userid") == "me":
userid = request.authenticated_userid
else:
raise HTTPBadRequest('Only the "me" user value is currently supported')
group_members_service = request.find_service(name="group_members")
group_members_service.member_leave(context.group, userid)
return HTTPNoContent()
|
494ddb9e824911680c8c4f80dbc7d1c6aa6f12d2
| 33,561 |
def convert_length(length, original_unit="kilometers", final_unit="kilometers"):
"""
:param length: length to be converted
:param original_unit: original unit of the length
:param final_unit: return unit of the length
:return: the converted length
"""
if not isinstance(length, (float, int)) or length < 0:
raise InvalidInput(error_code_messages["InvalidLength"])
return radians_to_length(length_to_radians(length, original_unit), final_unit)
|
e76dcaa050eaa1a77621c7062ca2a187238d0ea3
| 33,562 |
def get_value_larger(threshold, value, step, direction, size_x, size_y):
"""Function for looping until correct coordinate is found"""
matrix = [[0 for _ in range(size_x)] for _ in range(size_y)]
    current_x = size_x // 2
    current_y = size_y // 2
while value <= threshold:
for _ in range(0, 2):
for _ in range(0, step):
matrix[current_x][current_y] = value
# Step
if direction == 0:
current_x += 1
elif direction == 1:
current_y += 1
elif direction == 2:
current_x -= 1
elif direction == 3:
current_y -= 1
value = matrix[current_x + 1][current_y] \
+ matrix[current_x + 1][current_y + 1] \
+ matrix[current_x][current_y + 1] \
+ matrix[current_x - 1][current_y + 1] \
+ matrix[current_x - 1][current_y] \
+ matrix[current_x - 1][current_y - 1] \
+ matrix[current_x][current_y - 1] \
+ matrix[current_x + 1][current_y - 1]
if value > threshold:
return value
# Change direction
direction = (direction + 1) % 4
# Increase steps
step += 1
|
49db166798446e98ae23800eb1428ba3baf0a026
| 33,565 |
def get_daq_device_inventory(interface_type, number_of_devices=100):
# type: (InterfaceType, int) -> list[DaqDeviceDescriptor]
"""
Gets a list of :class:`DaqDeviceDescriptor` objects that can be used
as the :class:`DaqDevice` class parameter to create DaqDevice objects.
Args:
interface_type (InterfaceType): One or more of the
:class:`InterfaceType` attributes (suitable for bit-wise operations)
specifying which physical interfaces (such as USB) to search for
MCC devices.
number_of_devices (Optional[int]): Optional parameter indicating the
maximum number of devices to return in the list; the default is 100.
Returns:
list[DaqDeviceDescriptor]:
            A list of :class:`DaqDeviceDescriptor` objects that describe each of the
            MCC devices found on the specified interface.
Raises:
:class:`ULException`
"""
device_descriptors = _daq_device_descriptor_array(number_of_devices)
number_of_devices = c_uint(number_of_devices)
err = lib.ulGetDaqDeviceInventory(interface_type, device_descriptors,
byref(number_of_devices))
if err != 0:
raise ULException(err)
devices_list = [device_descriptors[i] for i in range(
number_of_devices.value)]
return devices_list
|
abee7adb4fa57471e09a480a971e35e5081a9837
| 33,566 |
def statement_passive_verb(stmt_type):
"""Return the passive / state verb form of a statement type.
Parameters
----------
stmt_type : str
The lower case string form of a statement type, for instance,
'phosphorylation'.
Returns
-------
str
The passive/state verb form of a statement type, for instance,
'phosphorylated'.
"""
override = {
'complex': 'bound',
'regulateamount': 'amount regulated',
'decreaseamount': 'decreased',
'increaseamount': 'increased',
'gap': 'GAP-regulated',
'gef': 'GEF-regulated',
'gtpactivation': 'GTP-activated',
'influence': 'influenced',
'event': 'happened',
'conversion': 'converted',
'modification': 'modified',
'addmodification': 'modified',
'removemodification': 'unmodified',
'regulateactivity': 'activity regulated',
}
return override.get(stmt_type) if stmt_type in override else \
stmt_type[:-3] + 'ed'
|
21a34fc7270d0c9f9d4c096930ed5bcb9f6af72b
| 33,567 |
from typing import Iterable
from kubernetes import client
def find_hpas(config: Config,) -> Iterable[client.models.v1_horizontal_pod_autoscaler.V1HorizontalPodAutoscaler]:
"""Find any HorizontalPodAutoscaler having klutch annotation."""
resp = client.AutoscalingV1Api().list_horizontal_pod_autoscaler_for_all_namespaces()
return filter(lambda h: config.hpa_annotation_enabled in h.metadata.annotations, resp.items)
|
fcc34c83d7f50c25510eabfb6b60d6106fe97432
| 33,569 |
def bioacoustics_index (Sxx, fn, flim=(2000, 15000), R_compatible ='soundecology'):
"""
Compute the Bioacoustics Index from a spectrogram [1]_.
Parameters
----------
Sxx : ndarray of floats
matrix : Spectrogram
fn : vector
frequency vector
    flim : tuple (fmin, fmax), optional, default is (2000, 15000)
Frequency band used to compute the bioacoustic index.
R_compatible : string, default is "soundecology"
if 'soundecology', the result is similar to the package SoundEcology in R
Otherwise, the result is specific to maad
Returns
-------
BI : scalar
Bioacoustics Index
References
----------
.. [1] Boelman NT, Asner GP, Hart PJ, Martin RE. 2007. Multi-trophic invasion resistance in Hawaii: bioacoustics, field surveys, and airborne remote sensing. Ecological Applications 17: 2137-2144. `DOI: 10.1890/07-0004.1 <https://doi.org/10.1890/07-0004.1>`_
Ported and modified from the soundecology R package - cran.ms.unimelb.edu.au/web/packages/soundecology/soundecology.pdf.
Notes
-----
Soundecology compatible version:
- average of dB value
- remove negative value in order to get positive values only
- dividing by the frequency resolution df instead of multiplication
Examples
--------
>>> s, fs = maad.sound.load('../data/cold_forest_daylight.wav')
>>> Sxx, tn, fn, ext = maad.sound.spectrogram (s, fs,mode='amplitude')
>>> BI = maad.features.bioacoustics_index(Sxx,fn)
>>> print('BI Soundecology : %2.2f ' %BI)
BI Soundecology : 52.84
>>> BI = maad.features.bioacoustics_index(Sxx,fn,R_compatible=None)
>>> print('BI MAAD: %2.2f ' %BI)
BI MAAD : 17.05
"""
# select the indices corresponding to the frequency bins range
indf = index_bw(fn,flim)
# frequency resolution.
df = fn[1] - fn[0]
# ======= As soundecology
if R_compatible == 'soundecology' :
# Mean Sxx normalized by the max
meanSxx = mean(Sxx/max(Sxx), axis=1)
# Convert into dB
meanSxxdB = amplitude2dB(meanSxx)
# "normalization" in order to get positive 'vectical' values
meanSxxdB = meanSxxdB[indf,]-min(meanSxxdB[indf,])
# this is not the area under the curve...
# what is the meaning of an area under the curve in dB...
BI = sum(meanSxxdB)/df
# ======= maad version
else:
# better to average the PSD for energy conservation
PSDxx_norm = (Sxx**2/max(Sxx**2))
meanPSDxx_norm = mean(PSDxx_norm, axis=1)
# Compute the area
# take the sqrt in order to go back to Sxx
BI = sqrt(sum(meanPSDxx_norm))* df
return BI
|
fcc557d1bbbe5d3c9758cb500cb8a98cd83510ce
| 33,570 |
def ctm_to_dict(ctm_fn):
"""
Return a dictionary with a list of (start, dur, word) for each utterance.
"""
ctm_dict = {}
with open(ctm_fn, "r") as f:
for line in f:
utt, _, start, dur, word = line.strip().split(" ")
if not utt in ctm_dict:
ctm_dict[utt] = []
start = float(start)
dur = float(dur)
ctm_dict[utt].append((start, dur, word))
return ctm_dict
|
7a0c58e544029fd118448b916c2c2966172c5d1b
| 33,572 |
def qft_core(qubits, coef=1):
"""
    Generates a Quil program that performs
    the quantum Fourier transform on the given qubits
    without swapping qubits at the end.
    :param qubits: A list of qubit indexes.
    :param coef: A modifier for the angle used in rotations (-1 for inverse
                 QFT, 1 for QFT)
:return: A Quil program to compute the QFT of the given qubits without swapping.
"""
p = Program()
# Iterate over qubits starting from the most significant
for i, qubit in enumerate(qubits[::-1]):
p.inst(H(qubit))
# Add controlled rotations R_i for i in 1 .. n-1
# using all qubits right to current
p.inst(crotate(qubit, qubits[:-i-1], coef=coef, start_index=1))
return p
|
79944c93ff2d3d5393d9f94e546d79b69334f005
| 33,573 |
def get_formatted_timestamp(app_type):
"""Different services required different date formats - return the proper format here"""
if app_type in {'duo', 'duo_admin', 'duo_auth'}:
return 1505316432
elif app_type in {'onelogin', 'onelogin_events'}:
return '2017-10-10T22:03:57Z'
elif app_type in {'gsuite', 'gsuite_admin', 'gsuite_drive',
'gsuite_login', 'gsuite_token', 'salesforce'}:
return '2017-06-17T15:39:18.460Z'
elif app_type in {'box', 'box_admin_events'}:
return '2017-10-27T12:31:22-07:00'
|
f5d4f2ac1d30383849b6149a46525e67439229df
| 33,574 |
from numpy import array, float_, inexact, issubdtype, moveaxis
def preprocess(x, copy=False, float=False, axis=None):
"""
Ensure that `x` is a properly formatted numpy array.
Proper formatting means at least one dimension, and may include
optional copying, reshaping and coersion into a floating point
datatype.
Parameters
----------
x : array-like
The array to process. If not already a numpy array, it will be
converted to one.
copy : bool, optional
If True, a copy is made regardless of whether `x` is already a
numpy array or not. The default is False.
float : bool, optional
If True, and `x` is not an inexact array already
(:py:attr:`numpy.float16`, :py:attr:`numpy.float32`,
:py:attr:`numpy.float64`, :py:attr:`numpy.float96`,
:py:attr:`numpy.float128`, etc), coerce to be of type
:py:attr:`numpy.float_`. Defaults to False.
axis : int, optional
If specified, the specified axis is moved to the end of the
shape. Default is to return `x` without reshaping.
Return
------
x : ~numpy.ndarray
Processed version of the input.
"""
if float:
dtype = x.dtype if hasattr(x, 'dtype') and \
issubdtype(x.dtype, inexact) else float_
else:
dtype=None
x = array(x, copy=copy, subok=not copy, ndmin=1, dtype=dtype)
if axis is not None and axis not in (-1, x.ndim - 1):
# moveaxis always returns a new view, never the same object
x = moveaxis(x, axis, -1)
return x
|
b1632ca64fe315330d26f9beeef6fa9df8a40382
| 33,576 |
def detect_defaults_settings(output):
""" try to deduce current machine values without any
constraints at all
"""
output.writeln("\nIt seems to be the first time you run conan", Color.BRIGHT_YELLOW)
output.writeln("Auto detecting your dev setup to initialize conan.conf", Color.BRIGHT_YELLOW)
result = []
_detect_os_arch(result, output)
_detect_compiler_version(result, output)
result.append(("build_type", "Release"))
output.writeln("Default conan.conf settings", Color.BRIGHT_YELLOW)
output.writeln("\n".join(["\t%s=%s" % (k, v) for (k, v) in result]), Color.BRIGHT_YELLOW)
output.writeln("*** You can change them in ~/.conan/conan.conf ***", Color.BRIGHT_MAGENTA)
output.writeln("*** Or override with -s compiler='other' -s ...s***\n\n", Color.BRIGHT_MAGENTA)
return result
|
86e15908c6219e3f0f5caefe8a0a858ce602e484
| 33,577 |
from .continent import COUNTRY_TO_CONTINENT
def compute_continent_histogram(docs: DocumentSet, **kwargs) -> pd.DataFrame:
""" Compute a histogram of number of documents by affiliation
continent.
"""
def extract(doc):
result = set()
for author in doc.authors or []:
for aff in author.affiliations or []:
if country := extract_country(aff):
country = country.strip().lower()
if country.startswith('the '):
country = country[4:]
if continent := COUNTRY_TO_CONTINENT.get(country):
result.add(continent)
else:
result.add('Other')
return result
return compute_histogram(docs, extract, **kwargs)
|
031617c337850eeada50046acdade4fb7ada4fef
| 33,579 |
from collections import OrderedDict
from math import log2
def calc_prod(corpus_context, envs, strict = True, all_info = False, ordered_pair = None,
stop_check = None, call_back = None):
"""
Main function for calculating predictability of distribution for
two segments over specified environments in a corpus.
Parameters
----------
corpus_context : CorpusContext
Context manager for a corpus
envs : list of EnvironmentFilter
List of EnvironmentFilter objects that specify environments
strict : bool
        If true, exceptions will be raised for non-exhaustive environments
and non-unique environments. If false, only warnings will be
shown. Defaults to True.
all_info : bool
If true, all the intermediate numbers for calculating predictability
of distribution will be returned. If false, only the final entropy
will be returned. Defaults to False.
stop_check : callable, optional
Optional function to check whether to gracefully terminate early
call_back : callable, optional
Optional function to supply progress information during the function
Returns
-------
dict
Keys are the environments specified and values are either a list
of [entropy, frequency of environment, frequency of seg1, frequency
of seg2] if all_info is True, or just entropy if all_info is False.
"""
seg_list = envs[0].middle
for e in envs:
if not all(s in seg_list for s in e.middle):#e.middle != seg_list:
raise(PCTError("Middle segments of all environments must be the same."))
returned = check_envs(corpus_context, envs, stop_check, call_back)
if stop_check is not None and stop_check():
return
env_matches, miss_envs, overlap_envs = returned
if miss_envs or overlap_envs:
if strict:
raise(ProdError(envs, miss_envs, overlap_envs))
H_dict = OrderedDict()
#CALCULATE ENTROPY IN INDIVIDUAL ENVIRONMENTS FIRST
total_matches = {x: 0 for x in seg_list}
total_frequency = 0
if call_back is not None:
call_back('Calculating predictability of distribution...')
call_back(0,len(corpus_context))
cur = 0
for env in env_matches:
if stop_check is not None and stop_check():
return
if call_back is not None:
cur += 1
call_back(cur)
total_tokens = 0
matches = {}
for seg in seg_list:
matches[seg] = env_matches[env][seg]
total_matches[seg] += matches[seg]
total_tokens += matches[seg]
total_frequency += total_tokens
if not total_tokens:
H = 0
else:
seg_H = {}
for seg in seg_list:
seg_prob = matches[seg] / total_tokens
seg_H[seg] = log2(seg_prob)*seg_prob if seg_prob > 0 else 0
H = sum(seg_H.values())*-1
if not H:
H = H+0 #avoid the -0.0 problem
H_dict[env] = [H, total_tokens] + [matches[x] for x in ordered_pair]
#CALCULATE WEIGHTED ENTROPY LAST
weighted_H = 0
for env in env_matches:
weighted_H += H_dict[env][0] * (H_dict[env][1] / total_frequency) if total_frequency>0 else 0
try:
avg_h = sum(total_matches.values())/total_frequency
except ZeroDivisionError:
avg_h = 0.0
H_dict['AVG'] = [weighted_H, avg_h] + [total_matches[x] for x in ordered_pair]
if not all_info:
for k,v in H_dict.items():
H_dict[k] = v[0]
return H_dict
|
4c104b235bb6970573c2ea659c81038e266c40d3
| 33,580 |
from tqdm import tqdm
def read_run_dict(file_name):
"""Read a run file in the form of a dictionary where keys are query IDs.
:param file_name: run file name
:return:
"""
result = {}
with FileWrapper(file_name) as f:
for ln, line in enumerate(tqdm(f, desc='loading run (by line)', leave=False)):
line = line.strip()
if not line:
continue
fld = line.split()
if len(fld) != 6:
ln += 1
raise Exception(
f'Invalid line {ln} in run file {file_name} expected 6 white-space separated fields by got: {line}')
qid, _, docid, rank, score, _ = fld
result.setdefault(qid, {})[docid] = float(score)
return result
|
2aa7937e259481c86ddaf13d9f6af8e49efb087c
| 33,581 |
def parse_address(address):
"""Convert host:port or port to address to pass to connect."""
if ':' not in address:
return ('', int(address))
host, port = address.rsplit(':', 1)
return (host, int(port))
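# Usage sketch.
assert parse_address("8080") == ("", 8080)
assert parse_address("example.com:8080") == ("example.com", 8080)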
|
06eb172974c4e75d33ae205f952e8533c88acfeb
| 33,582 |
def secure_host_url(request, secure_url=None):
"""Overrides ``host_url`` to make sure the protocol is secure."""
# Test jig.
if secure_url is None:
secure_url = secure_request_url
return secure_url(request, 'host_url')
|
d4a0b43a52170d7ac07d2cc8141e789a0005c2fa
| 33,584 |
from kubernetes.client import V1ResourceRequirements  # assumed source of V1ResourceRequirements used below
def get_resource_mutator(cpu=None, memory=None, gpu=None, gpu_vendor='nvidia'):
"""The mutator for getting the resource setting for pod spec.
The useful example:
https://github.com/kubeflow/fairing/blob/master/examples/train_job_api/main.ipynb
:param cpu: Limits and requests for CPU resources (Default value = None)
:param memory: Limits and requests for memory (Default value = None)
:param gpu: Limits for GPU (Default value = None)
:param gpu_vendor: Default value is 'nvidia', also can be set to 'amd'.
:returns: object: The mutator function for setting cpu and memory in pod spec.
"""
def _resource_mutator(kube_manager, pod_spec, namespace): #pylint:disable=unused-argument
if cpu is None and memory is None and gpu is None:
return
if pod_spec.containers and len(pod_spec.containers) >= 1:
            # All cloud providers specify their instance memory in GB,
            # so it is preferable for the user to specify memory in GB;
            # we convert it to the Gi units that K8s needs.
limits = {}
if cpu:
limits['cpu'] = cpu
if memory:
memory_gib = "{}Gi".format(round(memory/1.073741824, 2))
limits['memory'] = memory_gib
if gpu:
limits[gpu_vendor + '.com/gpu'] = gpu
            if pod_spec.containers[0].resources:
                # replace any existing limits with the newly computed ones
                pod_spec.containers[0].resources.limits = limits
else:
pod_spec.containers[0].resources = V1ResourceRequirements(limits=limits)
return _resource_mutator
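# Usage sketch (assumes the official `kubernetes` Python client is installed).
from kubernetes.client import V1Container, V1PodSpec
pod_spec = V1PodSpec(containers=[V1Container(name="trainer")])
mutator = get_resource_mutator(cpu=2, memory=4, gpu=1)
mutator(None, pod_spec, "default")
# pod_spec.containers[0].resources.limits is now
#   {'cpu': 2, 'memory': '3.73Gi', 'nvidia.com/gpu': 1}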
|
02cc5069470d6b255c2c8cb1e8eb27f282b66911
| 33,586 |
from tensorflow.keras import backend as K  # assumed Keras backend providing K.dot / K.transpose
def gram_matrix(features):
"""
Calculates the gram matrix of the feature representation matrix
:param features: The feature matrix that is used to calculate the gram matrix
:return: The gram matrix
"""
return K.dot(features, K.transpose(features))
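# Usage sketch; K is assumed to be the Keras backend imported above.
features = K.constant([[1.0, 2.0], [3.0, 4.0]])  # 2 feature maps x 2 spatial positions
gram = gram_matrix(features)
# K.eval(gram) == [[ 5., 11.], [11., 25.]]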
|
ebc8a354de903b764e7cc5a214c9d6bbcbe5a1f8
| 33,587 |
import cvxpy as cvx
import numpy as np
def lp_dominate(w, U):
"""
Computes the belief in which w improves U the most.
With LP in White & Clark
:param w: np.ndarray
:param U: list of np.ndarray
:return: b if d >= 0 else None
"""
# print("LP dominate")
if len(U) == 0:
return w
S = len(w)
d = cvx.Variable()
b = cvx.Variable(S)
objective = cvx.Maximize(d)
# print("U", U)
    constraints = [b @ (w - u) >= d for u in U] + [cvx.sum(b) == 1]
prob = cvx.Problem(objective, constraints)
result = prob.solve()
# print("d =", d.value)
if d.value >= 0:
return np.ravel(b.value)
else:
return None
|
26ff577b8ad7d97b2062d37299ca59c84897c404
| 33,588 |
from typing import Union
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset, SubsetRandomSampler
def get_random_subset_dataloader(dataset: Dataset, subset_size: Union[float, int], **dataloader_kwargs) -> DataLoader:
""" Returns a random subset dataloader sampling data from given dataset, without replacement.
Args:
- dataset: PyTorch dataset from which random subset dataloader is sampling data.
        - subset_size: Returned dataloader subset size. If it is a float, then `subset_size` is interpreted as the subset size fraction of the dataset size and should be between 0. and 1.; otherwise, if it is an integer, `subset_size` is directly interpreted as the absolute subset size and should be between 0 and `len(dataset)`.
- dataloader_kwargs: Additional dataloader constructor kwargs, like batch_size, num_workers, pin_memory, ... (dataset, shuffle and sampler arguments are already specified by default)
"""
if isinstance(subset_size, float):
assert subset_size > 0. and subset_size <= 1., 'ERROR: `subset_size` should be between 0. and 1. if it is a float.'
subset_size = max(1, min(len(dataset), int(subset_size * len(dataset) + 1)))
train_indices = torch.from_numpy(np.random.choice(len(dataset), size=(subset_size,), replace=False))
    # The custom sampler already randomizes the order, so `shuffle` must not be passed as well.
    return DataLoader(dataset, sampler=SubsetRandomSampler(train_indices), **dataloader_kwargs)
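# Usage sketch (not part of the original snippet): draw a ~10% random subset of a toy dataset.
from torch.utils.data import TensorDataset
toy = TensorDataset(torch.arange(100, dtype=torch.float32).unsqueeze(1))
loader = get_random_subset_dataloader(toy, subset_size=0.1, batch_size=4)
print(sum(batch[0].shape[0] for batch in loader))  # 11 samples (note the +1 in the size computation)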
|
a6dc4e70d6676a6e348339c90ffa99dc76772e9a
| 33,589 |
import numpy
def _combine_pfs(pfa, pfb, coeff, operator):
""" Obtain the pf information of the multiplication of pfa and pfb
"""
tempsa, logqa, dq_dta, d2q_dt2a = pfa
_, logqb, dq_dtb, d2q_dt2b = pfb
if operator == 'multiply':
logq = [a+b+numpy.log(coeff) for a, b in zip(logqa, logqb)]
dq_dt = [a+b+numpy.log(coeff) for a, b in zip(dq_dta, dq_dtb)]
d2q_dt2 = [a+b+numpy.log(coeff) for a, b in zip(d2q_dt2a, d2q_dt2b)]
elif operator == 'divide':
logq = [a-b-numpy.log(coeff) for a, b in zip(logqa, logqb)]
dq_dt = [a-b-numpy.log(coeff) for a, b in zip(dq_dta, dq_dtb)]
d2q_dt2 = [a-b-numpy.log(coeff) for a, b in zip(d2q_dt2a, d2q_dt2b)]
return tempsa, logq, dq_dt, d2q_dt2
|
f89ee97ab5e5de348f42e71d9ad5fa86ba15f922
| 33,590 |
def epa_nei_nonpoint_parse(*, df_list, source, year, config, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:param config: dictionary, items in FBA method yaml
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
df = epa_nei_global_parse(df_list=df_list, source=source,
year=year, config=config)
# Add DQ scores
df['DataReliability'] = 3
df['DataCollection'] = 5 # data collection scores are updated in fbs as
# a function of facility coverage from point source data
return df
|
0a411cd7ce130adafcc8b3615a2c41998454e585
| 33,591 |
from typing import Union
from typing import List
from typing import Dict
import pandas as pd
import dask.dataframe as dd
import ray
def create_auto_config(
dataset: Union[str, pd.DataFrame, dd.core.DataFrame, DatasetInfo],
target: Union[str, List[str]],
time_limit_s: Union[int, float],
tune_for_memory: bool,
user_config: Dict = None,
) -> dict:
"""Returns an auto-generated Ludwig config with the intent of training the best model on given given dataset /
target in the given time limit.
# Inputs
:param dataset: (str, pd.DataFrame, dd.core.DataFrame, DatasetInfo) data source to train over.
:param target: (str, List[str]) name of target feature
:param time_limit_s: (int, float) total time allocated to auto_train. acts
as the stopping parameter
    :param tune_for_memory: (bool) refine hyperopt search space for available
host / GPU memory
# Return
:return: (dict) selected model configuration
"""
default_configs = _create_default_config(dataset, target, time_limit_s)
model_config = _model_select(dataset, default_configs, user_config)
if tune_for_memory:
if ray.is_initialized():
model_config, _ = ray.get(ray.remote(num_cpus=1)(memory_tune_config).remote(model_config, dataset))
else:
model_config, _ = memory_tune_config(model_config, dataset)
return model_config
|
1bd76497b71ed7163af9bddf87e7ee346ed9ff4d
| 33,592 |
import numpy
def create_orbit_from_particles(particles, angular_velocity=0.|units.yr**-1):
"""
Use mass, position and velocity to determine orbital parameters.
Then setup Roche_Orbit
"""
roche = Roche_Orbit()
roche.mass_1, roche.mass_2 = particles.mass
position_vector = particles.position[1] - particles.position[0]
velocity_vector = particles.velocity[1] - particles.velocity[0]
mu = constants.G * particles.mass.sum()
separation = position_vector.length()
speed_squared = velocity_vector.length_squared()
roche.semimajor_axis = mu * separation / (2 * mu - separation * speed_squared)
e_vector = speed_squared * position_vector / mu - position_vector.dot(velocity_vector) * velocity_vector / mu - position_vector / separation
roche.eccentricity = numpy.sqrt((e_vector * e_vector).sum())
roche.true_anomaly = numpy.arccos(position_vector.dot(e_vector) / (roche.eccentricity * separation))
if position_vector.dot(velocity_vector) < quantities.zero:
# arccos is ambiguous
roche.true_anomaly = 2. * numpy.pi - roche.true_anomaly
period = (4. * numpy.pi**2 * roche.semimajor_axis**3 / mu).sqrt()
peri_orbital_angular_velocity = 2. * numpy.pi / period * (1. + roche.eccentricity)**2/ (1-roche.eccentricity**2)**(3./2.)
roche.angular_velocity_ratio = angular_velocity / peri_orbital_angular_velocity
return roche
|
e9314b582e4031a4ca5816f7d0980430c325dc53
| 33,594 |
def bitter_rivals(voting_dict):
"""
Input: a dictionary mapping senator names to lists representing
their voting records
Output: a tuple containing the two senators who most strongly
disagree with one another.
Example:
>>> voting_dict = {'Klein': [-1,0,1], 'Fox-Epstein': [-1,-1,-1], 'Ravella': [0,0,1]}
>>> bitter_rivals(voting_dict)
('Fox-Epstein', 'Ravella')
"""
leastSimMap = {}
rivalA = ''
rivalB = ''
rivalDist = 10000.0
for key, val in voting_dict.items():
l = least_similar(key, voting_dict)
leastSimMap[key] = l
for key, val in leastSimMap.items():
cmpVal = policy_compare(key, val, voting_dict)
if cmpVal < rivalDist:
rivalDist = cmpVal
rivalA = key
rivalB = val
return (rivalA, rivalB)
|
154e067add7a8d5b58d1474530e690bdfb40a7ea
| 33,595 |
from collections import defaultdict
from operator import and_
def last_contacts(ts_start):
"""Get the last time each timeseries datapoint was updated.
Args:
ts_start: Timestamp to start from
Returns:
data: List of dicts of last contact information
"""
# Initialize key variables
data = []
last_contact = defaultdict(lambda: defaultdict(dict))
# Get start and stop times
ts_stop = general.normalized_timestamp()
if ts_start > ts_stop:
ts_start = ts_stop
else:
ts_start = general.normalized_timestamp(ts_start)
# Establish a database session
database = db.Database()
session = database.session()
result = session.query(
Data.value, Data.idx_datapoint, Data.timestamp).filter(
and_(Data.timestamp >= ts_start, Data.timestamp <= ts_stop)
)
# Add to the list of device idx values
for instance in result:
idx_datapoint = instance.idx_datapoint
timestamp = instance.timestamp
value = float(instance.value)
# Update dictionary
if idx_datapoint in last_contact:
if timestamp > last_contact[idx_datapoint]['timestamp']:
last_contact[idx_datapoint]['timestamp'] = timestamp
last_contact[idx_datapoint]['value'] = value
else:
continue
else:
last_contact[idx_datapoint]['timestamp'] = timestamp
last_contact[idx_datapoint]['value'] = value
# Return the session to the pool after processing
database.close()
# Convert dict to list of dicts
for idx_datapoint in last_contact:
data_dict = {}
data_dict['idx_datapoint'] = idx_datapoint
data_dict['timestamp'] = last_contact[idx_datapoint]['timestamp']
data_dict['value'] = last_contact[idx_datapoint]['value']
data.append(data_dict)
# Return
return data
|
6e46d10f0ad7ab1523cd21255cf659d791f551c4
| 33,596 |
import torch
def train_one_epoch_loss_acc(net, train_iter, loss, updater):
"""训练模型一个迭代周期(定义见第3章)
Defined in :numref:`sec_softmax_scratch`
返回 train loss 和 train acc
"""
    # Set the model to training mode
if isinstance(net, torch.nn.Module):
net.train()
    # Sum of training loss, sum of training accuracy, number of examples
metric = Accumulator(3)
for X, y in train_iter:
        # Compute gradients and update the parameters
y_hat = net(X)
l = loss(y_hat, y)
if isinstance(updater, torch.optim.Optimizer):
            # Use PyTorch's built-in optimizer and loss function
updater.zero_grad()
l.mean().backward()
updater.step()
else:
            # Use a custom optimizer and loss function
l.sum().backward()
updater(X.shape[0])
metric.add(float(l.sum()), accuracy(y_hat, y), y.numel())
    # Return the training loss and training accuracy
return metric[0] / metric[2], metric[1] / metric[2]
|
314fe27c20d690a3a09a02571a0e545aea73f845
| 33,598 |
def get_numeric_cache(hostname):
"""Get all the numeric cache entries we have for an hostname
"""
return [{
'collection': str(n.template.collection),
'template': str(n.template),
'value': n.value,
'last_modified': n.last_modified,
} for n in NumericCache.objects.filter(hostname=hostname)]
|
6adb7f3a17e6856ba9319e7cd10567e9cf820378
| 33,599 |
from typing import Sequence
from typing import Optional
import math
import numpy as np
def partition_dataset(
data: Sequence,
ratios: Optional[Sequence[float]] = None,
num_partitions: Optional[int] = None,
shuffle: bool = False,
seed: int = 0,
drop_last: bool = False,
even_divisible: bool = False,
):
"""
Split the dataset into N partitions. It can support shuffle based on specified random seed.
Will return a set of datasets, every dataset contains 1 partition of original dataset.
And it can split the dataset based on specified ratios or evenly split into `num_partitions`.
Refer to: https://pytorch.org/docs/stable/distributed.html#module-torch.distributed.launch.
Note:
It also can be used to partition dataset for ranks in distributed training.
For example, partition dataset before training and use `CacheDataset`, every rank trains with its own data.
It can avoid duplicated caching content in each rank, but will not do global shuffle before every epoch:
.. code-block:: python
data_partition = partition_dataset(
data=train_files,
num_partitions=dist.get_world_size(),
shuffle=True,
even_divisible=True,
)[dist.get_rank()]
train_ds = SmartCacheDataset(
data=data_partition,
transform=train_transforms,
replace_rate=0.2,
cache_num=15,
)
Args:
data: input dataset to split, expect a list of data.
ratios: a list of ratio number to split the dataset, like [8, 1, 1].
num_partitions: expected number of the partitions to evenly split, only works when `ratios` not specified.
shuffle: whether to shuffle the original dataset before splitting.
seed: random seed to shuffle the dataset, only works when `shuffle` is True.
drop_last: only works when `even_divisible` is False and no ratios specified.
if True, will drop the tail of the data to make it evenly divisible across partitions.
if False, will add extra indices to make the data evenly divisible across partitions.
even_divisible: if True, guarantee every partition has same length.
Examples::
>>> data = [1, 2, 3, 4, 5]
>>> partition_dataset(data, ratios=[0.6, 0.2, 0.2], shuffle=False)
[[1, 2, 3], [4], [5]]
>>> partition_dataset(data, num_partitions=2, shuffle=False)
[[1, 3, 5], [2, 4]]
>>> partition_dataset(data, num_partitions=2, shuffle=False, even_divisible=True, drop_last=True)
[[1, 3], [2, 4]]
>>> partition_dataset(data, num_partitions=2, shuffle=False, even_divisible=True, drop_last=False)
[[1, 3, 5], [2, 4, 1]]
>>> partition_dataset(data, num_partitions=2, shuffle=False, even_divisible=False, drop_last=False)
[[1, 3, 5], [2, 4]]
"""
data_len = len(data)
datasets = []
indices = list(range(data_len))
if shuffle:
# deterministically shuffle based on fixed seed for every process
rs = np.random.RandomState(seed)
rs.shuffle(indices)
if ratios:
next_idx = 0
rsum = sum(ratios)
for r in ratios:
start_idx = next_idx
next_idx = min(start_idx + int(r / rsum * data_len + 0.5), data_len)
datasets.append([data[i] for i in indices[start_idx:next_idx]])
return datasets
if not num_partitions:
raise ValueError("must specify number of partitions or ratios.")
# evenly split the data without ratios
if not even_divisible and drop_last:
raise RuntimeError("drop_last only works when even_divisible is True.")
if data_len < num_partitions:
raise RuntimeError(f"there is no enough data to be split into {num_partitions} partitions.")
if drop_last and data_len % num_partitions != 0:
# split to nearest available length that is evenly divisible
num_samples = math.ceil((data_len - num_partitions) / num_partitions)
else:
num_samples = math.ceil(data_len / num_partitions)
# use original data length if not even divisible
total_size = num_samples * num_partitions if even_divisible else data_len
if not drop_last and total_size - data_len > 0:
# add extra samples to make it evenly divisible
indices += indices[: (total_size - data_len)]
else:
# remove tail of data to make it evenly divisible
indices = indices[:total_size]
for i in range(num_partitions):
_indices = indices[i:total_size:num_partitions]
datasets.append([data[j] for j in _indices])
return datasets
|
a8a306e72d256d511d0a8f9493dd46dcbf6c1d7e
| 33,600 |
def canopy_PAR_absorbed(states: States, setpoints: Setpoints, weather: Weather):
"""The PAR absorbed by the canopy
Equation 8.26
:return: The PAR absorbed by the canopy [W m^-2]
"""
return canopy_PAR_absorbed_from_greenhouse_cover(states, setpoints, weather) + canopy_PAR_absorbed_from_greenhouse_floor(states, setpoints, weather)
|
0ea8103d1087be3d283e4834ec7ec8b436357e28
| 33,601 |
import numpy as np
def tiff_to_array(tiff):
    """
    Read an open TIFF file as an array, normalizing the dimensions to at least 3.
    :param tiff: an open tifffile.TiffFile object
    :return: array (or memory map) with a leading page/frame axis
    """
array = (
tiff.asarray(out='memmap') if tiff.pages[0].is_memmappable else tiff.asarray()
)
if array.ndim < 3:
array = array[np.newaxis, ...]
return array
|
157591e2f9980602fc9bca3f713fb512c696821b
| 33,602 |
def version():
"""donghuangzhong version"""
return "0.0.1"
|
ad5d9834dddad46c2f4add31f46ea470bf370304
| 33,604 |
def list_dot(a, b):
"""
    Returns the Euclidean inner product of two iterable data structures.
"""
try:
if len(a) == len(b):
temp = 0
for i in range(len(a)):
temp += a[i]*b[i]
return(temp)
else:
raise ValueError("The length of a and b must be the same!")
    except Exception:
PE.PrintException()
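# Usage sketch (not part of the original snippet):
print(list_dot([1, 2, 3], [4, 5, 6]))  # 32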
|
80dcdb22ed76a9cfe9750deb55971548efe4380e
| 33,605 |
import struct
def pack_bytes(payload):
"""Optimally pack a byte string according to msgpack format"""
pl = len(payload)
if pl < (2**8):
prefix = struct.pack('BB', 0xC4, pl)
elif pl < (2**16):
prefix = struct.pack('>BH', 0xC5, pl)
else:
prefix = struct.pack('>BI', 0xC6, pl)
return prefix + payload
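# Usage sketch (not part of the original snippet): short payloads get the one-byte bin 8 header.
packed = pack_bytes(b'hello')
print(packed[:2])  # b'\xc4\x05' (bin 8 marker, length 5)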
|
eaea52c44a766d74d0aa10e1da20e70f49b624f6
| 33,606 |
import torch
def disparity_consistency_src_to_tgt(meshgrid_homo, K_src_inv, disparity_src,
G_tgt_src, K_tgt, disparity_tgt):
"""
:param xyz_src_B3N: Bx3xN
:param G_tgt_src: Bx4x4
:param K_tgt: Bx3x3
:param disparity_tgt: Bx1xHxW
:return:
"""
B, _, H, W = disparity_src.size()
depth_src = torch.reciprocal(disparity_src)
xyz_src_B3N = get_xyz_from_depth(meshgrid_homo, depth_src, K_src_inv).view(B, 3, H*W)
xyz_tgt_B3N = transform_G_xyz(G_tgt_src, xyz_src_B3N, is_return_homo=False)
K_xyz_tgt_B3N = torch.matmul(K_tgt, xyz_tgt_B3N)
pxpy_tgt_B2N = K_xyz_tgt_B3N[:, 0:2, :] / K_xyz_tgt_B3N[:, 2:, :] # Bx2xN
pxpy_tgt_mask = torch.logical_and(
torch.logical_and(pxpy_tgt_B2N[:, 0:1, :] >= 0,
pxpy_tgt_B2N[:, 0:1, :] <= W - 1),
torch.logical_and(pxpy_tgt_B2N[:, 1:2, :] >= 0,
pxpy_tgt_B2N[:, 1:2, :] <= H - 1)
) # B1N
disparity_src = torch.reciprocal(xyz_tgt_B3N[:, 2:, :]) # Bx1xN
disparity_tgt = gather_pixel_by_pxpy(disparity_tgt, pxpy_tgt_B2N) # Bx1xN
depth_diff = torch.abs(disparity_src - disparity_tgt)
return torch.mean(depth_diff[pxpy_tgt_mask])
|
c407085bf10b0f7c67152d7d92f55a4984520766
| 33,607 |
import re
def split (properties):
""" Given a property-set of the form
v1/v2/...vN-1/<fN>vN/<fN+1>vN+1/...<fM>vM
Returns
v1 v2 ... vN-1 <fN>vN <fN+1>vN+1 ... <fM>vM
Note that vN...vM may contain slashes. This is resilient to the
substitution of backslashes for slashes, since Jam, unbidden,
sometimes swaps slash direction on NT.
"""
    assert isinstance(properties, (str, list))
def split_one (properties):
pieces = re.split (__re_slash_or_backslash, properties)
result = []
for x in pieces:
if not get_grist (x) and len (result) > 0 and get_grist (result [-1]):
result = result [0:-1] + [ result [-1] + '/' + x ]
else:
result.append (x)
return result
if isinstance (properties, str):
return split_one (properties)
result = []
for p in properties:
result += split_one (p)
return result
|
8b15697f6ae15b2fb634144987893ca04eabcccc
| 33,608 |
def abort_behavior(token):
""" Abort behavior identified with the token """
return True, stop_nodenetrunner(behavior_token_map[token])
|
faee270933a225ab376fef9b3ede589960e9c594
| 33,609 |
def limit_to_value_max(value_max, value):
"""
    :param value_max: (int) magnitude that should not be exceeded
    :param value: (int) actual value
    :return: the value clamped to the range [-value_max, value_max]
"""
if value > value_max:
return value_max
elif value < -value_max:
return -value_max
else:
return value
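# Usage sketch (not part of the original snippet):
print(limit_to_value_max(10, 15))   # 10
print(limit_to_value_max(10, -15))  # -10
print(limit_to_value_max(10, 3))    # 3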
|
a568bc1febe9a0cb6115efb4c95c0e1705787bfe
| 33,610 |
def reference_col(
tablename, nullable=False, pk_name="id", foreign_key_kwargs=None, column_kwargs=None
):
"""Column that adds primary key foreign key reference.
Usage: ::
category_id = reference_col('category')
category = relationship('Category', backref='categories')
"""
foreign_key_kwargs = foreign_key_kwargs or {}
column_kwargs = column_kwargs or {}
return Column(
db.ForeignKey(f"{tablename}.{pk_name}", **foreign_key_kwargs),
nullable=nullable,
**column_kwargs,
)
|
17da349907c2764b4d4a205a9a5a4e4ff9d02484
| 33,611 |
import numpy as np
def extract_municipality_hashtags(df):
""" This function takes a twitter dataframe as an input then the output is the dataframe with 2 new columns namely a hashtag
column and a municipality column.
Example
------
    if the tweet contains the @mention '@CityPowerJhb' then the corresponding output in the municipality column should be
    Johannesburg.
    The function also extracts hashtags and saves them under the hashtags column as a list in the dataframe.
"""
mun_dict = {
'@CityofCTAlerts' : 'Cape Town',
'@CityPowerJhb' : 'Johannesburg',
'@eThekwiniM' : 'eThekwini' ,
'@EMMInfo' : 'Ekurhuleni',
'@centlecutility' : 'Mangaung',
'@NMBmunicipality' : 'Nelson Mandela Bay',
'@CityTshwane' : 'Tshwane'
}
g = df['Tweets'].str.findall(r'@.*?(?=\s|$)') # finds all @mentions
    df['municipality'] = g.apply(lambda x: [mun_dict[m] for m in mun_dict.keys() if m in x]) # produces the values for the keys found in the tweets
df["municipality"]= df['municipality'].apply(lambda x: ''.join(x) if len(x) > 0 else np.nan) # removes the list format to string format
df['hashtags'] = df['Tweets'].str.findall(r'#.*?(?=\s|$)') # finds all the hashtags and stores them in the newly created hashtags column
df["hashtags"]= df['hashtags'].apply(lambda x: ','.join(x).lower().split(',') if len(x) > 0 else np.nan) # makes all the hashtags lowercase
return df
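# Usage sketch (not part of the original snippet):
import pandas as pd
tweets = pd.DataFrame({'Tweets': ['No water again @CityPowerJhb #watercrisis', 'Just a quiet day']})
out = extract_municipality_hashtags(tweets)
print(out['municipality'].tolist())  # ['Johannesburg', nan]
print(out['hashtags'].tolist())      # [['#watercrisis'], nan]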
|
1c58e3154f57ad82a8129c5ed765a622b12b8d08
| 33,612 |
import math
def vertices_homography(vertices, H):
"""Apply projective transformation (homography) on a sequence of points.
Parameters:
vertices: List of (x, y) tuples.
A list for projective transformation.
H: A homography matrix.
Return:
vertices_homo: List of (x, y) tuples.
The list after projective transformation.
"""
vertices_homo = []
for vertex in vertices:
pt_homo = point_homography(vertex, H)
pt_homo = tuple([math.floor(i) for i in pt_homo])
vertices_homo.append(pt_homo)
return vertices_homo
|
ad0b1cd397d0b01a0f333d872b4f8a8d2e5f0736
| 33,613 |
from tensorflow.keras.layers import Input, Convolution2D
from tensorflow.keras.models import Model
def SRCNNv2(input_shape, depth_multiplier=1, multi_output=False):
"""
    conv 9-64 then 7-64 then 5-32 then 7-1 -> 1.006 at 120 epochs
    conv 9-128 then 7-64 then 5-32 then 7-16 then 9-1 -> 1.007 at 130 epochs
    @ multi_output : set to True to predict two output channels
"""
inputs = Input(input_shape, name="inputs")
conv1 = Convolution2D(filters=64, kernel_size=9, padding="same", name="conv1", activation="relu")(inputs)
conv2 = Convolution2D(filters=64, kernel_size=7, padding="same", name="conv2", activation="relu")(conv1)
#conv3 = Convolution2D(filters=64, kernel_size=3, padding="same", name="conv3", activation="relu")(conv2)
mapping = Convolution2D(filters=32, kernel_size=5, padding="same", name="mapping", activation="relu")(conv2)
#mapping2 = Convolution2D(filters=16, kernel_size=7, padding="same", name="mapping2", activation="relu")(mapping)
if multi_output:
out = Convolution2D(filters=2, kernel_size=5, padding="same", name="output", activation="sigmoid")(mapping)
else:
out = Convolution2D(filters=1, kernel_size=5, padding="same", name="output", activation="sigmoid")(mapping)
return Model(inputs, out)
|
752d4e7da9f62a532db326c9eb68d91369a44dd9
| 33,614 |
import json
import numpy as np
def parse_labels(string, bb_label_mapping, static_label):
"""Returns array of rectangles geometry and their labels
Arguments:
string {str} -- JSON string
bb_label_mapping {dict} -- Mapping from color to label
static_label {list} -- List of labels valid for the whole image
Returns:
ndarray -- array containing rectangle information
"""
try:
data = json.loads(string)
except:
return None
scale = 1
img_width = 1
img_height = 1
props = []
for obj in data["objects"]:
if obj["type"] == "image":
# print('img')
# print(obj['scaleX'],obj['scaleY'],obj['width'],obj['height'])
# scale = obj['scaleX']
img_width = obj["width"]
img_height = obj["height"]
for obj in data["objects"]:
if obj["type"] == "rect":
# scale_factor = obj['scaleX'] / scale
try:
label = bb_label_mapping[obj["stroke"]]
label = reverse_static_label_mapping[label]
except:
raise RuntimeError(f'Could not find bb_label_mapping for {obj["stroke"]}')
item = [obj["right"], obj["bottom"], obj["left"], obj["top"]]
item = np.array(item)
# convert ltwh to corner points (ltrb)
# item[0] = item[0] + item[2]
# item[1] = item[1] + item[3]
# item = scale_factor * item
# item[0], item[2] = item[0] / img_width, item[2] / img_width
# item[1], item[3] = item[1] / img_height, item[3] / img_height
item = item.tolist()
item += [label]
props.append(item)
if static_label is not None:
for item in static_label:
props.append([None, None, None, None, item])
# return (np.array(props))
return props
|
3a671abaac1faa326d0faa7e618249b5f17cd705
| 33,615 |
def spike_profile(*args, **kwargs):
""" Computes the spike-distance profile :math:`S(t)` of the given
spike trains. Returns the profile as a PieceWiseConstLin object. The
SPIKE-values are defined positive :math:`S(t)>=0`.
Valid call structures::
spike_profile(st1, st2) # returns the bi-variate profile
spike_profile(st1, st2, st3) # multi-variate profile of 3 spike trains
spike_trains = [st1, st2, st3, st4] # list of spike trains
spike_profile(spike_trains) # profile of the list of spike trains
spike_profile(spike_trains, indices=[0, 1]) # use only the spike trains
# given by the indices
The multivariate spike-distance profile is defined as the average of all
pairs of spike-trains:
.. math:: <S(t)> = \\frac{2}{N(N-1)} \\sum_{<i,j>} S^{i, j}`,
where the sum goes over all pairs <i,j>
:returns: The spike-distance profile :math:`S(t)`
:rtype: :class:`.PieceWiseConstLin`
"""
if len(args) == 1:
return spike_profile_multi(args[0], **kwargs)
elif len(args) == 2:
return spike_profile_bi(args[0], args[1])
else:
return spike_profile_multi(args)
|
ffaff8b0e1e3f81dcbbf8cb0762224dc3850b2b3
| 33,616 |
import re
import pandas
def wig_to_dataframe(infile, step, format):
"""Read a wig file into a Pandas dataframe
infile(str): Path to file
Returns:
Dataframe
"""
fs = open(infile, 'r')
coverage_data = []
pos = 0
chr = ""
for line in fs.readlines():
try:
f = float(line)
coverage_data.append([chr, f, pos])
pos += 5000
except ValueError:
reresult = re.search("chrom=(\w*)", line) # find chromosome name in line
if reresult:
last_pos = [chr, 0, 249255001] # writen in every set to give same scale when plotting
coverage_data.append(last_pos)
chr = reresult.group(1) # start working on next chromosome
pos =0
fs.close()
df = pandas.DataFrame(coverage_data, columns= format)
return df
|
07873b340b450ef3d0eb3d7715afb9b204a8277e
| 33,617 |
from corehq.apps.users.models import CommCareUser
def get_all_commcare_users_by_domain(domain):
"""Returns all CommCareUsers by domain regardless of their active status"""
def get_ids():
for flag in ['active', 'inactive']:
key = [flag, domain, CommCareUser.__name__]
for user in CommCareUser.get_db().view(
'users/by_domain',
startkey=key,
endkey=key + [{}],
reduce=False,
include_docs=False
):
yield user['id']
return imap(CommCareUser.wrap, iter_docs(CommCareUser.get_db(), get_ids()))
|
2b9209ac899b73eb534ba98cd5930bb0dc4749c2
| 33,618 |
import numpy as np
def print_cycles_info(data):
"""
Print various information about cycles.
"""
n_cycles = len(data.cycles)
output('number of cycles:', n_cycles)
if not n_cycles:
return data
slengths = sorted(set(data.cycles_lengths))
lhist, lbins = np.histogram(data.cycles_lengths,
bins=slengths + [slengths[-1] + 1])
output('histogram of cycle lengths (length: count [duration interval]):')
for ib, lbin in enumerate(lbins[:-1]):
ii = np.where(data.cycles_lengths == lbin)[0]
cdt = data.cycles_dt[ii]
output(' % 5d: % 5d [%10.5e, %10.5e]'
% (lbin, lhist[ib], cdt.min(), cdt.max()))
i0 = data.cycles_lengths.min()
i1 = np.where(data.cycles_lengths == i0)[0]
output('shortest cycle length: %d (in %d cycle(s))' % (i0, len(i1)))
cdt = data.cycles_dt[i1]
output(' duration in [%10.5e, %10.5e]' % (cdt.min(), cdt.max()))
i0 = data.cycles_lengths.max()
i1 = np.where(data.cycles_lengths == i0)[0]
output('longest cycle length: %d (in %d cycle(s))' % (i0, len(i1)))
cdt = data.cycles_dt[i1]
output(' duration in [%10.5e, %10.5e]' % (cdt.min(), cdt.max()))
output('data min., mean, max. in the longest cycle(s):')
for ii in i1:
strain = data.strain[data.cycles[ii]]
stress = data.stress[data.cycles[ii]]
output(' cycle:', ii)
output(' strain: %10.5e, %10.5e, %10.5e'
% (strain.min(), strain.mean(), strain.max()))
output(' stress: %10.5e, %10.5e, %10.5e'
% (stress.min(), stress.mean(), stress.max()))
return data
|
d22d827d966ff467de232818edd7854c20e12bb9
| 33,619 |
def get_block_size(sigma = 1.5):
"""
    Returns the neighborhood size (block_size) to be used for obtaining the
    Harris points. The value is fixed to the one corresponding to the use of
    Gaussian masks with sigma 1.5. The size of the Gaussian mask is
    6*1.5+1.
"""
return int(6*sigma+1)
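# Usage sketch (not part of the original snippet):
print(get_block_size())     # 10
print(get_block_size(2.0))  # 13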
|
52f4aa88580252ab9c0f7a1840ac3097166c3930
| 33,620 |
from typing import Callable
from typing import Union
from typing import Tuple
import typing
import numpy as np
def approxZeroNewton(f: Callable[[float], float], df: Callable[[float], float], ddf: Callable[[float], float], a: Union[int, float], b: Union[int, float], epsilon: float, iteration: int) -> Tuple[float, int]:
"""
Approximates the solution to f(x) = 0 in range [a, b] using Newton's method
:param f: callable function of variable x
:param df: callable derivate of f (f')
:param ddf: callable second-order derivate of f (f'')
:param a: left range bound
:param b: right range bound
:param epsilon: the desired precision (stop condition)
:param iteration: maximum iterations count limit (to prevent an infinite loop)
    :raises ValueError: when input types are wrong or when df or ddf does not keep the same sign at both range bounds
:return: a tuple containing (in order): an approximate solution x, iterations made count
"""
if not isType(a, [int, float]) or not isType(b, [int, float]) or not isType(f, Callable) or not isType(df, typing.Callable) \
or not isType(ddf, Callable) or not isType(epsilon, float) or not isType(iteration, int) or epsilon < 0 or iteration < 0 or b < a or np.sign(f(a)) * np.sign(f(b)) >= 0:
raise ValueError("Wrong input types")
if np.sign(df(a)) * np.sign(df(b)) <= 0:
        raise ValueError(
            "Df must have the same sign at both ends of the range [a, b]")
if np.sign(ddf(a)) * np.sign(ddf(b)) <= 0:
        raise ValueError(
            "Ddf must have the same sign at both ends of the range [a, b]")
x0 = a
for x0 in [a, b]:
if(f(x0) * ddf(x0) > 0):
# initial condition found
break
i = 0
while np.abs(f(x0)) > epsilon and i < iteration:
x0 = x0 - f(x0) / df(x0)
i += 1
return x0, i
|
f15e37f581f2f040af8b2c7edcec11ebce75c93f
| 33,622 |
def chain_data(symbol, info=None):
"""Gets chain data for stock. INSTANT. Includes possible expiration dates for options."""
assert type(symbol) == str
return robin_stocks.options.get_chains(symbol, info)
|
04cdb028420fbabdd1a4951762a5a6fea24a821a
| 33,623 |
import numpy as np
def convert_Cf2manningn(Cf, h):
"""
Convert the friction coefficient Cf to the Manning's n
"""
n = h**(1 / 6) * np.sqrt(Cf / g)
return n
|
6552425ed1deea8ea93b226e1cbd19df40d3e5af
| 33,624 |
def lstmemory_unit(input,
name=None,
size=None,
param_attr=None,
act=None,
gate_act=None,
state_act=None,
mixed_bias_attr=None,
lstm_bias_attr=None,
mixed_layer_attr=None,
lstm_layer_attr=None,
get_output_layer_attr=None):
"""
Define calculations that a LSTM unit performs in a single time step.
This function itself is not a recurrent layer, so that it can not be
directly applied to sequence input. This function is always used in
recurrent_group (see layers.py for more details) to implement attention
mechanism.
Please refer to **Generating Sequences With Recurrent Neural Networks**
for more details about LSTM. The link goes as follows:
.. _Link: https://arxiv.org/abs/1308.0850
.. math::
i_t & = \\sigma(W_{xi}x_{t} + W_{hi}h_{t-1} + W_{ci}c_{t-1} + b_i)
f_t & = \\sigma(W_{xf}x_{t} + W_{hf}h_{t-1} + W_{cf}c_{t-1} + b_f)
c_t & = f_tc_{t-1} + i_t tanh (W_{xc}x_t+W_{hc}h_{t-1} + b_c)
o_t & = \\sigma(W_{xo}x_{t} + W_{ho}h_{t-1} + W_{co}c_t + b_o)
h_t & = o_t tanh(c_t)
The example usage is:
.. code-block:: python
lstm_step = lstmemory_unit(input=[layer1],
size=256,
act=TanhActivation(),
gate_act=SigmoidActivation(),
state_act=TanhActivation())
:param input: input layer name.
:type input: LayerOutput
:param name: lstmemory unit name.
:type name: basestring
:param size: lstmemory unit size.
:type size: int
:param param_attr: Parameter config, None if use default.
:type param_attr: ParameterAttribute
    :param act: lstm final activation type
    :type act: BaseActivation
    :param gate_act: lstm gate activation type
    :type gate_act: BaseActivation
    :param state_act: lstm state activation type.
:type state_act: BaseActivation
:param mixed_bias_attr: bias parameter attribute of mixed layer.
False means no bias, None means default bias.
:type mixed_bias_attr: ParameterAttribute|False
:param lstm_bias_attr: bias parameter attribute of lstm layer.
False means no bias, None means default bias.
:type lstm_bias_attr: ParameterAttribute|False
:param mixed_layer_attr: mixed layer's extra attribute.
:type mixed_layer_attr: ExtraLayerAttribute
:param lstm_layer_attr: lstm layer's extra attribute.
:type lstm_layer_attr: ExtraLayerAttribute
:param get_output_layer_attr: get output layer's extra attribute.
:type get_output_layer_attr: ExtraLayerAttribute
:return: lstmemory unit name.
:rtype: LayerOutput
"""
if size is None:
assert input.size % 4 == 0
        size = input.size // 4
out_mem = memory(name=name, size=size)
state_mem = memory(name="%s_state" % name, size=size)
with mixed_layer(
name="%s_input_recurrent" % name,
size=size * 4,
bias_attr=mixed_bias_attr,
layer_attr=mixed_layer_attr,
act=IdentityActivation()) as m:
m += identity_projection(input=input)
m += full_matrix_projection(input=out_mem, param_attr=param_attr)
lstm_out = lstm_step_layer(
name=name,
input=m,
state=state_mem,
size=size,
bias_attr=lstm_bias_attr,
act=act,
gate_act=gate_act,
state_act=state_act,
layer_attr=lstm_layer_attr)
get_output_layer(
name='%s_state' % name,
input=lstm_out,
arg_name='state',
layer_attr=get_output_layer_attr)
return lstm_out
|
5e4ec7203b58d44b7b07c865ea7683994869dd90
| 33,625 |
def _GetPrivateIpv6GoogleAccess(dataproc, private_ipv6_google_access_type):
"""Get PrivateIpv6GoogleAccess enum value.
Converts private_ipv6_google_access_type argument value to
PrivateIpv6GoogleAccess API enum value.
Args:
dataproc: Dataproc API definition
private_ipv6_google_access_type: argument value
Returns:
PrivateIpv6GoogleAccess API enum value
"""
if private_ipv6_google_access_type == 'inherit-subnetwork':
return dataproc.messages.GceClusterConfig.PrivateIpv6GoogleAccessValueValuesEnum(
'INHERIT_FROM_SUBNETWORK')
if private_ipv6_google_access_type == 'outbound':
return dataproc.messages.GceClusterConfig.PrivateIpv6GoogleAccessValueValuesEnum(
'OUTBOUND')
if private_ipv6_google_access_type == 'bidirectional':
return dataproc.messages.GceClusterConfig.PrivateIpv6GoogleAccessValueValuesEnum(
'BIDIRECTIONAL')
if private_ipv6_google_access_type is None:
return None
raise exceptions.ArgumentError(
'Unsupported --private-ipv6-google-access-type flag value: ' +
private_ipv6_google_access_type)
|
56c830257ce996716a6dea7d205dca00e06ab6a9
| 33,626 |
def retry_(f, ex, times=3, interval=1, on_error=lambda e, x: None, *args, **kwargs):
"""
Call a function and try again if it throws a specified exception.
    :param function f: The function to retry
:param ex: The class of the exception to catch, or an iterable of classes
:type ex: class or iterable
:param int times: The maximum number of times to retry
:param float interval: How long to sleep in between attempts in seconds
:param function on_error: A function to be called when
``f`` throws an exception.
If ``on_error()`` takes no parameters, it will be called
without arguments.
If ``on_error(exception)`` takes one parameter,
it will be called with the exception that was raised.
If ``on_error(exception, retry_count)`` takes two parameters,
it will be called with the exception that was raised and the
number of previous attempts (starting at 0).
A typical use of ``on_error`` would be to log the exception.
Any other arguments are forwarded to ``f``.
:return: The final return value of the function ``f``.
:raises TimeoutError: The function did not succeed
within the specified timeout.
"""
return exec_(f, ex, lambda _: True, times, float("inf"), interval, on_error, *args, **kwargs)
|
e28d39dfee43c9c651b174f87acdd077920f3ed9
| 33,627 |
import numpy as np
import tensorflow as tf
from sklearn.decomposition import PCA
from sklearn import linear_model
def pca_analysis(model, data):
"""Run PCA analysis on model to visualize hidden layer activity.
To get the values of the intermediate layer, a new model needs to be
created. This model takes the normal input from the RNN, and returns the
output of the rnn layer. The values of the hidden layer can then be found
using the 'predict()' method.
"""
# Unpack train data dict
X = data['X']
Y = data['Y']
# --- Get hidden layer activations and run PCA ---
# Create new model and predict input
inputs = model.input
outputs = [model.layers[0].output, model.layers[1].output]
act_model = tf.keras.Model(inputs=inputs, outputs=outputs)
activations = act_model.predict(X)
# Format activations into dimensions x observations
# Input dimensions: (obs x time x units)
n_inputs = X.shape[2]
n_obs = X.shape[0]
n_ts = X.shape[1]
n_rnn = activations[0].shape[2]
A = np.transpose(activations[0], (2, 0, 1)) # Now (units x obs x time)
Y = np.transpose(activations[1], (2, 0, 1))
A = A.reshape((n_rnn, -1), order='C') # Now (units x (obs*time))
Y = Y.reshape((n_inputs, -1), order='C')
# Run PCA (note that this is actually PPCA)
n_pcs = 20
pca = PCA(n_components=n_pcs)
pca.fit(A.T)
# --- Figure 1: Variance explained ---
# Setup figure
fh, ax_h = pt.create_subplot(n_row=1, n_col=1)
# Plot fraction of variance explained
idx = 0
dims = np.arange(pca.n_components_) + 1
ax_h[idx].plot(dims, pca.explained_variance_ratio_, color='k', marker='.')
ax_h[idx].set_xlabel('PCA dim.')
ax_h[idx].set_ylabel('Fraction of variance explained')
ax_h[idx].set_title('Fraction of variance explained')
fig_name = 'FracVarExplained'
fh.savefig('./results/BCISimulation_PCAResults_{}.pdf'.format(fig_name))
# --- Figure 2: 2D projections of the PCA representation ---
# Setup figure. Define dimensions to plot
plot_dim = [[0, 1], [0, 2], [1, 2], [2, 3]]
n_row = 2
n_col = len(plot_dim)
fh, ax_h = pt.create_subplot(n_row=n_row, n_col=n_col)
# Find indices of specific trials to plot. Here we want to focus on a
# pair of diametrically-opposed targets (0 and 180 degrees)
start_pos = X[:, 0, :]
start_ang = np.rad2deg(np.arctan2(start_pos[:, 1], start_pos[:, 0]))
mask = start_ang < 0
start_ang[mask] = start_ang[mask] + 360
targ_ang = [0, 180]
targ_idx = [np.argmin(np.abs(start_ang - ang)) for ang in targ_ang]
# Iterate over trials
Z = np.zeros((n_obs, n_ts, n_pcs))
n_samp = np.zeros((n_obs), dtype=int)
for i in range(n_obs):
# Get data from current trial and find PCA representation
A_trial = activations[0][i, :, :]
Z_trial = pca.transform(A_trial)
Z[i, :, :] = Z_trial
# Limit to valid portion of the trajectory
Z_trial = Z_trial[data['onset'][i]:data['offset'][i]]
n_samp[i] = Z_trial.shape[0]
# Iterate over dimensions and plot
for ax, d in zip(ax_h[0:n_col], plot_dim):
plot_trajectory(Z_trial[:, d].T, X[i, 0, :], ax)
# If trial is to be highlighted, plot in a separate set of axes
if i in targ_idx:
for ax, d in zip(ax_h[n_col:], plot_dim):
plot_trajectory(Z_trial[:, d].T, X[i, 0, :], ax)
# Set axis labels
for ax, d in zip(ax_h, plot_dim * 2):
ax.set_xlabel('PC {}'.format(d[0] + 1))
ax.set_ylabel('PC {}'.format(d[1] + 1))
ax.set_title('Dim {} vs Dim {}'.format(d[0] + 1, d[1] + 1))
# Save figure
fig_name = '2DProj'
fh.savefig('./results/BCISimulation_PCAResults_{}.pdf'.format(fig_name))
# --- Figure 3: Linear mapping of PCA representation ---
# Get data to fit linear model
n_samp_all = np.sum(n_samp)
Z_mat = np.zeros((n_samp_all, n_pcs))
Y_mat = np.zeros((n_samp_all, n_inputs))
ctr = 0
for i in range(n_obs):
onset_idx = np.arange(data['onset'][i], data['offset'][i])
ctr_idx = np.arange(ctr, (ctr + len(onset_idx)))
Z_mat[ctr_idx, :] = Z[i, onset_idx, :]
Y_mat[ctr_idx, :] = data['Y'][i, onset_idx, :]
ctr = ctr_idx[-1] + 1
# Fit linear model -- do this independently for the X and Y dimensions
reg_mdl = linear_model.LinearRegression(fit_intercept=False)
reg_mdl.fit(Z_mat, Y_mat)
r2 = reg_mdl.score(Z_mat, Y_mat)
print('Linear fit: r2 = {}'.format(r2))
W = reg_mdl.coef_
# Plot predicted trajectories
fh, ax_h = pt.create_subplot(n_row=1, n_col=1)
for i in range(n_obs):
# Predict cursor position from hidden unit activity
Z_temp = Z[i, data['onset'][i]:data['offset'][i], :]
        # reg_mdl.predict(Z_temp) is equivalent to Z_temp @ W.T since no intercept was fit
        y_pred = reg_mdl.predict(Z_temp)
plot_trajectory(y_pred.T, data['X'][i, 0, :], ax_h[0])
# Format plot axes
ax_h[0].set_title('Linear model - predicted trajectories')
ax_h[0].set_xlabel('X position')
ax_h[0].set_ylabel('Y position')
# Save figure
fig_name = 'LinearMapping'
fh.savefig('./results/BCISimulation_PCAResults_{}.pdf'.format(fig_name))
return None
|
00ec016e4c47cd15b2570457b835990ae9aef2e2
| 33,628 |
def get_plugin_history(name):
"""
Get history of results for single plugin
:param name: name of the plugin
:type name: string
"""
plugin = smokerd.pluginmgr.get_plugin(name)
results = []
for res in plugin.result:
res = standardized_api_list(res)
results.append({'result': res})
return results
|
64225c04d13ee228c0a375c78ff805c0dcd56bb4
| 33,629 |
def _parse_instance_info(node):
"""Gets the instance and driver specific Node deployment info.
This method validates whether the 'instance_info' and 'driver_info'
property of the supplied node contains the required information for
this driver to deploy images to the node.
:param node: a single Node.
:returns: A dict with the instance_info and driver_info values.
:raises: MissingParameterValue, image_source is missing in node's
instance_info. Also raises same exception if kernel/ramdisk is
missing in instance_info for non-glance images.
"""
info = {}
info['image_source'] = node.instance_info.get('image_source')
is_whole_disk_image = node.driver_internal_info.get('is_whole_disk_image')
if not is_whole_disk_image:
if not service_utils.is_glance_image(info['image_source']):
info['kernel'] = node.instance_info.get('kernel')
info['ramdisk'] = node.instance_info.get('ramdisk')
error_msg = _("Cannot validate PXE bootloader. Some parameters were "
"missing in node's instance_info.")
deploy_utils.check_for_missing_params(info, error_msg)
return info
|
65e687704ad5fa70f8fc23eaf73f3c48eec9e17c
| 33,630 |
def str2polynomial(string):
""" Get a string, return a polynomial """
try:
parts = advanced_split(string, '+', '-', contain=True)
terms = [str2term(each) for each in parts]
return Polynomial(*terms)
except:
raise Exception('Example input: -5x_1^2*y_1^3+6x_2^2*y_2^4-x_3^1*y_3^1')
|
a52641bcbbc67159f5b73bbbc91ba84ea38cb223
| 33,632 |
def transpose_2d(array):
"""Transpose an array represented as an iterable of iterables."""
return list(map(list, zip_equal(*array)))
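# Usage sketch (not part of the original snippet): assumes zip_equal behaves like zip but
# raises on unequal row lengths.
print(transpose_2d([[1, 2, 3], [4, 5, 6]]))  # [[1, 4], [2, 5], [3, 6]]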
|
48249de78d7d7c591f6d9fc8d79e184d3f291b49
| 33,633 |
def get_test_examples(args):
"""See base class."""
src = file2list(args.src_data)
trg = file2list(args.trg_data)
return _create_examples(src, trg, "test")
|
b1353a7b2bb87379c71c025f7abeb6142c55cf30
| 33,634 |
import numpy as np
def precrec_unvoted(preds, gts, radius, pred_rphi=False, gt_rphi=False):
"""
The "unvoted" precision/recall, meaning that multiple predictions for the same ground-truth are NOT penalized.
- `preds` an iterable (scans) of iterables (per scan) containing predicted x/y or r/phi pairs.
- `gts` an iterable (scans) of iterables (per scan) containing ground-truth x/y or r/phi pairs.
- `radius` the cutoff-radius for "correct", in meters.
- `pred_rphi` whether `preds` is r/phi (True) or x/y (False).
- `gt_rphi` whether `gts` is r/phi (True) or x/y (False).
Returns a pair of numbers: (precision, recall)
"""
# Tested against other code.
npred, npred_hit, ngt, ngt_hit = 0.0, 0.0, 0.0, 0.0
    for ps, gts_scan in zip(preds, gts):  # avoid shadowing the outer `gts`
        # Distance between each ground-truth and predictions
        assoc = np.zeros((len(gts_scan), len(ps)))
        for ip, p in enumerate(ps):
            for igt, gt in enumerate(gts_scan):
                px, py = rphi_to_xy(*p) if pred_rphi else p
                gx, gy = rphi_to_xy(*gt) if gt_rphi else gt
                assoc[igt, ip] = np.hypot(px-gx, py-gy)
        # Now cutting it off at `radius`, we can get all we need.
        assoc = assoc < radius
        npred += len(ps)
        npred_hit += np.count_nonzero(np.sum(assoc, axis=0))
        ngt += len(gts_scan)
ngt_hit += np.count_nonzero(np.sum(assoc, axis=1))
return (
npred_hit/npred if npred > 0 else np.nan,
ngt_hit/ngt if ngt > 0 else np.nan
)
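# Usage sketch (not part of the original snippet): one scan with x/y coordinates.
prec, rec = precrec_unvoted(preds=[[(0.0, 0.0), (5.0, 5.0)]], gts=[[(0.1, 0.0)]], radius=0.5)
print(prec, rec)  # 0.5 1.0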
|
eff8aef552999db2377c9d053c548a705f07bf3a
| 33,636 |
def get_weapon_objects(json_load):
"""creates weapon objects by iterating over the json load
and making an object of each dictionary, then returns
a list of all the objects
"""
weapon_object_list = []
for weapon_dict in json_load:
# weapon_dict is a dictionary which has data for one weapon
weapon = Weapon(**weapon_dict)
weapon_object_list.append(weapon)
return weapon_object_list
|
e3b7309b4267ce4f237db3e7d6e17c69b187a1fc
| 33,637 |
def _t_P(P):
"""Define the boundary between Region 2 and 3, T=f(P)
>>> "%.2f" % _t_P(16.52916425)
'623.15'
"""
n=[0, 0.34805185628969e3, -0.11671859879975e1, 0.10192970039326e-2,0.57254459862746e3, 0.1391883977870e2]
return n[4]+((P-n[5])/n[3])**0.5
|
196f4fae80d9425b0f3a06213c21f77d3049e401
| 33,638 |
import inspect
def add_as_function(cls):
""" Decorator for classes. Automatically adds functional interface for `call` method of class.
For example, `ConvBlock` class is transformed into `conv_block` function, while
`Conv1DTranspose` class is transformed into `conv1d_transpose` function.
"""
name = cls.__name__
func_name = ''.join('_' + c.lower()
if (c.isupper() and (i != len(name)-1) and name[i+1].islower()) else c.lower()
for i, c in enumerate(name)).strip('_')
def func(inputs, *args, **kwargs):
# We also want to use `training` or `is_training` indicators as arguments for call
call_args = [inputs]
training = kwargs.get('training') or kwargs.get('is_training')
if (training is not None) and (len(inspect.getfullargspec(cls.__call__)[0]) > 2):
call_args.append(training)
return cls(*args, **kwargs)(*call_args)
func.__doc__ = cls.__doc__
module = inspect.getmodule(inspect.stack()[1][0])
setattr(module, func_name, func)
return cls
|
38f2e604e03e5a356450569bbfe7d0764bd784cb
| 33,639 |
def remove_from_cart(request):
"""
Remove product from cart
"""
product_id = int(request.POST['product_id'])
    # Check whether the user session has a cart (the session may already have been flushed);
    # otherwise create an empty cart for the user
if 'cart_id' in request.session:
cart_id = int(request.session['cart_id'])
cart = Cart.get_cart(cart_id)
cart.remove_item(product_id)
else:
cart = Cart()
if request.is_ajax():
default_currency = get_default_currency(request)
return render(request, 'sales/cart_basket.html', {'cart': cart, 'default_currency': default_currency})
return HttpResponseRedirect(reverse('sales_checkout_cart'))
|
7a5fe35bce0d8ad7adb00c6b8a5677099c728c14
| 33,640 |
def preresnet164bn_svhn(classes=10, **kwargs):
"""
PreResNet-164(BN) model for SVHN from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_preresnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="preresnet164bn_svhn",
**kwargs)
|
ebad863e846fd865772e93daf1225dd71654fc6a
| 33,641 |
from typing import List
import numpy as np
def value_map_distribution(value_map: dict, bounds: List[float] = None):
"""Percent of values that fall in ranges.
Args:
value_map: dict, value map
        bounds: list of float, boundaries to count values within
Returns:
dist: dict, distribution values
"""
if not bounds:
bounds = list(np.linspace(0, 1, 11))
bounds[-1] += 0.01
values = collect_values(value_map)
total = len(values)
dist = {}
for low, up in zip(bounds[:-1], bounds[1:]):
count = len([v for v in values if ((v >= low) and (v < up))])
dist[(low, up)] = count/total
return dist
|
09988024007327ee26f27f43d9dc647e78a78915
| 33,643 |