content | sha1 | id
---|---|---|
def euclidean3d(v1, v2):
"""Faster implementation of euclidean distance for the 3D case."""
    if not (len(v1) == 3 and len(v2) == 3):
print("Vectors are not in 3D space. Returning None.")
return None
return np.sqrt((v1[0] - v2[0]) ** 2 + (v1[1] - v2[1]) ** 2 + (v1[2] - v2[2]) ** 2) | 89facc15567a7ed0138dee09ef1824ba40bb58a8 | 7,329 |
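A minimal usage sketch, assuming the euclidean3d() record above is in scope and that numpy is imported as np:
import numpy as np
p1 = [0.0, 0.0, 0.0]
p2 = [1.0, 2.0, 2.0]
d = euclidean3d(p1, p2)  # -> 3.0
assert np.isclose(d, np.linalg.norm(np.subtract(p1, p2)))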
def blast_seqs(seqs,
blast_constructor,
blast_db=None,
blast_mat_root=None,
params={},
add_seq_names=True,
out_filename=None,
WorkingDir=None,
SuppressStderr=None,
SuppressStdout=None,
input_handler=None,
HALT_EXEC=False
):
"""Blast list of sequences.
seqs: either file name or list of sequence objects or list of strings or
single multiline string containing sequences.
WARNING: DECISION RULES FOR INPUT HANDLING HAVE CHANGED. Decision rules
    for data are as follows. If it's a list, treat as lines, unless
add_seq_names is true (in which case treat as list of seqs). If it's a
string, test whether it has newlines. If it doesn't have newlines, assume
it's a filename. If it does have newlines, it can't be a filename, so
assume it's a multiline string containing sequences.
If you want to skip the detection and force a specific type of input
handler, use input_handler='your_favorite_handler'.
add_seq_names: boolean. if True, sequence names are inserted in the list
of sequences. if False, it assumes seqs is a list of lines of some
proper format that the program can handle
"""
# set num keep
if blast_db:
params["-d"] = blast_db
if out_filename:
params["-o"] = out_filename
ih = input_handler or guess_input_handler(seqs, add_seq_names)
blast_app = blast_constructor(
params=params,
blast_mat_root=blast_mat_root,
InputHandler=ih,
WorkingDir=WorkingDir,
SuppressStderr=SuppressStderr,
SuppressStdout=SuppressStdout,
HALT_EXEC=HALT_EXEC)
return blast_app(seqs) | ce22f90fe3092a2c792478d7a4818d67fd13a753 | 7,330 |
def merge_dicts(dicts, handle_duplicate=None):
"""Merge a list of dictionaries.
    Invoke handle_duplicate(key, val1, val2) when two dicts map the
same key to different values val1 and val2, maybe logging the
duplication.
"""
if not dicts:
return {}
if len(dicts) == 1:
return dicts[0]
if handle_duplicate is None:
return {key: val for dict_ in dicts for key, val in dict_.items()}
result = {}
for dict_ in dicts:
for key, val in dict_.items():
if key in result and val != result[key]:
handle_duplicate(key, result[key], val)
continue
result[key] = val
return result | 44c06ab30bb76920ff08b5978a6aa271abd3e449 | 7,331 |
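A short usage sketch, assuming merge_dicts() from the record above is in scope; log_duplicate is a hypothetical callback defined here:
def log_duplicate(key, old, new):
    # Called only when the same key maps to two different values; the first value is kept.
    print(f"duplicate key {key!r}: keeping {old!r}, ignoring {new!r}")
merged = merge_dicts([{"a": 1, "b": 2}, {"b": 3, "c": 4}], handle_duplicate=log_duplicate)
# merged == {"a": 1, "b": 2, "c": 4}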
from datetime import datetime, timezone
def _timestamp(line: str) -> Timestamp:
"""Returns the report timestamp from the first line"""
start = line.find("GUIDANCE") + 11
text = line[start : start + 16].strip()
timestamp = datetime.strptime(text, r"%m/%d/%Y %H%M")
return Timestamp(text, timestamp.replace(tzinfo=timezone.utc)) | 7e3083c6dec766fe681e82555daa59ba7f5166b5 | 7,332 |
def start_qpsworkers(languages, worker_hosts):
"""Starts QPS workers as background jobs."""
if not worker_hosts:
# run two workers locally (for each language)
workers=[(None, 10000), (None, 10010)]
elif len(worker_hosts) == 1:
# run two workers on the remote host (for each language)
workers=[(worker_hosts[0], 10000), (worker_hosts[0], 10010)]
else:
        # run one worker on each remote host (for each language)
workers=[(worker_host, 10000) for worker_host in worker_hosts]
return [create_qpsworker_job(language,
shortname= 'qps_worker_%s_%s' % (language,
worker_idx),
port=worker[1] + language.worker_port_offset(),
remote_host=worker[0])
for language in languages
for worker_idx, worker in enumerate(workers)] | 3b53693a292027fd82d808b6d609fb3276f1bc2a | 7,333 |
def validate_frame_range(shots, start_time, end_time, sequence_time=False):
"""
    Verify whether the given frame range overlaps the existing shots' timeline
    range. If it overlaps any shot tail, the start frame is redefined at the
    end of that shot. If it overlaps any shot head, all shots (and their
    animation) behind the range are pushed back to ensure the space is free to
    insert the new shot.
:param list[str] shots: Maya shot node names.
:param int start_time:
:param int end_time:
:param bool sequence_time:
Operate on Camera Sequencer's timeline instead of Maya timeline.
:rtype: tuple[int, int]
:return: Free range.
"""
start_attribute = "sequenceStartFrame" if sequence_time else "startFrame"
end_attribute = "sequenceEndFrame" if sequence_time else "endFrame"
length = end_time - start_time
# Offset start_time to ensure it is not overlapping any shot tail.
for shot in shots:
shot_start = cmds.getAttr(shot + "." + start_attribute)
shot_end = cmds.getAttr(shot + "." + end_attribute)
# Ensure the time is not in the middle of a shot.
if shot_start <= start_time <= shot_end:
start_time = shot_end + 1
break
# Detect overlapping shots from heads.
end_time = start_time + length
overlapping_shots = filter_shots_from_range(
shots=shots,
start_frame=start_time,
end_frame=end_time,
sequence_time=sequence_time)
if not overlapping_shots:
return start_time, end_time
# Push back overlapping shots.
offset = max(
end_time - cmds.getAttr(shot + "." + start_attribute) + 1
for shot in overlapping_shots)
if sequence_time:
        # Operating on the Camera Sequencer timeline doesn't require adapting
        # the animation.
shift_shots_in_sequencer(shots, offset, after=end_time - offset)
return start_time, end_time
shift_shots(shots, offset, after=end_time - offset)
curves = cmds.ls(type=ANIMATION_CURVES_TYPES)
if curves:
hold_animation_curves(curves, end_time - offset, offset)
return start_time, end_time | d287ad393c80b899cfba3bac92ea9717918aed4a | 7,336 |
def sparse_add(sv1, sv2):
"""dict, dict -> dict
Returns a new dictionary that is the sum of the other two.
    >>> sparse_add(sv1, sv2)
{0: 5, 1: 6, 2: 9}
"""
newdict = {}
keys = set(sv1.keys()) | set(sv2.keys())
for key in keys:
x = sv1.get(key, 0) + sv2.get(key, 0)
newdict[key] = x
return (newdict) | ced3420a585084a246ad25f7686fb388f2c05542 | 7,337 |
def return_flagger(video_ID):
"""
In GET request
- Returns the username of the user that flagged the video with the corresponding video ID from the FLAGS table.
"""
if request.method == 'GET':
return str(db.get_flagger(video_ID)) | c8ca346b60ffa322847b5444c39dcbc43c66701a | 7,338 |
def get_all_hits():
"""Retrieves all hits.
"""
hits = [ i for i in get_connection().get_all_hits()]
pn = 1
total_pages = 1
while pn < total_pages:
pn = pn + 1
print "Request hits page %i" % pn
temp_hits = get_connection().get_all_hits(page_number=pn)
hits.extend(temp_hits)
return hits | 23f4da652d9e89dd0401ac4d8ccf2aa4f2660a5e | 7,339 |
import socket
def create_socket(
host: str = "", port: int = 14443, anidb_server: str = "", anidb_port: int = 0
) -> socket.socket:
"""Create a socket to be use to communicate with the server.
This function is called internally, so you only have to call it if you want to change the default parameters.
    :param host: local host to bind the socket to, defaults to "" (i.e. all interfaces)
:type host: str, optional
:param port: local port to bind the socket to, defaults to 14443
:type port: int, optional
:param anidb_server: aniDB server name, defaults to environment ANIDB_SERVER
:type anidb_server: str, optional
    :param anidb_port: anidb port, defaults to environment ANIDB_PORT
:type anidb_port: int, optional
:return: The created socket.
:rtype: socket.socket
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((host, port))
anidb_server = value_or_error("ANIDB_SERVER", anidb_server)
anidb_port = value_or_error("ANIDB_PORT", anidb_port)
s.connect((anidb_server, anidb_port))
logger.info(
f"Created socket on UDP %s:%d => %s:%d", host, port, anidb_server, anidb_port
)
global _conn
_conn = s
return s | 6b8cc3aa19af4582dbdf96d781d8ae64399165cf | 7,340 |
def aten_eq(mapper, graph, node):
""" 构造判断数值是否相等的PaddleLayer。
TorchScript示例:
%125 : bool = aten::eq(%124, %123)
参数含义:
%125 (bool): 对比后结果。
%124 (-): 需对比的输入1。
%123 (-): 需对比的输入2。
"""
scope_name = mapper.normalize_scope_name(node)
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
    # Get the list of outputs of the current node
current_outputs = [output_name]
    # Process input 0, i.e. %124
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs, scope_name)
layer_inputs["x"] = inputs_name[0]
x_value = list(node.inputs())[0]
x_type = x_value.type()
    # Process input 1, i.e. %123
mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs, scope_name)
layer_inputs["y"] = inputs_name[1]
y_value = list(node.inputs())[1]
y_type = y_value.type()
    # Get the list of inputs of the current node
current_inputs = list(layer_inputs.values())
graph.add_layer("prim.eq", inputs=layer_inputs, outputs=layer_outputs, scope_name=scope_name)
return current_inputs, current_outputs | c08fc3120130e949cbba4e147888fabca38c89e3 | 7,341 |
from typing import Tuple
def _create_simple_tf1_conv_model(
use_variable_for_filter=False) -> Tuple[core.Tensor, core.Tensor]:
"""Creates a basic convolution model.
This is intended to be used for TF1 (graph mode) tests.
Args:
use_variable_for_filter: Setting this to `True` makes the filter for the
conv operation a `tf.Variable`.
Returns:
in_placeholder: Input tensor placeholder.
output_tensor: The resulting tensor of the convolution operation.
"""
in_placeholder = array_ops.placeholder(dtypes.float32, shape=[1, 3, 4, 3])
filters = random_ops.random_uniform(shape=(2, 3, 3, 2), minval=-1., maxval=1.)
if use_variable_for_filter:
filters = variables.Variable(filters)
output_tensor = nn_ops.conv2d(
in_placeholder,
filters,
strides=[1, 1, 2, 1],
dilations=[1, 1, 1, 1],
padding='SAME',
data_format='NHWC')
return in_placeholder, output_tensor | 21deebe2de004554a5bdc6559ecf6319947f8109 | 7,342 |
def plot_diversity_bootstrapped(diversity_df):
"""Plots the result of bootstrapped diversity"""
div_lines = (
alt.Chart()
.mark_line()
.encode(
x="year:O",
y=alt.Y("mean(score)", scale=alt.Scale(zero=False)),
color="parametre_set",
)
)
div_bands = (
alt.Chart()
.mark_errorband(extent="ci")
.encode(
x="year:O",
y=alt.Y("score", scale=alt.Scale(zero=False)),
color="parametre_set",
)
)
out = alt.layer(
div_lines, div_bands, data=diversity_df, height=150, width=400
).facet(row="diversity_metric", column="test")
return out | 8b91d1d6d1f7384dcbea6c398a9bde8dfb4aae39 | 7,343 |
def escape(s):
"""
    Returns the given string with ampersands, quotes and angle brackets encoded.
    >>> escape('<b>oh hai</b>')
    '&lt;b&gt;oh hai&lt;/b&gt;'
    >>> escape("Quote's Test")
    'Quote&#39;s Test'
    """
    mapping = (
        ('&', '&amp;'),
        ('<', '&lt;'),
        ('>', '&gt;'),
        ('"', '&quot;'),
        ("'", '&#39;'),
)
for tup in mapping:
s = s.replace(tup[0], tup[1])
return s | 2b4971c4e87e613cad457dde6d62806d299cdbcd | 7,344 |
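A quick usage sketch, assuming escape() above (with the HTML-entity mapping as reconstructed) is in scope:
escape("Tom & Jerry's <b>show</b>")
# -> 'Tom &amp; Jerry&#39;s &lt;b&gt;show&lt;/b&gt;'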
def _get_db_columns_for_model(model):
"""
    Return a list of column names for the passed model.
"""
return [field.column for field in model._meta._fields()] | 181999f28ca659bf296bcb4dda7ac29ddfe61071 | 7,345 |
def get_UV(filename):
"""
Input: filename (including path)
    Output: (wav_leftedges, wav_rightedges, surface radiance) in units of (nm, nm, photons/cm2/sec/nm)
"""
wav_leftedges, wav_rightedges, wav, toa_intensity, surface_flux, SS,surface_intensity, surface_intensity_diffuse, surface_intensity_direct=np.genfromtxt(filename, skip_header=1, skip_footer=0, usecols=(0, 1, 2,3,4,5,6,7,8), unpack=True)
surface_intensity_photons=surface_intensity*(wav/(hc))
return wav_leftedges, wav_rightedges, surface_intensity_photons | cd514ee29ba3ac17cdfcb95c53d4b5df4f4cad80 | 7,346 |
import json
def load_chunks(chunk_file_location, chunk_ids):
"""Load patch paths from specified chunks in chunk file
Parameters
----------
    chunk_file_location : str
        Path to the JSON chunk file to load
    chunk_ids : list of int
        The IDs of chunks to retrieve patch paths from
Returns
-------
list of str
Patch paths from the chunks
"""
patch_paths = []
with open(chunk_file_location) as f:
data = json.load(f)
chunks = data['chunks']
for chunk in data['chunks']:
if chunk['id'] in chunk_ids:
patch_paths.extend([[x,chunk['id']] for x in chunk['imgs']])
if len(patch_paths) == 0:
raise ValueError(
f"chunks {tuple(chunk_ids)} not found in {chunk_file_location}")
return patch_paths | c01ec6076141356ae6f3a1dc40add28638739359 | 7,347 |
def get_model_input(batch, input_id=None):
"""
Get model input from batch
batch: batch of model input samples
"""
if isinstance(batch, dict) or isinstance(batch, list):
assert input_id is not None
return batch[input_id]
else:
return batch | 1b12ee86257bfbd5ab23404251bed39c0021f461 | 7,349 |
def issym(b3):
"""test if a list has equal number of positive
and negative values; zeros belong to both. """
npos = 0; nneg = 0
for item in b3:
if (item >= 0):
npos +=1
if (item <= 0):
nneg +=1
if (npos==nneg):
return True
else:
return False | e8cc57eec5bc9ef7f552ad32bd6518daa2882a3e | 7,350 |
def predictOneVsAll(all_theta, X):
"""will return a vector of predictions
for each example in the matrix X. Note that X contains the examples in
rows. all_theta is a matrix where the i-th row is a trained logistic
regression theta vector for the i-th class. You should set p to a vector
of values from 1..K (e.g., p = [1 3 1 2] predicts classes 1, 3, 1, 2
for 4 examples)
"""
m = X.shape[0]
# You need to return the following variables correctly
p = np.zeros((m, 1))
# probs = np.zeros((all_theta.shape[0], X.shape[0]))
# ====================== YOUR CODE HERE ======================
# Instructions: Complete the following code to make predictions using
# your learned logistic regression parameters (one-vs-all).
# You should set p to a vector of predictions (from 1 to
# num_labels).
#
# Hint: This code can be done all vectorized using the max function.
# In particular, the np.argmax function can return the index of the max
# element, for more information see 'numpy.argmax' on the numpy website.
# If your examples are in rows, then, you can use
# np.argmax(probs, axis=1) to obtain the max for each row.
#
p = np.argmax(sigmoid(np.dot(all_theta, X.T)), axis=0) + 1
# for i in range(all_theta.shape[0]):
# probs[i,:] = sigmoid(X @ all_theta[i,:])
# p = (np.argmax(probs, axis=0) + 1)
# =========================================================================
return p | 32bf9d386ef84debe75781a52335588da360b269 | 7,351 |
from typing import Tuple
from typing import List
def lecture_produit(ligne : str) -> Tuple[str, int, float]:
"""Précondition : la ligne de texte décrit une commande de produit.
Renvoie la commande produit (nom, quantité, prix unitaire).
"""
lmots : List[str] = decoupage_mots(ligne)
nom_produit : str = lmots[0]
quantite : int = int(lmots[1])
prix_unitaire : float = float(lmots[2])
return (nom_produit, quantite, prix_unitaire) | 355721522b9711f9bc206f4ab63af6121ef9b3d0 | 7,352 |
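A usage sketch, assuming lecture_produit() above is in scope and that its decoupage_mots() helper splits the line into whitespace-separated words:
lecture_produit("pommes 3 0.5")
# -> ("pommes", 3, 0.5)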
def affinity_matrix(test_specs):
"""Generate a random user/item affinity matrix. By increasing the likehood of 0 elements we simulate
a typical recommending situation where the input matrix is highly sparse.
Args:
users (int): number of users (rows).
items (int): number of items (columns).
ratings (int): rating scale, e.g. 5 meaning rates are from 1 to 5.
spars: probability of obtaining zero. This roughly corresponds to the sparseness.
of the generated matrix. If spars = 0 then the affinity matrix is dense.
Returns:
np.array: sparse user/affinity matrix of integers.
"""
np.random.seed(test_specs["seed"])
# uniform probability for the 5 ratings
s = [(1 - test_specs["spars"]) / test_specs["ratings"]] * test_specs["ratings"]
s.append(test_specs["spars"])
P = s[::-1]
# generates the user/item affinity matrix. Ratings are from 1 to 5, with 0s denoting unrated items
X = np.random.choice(
test_specs["ratings"] + 1, (test_specs["users"], test_specs["items"]), p=P
)
Xtr, Xtst = numpy_stratified_split(
X, ratio=test_specs["ratio"], seed=test_specs["seed"]
)
return (Xtr, Xtst) | 8e033d230cbc8c21d6058bcada14aca9fb1d7e68 | 7,353 |
def get_wm_desktop(window):
"""
Get the desktop index of the window.
:param window: A window identifier.
:return: The window's virtual desktop index.
:rtype: util.PropertyCookieSingle (CARDINAL/32)
"""
return util.PropertyCookieSingle(util.get_property(window,
'_NET_WM_DESKTOP')) | 050fe4a97a69317ba875aa59b3bf4144b9e2f83c | 7,354 |
def get_parents(tech_id, model_config):
"""
Returns the full inheritance tree from which ``tech`` descends,
ending with its base technology group.
To get the base technology group,
use ``get_parents(...)[-1]``.
Parameters
----------
    tech_id : str
model_config : AttrDict
"""
tech = model_config.techs[tech_id].essentials.parent
parents = [tech]
while True:
tech = model_config.tech_groups[tech].essentials.parent
if tech is None:
break # We have reached the top of the chain
parents.append(tech)
return parents | 7220a57b770232e335001a0dab74ca2d8197ddfa | 7,355 |
import hashlib
import urllib
def profile_avatar(user, size=200):
"""Return a URL to the user's avatar."""
try: # This is mostly for tests.
profile = user.profile
except (Profile.DoesNotExist, AttributeError):
avatar = settings.STATIC_URL + settings.DEFAULT_AVATAR
profile = None
else:
if profile.is_fxa_migrated:
avatar = profile.fxa_avatar
elif profile.avatar:
avatar = profile.avatar.url
else:
avatar = settings.STATIC_URL + settings.DEFAULT_AVATAR
if avatar.startswith("//"):
avatar = "https:%s" % avatar
if user and hasattr(user, "email"):
email_hash = hashlib.md5(force_bytes(user.email.lower())).hexdigest()
else:
email_hash = "00000000000000000000000000000000"
url = "https://secure.gravatar.com/avatar/%s?s=%s" % (email_hash, size)
# If the url doesn't start with http (local dev), don't pass it to
# to gravatar because it can't use it.
if avatar.startswith("https") and profile and profile.is_fxa_migrated:
url = avatar
elif avatar.startswith("http"):
url = url + "&d=%s" % urllib.parse.quote(avatar)
return url | a810af7f7abb4a5436a2deed8c4e1069aa5d504c | 7,358 |
from tqdm import tqdm
def union(graphs, use_tqdm: bool = False):
"""Take the union over a collection of graphs into a new graph.
Assumes iterator is longer than 2, but not infinite.
:param iter[BELGraph] graphs: An iterator over BEL graphs. Can't be infinite.
:param use_tqdm: Should a progress bar be displayed?
:return: A merged graph
:rtype: BELGraph
Example usage:
>>> import pybel
>>> g = pybel.from_bel_script('...')
>>> h = pybel.from_bel_script('...')
>>> k = pybel.from_bel_script('...')
>>> merged = union([g, h, k])
"""
it = iter(graphs)
if use_tqdm:
it = tqdm(it, desc='taking union')
try:
target = next(it)
except StopIteration as e:
raise ValueError('no graphs given') from e
try:
graph = next(it)
except StopIteration:
return target
else:
target = target.copy()
left_full_join(target, graph)
for graph in it:
left_full_join(target, graph)
return target | 8ea9bae0386c497a5fe31c8bd44099ee450b2b2a | 7,360 |
def get_month_n_days_from_cumulative(monthly_cumulative_days):
"""
Transform consecutive number of days in monthly data to actual number of days.
    EnergyPlus monthly results report a cumulative number of days for each month.
    Raw data reports the table as 31, 59, ...; this function calculates and returns
    the actual number of days for each month: 31, 28, ...
"""
old_num = monthly_cumulative_days.pop(0)
m_actual_days = [old_num]
for num in monthly_cumulative_days:
new_num = num - old_num
m_actual_days.append(new_num)
old_num += new_num
return m_actual_days | 5ede033023d357a60ba5eb7e9926325d24b986e8 | 7,361 |
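A minimal usage sketch, assuming get_month_n_days_from_cumulative() above is in scope:
cumulative = [31, 59, 90]                     # Jan, Jan+Feb, Jan+Feb+Mar
get_month_n_days_from_cumulative(cumulative)  # -> [31, 28, 31]
# Note that the function pops from (i.e. mutates) the input list.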
def get_text(name):
"""Returns some text"""
return "Hello " + name | bff30de2184c84f6ed1c4c1831ed9fd782f479c9 | 7,362 |
import re
def apply_template(assets):
"""
Processes the template.
    Used to overwrite the ``docutils.writers._html_base.Writer.apply_template``
    method.
``apply_template(<assets>)``
``assets`` (dictionary)
Assets to add at the template, see ``ntdocutils.writer.Writer.assets``.
returns
function - Template processor.
Example
=======
.. code:: python
apply_template({
"before_styles": '<link rel="stylesheet" href="styles.css" />',
"scripts": '<script src="script.js"></script>'
'<script src="other_script.js"></script>'
})
"""
def apply_template(self):
template_file = open(self.document.settings.template, "rb")
template = str(template_file.read(), "utf-8")
template_file.close()
        # Escape ``%`` characters that are not special fields
pattern = r"%(?!\((" + "|".join(self.visitor_attributes) + r")\)s)"
template = re.subn(pattern, "%%", template)[0]
subs = self.interpolation_dict()
return template.format(**assets) % subs
return apply_template | 51042e25f701935d668d91a923155813ce60b381 | 7,363 |
def harvest(post):
"""
Filter the post data for just the funding allocation formset data.
"""
data = {k: post[k] for k in post if k.startswith("fundingallocation")}
return data | 67f400caf87f2accab30cb3c519e7014792c84d7 | 7,364 |
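A usage sketch, assuming harvest() above is in scope; a plain dict stands in for request.POST, and the formset key names are hypothetical:
post = {
    "fundingallocation-0-amount": "100",
    "fundingallocation-0-source": "grant",
    "csrfmiddlewaretoken": "abc123",
}
harvest(post)
# -> {"fundingallocation-0-amount": "100", "fundingallocation-0-source": "grant"}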
def model2(x, input_size, output_size):
"""! Fully connected model [InSize]x800x[OutSize]
Implementation of a [InSize]x800x[OutSize] fully connected model.
Parameters
----------
@param x : placeholder for input data
@param input_size : size of input data
@param output_size : size of output data
Returns
-------
@retval logits : output
@retval logits_dup : a copy of output
@retval w_list : trainable parameters
@retval w_list_dup : a copy of trainable parameters
"""
#==================================================================================================================
## model definition
mu = 0
sigma = 0.2
weights = {
'wfc': tf.Variable(tf.truncated_normal(shape=(input_size,800), mean = mu, stddev = sigma, seed = 1)),
'out': tf.Variable(tf.truncated_normal(shape=(800,output_size), mean = mu, stddev = sigma, seed = 1))
}
biases = {
'bfc': tf.Variable(tf.zeros(800)),
'out': tf.Variable(tf.zeros(output_size))
}
# Flatten input.
c_flat = flatten(x)
# Layer 1: Fully Connected. Input = input_size. Output = 800.
# Activation.
fc = fc_relu(c_flat, weights['wfc'], biases['bfc'])
# Layer 2: Fully Connected. Input = 800. Output = output_size.
logits = tf.add(tf.matmul(fc, weights['out']), biases['out'])
w_list = []
for w,b in zip(weights, biases):
w_list.append(weights[w])
w_list.append(biases[b])
#==================================================================================================================
## duplicate the model used in ProxSVRG
weights_dup = {
'wfc': tf.Variable(tf.truncated_normal(shape=(input_size,800), mean = mu, stddev = sigma, seed = 1)),
'out': tf.Variable(tf.truncated_normal(shape=(800,output_size), mean = mu, stddev = sigma, seed = 1))
}
biases_dup = {
'bfc': tf.Variable(tf.zeros(800)),
'out': tf.Variable(tf.zeros(output_size))
}
# Flatten input.
c_flat_dup = flatten(x)
# Layer 1: Fully Connected. Input = input_size. Output = 800.
# Activation.
fc_dup = fc_relu(c_flat_dup, weights_dup['wfc'], biases_dup['bfc'])
# Layer 2: Fully Connected. Input = 800. Output = output_size.
logits_dup = tf.add(tf.matmul(fc_dup, weights_dup['out']), biases_dup['out'])
w_list_dup = []
for w,b in zip(weights_dup, biases_dup):
w_list_dup.append(weights_dup[w])
w_list_dup.append(biases_dup[b])
return logits, logits_dup, w_list, w_list_dup | 74a7f9129865e1d2b6cbfe767c7f218d53ee50e1 | 7,366 |
def cut_bin_depths(
dataset: xr.Dataset,
depth_range: tp.Union[int, float, list] = None
) -> xr.Dataset:
"""
    Return the dataset with bin depths cut, provided depth_range is not outside the depth span.
Parameters
----------
dataset :
depth_range :
min or (min, max) to be included in the dataset.
Bin depths outside this range will be cut.
Returns
-------
dataset with depths cut.
"""
if depth_range:
if not isinstance(depth_range, (list, tuple)):
if depth_range > dataset.depth.max():
l.log(
"depth_range value is greater than the maximum bin depth. Depth slicing aborded."
)
else:
dataset = dataset.sel(depth=slice(depth_range, None))
l.log(f"Bin of depth inferior to {depth_range} m were cut.")
elif len(depth_range) == 2:
if dataset.depth[0] > dataset.depth[-1]:
depth_range.reverse()
if depth_range[0] > dataset.depth.max() or depth_range[1] < dataset.depth.min():
l.log(
"depth_range values are outside the actual depth range. Depth slicing aborted."
)
else:
dataset = dataset.sel(depth=slice(*depth_range))
l.log(
f"Bin of depth inferior to {depth_range[0]} m and superior to {depth_range[1]} m were cut."
)
else:
l.log(
f"depth_range expects a maximum of 2 values but {len(depth_range)} were given. Depth slicing aborted."
)
return dataset | ab4561711d118dc620100bec5e159dc4b7a29f92 | 7,367 |
def create_edgelist(file, df):
"""
creates an edgelist based on genre info
"""
# load edges from the (sub)genres themselves
df1 = (pd
.read_csv(file,
dtype='str'))
# get edges from the book descriptions df
df2 = (df[['title',
'subclass']]
.rename(columns={'title':'Edge_From',
'subclass':'Edge_To'})
.sort_values(by='Edge_To'))
# combine the two dfs
df3 = (df1
.append(df2,
ignore_index=True))
# consistently assign categories
df4 = (df3
.stack()
.astype('category')
.unstack())
# make the categorical values explicit for later convenience
for name in df4.columns:
df4['N' + name] = (df4[name]
.cat
.codes)
return df4 | 9cfba48eca977e8b2e3217078bd6d112c465ea23 | 7,368 |
def CodeRange(code1, code2):
"""
CodeRange(code1, code2) is an RE which matches any character
with a code |c| in the range |code1| <= |c| < |code2|.
"""
if code1 <= nl_code < code2:
return Alt(RawCodeRange(code1, nl_code),
RawNewline,
RawCodeRange(nl_code + 1, code2))
else:
return RawCodeRange(code1, code2) | c63213c63d96361451e441cf6923015238dae8f8 | 7,369 |
def sort_by_date(data):
"""
The sort_by_date function sorts the lists by their datetime
object
:param data: the list of lists containing parsed UA data
:return: the sorted date list of lists
"""
# Supply the reverse option to sort by descending order
return [x[0:6:4] for x in sorted(data, key=itemgetter(4),
reverse=True)] | f8d18b80404edcf141a56f47938ea09531d30df7 | 7,370 |
def get_menu_option():
"""
Function to display menu options and asking the user to choose one.
"""
print("1. View their next 5 fixtures...")
print("2. View their last 5 fixtures...")
print("3. View their entire current season...")
print("4. View their position in the table...")
print("5. View the club roster...")
print("6. View season statistics...")
print("7. View team information...")
print("8. Sign up to your club's weekly newsletter...")
print("9. Calculate odds on next game...")
print()
return input("CHOOSE AN OPTION BELOW BY ENTERING THE MENU NUMBER OR ENTER 'DONE' ONCE YOU ARE FINISHED: ") | 69e71555d9896d0c462b2e7b542ec87aea9213eb | 7,371 |
def pdf(mu_no):
""" the probability distribution function which the number of fibers per MU should follow """
return pdf_unscaled(mu_no) / scaling_factor_pdf | 2d6fd461d12da6b00bbf20de7a7be7d61112014c | 7,372 |
import requests
def get_weather_by_key(key):
"""
Returns weather information for a given database key
Args:
key (string) -- database key for weather information
Returns:
None or Dict
"""
url = "%s/weather/%s.json" % (settings.FIREBASE_URL, key)
r = requests.get(url)
if r.status_code != 200:
return None
return r.json() | 8ab3bfa6b5924b726fef9a9c0b8bd9d47cf9dfc8 | 7,373 |
import warnings
def source_receiver_midpoints(survey, **kwargs):
"""
Calculate source receiver midpoints.
Input:
:param SimPEG.electromagnetics.static.resistivity.Survey survey: DC survey object
Output:
:return numpy.ndarray midx: midpoints x location
:return numpy.ndarray midz: midpoints z location
"""
if not isinstance(survey, dc.Survey):
raise ValueError("Input must be of type {}".format(dc.Survey))
if len(kwargs) > 0:
warnings.warn(
"The keyword arguments of this function have been deprecated."
" All of the necessary information is now in the DC survey class",
DeprecationWarning,
)
# Pre-allocate
midxy = []
midz = []
for ii, source in enumerate(survey.source_list):
tx_locs = source.location
if isinstance(tx_locs, list):
Cmid = (tx_locs[0][:-1] + tx_locs[1][:-1]) / 2
zsrc = (tx_locs[0][-1] + tx_locs[1][-1]) / 2
tx_sep = np.linalg.norm((tx_locs[0][:-1] - tx_locs[1][:-1]))
else:
Cmid = tx_locs[:-1]
zsrc = tx_locs[-1]
Pmids = []
for receiver in source.receiver_list:
rx_locs = receiver.locations
if isinstance(rx_locs, list):
Pmid = (rx_locs[0][:, :-1] + rx_locs[1][:, :-1]) / 2
else:
Pmid = rx_locs[:, :-1]
Pmids.append(Pmid)
Pmid = np.vstack(Pmids)
midxy.append((Cmid + Pmid) / 2)
diffs = np.linalg.norm((Cmid - Pmid), axis=1)
if np.allclose(diffs, 0.0): # likely a wenner type survey.
            midz.append(zsrc - tx_sep / 2 * np.ones_like(diffs))
else:
midz.append(zsrc - diffs / 2)
return np.vstack(midxy), np.hstack(midz) | adec937949a4293d35c7a57aad7125f2e1113794 | 7,374 |
import copy
def fix_source_scale(
transformer, output_std: float = 1, n_samples: int = 1000, use_copy: bool = True,
) -> float:
""" Adjust the scale for a data source to fix the output variance of a
transformer.
The transformer's data source must have a `scale` parameter.
Parameters
----------
transformer
Transformer whose output variance is optimized. This should behave like
`Arma`: it needs to have a `transform` method that can be called like
        `transformer.transform(n_samples, X=source)`; and it needs an attribute called
`default_source`.
output_std
Value to which to fix the transformer's output standard deviation.
n_samples
Number of samples to generate for each optimization iteration.
use_copy
If true, a deep copy of the data source is made for the optimization, so
that the source's random generator is unaffected by this procedure.
Returns the final value for the scale.
"""
output_var = output_std ** 2
source = transformer.default_source
if use_copy:
source_copy = copy.deepcopy(source)
else:
source_copy = source
def objective(scale: float):
source_copy.scale = np.abs(scale)
samples = transformer.transform(n_samples, X=source_copy)
return np.var(samples) / output_var - 1
soln = optimize.root_scalar(
objective, x0=np.sqrt(output_var / 2), x1=np.sqrt(2 * output_var), maxiter=100,
)
source.scale = np.abs(soln.root)
return source.scale | bf2dc0690732ce7677a484afee75fa7701b3d0e8 | 7,375 |
def samplePinDuringCapture(f, pin, clock):
"""\
Configure Arduino to enable sampling of a particular light sensor or audio
signal input pin. Only enabled pins are read when capture() is subsequently called.
:param f: file handle for the serial connection to the Arduino Due
:param pin: The pin to enable.
:param clock: a :class:`dvbcss.clock` clock object
Values for the pin parameter:
* 0 enables reading of light sensor 0 (on Arduino analogue pin 0).
* 1 enables reading of audio input 0 (on Arduino analogue pin 1).
* 2 enables reading of light sensor 1 (on Arduino analogue pin 2).
* 3 enables reading of audio input 1 (on Arduino analogue pin 3).
    :returns: (t1,t2,t3,t4) measuring the specified clock object and arduino clock, as per :func:`writeCmdAndTimeRoundTrip`
See :func:`writeAndTimeRoundTrip` for details of the meaning of the returned round-trip timing data
"""
CMD = CMDS_ENABLE_PIN[pin]
return writeCmdAndTimeRoundTrip(f, clock, CMD) | 246dc76eb07b9240439befdffdc3f31376647a64 | 7,376 |
def year_filter(year = None):
"""
Determine whether the input year is single value or not
Parameters
----------
    year : tuple
        A (start_year, end_year) pair.
Returns
-------
boolean
whether the inputed year is a single value - True
"""
if year[0] == year[1]:
single_year = True
else:
single_year = False
return single_year | 35868a72196015c20517179dc89cd65b5601e969 | 7,377 |
def distance(p1, p2):
"""
Return the Euclidean distance between two QPointF objects.
    Euclidean distance in 2D, computed via Pythagoras' theorem using Qt linear
    algebra objects (QPointF and QVector2D member functions).
"""
if not (isinstance(p1, QPointF) and isinstance(p2, QPointF)):
raise ValueError('ValueError, computing distance p1 or p2 not of Type QPointF')
return toVector(p2 - p1).length() | 2e8b2d8fcbb05b24798c8507bef8a32b8b9468f3 | 7,378 |
import math
import numpy
def make_primarybeammap(gps, delays, frequency, model, extension='png',
plottype='beamsky', figsize=14, directory=None, resolution=1000, zenithnorm=True,
b_add_sources=False):
"""
"""
print("Output beam file resolution = %d , output directory = %s" % (resolution, directory))
# (az_grid, za_grid) = beam_tools.makeAZZA(resolution,'ZEA') #Get grids in radians
(az_grid, za_grid, n_total, dOMEGA) = beam_tools.makeAZZA_dOMEGA(resolution, 'ZEA') # TEST SIN vs. ZEA
az_grid = az_grid * 180 / math.pi
za_grid = za_grid * 180 / math.pi
# az_grid+=180.0
alt_grid = 90 - (za_grid)
obstime = su.time2tai(gps)
# first go from altitude to zenith angle
theta = (90 - alt_grid) * math.pi / 180
phi = az_grid * math.pi / 180
beams = {}
# this is the response for XX and YY
if model == 'analytic' or model == '2014':
# Handles theta and phi as floats, 1D, or 2D arrays (and probably higher dimensions)
beams['XX'], beams['YY'] = primary_beam.MWA_Tile_analytic(theta, phi,
freq=frequency, delays=delays,
zenithnorm=zenithnorm, power=True)
elif model == 'avg_EE' or model == 'advanced' or model == '2015' or model == 'AEE':
beams['XX'], beams['YY'] = primary_beam.MWA_Tile_advanced(theta, phi,
freq=frequency, delays=delays,
power=True)
elif model == 'full_EE' or model == '2016' or model == 'FEE' or model == 'Full_EE':
# model_ver = '02'
# h5filepath = 'MWA_embedded_element_pattern_V' + model_ver + '.h5'
beams['XX'], beams['YY'] = primary_beam.MWA_Tile_full_EE(theta, phi,
freq=frequency, delays=delays,
zenithnorm=zenithnorm, power=True)
# elif model == 'full_EE_AAVS05':
# # h5filepath='/Users/230255E/Temp/_1508_Aug/embedded_element/h5/AAVS05_embedded_element_02_rev0.h5'
# # h5filepath = 'AAVS05_embedded_element_02_rev0.h5'
# beams['XX'], beams['YY'] = primary_beam.MWA_Tile_full_EE(theta, phi,
# freq=frequency, delays=delays,
# zenithnorm=zenithnorm, power=True)
pols = ['XX', 'YY']
# Get Haslam and interpolate onto grid
my_map = get_Haslam(frequency)
mask = numpy.isnan(za_grid)
za_grid[numpy.isnan(za_grid)] = 90.0 # Replace nans as they break the interpolation
sky_grid = map_sky(my_map['skymap'], my_map['RA'], my_map['dec'], gps, az_grid, za_grid)
sky_grid[mask] = numpy.nan # Remask beyond the horizon
# test:
# delays1 = numpy.array([[6, 6, 6, 6,
# 4, 4, 4, 4,
# 2, 2, 2, 2,
# 0, 0, 0, 0],
# [6, 6, 6, 6,
# 4, 4, 4, 4,
# 2, 2, 2, 2,
# 0, 0, 0, 0]],
# dtype=numpy.float32)
# za_delays = {'0': delays1 * 0, '14': delays1, '28': delays1 * 2}
# tile = mwa_tile.get_AA_Cached()
# za_delay = '0'
# (ax0, ay0) = tile.getArrayFactor(az_grid, za_grid, frequency, za_delays[za_delay])
# val = numpy.abs(ax0)
# val_max = numpy.nanmax(val)
# print "VALUE : %.8f %.8f %.8f" % (frequency, val_max[0], val[resolution / 2, resolution / 2])
beamsky_sum_XX = 0
beam_sum_XX = 0
Tant_XX = 0
beam_dOMEGA_sum_XX = 0
beamsky_sum_YY = 0
beam_sum_YY = 0
Tant_YY = 0
beam_dOMEGA_sum_YY = 0
for pol in pols:
# Get gridded sky
print('frequency=%.2f , polarisation=%s' % (frequency, pol))
beam = beams[pol]
beamsky = beam * sky_grid
beam_dOMEGA = beam * dOMEGA
print('sum(beam)', numpy.nansum(beam))
print('sum(beamsky)', numpy.nansum(beamsky))
beamsky_sum = numpy.nansum(beamsky)
beam_sum = numpy.nansum(beam)
beam_dOMEGA_sum = numpy.nansum(beam_dOMEGA)
Tant = numpy.nansum(beamsky) / numpy.nansum(beam)
print('Tant=sum(beamsky)/sum(beam)=', Tant)
if pol == 'XX':
beamsky_sum_XX = beamsky_sum
beam_sum_XX = beam_sum
Tant_XX = Tant
beam_dOMEGA_sum_XX = beam_dOMEGA_sum
if pol == 'YY':
beamsky_sum_YY = beamsky_sum
beam_sum_YY = beam_sum
Tant_YY = Tant
beam_dOMEGA_sum_YY = beam_dOMEGA_sum
filename = '%s_%.2fMHz_%s_%s' % (gps, frequency / 1.0e6, pol, model)
fstring = "%.2f" % (frequency / 1.0e6)
if plottype == 'all':
plottypes = ['beam', 'sky', 'beamsky', 'beamsky_scaled']
else:
plottypes = [plottype]
for pt in plottypes:
if pt == 'beamsky':
textlabel = 'Beam x sky %s (LST %.2f hr), %s MHz, %s-pol, Tant=%.1f K' % (gps,
get_LST(gps),
fstring,
pol,
Tant)
plot_beamsky(beamsky, frequency, textlabel, filename, extension,
obstime=obstime, figsize=figsize, directory=directory)
elif pt == 'beamsky_scaled':
textlabel = 'Beam x sky (scaled) %s (LST %.2f hr), %s MHz, %s-pol, Tant=%.1f K (max T=%.1f K)' % (gps,
get_LST(gps),
fstring,
pol,
Tant,
float(numpy.nanmax(beamsky)))
plot_beamsky(beamsky, frequency, textlabel, filename + '_scaled', extension,
obstime=obstime, figsize=figsize, vmax=numpy.nanmax(beamsky) * 0.4, directory=directory)
elif pt == 'beam':
textlabel = 'Beam for %s, %s MHz, %s-pol' % (gps, fstring, pol)
plot_beamsky(beam, frequency, textlabel, filename + '_beam', extension,
obstime=obstime, figsize=figsize, cbar_label='', directory=directory,
b_add_sources=b_add_sources,
az_grid=az_grid, za_grid=za_grid)
elif pt == 'sky':
textlabel = 'Sky for %s (LST %.2f hr), %s MHz, %s-pol' % (gps, get_LST(gps), fstring, pol)
plot_beamsky(sky_grid, frequency, textlabel, filename + '_sky', extension,
obstime=obstime, figsize=figsize, directory=directory, b_add_sources=b_add_sources,
az_grid=az_grid, za_grid=za_grid)
return (beamsky_sum_XX,
beam_sum_XX,
Tant_XX,
beam_dOMEGA_sum_XX,
beamsky_sum_YY,
beam_sum_YY,
Tant_YY,
beam_dOMEGA_sum_YY) | 371a29412b52b27c69193bed1e945eeed6a988d7 | 7,379 |
import json
def view_page(request, content_id=None):
"""Displays the content in a more detailed way"""
if request.method == "GET":
if content_id:
if content_id.isdigit():
try:
# Get the contents details
content_data = Content.objects.get(pk=int(content_id))
content_data.fire = int(content_data.contents_history.all().aggregate(Avg("vote"))["vote__avg"] * 10) if content_data.contents_history.all().aggregate(Avg("vote"))["vote__avg"] else 0
try:
# Get all the available comments of this particular content
comment_data = content_data.content_comments.all()
if comment_data:
# Convert Data to JSON list
comment_list = json.loads(comment_data[0].comment)
content_comments = []
for a in comment_list:
try:
user = User.objects.get(pk=a["user_id"])
content_comments.append({
"id": a["id"],
"content_id": a["content_id"],
"profile_picture": (user.profile.profile_picture.url).replace("&export=download", "") if user.profile.profile_picture.url else "/static/teeker/assets/default_img/avatar/avataaars.png",
"username": user.username,
"user_id": user.pk,
"comment": a["comment"],
"date": a["date"]
})
except User.DoesNotExist:
print("Broken Comment...")
else:
content_comments = []
except json.JSONDecodeError:
                        content_comments = []
# Check if the content isn't suspended
if content_data.suspended and not request.user.is_staff:
content_data = {
"title": "CONTENT UNAVAILABLE"
}
# Check if the user is logged in
if request.user.is_authenticated:
# Check if the content is in the logged in user's recommended list
try:
if int(content_id) in json.loads(request.user.profile.recommended):
content_data.recommended = True
else:
content_data.recommended = False
except json.JSONDecodeError:
content_data.recommended = False
else:
content_data.recommended = False
except Content.DoesNotExist:
content_data = {
"title": "CONTENT UNAVAILABLE"
}
else:
content_data = {
"title": "CONTENT UNAVAILABLE"
}
else:
content_data = {
"title": "CONTENT UNAVAILABLE"
}
html_content = {
"content_data": content_data,
"content_comments": content_comments
}
return render(request, "teeker/site_templates/view.html", html_content) | 11908d7d0f75377485022ab87a93e7f27b2b626d | 7,380 |
def run_feat_model(fsf_file):
""" runs FSL's feat_model which uses the fsf file to generate
files necessary to run film_gls to fit design matrix to timeseries"""
    clean_fsf = fsf_file.replace('.fsf', '')
cmd = 'feat_model %s'%(clean_fsf)
out = CommandLine(cmd).run()
if not out.runtime.returncode == 0:
return None, out.runtime.stderr
mat = fsf_file.replace('.fsf', '.mat')
return mat, cmd | 4b033ff1aceb60cdf0c39ebfbefc0841dc4df507 | 7,381 |
def exportDSV(input, delimiter = ',', textQualifier = '"', quoteall = 0, newline = '\n'):
"""
PROTOTYPE:
exportDSV(input, delimiter = ',', textQualifier = '\"', quoteall = 0)
DESCRIPTION:
Exports to DSV (delimiter-separated values) format.
ARGUMENTS:
- input is list of lists of data (as returned by importDSV)
- delimiter is character used to delimit columns
- textQualifier is character used to delimit ambiguous data
- quoteall is boolean specifying whether to quote all data or only data
that requires it
RETURNS:
data as string
"""
if not delimiter or type(delimiter) != type(''): raise InvalidDelimiter
    if not textQualifier or type(textQualifier) != type(''): raise InvalidTextQualifier
# double-up all text qualifiers in data (i.e. can't becomes can''t)
data = map(lambda i, q = textQualifier:
map(lambda j, q = q: str(j).replace(q, q * 2), i),
input)
if quoteall: # quote every data value
data = map(lambda i, q = textQualifier:
map(lambda j, q = q: q + j + q, i),
data)
else: # quote only the values that contain qualifiers, delimiters or newlines
data = map(lambda i, q = textQualifier, d = delimiter:
map(lambda j, q = q, d = d: ((j.find(q) != -1 or j.find(d) != -1
or j.find('\n') != -1)
and (q + j + q)) or j, i), data)
# assemble each line with delimiters
data = [delimiter.join(line) for line in data]
# assemble all lines together, separated by newlines
data = newline.join(data)
return data | 28075667459e872ec0713efb834a9c3aa1dc620e | 7,382 |
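A usage sketch, assuming exportDSV() above is in scope:
rows = [["name", "comment"], ["Alice", 'said "hi", then left']]
print(exportDSV(rows))
# name,comment
# Alice,"said ""hi"", then left"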
def DatasetSplit(X, y):
#Creating the test set and validation set.
# separating the target
""" To create the validation set, we need to make sure that the distribution of each class is similar
in both training and validation sets. stratify = y (which is the class or tags of each frame) keeps
the similar distribution of classes in both the training as well as the validation set."""
# creating the training and validation set
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42, test_size=0.2, stratify = y)
# creating dummies of target variable for train and validation set
y_train = pd.get_dummies(y_train)
y_test = pd.get_dummies(y_test)
return X_train, X_test, y_train, y_test | 15c9d6acf51f6535bbd4396be83752b98c6a1fa0 | 7,383 |
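A usage sketch, assuming DatasetSplit() above is in scope together with its implied imports (numpy, pandas and sklearn's train_test_split):
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
X = np.arange(20).reshape(10, 2)    # 10 dummy "frames", 2 features each
y = np.array([0, 1] * 5)            # two balanced classes
X_train, X_test, y_train, y_test = DatasetSplit(X, y)
print(X_train.shape, X_test.shape)  # (8, 2) (2, 2)
print(list(y_train.columns))        # one-hot columns, e.g. [0, 1]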
def parse_children(root):
"""
    :param root: root tag of the .xml file
    :return: set of attribute values and text words that contain a '.'
"""
attrib_list = set()
for child in root:
text = child.text
if text:
text = text.strip(' \n\t\r')
attrib_list = attrib_list | get_words_with_point(text)
attrib_list = attrib_list | parse_children(child)
for attribute_name, attribute_value in child.attrib.items():
if '.' in attribute_value:
attrib_list.add(attribute_value)
"""
returns list of attribute_value
"""
return attrib_list | 5062b39775bdb1b788fa7b324de108367421f743 | 7,384 |
def load_data(ETF):
"""
Function to load the ETF data from a file, remove NaN values and set the Date column as index.
...
Attributes
----------
ETF : filepath
"""
data = pd.read_csv(ETF, usecols=[0,4], parse_dates=[0], header=0)
data.dropna(subset = ['Close', 'Date'], inplace=True)
data_close = pd.DataFrame(data['Close'])
data_close.index = pd.to_datetime(data['Date'])
return data_close | 84b4c20c7d74c7e028e62b0147662e9a54311148 | 7,385 |
def preprocess_LLIL_GOTO(bv, llil_instruction):
""" Replaces integer addresses of llil instructions with hex addresses of assembly """
func = get_function_at(bv, llil_instruction.address)
# We have to use the lifted IL since the LLIL ignores comparisons and tests
lifted_instruction = list(
[k for k in find_lifted_il(func, llil_instruction.address) if k.operation == LowLevelILOperation.LLIL_GOTO]
)[0]
lifted_il = func.lifted_il
llil_instruction.dest = hex(lifted_il[lifted_instruction.dest].address).replace("L", "")
return llil_instruction | 656b6088816779395a84d32fe77e28866618b9ff | 7,386 |
from sanic.response import json
async def get_limited_f_result(request, task_id):
"""
This endpoint accepts the task_id and returns the result if ready.
"""
task_result = AsyncResult(task_id)
result = {
"task_id": task_id,
"task_status": task_result.status,
"task_result": task_result.result
}
return json(result) | e459d1963e2de829802927e3265b41bbe4da6bfe | 7,387 |
def process_addr():
"""Process the bridge IP address/hostname."""
server_addr = request.form.get('server_addr')
session['server_addr'] = server_addr
try:
leap_response = get_ca_cert(server_addr)
session['leap_version'] = leap_response['Body'] \
['PingResponse']['LEAPVersion']
except ConnectionRefusedError:
flash("A connection to %s could not be established. Please check "
"the IP address and try again." % server_addr, 'danger')
return redirect(url_for('wizard')) | 109d3e0652caa5e06fe00702f43640304c30323d | 7,388 |
import requests
import json
from datetime import datetime
import time
def get_bkk_list(request):
"""板块课(通识选修课)"""
myconfig = Config.objects.all().first()
year = (myconfig.nChoose)[0:4]
term = (myconfig.nChoose)[4:]
if term == "1":
term = "3"
elif term == "2":
term = "12"
if myconfig.apichange:
data = {
'xh':request.POST.get("xh"),
'pswd':request.POST.get("pswd"),
'bkk':request.POST.get("bkk")
}
res = requests.post(url=myconfig.otherapi+"/choose/bkk",data=data)
return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
content_type="application/json,charset=utf-8")
if myconfig.maintenance:
return HttpResponse(json.dumps({'err':'教务系统出错维护中,请静待教务系统恢复正常!'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
if request.method == 'POST':
if request.POST:
xh = request.POST.get("xh")
pswd = request.POST.get("pswd")
bkk = request.POST.get("bkk")
else:
return HttpResponse(json.dumps({'err':'请提交正确的post数据'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
if not Students.objects.filter(studentId=int(xh)):
content = ('【%s】[%s]未登录访问板块课' % (datetime.datetime.now().strftime('%H:%M:%S'), xh))
writeLog(content)
return HttpResponse(json.dumps({'err':'还未登录,请重新登录!'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
stu = Students.objects.get(studentId=int(xh))
try:
bkk = "1" if bkk=="2" else "2"
startTime = time.time()
print('【%s】查看了板块课' % stu.name)
JSESSIONID = str(stu.JSESSIONID)
route = str(stu.route)
cookies_dict = {
'JSESSIONID': JSESSIONID,
'route': route
}
cookies = requests.utils.cookiejar_from_dict(cookies_dict)
person = Xuanke(base_url=base_url, cookies=cookies, year=year, term=term)
bkk_list = person.get_bkk_list(bkk)
endTime = time.time()
spendTime = endTime - startTime
if spendTime > 30:
ServerChan = config["ServerChan"]
text = "板块课超时"
if ServerChan == "none":
return HttpResponse(json.dumps({'err':'板块课超时'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
requests.get(ServerChan + 'text=' + text)
return HttpResponse(json.dumps({'err':'板块课超时'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
content = ('【%s】[%s]访问了板块课,耗时%.2fs' % (datetime.datetime.now().strftime('%H:%M:%S'), stu.name, spendTime))
writeLog(content)
return HttpResponse(json.dumps(bkk_list, ensure_ascii=False), content_type="application/json,charset=utf-8")
except Exception as e:
print(e)
content = ('【%s】[%s]访问板块课出错' % (datetime.datetime.now().strftime('%H:%M:%S'), stu.name))
writeLog(content)
if myconfig.isKaptcha:
return get_kaptcha(xh)
else:
sta = update_cookies(request)
person = Xuanke(base_url=base_url, cookies=sta, year=year, term=term)
bkk_list = person.get_bkk_list(bkk)
return HttpResponse(json.dumps(bkk_list, ensure_ascii=False), content_type="application/json,charset=utf-8")
else:
return HttpResponse(json.dumps({'err':'请使用post并提交正确数据'}, ensure_ascii=False),
content_type="application/json,charset=utf-8") | 03010646ce1e83f644f9bf44b8dcb4e5b8355e52 | 7,389 |
import calendar
def mkmonth(year, month, dates, groups):
"""Make an array of data for the year and month given.
"""
cal = calendar.monthcalendar(int(year), month)
for row in cal:
for index in range(len(row)):
day = row[index]
if day == 0:
row[index] = None
else:
                date = '%04d-%02d-%02d' % (year, month, day)
items = dates.get(date, ())
grp = 0
len_items = len(items)
if len_items > 0:
while grp < len(groups):
grp += 1
if len_items <= groups[grp - 1]:
break
row[index] = [day, grp, items, date]
while len(cal) < 6:
cal.append([None] * 7)
return dict(name=calendar.month_name[month], weeks=cal,
                startdate='%04d-%02d' % (year, month)) | 71298d60e852e6045b4ab5c45c2f371ae7049808 | 7,390 |
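A usage sketch, assuming mkmonth() above is in scope; dates maps 'YYYY-MM-DD' strings to lists of items, and groups is a list of size thresholds:
dates = {"2021-03-05": ["meeting", "review"], "2021-03-12": ["release"]}
groups = [1, 3, 10]   # <=1 item -> group 1, <=3 items -> group 2, <=10 items -> group 3
march = mkmonth(2021, 3, dates, groups)
march["weeks"][0][4]  # -> [5, 2, ["meeting", "review"], "2021-03-05"]
march["name"]         # -> "March"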
def is_extension(step_str):
"""Return true if step_str is an extension or Any.
Args:
step_str: the string to evaluate
Returns:
True if step_str is an extension
Raises:
ValueError: if step_str is not a valid step.
"""
if not is_valid_step(step_str):
raise ValueError('Not a valid step in a path: "' + step_str + '"')
return step_str[0] == "(" | a3b30e238b3b8c42b645d18ae370dea501d1f389 | 7,391 |
def diff_list(first, second):
"""
Get difference of lists.
"""
second = set(second)
return [item for item in first if item not in second] | 19975990b5a05433266b3258cd541cca54ab83ac | 7,392 |
from typing import Optional
def validate_dissolution_statement_type(filing_json, legal_type) -> Optional[list]:
"""Validate dissolution statement type of the filing."""
msg = []
dissolution_stmt_type_path = '/filing/dissolution/dissolutionStatementType'
dissolution_stmt_type = get_str(filing_json, dissolution_stmt_type_path)
if legal_type == Business.LegalTypes.COOP.value:
if not dissolution_stmt_type:
msg.append({'error': _('Dissolution statement type must be provided.'),
'path': dissolution_stmt_type_path})
return msg
if not DissolutionStatementTypes.has_value(dissolution_stmt_type):
msg.append({'error': _('Invalid Dissolution statement type.'),
'path': dissolution_stmt_type_path})
return msg
return None | 868c9f0d6b229c303462a4dee7df16f27cd58898 | 7,393 |
from typing import List
def weave(left: List[int], right: List[int]) -> List[List[int]]:
""" Gives all possible combinations of left and right
keeping the original order on left and right """
if not left or not right:
return [left] if left else [right]
left_result: List[List[int]] = weave_helper(left, right)
right_result: List[List[int]] = weave_helper(right, left)
return left_result + right_result | 9a9717e43337802e6cef87a37b7d8d01493ebc8a | 7,395 |
import torch
def compute_rel_attn_value(p_attn, rel_mat, emb, ignore_zero=True):
"""
Compute a part of *attention weight application* and *query-value product*
in generalized RPE.
(See eq. (10) - (11) in the MuseBERT paper.)
Specifically,
- We use distributive law on eq. (11). The function computes the
second term:
$ sum_j (alpha_{ij} * sum_a Emb_a^K(r_{ij}^a)) $
Here,
- b for batch size, h for n_head, vs for vocabulary size.
- dtype is torch.float unless specified.
:param p_attn: (b, d, L_q, L_k)
:param rel_mat: (b, Lq, Lk)
:param emb: (h, vs, d)
:param ignore_zero: bool. Whether to exclude the first vocab.
:return: (b, h, Lq, d)
"""
vs = emb.size(-2)
# bool_relmat: (b, Lq, vs - 1, Lk), dtype: torch.float
bool_relmat = compute_bool_rel_mat(rel_mat, vs, ignore_zero=ignore_zero)
# p_attn: -> (b, d, Lq, 1, 1, Lk)
# bool_relmat: -> (b, 1, L_q, vs - 1, L_k, 1)
# acmlt_p_attn: (b, d, Lq, vs - 1, 1, 1) -> (b, d, Lq, vs - 1)
acmlt_p_attn = \
torch.matmul(p_attn.unsqueeze(-2).unsqueeze(-2),
bool_relmat.unsqueeze(1).unsqueeze(-1)
).squeeze(-1).squeeze(-1)
# acc_p_attn: -> (b, h, Lq, 1, vs - 1)
# emb: -> (1, h, 1, vs, d)
# rel_scores: (b, h, Lq, 1, d) -> (b, h, Lq, d)
start_ind = 1 if ignore_zero else 0
rel_values = \
torch.matmul(acmlt_p_attn.unsqueeze(-2),
emb[:, start_ind:].unsqueeze(0).unsqueeze(-3)
).squeeze(-2)
return rel_values | a39ca9d5933bc334648994fc5211355a496f8126 | 7,396 |
def mock_real_galaxy():
"""Mock real galaxy."""
dm = np.loadtxt(TEST_DATA_REAL_PATH / "dark.dat")
s = np.loadtxt(TEST_DATA_REAL_PATH / "star.dat")
g = np.loadtxt(TEST_DATA_REAL_PATH / "gas_.dat")
gal = core.Galaxy(
m_s=s[:, 0] * 1e10 * u.M_sun,
x_s=s[:, 1] * u.kpc,
y_s=s[:, 2] * u.kpc,
z_s=s[:, 3] * u.kpc,
vx_s=s[:, 4] * (u.km / u.s),
vy_s=s[:, 5] * (u.km / u.s),
vz_s=s[:, 6] * (u.km / u.s),
m_dm=dm[:, 0] * 1e10 * u.M_sun,
x_dm=dm[:, 1] * u.kpc,
y_dm=dm[:, 2] * u.kpc,
z_dm=dm[:, 3] * u.kpc,
vx_dm=dm[:, 4] * (u.km / u.s),
vy_dm=dm[:, 5] * (u.km / u.s),
vz_dm=dm[:, 6] * (u.km / u.s),
m_g=g[:, 0] * 1e10 * u.M_sun,
x_g=g[:, 1] * u.kpc,
y_g=g[:, 2] * u.kpc,
z_g=g[:, 3] * u.kpc,
vx_g=g[:, 4] * (u.km / u.s),
vy_g=g[:, 5] * (u.km / u.s),
vz_g=g[:, 6] * (u.km / u.s),
)
return gal | 7dda66bccb5fcecbe55bd0f3ecb64171748947a6 | 7,398 |
def lend(request):
"""
Lend view.
It receives the data from the lend form, process and validates it,
and reloads the page if everything is OK
Args:
- request (HttpRequest): the request
Returns:
"""
logged_user = get_logged_user(request)
if logged_user is not None and logged_user.user_role == UserRole.LENDER:
d = dict(request.POST)
d['lender_input'] = logged_user.id
errors = Loan.objects.basic_validator(d)
if len(errors) > 0:
for key, value in errors.items():
messages.error(request, value)
else:
borrower = request.POST.get('borrower_input', 0)
amount = request.POST.get('amount_input', 0)
new_loan = Loan.objects.create(
borrower=User.objects.get(id=borrower),
lender=logged_user,
amount=int(amount)
)
messages.info(request, 'Loan executed successfully')
return redirect('lender', id=logged_user.id)
else:
request.session.clear()
return redirect('/') | 59fdf04eafc1772b8ef880a1340af57739a71d25 | 7,399 |
def deprecated() -> None:
"""Run the command and print a deprecated notice."""
LOG.warning("c2cwsgiutils_coverage_report.py is deprecated; use c2cwsgiutils-coverage-report instead")
return main() | ea3309fc308dd969872f7a4630c137e76a3659b0 | 7,400 |
def build_syscall_Linux(syscall, arg_list, arch_bits, constraint=None, assertion = None, clmax=SYSCALL_LMAX, optimizeLen=False):
"""
arch_bits = 32 or 64 :)
"""
# Check args
if( syscall.nb_args() != len(arg_list)):
error("Error. Expected {} arguments, got {}".format(len(syscall.arg_types), len(arg_list)))
return None
# Check args length
for i in range(0,len(arg_list)):
if( not verifyArgType(arg_list[i], syscall.arg_types[i])):
error("Argument error for '{}': expected '{}', got '{}'".format(arg_list[i], syscall.arg_types[i], type(arg_list[i])))
return None
# Check constraint and assertion
if( constraint is None ):
constraint = Constraint()
if( assertion is None ):
assertion = getBaseAssertion()
# Check if we have the function !
verbose("Trying to call {}() function directly".format(syscall.def_name))
func_call = build_call(syscall.function(), arg_list, constraint, assertion, clmax=clmax, optimizeLen=optimizeLen)
if( not isinstance(func_call, str) ):
verbose("Success")
return func_call
else:
if( not constraint.chainable.ret ):
verbose("Coudn't call {}(), try direct syscall".format(syscall.def_name))
else:
verbose("Couldn't call {}() and return to ROPChain".format(syscall.def_name))
return None
# Otherwise do syscall directly
# Set the registers
args = [(Arch.n2r(x[0]), x[1]) for x in zip(syscall.arg_regs, arg_list) + syscall.syscall_arg_regs]
chain = popMultiple(args, constraint, assertion, clmax-1, optimizeLen=optimizeLen)
if( not chain ):
verbose("Failed to set registers for the mprotect syscall")
return None
# Int 0x80
if( arch_bits == 32 ):
syscall_gadgets = search(QueryType.INT80, None, None, constraint, assertion)
# syscall
elif( arch_bits == 64):
syscall_gadgets = search(QueryType.SYSCALL, None, None, constraint, assertion)
if( not syscall_gadgets ):
verbose("Failed to find an 'int 0x80' OR 'syscall' gadget")
return None
else:
chain.addChain(syscall_gadgets[0])
verbose("Success")
return chain | 1fc9e5eadb688e58f2e6ac3de4d678e3040a1086 | 7,401 |
def gamma(x):
"""Diffusion error (normalized)"""
CFL = x[0]
kh = x[1]
return (
1.
/ (-2)
* (
4. * CFL ** 2 / 3
- 7. * CFL / 3
+ (-23. * CFL ** 2 / 12 + 35 * CFL / 12) * np.cos(kh)
+ (2. * CFL ** 2 / 3 - 2 * CFL / 3) * np.cos(2 * kh)
+ (-CFL ** 2 / 12 + CFL / 12) * np.cos(3 * kh)
)
) | c8689e1388338cc4d6b1b135f09db90f0e866346 | 7,402 |
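A quick numerical check, assuming gamma() above is in scope and numpy is imported as np:
import numpy as np
assert abs(gamma([0.5, 0.0])) < 1e-12  # the diffusion error vanishes in the kh -> 0 limit
gamma([0.5, 0.2])                      # small but non-zero error for a resolved mode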
import re
def copylabel(original_name):
"""create names/labels with the sequence (Copy), (Copy 2), (Copy 3), etc."""
copylabel = pgettext_lazy("this is a copy", "Copy")
copy_re = f"\\({copylabel}( [0-9]*)?\\)"
match = re.search(copy_re, original_name)
if match is None:
label = f"{original_name} ({copylabel})"
elif match.groups()[0] is None:
label = re.sub(copy_re, f"({copylabel} 2)", original_name)
else:
n = int(match.groups()[0].strip()) + 1
label = re.sub(copy_re, f"({copylabel} {n})", original_name)
return label | 1f838c33faf347b4219ca23083b664bda01cb9ef | 7,403 |
def load_opts_from_mrjob_confs(runner_alias, conf_paths=None):
"""Load a list of dictionaries representing the options in a given
list of mrjob config files for a specific runner. Returns
``[(path, values), ...]``. If a path is not found, use ``(None, {})`` as
its value.
If *conf_paths* is ``None``, look for a config file in the default
locations (see :py:func:`find_mrjob_conf`).
:type runner_alias: str
:param runner_alias: String identifier of the runner type, e.g. ``emr``,
``local``, etc.
:type conf_paths: list or ``None``
:param conf_path: locations of the files to load
This will only load each config file once, even if it's referenced
from multiple paths due to symlinks.
"""
if conf_paths is None:
results = load_opts_from_mrjob_conf(runner_alias)
else:
# don't include conf files that were loaded earlier in conf_paths
already_loaded = []
# load configs in reversed order so that order of conf paths takes
# precedence over inheritance
results = []
for path in reversed(conf_paths):
results = load_opts_from_mrjob_conf(
runner_alias, path, already_loaded=already_loaded) + results
if runner_alias and not any(conf for path, conf in results):
log.warning('No configs specified for %s runner' % runner_alias)
return results | 6ef2acc7dce0de5e467456d376a52c8078336c55 | 7,404 |
def clut8_rgb888(i):
"""Reference CLUT for wasp-os.
Technically speaking this is not a CLUT because the we lookup the colours
algorithmically to avoid the cost of a genuine CLUT. The palette is
designed to be fairly easy to generate algorithmically.
    The palette includes all 216 web-safe colours, together with 4 greys and
    36 additional colours that target "gaps" at the brighter end of the
    web-safe set. There are 11 greys (plus black and white), although two are
    fairly close together.
:param int i: Index (from 0..255 inclusive) into the CLUT
:return: 24-bit colour in RGB888 format
"""
if i < 216:
rgb888 = ( i % 6) * 0x33
rg = i // 6
rgb888 += (rg % 6) * 0x3300
rgb888 += (rg // 6) * 0x330000
elif i < 252:
i -= 216
rgb888 = 0x7f + (( i % 3) * 0x33)
rg = i // 3
rgb888 += 0x4c00 + ((rg % 4) * 0x3300)
rgb888 += 0x7f0000 + ((rg // 4) * 0x330000)
else:
i -= 252
rgb888 = 0x2c2c2c + (0x101010 * i)
return rgb888 | ca95c95306f7f4762add01f2ffc113f348e29d3b | 7,405 |
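A small sketch showing how the CLUT maps indices to RGB888 values, assuming clut8_rgb888 is in scope; the chosen indices are arbitrary.

# Print a few palette entries as RGB888 hex values.
for i in (0, 5, 35, 215, 216, 251, 252, 255):
    print(f"index {i:3d} -> #{clut8_rgb888(i):06x}")

assert clut8_rgb888(0) == 0x000000    # black (web-safe block)
assert clut8_rgb888(215) == 0xffffff  # white (web-safe block)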
def get_file_from_rcsb(pdb_id,data_type='pdb'):
""" (file_name) -> file_path
fetch pdb or structure factor file for pdb_id from the RCSB website
Args:
file_name: a pdb file name
data_type (str):
'pdb' -> pdb
'xray' -> structure factor
Returns:
a file path for the pdb file_name
"""
try:
file_name = fetch.get_pdb(pdb_id,data_type,mirror='rcsb',log=null_out())
except Sorry:
file_name = ''
return file_name | 19a557a0bf4f69ba132d6d4520a124aef931f816 | 7,406 |
def parse_events(fobj):
"""Parse a trace-events file into {event_num: (name, arg1, ...)}."""
def get_argnames(args):
"""Extract argument names from a parameter list."""
return tuple(arg.split()[-1].lstrip('*') for arg in args.split(','))
events = {dropped_event_id: ('dropped', 'count')}
event_num = 0
for line in fobj:
m = event_re.match(line.strip())
if m is None:
continue
disable, name, args = m.groups()
events[event_num] = (name,) + get_argnames(args)
event_num += 1
return events | af35deff9c5b76d4d46700a738186822032d4190 | 7,407 |
import numpy as np
def enu2ECEF(phi, lam, x, y, z, t=0.0):
""" Convert ENU local coordinates (East, North, Up) to Earth centered - Earth fixed (ECEF) Cartesian,
correcting for Earth rotation if needed.
ENU coordinates can be transformed to ECEF by two rotations:
1. A clockwise rotation over east-axis by an angle (90 - phi) to align the up-axis with the z-axis.
    2. A clockwise rotation over the z-axis by an angle (90 + lam) to align the east-axis with the x-axis.
Source: http://www.navipedia.net/index.php/Transformations_between_ECEF_and_ENU_coordinates
Arguments:
phi: [float] east-axis rotation angle
lam: [float] z-axis rotation angle
x: [float] ENU x coordinate
y: [float] ENU y coordinate
z: [float] ENU z coordinate
Keyword arguments:
t: [float] time in seconds, 0 by default
Return:
(x_ecef, y_ecef, z_ecef): [tuple of floats] ECEF coordinates
"""
# Calculate ECEF coordinate from given local coordinates
x_ecef = -np.sin(lam)*x - np.sin(phi)*np.cos(lam)*y + np.cos(phi)*np.cos(lam)*z
y_ecef = np.cos(lam)*x - np.sin(phi)*np.sin(lam)*y + np.cos(phi)*np.sin(lam)*z
z_ecef = np.cos(phi) *y + np.sin(phi) *z
# Calculate time correction (in radians)
tau = 2*np.pi/(23.0*3600.0 + 56.0*60.0 + 4.09054) # Earth rotation in rad/s
yaw = -tau*t
x_temp = x_ecef
y_temp = y_ecef
# Apply time correction
x_ecef = np.cos(yaw)*x_temp + np.sin(yaw)*y_temp
y_ecef = -np.sin(yaw)*x_temp + np.cos(yaw)*y_temp
return x_ecef, y_ecef, z_ecef | 494078d7c3bf9933fcc5a1b8ac62e105233722b8 | 7,408 |
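A usage sketch for enu2ECEF(), assuming the function is in scope; it rotates a local ENU offset into the ECEF frame for a made-up site latitude/longitude.

import numpy as np

phi = np.radians(45.0)   # site latitude
lam = np.radians(15.0)   # site longitude
# A point 100 m east, 200 m north and 50 m up from the site, no time correction.
x_ecef, y_ecef, z_ecef = enu2ECEF(phi, lam, 100.0, 200.0, 50.0, t=0.0)
print(x_ecef, y_ecef, z_ecef)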
def get_login(discord_id):
"""Get login info for a specific user."""
discord_id_str = str(discord_id)
logins = get_all_logins()
if discord_id_str in logins:
return logins[discord_id_str]
return None | 16b7690dd4f95df1647c7060200f3938b80993c0 | 7,410 |
import json
from collections import OrderedDict
def to_json_dict(json_data):
"""Given a dictionary or JSON string; return a dictionary.
:param json_data: json_data(dict, str): Input JSON object.
:return: A Python dictionary/OrderedDict with the contents of the JSON object.
:raises TypeError: If the input object is not a dictionary or string.
"""
if isinstance(json_data, dict):
return json_data
elif isinstance(json_data, str):
return json.loads(json_data, object_hook=OrderedDict)
else:
raise TypeError(f"'json_data' must be a dict or valid JSON string; received: {json_data!r}") | e1264d88a4424630f7348cbe7794ca072c057bdf | 7,411 |
def get_keypoints():
"""Get the COCO keypoints and their left/right flip coorespondence map."""
# Keypoints are not available in the COCO json for the test split, so we
# provide them here.
keypoints = [
'nose',
'neck',
'right_shoulder',
'right_elbow',
'right_wrist',
'left_shoulder',
'left_elbow',
'left_wrist',
'right_hip',
'right_knee',
'right_ankle',
'left_hip',
'left_knee',
'left_ankle',
'right_eye',
'left_eye',
'right_ear',
'left_ear']
return keypoints | 1bedcee8c5f38bdefcd00251dd95530966a41353 | 7,412 |
def has_mtu_mismatch(iface: CoreInterface) -> bool:
"""
Helper to detect MTU mismatch and add the appropriate OSPF
mtu-ignore command. This is needed when e.g. a node is linked via a
GreTap device.
"""
if iface.mtu != DEFAULT_MTU:
return True
if not iface.net:
return False
    for net_iface in iface.net.get_ifaces():
        if net_iface.mtu != iface.mtu:
            return True
return False | a9415ed9fbcb276a53df8dac159f48aaac831744 | 7,413 |
def phrase_boxes_alignment(flatten_boxes, ori_phrases_boxes):
""" align the bounding boxes with corresponding phrases. """
phrases_boxes = list()
ori_pb_boxes_count = list()
for ph_boxes in ori_phrases_boxes:
ori_pb_boxes_count.append(len(ph_boxes))
    start_point = 0
    for pb_boxes_num in ori_pb_boxes_count:
        sub_boxes = list()
        for i in range(start_point, start_point + pb_boxes_num):
            sub_boxes.append(flatten_boxes[i])
        start_point += pb_boxes_num
phrases_boxes.append(sub_boxes)
pb_boxes_count = list()
for ph_boxes in phrases_boxes:
pb_boxes_count.append(len(ph_boxes))
assert pb_boxes_count == ori_pb_boxes_count
return phrases_boxes | e961a90f61917f217ac6908263f5b6c74bc42b26 | 7,414 |
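A self-contained sketch of the realignment, assuming phrase_boxes_alignment is in scope; the box tuples are placeholders.

# Three phrases with 2, 1 and 3 candidate boxes respectively.
ori_phrases_boxes = [[(0, 0, 1, 1), (1, 1, 2, 2)],
                     [(2, 2, 3, 3)],
                     [(3, 3, 4, 4), (4, 4, 5, 5), (5, 5, 6, 6)]]
# The corresponding flat list of boxes, in the same order.
flatten_boxes = [box for boxes in ori_phrases_boxes for box in boxes]

aligned = phrase_boxes_alignment(flatten_boxes, ori_phrases_boxes)
print([len(group) for group in aligned])  # -> [2, 1, 3]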
def dismiss_notification(request):
""" Dismisses a notification
### Response
    * Status code 200 (When the notification is successfully dismissed)
{
"success": <boolean: true>
}
* `success` - Whether the dismissal request succeeded or not
* Status code 400 (When the notification ID cannot be found)
{
"success": <boolean: false>,
"message": <string: "notification_not_found">
}
* `message` - Error message, when success is false
"""
response = {'success': False}
data = request.data
try:
notif = Notification.objects.get(id=data['notificationId'])
notif.dismissed_by.add(request.user)
response['success'] = True
resp_status = status.HTTP_200_OK
except Notification.DoesNotExist:
resp_status = status.HTTP_400_BAD_REQUEST
response['message'] = 'notification_not_found'
return Response(response, status=resp_status) | 97cbd560fd16da8ba0d081616e3e2504a2dbf8a0 | 7,416 |
def log_at_level(logger, message_level, verbose_level, msg):
"""
    Writes msg to the log if message_level <= verbose_level.
    Returns True if the message was written, so the caller can decide whether
    to drop down and output at a lower log level.
"""
if message_level <= verbose_level:
logger.info(msg)
return True
return False | 4b88ee137f7c2cb638b8a058b2dceb534329c0d9 | 7,417 |
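A usage sketch, assuming log_at_level is in scope; the logger setup is illustrative.

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("demo")

print(log_at_level(logger, message_level=1, verbose_level=2, msg="shown"))   # True, message logged
print(log_at_level(logger, message_level=3, verbose_level=2, msg="hidden"))  # False, nothing logged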
def datafile(tmp_path_factory):
"""Make a temp HDF5 Ocat details file within 60 arcmin of 3c273 for obsids
before 2021-Nov that persists for the testing session."""
datafile = str(tmp_path_factory.mktemp('ocat') / 'target_table.h5')
update_ocat_local(datafile, target_name='3c273', resolve_name=True, radius=60,
startDate=DATE_RANGE)
return datafile | 16448a80385ab29ebbaef8e593f96ff0167c1fdb | 7,418 |
def _collect_scalars(values):
"""Given a list containing scalars (float or int) collect scalars
into a single prefactor. Input list is modified."""
prefactor = 1.0
for i in range(len(values)-1, -1, -1):
if isinstance(values[i], (int, float)):
prefactor *= values.pop(i)
return prefactor | bea7e54eec16a9b29552439cd12ce29b9e82d40b | 7,419 |
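A quick sketch showing both the returned prefactor and the in-place mutation, assuming _collect_scalars is in scope.

values = [2, "x", 0.5, "y", 4]
prefactor = _collect_scalars(values)
print(prefactor)  # 4.0  (2 * 0.5 * 4)
print(values)     # ['x', 'y'] -- scalars were removed in place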
from pathlib import Path
def create_output_directory(validated_cfg: ValidatedConfig) -> Path:
"""
Creates a top level download directory if it does not already exist, and returns
the Path to the download directory.
"""
download_path = validated_cfg.output_directory / f"{validated_cfg.version}"
download_path.mkdir(parents=True, exist_ok=True)
return download_path | 720f45885e177b55ddbdf492655b17275c4097f8 | 7,420 |
import numpy as np
def presentation_logistique(regression, sig=False):
    """
    Format the results of a logistic regression.
    Parameters
    ----------
    regression: fitted statsmodels regression model
    sig: optional, boolean
    Returns
    -------
    DataFrame : summary table of the logistic regression
    """
    # Convert the coefficients to odds ratios
df = np.exp(regression.conf_int())
df['odd ratio'] = round(np.exp(regression.params), 2)
df["p-value"] = round(regression.pvalues, 3)
df["IC"] = df.apply(lambda x : "%.2f [%.2f-%.2f]" \
% (x["odd ratio"],x[0],x[1]),axis=1)
    # Add significance markers
if sig:
df["p-value"] = df["p-value"].apply(significativite)
df = df.drop([0,1], axis=1)
return df | bef9e08f463c9bc0fbb1d737a412472ab792051e | 7,421 |
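A hedged sketch that fits a toy logistic regression with statsmodels and passes it to presentation_logistique(); the data and the statsmodels workflow are assumptions, not part of the original snippet.

import numpy as np
import pandas as pd
import statsmodels.api as sm

rng = np.random.default_rng(0)
X = pd.DataFrame({"x1": rng.normal(size=200), "x2": rng.normal(size=200)})
y = (X["x1"] + 0.5 * X["x2"] + rng.normal(size=200) > 0).astype(int)

model = sm.Logit(y, sm.add_constant(X)).fit(disp=False)
print(presentation_logistique(model))  # odds ratios, p-values and formatted CIs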
import pandas as pd
def handle_colname_collisions(df: pd.DataFrame, mapper: dict, protected_cols: list) -> (pd.DataFrame, dict, dict):
"""
Description
-----------
Identify mapper columns that match protected column names. When found,
update the mapper and dataframe, and keep a dict of these changes
to return to the caller e.g. SpaceTag.
Parameters
----------
df: pd.DataFrame
submitted data
mapper: dict
a dictionary for the schema mapping (JSON) for the dataframe.
protected_cols: list
protected column names i.e. timestamp, country, admin1, feature, etc.
Output
------
    pd.DataFrame:
The modified dataframe.
dict:
The modified mapper.
dict:
key: new column name e.g. "day1month1year1" or "country_non_primary"
value: list of old column names e.g. ['day1','month1','year1'] or ['country']
"""
# Get names of geo fields that collide and are not primary_geo = True
non_primary_geo_cols = [d["name"] for d in mapper["geo"] if d["name"] in protected_cols and ("primary_geo" not in d or d["primary_geo"] == False)]
# Get names of date fields that collide and are not primary_date = True
non_primary_time_cols = [d['name'] for d in mapper['date'] if d["name"] in protected_cols and ('primary_date' not in d or d['primary_date'] == False)]
# Only need to change a feature column name if it qualifies another field,
# and therefore will be appended as a column to the output.
feature_cols = [d["name"] for d in mapper['feature'] if d["name"] in protected_cols and "qualifies" in d and d["qualifies"]]
# Verbose build of the collision_list, could have combined above.
collision_list = non_primary_geo_cols + non_primary_time_cols + feature_cols
# Bail if no column name collisions.
if not collision_list:
return df, mapper, {}
# Append any collision columns with the following suffix.
suffix = "_non_primary"
# Build output dictionary and update df.
renamed_col_dict = {}
for col in collision_list:
df.rename(columns={col: col + suffix}, inplace=True)
renamed_col_dict[col + suffix] = [col]
# Update mapper
for k, vlist in mapper.items():
for dct in vlist:
if dct["name"] in collision_list:
dct["name"] = dct["name"] + suffix
elif "qualifies" in dct and dct["qualifies"]:
# change any instances of this column name qualified by another field
dct["qualifies"] = [w.replace(w, w + suffix) if w in collision_list else w for w in dct["qualifies"] ]
elif "associated_columns" in dct and dct["associated_columns"]:
# change any instances of this column name in an associated_columns dict
dct["associated_columns"] = {k: v.replace(v, v + suffix) if v in collision_list else v for k, v in dct["associated_columns"].items() }
return df, mapper, renamed_col_dict | 56819ff256cc3c1bcd2062fab0cac29bce7a0c15 | 7,422 |
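A minimal sketch of a collision on a non-primary geo column, assuming handle_colname_collisions is in scope; the mapper layout is inferred from the function body and the data is made up.

import pandas as pd

df = pd.DataFrame({"timestamp": ["2020-01", "2020-02"],
                   "country": ["Ethiopia", "Kenya"],
                   "rainfall": [10.0, 12.5]})
mapper = {
    "geo": [{"name": "country", "primary_geo": False}],
    "date": [{"name": "timestamp", "primary_date": True}],
    "feature": [{"name": "rainfall"}],
}
protected_cols = ["timestamp", "country", "admin1", "feature"]

df, mapper, renamed = handle_colname_collisions(df, mapper, protected_cols)
print(renamed)           # {'country_non_primary': ['country']}
print(list(df.columns))  # ['timestamp', 'country_non_primary', 'rainfall']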
import codecs
import json
from pyquery import PyQuery
def process_file(filename):
"""Read a file from disk and parse it into a structured dict."""
try:
with codecs.open(filename, encoding='utf-8', mode='r') as f:
file_contents = f.read()
except IOError as e:
log.info('Unable to index file: %s, error :%s', filename, e)
return
data = json.loads(file_contents)
sections = []
title = ''
body_content = ''
if 'current_page_name' in data:
path = data['current_page_name']
else:
log.info('Unable to index file due to no name %s', filename)
return None
if 'body' in data and data['body']:
body = PyQuery(data['body'])
body_content = body.text().replace(u'¶', '')
sections.extend(generate_sections_from_pyquery(body))
else:
log.info('Unable to index content for: %s', filename)
if 'title' in data:
title = data['title']
if title.startswith('<'):
title = PyQuery(data['title']).text()
else:
log.info('Unable to index title for: %s', filename)
return {'headers': process_headers(data, filename),
'content': body_content, 'path': path,
'title': title, 'sections': sections} | 864c04449cbd998394c07790858ccbdc2d4eea6d | 7,423 |
def revive(grid: Grid, coord: Point) -> Grid:
"""Generates a set of all cells which can be revived near coord"""
revives = set()
for offset in NEIGHBOR_OFFSETS:
possible_revive = addpos(coord, offset)
if possible_revive in grid: continue
active_count = live_around(grid, possible_revive)
if active_count == 3:
revives.add(possible_revive)
return revives | 94e928ce9dff7015f2785e5a0186f06c4f754cda | 7,424 |
def process_table_creation_surplus(region, exchanges_list):
"""Add docstring."""
ar = dict()
ar["@type"] = "Process"
ar["allocationFactors"] = ""
ar["defaultAllocationMethod"] = ""
ar["exchanges"] = exchanges_list
ar["location"] = location(region)
ar["parameters"] = ""
ar["processDocumentation"] = process_doc_creation()
ar["processType"] = "UNIT_PROCESS"
ar["name"] = surplus_pool_name + " - " + region
ar[
"category"
] = "22: Utilities/2211: Electric Power Generation, Transmission and Distribution"
ar["description"] = "Electricity surplus in the " + str(region) + " region."
ar["description"]=(ar["description"]
+ " This process was created with ElectricityLCI "
+ "(https://github.com/USEPA/ElectricityLCI) version " + elci_version
+ " using the " + model_specs.model_name + " configuration."
)
ar["version"] = make_valid_version_num(elci_version)
return ar | 0669fce0363d807ee018b59125a13c95417294a7 | 7,425 |
import copy
import numpy as np
from scipy.integrate import ode
def makepath_coupled(model_hybrid,T,h,ode_method,sample_rate):
""" Compute paths of coupled exact-hybrid model using CHV ode_method. """
voxel = 0
# make copy of model with exact dynamics
model_exact = copy.deepcopy(model_hybrid)
for e in model_exact.events:
e.hybridType = SLOW
# setup integrator
path = np.zeros((Nt,2*model_hybrid.dimension))
path[0][0:model_hybrid.dimension] = model_hybrid.getstate(0)
path[0][model_hybrid.dimension:2*model_hybrid.dimension] = model_exact.getstate(0)
clock = np.zeros(Nt)
k = 0
tj = ode(chvrhs_coupled).set_integrator(ode_method,atol = h,rtol = h)
tj.set_f_params(model_hybrid,model_exact,sample_rate)
y0 = np.zeros(2*model_hybrid.dimension+1)
while (k+1<Nt) and (clock[k]<T):
k = k+1
s1 = tryexponential(1)
# solve
y0[0:model_hybrid.dimension] = model_hybrid.getstate(0)
y0[model_hybrid.dimension:2*model_hybrid.dimension] = model_exact.getstate(0)
y0[2*model_hybrid.dimension] = 0.
tj.set_initial_value(y0,0)
tj.integrate(s1)
ys1 = tj.y
for i in range(model_hybrid.dimension):
model_hybrid.systemState[i].value[0] = ys1[i]
for i in range(model_hybrid.dimension):
model_exact.systemState[i].value[0] = ys1[i+model_hybrid.dimension]
t_next = tj.y[2*model_hybrid.dimension]
for e in model_hybrid.events:
e.updaterate()
for e in model_exact.events:
e.updaterate()
# update slow species
r = np.random.rand()
agg_rate = 0.
for i in range(len(model_hybrid.events)):
if model_hybrid.events[i].hybridType == SLOW:
hybrid_rate = model_hybrid.events[i].rate
exact_rate = model_exact.events[i].rate
agg_rate = agg_rate + res(hybrid_rate,exact_rate )
agg_rate = agg_rate + res(exact_rate,hybrid_rate )
agg_rate = agg_rate + min(hybrid_rate,exact_rate )
else:
agg_rate = agg_rate + model_exact.events[i].rate
#agg_rate = agg_rate + model_hybrid.events[i].rate
#else:
# print("PROBLEM")
# find reaction
if r>sample_rate/(agg_rate+sample_rate):
firing_event_hybrid,firing_event_exact = findreaction_coupled(model_hybrid.events,model_exact.events,agg_rate,r)
if isinstance(firing_event_hybrid,Reaction):
firing_event_hybrid.react()
if isinstance(firing_event_exact,Reaction):
firing_event_exact.react()
clock[k] = clock[k-1] + t_next
path[k][0:model_hybrid.dimension] = model_hybrid.getstate(0)
path[k][model_hybrid.dimension:2*model_hybrid.dimension] = model_exact.getstate(0)
return path[0:k+1],clock[0:k+1] | 95e26ec633b5c10797a040583cbc6ad6d6ad9127 | 7,427 |
import numpy as np
import torch
from torchvision.transforms import Normalize
def process_image_keypoints(img, keypoints, input_res=224):
"""Read image, do preprocessing and possibly crop it according to the bounding box.
If there are bounding box annotations, use them to crop the image.
If no bounding box is specified but openpose detections are available, use them to get the bounding box.
"""
normalize_img = Normalize(mean=constants.IMG_NORM_MEAN, std=constants.IMG_NORM_STD)
img = img[:,:,::-1].copy() # PyTorch does not support negative stride at the moment
center, scale, bbox = bbox_from_keypoints(keypoints, imageHeight = img.shape[0])
if center is None:
return None, None, None, None, None
img, boxScale_o2n, bboxTopLeft = crop_bboxInfo(img, center, scale, (input_res, input_res))
# viewer2D.ImShow(img, name='cropped', waitTime=1) #224,224,3
if img is None:
return None, None, None, None, None
# unCropped = uncrop(img, center, scale, (input_res, input_res))
# if True:
# viewer2D.ImShow(img)
img = img.astype(np.float32) / 255.
img = torch.from_numpy(img).permute(2,0,1)
norm_img = normalize_img(img.clone())[None]
# return img, norm_img, img_original, boxScale_o2n, bboxTopLeft, bbox
bboxInfo ={"center": center, "scale": scale, "bboxXYWH":bbox}
return img, norm_img, boxScale_o2n, bboxTopLeft, bboxInfo | e30e9c9b5de106c968d538a56062fefab7c1b3ee | 7,428 |
from typing import Dict
from typing import Iterable
from typing import Union
def _load_outputs(dict_: Dict) -> Iterable[Union[HtmlOutput, EbookConvertOutput]]:
"""Translates a dictionary into a list of output objects.
The dictionary is assumed to have the following structure::
{
            'outputs': [{ 'path': 'some.html', 'stylesheet': '...' },
                        { 'path': 'some.epub', 'ebookconvert_params': ['...'] }]
}
If the key 'outputs' is not present in the dictionary or if there are no output
sub-dictionaries, an empty list is returned instead.
The type of the output is inferred from the file name provided as a value of the 'path' key
of the output sub-dictionary.
A file name ending in the file type '.html' will produce an HtmlOutput. '.epub', '.mobi' or
any other file type excluding '.html' will produce an EbookConvertOutput.
Note that a local stylesheet *replaces* the global stylesheet, but local ebookconvert_params
are *added* to the global ebookconvert_params if present.
Args:
dict_: The dictionary.
Returns:
        The list of output objects, or an empty list if no output sub-dictionaries are
present in the encapsulating dictionary or if the 'outputs' key itself is missing.
"""
outputs = []
global_stylesheet = None
global_ec_params = []
if 'stylesheet' in dict_:
global_stylesheet = dict_['stylesheet']
if 'ebookconvert_params' in dict_:
global_ec_params = _load_ebookconvert_params(dict_)
for output in dict_['outputs']:
path = output['path']
file_type = path.split('.')[-1]
if 'stylesheet' not in output and global_stylesheet:
output['stylesheet'] = global_stylesheet
if file_type == 'html':
outputs.append(HtmlOutput(**output))
else:
if 'ebookconvert_params' in output:
local_ec_params = _load_ebookconvert_params(output)
output['ebookconvert_params'] = global_ec_params + local_ec_params
else:
output['ebookconvert_params'] = global_ec_params
outputs.append(EbookConvertOutput(**output))
return outputs | 229eeb33ca34266a397dca56b13f004a8647e8e5 | 7,430 |
from functools import wraps
def _async_friendly_contextmanager(func):
"""
Equivalent to @contextmanager, except the resulting (non-async) context
manager works correctly as a decorator on async functions.
"""
@wraps(func)
def helper(*args, **kwargs):
return _AsyncFriendlyGeneratorContextManager(func, args, kwargs)
return helper | 453fb89ca52101e178e0bd2c5895804ca2cc54e6 | 7,431 |
import itertools
def all_inputs(n):
"""
returns an iterator for all {-1,1}-vectors of length `n`.
"""
return itertools.product((-1, +1), repeat=n) | 526dff9332cf606f56dcb0c31b5c16a0124478ed | 7,432 |
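A tiny usage sketch, assuming all_inputs is in scope.

for vec in all_inputs(2):
    print(vec)   # (-1, -1), (-1, 1), (1, -1), (1, 1)

print(sum(1 for _ in all_inputs(4)))  # 16 vectors for n=4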
def generate_winner_list(winners):
""" Takes a list of winners, and combines them into a string. """
return ", ".join(winner.name for winner in winners) | 2586292d4a96f63bf40c0d043111f5087c46f7a9 | 7,434 |
def stampify_url():
"""The stampified version of the URL passed in args."""
url = request.args.get('url')
max_pages = request.args.get('max_pages')
enable_animations = bool(request.args.get('animations') == 'on')
if not max_pages:
max_pages = DEFAULT_MAX_PAGES
_stampifier = Stampifier(url, int(max_pages), enable_animations)
try:
return _stampifier.stampify().stamp_html
except StampifierError as err:
return render_template('error_screen.html',
message=err.message) | 136d95adedeeddcdc4166a9bce20414e909fa21f | 7,435 |
def init_time(p, **kwargs):
"""Initialize time data."""
time_data = {
'times': [p['parse']],
'slots': p['slots'],
}
time_data.update(**kwargs)
return time_data | 2aff3819d561f0dc9e0c9b49702b8f3fbb6e9252 | 7,438 |
def bsplslib_D0(*args):
"""
:param U:
:type U: float
:param V:
:type V: float
:param UIndex:
:type UIndex: int
:param VIndex:
:type VIndex: int
:param Poles:
:type Poles: TColgp_Array2OfPnt
:param Weights:
:type Weights: TColStd_Array2OfReal &
:param UKnots:
:type UKnots: TColStd_Array1OfReal &
:param VKnots:
:type VKnots: TColStd_Array1OfReal &
:param UMults:
:type UMults: TColStd_Array1OfInteger &
:param VMults:
:type VMults: TColStd_Array1OfInteger &
:param UDegree:
:type UDegree: int
:param VDegree:
:type VDegree: int
:param URat:
:type URat: bool
:param VRat:
:type VRat: bool
:param UPer:
:type UPer: bool
:param VPer:
:type VPer: bool
:param P:
:type P: gp_Pnt
:rtype: void
"""
return _BSplSLib.bsplslib_D0(*args) | 4c7a95448c116ef04fac36168c05a22597bc0684 | 7,440 |
def b_cross(self) -> tuple:
"""
Solve cross one piece at a time.
Returns
-------
tuple of (list of str, dict of {'CROSS': int})
Moves to solve cross, statistics (move count in ETM).
Notes
-----
The cube is rotated so that the white centre is facing down.
The four white cross pieces are moved to the yellow side (on top),
starting with the edge which is the fewest moves away from solved.
The edges are then moved down to the white centre in the fewest
number of moves.
"""
cube = self.cube
solve = []
edges = (1,0), (-1,1), (1,-1), (0,1)
cross = {
'L': (4,1,-1),
"L'": (2,1,0),
'F': (1,1,-1),
"F'": (3,1,0),
'R': (2,1,-1),
"R'": (4,1,0),
'B': (3,1,-1),
"B'": (1,1,0),
'L2': (5,1,0),
'F2': (5,0,1),
'R2': (5,1,-1),
'B2': (5,-1,1),
"L U' F": (1,0,1),
"L' U' F": (1,-1,1),
"F U' R": (2,0,1),
"F' U' R": (2,-1,1),
"R' U F'": (3,0,1),
"R U F'": (3,-1,1),
"B' U R'": (4,0,1),
"B U R'": (4,-1,1)
}
for s, side in enumerate(cube):
if side[1][1] == 'U':
break
if s != 5:
move = ('z2', "z'", "x'", 'z', 'x')[s]
self.move(move)
solve.append(move)
while not(all(cube[0][y][x] == 'U' for y, x in edges) or
all(cube[5][y][x] == 'U' for y, x in edges) and
all(side[-1][1] == side[1][1] for side in cube[1:5])):
for edge in cross:
if cube[cross[edge][0]][cross[edge][1]][cross[edge][-1]] == 'U':
break
slot = 'LFRB'.index(edge[0])
if cube[0][edges[slot][0]][edges[slot][1]] != 'U':
moves = edge.split()
elif cube[0][edges[slot-3][0]][edges[slot-3][1]] != 'U':
moves = ['U'] + edge.split()
elif cube[0][edges[slot-1][0]][edges[slot-1][1]] != 'U':
moves = ["U'"] + edge.split()
else:
moves = ['U2'] + edge.split()
self.move(moves)
solve.extend(moves)
while any(cube[5][y][x] != 'U' for y, x in edges):
if cube[1][0][1] == cube[1][1][1] and cube[0][1][0] == 'U':
self.move('L2')
solve.append('L2')
if cube[2][0][1] == cube[2][1][1] and cube[0][-1][1] == 'U':
self.move('F2')
solve.append('F2')
if cube[3][0][1] == cube[3][1][1] and cube[0][1][-1] == 'U':
self.move('R2')
solve.append('R2')
if cube[4][0][1] == cube[4][1][1] and cube[0][0][1] == 'U':
self.move('B2')
solve.append('B2')
if any(cube[s][0][1] == cube[(s + 2) % 4 + 1][1][1] and
cube[0][edges[s-1][0]][edges[s-1][1]] == 'U'
for s in range(1, 5)):
self.move('U')
solve.append('U')
elif any(cube[s][0][1] == cube[s % 4 + 1][1][1] and
cube[0][edges[s-1][0]][edges[s-1][1]] == 'U'
for s in range(1, 5)):
self.move("U'")
solve.append("U'")
elif any(cube[s][0][1] == cube[(s + 1) % 4 + 1][1][1] and
cube[0][edges[s-1][0]][edges[s-1][1]] == 'U'
for s in range(1, 5)):
self.move('U2')
solve.append('U2')
return solve, {'CROSS': len(solve)} | f0a82ea6b6634b78e4252ac264a537af87be0fc1 | 7,441 |
import tensorflow as tf
from object_detection.core import standard_fields as fields
def retain_groundtruth(tensor_dict, valid_indices):
"""Retains groundtruth by valid indices.
Args:
tensor_dict: a dictionary of following groundtruth tensors -
fields.InputDataFields.groundtruth_boxes
fields.InputDataFields.groundtruth_classes
fields.InputDataFields.groundtruth_confidences
fields.InputDataFields.groundtruth_keypoints
fields.InputDataFields.groundtruth_instance_masks
fields.InputDataFields.groundtruth_is_crowd
fields.InputDataFields.groundtruth_area
fields.InputDataFields.groundtruth_label_types
fields.InputDataFields.groundtruth_difficult
valid_indices: a tensor with valid indices for the box-level groundtruth.
Returns:
a dictionary of tensors containing only the groundtruth for valid_indices.
Raises:
ValueError: If the shape of valid_indices is invalid.
ValueError: field fields.InputDataFields.groundtruth_boxes is
not present in tensor_dict.
"""
input_shape = valid_indices.get_shape().as_list()
if not (len(input_shape) == 1 or
(len(input_shape) == 2 and input_shape[1] == 1)):
raise ValueError('The shape of valid_indices is invalid.')
valid_indices = tf.reshape(valid_indices, [-1])
valid_dict = {}
if fields.InputDataFields.groundtruth_boxes in tensor_dict:
# Prevents reshape failure when num_boxes is 0.
num_boxes = tf.maximum(tf.shape(
tensor_dict[fields.InputDataFields.groundtruth_boxes])[0], 1)
for key in tensor_dict:
if key in [fields.InputDataFields.groundtruth_boxes,
fields.InputDataFields.groundtruth_classes,
fields.InputDataFields.groundtruth_confidences,
fields.InputDataFields.groundtruth_keypoints,
fields.InputDataFields.groundtruth_keypoint_visibilities,
fields.InputDataFields.groundtruth_instance_masks]:
valid_dict[key] = tf.gather(tensor_dict[key], valid_indices)
# Input decoder returns empty tensor when these fields are not provided.
# Needs to reshape into [num_boxes, -1] for tf.gather() to work.
elif key in [fields.InputDataFields.groundtruth_is_crowd,
fields.InputDataFields.groundtruth_area,
fields.InputDataFields.groundtruth_difficult,
fields.InputDataFields.groundtruth_label_types]:
valid_dict[key] = tf.reshape(
tf.gather(tf.reshape(tensor_dict[key], [num_boxes, -1]),
valid_indices), [-1])
# Fields that are not associated with boxes.
else:
valid_dict[key] = tensor_dict[key]
else:
raise ValueError('%s not present in input tensor dict.' % (
fields.InputDataFields.groundtruth_boxes))
return valid_dict | a6681d8e6b3c8c44fa4fee9143ab57538eac2661 | 7,443 |
import json
def cluster_list_node(realm, id):
""" this function add a cluster node """
cluster = Cluster(ES)
account = Account(ES)
account_email = json.loads(request.cookies.get('account'))["email"]
if account.is_active_realm_member(account_email, realm):
return Response(json.dumps(cluster.list_nodes(realm, id)))
else:
return Response({"failure": "account identifier and realm is not an active match"}) | eebccc3c7c3c710fc2c26ee0dfba5481e2e2043a | 7,444 |
def complete_json(input_data, ref_keys='minimal', input_root=None,
output_fname=None, output_root=None):
"""
Parameters
----------
input_data : str or os.PathLike or list-of-dict
Filepath to JSON with data or list of dictionaries with information
about annotations
ref_keys : {'minimal', 'info'}, optional
Which reference keys to check in `input_data`. Default: 'minimal'
input_root : str, optional
If `input_data` is a filename the key in the file containing data about
annotations. If not specified will be based on provided `ref_keys`.
Default: None
output_fname : str or os.PathLike, optional
Filepath where complete JSON should be saved. If not specified the
data are not saved to disk. Default: None
output_root : str, optional
If `output_fname` is not None, the key in the saved JSON where
completed information should be stored. If not specified will be based
on `input_root`. Default: None
Returns
-------
output : list-of-dict
Information about annotations from `input_data`
"""
valid_keys = ['minimal', 'info']
if ref_keys not in valid_keys:
raise ValueError(f'Invalid ref_keys: {ref_keys}. Must be one of '
f'{valid_keys}')
# this is to add missing fields to existing data
# could accept data dict list or filename as input
# set minimal vs info
if ref_keys == 'minimal':
ref_keys = MINIMAL_KEYS
if input_root is None:
input_root = 'annotations'
elif ref_keys == 'info':
ref_keys = INFO_KEYS
if input_root is None:
input_root = 'info'
# check input
if not isinstance(input_data, list):
input_data = parse_json(input_data, root=input_root)
# make output
output = []
for item in input_data:
output.append({
key: (item[key] if key in item else None)
for key in ref_keys
})
# write output
if output_fname is not None:
if output_root is None:
output_root = input_root
write_json(output, output_fname, root=output_root)
return output | 1a732c87670c890b406e935494ca2c51a0f0dc83 | 7,446 |
def BuildSystem(input_dir, info_dict, block_list=None):
"""Build the (sparse) system image and return the name of a temp
file containing it."""
return CreateImage(input_dir, info_dict, "system", block_list=block_list) | 4537da68b322e7d7714faa2c365ece1c67b255f2 | 7,447 |