content | sha1 | id
---|---|---
stringlengths 35-762k | stringlengths 40-40 | int64 0-3.66M
def residual_block(filters, repetitions, kernel_size=(3, 3), strides=(2, 2), is_first_layer=False):
"""Builds a residual block with repeating bottleneck blocks.
"""
def f(input):
for i in range(repetitions):
init_strides = (1, 1)
if i == 0 and not is_first_layer:
init_strides = strides
input = basic_block(filters=filters,kernel_size=kernel_size, init_strides=init_strides,
is_first_block_of_first_layer=(is_first_layer and i == 0))(input)
return input
return f | d3771289e034c4cd06f38caa715c925d7b947ab1 | 9,500 |
import numpy as np
def vpg_omega(X, Y, Gamma=1, sigma=1, polarIn=False):
"""
Vorticity distribution for 2D Gaussian vortex patch
"""
if polarIn:
r = X
else:
r = np.sqrt(X ** 2 + Y ** 2)
omega_z = Gamma/(np.pi*sigma) * (np.exp(- r**2/sigma**2))
return omega_z | d7964152c9d21defc395e2a31d8709fe9c5d94c8 | 9,501 |
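A minimal usage sketch for vpg_omega (assuming only numpy): evaluate the Gaussian vortex patch on a small grid and check the peak value at r = 0.

import numpy as np

x = np.linspace(-2.0, 2.0, 5)
X, Y = np.meshgrid(x, x)
omega = vpg_omega(X, Y, Gamma=1.0, sigma=0.5)
print(omega.shape)  # (5, 5)
# For this formula the peak at r = 0 equals Gamma / (pi * sigma)
print(np.isclose(omega.max(), 1.0 / (np.pi * 0.5)))  # True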
from typing import Tuple
def get_outgroup(tree: CassiopeiaTree, triplet: Tuple[str, str, str]) -> str:
"""Infers the outgroup of a triplet from a CassioepiaTree.
Finds the outgroup based on the depth of the latest-common-ancestors
of each pair of items. The pair with the deepest LCA is the
ingroup and the remaining leaf is the outgroup. We infer the depth
of the LCA from the number of shared ancestors.
Args:
tree: CassiopeiaTree
triplet: A tuple of three leaves constituting a triplet.
Returns:
The outgroup (i.e. the most distal leaf in the triplet.)
"""
i, j, k = triplet[0], triplet[1], triplet[2]
i_ancestors = tree.get_all_ancestors(i)
j_ancestors = tree.get_all_ancestors(j)
k_ancestors = tree.get_all_ancestors(k)
ij_common = len(set(i_ancestors) & set(j_ancestors))
ik_common = len(set(i_ancestors) & set(k_ancestors))
jk_common = len(set(j_ancestors) & set(k_ancestors))
out_group = "None"
if ij_common > jk_common and ij_common > ik_common:
out_group = k
elif ik_common > jk_common and ik_common > ij_common:
out_group = j
elif jk_common > ij_common and jk_common > ik_common:
out_group = i
return out_group | c48e7121a8622876b6fb1269f881da4afe9cd8da | 9,502 |
from unittest.mock import call
def delete_host(resource_root, host_id):
"""
Delete a host by id
@param resource_root: The root Resource object.
@param host_id: Host id
@return: The deleted ApiHost object
"""
return call(resource_root.delete, "%s/%s" % (HOSTS_PATH, host_id), ApiHost) | 8d4349c0722517e0f4f8d74ea74b2d74bbc08227 | 9,503 |
from typing import Union
from typing import Tuple
from typing import List
def get_preds(model: nn.Module, image: Union[np.ndarray, str], **kwargs) -> Tuple[List, List, List]:
    """
    Generate predictions for the given `image` using `model`.
    """
logger = _get_logger(name=__name__)
    # load in the image if a string is given
if isinstance(image, str):
image = Image.open(image).convert("RGB")
# Convert PIL image to array
image = np.array(image)
# Convert Image to a tensor
tensor_image = transforms(image=image)["image"]
    # Generate predictions
model.eval()
pred = model.predict([tensor_image])
# Gather the bbox, scores & labels from the preds
pred_boxes = pred[0]["boxes"] # Bounding boxes
pred_class = pred[0]["labels"] # predicted class labels
pred_score = pred[0]["scores"] # predicted scores
# Process detections
boxes = list(pred_boxes.cpu().numpy())
clas = list(pred_class.cpu().numpy())
scores = list(pred_score.cpu().numpy())
return boxes, clas, scores | 8778f43fd65bccca8fc9372454aba2a7cd2544d5 | 9,504 |
async def unblock_func(func_name:object,
func_args,
logger=None,
default_res=None,
is_new_loop=False,):
"""
异步函数非阻塞
:param func_name: def 函数对象名
:param func_args: 请求参数可迭代对象(必须遵循元素入参顺序!)
:param logger:
:param default_res: 默认返回结果
:param is_new_loop: 是否开启新loop, True容易造成OSError, too many file open错误
:return:
"""
# todo notice: 一个进程/线程只能一个 event loop
loop = get_event_loop() if not is_new_loop else new_event_loop()
try:
default_res = await loop.run_in_executor(None, func_name, *func_args)
except Exception as e:
        _print(msg='Encountered an error:', logger=logger, log_level=2, exception=e)
finally:
# loop.close()
try:
del loop
except:
pass
collect()
return default_res | a8f0a6f4f1171f01df4b637ca002c30efc8954e0 | 9,505 |
from typing import Dict
from typing import Tuple
import re
def replace_if_has_wiki_link(line: str, folder_dict: Dict) -> Tuple[str, int]:
""" ^title
:return: (string with all wikis replaced, replacement count)
"""
embed_rule = re.compile(re_md_reference)
wiki_partial_rule = re.compile(re_md_wiki_partial)
wiki_rule = re.compile(re_md_wiki)
new_line = line
wiki_count = 0
# ![[xxxx.png]] -> 
while (match := re.search(embed_rule, new_line)):
new_line = handle_rewrite(match, new_line, folder_dict, "embed")
wiki_count += 1
# [[xxxx|yyy]] -> [[yyy]](...xxxx.md) todo: not implemented
while (match := re.search(wiki_partial_rule, new_line)):
new_line = handle_rewrite(match, new_line, folder_dict, "md_partial")
wiki_count += 1
# [[xxxx]] -> [xxx](...xxxx.md)
while (match := re.search(wiki_rule, new_line)):
new_line = handle_rewrite(match, new_line, folder_dict, "md")
wiki_count +=1
# new_line=line if no changes made
return new_line, wiki_count | 5e7a1111685e5543a6624756809da59ae4b37f47 | 9,506 |
import numpy as np
def init_w(w, n):
"""
:purpose:
Initialize a weight array consistent of 1s if none is given
This is called at the start of each function containing a w param
:params:
w : a weight vector, if one was given to the initial function, else None
NOTE: w MUST be an array of np.float64. so, even if you want a boolean w,
convert it to np.float64 (using w.astype(np.float64)) before passing it to
any function
n : the desired length of the vector of 1s (often set to len(u))
:returns:
    w : an array of 1s with shape (n,) if w is None, else return w unchanged
"""
if w is None:
return np.ones(n)
else:
return w | 2157f12410c2a909a32f37b9fcae4a489361fb6e | 9,507 |
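A small usage sketch for init_w: with no weight vector it returns ones, otherwise the input is passed through unchanged.

import numpy as np

u = np.array([1.0, 2.0, 3.0])
print(init_w(None, len(u)))                       # [1. 1. 1.]
print(init_w(np.array([0.5, 0.0, 0.5]), len(u)))  # [0.5 0.  0.5]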
def _ensure_min_resources(progs, cores, memory, min_memory):
"""Ensure setting match minimum resources required for used programs.
"""
for p in progs:
if p in min_memory:
if not memory or cores * memory < min_memory[p]:
memory = float(min_memory[p]) / cores
return cores, memory | f311259242a73a7bc527e3601765c95153a08748 | 9,508 |
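A hypothetical sketch of _ensure_min_resources; the program name and memory figures are made up, and memory is read as GB per core.

cores, memory = _ensure_min_resources(
    progs=["gatk"], cores=4, memory=2.0, min_memory={"gatk": 12.0})
print(cores, memory)  # 4 3.0 -- 4 cores * 2.0 GB < 12 GB, so per-core memory is raised to 12 / 4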
import ctypes
def ctypes_pointer(name):
"""Create a ctypes type representing a C pointer to a custom data type ``name``."""
return type("c_%s_p" % name, (ctypes.c_void_p,), {}) | d87f10ac06391379a24f166272fd42fa938e3676 | 9,509 |
import numpy as np
import pandas as pd
def generate_linear_data(n, betas, sigma):
"""Generate pandas df with x and y variables related by a linear equation.
Export data as csv.
:param n: Number of observations.
:param betas: beta parameters.
:param sigma: standard deviation
:return: None
"""
x = np.linspace(start=0.0, stop=1.0, num=n)
y = betas[0] + betas[1]*x + np.random.normal(loc=1, scale=sigma, size=n)
df = pd.DataFrame({'x': x, 'y': y})
df.to_csv('data/train_data.csv', index=False)
return None | 2f8b99a3c11ecf75afee51bd5df31f22efaddf58 | 9,510 |
def entry(
text,
*,
foreground: str = "",
background: str = "",
sgr: str = "",
jump_line: str = "\n> ",
) -> str:
"""
This function is derived from the input, but with the option of
coloring it and some different formatting.
Note: If you use Windows, the coloring option will not work.
>>> from snakypy.helpers import entry, FG
>>> entry("What's your name?", foreground=FG().QUESTION)
➜ What's your name?
> 'snakypy'
>>> entry("What's your name?", foreground=FG().BLUE)
➜ What's your name?
> 'snakypy'
>>> entry("What's your name?", foreground=FG().GREEN)
➜ What's your name?
> 'snakypy'
Args:
text (object): Argument must receive an object
foreground (str): This named argument should optionally receive \
an object of class "snakypy.helpers.ansi.FG" for the foreground \
color of the text. This object will be text with ansi code. \
(default: '')
background (str): This named argument should optionally receive \
an object of class "snakypy.helpers.ansi.BG" for the background \
color of the text. This object will be text with ansi code. \
(default: '')
sgr (str): This named argument should optionally receive \
an object of class "snakypy.helpers.ansi.SGR" for the effect \
of the text. This object will be text with ansi code. \
(default: '')
jump_line (str): Named argument that makes the action of skipping a line \
and adding a greater sign to represent an arrow. You change \
            that argument to your liking. (default: '\n> ') \
"""
# TODO: DEPRECATED
# check_fg_bg_sgr(FG, BG, SGR, foreground, background, sgr)
try:
return input(f"{NONE}{sgr}{foreground}{background}{text}{jump_line}{NONE}")
except KeyboardInterrupt:
print(f"\n{FG().WARNING} Aborted by user.{NONE}")
return "Aborted by user."
except TypeError:
print(f"\n{FG().ERROR} Input value not defined.{NONE}")
return "Input value not defined." | c0bbbf5bfc675407088a3d820e3a543d9ad167c9 | 9,511 |
import numpy as np
from math import sqrt
def vrotate_3D(vec: np.ndarray,
               ref: np.ndarray) -> np.ndarray:
"""Rotates a vector in a 3D space.
Returns the rotation matrix for `vec` to match the orientation of a
reference vector `ref`.
https://math.stackexchange.com/questions/180418/calculate-rotation-matrix-to-align-vector-a-to-vector-b-in-3d/476311#476311
Parameters
----------
vec
Vector to rotate, as a numpy 1D array
ref
Reference vector, as a numpy 1D array
Returns
-------
np.ndarray
(3,3) rotation matrix, as a numpy 2D array
"""
def norm(A):
return sqrt(np.dot(A, A))
# G = np.matrix([
# [np.dot(A, B), -norm(np.cross(A, B)), 0.0],
# [norm(np.cross(A, B)), np.dot(A, B), 0.0],
# [0.0, 0.0, 1.0]
# ])
# F = np.matrix([
# A,
# (B-np.dot(A, B)*A)/norm(B-np.dot(A, B)*A),
# np.cross(B, A)/norm(np.cross(B, A))
# ])
# return F.I*G*F
V = np.cross(vec, ref)
S = norm(V)
if abs(S) < 1.0e-6:
# Already collinear, nothing to do
return np.eye(3)
else:
C = np.dot(vec, ref)
Vx = np.matrix([[0.0, -V[2], V[1]],
[V[2], 0.0, -V[0]],
[-V[1], V[0], 0.0]])
return np.eye(3) + Vx + Vx**2*(1.0-C)/S**2 | 4cea9d84d8fba2dd5bd9399b83ca9f1aca79b830 | 9,512 |
def asymptotic_decay(learning_rate, t, max_iter):
"""Decay function of the learning process.
Parameters
----------
learning_rate : float
current learning rate.
t : int
current iteration.
max_iter : int
maximum number of iterations for the training.
"""
return learning_rate / (1+t/(max_iter/2)) | 7cc699caed4ddcbde67f5d6e4199fc8479364585 | 9,513 |
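A quick numeric check of asymptotic_decay: the rate is halved at t = max_iter / 2 and cut to a third at t = max_iter.

print(asymptotic_decay(0.5, 0, 100))    # 0.5
print(asymptotic_decay(0.5, 50, 100))   # 0.25
print(asymptotic_decay(0.5, 100, 100))  # 0.1666...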
def get_cell_content(browser, author):
"""
get novel cells
return [cell, cell, cell]
"""
content = list()
cells = browser.find_all(class_='t t2')
for cell in cells:
if cell.find(class_='r_two').b.string != author:
continue
for cell_content in cell.find(class_=['tpc_content do_not_catch', 'tpc_content']).strings:
content.append(cell_content.strip())
return "\n".join(content) | eb498d937b8ffd51ef7805a30940833e09571ed5 | 9,514 |
def triangle_area(a, h):
"""Given length of a side and high return area for a triangle.
>>> triangle_area(5, 3)
7.5
"""
#[SOLUTION]
return a * h / 2.0 | 9890d5e8332e667fab6dd672f62ca852f6f8f8c0 | 9,515 |
from nicos.core import ConfigurationError
import urllib
import logging
def create_mongo_handler(config):
"""
:param config: configuration dictionary
    :return: a MongoLogHandler if 'mongo_logger' is in the config, else None
"""
if hasattr(config, 'mongo_logger'):
url = urllib.parse.urlparse(config.mongo_logger)
if not url.netloc:
raise ConfigurationError('mongo_logger: invalid url')
mongo_handler = MongoLogHandler()
mongo_handler.setLevel(logging.WARNING)
return mongo_handler | c7ac39574a21c44519ae57c6fb40b8f6ca679311 | 9,516 |
def split_and_load(data, ctx_list, batch_axis=0, even_split=True):
"""Splits an NDArray into `len(ctx_list)` slices along `batch_axis` and loads
each slice to one context in `ctx_list`.
Parameters
----------
data : NDArray
A batch of data.
ctx_list : list of Context
A list of Contexts.
batch_axis : int, default 0
The axis along which to slice.
even_split : bool, default True
Whether to force all slices to have the same number of elements.
Returns
-------
list of NDArray
Each corresponds to a context in `ctx_list`.
"""
if len(ctx_list) == 1:
return [d.as_in_context(ctx_list[0]) for d in data]
size = len(data)
num_slice = len(ctx_list)
step = size // num_slice
for i in range(num_slice):
for k in range(i*step, (i+1)*step):
            data[k] = data[k].as_in_context(ctx_list[i])
return data | 4b8f0d1b6b256895da3e37fbb4b1be0cd0da5c46 | 9,517 |
def _get_scoped_outputs(comp, g, explicit_outs):
"""Return a list of output varnames scoped to the given name."""
cnamedot = comp.name + '.'
outputs = set()
if explicit_outs is None:
explicit_outs = ()
for u,v in g.list_connections():
if u.startswith(cnamedot):
outputs.add(u)
outputs.update([n for n in explicit_outs if n.startswith(cnamedot)])
if not outputs:
return None
return [n.split('.',1)[1] for n in outputs] | 8ff2cfe49dc3d892c4ed4adaeb9300e9395c790b | 9,518 |
def judge_1d100_with_6_ver(target: int, dice: int):
"""
Judge 1d100 dice result, and return text and color for message.
Result is critical, success, failure or fumble.
Arguments:
target {int} -- target value (ex. skill value)
dice {int} -- dice value
Returns:
message {string}
rgb_color {string}
"""
if dice <= target:
if dice <= 5:
return "クリティカル", yig.config.COLOR_CRITICAL
return "成功", yig.config.COLOR_SUCCESS
if dice >= 96:
return "ファンブル", yig.config.COLOR_FUMBLE
return "失敗", yig.config.COLOR_FAILURE | f870f6ffee3bb90046eb2f1660e827b899c59f04 | 9,519 |
def get_normals(self, indices=None, loc="center"):
"""Return the array of the normals coordinates.
Parameters
----------
self : MeshVTK
a MeshVTK object
indices : list
list of the points to extract (optional)
loc : str
localization of the normals ("center" or "point")
Returns
-------
normals: ndarray
Normals coordinates
"""
# Get surfaces
surf = self.get_surf()
if loc == "center":
normals = surf.cell_normals
elif loc == "point":
if self.node_normals is None:
self.surf.compute_normals(
cell_normals=False, point_normals=True, inplace=True
)
self.node_normals = self.surf["Normals"]
normals = self.node_normals
if indices is None:
return normals
else:
return normals[indices, :] | 5d08247f70e1012eef7d525ae63f7aebe294e700 | 9,520 |
def string_to_weld_literal(s):
"""
Converts a string to a UTF-8 encoded Weld literal byte-vector.
Examples
--------
>>> string_to_weld_literal('hello')
'[104c,101c,108c,108c,111c]'
"""
return "[" + ",".join([str(b) + 'c' for b in list(s.encode('utf-8'))]) + "]" | d85b016091988c9307cbed56aafdd5766c3c9be5 | 9,521 |
def verify_model_licensed(class_name : str, model_path:str):
"""
Load a licensed model from HDD
"""
try :
m = eval(class_name).load(model_path)
return m
except:
print(f"Could not load Annotator class={class_name} located in {model_path}. Try updaing spark-nlp-jsl") | 057987d838982a85925f70c93ff2f4166b038cec | 9,522 |
def examine_api(api):
"""Find all style issues in the given parsed API."""
global failures
failures = {}
for key in sorted(api.keys()):
examine_clazz(api[key])
return failures | c94efa9a2be66e30597c63b376adf74bd2ef6462 | 9,523 |
from logging.handlers import RotatingFileHandler
import sys
from pathlib import Path
import logging
def enable_logging_app_factory(log_file: Path, level) -> logging.Logger:
"""
Enable logging for the system.
:param level: Logging Level
:param log_file: Log File path
:return:
"""
logger = logging.getLogger(LOGGER)
formatter = logging.Formatter(LOGGER + ': %(asctime)s %(levelname)7s: %(message)s')
fileHandler = RotatingFileHandler(log_file, mode="a+", maxBytes=5000000, backupCount=5)
fileHandler.setFormatter(formatter)
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setFormatter(formatter)
consoleHandler.setLevel(logging.INFO)
logger.setLevel(level)
logger.addHandler(fileHandler)
logger.addHandler(consoleHandler)
return logger | 827b99db1980c826bdebd0cfe0c559d6d1647ce8 | 9,524 |
def launch():
""" Initialize the module. """
return BinCounterWorker(BinCounter, PT_STATS_RESPONSE, STATS_RESPONSE) | 07bfc99731088a8572616aa1cbfbd0be74db5492 | 9,525 |
def make_map(source):
"""Creates a Bokeh figure displaying the source data on a map
Args:
source: A GeoJSONDataSource object containing bike data
Returns: A Bokeh figure with a map displaying the data
"""
tile_provider = get_provider(Vendors.STAMEN_TERRAIN_RETINA)
TOOLTIPS = [
('bikes available', '@bikes'),
]
p = figure(x_range=(-8596413.91, -8558195.48), y_range=(4724114.13, 4696902.60),
x_axis_type="mercator", y_axis_type="mercator", width=1200, height=700, tooltips=TOOLTIPS)
p.add_tile(tile_provider)
p.xaxis.visible = False
p.yaxis.visible = False
p.circle(x='x', y='y', size='size', color='color', alpha=0.7, source=source)
color_bar_palette = viridis(256)
color_mapper = LinearColorMapper(palette=color_bar_palette, low=0, high=100)
color_bar = ColorBar(color_mapper=color_mapper, background_fill_alpha=0.7, title='% Full',
title_text_align='left', title_standoff=10)
p.add_layout(color_bar)
label = Label(x=820, y=665, x_units='screen', y_units='screen',
text='Dot size represents total docks in station', render_mode='css',
border_line_color=None, background_fill_color='white', background_fill_alpha=0.7)
p.add_layout(label)
return p | 51578186a1fabd071e31e46b20568c23c79bc693 | 9,526 |
import os
def save_all_pages(pages, root='.'):
"""Save picture references in pages on the form:
pages = {
urn1 : [page1, page2, ..., pageN],
urn2: [page1, ..., pageM]},
...
urnK: [page1, ..., pageL]
}
Each page reference is a URL.
"""
# In case urn is an actual URN, works also if urn is passed as sesamid
for urn in pages:
folder_name = urn.split(':')[-1]
folder_ref = os.path.join(root, folder_name)
try:
os.mkdir(folder_ref)
except FileExistsError:
            pass
for p in pages[urn]:
            # pick out an unambiguous reference to the image from the URL in the image list, to use as the filename
filename = p.split('/')[6].split(':')[-1] + '.jpg'
path = os.path.join(folder_ref, filename)
get_picture_from_url(p).save(path)
return True | 5472be4a2d6f84eef9a256eb114e9d3ce17a5375 | 9,527 |
def rotate_images(images, rot90_scalars=(0, 1, 2, 3)):
"""Return the input image and its 90, 180, and 270 degree rotations."""
images_rotated = [
images, # 0 degree
tf.image.flip_up_down(tf.image.transpose_image(images)), # 90 degrees
tf.image.flip_left_right(tf.image.flip_up_down(images)), # 180 degrees
tf.image.transpose_image(tf.image.flip_up_down(images)) # 270 degrees
]
results = tf.stack([images_rotated[i] for i in rot90_scalars])
results = tf.reshape(results,
[-1] + images.get_shape().as_list()[1:])
return results | dd151b83918eba9b62a91b499273772e66af6ba9 | 9,528 |
from typing import List
def csv(args: List[str]) -> str:
"""create a string of comma-separated values"""
return ','.join(args) | 1e48583c236940f2af10f8e050af8ad70ace51f6 | 9,529 |
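A one-line usage example for csv:

print(csv(["2021-01-01", "42", "ok"]))  # 2021-01-01,42,ok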
import types
def _update_class(oldclass, newclass):
"""Update a class object."""
# XXX What about __slots__?
olddict = oldclass.__dict__
newdict = newclass.__dict__
# PDF changed to remove use of set as not in Jython 2.2
for name in olddict.keys():
if name not in newdict:
delattr(oldclass, name)
for name in newdict.keys():
if name not in ["__dict__", "__doc__"]:
if name not in olddict:
setattr(oldclass, name, newdict[name])
continue
new = getattr(newclass, name)
old = getattr(oldclass, name, None)
if new == old:
continue
if old is None:
setattr(oldclass, name, new)
continue
if isinstance(new, types.MethodType):
changed = _update_method(old, new)
setattr(oldclass, name, changed)
elif isinstance(new, types.FunctionType):
# __init__ is a function
changed = _update_function(old, new)
setattr(oldclass, name, changed)
else:
# Fallback to just replace the item
setattr(oldclass, name, new)
return oldclass | 123eb4eadf7bf6ee65ae5df6ae9ed6df444c25d3 | 9,530 |
import google
def update_signature():
"""Create and update signature in gmail.
Returns:Draft object, including updated signature.
Load pre-authorized user credentials from the environment.
TODO(developer) - See https://developers.google.com/identity
for guides on implementing OAuth2 for the application.
"""
creds, _ = google.auth.default()
try:
# create gmail api client
service = build('gmail', 'v1', credentials=creds)
primary_alias = None
# pylint: disable=E1101
aliases = service.users().settings().sendAs().list(userId='me')\
.execute()
for alias in aliases.get('sendAs'):
if alias.get('isPrimary'):
primary_alias = alias
break
send_as_configuration = {
'displayName': primary_alias.get('sendAsEmail'),
'signature': 'Automated Signature'
}
# pylint: disable=E1101
result = service.users().settings().sendAs() \
.patch(userId='me', sendAsEmail=primary_alias.get('sendAsEmail'),
body=send_as_configuration).execute()
print(F'Updated signature for: {result.get("displayName")}')
except HttpError as error:
print(F'An error occurred: {error}')
result = None
return result.get('signature') | 77e9a9aef4b1ab1471cf147f57008874497d28e3 | 9,531 |
def generate_block(constraints, p, rng=None):
"""Generated a balanced set of trials, might be only part of a run."""
if rng is None:
rng = np.random.RandomState()
n_trials = constraints.trials_per_run
# --- Assign trial components
# Assign the target to a side
gen_dist = np.repeat([0, 1], n_trials // 2)
while max_repeat(gen_dist) > constraints.max_dist_repeat:
gen_dist = rng.permutation(gen_dist)
# Assign pulse counts to each trial
count_support = np.arange(p.pulse_count[-1], p.pulse_count_max) + 1
count_pmf = trunc_geom_pmf(count_support, p.pulse_count[1])
expected_count_dist = count_pmf * n_trials
count_error = np.inf
while count_error > constraints.sum_count_error:
pulse_count = flexible_values(p.pulse_count, n_trials, rng,
max=p.pulse_count_max).astype(int)
count_dist = np.bincount(pulse_count, minlength=p.pulse_count_max + 1)
count_error = np.sum(np.abs(count_dist[count_support]
- expected_count_dist))
# Assign initial ITI to each trial
total_iti = np.inf
while not_in_range(total_iti, constraints.iti_range):
wait_iti = flexible_values(p.wait_iti, n_trials, rng)
total_iti = wait_iti.sum()
# Use the first random sample if we're not being precise
# about the overall time of the run (i.e. in psychophys rig)
if not p.keep_on_time:
break
# --- Build the trial_info structure
trial = np.arange(1, n_trials + 1)
trial_info = pd.DataFrame(dict(
trial=trial,
gen_dist=gen_dist,
pulse_count=pulse_count.astype(int),
wait_iti=wait_iti,
))
# --- Assign trial components
# Map from trial to pulse
trial = np.concatenate([
np.full(c, i, dtype=np.int) for i, c in enumerate(pulse_count, 1)
])
pulse = np.concatenate([
np.arange(c) + 1 for c in pulse_count
])
n_pulses = pulse_count.sum()
# Assign gaps between pulses
run_duration = np.inf
while not_in_range(run_duration, constraints.run_range):
wait_pre_stim = flexible_values(p.pulse_gap, n_trials, rng)
gap_dur = flexible_values(p.pulse_gap, n_pulses, rng)
run_duration = np.sum([
wait_iti.sum(),
wait_pre_stim.sum(),
gap_dur.sum(),
p.pulse_dur * n_pulses,
])
# Use the first random sample if we're not being precise
# about the overall time of the run (i.e. in psychophys rig)
if not p.keep_on_time:
break
# Assign pulse intensities
max_contrast = np.log10(1 / np.sqrt(p.stim_gratings))
log_contrast = np.zeros(n_pulses)
pulse_dist = np.concatenate([
np.full(n, i, dtype=np.int) for n, i in zip(pulse_count, gen_dist)
])
llr_mean = np.inf
llr_sd = np.inf
expected_acc = np.inf
while (not_in_range(llr_mean, constraints.mean_range)
or not_in_range(llr_sd, constraints.sd_range)
or not_in_range(expected_acc, constraints.acc_range)):
for i in [0, 1]:
dist = "norm", p.dist_means[i], p.dist_sds[i]
rows = pulse_dist == i
n = rows.sum()
log_contrast[rows] = flexible_values(dist, n, rng,
max=max_contrast)
pulse_llr = compute_llr(log_contrast, p)
target_llr = np.where(pulse_dist, pulse_llr, -1 * pulse_llr)
llr_mean = target_llr.mean()
llr_sd = target_llr.std()
dv = pd.Series(target_llr).groupby(pd.Series(trial)).sum()
dv_sd = np.sqrt(constraints.sigma ** 2 * pulse_count)
expected_acc = stats.norm(dv, dv_sd).sf(0).mean()
# --- Build the pulse_info structure
pulse_info = pd.DataFrame(dict(
trial=trial,
pulse=pulse,
gap_dur=gap_dur,
log_contrast=log_contrast,
contrast=10 ** log_contrast,
pulse_llr=pulse_llr,
))
# --- Update the trial_info structure
trial_info["wait_pre_stim"] = wait_pre_stim
trial_llr = (pulse_info
.groupby("trial")
.sum()
.loc[:, "pulse_llr"]
.rename("trial_llr"))
trial_info = trial_info.join(trial_llr, on="trial")
# TODO reorder the columns so they are more intuitively organized?
return trial_info, pulse_info | 3c712f94e6fc4e7b5317f13a733afd1c13d7a723 | 9,532 |
def mean_by_weekday(day, val):
"""
Returns a list that contain weekday, mean of beginning and end of presence.
"""
return [day_abbr[day], mean(val['start']), mean(val['end'])] | 8aa7ac3dde83db88b44d2178ba19c5b731af683c | 9,533 |
def parse_metrics(match, key):
"""Gets the metrics out of the parsed logger stream"""
elements = match.split(' ')[1:]
elements = filter(lambda x: len(x) > 2, elements)
elements = [float(e) for e in elements]
metrics = dict(zip(['key', 'precision', 'recall', 'f1'], [key] + elements))
return metrics | 70de1ad16edfe827e0a851c719d902695696700f | 9,534 |
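A usage sketch for parse_metrics with a hypothetical logger line of the form "<key> <precision> <recall> <f1>":

line = "dev 0.91 0.88 0.894"
print(parse_metrics(line, "dev"))
# {'key': 'dev', 'precision': 0.91, 'recall': 0.88, 'f1': 0.894}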
def minecraftify(clip: vs.VideoNode, div: float = 64.0, mod: int | None = None) -> vs.VideoNode:
"""
Function that transforms your clip into a Minecraft.
Idea from Meme-Maji's Kobayashi memery (love you varde).
:param clip: Input clip
:param div: How much to divide the clip's resolution with
:param mod: Force the downscaled clip to be MOD# compliant
:return: A Minecraft.
"""
ow, oh = round(clip.width/div), round(clip.height/div)
if mod is not None:
ow, oh = force_mod(ow, mod), force_mod(oh, mod)
i444 = core.resize.Bicubic(clip, format=vs.YUV444PS)
down = Point().scale(i444, ow, oh)
return Point().scale(down, clip.width, clip.height) | 4f8338cfe2df8bff8d4f2c7571fa38688e39496c | 9,535 |
def processGOTerm(goTerm):
"""
In an object representing a GO term, replace single-element lists with
their only member.
Returns the modified object as a dictionary.
"""
ret = dict(goTerm) #Input is a defaultdict, might express unexpected behaviour
for key, value in ret.items():
if len(value) == 1:
ret[key] = value[0]
return ret | 541916a0060726bbc972b784f9a011541e7c8128 | 9,536 |
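A usage sketch for processGOTerm (the GO identifiers are made up): single-element lists are unwrapped, longer lists stay as lists.

term = {"id": ["GO:0008150"], "name": ["biological_process"], "is_a": ["GO:0000001", "GO:0000002"]}
print(processGOTerm(term))
# {'id': 'GO:0008150', 'name': 'biological_process', 'is_a': ['GO:0000001', 'GO:0000002']}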
import urllib
def searchxapian_show(request):
""" zeigt den Inhalt eines Dokumentes """
SORT_BY = { -1: _(u'Relevanz'),
0: _(u'URL'),
1: _(u'Überschrift/Titel'),
2: _(u'Datum der letzten Änderung') }
if request.path.find('index.html') < 0:
my_path = request.path.replace('searchxapian', 'index.html/searchxapian')
else:
my_path = request.path
item_container = get_item_container(my_path, '/searchxapian/')
def get_sort_by_choices():
ret = []
ret.append((-1, SORT_BY[-1])) # Siehe SORT_BY
ret.append((0, SORT_BY[0]))
ret.append((1, SORT_BY[1]))
ret.append((2, SORT_BY[2]))
return ret
def get_domain_choices():
""" """
ret = []
ret.append( ('', _(u'Alle Seiten')) )
if item_container != None:
url = item_container.container.site.url[7:]
ret.append( (url, _(u'Nur Seiten der Domaine <i>') + url + '</i>') )
return ret
class DmsItemForm(forms.Form):
query = forms.CharField(required=False, max_length=60,
widget=forms.TextInput(attrs={'size':60}) )
sort_by = forms.CharField(
widget=forms.Select(choices=
get_sort_by_choices(),
attrs={'size':4, 'style':'width:60%'} ) )
domain = forms.ChoiceField(required=False, choices=get_domain_choices(),
widget=forms.RadioSelect() )
def get_prev_next(query, offset, delta, domain, sort_by, count):
aquery = u'query=%s' % urllib.quote_plus(query)
if domain == '':
site = ''
else:
site = '&domain=' + domain
show_prev = ''
show_next = ''
show_middle = ''
n_start = 0
if count > offset + 10*delta:
show_next_more = True
count = offset + 10*delta
else:
show_next_more = False
if offset > 10*delta:
show_prev_more = True
n_start = offset - 10*delta
else:
show_prev_more = False
n = n_start
while n < count:
if n < offset:
show_prev += show_link(u'./?%s&offset=%i&sort_by=%i%s' % (aquery, n, sort_by, site),
smart_unicode(n), url_class='navLink') + ' '
elif n == offset:
show_middle = '<b>%i</b> ' % n
else:
show_next += show_link(u'./?%s&offset=%i&sort_by=%i%s' % \
(aquery, n, sort_by, site),
smart_unicode(n), url_class='navLink') + ' '
n += delta
if show_next_more:
show_next += show_link(u'./?%s&offset=%i&sort_by=%i%s' % \
(aquery, n, sort_by, site),
' » Weiter', url_class='navLink')
if show_prev_more:
show_prev = show_link(u'./?%s&offset=%i&sort_by=%i%s' % \
(aquery, n_start-delta, sort_by, site),
'Zurück « ', url_class='navLink') + show_prev
if count < delta:
show_middle = ''
return show_prev, show_middle, show_next
def get_search_results(request):
sort_by = -1
offset = 0
delta = 20
count = -1
if show_errors:
data = request.POST.copy()
query = data['query']
domain = data['domain']
else:
data = { 'query': '', 'sort_by': -1,}
query = ''
domain = ''
if params.has_key('offset'):
offset = int(params['offset'])
if params.has_key('sort_by'):
sort_by = int(params['sort_by'])
if params.has_key('domain'):
domain = params['domain']
if params.has_key('query'):
query = params['query']
data = { 'query': query, 'sort_by': sort_by, 'domain': domain}
s = xmlrpclib.Server('http://localhost:3000')
sort_by = int(data['sort_by'])
ascending = sort_by==2
res = s.search(query, offset, delta, domain, sort_by, ascending)
return res, query, offset, delta, domain, sort_by, data
def get_link_list(rs):
results = []
for r in rs:
this_link = show_link(r['url'], r['title']) + u' {%s}' % r['percent']
# --- Siehe SORT_BY
if sort_by == 0:
this_link += '<br />' + r['url']
elif sort_by == 2:
this_link += ', ' + get_german_date(r['date'])
results.append(this_link)
return results
app_name = 'searchxapian'
my_title = _(u'Suchanfrage stellen')
if item_container != None:
my_absolute_url = item_container.get_absolute_url()
else:
my_absolute_url = './'
show_errors = ( request.method == 'POST' )
params = request.GET.copy()
if params!={} or show_errors:
res, query, offset, delta, domain, sort_by, data = get_search_results(request)
query = decode_html(query)
        # --- convert the raw results into a list
count = res['count']
rs = res['results']
results = get_link_list(rs)
if query.find('&') >= 0:
q = query
else:
try:
q = encode_html(query.decode('iso-8859-1'))
except:
q = encode_html(query)
show_prev, show_middle, show_next = \
get_prev_next(q, offset, delta, domain, sort_by, count)
else :
sort_by = -1
query = ''
count = 20
        data = { 'query': '', 'sort_by': sort_by, 'domain': '', }
results = []
show_prev = ''
show_middle = ''
show_next = ''
f = DmsItemForm(data)
    # --- order, headings, help texts
tabs = [
('tab_base',['query',]),
('tab_more', ['sort_by', 'domain', ]) ]
    # --- assemble the form
content = get_tabbed_form(tabs, help_form, app_name , f)
    # --- external search engines
search_engines = get_search_engines()
links = []
for engine in search_engines:
if query.find('&') < 0:
url = engine.url_query % (urllib.quote_plus(encode_html(query.decode('iso-8859-1'))),
SEARCH_DOMAIN)
else:
url = engine.url_query % (urllib.quote_plus(query), SEARCH_DOMAIN)
links.append(show_link(url, engine.name, url_class="navLink"))
t = get_template('utils/info_slot_right_list_simple.html')
c = Context ( { 'header': _(u'Externe Suche'),
'links': links
} )
slot_info_right = t.render(c)
    # --- link back to the start page
back_link = show_link(my_absolute_url, _(u'Zur Ausgangsseite ...'),
url_class="navLink")
t = get_template('utils/info_slot_right.html')
c = Context ( { 'header': _(u'Ausgangsseite'),
'info': back_link
} )
slot_info_right += '<br /><br />\n' + t.render(c)
vars = get_item_vars_add(request, item_container, app_name, my_title,
content, show_errors)
vars['next'] = get_site_url(item_container, 'searchxapian/')
vars['path'] = item_container.container.path + 'searchxapian/'
vars['sub_title'] = ''
vars['slot_right_info'] = slot_info_right
vars['action'] = ''
vars['results'] = results
vars['count'] = count
vars['show_prev'] = show_prev
vars['show_middle'] = show_middle
vars['show_next'] = show_next
vars['sort_by'] = SORT_BY[sort_by]
vars['google_search'] = 'google'
vars['no_top_main_navigation'] = True
return render_to_response ( 'app/searchxapian/base.html', vars ) | bd1f252107bfcf2aa02cf58ef6d1a302d71edbd8 | 9,537 |
import re
def html2plaintext(html, body_id=None, encoding='utf-8'):
""" From an HTML text, convert the HTML to plain text.
If @param body_id is provided then this is the tag where the
body (not necessarily <body>) starts.
"""
## (c) Fry-IT, www.fry-it.com, 2007
## <[email protected]>
## download here: http://www.peterbe.com/plog/html2plaintext
html = ustr(html)
tree = etree.fromstring(html, parser=etree.HTMLParser())
if body_id is not None:
source = tree.xpath('//*[@id=%s]' % (body_id,))
else:
source = tree.xpath('//body')
if len(source):
tree = source[0]
url_index = []
i = 0
for link in tree.findall('.//a'):
url = link.get('href')
if url:
i += 1
link.tag = 'span'
link.text = '%s [%s]' % (link.text, i)
url_index.append(url)
html = ustr(etree.tostring(tree, encoding=encoding))
    # \r char is converted into &#13;, must remove it
    html = html.replace('&#13;', '')
html = html.replace('<strong>', '*').replace('</strong>', '*')
html = html.replace('<b>', '*').replace('</b>', '*')
html = html.replace('<h3>', '*').replace('</h3>', '*')
html = html.replace('<h2>', '**').replace('</h2>', '**')
html = html.replace('<h1>', '**').replace('</h1>', '**')
html = html.replace('<em>', '/').replace('</em>', '/')
html = html.replace('<tr>', '\n')
html = html.replace('</p>', '\n')
html = re.sub('<br\s*/?>', '\n', html)
html = re.sub('<.*?>', ' ', html)
html = html.replace(' ' * 2, ' ')
# strip all lines
html = '\n'.join([x.strip() for x in html.splitlines()])
html = html.replace('\n' * 2, '\n')
for i, url in enumerate(url_index):
if i == 0:
html += '\n\n'
html += ustr('[%s] %s\n') % (i + 1, url)
return html | 70a7af7e557b6cffac05e33a7a394fdccbf7bc84 | 9,538 |
def _create_regularization_of_grad(param, grad, regularization=None):
""" Create and add backward regularization Operators
Function helper of append_regularization_ops.
"""
# If no gradient or no regularization is specified, then we don't need to do anything
if grad is None or (param.regularizer is None and regularization is None):
return grad
regularization_term = None
if param.regularizer is not None:
# Add variable for regularization term in grad block
regularization_term = param.regularizer(param, grad, grad.block)
elif regularization is not None:
regularization_term = regularization(param, grad, grad.block)
assert regularization_term is not None
new_grad = grad
if grad.type == core.VarDesc.VarType.SELECTED_ROWS:
# FIXME(zcd): If the grad is SELECTED_ROWS, after regularization,
# the grad's type and name will be changed. But the gradient's name
# is used in ParallelExecutor Reduce mode, so I add a flag for
# the new_grad here.
new_grad = grad.block.create_var(
name=grad.name + core.kNewGradSuffix(),
dtype=param.dtype,
shape=param.shape,
lod_level=param.lod_level,
type=core.VarDesc.VarType.LOD_TENSOR)
inputs = {"X": [grad, regularization_term]}
outputs = {"Out": [new_grad]}
if in_dygraph_mode():
core.ops.sum(inputs, {}, outputs)
else:
grad.block.append_op(type='sum', inputs=inputs, outputs=outputs)
return new_grad | c3c027bc72cf2a0ced45ca2ed686301303582588 | 9,539 |
def MooreSpace(q):
"""
Triangulation of the mod `q` Moore space.
INPUT:
    - ``q`` -- integer, at least 2
This is a simplicial complex with simplices of dimension 0, 1,
and 2, such that its reduced homology is isomorphic to
`\\ZZ/q\\ZZ` in dimension 1, zero otherwise.
If `q=2`, this is the real projective plane. If `q>2`, then
construct it as follows: start with a triangle with vertices
1, 2, 3. We take a `3q`-gon forming a `q`-fold cover of the
triangle, and we form the resulting complex as an
identification space of the `3q`-gon. To triangulate this
identification space, put `q` vertices `A_0`, ..., `A_{q-1}`,
in the interior, each of which is connected to 1, 2, 3 (two
facets each: `[1, 2, A_i]`, `[2, 3, A_i]`). Put `q` more
vertices in the interior: `B_0`, ..., `B_{q-1}`, with facets
`[3, 1, B_i]`, `[3, B_i, A_i]`, `[1, B_i, A_{i+1}]`, `[B_i,
A_i, A_{i+1}]`. Then triangulate the interior polygon with
vertices `A_0`, `A_1`, ..., `A_{q-1}`.
EXAMPLES::
sage: simplicial_complexes.MooreSpace(2)
Minimal triangulation of the real projective plane
sage: simplicial_complexes.MooreSpace(3).homology()[1]
C3
sage: simplicial_complexes.MooreSpace(4).suspension().homology()[2]
C4
sage: simplicial_complexes.MooreSpace(8)
Triangulation of the mod 8 Moore space
"""
if q <= 1:
raise ValueError("the mod q Moore space is only defined if q is at least 2")
if q == 2:
return RealProjectivePlane()
facets = []
for i in range(q):
Ai = "A" + str(i)
Aiplus = "A" + str((i+1) % q)
Bi = "B" + str(i)
facets.append([1, 2, Ai])
facets.append([2, 3, Ai])
facets.append([3, 1, Bi])
facets.append([3, Bi, Ai])
facets.append([1, Bi, Aiplus])
facets.append([Bi, Ai, Aiplus])
for i in range(1, q-1):
Ai = "A" + str(i)
Aiplus = "A" + str((i+1) % q)
facets.append(["A0", Ai, Aiplus])
return UniqueSimplicialComplex(facets,
name='Triangulation of the mod {} Moore space'.format(q)) | 448e948782d530f6b1ee0909fae02b66606da94d | 9,540 |
def show(tournament_name, params=[], filter_response=True):
"""Retrieve a single tournament record by `tournament name`"""
utils._validate_query_params(params=params, valid_params=VALID_PARAMS, route_type='tournament')
uri = TOURNAMENT_PREFIX + tournament_name
response = api.get(uri, params)
if filter_response:
response = _filter_tournament_response(response, params)
return response | d854c97e312a0bd6860a5c7fa7cbd36cd79d4ffd | 9,541 |
def auto_run_api_pk(**kwargs):
"""run api by pk and config
"""
id = kwargs['id']
env = kwargs['config']
config_name = 'rig_prod' if env == 1 else 'rig_test'
api = models.API.objects.get(id=id)
config = eval(models.Config.objects.get(name=config_name, project=api.project).body)
test_case = eval(api.body)
summary = loader.debug_api(test_case, api.project.id, config=config)
api_request = summary['details'][0]['records'][0]['meta_data']['request']
api_response = summary['details'][0]['records'][0]['meta_data']['response']
    # the API ran successfully: set the tag to "auto-run succeeded"
if summary['stat']['failures'] == 0 and summary['stat']['errors'] == 0:
models.API.objects.filter(id=id).update(tag=3)
return 'success'
elif summary['stat']['failures'] == 1:
# models.API.objects.filter(id=id).update(tag=2)
return 'fail' | ba0e424d7ccbc3d1a6d3f8f0ec58892f4172d215 | 9,542 |
from typing import Optional
from datetime import datetime
def create(arxiv_id: ArXivID,
arxiv_ver: int,
resource_type: str,
resource_id: str,
description: str,
creator: Optional[str]) -> Relation:
"""
Create a new relation for an e-print.
Parameters
----------
arxiv_id: ArXivID
The arXiv ID of the e-print.
arxiv_ver: int
The version of the e-print.
resource_type: str
The type of the corresponding resource.
resource_id: str
An identifier of the resource e.g., DOI.
description: str
A description for the relation.
creator: Optional[str]
Info of the user/app who requested this relation creation.
Returns
-------
Relation
The newly-created relation.
"""
# store it to DB
rel_data = RelationDB(rel_type=RelationType.ADD,
arxiv_id=str(arxiv_id),
arxiv_ver=arxiv_ver,
resource_type=resource_type,
resource_id=resource_id,
description=description,
added_at=datetime.now(UTC),
creator=creator,
supercedes_or_suppresses=None)
try:
db.session.add(rel_data)
db.session.commit()
except Exception as e:
db.session.rollback()
raise StorageError from e
# return the result
return relation_from_DB(rel_data) | e1cbe374bba359b66d8564134ee27ac777c4a16e | 9,543 |
import numpy as np
def p_contain_resist(D, t, f_y, f_u=None):
"""Pressure containment resistance in accordance with DNVGL-ST-F101.
(press_contain_resis)
Reference:
DNVGL-ST-F101 (2017-12)
sec:5.4.2.2 eq:5.8 p:94 $p_{b}(t)$
"""
if f_u is None:
f_cb = f_y
else:
f_cb = np.minimum(f_y, f_u/1.15)
p_b = (2*t/(D-t) * f_cb * 2/np.sqrt(3))
return p_b | 1c771eebf2ed43115b8ae32405172cd8576d66a2 | 9,544 |
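A usage sketch with made-up pipe dimensions (metres) and strengths (Pa); the result is the burst pressure in Pa.

p_b = p_contain_resist(D=0.3239, t=0.0127, f_y=450e6, f_u=535e6)
print(p_b / 1e6)  # burst pressure in MPa (roughly 42 MPa for these inputs)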
def revalue(request):
"""其它设备参数修改"""
value = request.GET.get('value')
name = request.GET.get('name')
others = Machines().filter_machines(OtherMachineInfo, pk=request.GET.get('dID'))[0]
if name == 'remark':
others.remark = value
elif name == 'machine_name':
others.machine_name = value
elif name == 'reson_str':
others.reson_str = value
elif name == 'oth_cab_id':
return '再考虑考虑'
others.save()
return JsonResponse({'is_ok': 1}) | 0a7ab179932466e171a119c87871e04e2a3ae252 | 9,545 |
import authl.handlers.indieauth
import json
def indieauth_endpoint():
""" IndieAuth token endpoint """
if 'me' in flask.request.args:
# A ticket request is being made
me_url = flask.request.args['me']
try:
endpoint, _ = authl.handlers.indieauth.find_endpoint(me_url,
rel='ticket_endpoint')
except RuntimeError:
endpoint = None
if not endpoint:
raise http_error.BadRequest("Could not get ticket endpoint")
LOGGER.info("endpoint: %s", endpoint)
send_auth_ticket(me_url, flask.request.url_root, endpoint)
return "Ticket sent", 202
if 'grant_type' in flask.request.form:
# token grant
if flask.request.form['grant_type'] == 'ticket':
# TicketAuth
if 'ticket' not in flask.request.form:
raise http_error.BadRequest("Missing ticket")
ticket = parse_token(flask.request.form['ticket'])
LOGGER.info("Redeeming ticket for %s; scopes=%s", ticket['me'],
ticket['scope'])
scopes = set(ticket.get('scope', '').split())
if 'ticket' not in scopes:
raise http_error.BadRequest("Missing 'ticket' scope")
scopes.remove('ticket')
scope = ' '.join(scopes)
token = get_token(ticket['me'], config.token_lifetime, scope)
response = {
'access_token': token,
'token_type': 'Bearer',
'me': ticket['me'],
'expires_in': config.token_lifetime,
'refresh_token': get_token(ticket['me'],
config.token_lifetime,
ticket['scope'])
}
if scope:
response['scope'] = scope
return json.dumps(response), {'Content-Type': 'application/json'}
raise http_error.BadRequest("Unknown grant type")
if 'action' in flask.request.form:
raise http_error.BadRequest()
if 'Authorization' in flask.request.headers:
# ticket verification
parts = flask.request.headers['Authorization'].split()
if parts[0].lower() == 'bearer':
token = parse_token(parts[1])
return json.dumps(token), {'Content-Type': 'application/json'}
raise http_error.Unauthorized("Invalid authorization header")
raise http_error.BadRequest() | 29809af2c243a08b675738b0169bdc794965c934 | 9,546 |
def policy_simulation_c(model,var,ages):
""" policy simulation for couples"""
if var == 'd':
return {'hs': lifecycle_c(model,var=var,MA=[0],ST_w=[1,3],ages=ages,calc='sum')['y'][0] +
lifecycle_c(model,var=var,MA=[1],ST_h=[1,3],ages=ages,calc='sum')['y'][0],
'hs_f': lifecycle_c(model,var=var,MA=[0],ST_w=[1,3],ages=ages,calc='sum')['y'][0],
'hs_m': lifecycle_c(model,var=var,MA=[1],ST_h=[1,3],ages=ages,calc='sum')['y'][0],
'base': lifecycle_c(model,var=var,MA=[0,1],ages=ages,calc='sum')['y'][0],
'base_f': lifecycle_c(model,var=var,MA=[0],ages=ages,calc='sum')['y'][0],
'base_m': lifecycle_c(model,var=var,MA=[1],ages=ages,calc='sum')['y'][0],
'ls': lifecycle_c(model,var=var,MA=[0],ST_w=[0,2],ages=ages,calc='sum')['y'][0] +
lifecycle_c(model,var=var,MA=[1],ST_h=[0,2],ages=ages,calc='sum')['y'][0],
'ls_f': lifecycle_c(model,var=var,MA=[0],ST_w=[0,2],ages=ages,calc='sum')['y'][0],
'ls_m': lifecycle_c(model,var=var,MA=[1],ST_h=[0,2],ages=ages,calc='sum')['y'][0]
}
if var == 'probs':
return {'base_f': retirement_probs_c(model,ma=0),
'base_m': retirement_probs_c(model,ma=1)
}
if var == 'GovS':
return lifecycle_c(model,var=var,MA=[0,1],ages=ages,calc='total_sum')['y'][0]
if var == 'RetAge':
return {'hs':
np.mean(np.concatenate((RetAge_C(model,ma=0,ST_w=[1,3]),
RetAge_C(model,ma=1,ST_h=[1,3])))),
'base_m':
np.mean(RetAge_C(model,ma=1)),
'base_f':
np.mean(RetAge_C(model,ma=0)),
'base':
np.mean(np.concatenate((RetAge_C(model,ma=0),
RetAge_C(model,ma=1)))),
'ls':
np.mean(np.concatenate((RetAge_C(model,ma=0,ST_w=[0,2]),
RetAge_C(model,ma=1,ST_h=[0,2]))))
} | d81ddd950eafb23b8cb219638b358a65084ae08d | 9,547 |
def emit_obj_db_entry(target, source, env):
"""Emitter for object files. We add each object file
built into a global variable for later use"""
for t in target:
if str(t) is None:
continue
OBJ_DB.append(t)
return target, source | e02c2b4e3f3b1aad15097c6b4701407ef1902b77 | 9,548 |
def listtimes(list, c):
"""multiplies the elements in the list by the given scalar value c"""
ret = []
for i in range(0, len(list)):
        ret.extend([list[i]] * c)
    return ret | 8aef63677a1a926f355644187d58b47e437e152c | 9,549 |
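A usage example for listtimes:

print(listtimes([1, 2, 3], 2))  # [1, 1, 2, 2, 3, 3]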
import os
import stat
import traceback
import sys
def get(request):
"""Given a Request, return a Resource object (with caching).
We need the request because it carries media_type_default.
"""
# XXX This is not thread-safe. It used to be, but then I simplified it
# when I switched to diesel. Now that we have multiple engines, some of
# which are threaded, we need to make this thread-safe again.
# Get a cache Entry object.
# =========================
if request.fs not in __cache__:
entry = Entry()
__cache__[request.fs] = entry
entry = __cache__[request.fs]
# Process the resource.
# =====================
mtime = os.stat(request.fs)[stat.ST_MTIME]
if entry.mtime == mtime: # cache hit
if entry.exc is not None:
raise entry.exc
else: # cache miss
try:
entry.resource = load(request, mtime)
except: # capture any Exception
entry.exc = ( LoadError(traceback.format_exc())
, sys.exc_info()[2]
)
else: # reset any previous Exception
entry.exc = None
entry.mtime = mtime
if entry.exc is not None:
raise entry.exc[0] # TODO Why [0] here, and not above?
# Return
# ======
# The caller must take care to avoid mutating any context dictionary at
# entry.resource.pages[0].
return entry.resource | dcc573fff237fde0b444dc5a655865a6593a27b1 | 9,550 |
def split_audio_ixs(n_samples, rate=STEP_SIZE_EM, min_coverage=0.75):
"""
Create audio,mel slice indices for the audio clip
Args:
Returns:
"""
assert 0 < min_coverage <= 1
# Compute how many frames separate two partial utterances
samples_per_frame = int((SAMPLING_RATE * WINDOW_STEP_DIARIZATION / 1000))
n_frames = int(np.ceil((n_samples + 1) / samples_per_frame))
frame_step = int(np.round((SAMPLING_RATE / rate) / samples_per_frame))
assert 0 < frame_step, "The rate is too high"
assert frame_step <= H_L, "The rate is too low, it should be %f at least" % \
(SAMPLING_RATE / (samples_per_frame * H_L))
wav_slices, mel_slices = [], []
steps = max(1, n_frames - H_L + frame_step + 1)
for i in range(0, steps, frame_step):
mel_range = np.array([i, i + H_L])
wav_range = mel_range * samples_per_frame
mel_slices.append(slice(*mel_range))
wav_slices.append(slice(*wav_range))
last_wav_range = wav_slices[-1]
coverage = (n_samples - last_wav_range.start) / \
(last_wav_range.stop - last_wav_range.start)
if coverage < min_coverage and len(mel_slices) > 1:
mel_slices = mel_slices[:-1]
wav_slices = wav_slices[:-1]
return wav_slices, mel_slices | d3a71082c9f551dffb5a0457ba79fc3318f6df6a | 9,551 |
def new(w: int, h: int, fmt: str, bg: int) -> 'Image':
"""
Creates new image by given size and format
and fills it with bg color
"""
if fmt not in ('RGB', 'RGBA', 'L', 'LA'):
raise ValueError('invalid format')
c = len(fmt)
image = Image()
image.im = _new_image(w, h, c)
lib.image_draw_rect(image.im, 0, 0, w, h, bg)
return image | c570eab9d62def584a2a12b8a228b30c57cfed76 | 9,552 |
def eval_f(f, xs):
"""Takes a function f = f(x) and a list xs of values that should be used as arguments for f.
The function eval_f should apply the function f subsequently to every value x in xs, and
return a list fs of function values. I.e. for an input argument xs=[x0, x1, x2,..., xn] the
function eval_f(f, xs) should return [f(x0), f(x1), f(x2), ..., f(xn)]."""
return [f(x) for x in xs]
# alternatively: return list(map(f, xs)) | 00c6ed7fc59b213a3ec9fec9feeb3d91b1522061 | 9,553 |
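A usage example for eval_f:

print(eval_f(lambda x: x ** 2, [0, 1, 2, 3]))  # [0, 1, 4, 9]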
def cie94_loss(x1: Tensor, x2: Tensor, squared: bool = False, **kwargs) -> Tensor:
"""
Computes the L2-norm over all pixels of the CIEDE2000 Color-Difference for two RGB inputs.
Parameters
----------
x1 : Tensor:
First input.
x2 : Tensor:
Second input (of size matching x1).
squared : bool
Returns the squared L2-norm.
Returns
-------
    ΔE_94_l2 : Tensor
        The L2-norm over all pixels of the CIE94 Color-Difference.
"""
ΔE_94_squared = rgb_cie94_color_difference(x1, x2, squared=True, **kwargs).flatten(1)
ε = kwargs.get('ε', 0)
if squared:
return ΔE_94_squared.sum(1)
return ΔE_94_squared.sum(1).clamp_min(ε).sqrt() | 1044585ce4cf8158caa3b969a8b94001681815db | 9,554 |
def get_current_user_id() -> str:
"""
This functions gets the id of the current user that is signed in to the Azure CLI.
In order to get this information, it looks like there are two different services,
"Microsoft Graph" (developer.microsoft.com/graph) and "Azure AD Graph"
(graph.windows.net), the latter being deprecated
(https://devblogs.microsoft.com/microsoft365dev/microsoft-graph-or-azure-ad-graph/).
I think these services correspond to two different python libraries, msal
(https://docs.microsoft.com/en-us/python/api/overview/azure/active-directory?view=azure-python)
and adal (https://docs.microsoft.com/en-us/python/api/adal/adal?view=azure-python),
but these libraries don't appear to do anything super useful on their own.
The deprecated Azure Graph API seems to correspond to a higher-level library
azure-graphrbac, which does seem to have the functionality we need:
azure.graphrbac.GraphRbacManagementClient.signed_in_user, but is deprecated along
with Azure Graph
(https://github.com/Azure/azure-sdk-for-python/issues/14022#issuecomment-752279618).
The msgraph library that we use here seems to be a not-very-high-level library
for Microsoft Graph (https://github.com/microsoftgraph/msgraph-sdk-python-core).
As a side note, another way to get this information is to use the command line to
call `az ad signed-in-user show`, but that appears to be relying on the deprecated
Azure Graph API as it gives a deprecation warning.
"""
# crucial scopes parameter is needed, see
# https://github.com/microsoftgraph/msgraph-sdk-python-core/issues/106#issuecomment-969281260
with get_credential() as credential:
client = GraphClient(
credential=credential, scopes=["https://graph.microsoft.com"]
)
# https://docs.microsoft.com/en-us/graph/api/user-get?view=graph-rest-1.0&tabs=http
result = client.get("/me")
return result.json()["id"] | 79a557762c9c4c2a6546370f492f879f3f046f67 | 9,555 |
def scale_labels(subject_labels):
"""Saves two lines of code by wrapping up the fitting and transform methods of the LabelEncoder
Parameters
:param subject_labels: ndarray
Label array to be scaled
:return: ndarray
Scaled label array
"""
encoder = preprocessing.LabelEncoder()
_ = encoder.fit(subject_labels)
return encoder.transform(subject_labels) | e7c4e4c01f7bc7b43519f1eaf97ff9ce0fda9bbd | 9,556 |
def _get_basemap(grid_metadata_dict):
"""Creates basemap.
M = number of rows in grid
    N = number of columns in grid
:param grid_metadata_dict: Dictionary created by
`grids.create_equidistant_grid`.
:return: basemap_object: Basemap handle (instance of
`mpl_toolkits.basemap.Basemap`).
:return: basemap_x_matrix_metres: M-by-N numpy array of x-coordinates under
Basemap projection (different than pyproj projection).
:return: basemap_y_matrix_metres: Same but for y-coordinates.
"""
x_matrix_metres, y_matrix_metres = grids.xy_vectors_to_matrices(
x_unique_metres=grid_metadata_dict[grids.X_COORDS_KEY],
y_unique_metres=grid_metadata_dict[grids.Y_COORDS_KEY]
)
projection_object = grid_metadata_dict[grids.PROJECTION_KEY]
latitude_matrix_deg, longitude_matrix_deg = (
projections.project_xy_to_latlng(
x_coords_metres=x_matrix_metres, y_coords_metres=y_matrix_metres,
projection_object=projection_object)
)
standard_latitudes_deg, central_longitude_deg = _get_lcc_params(
projection_object)
basemap_object = Basemap(
projection='lcc', lat_1=standard_latitudes_deg[0],
lat_2=standard_latitudes_deg[1], lon_0=central_longitude_deg,
rsphere=projections.DEFAULT_EARTH_RADIUS_METRES,
ellps=projections.SPHERE_NAME, resolution=RESOLUTION_STRING,
llcrnrx=x_matrix_metres[0, 0], llcrnry=y_matrix_metres[0, 0],
urcrnrx=x_matrix_metres[-1, -1], urcrnry=y_matrix_metres[-1, -1]
)
basemap_x_matrix_metres, basemap_y_matrix_metres = basemap_object(
longitude_matrix_deg, latitude_matrix_deg)
return basemap_object, basemap_x_matrix_metres, basemap_y_matrix_metres | caeac576f5a6345378c71e8e0e690f9bafda0995 | 9,557 |
import types
def unary_math_intr(fn, intrcode):
"""
Implement the math function *fn* using the LLVM intrinsic *intrcode*.
"""
@lower(fn, types.Float)
def float_impl(context, builder, sig, args):
res = call_fp_intrinsic(builder, intrcode, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
unary_math_int_impl(fn, float_impl)
return float_impl | cd3a4c22dab5ea1776987a717c32fbbc71d75da7 | 9,558 |
def is_is_int(a):
"""Return `True` if `a` is an expression of the form IsInt(b).
>>> x = Real('x')
>>> is_is_int(IsInt(x))
True
>>> is_is_int(x)
False
"""
return is_app_of(a, Kind.IS_INTEGER) | d7565102a228119ba3157e9569495c5531ea5d74 | 9,559 |
def getTestSuite(select="unit"):
"""
Get test suite
select is one of the following:
"unit" return suite of unit tests only
"component" return suite of unit and component tests
"all" return suite of unit, component and integration tests
"pending" return suite of pending tests
name a single named test to be run
"""
testdict = {
"unit":
[ "testUnits"
, "testNull"
],
"component":
[ "testComponents"
, "testReadMe"
, "testCreateFile"
, "testRewriteFile"
, "testUpdateFile"
, "testDeleteFile"
],
"integration":
[ "testIntegration"
],
"pending":
[ "testPending"
, "testWebDAVFile"
, "testWebDAVFileUrlLib"
]
}
return TestUtils.getTestSuite(TestWebDAVAccess, testdict, select=select) | 8ea94ad556dd77d28d5abdb2034b51858c996042 | 9,560 |
import torch
def normalize_channel_wise(tensor: torch.Tensor, mean: torch.Tensor, std: torch.Tensor) -> torch.Tensor:
"""Normalizes given tensor channel-wise
Parameters
----------
tensor: torch.Tensor
Tensor to be normalized
mean: torch.tensor
Mean to be subtracted
std: torch.Tensor
Std to be divided by
Returns
-------
result: torch.Tensor
"""
if len(tensor.size()) != 3:
raise ValueError
for channel in range(tensor.size(0)):
tensor[channel, :, :] -= mean[channel]
tensor[channel, :, :] /= std[channel]
return tensor | 862a5497d9c4379a974e8e2543acc0c1282faea5 | 9,561 |
import tqdm
def load_images(shot_paths):
"""
images = {
shot1: {
frame_id1: PIL image1,
...
},
...
}
"""
images = list(tqdm(map(load_image, shot_paths), total=len(shot_paths), desc='loading images'))
images = {k: v for k, v in images}
return images | 4916c68e1b4255d066bc624284cde77036764dd6 | 9,562 |
import numpy
def rmSingles(fluxcomponent, targetstring='target'):
"""
Filter out targets in fluxcomponent that have only one ALMA source.
"""
nindiv = len(fluxcomponent)
flagger = numpy.zeros(nindiv)
for icomp in range(nindiv):
target = fluxcomponent[targetstring][icomp]
match = fluxcomponent[targetstring] == target
nmatch = fluxcomponent[targetstring][match].size
if nmatch == 1:
flagger[icomp] = 1
goodflag = flagger == 0
fluxcomponent = fluxcomponent[goodflag]
return fluxcomponent | 013d5f3169fd1dcb277733627ecd5b0135bc33fb | 9,563 |
import os
import json
def discover_guids(file_path, keys):
"""
"""
# Now we revise the files
if isinstance(file_path, list):
discovered_files = file_path
else:
discovered_files = [os.path.join(file_path, f) for f in os.listdir(file_path)]
known_guids = set()
for f in discovered_files:
dir_path, fname = os.path.split(f)
if fname.startswith("_"):
continue
with open(f, 'r') as fp:
data = json.load(fp)
results = _recursive_guid(None, data, keys)
known_guids = known_guids.union(results)
return list(known_guids) | a858024d7d93ab65607e41f6f8b392b28b2baab4 | 9,564 |
def gray():
"""Convert image to gray scale."""
form = ImageForm(meta={'csrf': False})
current_app.logger.debug(f"request: {request.form}")
if form.validate():
service_info = cv_services["gray"]
json_format = request.args.get("json", False)
# Image Processing
image = services.convert_to_image(request.files["image"].read())
err, image = services.gray(image)
current_app.logger.debug(f"respond: {image}")
respond = services.convert_to_base64(image)
# Respond
respond = jsonify({"image": str(respond)}), 200
else:
respond = jsonify(message=form.errors), 404
return respond | 93ad57e1d66b65ea4af351c942aa6defe5ee4e60 | 9,565 |
import torch
def compute_accuracy(outputs, targets, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = targets.size(0)
_, preds = outputs.topk(maxk, 1, True, True)
preds = preds.t()
corrects = preds.eq(targets[None])
result_list = []
for k in topk:
correct_k = corrects[:k].flatten().sum(dtype=torch.float32)
result_list.append(correct_k * (100.0 / batch_size))
return result_list | 6cfcc9e43aaaed09baae567f9cc27818c555fe5f | 9,566 |
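A usage sketch for compute_accuracy on random logits (the numbers themselves are meaningless, only the shapes matter):

import torch

outputs = torch.randn(8, 10)        # batch of 8 samples, 10 classes
targets = torch.randint(0, 10, (8,))
top1, top5 = compute_accuracy(outputs, targets, topk=(1, 5))
print(top1.item(), top5.item())     # accuracies in percent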
import io
def unpack_text_io_wrapper(fp, encoding):
"""
If *fp* is a #io.TextIOWrapper object, this function returns the underlying
binary stream and the encoding of the IO-wrapper object. If *encoding* is not
None and does not match with the encoding specified in the IO-wrapper, a
#RuntimeError is raised.
"""
if isinstance(fp, io.TextIOWrapper):
if fp.writable() and encoding is not None and fp.encoding != encoding:
msg = 'TextIOWrapper.encoding({0!r}) != {1!r}'
raise RuntimeError(msg.format(fp.encoding, encoding))
if encoding is None:
encoding = fp.encoding
fp = fp.buffer
return fp, encoding | f2c93babab4bff1f08e6fe5c04fbd97dd1ee8a84 | 9,567 |
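# Illustrative usage sketch for unpack_text_io_wrapper: unwrap a text stream into its
# binary buffer plus encoding.
import io

text_stream = io.TextIOWrapper(io.BytesIO(), encoding='utf-8')
binary_fp, enc = unpack_text_io_wrapper(text_stream, encoding=None)
print(type(binary_fp).__name__, enc)   # BytesIO utf-8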
import numpy as np
import nibabel as nib
def dummy_blob(size_arr=(9, 9, 9), pixdim=(1, 1, 1), coordvox=None):
    """
    Create an image with non-null voxels at the coordinates specified by coordvox.
    :param size_arr: tuple: (nx, ny, nz) size of the image array
    :param pixdim: tuple: pixel dimensions along each axis
    :param coordvox: If None: will create a single voxel in the middle of the FOV.
                     If tuple: (x,y,z): Create single voxel at specified coordinate
                     If list of tuples: [(x1,y1,z1), (x2,y2,z2)]: Create multiple voxels.
    :return: Image object
    """
    data = np.zeros(size_arr)
    # if not specified, voxel coordinate is set at the middle of the volume
    if coordvox is None:
        coordvox = tuple([round(i / 2) for i in size_arr])
    if isinstance(coordvox, list):
        for icoord in coordvox:
            data[icoord] = 1
    elif isinstance(coordvox, tuple):
        data[coordvox] = 1
    else:
        raise ValueError("Wrong type for coordvox")
# Create image with default orientation LPI
affine = np.eye(4)
affine[0:3, 0:3] = affine[0:3, 0:3] * pixdim
nii = nib.nifti1.Nifti1Image(data, affine)
img = Image(data, hdr=nii.header, dim=nii.header.get_data_shape())
return img | 2426ca5cddfa3da660bd5e7436f8093b1d7fa109 | 9,568 |
import torch
def poly_edges(P, T):
"""
Returns the ordered edges from the given polygons
Parameters
----------
P : Tensor
a (N, D,) points set tensor
T : LongTensor
a (M, T,) topology tensor
Returns
-------
tuple
a tuple containing the edges of the given polygons
"""
p = P[torch.cat((T, T[0].unsqueeze(0)), dim=0)]
return tuple(p[1:]-p[:-1]) | c8d838bf1ada319cebc5c08719f66846959ce2c2 | 9,569 |
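# Illustrative usage sketch for poly_edges: edge vectors of one 2D triangle. The
# column-per-polygon layout of T is inferred from how the function indexes it.
import torch

P = torch.tensor([[0., 0.], [1., 0.], [0., 1.]])   # 3 points in 2D
T = torch.tensor([[0], [1], [2]])                  # one triangle, one column
e1, e2, e3 = poly_edges(P, T)
print(e1, e2, e3)   # edge vectors (1, 0), (-1, 1), (0, -1)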
def make_list(v):
"""
If the object is not a list already, it converts it to one
Examples:
[1, 2, 3] -> [1, 2, 3]
[1] -> [1]
1 -> [1]
"""
if not jsoncfg.node_is_array(v):
if jsoncfg.node_is_scalar(v):
location = jsoncfg.node_location(v)
line = location.line
column = location.column
else:
line = v.line
column = v.column
a = jsoncfg.config_classes.ConfigJSONArray(line, column)
a._append(v)
return a
return v | c5288cc726d103667e5f51055bc4e8cd4a90816e | 9,570 |
import numpy as np
def score_game(game_core):
    """Run the game 1000 times to see how quickly it guesses the number on average."""
    count_ls = []
    np.random.seed(1)  # fix the RANDOM SEED so the experiment is reproducible!
    random_array = np.random.randint(1, 101, 1000)
    for number in random_array:
        count_ls.append(game_core(number))
    score = int(np.mean(count_ls))
    print(f"Your algorithm guesses the number in {score} attempts on average")
return score | 74a8c4b44ff2caec31f38f136c3fc2336909759f | 9,571 |
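# Illustrative usage sketch for score_game: a simple bisection guesser written here
# only to exercise the benchmark; it is not part of the original code.
def binary_search_core(number):
    count, low, high = 0, 1, 100
    while True:
        count += 1
        guess = (low + high) // 2
        if guess == number:
            return count
        elif guess < number:
            low = guess + 1
        else:
            high = guess - 1

score_game(binary_search_core)   # typically reports ~5 attempts on average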
def add_plot(
lon, lat, kind=None, props=None, ax=None, break_on_change=False, transform=identity
):
"""Add a plot with different props for different 'kind' values to an existing map
Parameters
----------
lon : sequence of float
lat : sequence of float
kind : sequence of hashable, optional
Controls what props are used. Length must match lon/lat and values
are used to index into the `props` map.
props : dict, optional.
Maps `kind` of first and last point of each segment to plot style.
By default, sorted values from `kind`
are mapped to 'axes.prop_cycle'. `props` for segments between
points with different `kind` value are looked up under `None`.
If `None` is missing, these points are not plotted.
ax : matplotlib axes object, optional
break_on_change : bool, optional
Whether to create a new segment when kind changes. Generally True for fishing plots
and False for vessel plots.
transform : cartopy.crs.Projection, optional
Returns
-------
dict mapping keys to Line2D
Values are suitable for passing to legend.
"""
if ax is None:
ax = plt.gca()
assert len(lon) == len(lat)
if kind is None:
kind = np.ones(len(lon))
else:
kind = np.asarray(kind)
assert len(kind) == len(lon)
if props is None:
props = styles.create_props(np.unique(kind))
handles = {}
for k1, k2 in sorted(props.keys()):
mask = _build_mask(kind, k1, k2, break_on_change)
if mask.sum():
ml_coords = _build_multiline_string_coords(lon, lat, mask, break_on_change)
mls = MultiLineString(ml_coords)
p = props[k1, k2].copy()
if "legend" in p:
key = p.pop("legend")
else:
key = k1 if (k1 == k2) else f"{k1}-{k2}"
ax.add_geometries([mls], crs=transform, **p)
if key:
handles[key] = Line2D(
[0], [0], color=p["edgecolor"], lw=p.get("linewidth", 1)
)
return handles | c5d6b5234fe560e9d954d4ea8d0a7aef0e810f89 | 9,572 |
def can_review_faults(user):
"""
users can review faults if one of the the following applies:
a) No fault review groups exist and they have can_review permissions
b) Fault review groups exist, they are a member of one, and they have
review permissions
"""
can_review = user.has_perm("faults.can_review")
review_groups = [frg.group for frg in FaultReviewGroup.objects.select_related("group")]
if review_groups:
can_review = can_review and len(set(review_groups) & set(user.groups.all())) > 0
return can_review | c66f022b6f52144d8e9fde6865f0a8a263819813 | 9,573 |
import requests
def create_freshservice_object(obj_type, data):
"""Use the Freshservice v2 API to create an object.
Accepts an object name (string) and a dict of key values.
"""
url = '{}/{}'.format(settings.FRESHSERVICE_ENDPOINT, obj_type)
resp = requests.post(url, auth=FRESHSERVICE_AUTH, json=data)
return resp | 597348b744d6193beb12dcf2a3a4958808f09d24 | 9,574 |
def print_begin(*args, sep=' ', end='\n', file=None, ret_value='') -> str:
"""Print the function name and start."""
print(_prefix('begin'), *args, sep=sep, end=end, file=file, flush=True)
return ret_value | 8e9ac418d161a0d2b5b7c0c9de7b81da42ea5017 | 9,575 |
def scale_bounding_box(bounding_box,scale):
"""Scales bounding box coords (in dict from {x1,y1,x2,y2}) by x and y given by sclae in dict form {x,y}"""
scaled_bounding_box = {
"x1" : int(round(bounding_box["x1"]*scale["x"]))
,"y1" : int(round(bounding_box["y1"]*scale["y"]))
,"x2" : int(round(bounding_box["x2"]*scale["x"]))
,"y2" : int(round(bounding_box["y2"]*scale["y"]))
}
return scaled_bounding_box | 8aa374537ed2ae3ae2324bd8a4819e981f281b71 | 9,576 |
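# Illustrative usage sketch for scale_bounding_box: rescale a box from a 100x100
# source image to a 200x50 target; the numbers are made up.
box = {"x1": 10, "y1": 20, "x2": 60, "y2": 80}
scale = {"x": 2.0, "y": 0.5}
print(scale_bounding_box(box, scale))   # {'x1': 20, 'y1': 10, 'x2': 120, 'y2': 40}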
import click
def is_command(obj) -> bool:
"""
Return whether ``obj`` is a click command.
:param obj:
"""
return isinstance(obj, click.Command) | 8159aea42baca70b3218a0b82e2f4dc3f34278aa | 9,577 |
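# Illustrative usage sketch for is_command: a decorated click command versus a plain
# callable.
import click

@click.command()
def hello():
    """Say hello."""

print(is_command(hello), is_command(print))   # True False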
def GetContigs(orthologs):
"""get map of contigs to orthologs.
An ortholog can be part of only one contig, but the same ortholog_id can
be part of several contigs.
"""
contigs = {}
for id, oo in orthologs.items():
for o in oo:
if o.contig not in contigs:
contigs[o.contig] = []
contigs[o.contig].append(o)
return contigs | 0c449a31e60f1a149317de815d630c4d8a817ca1 | 9,578 |
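# Illustrative usage sketch for GetContigs; the Ortholog record below is a stand-in
# for whatever object the surrounding project uses (only a .contig attribute is needed).
from collections import namedtuple

Ortholog = namedtuple("Ortholog", ["contig", "gene"])
orthologs = {
    "og1": [Ortholog("chr1", "a"), Ortholog("chr2", "b")],
    "og2": [Ortholog("chr1", "c")],
}
print(sorted(GetContigs(orthologs)))   # ['chr1', 'chr2']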
import logging
import json
import matplotlib.pyplot as plt
def _save_training_results(
mltk_model:MltkModel,
keras_model:KerasModel,
training_history,
logger: logging.Logger,
show:bool = False
) -> TrainingResults:
"""Save the training history as .json and .png"""
results = TrainingResults(mltk_model, keras_model, training_history)
metric, best_val = results.get_best_metric()
logger.info(f'\n\n*** Best training {metric} = {best_val:.3f}\n\n')
try:
history_json_path = f'{mltk_model.log_dir}/train/training-history.json'
logger.debug(f'Generating {history_json_path}')
with open(history_json_path, 'w') as f:
json.dump(results.asdict(), f, indent=2)
except Exception as e:
logger.warning(f'Error while saving training results to {history_json_path}, err: {e}')
# See https://github.com/keras-team/keras/blob/master/keras/losses.py
supported_metrics = {}
supported_metrics['accuracy'] = 'Accuracy'
supported_metrics['loss'] = 'Loss'
supported_metrics['mse'] = 'Mean Square Error'
supported_metrics['mae'] = 'Mean Absolute Error'
supported_metrics['mape'] = 'Mean Absolute Percentage Error'
    supported_metrics['msle'] = 'Mean Square Logarithmic Error'
    supported_metrics['bce'] = 'Binary Cross-entropy'
supported_metrics['cce'] = 'Categorical Cross-entropy'
found_metrics = []
history = results.history
for metric in history:
if not metric in supported_metrics:
continue
if not f'val_{metric}' in history:
continue
found_metrics.append(dict(
name=metric,
train=history[metric],
validation=history[f'val_{metric}'],
))
fig, _ = plt.subplots(figsize=(6, 6), clear=True)
fig.suptitle(f'{mltk_model.name} Training History')
# %% Plot training and validation metrics
for i, metric in enumerate(found_metrics):
plt.subplot(len(found_metrics), 1, i + 1)
plt.plot(metric['train'])
plt.plot(metric['validation'])
plt.title(f'{supported_metrics[metric["name"]]}')
plt.ylabel(supported_metrics[metric['name']])
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.subplots_adjust(hspace=.5)
training_results_path = f'{mltk_model.log_dir}/train/training-history.png'
logger.debug(f'Generating {training_results_path}')
plt.savefig(training_results_path)
if show:
plt.show(block=False)
else:
fig.clear()
plt.close(fig)
return results | fbc8cdb44dcada27df4c47dfdc394dc1900eeb9f | 9,579 |
import tensorflow as tf
def set_backwards_pass(op, backwards):
"""
Returns new operation which behaves like `op` in the forward pass but
like `backwards` in the backwards pass.
"""
return backwards + tf.stop_gradient(op - backwards) | 13287ac73c52ac01808c41c81ba5311bc3f49b91 | 9,580 |
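# Illustrative usage sketch for set_backwards_pass: a straight-through estimator where
# rounding is applied in the forward pass but gradients flow as if through the identity.
import tensorflow as tf

x = tf.Variable([0.2, 0.7, 1.4])
with tf.GradientTape() as tape:
    y = set_backwards_pass(tf.round(x), x)
    loss = tf.reduce_sum(y)
print(y.numpy())                          # [0. 1. 1.]
print(tape.gradient(loss, x).numpy())     # [1. 1. 1.]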
def remove_hydrogens(list_of_lines):
"""
Removes hydrogen from the pdb file.
To add back the hydrogens, run the reduce program on the file.
"""
return (line for line in list_of_lines if line['element']!=" H") | 164ac79171cf6b3632fe7909ace91ffe75192b61 | 9,581 |
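# Illustrative usage sketch for remove_hydrogens; the dict layout of a parsed PDB line
# is assumed, not taken from the original source.
atoms = [
    {'element': ' C', 'name': 'CA'},
    {'element': ' H', 'name': 'HA'},
    {'element': ' O', 'name': 'O'},
]
print([a['name'] for a in remove_hydrogens(atoms)])   # ['CA', 'O']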
def crash_random_instance(org: str, space: str, appname: str, configuration: Configuration, count: int = 1):
"""
Crash one or more random application instances.
:param org: String; Cloud Foundry organization containing the application.
:param space: String; Cloud Foundry space containing the application.
:param appname: String; Application in Cloud Foundry which is to be targeted.
:param count: int; Number of instances to kill.
:param configuration: Configuration; Configuration details, see `README.md`.
:return: A JSON Object representing the application which was targeted.
"""
return run_ctk(
lambda app: app.crash_random_instance(count=count),
configuration, org, space, appname,
"Crashing {} random app instance(s)...".format(count)
) | 652ab95038d405b6a193809804aae7f3bc15978f | 9,582 |
def spc_dict_from_spc_info(spc_info: dict, resonance: bool = True) -> dict:
"""
Generate a species dictionary from species info.
Args:
spc_info (dict): Species info contains the label and species geom info.
resonance (bool): Whether generate resonance geom in the species dictionary.
Returns:
dict: The species dictionary generated from the spc_info.
"""
spc_dict = {}
for label, spc in spc_info.items():
species = species_from_spc_info(spc)
if not species:
continue
if resonance:
species.generate_resonance_structures()
spc_dict[label] = species
return spc_dict | 0a291f2fd50134b1c1259adc36b5637e30e21118 | 9,583 |
import torch
def label_smooth_loss(log_prob, label, confidence=0.9):
"""
:param log_prob: log probability
:param label: one hot encoded
:param confidence: we replace one (in the one hot) with confidence. 0 <= confidence <= 1.
:return:
"""
N = log_prob.size(0)
C = log_prob.size(1)
smoothed_label = torch.full(size=(N, C), fill_value=(1-confidence) / (C - 1)).to(log_prob)
smoothed_label.scatter_(dim=1, index=torch.unsqueeze(label, dim=1), value=confidence)
loss = - torch.sum(log_prob * smoothed_label) / N
return loss | f1164d1a41d2c275ae4e406e2a46a0d50a2d240d | 9,584 |
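# Illustrative usage sketch for label_smooth_loss on random log-probabilities; batch
# size, class count and labels are arbitrary.
import torch
import torch.nn.functional as F

log_prob = F.log_softmax(torch.randn(4, 5), dim=1)
labels = torch.tensor([0, 2, 1, 4])
loss = label_smooth_loss(log_prob, labels, confidence=0.9)
print(loss.item())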
def update(self, using=None, **kwargs):
"""
Updates specified attributes on the current instance.
"""
assert self.pk, "Cannot update an instance that has not yet been created."
using = using or router.db_for_write(self.__class__, instance=self)
for field in self._meta.fields:
if getattr(field, 'auto_now', False) and field.name not in kwargs:
kwargs[field.name] = field.pre_save(self, False)
affected = self.__class__._base_manager.using(using).filter(pk=self.pk).update(**kwargs)
    for k, v in kwargs.items():
if isinstance(v, ExpressionNode):
v = resolve_expression_node(self, v)
setattr(self, k, v)
if affected == 1:
signals.post_save.send(sender=self.__class__, instance=self, created=False)
return True
elif affected == 0:
return False
elif affected < 0:
raise ValueError("Somehow we have updated a negative amount of rows, you seem to have a problem with your db backend.")
else:
raise ValueError("Somehow we have updated multiple rows, and you are now royally fucked.") | b3400f43c0a744de17225ee6c029fc41465b784d | 9,585 |
def differential_privacy_with_risk( dfg_freq, dfg_time, delta, precision, aggregate_type=AggregateType.SUM):
"""
This method adds the differential privacy to the DFG of both time and frequencies.
* It calculates the epsilon using the guessing advantage technique.
* It adds laplace noise to the DFGs.
* It calculates the distance resulted from the noise
"""
accuracy=1
# calculate epsilon
epsilon_freq,senstivity_freq=calculate_epsilon_freq(dfg_freq,delta)
epsilon_time,senstivity_time=calculate_epsilon_time(dfg_time,delta,precision, aggregate_type)
# adding laplace noise to DFG freq
dfg_freq_new = add_laplace_noise_freq(dfg_freq, epsilon_freq)
# adding laplace noise to DFG time
dfg_time, dfg_time_new = add_laplace_noise_time(aggregate_type, dfg_time, epsilon_time)
# Calculate earth moving distance
emd_freq=earth_mover_dist(dfg_freq,dfg_freq_new)
emd_time=earth_mover_dist(dfg_time,dfg_time_new)
#calculating the APE, MAPE, and SMAPE
MAPE_freq, SMAPE_freq, APE_dist_freq,SMAPE_dist_freq=error_calculation(dfg_freq,dfg_freq_new)
MAPE_time, SMAPE_time, APE_dist_time, SMAPE_dist_time = error_calculation(dfg_time,dfg_time_new)
# return dfg_freq_new, dfg_time_new, epsilon_freq,epsilon_time, emd_freq, emd_time, percent_freq,percent_time,percent_freq_dist,percent_time_dist
return dfg_freq_new, dfg_time_new, epsilon_freq,epsilon_time, MAPE_freq, SMAPE_freq, APE_dist_freq, MAPE_time, SMAPE_time, APE_dist_time, SMAPE_dist_freq, SMAPE_dist_time | a85035b8786bb6bf9a5cc0af88433a490faac77f | 9,586 |
from typing import Iterable
from typing import Counter
def get_all_values(string: str) -> Iterable[int]:
"""Return all kinds of candidates, with ordering: Dec, Hex, Oct, Bin."""
if string.startswith('0x'):
return filter(bool, [parse_hex(string[2:])]) # type: ignore[list-item]
if string.startswith('0o'):
return filter(bool, [parse_oct(string[2:])]) # type: ignore[list-item]
if string.startswith('0b'):
return filter(bool, [parse_bin(string[2:])]) # type: ignore[list-item]
# try each base when no prefix
return Counter(filter(bool, map(lambda f: f(string), # type: ignore[arg-type,return-value]
[parse_dec, parse_hex, parse_oct, parse_bin]))) | d9e12290339cbf31dc572c9e3d49ec503949250d | 9,587 |
def svn_auth_get_simple_provider(*args):
"""svn_auth_get_simple_provider(apr_pool_t pool)"""
return _core.svn_auth_get_simple_provider(*args) | e91c2198f5ee214fb1db9e8969711a806caf19c6 | 9,588 |
def preferred_language():
""" It just returns first language from acceptable
"""
return acceptable_languages()[0] | 6e5c2b069f84c5a6601b579616858457598f2cf4 | 9,589 |
def get_frequencies(trial = 1):
"""
get frequency lists
"""
if trial =="run_fast_publish":
lb_targ, ub_targ, obs_hz = 340, 350, 10
elif trial == 1:
lb_targ, ub_targ, obs_hz = 210, 560, int(320 / 2)
elif trial == 2:
lb_targ, ub_targ, obs_hz = 340, 640, 280
elif trial == 3:
lb_targ, ub_targ, obs_hz = 340, 350, 20#40
elif trial == 4:
lb_targ, ub_targ, obs_hz = 60, 350, 40
elif trial == 5:
lb_targ, ub_targ, obs_hz = 50, 200, 40
if trial == 6:
lb_targ, ub_targ, obs_hz = 130, 530, 130
if trial == 7:
lb_targ, ub_targ, obs_hz = 500, 900, 250
obs_list = list( range( lb_targ - obs_hz, lb_targ))
obs_list += list( range( ub_targ, ub_targ + obs_hz))
resp_list = list( range( lb_targ, ub_targ))
return obs_list, resp_list | e6c7f33865ffd76532a19426f0748d4dd22e37f8 | 9,590 |
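# Illustrative usage sketch for get_frequencies: trial 3 yields 20 Hz of observed
# band on either side of a 340-350 Hz response band.
obs_list, resp_list = get_frequencies(trial=3)
print(len(obs_list), len(resp_list))   # 40 10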
import pandas as pd
from os import path
def load_csv(file_path: str = None, clean: bool = True) -> pd.DataFrame:
"""Load the dataset CSV file.
Args:
file_path (str, optional): Path to CSV file. Can be omitted, to load the default dataset. Defaults to None.
        clean (bool, optional): Whether to clean the loaded data. Defaults to True.
Returns:
pd.DataFrame: A Pandas dataframe representing the dataset.
"""
if file_path is None:
file_path = get_path()
if not path.exists(file_path):
_generate_minimized(file_path)
df = pd.DataFrame(pd.read_csv(file_path))
if "datetime" in df.columns:
df.index = pd.to_datetime(df.pop("datetime"))
if clean:
df = _clean_data(df)
return df | 8afa250f4d91b349193682b5bb7bd9c8b1d4eec4 | 9,591 |
import re
def parse_field_pubblicazione(field):
"""
Extracts year, place and publisher from the field `pubblicazione` by applying a cascade of regexps.
"""
exp2 = r'^(?P<place>\D+)(?:\s?\W\s?)(?P<publisher>.*?)\D{1}?(?P<year>\d+)?$'
exp1 = r'^(?P<place>.*?)(?::)(?P<publisher>.*?)\D{1}?(?P<year>\d+)?$'
exp3 = r'(?:.*?)?(?P<year>\d{4})'
exp4 = r'^(?P<place>\D{3,})$'
not_matched = 0
partly_matched = 0
result = {}
result1 = re.match(exp1,field)
if(result1 is None):
result2 = re.match(exp2,field)
if(result2 is None):
result3 = re.match(exp3,field)
if(result3 is None):
result4 = re.match(exp4,field)
if(result4 is None):
not_matched += 1
else:
result = result4.groupdict()
else:
result = result3.groupdict()
else:
result = result2.groupdict()
else:
result = result1.groupdict()
return result | 91aee4dabf62b3ec5bccff2a07d664312226448c | 9,592 |
def test_api_calendar():
"""Return a test calendar object used in API responses."""
return TEST_API_CALENDAR | 1c73e63bf19cef92dbbe328825c2ae4e867c1e84 | 9,593 |
import cv2
import numpy as np
import matplotlib.pyplot as plt
def apply_custom_colormap(image_gray, cmap=plt.get_cmap("seismic")):
"""
Implementation of applyColorMap in OpenCV using colormaps in Matplotlib.
"""
assert image_gray.dtype == np.uint8, "must be np.uint8 image"
if image_gray.ndim == 3:
image_gray = image_gray.squeeze(-1)
# Initialize the matplotlib color map
sm = plt.cm.ScalarMappable(cmap=cmap)
# Obtain linear color range
color_range = sm.to_rgba(np.linspace(0, 1, 256))[:, 0:3] # color range RGBA => RGB
color_range = (color_range * 255.0).astype(np.uint8) # [0,1] => [0,255]
color_range = np.squeeze(
np.dstack([color_range[:, 2], color_range[:, 1], color_range[:, 0]]), 0
) # RGB => BGR
# Apply colormap for each channel individually
channels = [cv2.LUT(image_gray, color_range[:, i]) for i in range(3)]
return np.dstack(channels) | e2f3c9a8900f47c0e7183f4ebe72f41a7f6d26b9 | 9,594 |
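# Illustrative usage sketch for apply_custom_colormap: colorize a synthetic grayscale
# gradient; the result is a BGR uint8 image.
import numpy as np

gradient = np.tile(np.arange(256, dtype=np.uint8), (64, 1))
colored = apply_custom_colormap(gradient)
print(colored.shape, colored.dtype)   # (64, 256, 3) uint8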
from typing import Callable
def _cond_with_per_branch_args(pred,
true_operand, true_fun: Callable,
false_operand, false_fun: Callable):
"""Conditionally apply ``true_fun`` or ``false_fun``.
Has equivalent semantics to this Python implementation::
def cond(pred, true_operand, true_fun, false_operand, false_fun):
if pred:
return true_fun(true_operand)
else:
return false_fun(false_operand)
Pred has to be a scalar type, collection types (list, tuple) are not supported
"""
if not (callable(true_fun) and callable(false_fun)):
raise TypeError("lax.cond: true_fun and false_fun arguments should be callable.")
return _cond(pred,
lambda op: true_fun(op[0]),
lambda op: false_fun(op[1]),
(true_operand, false_operand)) | e942124beafebb69fed80e3175164a34f088cb9e | 9,595 |
import urllib
def msgSet(key, notUsed, queryString, body):
"""no treatment on the body (we send exactly the body like we received it)"""
dict = urllib.parse.parse_qs(body.decode('utf-8'))
#sendSMS.writeRawMsg(body)
user = dict['user'][0]
print(dict)
sendSMS.writeMsgUser(dict['msg'][0], user)
return "Message sent to " + user | b67663f516f54af9a7dbbece67933ee1d04ee7a2 | 9,596 |
import re
def _strip_build_number(api_version):
"""Removes the build number component from a full api version string."""
match = re.match(r"^([A-Z]+-)?([0-9]+)(\.[0-9]+){2}$", api_version)
if match:
return api_version[:match.start(3)]
# if there aren't exactly 3 version number components, just leave it unchanged
return api_version | 20d8023281f05dfcb8c9fdd021b77796c72e1001 | 9,597 |
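# Illustrative usage sketch for _strip_build_number; the version strings are made up.
print(_strip_build_number("PREFIX-12.3.45"))   # PREFIX-12.3
print(_strip_build_number("7.1.9"))            # 7.1
print(_strip_build_number("7.1"))              # 7.1 (unchanged: not three components)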
def get_me():
"""サインインしている自分自身の情報を取得"""
jia_user_id = get_user_id_from_session()
return {"jia_user_id": jia_user_id} | c31f6a1a8c794e4a2aa70779f9c8b2559baccd84 | 9,598 |
def dec_file(name, out=None, **kwargs):
"""
This is a helper function to decrypt a file and return its contents.
You can provide an optional output file using `out`
`name` can be a local file or when not using `salt-run` can be a url like `salt://`, `https://` etc.
CLI Examples:
.. code-block:: bash
salt-run nacl.dec_file name=/tmp/id_rsa.nacl
salt-call nacl.dec_file name=salt://crt/mycert.nacl out=/tmp/id_rsa
salt-run nacl.dec_file name=/tmp/id_rsa.nacl box_type=secretbox \
sk_file=/etc/salt/pki/master/nacl.pub
"""
kwargs["opts"] = __opts__
return salt.utils.nacl.dec_file(name, out, **kwargs) | 3ff74b9300fa8b441a22daf65d546f329e414447 | 9,599 |