content (stringlengths 35 to 762k) | sha1 (stringlengths 40) | id (int64 0 to 3.66M)
---|---|---|
import pandas as pd
def load_summary_data():
    """Load the summary data CSV into a pandas DataFrame.
    :return: pandas DataFrame with the summary data
    """
    DATA_URL = 'data/summary_df.csv'
    data = pd.read_csv(DATA_URL)
    return data | b5f09e845e1379fd00a03fd11b0174e3114eb7d3 | 11,313 |
import itertools
def _enumerate_trees_w_leaves(n_leaves):
"""Construct all rooted trees with n leaves."""
def enumtree(*args):
n_args = len(args)
# trivial cases:
if n_args == 0:
return []
if n_args == 1:
return args
# general case of 2 or more args:
# build index array
idxs = range(0, n_args)
trees = []
# we consider all possible subsets of size n_set to gather
for n_set in range(2, n_args+1):
idxsets = list(itertools.combinations(idxs, n_set))
for idxset in idxsets:
# recurse by joining all subtrees with
# n_set leaves and (n_args - n_set) leaves
arg_set = tuple(args[i] for i in idxs if i in idxset)
arg_coset = tuple(args[i] for i in idxs if i not in idxset)
if arg_coset:
trees.extend(tuple(itertools.product(enumtree(*arg_set),
enumtree(*arg_coset))))
else:
# trivial case where arg_set is entire set
trees.append(arg_set)
return trees
# return enumerated trees with integers as leaves
return enumtree(*range(n_leaves)) | 574a2d3ec63d3aeeb06292ec361b83aebba0ff84 | 11,314 |
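A quick usage sketch of the tree enumerator above (hypothetical usage, not part of the original snippet); the expected output is what the recursion produces for three leaves:

trees = _enumerate_trees_w_leaves(3)
print(trees)
# [((0, 1), 2), ((0, 2), 1), ((1, 2), 0), (0, 1, 2)]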
def gen_tfidf(tokens, idf_dict):
"""
    Given a list of tokens and an idf dict, return a dict of tf-idf weights.
"""
# tokens = text.split()
total = len(tokens)
tfidf_dict = {}
for w in tokens:
tfidf_dict[w] = tfidf_dict.get(w, 0.0) + 1.0
for k in tfidf_dict:
tfidf_dict[k] *= idf_dict.get(k, 0.0) / total
return tfidf_dict | 9217867b3661a8070cc1b2d577918c95d1ff7755 | 11,316 |
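A short usage sketch for gen_tfidf (hypothetical data, not from the original source); the idf_dict values here are made up:

tokens = "the cat sat on the mat".split()
idf_dict = {"the": 0.1, "cat": 2.0, "sat": 1.5, "on": 0.5, "mat": 2.0}
print(gen_tfidf(tokens, idf_dict))
# "the" occurs twice among 6 tokens, so its weight is 2 * 0.1 / 6 = 0.033...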
def timestamp_to_seconds(timestamp):
"""Convert timestamp to python (POSIX) time in seconds.
:param timestamp: The timestamp.
:return: The python time in float seconds.
"""
return (timestamp / 2**30) + EPOCH | 3d5ca5f5ec93b54e1d1a6c53cefba1d49f8ebac2 | 11,317 |
def fit_lowmass_mstar_mpeak_relation(mpeak_orig, mstar_orig,
mpeak_mstar_fit_low_mpeak=default_mpeak_mstar_fit_low_mpeak,
mpeak_mstar_fit_high_mpeak=default_mpeak_mstar_fit_high_mpeak):
"""
"""
mid = 0.5*(mpeak_mstar_fit_low_mpeak + mpeak_mstar_fit_high_mpeak)
mask = (mpeak_orig >= 10**mpeak_mstar_fit_low_mpeak)
mask &= (mpeak_orig < 10**mpeak_mstar_fit_high_mpeak)
# Add noise to mpeak to avoid particle discreteness effects in the fit
_x = np.random.normal(loc=np.log10(mpeak_orig[mask])-mid, scale=0.002)
_y = np.log10(mstar_orig[mask])
c1, c0 = np.polyfit(_x, _y, deg=1)
return c0, c1, mid | 620275ad18173bb00d38f3d468be132d150fc1fa | 11,319 |
def load_ref_system():
""" Returns benzaldehyde as found in the IQMol fragment library.
All credit to https://github.com/nutjunkie/IQmol
"""
return psr.make_system("""
C 0.3179 1.0449 -0.0067
C 1.6965 0.8596 -0.0102
C 2.2283 -0.4253 -0.0050
C 1.3808 -1.5297 0.0037
C 0.0035 -1.3492 0.0073
C -0.5347 -0.0596 0.0021
C -2.0103 0.0989 0.0061
O -2.5724 1.1709 0.0021
H 2.3631 1.7283 -0.0171
H 3.3139 -0.5693 -0.0078
H 1.8000 -2.5413 0.0078
H -0.6626 -2.2203 0.0142
H -2.6021 -0.8324 0.0131
H -0.1030 2.0579 -0.0108
""") | 518ca10a84befa07fefa3c2f646e40095318d63c | 11,320 |
def get_department_level_grade_data_completed(request_ctx, account_id, **request_kwargs):
"""
Returns the distribution of grades for students in courses in the
department. Each data point is one student's current grade in one course;
if a student is in multiple courses, he contributes one value per course,
but if he's enrolled multiple times in the same course (e.g. a lecture
section and a lab section), he only constributes on value for that course.
Grades are binned to the nearest integer score; anomalous grades outside
the 0 to 100 range are ignored. The raw counts are returned, not yet
normalized by the total count.
Shares the same variations on endpoint as the participation data.
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param account_id: (required) ID
:type account_id: string
:return: Get department-level grade data
:rtype: requests.Response (with void data)
"""
path = '/v1/accounts/{account_id}/analytics/completed/grades'
url = request_ctx.base_api_url + path.format(account_id=account_id)
response = client.get(request_ctx, url, **request_kwargs)
return response | 8dd40c7b7c7a734aa66d4f808224424c0c0df81d | 11,321 |
def allocate_samples_to_bins(n_samples, ideal_bin_count=100):
"""goal is as best as possible pick a number of bins
and per bin samples to a achieve a given number
of samples.
Parameters
----------
Returns
----------
number of bins, list of samples per bin
"""
if n_samples <= ideal_bin_count:
n_bins = n_samples
samples_per_bin = [1 for _ in range(n_bins)]
else:
n_bins = ideal_bin_count
        remainder = n_samples % ideal_bin_count
        samples_per_bin = np.array([(n_samples - remainder) / ideal_bin_count for _ in range(n_bins)])
        if remainder != 0:
            additional_samples_per_bin = distribute_samples(remainder, n_bins)
            samples_per_bin = samples_per_bin + additional_samples_per_bin
return n_bins, np.array(samples_per_bin).astype(int) | 66d5fe32a89478b543818d63c65f2745fe242b33 | 11,322 |
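The snippet above calls a distribute_samples helper that is not included here; a minimal sketch of one plausible implementation (an assumption, not the original code) spreads the leftover samples one at a time over the first bins:

import numpy as np

def distribute_samples(remainder, n_bins):
    # Hypothetical helper: give one extra sample to each of the first
    # `remainder` bins so the per-bin counts sum to the requested total.
    extra = np.zeros(n_bins, dtype=int)
    extra[:remainder] += 1
    return extra

# Example: 205 samples with the default ideal_bin_count of 100
n_bins, per_bin = allocate_samples_to_bins(205)
print(n_bins, per_bin.sum())  # 100 205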
from typing import Any
def create_algo(name: str, discrete: bool, **params: Any) -> AlgoBase:
"""Returns algorithm object from its name.
Args:
name (str): algorithm name in snake_case.
discrete (bool): flag to use discrete action-space algorithm.
params (any): arguments for algorithm.
Returns:
d3rlpy.algos.base.AlgoBase: algorithm.
"""
return get_algo(name, discrete)(**params) | 4fab0f5581eb6036efba6074ab6e3b232bcf5679 | 11,323 |
def tf_inv(T):
""" Invert 4x4 homogeneous transform """
assert T.shape == (4, 4)
return np.linalg.inv(T) | 5bf7d54456198c25029956a7aebe118d7ee4fa87 | 11,324 |
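For reference, a homogeneous transform whose rotation block is orthonormal also has a closed-form inverse that avoids a general matrix inversion; a small alternative sketch (not part of the original snippet):

import numpy as np

def tf_inv_closed_form(T):
    # Invert [R t; 0 1] analytically as [R.T, -R.T @ t; 0 1],
    # valid when R is a pure rotation.
    assert T.shape == (4, 4)
    R, t = T[:3, :3], T[:3, 3]
    T_inv = np.eye(4)
    T_inv[:3, :3] = R.T
    T_inv[:3, 3] = -R.T @ t
    return T_inv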
def send_reset_password_email(token, to, username):
"""
send email to user for reset password
:param token: token
:param to: email address
:param username: user.username
:return:
"""
url_to = current_app.config["WEB_BASE_URL"] + "/auth/reset-password?token=" + token
response = _send_email(
subject="请重置密码",
to=to,
html_body=render_template(
"emails/reset_password.html", username=username, url_to=url_to
),
)
return response.status_code | dddcb66425de79a1a736bbbcc5cbc3f5855e7db9 | 11,325 |
def part1(data):
"""Solve part 1"""
countIncreased = 0
prevItem = None
for row in data:
        if prevItem is None:
            prevItem = row
            continue
        if prevItem < row:
            countIncreased += 1
prevItem = row
return countIncreased | e01b5edc9d9ac63a31189160d09b5e6e0f11e522 | 11,326 |
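A quick sanity check for part1 using the well-known sample depth readings from the puzzle description (assumed sample data):

sample = [199, 200, 208, 210, 200, 207, 240, 269, 260, 263]
print(part1(sample))  # 7 measurements are larger than the previous one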
def yzrotation(theta = np.pi*3/20.0):
"""
Returns a simple planar rotation matrix that rotates
vectors around the x-axis.
args:
theta: The angle by which we will perform the rotation.
"""
r = np.eye(3)
r[1,1] = np.cos(theta)
r[1,2] = -np.sin(theta)
r[2,1] = np.sin(theta)
r[2,2] = np.cos(theta)
return r | 59a2a251f8e8aa77548f749f49871536de29b0bb | 11,327 |
def is_compiled_release(data):
"""
Returns whether the data is a compiled release (embedded or linked).
"""
return 'tag' in data and isinstance(data['tag'], list) and 'compiled' in data['tag'] | ea8c8ae4f1ccdedbcc145bd57bde3b6040e5cab5 | 11,328 |
import numpy
from PIL import Image
def resize_frame(
frame: numpy.ndarray, width: int, height: int, mode: str = "RGB"
) -> numpy.ndarray:
"""
    Use PIL to resize an RGB frame to a specified height and width.
Args:
frame: Target numpy array representing the image that will be resized.
width: Width of the resized image.
height: Height of the resized image.
mode: Passed to Image.convert.
Returns:
The resized frame that matches the provided width and height.
"""
frame = Image.fromarray(frame)
frame = frame.convert(mode).resize(size=(width, height))
return numpy.array(frame) | 941eb73961843e46b4e67d48439a09c0223c2af0 | 11,329 |
def get_proxies(host, user, password, database, port=3306, unix_socket=None):
""""Connect to a mysql database using pymysql and retrieve proxies for the scraping job.
Args:
host: The mysql database host
user: The mysql user
password: The database password
port: The mysql port, by default 3306
unix_socket: Sometimes you need to specify the mysql socket file when mysql doesn't reside
in a standard location.
Returns;
A list of proxies obtained from the database
Raisese:
An Exception when connecting to the database fails.
"""
try:
conn = pymysql.connect(host=host, port=port, user=user, passwd=password, unix_socket=unix_socket)
conn.select_db(database)
cur = conn.cursor(pymysql.cursors.DictCursor)
# Adapt this code for you to make it retrieving the proxies in the right format.
cur.execute('SELECT host, port, username, password, protocol FROM proxies')
proxies = [Proxy(proto=s['protocol'], host=s['host'], port=s['port'],
username=s['username'], password=s['password']) for s in cur.fetchall()]
return proxies
except Exception as e:
logger.error(e)
raise | d4595440c9d4d07a7d5e27740bf7049176dbe432 | 11,330 |
def APIRevision():
"""Gets the current API revision to use.
Returns:
str, The revision to use.
"""
return 'v1beta3' | c748e1917befe76da449e1f435540e10ee433444 | 11,331 |
def pretty_string_value_error(value, error, error_digits=2, use_unicode=True):
"""
Returns a value/error combination of numbers in a scientifically
'pretty' format.
Scientific quantities often come as a *value* (the actual
quantity) and the *error* (the uncertainty in the value).
Given two floats, value and error, return the two in a
'pretty' formatted string: where the value and error are truncated
at the correct precision.
Parameters
----------
value : float
The quantity in question
error : float
The uncertainty of the quantity
error_digits : int, default 2
How many significant figures the error has. Scientific
convention holds that errors have 1 or (at most) 2 significant
figures. The larger number of digits is chosen here by default.
Returns
-------
new_string : str
        The formatted value/error string, truncated at the appropriate precision.
Examples
--------
    >>> pretty_string_value_error(1.23456789e8, 4.5678e5,
                                  error_digits=2, use_unicode=False)
    "1.2346 +/- 0.0046 * 10^+08"
    >>> pretty_string_value_error(5.6e-2, 2.0e-3, error_digits=1,
                                  use_unicode=False)
    "5.6 +/- 0.2 * 10^-02"
"""
if value is None:
return "None"
if error is None or not np.isfinite(error):
if use_unicode:
new_string = "{:.6E} \u00B1 UNKNOWN ERROR MARGIN".format(value)
else:
new_string = "{:.6E} +/- UNKNOWN ERROR MARGIN".format(value)
else:
if not np.isfinite(value):
return str(value)
assert "e" in "{:e}".format(value), "Cannot convert into scientific "\
"notation: {1}".format(value)
value_mantissa_str, value_exponent_str = \
"{:e}".format(value).strip().split('e')
value_mantissa = float(value_mantissa_str)
value_exponent = int(value_exponent_str)
error_mantissa_str, error_exponent_str = \
"{:e}".format(error).strip().split('e')
error_mantissa = float(error_mantissa_str)
error_exponent = int(error_exponent_str)
padding = value_exponent - error_exponent + error_digits - 1
if padding < 1: padding = 1
exp_diff = error_exponent - value_exponent
string_for_formatting = "{:.%df}" % padding
new_value_mantissa = string_for_formatting.format(value_mantissa)
new_error_mantissa = string_for_formatting.format(
error_mantissa*10**exp_diff)
if use_unicode:
new_string = "%s \u00B1 %s * 10^%s" % (
new_value_mantissa, new_error_mantissa, value_exponent_str)
else:
new_string = "%s +/- %s * 10^%s" % (
new_value_mantissa, new_error_mantissa, value_exponent_str)
return new_string | bd7b1496880e7d1cb4ffd04d23df20d679ac8ade | 11,332 |
def sameSize(arguments) -> bool:
"""Checks whether given vectors are the same size or not"""
sameLength = True
initialSize = len(vectors[arguments[0]])
for vector in arguments:
if len(vectors[vector]) != initialSize:
sameLength = False
return sameLength | 0840adcb0f6a84c56ff3b0ce3aa23892e45d942e | 11,334 |
def db_read(src_path, read_type=set, read_int=False):
"""Read string data from a file into a variable of given type.
Read from the file at 'src_path', line by line, skipping certain lines and
removing trailing whitespace.
If 'read_int' is True, convert the resulting string to int.
Return read data as an object of the desired type specified by 'read_type'.
"""
def skip(s):
"""Bool func. for skipping a line. "#%# " is chosen as a comment
indicator. """
return s == "\n" or s.startswith("#%# ")
if read_type is list:
result = list()
with open(src_path, "r") as f:
for i in f.readlines():
if not skip(i):
result.append(int(i.strip()) if read_int else i.strip())
elif read_type is set:
result = set()
with open(src_path, "r") as f:
for i in f.readlines():
if not skip(i):
result.add(int(i.strip()) if read_int else i.strip())
elif read_type is dict:
# Process the lines in pairs: First the key, then the corresponding
# value, and then the next key... and so on.
result = dict()
with open(src_path, "r") as f:
key_temp = ""
for i in f.readlines():
if not skip(i):
if key_temp:
result[key_temp] = (
int(i.strip()) if read_int else i.strip()
)
key_temp = ""
else:
key_temp = (int(i.strip()) if read_int else i.strip())
elif read_type is str:
# Only read the first line of the file, strip and return it:
with open(src_path, "r") as f:
result = f.readline().rstrip()
else:
logger.error("db_read: read_type is not list, str, set or dict.")
return None
return result | 811c6efb83d134d695c6dec2e34d3405818b8a48 | 11,335 |
import re
def get_config_keys():
"""Parses Keys.java to extract keys to be used in configuration files
Args: None
Returns:
list: A list of dict containing the following keys -
'key': A dot separated name of the config key
'description': A list of str
"""
desc_re = re.compile(r"(/\*\*\n|\s+\*/|\s+\*)")
key_match_re = re.compile(r"\(\n(.+)\);", re.DOTALL)
key_split_re = re.compile(r",\s+", re.DOTALL)
keys = []
with open(_KEYS_FILE, "r") as f:
config = re.findall(
r"(/\*\*.*?\*/)\n\s+(public static final Config.*?;)", f.read(), re.DOTALL
)
for i in config:
try:
key_match = key_match_re.search(i[1])
if key_match:
terms = [x.strip() for x in key_split_re.split(key_match.group(1))]
key = terms[0].replace('"', "")
description = [
x.strip().replace("\n", "")
for x in desc_re.sub("\n", i[0]).strip().split("\n\n")
]
if len(terms) == 3:
description.append(f"Default: {terms[2]}")
keys.append(
{
"key": key,
"description": description,
}
)
except IndexError:
# will continue if key_match.group(1) or terms[0] does not exist
# for some reason
pass
return keys | 8c04fcac2d05579ce47f5436999f0fe86fb1bdbd | 11,336 |
def new():
"""Create a new community."""
return render_template('invenio_communities/new.html') | 60ee1560f749d94833f57b6a34e2d514e3e04ccb | 11,337 |
import torch
def interpolate(results_t, results_tp1, dt, K, c2w, img_wh):
"""
Interpolate between two results t and t+1 to produce t+dt, dt in (0, 1).
For each sample on the ray (the sample points lie on the same distances, so they
actually form planes), compute the optical flow on this plane, then use softsplat
to splat the flows. Finally use MPI technique to compute the composite image.
Used in test time only.
Inputs:
results_t, results_tp1: dictionaries of the @render_rays function.
dt: float in (0, 1)
K: (3, 3) intrinsics matrix (MUST BE THE SAME for results_t and results_tp1!)
c2w: (3, 4) current pose (MUST BE THE SAME for results_t and results_tp1!)
img_wh: image width and height
Outputs:
(img_wh[1], img_wh[0], 3) rgb interpolation result
(img_wh[1], img_wh[0]) depth of the interpolation (in NDC)
"""
device = results_t['xyzs_fine'].device
N_rays, N_samples = results_t['xyzs_fine'].shape[:2]
w, h = img_wh
rgba = torch.zeros((h, w, 4), device=device)
depth = torch.zeros((h, w), device=device)
c2w_ = torch.eye(4)
c2w_[:3] = c2w
w2c = torch.inverse(c2w_)[:3]
w2c[1:] *= -1 # "right up back" to "right down forward" for cam projection
P = K @ w2c # (3, 4) projection matrix
grid = create_meshgrid(h, w, False, device) # (1, h, w, 2)
xyzs = results_t['xyzs_fine'] # equals results_tp1['xyzs_fine']
zs = rearrange(results_t['zs_fine'], '(h w) n2 -> h w n2', w=w, h=h)
# static buffers
static_rgb = rearrange(results_t['static_rgbs_fine'],
'(h w) n2 c -> h w n2 c', w=w, h=h, c=3)
static_a = rearrange(results_t['static_alphas_fine'], '(h w) n2 -> h w n2 1', w=w, h=h)
# compute forward buffers
xyzs_w = ray_utils.ndc2world(rearrange(xyzs, 'n1 n2 c -> (n1 n2) c'), K)
xyzs_fw_w = ray_utils.ndc2world(
rearrange(xyzs+results_t['transient_flows_fw'],
'n1 n2 c -> (n1 n2) c'), K) # fw points with full flow
xyzs_fw_w = xyzs_w + dt*(xyzs_fw_w-xyzs_w) # scale the flow with dt
uvds_fw = P[:3, :3] @ rearrange(xyzs_fw_w, 'n c -> c n') + P[:3, 3:]
uvs_fw = uvds_fw[:2] / uvds_fw[2]
uvs_fw = rearrange(uvs_fw, 'c (n1 n2) -> c n1 n2', n1=N_rays, n2=N_samples)
uvs_fw = rearrange(uvs_fw, 'c (h w) n2 -> n2 h w c', w=w, h=h)
of_fw = rearrange(uvs_fw-grid, 'n2 h w c -> n2 c h w', c=2)
transient_rgb_t = rearrange(results_t['transient_rgbs_fine'],
'(h w) n2 c -> n2 c h w', w=w, h=h, c=3)
transient_a_t = rearrange(results_t['transient_alphas_fine'],
'(h w) n2 -> n2 1 h w', w=w, h=h)
transient_rgba_t = torch.cat([transient_rgb_t, transient_a_t], 1)
# compute backward buffers
xyzs_bw_w = ray_utils.ndc2world(
rearrange(xyzs+results_tp1['transient_flows_bw'],
'n1 n2 c -> (n1 n2) c'), K) # bw points with full flow
xyzs_bw_w = xyzs_w + (1-dt)*(xyzs_bw_w-xyzs_w) # scale the flow with 1-dt
uvds_bw = P[:3, :3] @ rearrange(xyzs_bw_w, 'n c -> c n') + P[:3, 3:]
uvs_bw = uvds_bw[:2] / uvds_bw[2]
uvs_bw = rearrange(uvs_bw, 'c (n1 n2) -> c n1 n2', n1=N_rays, n2=N_samples)
uvs_bw = rearrange(uvs_bw, 'c (h w) n2 -> n2 h w c', w=w, h=h)
of_bw = rearrange(uvs_bw-grid, 'n2 h w c -> n2 c h w', c=2)
transient_rgb_tp1 = rearrange(results_tp1['transient_rgbs_fine'],
'(h w) n2 c -> n2 c h w', w=w, h=h, c=3)
transient_a_tp1 = rearrange(results_tp1['transient_alphas_fine'],
'(h w) n2 -> n2 1 h w', w=w, h=h)
transient_rgba_tp1 = torch.cat([transient_rgb_tp1, transient_a_tp1], 1)
for s in range(N_samples): # compute MPI planes (front to back composition)
transient_rgba_fw = FunctionSoftsplat(tenInput=transient_rgba_t[s:s+1].cuda(),
tenFlow=of_fw[s:s+1].cuda(),
tenMetric=None,
strType='average').cpu()
transient_rgba_fw = rearrange(transient_rgba_fw, '1 c h w -> h w c')
transient_rgba_bw = FunctionSoftsplat(tenInput=transient_rgba_tp1[s:s+1].cuda(),
tenFlow=of_bw[s:s+1].cuda(),
tenMetric=None,
strType='average').cpu()
transient_rgba_bw = rearrange(transient_rgba_bw, '1 c h w -> h w c')
composed_rgb = transient_rgba_fw[..., :3]*transient_rgba_fw[..., 3:]*(1-dt) + \
transient_rgba_bw[..., :3]*transient_rgba_bw[..., 3:]*dt + \
static_rgb[:, :, s]*static_a[:, :, s]
composed_a = 1 - (1-(transient_rgba_fw[..., 3:]*(1-dt)+
transient_rgba_bw[..., 3:]*dt)) * \
(1-static_a[:, :, s])
rgba[..., :3] += (1-rgba[..., 3:])*composed_rgb
depth += (1-rgba[..., 3])*composed_a[..., 0]*zs[..., s]
rgba[..., 3:] += (1-rgba[..., 3:])*composed_a
return rgba[..., :3], depth | d5cdae22a3fb324e9bdfdedabe0b69cb5d40ebdb | 11,338 |
def divisor(baudrate):
"""Calculate the divisor for generating a given baudrate"""
CLOCK_HZ = 50e6
return round(CLOCK_HZ / baudrate) | a09eee716889ee6950f8c5bba0f31cdd2b311ada | 11,339 |
def scm_get_active_branch(*args, **kwargs):
"""
Get the active named branch of an existing SCM repository.
:param str path: Path on the file system where the repository resides. If not specified, it defaults to the
current work directory.
:return: Name of the active branch
:rtype: str
"""
if not _scm_handler:
_load_scm_handler()
return _scm_handler.get_active_branch(*args, **kwargs) | 6c18454548732cd8db4ba85b45cdc9a8d9b47fce | 11,340 |
def search_evaluations(campus, **kwargs):
"""
year (required)
term_name (required): Winter|Spring|Summer|Autumn
curriculum_abbreviation
course_number
section_id
student_id (student number)
"""
url = "%s?%s" % (IAS_PREFIX, urlencode(kwargs))
data = get_resource_by_campus(url, campus)
evaluations = _json_to_evaluation(data)
return evaluations | d7069c2e9135b350141b0053e3ec1202650b7c28 | 11,341 |
from typing import Optional
from sqlalchemy import select
async def get_user(username: str, session: AsyncSession) -> Optional[User]:
"""
Returns a user with the given username
"""
return (
(await session.execute(select(User).where(User.name == username)))
.scalars()
.first()
) | 0975c069d76414fbf57f4b8f7370d0ada40e39f5 | 11,342 |
import pandas
def compute_balances(flows):
"""
Balances by currency.
:param flows:
:return:
"""
flows = flows.set_index('date')
flows_by_asset = flows.pivot(columns='asset', values='amount').apply(pandas.to_numeric)
balances = flows_by_asset.fillna(0).cumsum()
return balances | 98728c2c687df60194eb11b479c08fc90502807a | 11,344 |
import json
def unjsonify(json_data):
"""
Converts the inputted JSON data to Python format.
:param json_data | <variant>
"""
return json.loads(json_data, object_hook=json2py) | 93a59f8a2ef96cbe25e89c2970969b0132b1a892 | 11,345 |
from typing import Tuple
from typing import List
import numpy as np
def comp_state_dist(table: np.ndarray) -> Tuple[np.ndarray, List[str]]:
"""Compute the distribution of distinct states/diagnoses from a table of
individual diagnoses detailing the patterns of lymphatic progression per
patient.
Args:
table: Rows of patients and columns of LNLs, reporting which LNL was
involved for which patient.
Returns:
A histogram of unique states and a list of the corresponding state
labels.
Note:
This function cannot deal with parts of the diagnose being unknown. So
if, e.g., one level isn't reported for a patient, that row will just be
ignored.
"""
_, num_cols = table.shape
table = table.astype(float)
state_dist = np.zeros(shape=2**num_cols, dtype=int)
for row in table:
if not np.any(np.isnan(row)):
idx = int(np.sum([n * 2**i for i,n in enumerate(row[::-1])]))
state_dist[idx] += 1
state_labels = []
for i in range(2**num_cols):
state_labels.append(change_base(i, 2, length=num_cols))
return state_dist, state_labels | 1a2edacd40d4fea3ff3cc5ddd57d76bffc60c7bc | 11,346 |
def polyConvert(coeffs, trans=(0, 1), backward=False):
"""
    Converts polynomial coeffs for x (P = a0 + a1*x + a2*x**2 + ...) into
    polynomial coeffs for x~:=a+b*x (P~ = a0~ + a1~*x~ + a2~*x~**2 +
    ...). Thus, (a,b)=(0,1) is the identity transformation. If backward,
    performs the opposite transformation.
Note: backward transformation could be done using more general
polynomial composition `polyval`, but forward transformation is a
long standing issue in the general case (look for functional
decomposition of univariate polynomial).
"""
a, b = trans
if not backward:
a = -float(a) / float(b)
b = 1 / float(b)
return N.dot(polyConvMatrix(len(coeffs), (a, b)), coeffs) | 1a2607b28046a8dc67315726957a87a5d5c9a435 | 11,347 |
from numpy import random
def uniform(_data, weights):
"""
Randomly initialize the weights with values between 0 and 1.
Parameters
----------
_data: ndarray
Data to pick to initialize weights.
weights: ndarray
Previous weight values.
Returns
-------
weights: ndarray
New weight values
"""
return random.rand(*weights.shape) | fbf7e853f11a888ee01dc840c6ffcb214560c5a8 | 11,348 |
def ingresar_datos():
"""Ingresa los datos de las secciones"""
datos = {}
while True:
codigo = int_input('Ingrese el código de la sección: ')
if codigo < 0:
break
cantidad = int_input(
'Ingrese la cantidad de alumnos: ', min=MIN, max=MAX
)
datos[codigo] = cantidad
return datos | 3bacb0e5d6b234b2f90564c44a25d151a640fd1f | 11,349 |
def fetch_credentials() -> Credentials:
"""Produces a Credentials object based on the contents of the
CONFIG_FILE or, alternatively, interactively.
"""
if CONFIG_FILE_EXISTS:
return parse_config_file(CONFIG_FILE)
else:
return get_credentials_interactively() | 0b882c8c4c8066a1898771c66db6ccbe7cb09c37 | 11,350 |
def pool_adjacency_mat_reference_wrapper(
adj: sparse.spmatrix, kernel_size=4, stride=2, padding=1
) -> sparse.spmatrix:
"""Wraps `pool_adjacency_mat_reference` to provide the same API as `pool_adjacency_mat`"""
adj = Variable(to_sparse_tensor(adj).to_dense())
adj_conv = pool_adjacency_mat_reference(adj, kernel_size, stride, padding)
return sparse.coo_matrix(adj_conv.data.numpy(), dtype=np.int16) | e72cb1e50bf7542d4175b9b3b3989e70a8812373 | 11,351 |
def send(socket, obj, flags=0, protocol=-1):
"""stringify an object, and then send it"""
s = str(obj)
return socket.send_string(s) | a89165565837ad4a984905d5b5fdd73e398b35fd | 11,352 |
def arraystr(A: Array) -> str:
"""Pretty print array"""
B = np.asarray(A).ravel()
if len(B) <= 3:
return " ".join([itemstr(v) for v in B])
return " ".join([itemstr(B[0]), itemstr(B[1]), "...", itemstr(B[-1])]) | 9cceed63c83812a7fd87dba833fc4d5b5a75088c | 11,353 |
def dist2_test(v1, v2, idx1, idx2, len2):
"""Square of distance equal"""
return (v1-v2).mag2() == len2 | 3a268a3ba704a91f83345766245a952fe5d943dd | 11,354 |
def extract_grid_cells(browser, grid_id):
"""
Given the ID of a legistar table, returns a list of dictionaries
for each row mapping column headers to td elements.
"""
table = browser.find_element_by_id(grid_id)
header_cells = table.find_elements_by_css_selector(
'thead:nth-child(2) > tr:nth-child(2) > th'
)
headers = [extract_text(cell) for cell in header_cells]
tbody = table.find_element_by_css_selector('tbody:nth-child(4)')
rows = tbody.find_elements_by_tag_name('tr')
result_rows = []
for row in rows:
cells = {}
td_elements = row.find_elements_by_tag_name('td')
for header, cell in zip(headers, td_elements):
cells[header] = cell
result_rows.append(cells)
return (headers, result_rows) | bee4265a18cfd428f25e3fdf3202fb5bfad820df | 11,355 |
import ast
def gatherAllParameters(a, keep_orig=True):
"""Gather all parameters in the tree. Names are returned along
with their original names (which are used in variable mapping)"""
if type(a) == list:
allIds = set()
for line in a:
allIds |= gatherAllVariables(line)
return allIds
if not isinstance(a, ast.AST):
return set()
allIds = set()
for node in ast.walk(a):
if type(node) == ast.arg:
origName = node.originalId if (keep_orig and hasattr(node, "originalId")) else None
allIds |= set([(node.arg, origName)])
return allIds | e899e60d818750a4ff1656b039a6dc4413f8f181 | 11,356 |
def average_link_euclidian(X,verbose=0):
"""
Average link clustering based on data matrix.
Parameters
----------
X array of shape (nbitem,dim): data matrix
from which an Euclidian distance matrix is computed
verbose=0, verbosity level
Returns
-------
t a weightForest structure that represents the dendrogram of the data
Note
----
this method has not been optimized
"""
if X.shape[0]==np.size(X):
X = np.reshape(X,(np.size(X),1))
if np.size(X)<10000:
D = Euclidian_distance(X)
else:
        raise ValueError("The distance matrix is too large")
t = average_link_distance(D,verbose)
return t | 17aae1e7f802f82765bcda8b403598a2c5a9f822 | 11,357 |
import functools
def cached(func):
"""Decorator cached makes the function to cache its result and return it in duplicate calls."""
prop_name = '__cached_' + func.__name__
@functools.wraps(func)
def _cached_func(self):
try:
return getattr(self, prop_name)
except AttributeError:
val = func(self)
setattr(self, prop_name, val)
return val
return _cached_func | 5b23c251c03160ba2c4e87848201be46ba2f34fb | 11,358 |
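A small usage sketch for the cached decorator (hypothetical class, not from the original source); the wrapped body runs only once per instance:

class Report:
    @cached
    def total(self):
        print("computing...")
        return sum(range(1000))

r = Report()
print(r.total())  # prints "computing..." then 499500
print(r.total())  # returns the cached 499500 without recomputing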
def SX_inf(*args):
"""
create a matrix with all inf
inf(int nrow, int ncol) -> SX
inf((int,int) rc) -> SX
inf(Sparsity sp) -> SX
"""
return _casadi.SX_inf(*args) | b11fba9e9b60eadb983d1203b1dd852abca9a2b7 | 11,359 |
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms
def aes_encrypt(mode, aes_key, aes_iv, *data):
"""
Encrypt data with AES in specified mode.
:param aes_key: aes_key to use
:param aes_iv: initialization vector
"""
encryptor = Cipher(algorithms.AES(aes_key), mode(aes_iv), backend=default_backend()).encryptor()
result = None
for value in data:
result = encryptor.update(value)
encryptor.finalize()
return result, None if not hasattr(encryptor, 'tag') else encryptor.tag | 94a39ddabe3ea186463808e79e86bec171fbaeda | 11,360 |
def _ebpm_gamma_update_a(init, b, plm, step=1, c=0.5, tau=0.5, max_iters=30):
"""Backtracking line search to select step size for Newton-Raphson update of
a"""
def loss(a):
return -(a * np.log(b) + a * plm - sp.gammaln(a)).sum()
obj = loss(init)
d = (np.log(b) - sp.digamma(init) + plm).mean() / sp.polygamma(1, init)
update = loss(init + step * d)
while (not np.isfinite(update) or update > obj + c * step * d) and max_iters > 0:
step *= tau
update = loss(init + step * d)
max_iters -= 1
if max_iters == 0:
# Step size is small enough that update can be skipped
return init
else:
return init + step * d | 038fb28824b3429b03887299af7a7feeec16b689 | 11,361 |
def edge_distance_mapping(graph : Graph,
iterations : int,
lrgen : LearningRateGen,
verbose : bool = True,
reset_locations : bool = True):
"""
    Stochastic gradient descent algorithm for performing graph vertex layout
    optimization using the path distances as target distances in the layout.
    The algorithm is adapted from the paper https://arxiv.org/pdf/1710.04626.pdf
Args:
graph : The graph to arrange
iterations : number of iteration rounds
lrgen : learning rate function that takes iteration round as input
verbose : boolean, set True to print progress status information
Returns:
Vertex location stress value list that contains one summary stress
value per iteration.
"""
# Create temporary lists of vertex list indices
n_vertex = graph.vertex_count
vertex_idx_list_a = np.arange(n_vertex)
vertex_idx_list_b = np.arange(n_vertex)
stress_list = []
# Calculate distance look-up table
dist_arr, keys = __edge_distance_lut(graph)
if reset_locations:
__reset_locations(graph)
# Main iteration loop
for iter_round in range(iterations):
stress = 0
lr = lrgen.get_lr(iter_round)
if verbose:
progress_print = ProgressPrint(n_vertex)
a_loop = 0
np.random.shuffle(vertex_idx_list_a)
for idx_a in vertex_idx_list_a:
np.random.shuffle(vertex_idx_list_b)
for idx_b in vertex_idx_list_b:
if idx_a == idx_b:
continue
# Get path distance from vertex a to b.
# Value -1 means there is no path.
dist_target = dist_arr[idx_a, idx_b]
if dist_target == np.inf:
continue
                # Update the locations and get stress for the path
key_a = keys[idx_a]
key_b = keys[idx_b]
edge_stress = __coord_update(graph, key_a, key_b, dist_target, lr)
stress += edge_stress
# Progress monitoring
if verbose:
a_loop += 1
progress_print.print_update(iter_round, a_loop, stress)
stress_list.append(stress)
return stress_list | f5c93cf83a7cd7892936246eb6c90562030ad819 | 11,362 |
def strip_extension(name: str) -> str:
"""
Remove a single extension from a file name, if present.
"""
last_dot = name.rfind(".")
if last_dot > -1:
return name[:last_dot]
else:
return name | 9dc1e3a3c9ad3251aba8a1b61f73de9f79f9a8be | 11,363 |
def validatePullRequest(data):
"""Validate pull request by action."""
if 'action' not in data:
raise BadRequest('no event supplied')
if 'pull_request' not in data or 'html_url' not in data.get('pull_request'):
raise BadRequest('payload.pull_request.html_url missing')
return True | a4577a1b719b11f1ea845fff436a78178ca9e370 | 11,365 |
def __adjust_data_for_log_scale(dataframe: pd.DataFrame) -> pd.DataFrame:
"""
This will clean and adjust some of the data so that Altair can plot it using a logarithmic scale. Altair does not
allow zero values on the Y axis when plotting with a logarithmic scale, as log(0) is undefined.
Args:
dataframe: The data to plot on the chart.
Returns: A new data frame with the appropriate adjustments for plotting on a log scale.
"""
return dataframe.replace(0, float('nan')) | 30d7a73f2f0d564f6e52e1a2fa4b521fa1265c3d | 11,366 |
import spacy
import torch
def predict_sentence(model,vocab,sentence):
"""Predicts the section value of a given sentence
INPUT: Trained model, Model vocab, Sentence to predict
OUTPUT: Assigned section to the sentence"""
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
nlp=spacy.load('en_core_sci_md')
model=model.to(device)
tokens=[t.text for t in nlp.tokenizer(sentence)]
indexed = [vocab[t] for t in tokens]
tensor_to_predict=torch.LongTensor(indexed).to(device)
tensor_to_predict=tensor_to_predict.unsqueeze(1).T
length_tensor= torch.LongTensor([len(indexed)]).to(device)
prediction=model(tensor_to_predict,length_tensor)
return prediction.argmax(1).item() | f8ef02bd92dfc3dfcea0f5a2e9d5da99050fe367 | 11,367 |
import spacy
import re
def ne_offsets_by_sent(
text_nest_list=[],
model='de_core_news_sm',
):
""" extracts offsets of NEs and the NE-type grouped by sents
    :param text_nest_list: A list of dicts with the following structure:\
[{"text": "Wien ist schön", "ner_dicts": [{"text": "Wien", "ne_type": "LOC"}]}]
:param model: The name of the spacy model which should be used for sentence splitting.
:return: A list of spacy-like NER Tuples [('some text'), entities{[(15, 19, 'place')]}]
"""
nlp = spacy.load(model)
text_nes = text_nest_list
results = []
for entry in text_nes:
ner_dicts = entry['ner_dicts']
in_text = entry['text']
doc = nlp(in_text)
for sent in doc.sents:
entities = []
if sent.text != "":
plain_text = sent.text
for x in ner_dicts:
for m in re.finditer(x['text'], plain_text):
entities.append([m.start(), m.end(), x['ne_type']])
entities = [item for item in set(tuple(row) for row in entities)]
entities = sorted(entities, key=lambda x: x[0])
ents = []
next_item_index = 1
for x in entities:
cur_start = x[0]
try:
next_start = entities[next_item_index][0]
except IndexError:
next_start = 9999999999999999999999
if cur_start == next_start:
pass
else:
ents.append(x)
next_item_index = next_item_index + 1
train_data = (
plain_text,
{
"entities": ents
}
)
results.append(train_data)
return results | 1e4fdaba07bf562b1d91b5f2376955efa9974c56 | 11,368 |
from typing import Optional
def clone_repo(
url: str,
path: str,
branch: Optional[str] = None,
) -> bool:
"""Clone repo from URL (at branch if specified) to given path."""
cmd = ['git', 'clone', url, path]
if branch:
cmd += ['--branch', branch]
return run(cmd)[0].returncode == 0 | 56bc8641c3418216f1da5f0c87d33478888775c7 | 11,369 |
def get_inputtype(name, object_type):
"""Get an input type based on the object type"""
if object_type in _input_registry:
return _input_registry[object_type]
inputtype = type(
name,
(graphene.InputObjectType,),
_get_input_attrs(object_type),
)
_input_registry[object_type] = inputtype
return inputtype | aee2a84c8aaf0d66554f022ac0fec0aaef808160 | 11,370 |
def get_engine_status(engine=None):
"""Return a report of the current engine status"""
if engine is None:
engine = crawler.engine
global_tests = [
"time()-engine.start_time",
"engine.is_idle()",
"engine.has_capacity()",
"engine.scheduler.is_idle()",
"len(engine.scheduler.pending_requests)",
"engine.downloader.is_idle()",
"len(engine.downloader.sites)",
"engine.scraper.is_idle()",
"len(engine.scraper.sites)",
]
spider_tests = [
"engine.spider_is_idle(spider)",
"engine.closing.get(spider)",
"engine.scheduler.spider_has_pending_requests(spider)",
"len(engine.scheduler.pending_requests[spider])",
"len(engine.downloader.sites[spider].queue)",
"len(engine.downloader.sites[spider].active)",
"len(engine.downloader.sites[spider].transferring)",
"engine.downloader.sites[spider].closing",
"engine.downloader.sites[spider].lastseen",
"len(engine.scraper.sites[spider].queue)",
"len(engine.scraper.sites[spider].active)",
"engine.scraper.sites[spider].active_size",
"engine.scraper.sites[spider].itemproc_size",
"engine.scraper.sites[spider].needs_backout()",
]
status = {'global': {}, 'spiders': {}}
for test in global_tests:
try:
status['global'][test] = eval(test)
        except Exception as e:
status['global'][test] = "%s (exception)" % type(e).__name__
for spider in engine.downloader.sites:
x = {}
for test in spider_tests:
try:
x[test] = eval(test)
            except Exception as e:
x[test] = "%s (exception)" % type(e).__name__
status['spiders'][spider] = x
return status | 0d87692a991965c8b72204d241964a27a9499014 | 11,372 |
import pandas as pd
from tqdm import tqdm
import requests
import json
def stock_em_jgdy_tj():
"""
东方财富网-数据中心-特色数据-机构调研-机构调研统计
http://data.eastmoney.com/jgdy/tj.html
:return: pandas.DataFrame
"""
url = "http://data.eastmoney.com/DataCenter_V3/jgdy/gsjsdy.ashx"
page_num = _get_page_num_tj()
temp_df = pd.DataFrame()
for page in tqdm(range(1, page_num+1)):
params = {
"pagesize": "5000",
"page": str(page),
"js": "var sGrabtEb",
"param": "",
"sortRule": "-1",
"sortType": "0",
"rt": "52581365",
}
res = requests.get(url, params=params)
data_json = json.loads(res.text[res.text.find("={")+1:])
temp_df = temp_df.append(pd.DataFrame(data_json["data"]), ignore_index=True)
return temp_df | 1841702e6fb5c677245a2d213f489d95a789d68b | 11,373 |
def hpdi(x, prob=0.90, axis=0):
"""
Computes "highest posterior density interval" (HPDI) which is the narrowest
interval with probability mass ``prob``.
:param numpy.ndarray x: the input array.
:param float prob: the probability mass of samples within the interval.
:param int axis: the dimension to calculate hpdi.
:return: quantiles of ``x`` at ``(1 - prob) / 2`` and
``(1 + prob) / 2``.
:rtype: numpy.ndarray
"""
x = np.swapaxes(x, axis, 0)
sorted_x = np.sort(x, axis=0)
mass = x.shape[0]
index_length = int(prob * mass)
intervals_left = sorted_x[:(mass - index_length)]
intervals_right = sorted_x[index_length:]
intervals_length = intervals_right - intervals_left
index_start = intervals_length.argmin(axis=0)
index_end = index_start + index_length
hpd_left = np.take_along_axis(sorted_x, index_start[None, ...], axis=0)
hpd_left = np.swapaxes(hpd_left, axis, 0)
hpd_right = np.take_along_axis(sorted_x, index_end[None, ...], axis=0)
hpd_right = np.swapaxes(hpd_right, axis, 0)
return np.concatenate([hpd_left, hpd_right], axis=axis) | 579515ebb6d28c2a1578c85eab8cbff1b67bd5ee | 11,374 |
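A quick usage sketch (not from the original source): for standard normal samples the 90% HPDI should come out close to (-1.64, 1.64).

import numpy as np

samples = np.random.normal(size=10_000)
lo, hi = hpdi(samples, prob=0.90)
print(lo, hi)  # roughly -1.6 and 1.6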
def a(n, k):
"""calculates maximum power of p(n) needed
>>> a(0, 20)
4
>>> a(1, 20)
2
>>> a(2, 20)
1
"""
return floor(log(k) / log(p(n))) | 581e2a23a3dc069fc457ed5a6fe7d5a355353242 | 11,375 |
import platform
def is_windows():
""" détermine si le système actuel est windows """
return platform.system().lower() == "windows" | fc9e2ca948f7cc5dc6b6cc9afb52ba701222bb7a | 11,376 |
def WTfilt_1d(sig):
"""
    Filter a single-lead ECG signal using the wavelet transform.
    Reference: Martis R J, Acharya U R, Min L C. ECG beat classification using PCA, LDA, ICA and discrete
    wavelet transform[J]. Biomedical Signal Processing and Control, 2013, 8(5): 437-448.
    :param sig: 1-D numpy array, single-lead ECG
    :return: 1-D numpy array, filtered signal
"""
coeffs = pywt.wavedec(sig, 'db6', level=9)
coeffs[-1] = np.zeros(len(coeffs[-1]))
coeffs[-2] = np.zeros(len(coeffs[-2]))
coeffs[0] = np.zeros(len(coeffs[0]))
sig_filt = pywt.waverec(coeffs, 'db6')
return sig_filt | 8a3c65b35ac347b247a36e7d70705f76f41010d5 | 11,377 |
def discount_rewards(r):
""" take 1D float array of rewards and compute discounted reward """
gamma = 0.99
discounted_r = np.zeros_like(r)
running_add = 0
for t in reversed(range(0, r.size)):
running_add = running_add * gamma + r[t]
discounted_r[t] = running_add
return discounted_r | b093c0d82ef82824c08d08ce4da1b840318bd7ed | 11,378 |
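A tiny worked example (not part of the original snippet): a single terminal reward of 1 is propagated backwards with gamma = 0.99.

import numpy as np

r = np.array([0.0, 0.0, 1.0])
print(discount_rewards(r))  # [0.9801 0.99   1.    ]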
def mvn(tensor):
"""Per row mean-variance normalization."""
epsilon = 1e-6
mean = K.mean(tensor, axis=1, keepdims=True)
std = K.std(tensor, axis=1, keepdims=True)
mvn = (tensor - mean) / (std + epsilon)
return mvn | c205712d3a1a53450de0e0b9af0abe1b9d51f269 | 11,379 |
import re
def grapheme_to_phoneme(text, g2p, lexicon=None):
"""Converts grapheme to phoneme"""
phones = []
words = filter(None, re.split(r"(['(),:;.\-\?\!\s+])", text))
for w in words:
if lexicon is not None and w.lower() in lexicon:
phones += lexicon[w.lower()]
else:
phones += list(filter(lambda p: p != " ", g2p(w)))
return phones | 2bb5195a323aa712b2725851fdde64b8e38856f0 | 11,380 |
def mean_log_cosh_error(pred, target):
"""
Determine mean log cosh error.
f(y_t, y) = sum(log(cosh(y_t-y)))/n
where, y_t = predicted value
y = target value
n = number of values
:param pred: {array}, shape(n_samples,)
predicted values.
:param target: {array}, shape(n_samples,)
target values.
:return: mean log cosh error.
"""
error = pred - target
return np.mean(np.log(np.cosh(error))) | 85fd6c3d582e7bc41271e3212d43c5cfea8bcf7e | 11,381 |
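A quick numeric check (hypothetical usage): identical predictions give zero loss, and small errors behave roughly like half the squared error.

import numpy as np

target = np.array([1.0, 2.0, 3.0])
print(mean_log_cosh_error(target, target))        # 0.0
print(mean_log_cosh_error(target + 0.1, target))  # ~0.005, close to 0.1**2 / 2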
def check_columns(board: list):
"""
Check column-wise compliance of the board for uniqueness (buildings of unique height)
and visibility (top-bottom and vice versa).
Same as for horizontal cases, but aggregated in one function for vertical case, i.e. columns.
>>> check_columns(['***21**', '412453*', '423145*', '*543215', \
'*35214*', '*41532*', '*2*1***'])
False
>>> check_columns(['***21**', '412453*', '423145*', '*543215', \
'*35214*', '*41232*', '*2*1***'])
False
>>> check_columns(['***21**', '412553*', '423145*', '*543215', \
'*35214*', '*41532*', '*2*1***'])
False
>>> check_columns(['***22**', '412453*', '423145*', '*543215', \
'*35214*', '*41532*', '*2*1***'])
False
>>> check_columns(['***21**', '412453*', '423145*', '*543215', \
'*35214*', '*41532*', '*2*4***'])
False
"""
return check_uniqueness_in_rows(get_board_columns(board), False) and \
check_horizontal_visibility(get_board_columns(board), False) | 85fd1f02b392c6b6219ce989f7439ec0140b9fa2 | 11,383 |
import numpy
import pathlib
import tqdm
import pandas
import warnings
def extract_header(mjds, path, keywords, dtypes=None, split_dbs=False, is_range=False):
"""Returns a `~pandas.DataFrame` with header information.
For a list or range of MJDs, collects a series of header keywords for
database files and organises them in a `~pandas.DataFrame` sorted by
MJD and frame number.
Parameters
----------
mjds : list
        The list of MJDs to extract. If the length of ``mjds`` is two and
        ``is_range=True``, all the MJDs between both values will be extracted.
path : str
The path to the database file.
keywords : list
A list of strings with the header keywords to extract.
dtypes : list, optional
A list of types to cast the keyword values.
split_dbs : bool, optional
If True, assumes that the DB is split into multiple files, one for each
MJD. In that case, the path for each file is assumed to be ``path``
with the ``_{MJD}`` suffix.
is_range : bool, optional
If True, assumes that ``mjds`` are the extremes of a range of MJDs.
"""
mjds = numpy.atleast_1d(mjds)
path = pathlib.Path(path)
keywords = [key.lower() for key in keywords]
if dtypes:
        assert len(dtypes) == len(keywords), 'inconsistent lengths of keywords and dtypes'
    assert mjds.ndim == 1, 'invalid number of dimensions in mjds'
    if is_range:
        assert len(mjds) == 2, 'when is_range=True, mjds must be a list of length 2'
mjds = numpy.arange(mjds[0], mjds[1] + 1)
if not split_dbs:
assert path.exists()
database.init(str(path))
assert database.connect(), 'cannot connect to database'
dataframes = []
with tqdm.trange(len(mjds)) as tt:
for mjd in mjds:
tt.set_description(str(mjd))
if split_dbs:
suffix = path.suffix
database_mjd = str(path).replace(suffix, f'_{mjd}{suffix}')
if not pathlib.Path(database_mjd).exists():
tt.update()
continue
database.init(str(database_mjd))
assert database.connect(), 'cannot connect to database'
Header = playhouse.reflection.Introspector.from_database(
database).generate_models()['header']
fields = [Frame.mjd, Frame.frame]
failed = any([not hasattr(Header, keyword) for keyword in keywords])
if failed:
tt.update()
continue
for keyword in keywords:
fields.append(getattr(Header, keyword))
data = Header.select(*fields).join(Frame, on=(Frame.pk == Header.frame_pk)).tuples()
dataframes.append(pandas.DataFrame(list(data), columns=(['mjd', 'frame'] + keywords)))
tt.update()
dataframe = pandas.concat(dataframes)
if dtypes:
failed = False
for ii, key in enumerate(keywords):
try:
dataframe[key] = dataframe[key].astype(dtypes[ii])
except ValueError as ee:
warnings.warn(f'failed to apply astype: {ee!r}', exceptions.GuiderQAUserWarning)
failed = True
if not failed:
dataframe = dataframe[dataframe > -999.]
dataframe = dataframe.set_index(['mjd', 'frame'])
dataframe.sort_index(inplace=True)
return dataframe | 65934d9b5e9c1e6eb639709a16e6e93c145b57e7 | 11,384 |
import pytz
from datetime import datetime
async def get_time():
"""获取服务器时间
"""
tz = pytz.timezone('Asia/Shanghai')
return {
'nowtime': datetime.now(),
'utctime': datetime.utcnow(),
'localtime': datetime.now(tz)
} | 282eb1136713df8045c6ad5f659042484fe4ec8b | 11,385 |
def health_check():
"""Attempt to ping the database and respond with a status code 200.
    This endpoint verifies that the server is running and that the database is
accessible.
"""
response = {"service": "OK"}
try:
postgres.session.query(text("1")).from_statement(text("SELECT 1")).all()
response["database"] = "OK"
except Exception as e:
app.logger.error(e)
response["database"] = "ERROR"
try:
mongo.connection.server_info()
response["document_store"] = "OK"
except Exception as e:
app.logger.error(e)
response["document_store"] = "ERROR"
return response | cd47815ada53281f2b13542dd8cad93398be5203 | 11,386 |
def find_ad_adapter(bus):
"""Find the advertising manager interface.
:param bus: D-Bus bus object that is searched.
"""
remote_om = dbus.Interface(
bus.get_object(constants.BLUEZ_SERVICE_NAME, '/'),
constants.DBUS_OM_IFACE)
objects = remote_om.GetManagedObjects()
for o, props in objects.items():
if constants.LE_ADVERTISING_MANAGER_IFACE in props:
return o
return None | 6b5f49a5908948a54f99438a38865293fca51cc7 | 11,387 |
def leaky_relu(x, slope=0.2):
"""Leaky Rectified Linear Unit function.
This function is expressed as :math:`f(x) = \max(x, ax)`, where :math:`a`
is a configurable slope value.
Args:
x (~chainer.Variable): Input variable.
slope (float): Slope value :math:`a`.
Returns:
~chainer.Variable: Output variable.
"""
return LeakyReLU(slope)(x) | cf7624309543e24c70832249116b74d56c26d1f9 | 11,388 |
def GetConfig(user_config):
"""Decide number of vms needed to run oldisim."""
config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
config['vm_groups']['default']['vm_count'] = (FLAGS.oldisim_num_leaves
+ NUM_DRIVERS + NUM_ROOTS)
return config | b04380fe6dbc84ef4c353dd34354581fa69aac89 | 11,389 |
import re
def gen_answer(question, passages):
"""由于是MLM模型,所以可以直接argmax解码。
"""
all_p_token_ids, token_ids, segment_ids = [], [], []
for passage in passages:
passage = re.sub(u' |、|;|,', ',', passage)
p_token_ids, _ = tokenizer.encode(passage, maxlen=max_p_len + 1)
q_token_ids, _ = tokenizer.encode(question, maxlen=max_q_len + 1)
all_p_token_ids.append(p_token_ids[1:])
token_ids.append([tokenizer._token_start_id])
token_ids[-1] += ([tokenizer._token_mask_id] * max_a_len)
token_ids[-1] += [tokenizer._token_end_id]
token_ids[-1] += (q_token_ids[1:] + p_token_ids[1:])
segment_ids.append([0] * len(token_ids[-1]))
token_ids = sequence_padding(token_ids)
segment_ids = sequence_padding(segment_ids)
probas = model.predict([token_ids, segment_ids])
results = {}
for t, p in zip(all_p_token_ids, probas):
a, score = tuple(), 0.
for i in range(max_a_len):
idxs = list(get_ngram_set(t, i + 1)[a])
if tokenizer._token_end_id not in idxs:
idxs.append(tokenizer._token_end_id)
                # pi zeroes out the probabilities of tokens outside the passage
pi = np.zeros_like(p[i])
pi[idxs] = p[i, idxs]
a = a + (pi.argmax(),)
score += pi.max()
if a[-1] == tokenizer._token_end_id:
break
score = score / (i + 1)
a = tokenizer.decode(a)
if a:
results[a] = results.get(a, []) + [score]
results = {
k: (np.array(v)**2).sum() / (sum(v) + 1)
for k, v in results.items()
}
return results | 536880c1318cc193be19561183e652c7668eb09b | 11,391 |
def compile(function_or_sdfg, *args, **kwargs):
""" Obtain a runnable binary from a Python (@dace.program) function. """
if isinstance(function_or_sdfg, dace.frontend.python.parser.DaceProgram):
sdfg = dace.frontend.python.parser.parse_from_function(
function_or_sdfg, *args, **kwargs)
elif isinstance(function_or_sdfg, SDFG):
sdfg = function_or_sdfg
else:
raise TypeError("Unsupported function type")
return sdfg.compile(**kwargs) | 7504344e8e9df5a395e51af1211db286188f3fcb | 11,392 |
import re
def is_untweeable(html):
"""
    I'm not sure at the moment what constitutes untweeable HTML, but if we don't find the divs
    in the TiddlyWiki, that is a blocker.
"""
# the same regex used in tiddlywiki
divs_re = re.compile(
r'<div id="storeArea"(.*)</html>',
re.DOTALL
)
return bool(divs_re.search(html)) | face6c6d30b6e26ffa3344ed8e42ed7d44cf2ea5 | 11,393 |
from typing import Optional
def create_1m_cnn_model(only_digits: bool = False, seed: Optional[int] = 0):
"""A CNN model with slightly under 2^20 (roughly 1 million) params.
A simple CNN model for the EMNIST character recognition task that is very
similar to the default recommended model from `create_conv_dropout_model`
but has slightly under 2^20 parameters. This is useful if the downstream task
involves randomized Hadamard transform, which requires the model weights /
gradients / deltas concatednated as a single vector to be padded to the
nearest power-of-2 dimensions.
This model is used in https://arxiv.org/abs/2102.06387.
When `only_digits=False`, the returned model has 1,018,174 trainable
parameters. For `only_digits=True`, the last dense layer is slightly smaller.
Args:
only_digits: If True, uses a final layer with 10 outputs, for use with the
digits only EMNIST dataset. If False, uses 62 outputs for the larger
dataset.
seed: A random seed governing the model initialization and layer randomness.
Returns:
A `tf.keras.Model`.
"""
data_format = 'channels_last'
initializer = tf.keras.initializers.GlorotUniform(seed=seed)
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(
32,
kernel_size=(3, 3),
activation='relu',
data_format=data_format,
input_shape=(28, 28, 1),
kernel_initializer=initializer),
tf.keras.layers.MaxPool2D(pool_size=(2, 2), data_format=data_format),
tf.keras.layers.Conv2D(
64,
kernel_size=(3, 3),
activation='relu',
data_format=data_format,
kernel_initializer=initializer),
tf.keras.layers.Dropout(0.25),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(
128, activation='relu', kernel_initializer=initializer),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(
10 if only_digits else 62,
activation=tf.nn.softmax,
kernel_initializer=initializer),
])
return model | 87353f8bd8e3b3d7602ad3dcd92b717b2590285b | 11,394 |
def _check_index(target_expr, index_expr):
"""
helper function for making sure that an index is valid
:param target_expr: the target tensor
:param index_expr: the index
:return: the index, wrapped as an expression if necessary
"""
if issubclass(index_expr.__class__, _Expression):
index = index_expr
else:
index = _ConstScalar(index_expr)
if index.proto_expr.dtype is lang.UNDEFINED_TYPE:
raise TypeError('Can only index with a scalar.')
if type(index) is _ConstScalar:
if target_expr.size <= index.value() or index.value() < 0:
raise IndexError('Index out of bounds.')
return index | 96d5bf6d6d19bfca0de30ea9915a38237cf9c80f | 11,395 |
def create_access_token(user: UserModel, expires_delta: timedelta = None) -> str:
"""
Create an access token for a user
    :param user: UserModel -> The user
:param expires_delta: timedelta -> The expiration of the token. If not given a default will be used
:return: str -> A token
"""
load_all_config()
to_encode = user.dict()
if not expires_delta:
expires_delta = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
return __generate_jwt_token(to_encode, expires_delta) | d5ba53f0ecc7e7755988ad2540e4cd4c520b30dd | 11,396 |
def is_async_mode():
"""Tests if we're in the async part of the code or not."""
async def f():
"""Unasync transforms async functions in sync functions."""
return None
obj = f()
if obj is None:
return False
obj.close() # prevent unawaited coroutine warning
return True | 8e515efc767f75c4b90486089f0d8a7203da59d7 | 11,397 |
def remove_arm(frame):
"""
Removes the human arm portion from the image.
"""
##print("Removing arm...")
# Cropping 15 pixels from the bottom.
height, width = frame.shape[:2]
frame = frame[:height - 15, :]
##print("Done!")
return frame | 99b998da87f1aa2eca0a02b67fc5adc411603ee4 | 11,398 |
def cumulative_spread(array, x):
"""
>>> import numpy as np
>>> a = np.array([1., 2., 3., 4.])
>>> cumulative_spread(a, 0.)
array([0., 0., 0., 0.])
>>> cumulative_spread(a, 5.)
array([1., 2., 2., 0.])
>>> cumulative_spread(a, 6.)
array([1., 2., 3., 0.])
>>> cumulative_spread(a, 12.)
array([1., 2., 3., 4.])
"""
# This is probably inefficient.
cumulative_effect = np.cumsum(array) - array
b = x - cumulative_effect
return np.fmin(array, np.fmax(0, b)) | c6966a97945f30cce6a794325091a31716a36e54 | 11,399 |
def GetIdentifierStart(token):
"""Returns the first token in an identifier.
Given a token which is part of an identifier, returns the token at the start
of the identifier.
Args:
token: A token which is part of an identifier.
Returns:
The token at the start of the identifier or None if the identifier was not
of the form 'a.b.c' (e.g. "['a']['b'].c").
"""
start_token = token
previous_code_token = GetPreviousCodeToken(token)
while (previous_code_token and (
previous_code_token.IsType(JavaScriptTokenType.IDENTIFIER) or
IsDot(previous_code_token))):
start_token = previous_code_token
previous_code_token = GetPreviousCodeToken(previous_code_token)
if IsDot(start_token):
return None
return start_token | 6b3ad9fb9d43411fc7df147ace872f75c70b5d11 | 11,400 |
def load_spec(filename):
"""
loads the IDL spec from the given file object or filename, returning a
Service object
"""
service = Service.from_file(filename)
service.resolve()
return service | 6dfea85635d3b610ee998999397fc92fd516933c | 11,401 |
import torch
def load_model(file_path, *, epoch, model, likelihood, mll, optimizer, loss):
"""モデルの保存関数
Parameters
----------
file_path : str
モデルの保存先のパスとファイル名
epoch : int
現在のエポック数
model : :obj:`gpytorch.models`
学習済みのモデルのオブジェクト
likelihood : :obj:`gpytorch.likelihoods`
学習済みのlikelihoodsのオブジェクト
mll : :obj:`gpytorch.mlls`
学習済みのmllsのオブジェクト
optimizer : :obj:`torch.optim`
学習済みのoptimのオブジェクト
loss : list
現在のエポックまでの経過loss
Returns
-------
epoch : int
現在のエポック数
model : :obj:`gpytorch.models`
学習済みのモデルのオブジェクト
likelihood : :obj:`gpytorch.likelihoods`
学習済みのlikelihoodsのオブジェクト
mll : :obj:`gpytorch.mlls`
学習済みのmllsのオブジェクト
optimizer : :obj:`torch.optim`
学習済みのoptimのオブジェクト
loss : list
現在のエポックまでの経過loss
"""
temp = torch.load(file_path)
epoch = temp['epoch']
model.load_state_dict(temp['model'])
likelihood.load_state_dict(temp['likelihood'])
mll.load_state_dict(temp['mll'])
optimizer.load_state_dict(temp['optimizer'])
loss = temp['loss']
return epoch, model, likelihood, mll, optimizer, loss | ccc7f221164d89ed29326f720becd29e3442c52b | 11,403 |
import re
def valid_account_id(log, account_id):
"""Validate account Id is a 12 digit string"""
if not isinstance(account_id, str):
log.error("supplied account id {} is not a string".format(account_id))
return False
id_re = re.compile(r'^\d{12}$')
if not id_re.match(account_id):
log.error("supplied account id '{}' must be a 12 digit number".format(account_id))
return False
return True | 30f3aa9547f83c4bea53041a4c79ba1242ae4754 | 11,404 |
import numpy
def prod(a, axis=None, dtype=None, out=None):
"""
Product of array elements over a given axis.
Parameters
----------
a : array_like
Elements to multiply.
axis : None or int or tuple of ints, optional
Axis or axes along which a multiply is performed.
        The default (`axis` = `None`) is to perform a multiply over all
the dimensions of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.7.0
If this is a tuple of ints, a multiply is performed on multiple
axes, instead of a single axis or all the axes as before.
dtype : dtype, optional
The type of the returned array and of the accumulator in which the
elements are summed. The dtype of `a` is used by default unless `a`
has an integer dtype of less precision than the default platform
integer. In that case, if `a` is signed then the platform integer
is used while if `a` is unsigned then an unsigned integer of the
same precision as the platform integer is used.
out : ndarray, optional
Array into which the output is placed. By default, a new array is
created. If `out` is given, it must be of the appropriate shape
(the shape of `a` with `axis` removed, i.e.,
``numpy.delete(a.shape, axis)``). Its type is preserved. See
`doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
    product_along_axis : ndarray
An array with the same shape as `a`, with the specified
axis removed. If `a` is a 0-d array, or if `axis` is None, a scalar
is returned. If an output array is specified, a reference to
`out` is returned.
Examples
--------
>>> np.prod([0.5, 1.5])
2.0
>>> np.prod([0.5, 0.7, 0.2, 1.5], dtype=np.int32)
1
>>> np.prod([[0, 1], [0, 5]])
6
>>> np.prod([[0, 1], [0, 5]], axis=0)
array([0, 6])
>>> np.prod([[0, 1], [0, 5]], axis=1)
array([1, 5])
If the accumulator is too small, overflow occurs:
>>> np.ones(128, dtype=np.int8).prod(dtype=np.int8)
-128
"""
if not bhary.check(a) and not bhary.check(out):
return numpy.prod(a, axis=axis, dtype=dtype, out=out)
else:
if dtype is not None:
a = array_create.array(a, dtype=dtype)
return ufuncs.multiply.reduce(a, axis=axis, out=out) | c33a506847b13924aa903b5daeece0312cc29c8f | 11,405 |
import random
def sample_pagerank(corpus, damping_factor, n):
"""
Return PageRank values for each page by sampling `n` pages
according to transition model, starting with a page at random.
Return a dictionary where keys are page names, and values are
their estimated PageRank value (a value between 0 and 1). All
PageRank values should sum to 1.
"""
# Initialize dict with all pages with count 0
pr_sample = dict([(page, 0) for page in corpus])
sample_page = None
# Iterate over n samples and increment page each time it is selected
for i in range(n):
if sample_page:
transition_dist = transition_model(corpus, sample_page, damping_factor)
sample_page = random.choices(list(transition_dist.keys()), weights=list(transition_dist.values()), k=1)[0]
else:
sample_page = random.choice(list(pr_sample.keys()))
# Record sample selection for each time it is chosen
pr_sample[sample_page] += 1
# Apply overall percentage by dividing each page count by n
for page in pr_sample:
pr_sample[page] /= n
return pr_sample | 32c89d7669718c714663e66a926bb27f9c219c38 | 11,406 |
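# Usage sketch for sample_pagerank above; ``transition_model(corpus, page, damping_factor)``
# is assumed to be defined elsewhere in the same project and to return a probability
# distribution over pages.
corpus = {"1.html": {"2.html"}, "2.html": {"1.html", "3.html"}, "3.html": {"2.html"}}
ranks = sample_pagerank(corpus, damping_factor=0.85, n=10000)
print(ranks)  # PageRank estimates that sum to (approximately) 1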
def guess_layout_cols_lr(mr,
buf,
alg_prefix,
layout_alg_force=None,
verbose=False):
"""
Assume bits are contiguous in columns
wrapping around at the next line
Least significant bit at left
Can either start in very upper left of bit colum and go right
Or can start in upper right of bit colum and go left
Related permutations are handled by flipx, rotate, etc
"""
# Must be able to divide input
txtw, _txth = mr.txtwh()
if txtw % mr.word_bits() != 0:
verbose and "guess_layout_cols_lr: bad width"
return
bit_cols = txtw // mr.word_bits()
# upper left start moving right
def ul_oi2cr(offset, maski):
bitcol = offset % bit_cols
col = maski * bit_cols + bitcol
row = offset // bit_cols
return (col, row)
name = "cols-right"
if layout_alg_force is None or layout_alg_force == name:
yield try_oi2cr(mr, ul_oi2cr, buf), alg_prefix + name
# upper right start moving left
def ur_oi2cr(offset, maski):
bitcol = bit_cols - 1 - offset % bit_cols
col = maski * bit_cols + bitcol
row = offset // bit_cols
return (col, row)
name = "cols-left"
if layout_alg_force is None or layout_alg_force == name:
yield try_oi2cr(mr, ur_oi2cr, buf), alg_prefix + name
# Used in TMS320C15
# even bits start from left side, odd bits from right
# Basically alternating cols-right and cols-left
# they move towards each other and then start again on the next line
if mr.word_bits() % 2 == 0:
def squeeze_lr_oi2cr(offset, maski):
left_bit = maski & 0xFFFE
if maski % 2 == 0:
# cols-right
bitcol = offset % bit_cols
else:
# cols-left (offset by left_bit)
bitcol = 2 * bit_cols - 1 - offset % bit_cols
col = left_bit * bit_cols + bitcol
row = offset // bit_cols
return (col, row)
name = "squeeze-lr"
if layout_alg_force is None or layout_alg_force == name:
yield try_oi2cr(mr, squeeze_lr_oi2cr, buf), alg_prefix + name | dbbbf68ee251fb50c413648e97c9957ed7c086ec | 11,407 |
def decrease(rse_id, account, files, bytes, session=None):
"""
Decreases the specified counter by the specified amount.
:param rse_id: The id of the RSE.
:param account: The account name.
:param files: The amount of files.
:param bytes: The amount of bytes.
:param session: The database session in use.
"""
return increase(rse_id=rse_id, account=account, files=-files, bytes=-bytes, session=session) | 2ad193e5f50c0bcb19f0d796c7f8b9da115a1f2d | 11,408 |
def get_import_error(import_error_id, session):
"""
Get an import error
"""
error = session.query(ImportError).filter(ImportError.id == import_error_id).one_or_none()
if error is None:
raise NotFound("Import error not found")
return import_error_schema.dump(error) | 37444be97de3c4fa97fba60d87f469c428011db1 | 11,409 |
from random import randint
def roll_dice():
    """Simulate rolling a ``sides``-sided die ``times`` times and return the list of results (``times`` and ``sides`` are expected as module-level globals)."""
results = []
for num in range(times):
result = randint(1, sides)
results.append(result)
return results | 9a8442ff777c8c03146bcb9a0f8a2dc19e87a195 | 11,411 |
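# Usage sketch for roll_dice above; ``times`` and ``sides`` are the module-level
# globals the function reads.
times, sides = 5, 6
print(roll_dice())  # e.g. [3, 6, 1, 5, 2]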
import numpy as np
def _read_calib_SemKITTI(calib_path):
"""
:param calib_path: Path to a calibration text file.
:return: dict with calibration matrices.
"""
calib_all = {}
with open(calib_path, 'r') as f:
for line in f.readlines():
if line == '\n':
break
key, value = line.split(':', 1)
calib_all[key] = np.array([float(x) for x in value.split()])
# reshape matrices
calib_out = {}
calib_out['P2'] = calib_all['P2'].reshape(3, 4) # 3x4 projection matrix for left camera
calib_out['Tr'] = np.identity(4) # 4x4 matrix
calib_out['Tr'][:3, :4] = calib_all['Tr'].reshape(3, 4)
return calib_out | 2d71146ce79ce39309930bb8a452c185c35c3061 | 11,412 |
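# Usage sketch for _read_calib_SemKITTI above (hypothetical path to a KITTI-style calib file):
calib = _read_calib_SemKITTI("sequences/00/calib.txt")
print(calib["P2"].shape, calib["Tr"].shape)  # (3, 4) (4, 4)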
import torch
def _bias_act_cuda(dim=1, act='linear', alpha=None, gain=None, clamp=None):
"""Fast CUDA implementation of `bias_act()` using custom ops.
"""
# Parse arguments.
assert clamp is None or clamp >= 0
spec = activation_funcs[act]
alpha = float(alpha if alpha is not None else spec.def_alpha)
gain = float(gain if gain is not None else spec.def_gain)
clamp = float(clamp if clamp is not None else -1)
# Lookup from cache.
key = (dim, act, alpha, gain, clamp)
if key in _bias_act_cuda_cache:
return _bias_act_cuda_cache[key]
# Forward op.
class BiasActCuda(torch.autograd.Function):
@staticmethod
def forward(ctx, x, b): # pylint: disable=arguments-differ
ctx.memory_format = torch.channels_last if x.ndim > 2 and x.stride()[1] == 1 else torch.contiguous_format
x = x.contiguous(memory_format=ctx.memory_format)
b = b.contiguous() if b is not None else _null_tensor
y = x
if act != 'linear' or gain != 1 or clamp >= 0 or b is not _null_tensor:
y = _plugin.bias_act(x, b, _null_tensor, _null_tensor, _null_tensor, 0, dim, spec.cuda_idx, alpha, gain, clamp)
ctx.save_for_backward(
x if 'x' in spec.ref or spec.has_2nd_grad else _null_tensor,
b if 'x' in spec.ref or spec.has_2nd_grad else _null_tensor,
y if 'y' in spec.ref else _null_tensor)
return y
@staticmethod
def backward(ctx, dy): # pylint: disable=arguments-differ
dy = dy.contiguous(memory_format=ctx.memory_format)
x, b, y = ctx.saved_tensors
dx = None
db = None
if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]:
dx = dy
if act != 'linear' or gain != 1 or clamp >= 0:
dx = BiasActCudaGrad.apply(dy, x, b, y)
if ctx.needs_input_grad[1]:
db = dx.sum([i for i in range(dx.ndim) if i != dim])
return dx, db
# Backward op.
class BiasActCudaGrad(torch.autograd.Function):
@staticmethod
def forward(ctx, dy, x, b, y): # pylint: disable=arguments-differ
ctx.memory_format = torch.channels_last if dy.ndim > 2 and dy.stride()[1] == 1 else torch.contiguous_format
dx = _plugin.bias_act(dy, b, x, y, _null_tensor, 1, dim, spec.cuda_idx, alpha, gain, clamp)
ctx.save_for_backward(
dy if spec.has_2nd_grad else _null_tensor,
x, b, y)
return dx
@staticmethod
def backward(ctx, d_dx): # pylint: disable=arguments-differ
d_dx = d_dx.contiguous(memory_format=ctx.memory_format)
dy, x, b, y = ctx.saved_tensors
d_dy = None
d_x = None
d_b = None
d_y = None
if ctx.needs_input_grad[0]:
d_dy = BiasActCudaGrad.apply(d_dx, x, b, y)
if spec.has_2nd_grad and (ctx.needs_input_grad[1] or ctx.needs_input_grad[2]):
d_x = _plugin.bias_act(d_dx, b, x, y, dy, 2, dim, spec.cuda_idx, alpha, gain, clamp)
if spec.has_2nd_grad and ctx.needs_input_grad[2]:
d_b = d_x.sum([i for i in range(d_x.ndim) if i != dim])
return d_dy, d_x, d_b, d_y
# Add to cache.
_bias_act_cuda_cache[key] = BiasActCuda
return BiasActCuda | 44559520faf06fbf9b6f17ac1b29b829840e7f38 | 11,413 |
from itertools import chain
from typing import Mapping
def root_nodes(g: Mapping):
"""
>>> g = dict(a='c', b='ce', c='abde', d='c', e=['c', 'z'], f={})
>>> sorted(root_nodes(g))
['f']
Note that `f` is present: Isolated nodes are considered both as
root and leaf nodes both.
"""
nodes_having_parents = set(chain.from_iterable(g.values()))
return set(g) - set(nodes_having_parents) | 67c2043053f82a9a17f148c57bbf4d2501530f99 | 11,414 |
def _GetRemoteFileID(local_file_path):
"""Returns the checked-in hash which identifies the name of file in GCS."""
hash_path = local_file_path + '.sha1'
with open(hash_path, 'rb') as f:
return f.read(1024).rstrip() | 4a06dcdd30e379891fe3f9a5b3ecc2c4fd1a98ed | 11,415 |
import numpy as np
def stress_stress(
    bond_array_1, c1, etypes1, bond_array_2, c2, etypes2, sig, ls, r_cut, cutoff_func
):
"""2-body multi-element kernel between two partial stress components
accelerated with Numba.
Args:
bond_array_1 (np.ndarray): 2-body bond array of the first local
environment.
c1 (int): Species of the central atom of the first local environment.
etypes1 (np.ndarray): Species of atoms in the first local
environment.
bond_array_2 (np.ndarray): 2-body bond array of the second local
environment.
c2 (int): Species of the central atom of the second local environment.
etypes2 (np.ndarray): Species of atoms in the second local
environment.
sig (float): 2-body signal variance hyperparameter.
ls (float): 2-body length scale hyperparameter.
r_cut (float): 2-body cutoff radius.
cutoff_func (Callable): Cutoff function.
    Return:
        np.ndarray: 6x6 matrix of stress/stress kernel values.
    """
kernel_matrix = np.zeros((6, 6))
ls1 = 1 / (2 * ls * ls)
ls2 = 1 / (ls * ls)
ls3 = ls2 * ls2
sig2 = sig * sig
for m in range(bond_array_1.shape[0]):
ri = bond_array_1[m, 0]
e1 = etypes1[m]
for n in range(bond_array_2.shape[0]):
e2 = etypes2[n]
# check if bonds agree
if (c1 == c2 and e1 == e2) or (c1 == e2 and c2 == e1):
rj = bond_array_2[n, 0]
r11 = ri - rj
D = r11 * r11
s1 = 0
for d1 in range(3):
ci = bond_array_1[m, d1 + 1]
B = r11 * ci
fi, fdi = cutoff_func(r_cut, ri, ci)
for d2 in range(d1, 3):
coordinate_1 = bond_array_1[m, d2 + 1] * ri
s2 = 0
for d3 in range(3):
cj = bond_array_2[n, d3 + 1]
A = ci * cj
C = r11 * cj
fj, fdj = cutoff_func(r_cut, rj, cj)
for d4 in range(d3, 3):
coordinate_2 = bond_array_2[n, d4 + 1] * rj
force_kern = force_helper(
A, B, C, D, fi, fj, fdi, fdj, ls1, ls2, ls3, sig2
)
kernel_matrix[s1, s2] += (
force_kern * coordinate_1 * coordinate_2
)
s2 += 1
s1 += 1
return kernel_matrix / 4 | c832b6951774eff3b37dd3a674be74ad917409df | 11,416 |
def is_color_rgb(color):
"""Is a color in a valid RGB format.
Parameters
----------
color : obj
The color object.
Returns
-------
bool
True, if the color object is in RGB format.
False, otherwise.
Examples
--------
>>> color = (255, 0, 0)
>>> is_color_rgb(color)
True
>>> color = (1.0, 0.0, 0.0)
>>> is_color_rgb(color)
True
>>> color = (1.0, 0, 0)
>>> is_color_rgb(color)
False
>>> color = (255, 0.0, 0.0)
>>> is_color_rgb(color)
False
>>> color = (256, 0, 0)
>>> is_color_rgb(color)
False
"""
if isinstance(color, (tuple, list)):
if len(color) == 3:
if all(isinstance(c, float) for c in color):
if all(c >= 0.0 and c <= 1.0 for c in color):
return True
elif all(isinstance(c, int) for c in color):
if all(c >= 0 and c <= 255 for c in color):
return True
return False | 46b8241d26fa19e4372587ffebda3690972c3395 | 11,417 |
def edit_post_svc(current_user, id, content):
"""
    Updates post content if the post exists and belongs to the current user.
    :param current_user: id of the user making the edit
    :param id: id of the post to update
    :param content: new content for the post
    :return: True on success, None if the post is missing or not owned by the user
"""
post = single_post_svc(id)
if post is None or post.user_id != current_user:
return None
post.content = content
db.session.commit()
return True | a17b632f402ef3f915bf06bde86ab0ff40956177 | 11,418 |
import math as M
def free_free_absorp_coefPQ(n_e,n_i,T,f):
"""Returns a physical quantity for the free-free absorption coefficient
given the electron density, ion density, kinetic temperature and frequency
as physical quantities. From Shklovsky (1960) as quoted by Kraus (1966)."""
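    # ``P`` (providing ``P.pq(value, unit)``) and the ``.inBaseUnits()`` API are assumed
    # to come from a physical-quantities package imported elsewhere in the module.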
value = 9.8e-13 * n_e.inBaseUnits().value * n_i.inBaseUnits().value \
* M.pow(T.inBaseUnits().value,-1.5) * M.pow(f.inBaseUnits().value,-2) \
* (19.8 + M.log(M.pow(T.inBaseUnits().value,1.5)/f.inBaseUnits().value))
return P.pq(value,'1/m') | 17a09bf20f4363be4f273694168df2cf0eee8b38 | 11,419 |
import numpy as np
def pixel_gain_mode_statistics(gmaps):
    """Returns per-gain-mode pixel counts (statistics) for the given gain maps
gr0, gr1, gr2, gr3, gr4, gr5, gr6 = gmaps
"""
arr1 = np.ones_like(gmaps[0], dtype=np.int32)
return [np.sum(np.select((gr,), (arr1,), default=0)) for gr in gmaps] | b9c6b4c601724105d381e77f7c293e0bd00f3ba8 | 11,420 |
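# Usage sketch for pixel_gain_mode_statistics above: three boolean gain maps over a
# 2x2 sensor (the docstring's use case unpacks seven maps, but any number works).
gmaps = [np.array([[True, False], [False, False]]),
         np.array([[False, True], [True, False]]),
         np.array([[False, False], [False, True]])]
print(pixel_gain_mode_statistics(gmaps))  # per-map pixel counts: 1, 2 and 1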
import multiprocessing as mp
def run_parallel(ds1, ds2):
""" Run the calculation using multiprocessing.
:param ds1: list with points
:param ds2: list with points
:return: list of distances
"""
pool = mp.Pool(processes=mp.cpu_count())
result = pool.starmap(euclidian_distance, [(p1, p2) for p1 in ds1 for p2 in ds2])
pool.close()
return result | e8a6b0124db1948ab72b9081863cdfe77a75e08d | 11,421 |
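# Usage sketch for run_parallel above (assumes euclidian_distance exists; guard the call
# so worker processes can import the module safely):
if __name__ == "__main__":
    a = [(0, 0), (1, 1)]
    b = [(3, 4)]
    print(run_parallel(a, b))  # e.g. [5.0, 3.605551275463989]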
import re
def to_latin(name):
"""Convert all symbols to latin"""
symbols = (u"іїєабвгдеёжзийклмнопрстуфхцчшщъыьэюяІЇЄАБВГДЕЁЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯ",
u"iieabvgdeejzijklmnoprstufhzcss_y_euaIIEABVGDEEJZIJKLMNOPRSTUFHZCSS_Y_EUA")
tr = {ord(a): ord(b) for a, b in zip(*symbols)}
translated_name = name.translate(tr)
translated_name = re.sub("[^A-Za-z0-9]", "_", translated_name)
return translated_name | 06a0d535fa7a74feea33e58815da2792a6026def | 11,422 |
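# Usage sketch for to_latin above:
print(to_latin("Вітаємо, світ!"))   # -> "Vitaemo__svit_"
print(to_latin("опис файлу 2023"))  # -> "opis_fajlu_2023"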