content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M)
---|---|---|
import numpy as np
from scipy.stats import norm
def rdoublegauss(mu1, mu2, sigma1, sigma2, ratio, size=None):
"""random variable from double gaussian"""
r1 = ratio / (1. + ratio)
r2 = 1 - r1
R = np.asarray(np.random.random(size))
Rshape = R.shape
R = np.atleast_1d(R)
mask1 = (R < r1)
mask2 = ~mask1
N1 = mask1.sum()
N2 = R.size - N1
R[mask1] = norm(mu1, sigma1).rvs(N1)
R[mask2] = norm(mu2, sigma2).rvs(N2)
return R.reshape(Rshape) | 5286d31985656d2f38c4e6b126d2f6d0915c82cb | 6,100 |
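A minimal usage sketch (assuming numpy and scipy.stats.norm are imported as above and rdoublegauss is defined as shown): with ratio=2, roughly 2/3 of the draws come from the first component.

samples = rdoublegauss(mu1=0.0, mu2=3.0, sigma1=1.0, sigma2=0.5, ratio=2.0, size=10_000)
print(samples.mean(), samples.std())  # mixture mean should sit near (2/3)*0.0 + (1/3)*3.0 = 1.0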
def check_add_role(store, id, name):
""" Checks if role exist and then adds record if it doesn't """
role = store.find_role(name)
if role is None:
return store.create_role(id=id, name=name)
else:
return role | c8680158cc005bf7a278951774b9fe0a733fc8c6 | 6,101 |
import copy
def report_map():
"""
update DB with new version of a container instance's id map
:return: str. 'true' if successful
"""
if not request.json:
logger.error('received non-json data')
abort(400)
logger.info('Received map update from {}'.format(request.remote_addr))
logger.debug('Map update {}'.format(request.json))
_map = request.json
for k, v in _map.items():
container_attributes = copy.deepcopy(v)
try:
container_attributes['cadvisor_url'] = \
"http://{}:9070/docker/{}".format(
container_attributes['instance_ip'],
container_attributes['container_id'])
container_attributes['graylog_url'] = \
settings.graylog_url.format(graylog_fqdn=settings.graylog_fqdn,
container_id=container_attributes['container_id'][:12])
except KeyError as e:
logger.error('Unable to find keys in response: {}'.format(e))
_map[k] = container_attributes
db.batch_put(_map, 'ecs_id_mapper_hash')
return 'true' | 46707f8c7ba4a02fa27f4c426c05b428c9eb43b2 | 6,102 |
from pathlib import Path
def delta_path(base_path: Path, item_path: Path, new_base_path: Path) -> Path:
"""
Removes a base path from an item, and appends result to a new path
:param base_path: The :py:class:`pathlib.Path` to be removed from `item_path`
:param item_path: The :py:class:`pathlib.Path` to be delta-ed
:param new_base_path: The new base :py:class:`pathlib.Path` for `item_path`.
:raises ValueError: If base_path is not a sub-path of item_path.
:return: The new combined path.
"""
path_stub = item_path.relative_to(base_path)
new_item_path = new_base_path / path_stub
return new_item_path | ec531a011e36f053a8092525faae2047f5f66ccc | 6,103 |
import asyncio
async def async_validate_config(hass, config):
"""Validate config."""
automations = list(
filter(
lambda x: x is not None,
await asyncio.gather(
*(
_try_async_validate_config_item(hass, p_config, config)
for _, p_config in config_per_platform(config, DOMAIN)
)
),
)
)
# Create a copy of the configuration with all config for current
# component removed and add validated config back in.
config = config_without_domain(config, DOMAIN)
config[DOMAIN] = automations
return config | 7f77a4c008a5fcb8d275bb2e7f65005d9e1c49b5 | 6,104 |
import logging
import os
def judge_result(problem_id, commit_id, data_num):
"""对输出数据进行评测"""
logging.debug("Judging result")
correct_result = os.path.join(
data_dir, str(problem_id), 'data%s.out' % data_num)
user_result = os.path.join(
work_dir, str(commit_id), 'out%s.txt' % data_num)
try:
# strip '\r' and trailing whitespace/newlines before comparing
correct = open(correct_result).read().replace('\r', '').rstrip()
user = open(user_result).read().replace('\r', '').rstrip()
except OSError:
return False
if correct == user:  # exactly identical: AC
return "Accepted"
if correct.split() == user.split():  # identical after ignoring spaces, tabs and newlines: PE
return "Presentation Error"
if correct in user:  # extra output produced
return "Output limit"
return "Wrong Answer" | 6eb9503ccc2b6d9ae85611657997308bffb618ce | 6,105 |
def _fwd6(y, dt): # pragma: no cover
"""Compute the first derivative of a uniformly-spaced-in-time array with a
sixth-order forward difference scheme.
Parameters
----------
y : (7,...) ndarray
Data to differentiate. The derivative is taken along the first axis.
dt : float
Time step between consecutive entries of y.
Returns
-------
dy0 : float or (...) ndarray
Approximate derivative of y at the first entry, i.e., dy[0] / dt.
"""
return (-147*y[0] + 360*y[1] - 450*y[2] + 400*y[3] - 225*y[4] \
+ 72*y[5] - 10*y[6]) / (60*dt) | 0d7321b3615fab6d6e065917ec94479ada0ee70c | 6,106 |
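A quick sanity check with hypothetical values: the sixth-order forward stencil is exact for low-order polynomials, so differentiating y = t**2 or y = t**3 at t = 0 should give essentially zero.

import numpy as np

dt = 0.1
t = np.arange(7) * dt
print(_fwd6(t**2, dt))   # ~0.0, the derivative of t**2 at t = 0
print(_fwd6(t**3, dt))   # ~0.0 as well (3*t**2 evaluated at t = 0)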
def minimize_newton_cg(nrgs, x0, num_params):
"""
Minimizes a structure using a Newton-CG method. This requires a
hopefully fully invertible analytic Hessian that will be used
to minimize geometries.
Parameters
----------
nrgs: [list of functionals]
Energy functions used to compute the energy, hessian, and mixed partials.
x0: np.array
Structure of the molecule to be minimized.
num_params: int
total number of parameters of the model. (ytz): this should be refactored out.
"""
assert x0.shape[1] == 3
N = x0.shape[0]
def hessian(conf):
conf = conf.reshape((N,3))
hess = None
for e in nrgs:
_, _, test_hessians, _ = e.total_derivative(conf, num_params)
if hess is None:
hess = test_hessians
else:
hess += test_hessians
return hess.reshape((N*3, N*3))
def gradient(conf):
conf = conf.reshape((N,3))
grads = np.zeros_like(conf)
for e in nrgs:
_, test_grads, _, _ = e.total_derivative(conf, num_params)
grads += test_grads
return grads.reshape(-1)
def energy(conf):
conf = conf.reshape((N,3))
nrg = 0
for e in nrgs:
test_nrg, _, _, _ = e.total_derivative(conf, num_params)
nrg += test_nrg
return nrg
res = minimize(
energy,
x0.reshape(-1),
# method='Newton-CG',
method='L-BFGS-B',
jac=gradient,
# hess=hessian,
# options={'xtol': 1e-8, 'disp': False}
)
# print("before and after")
# print(x0)
# print(np.array(res.x).reshape((N,3)))
return res.x.reshape((N,3))
# print(energy(x0), gradient(x0), hessian(x0).shape) | 46ddd6b2004579ef07170ef578859c7119ed4e13 | 6,107 |
def currency(price, currency):
"""
Returns price in currency format
"""
price = float(price)
price *= float(currency.exchange_rate)
try:
return currency.display_format.format(price)
except Exception as e:
raise ImproperlyConfigured('Invalid currency format string: "%s" for currency "%s". %s' % (currency.display_format, currency.name, e))
def project_point(x, R, T, f, c, k, p):
"""
Args
x: Nx3 points in world coordinates
R: 3x3 Camera rotation matrix
T: 3x1 Camera translation parameters
f: 2x1 Camera focal length
c: 2x1 Camera center
k: 3x1 Camera radial distortion coefficients
p: 2x1 Camera tangential distortion coefficients
Returns
ypixel.T: Nx2 points in pixel space
depth: N points
"""
if 'aist' in config.DATASET.TEST_DATASET:
xcam = R.dot(x.T) + T # [B, 3, PJ]
else:
xcam = R.dot(x.T - T) # [B, 3, PJ]
y = xcam[:2] / (xcam[2] + 1e-5)
# === add camera distortion
r = np.sum(y ** 2, axis=0)
d = 1 + k[0] * r + k[1] * r * r + k[2] * r * r * r
u = y[0, :] * d + 2 * p[0] * y[0, :] * y[1, :] + p[1] * (r + 2 * y[0, :] * y[0, :])
v = y[1, :] * d + 2 * p[1] * y[0, :] * y[1, :] + p[0] * (r + 2 * y[1, :] * y[1, :])
y[0, :] = u
y[1, :] = v
ypixel = np.multiply(f, y) + c
depth = xcam[2]
return ypixel.T, depth | 5b6cce136ac6753fcdefcde01db9636357687ab2 | 6,109 |
def sum_to_scalar(*args):
"""Adding losses/nmsks together that were evaluated in parallel"""
new_args = list()
for arg in args:
new_args.append({k: v.sum() for (k, v) in arg.items()})
return new_args | a4264911962c7bf3432735f8872522e193ceec8f | 6,110 |
def inv(h_array: np.ndarray) -> np.ndarray:
"""
Calculate the pseudo-inverse of a PSD array. Note that pinvh performs poorly
if the input matrix is far from Hermitian, so pinv2 is used
instead in that case.
Parameters:
----------
h_array : input matrix, assumed to be Hermitian
Returns:
----------
h_inv : pseudo inverse of h_array.
"""
if np.allclose(h_array, h_array.T):
h_inv = linalg.pinvh(h_array)
else:
h_inv = linalg.pinv2(h_array)
return h_inv | c3305878b3f2dfdaabe6a245d8063b1039e19bc2 | 6,111 |
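A small check, assuming the function and scipy.linalg are available as above: a real symmetric PSD input goes through the pinvh branch, and multiplying the result back by the original matrix recovers the identity. (Note that scipy.linalg.pinv2 was deprecated and later removed in newer SciPy releases, so the non-Hermitian branch may need scipy.linalg.pinv there.)

import numpy as np

A = np.array([[2.0, 1.0], [1.0, 2.0]])     # symmetric positive definite
print(np.allclose(inv(A) @ A, np.eye(2)))  # True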
import datetime
def update_risk_cavs(connection):
"""Parse cavs from html to markdown.
Args:
connection: SQLAlchemy connection.
Returns:
ids of risks for which cavs were updated.
"""
cavs_data = connection.execute(
sa.text("""
SELECT cav.id, cav.attribute_value, cav.attributable_id
FROM custom_attribute_values AS cav
JOIN custom_attribute_definitions AS cad
ON cad.id = cav.custom_attribute_id
WHERE cad.definition_type = "risk"
AND attribute_value REGEXP :reg_exp
"""),
reg_exp=REGEX_HTML
).fetchall()
risks_ids = {data[2] for data in cavs_data}
cavs_ids = {data[0] for data in cavs_data}
cavs_table = sa.sql.table(
'custom_attribute_values',
sa.Column('id', sa.Integer()),
sa.Column('attribute_value', sa.Text, nullable=False),
sa.Column('updated_at', sa.DateTime, nullable=False),
)
for cav_id, attribute_value, _ in cavs_data:
op.execute(cavs_table.update().values(
attribute_value=parse_html(attribute_value),
updated_at=datetime.datetime.utcnow(),
).where(cavs_table.c.id == cav_id))
utils.add_to_objects_without_revisions_bulk(
connection, cavs_ids, "CustomAttributeValue", "modified",
)
return risks_ids | 8af9ef613259915573ca1efc699278c0c2a6a4e4 | 6,112 |
def prefix_to_number(prefix):
"""Return the number of the prefix."""
if prefix in PREFIXES:
return PREFIXES[prefix]
raise ValueError(f'prefix "{prefix}" not found in list of prefixes') | e0a3822aa615d79a1ff0d5c7405097e055573ed0 | 6,113 |
def is_response_going_to_be_used(request, spider):
"""Check whether the request's response is going to be used."""
callback = get_callback(request, spider)
if is_callback_using_response(callback):
return True
for provider in discover_callback_providers(callback):
if is_provider_using_response(provider):
return True
return False | 4cd908dbebfd6089a25bf5168937b2a4f02f23ee | 6,114 |
def eval_market1501(distmat, q_vids, g_vids, q_camids, g_camids, max_rank=50):
"""Evaluation with Market1501 metrics
Key: for each query identity, its gallery images from the same camera view are discarded.
"""
num_q, num_g = distmat.shape
if num_g < max_rank:
max_rank = num_g
print(f"Note: number of gallery samples is quite small, got {num_g}")
indices = np.argsort(distmat, axis=1)
matches = (g_vids[indices] == q_vids[:, np.newaxis]).astype(np.int32)
# compute cmc curve for each query
all_cmc = []
all_ap = []
num_valid_q = 0. # number of valid query
for q_idx in range(num_q):
# get query vid and camid
q_vid = q_vids[q_idx]
q_camid = q_camids[q_idx]
# remove gallery samples that have the same vid and camid with query
order = indices[q_idx]
remove = (g_vids[order] == q_vid) & (g_camids[order] == q_camid)
keep = np.invert(remove)
# compute cmc curve
# binary vector, positions with value 1 are correct matches
orig_cmc = matches[q_idx][keep]
if not np.any(orig_cmc):
# this condition is true when query identity does not appear in gallery
continue
cmc = orig_cmc.cumsum()
cmc[cmc > 1] = 1
all_cmc.append(cmc[:max_rank])
num_valid_q += 1.
# compute average precision
# https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Average_precision
num_rel = orig_cmc.sum()
tmp_cmc = orig_cmc.cumsum()
tmp_cmc = [x / (i+1.) for i, x in enumerate(tmp_cmc)]
tmp_cmc = np.asarray(tmp_cmc) * orig_cmc
ap_ = tmp_cmc.sum() / num_rel
all_ap.append(ap_)
assert num_valid_q > 0, "Error: all query identities do not appear in gallery"
all_cmc = np.array(all_cmc, dtype=np.float32)
all_cmc = all_cmc.sum(0) / num_valid_q
map_ = np.mean(all_ap)
return all_cmc, map_ | 5387ee7fe7cac90406ac91619844e8e1fd814d88 | 6,115 |
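A tiny synthetic example (made-up distances and ids) to illustrate the inputs: two queries against four gallery images, all captured by a different camera than the queries so nothing is filtered out.

import numpy as np

distmat = np.array([[0.1, 0.9, 0.5, 0.7],
                    [0.8, 0.2, 0.6, 0.4]])
q_vids = np.array([1, 2]); g_vids = np.array([1, 2, 1, 2])
q_camids = np.array([0, 0]); g_camids = np.array([1, 1, 1, 1])
cmc, mAP = eval_market1501(distmat, q_vids, g_vids, q_camids, g_camids, max_rank=4)
print(cmc, mAP)  # rank-1 accuracy is 1.0 here since each query's nearest gallery match is correct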
import os
import glob
import warnings
def get_band_structure_from_vasp_multiple_branches(dir_name, efermi=None,
projections=False):
"""
This method is used to get band structure info from a VASP directory. It
takes into account that the run can be divided in several branches named
"branch_x". If the run has not been divided in branches the method will
turn to parsing vasprun.xml directly.
The method returns None if there's a parsing error.
Args:
dir_name: Directory containing all bandstructure runs.
efermi: Efermi for bandstructure.
projections: True if you want to get the data on site projections if
any. Note that this is sometimes very large
Returns:
A BandStructure Object
"""
#ToDo: Add better error handling!!!
if os.path.exists(os.path.join(dir_name, "branch_0")):
#get all branch dir names
branch_dir_names = [os.path.abspath(d)
for d in glob.glob("{i}/branch_*"
.format(i=dir_name))
if os.path.isdir(d)]
#sort by the directory name (e.g, branch_10)
sort_by = lambda x: int(x.split("_")[-1])
sorted_branch_dir_names = sorted(branch_dir_names, key=sort_by)
# populate branches with Bandstructure instances
branches = []
for dir_name in sorted_branch_dir_names:
xml_file = os.path.join(dir_name, "vasprun.xml")
if os.path.exists(xml_file):
run = Vasprun(xml_file, parse_projected_eigen=projections)
branches.append(run.get_band_structure(efermi=efermi))
else:
# It might be better to throw an exception
warnings.warn("Skipping {}. Unable to find {}"
.format(d=dir_name, f=xml_file))
return get_reconstructed_band_structure(branches, efermi)
else:
xml_file = os.path.join(dir_name, "vasprun.xml")
#Better handling of Errors
if os.path.exists(xml_file):
return Vasprun(xml_file, parse_projected_eigen=projections)\
.get_band_structure(kpoints_filename=None, efermi=efermi)
else:
return None | 5df7c4db41a29cfb9c811251b2de7f3a52289fab | 6,116 |
import pytest
def pytype_raise():
"""A pytest.raises wrapper for catching TypeErrors.
Parameters
----------
match : str, default=None
Regular expression to match exception error text against.
Returns
-------
RaisesContext
pytest context manager for catching exception-raising blocks.
"""
def _pytype_raise(match=None):
return pytest.raises(TypeError, match=match)
_pytype_raise.__doc__ = pyvalue_raise.__doc__
return _pytype_raise | ec5c7a56a8a3fb9028fb0ec72ac814061def467d | 6,117 |
def lift_split_buffers(lines):
"""Lift the split buffers in the program
For each module, if we find any split buffers with the name "buf_data_split",
we will lift them out of the for loops and put them in the variable declaration
section at the beginning of the module.
Parameters
----------
lines:
contains the codelines of the program
"""
code_len = len(lines)
for pos in range(code_len):
line = lines[pos]
if line.find("variable=buf_data_split") != -1:
# Search for the variable declaration section
decl_pos = -1
prev_pos = pos - 1
while prev_pos >= 0:
prev_line = lines[prev_pos]
if prev_line.find("Variable Declaration") != -1:
decl_pos = prev_pos
break
prev_pos -= 1
# Move the two code lines at [pos - 1] and [pos] to [decl_pos] and
# [decl_pos + 1]
indent = lines[decl_pos].find("/*")
line1 = " " * indent + lines[pos - 1].lstrip()
line2 = " " * indent + lines[pos].lstrip()
del lines[pos - 1]
del lines[pos - 1]
lines.insert(decl_pos, line1)
lines.insert(decl_pos + 1, line2)
return lines | 78919247b241dc29de84594b097c75d5e7ae1f03 | 6,118 |
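A hypothetical mini-example of the pass: the two lines declaring and partitioning buf_data_split inside the loop are removed from the loop body and hoisted toward the variable-declaration section at the top of the module.

lines = [
    "  /* Variable Declaration */",
    "  int i;",
    "  for (int i = 0; i < N; i++) {",
    "    float buf_data_split[8];",
    "    #pragma HLS array_partition variable=buf_data_split",
    "  }",
]
print("\n".join(lift_split_buffers(lines)))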
import scipy
def peak_finder(
df_run,
cd,
windowlength,
polyorder,
datatype,
lenmax,
peak_thresh):
"""Determines the index of each peak in a dQdV curve
V_series = Pandas series of voltage data
dQdV_series = Pandas series of differential capacity data
cd = either 'c' for charge and 'd' for discharge.
Output:
i = list of indexes for each found peak"""
(cycle_ind_col, data_point_col, volt_col, curr_col, dis_cap_col,
char_cap_col, charge_or_discharge) = col_variables(datatype)
V_series = df_run[volt_col]
# this makes the peak finding smoothing independent of any smoothing that
# has already occurred.
dQdV_series = df_run['Smoothed_dQ/dV']
sigx, sigy = cd_dataframe(V_series, dQdV_series, cd)
# the below is to make sure the window length ends up an odd number - even
# though we are basing it on the length of the df
wl = lenmax / 20
wlint = int(round(wl))
if wlint % 2 == 0:
windowlength_new = wlint + 1
else:
windowlength_new = wlint
if len(sigy) > windowlength_new and windowlength_new > polyorder:
# has to be larger than 69 so that windowlength > 3 - necessary for sav
# golay function
sigy_smooth = scipy.signal.savgol_filter(
sigy, windowlength_new, polyorder)
else:
sigy_smooth = sigy
peak_thresh_ft = float(peak_thresh)
i = peakutils.indexes(
sigy_smooth,
thres=peak_thresh_ft,
min_dist=lenmax / 50)
if i is not None and len(i) > 0:
sigx_volts = list(sigx[i])
peak_heights = list(sigy[i])
else:
sigx_volts = []
peak_heights = []
return i, sigx_volts, peak_heights | 370e019354579ab7b9a4eedef514dbde84801950 | 6,119 |
def make_box(world, x_dim, y_dim, z_dim, mass=0.5):
"""Makes a new axis-aligned box centered at the origin with
dimensions width x depth x height. The box is a RigidObject
with automatically determined inertia.
"""
boxgeom = Geometry3D()
boxgeom.loadFile("data/objects/cube.tri")
# box is centered at the origin
boxgeom.transform([x_dim, 0, 0, 0, y_dim, 0, 0, 0, z_dim], [-x_dim * 0.5, -y_dim * 0.5, -z_dim * 0.5])
print "Making a box a rigid object"
bmass = Mass()
bmass.setMass(mass)
bmass.setCom([0, 0, 0])
bmass.setInertia([x_dim / 12, y_dim / 12, z_dim / 12])
box = world.makeRigidObject("box")
box.geometry().set(boxgeom)
box.appearance().setColor(0.6, 0.3, 0.2, 1.0)
box.setMass(bmass)
cparams = box.getContactParameters()
cparams.kFriction = 1.5
cparams.kStiffness = 100000
cparams.kDamping = 30000
cparams.kRestitution = 0.5
return box | f3257a8339542c55d96bd752bad1d0c69c6370e0 | 6,120 |
import os
def as_file(uri):
"""
If the URI is a file (either the ``file`` scheme or no scheme), then returns the normalized
path. Otherwise, returns ``None``.
"""
if _IS_WINDOWS:
# We need this extra check in Windows before urlparse because paths might have a drive
# prefix, e.g. "C:" which will be considered a scheme for urlparse below
path = uri.replace('/', '\\')
if os.path.exists(path):
return os.path.normpath(path)
url = urlparse.urlparse(uri)
scheme = url.scheme
if (not scheme) or (scheme == 'file'):
path = url.path
if _IS_WINDOWS:
path = path.replace('/', '\\')
return os.path.normpath(path)
return None | 774cd4bd5786b64cea757ab777d56a610d40b71d | 6,121 |
def add_full_name(obj):
"""
A decorator to add __full_name__ to the function being decorated.
This should be done for all decorators used in pywikibot, as any
decorator that does not add __full_name__ will prevent other
decorators in the same chain from being able to obtain it.
This can be used to monkey-patch decorators in other modules.
e.g.
<xyz>.foo = add_full_name(<xyz>.foo)
@param obj: The function to decorate
@type obj: callable
@return: decorating function
@rtype: function
"""
def outer_wrapper(*outer_args, **outer_kwargs):
"""Outer wrapper.
The outer wrapper may be the replacement function if the decorated
decorator was called without arguments, or the replacement decorator
if the decorated decorator was called with arguments.
@param outer_args: args
@type outer_args: list
@param outer_kwargs: kwargs
@type outer_kwargs: dict
"""
def inner_wrapper(*args, **kwargs):
"""Replacement function.
If the decorator supported arguments, they are in outer_args,
and this wrapper is used to process the args which belong to
the function that the decorated decorator was decorating.
@param args: args passed to the decorated function.
@param kwargs: kwargs passed to the decorated function.
"""
add_decorated_full_name(args[0])
return obj(*outer_args, **outer_kwargs)(*args, **kwargs)
inner_wrapper.__doc__ = obj.__doc__
inner_wrapper.__name__ = obj.__name__
inner_wrapper.__module__ = obj.__module__
inner_wrapper.__signature__ = signature(obj)
# The decorator being decorated may have args, so both
# syntax need to be supported.
if (len(outer_args) == 1 and len(outer_kwargs) == 0 and
callable(outer_args[0])):
add_decorated_full_name(outer_args[0])
return obj(outer_args[0])
else:
return inner_wrapper
if not __debug__:
return obj
return outer_wrapper | ca7b1541adaa39a62073ec630d705166fc8833b6 | 6,122 |
def LikelihoodRedshiftMeasure( measure='', data=[], scenario=False, measureable=False):
"""
returns likelihood functions of redshift for observed data of measure,
can be used to obtain estimate and deviation
Parameters
----------
measure : string
indicate which measure is probed
data : array-like
1D array contain extragalactic component of observed values
scenario : dictionary
list of models combined to one scenario
prior : boolean
"""
if not measure:
exit( "you must provide a measure. Try: 'DM', 'RM', 'tau'" )
if scenario.redshift:
exit( "requires scenario with telescope and population" )
## prepare scenario for increasing redshift
tmp = scenario.copy()
tmp.population = False
tmp.telescope = False
## container for likelihoods and deviation at increasing redshift
Ps = np.zeros( [len(DMs),len(redshift_bins)] )
devs= Ps.copy()
## for each redshift
for iz, z in enumerate( redshift_bins ):
tmp.redshift = z
L = GetLikelihood( measure, tmp )
if measureable:
L.Measureable()
Ps[:,iz], devs[:,iz] = L.Likelihoods( DMs, density=True ) ### use probability density to compare same value of DM at different redshifts. Otherwise influenced by different binning
Ls = []
for P, dev in zip(Ps, devs):
L = LikelihoodFunction( P=P, x=redshift_range, dev=dev )
Ls.append(L)
return Ls | 55d414bb0adb00fe549485f2e3682d15b761b7a4 | 6,123 |
def plural_suffix(count: int) -> str:
""""s" when count is not one"""
suffix = ''
if count != 1:
suffix = 's'
return suffix | 950002d57560d06e93e08647ff17d885688bca87 | 6,124 |
def _pr_exists(user, namespace, repo, idx):
""" Utility method checking if a given PR exists. """
repo_obj = pagure.lib.query.get_authorized_project(
flask.g.session, project_name=repo, user=user, namespace=namespace
)
if not repo_obj:
return False
pr_obj = pagure.lib.query.search_pull_requests(
flask.g.session, project_id=repo_obj.id, requestid=idx
)
if not pr_obj:
return False
return pr_obj | 2e68b6d4282f6f3ca4d9645c78579e3df3889494 | 6,125 |
import csv
def readData(filename):
"""
Read in our data from a CSV file and create a dictionary of records,
where the key is a unique record ID and each value is dict
"""
data_d = {}
with open(filename) as f:
reader = csv.DictReader(f)
for row in reader:
clean_row = [(k, preProcess(v)) for (k, v) in row.items()]
row_id = int(row['activity_nr'])
data_d[row_id] = dict(clean_row)
return data_d | 57dcb39dac9568024ae4be07bc0921c941d6fae3 | 6,126 |
def _get_client(app):
"""Returns a client instance for an App.
If the App already has a client associated with it, simply returns
it. Otherwise creates a new client, and adds it to the App before
returning it.
Args:
app: A Firebase App instance (or ``None`` to use the default App).
Returns:
Client: A client for the specified App instance.
Raises:
ValueError: If the app argument is invalid.
"""
return _utils.get_app_service(app, _AUTH_ATTRIBUTE, Client) | de96140ed7c15a4aa390f08a76fe7de0074730db | 6,127 |
def get_job_config_build_for_branch(**kwargs):
"""pass kwargs to JobConfig constructor"""
return JobConfig(
type=JobType.copr_build,
trigger=JobConfigTriggerType.commit,
branch="build-branch",
scratch=True,
**kwargs,
) | 0c16a16bce6a1f05ca8daf764dd2de80147c90c4 | 6,128 |
import yaml
def get_connection_string_from_config_file(cfg_src, db_cfg_key):
"""
Gets connection parameters from specified section in
a configuration file.
"""
# reading complete configuration
with open(cfg_src, 'r') as yml_file:
cfg = yaml.safe_load(yml_file)
# looking for specified connection name
for connection_cfg in cfg['connections']:
if db_cfg_key in connection_cfg:
db_cfg = connection_cfg[db_cfg_key]
# reading distinct configuration parameters
try:
db_engine = db_cfg['db_engine']
user = db_cfg['user']
password = db_cfg['password']
host = db_cfg['host']
port = db_cfg['port']
database = db_cfg['database']
except KeyError as e:
print(
"Unable to retrieve parameter '%s' "
"from configuration file." % e.args[0])
return
except Exception:
print("Unable to read configuration file")
return
# setting up connection string
conn_string = "{0}://{1}:{2}@{3}:{4}/{5}".format(
db_engine, user, password, host, port, database)
return conn_string | e2245f8e9124d36e5a373f1891590046c10a38fd | 6,129 |
from typing import Tuple
from typing import Sequence
def _decomp_0_matrices(
kak: 'cirq.KakDecomposition',
atol: float = 1e-8,
) -> Tuple[Sequence[Tuple[np.ndarray, np.ndarray]], complex]:
"""Returns the single-qubit matrices for the 0-SQRT_ISWAP decomposition.
Assumes canonical x, y, z and (x, y, z) = (0, 0, 0) within tolerance.
"""
# Pairs of single-qubit unitaries, SQRT_ISWAP between each is implied
# Only a single pair of single-qubit unitaries is returned here so
# _decomp_to_operations will not insert any sqrt-iSWAP gates in between
return [
(
kak.single_qubit_operations_after[0] @ kak.single_qubit_operations_before[0],
kak.single_qubit_operations_after[1] @ kak.single_qubit_operations_before[1],
)
], kak.global_phase | b84d65cc7076b5d294cbf7f4f6a3c3ddff7ef7d2 | 6,130 |
import math
def concave(x, m):
"""Shape function."""
assert shape_args_ok(x, m)
result = 1.0
for i in range(1, len(x) - m + 1):
result *= math.sin(x[i - 1] * math.pi / 2.0)
if m != 1:
result *= math.cos(x[len(x) - m] * math.pi / 2.0)
return correct_to_01(result) | 70020efb06f35e169041491724bd6ddc7c7a9a35 | 6,131 |
import os
def get_filenames(is_training, data_dir):
"""Return filenames for dataset."""
if is_training:
return [
os.path.join(data_dir, 'train-%05d-of-01024' % i)
for i in range(_NUM_TRAIN_FILES)]
else:
return [
os.path.join(data_dir, 'validation-%05d-of-00128' % i)
for i in range(_NUM_VAL_FILES)] | f925c9f6018ad23f97b0f84c42581857852bd4a7 | 6,132 |
def norm_img(img):
"""
normalization image
:param img: (H, W, C)
:return:
norm_img: (H, W, C)
"""
height, width, channel = img.shape
img = np.reshape(img, (height * width, channel)) # (height * width, channel)
mean = np.mean(img, axis=0, keepdims=True) # (1, channel)
center = img - mean # (height * width, channel)
var = np.sum(np.power(center, 2), axis=0, keepdims=True) / (height * width) # (1, channel)
std = np.sqrt(var) # (1, channel)
_norm_img = center / std # (height * width, channel)
_norm_img = np.reshape(_norm_img, (height, width, channel))
return _norm_img | a794ec4e096faa0efbfc9c993d9292a54f6573cc | 6,133 |
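A quick sanity check (assuming numpy is imported as np and norm_img is defined as above): after per-channel normalization every channel should have roughly zero mean and unit standard deviation.

import numpy as np

img = np.random.rand(32, 32, 3)
out = norm_img(img)
print(out.mean(axis=(0, 1)))  # ~[0, 0, 0]
print(out.std(axis=(0, 1)))   # ~[1, 1, 1]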
import scipy
def propagator_radial_diffusion(n,dim_rad,rate,wrad,lagtime,
lmax,bessel0_zeros,bessels,):
"""calculate propagator for radial diffusion as matrix exponential
n -- dim_trans, dimension transition matrix, usually number of bins in z-direction
dim_rad -- dimension transition matrix, always equal to len(redges)
rate -- rate matrix for 1-D diffusion in z-direction, in [1/dt]
wrad -- ln Drad, radial diffusion coefficient, dimension n
Drad = exp(wrad), in [dr**2/dt]
lagtime -- should be in units [dt]
bessel0_zeros -- first lmax zeros, no unit
bessels -- dimension lmax x dim_rad, no unit, in unit 'per r-bin'
rate_l -- rate matrix including sink equation, in [1/dt]
propagator -- no unit, is per r-bin per z-bin"""
rmax = np.float64(dim_rad) # in units [dr]
# initialize arrays
rate_l = np.zeros((n,n),dtype=np.float64) # N x N
propagator = np.zeros((dim_rad,n,n),dtype=np.float64) # dim_rad x N x N
# set up sink
sink = np.zeros((n),dtype=np.float64) # N
# loop over l (index of Bessel function)
for l in range(lmax):
sink = np.exp(wrad)*bessel0_zeros[l]**2/rmax**2 # sink term D_par(i) * (b_l)**2
# in units np.exp(wrad) [dr**2/dt] / rmax**2 [dr**2], so in units [1/dt]
rate_l[:,:] = rate[:,:] # take rate matrix for 1-D diffusion
rate_l.ravel()[::n+1] -= sink # and add sink term
mat_exp = scipy.linalg.expm(lagtime*rate_l) # matrix exponential, no unit
# increment propagator by solution of sink equation for each l
# propagator to arrive in radial bin k, contribution from Bessel function l
# bessels is 'per r-bin', no unit
# mat_exp is 'per z-bin', no unit
# so propagator is 'per r-bin per z-bin', no unit
for k in range(dim_rad):
propagator[k,:,:] += bessels[l,k] * mat_exp[:,:] # no unit
# TODO normalize? some probability might flow away after long times
#propagator /= np.sum(np.sum(propagator,axis=0),axis=0)
return propagator | 9fdfa7001ca319fcf57d5e80c492de73bca03b85 | 6,134 |
def convert_examples_to_features(examples, use_label):
"""Loads a data file into a list of `InputBatch`s."""
features = []
line_tags = []
for (ex_index, example) in enumerate(examples):
if use_label:
labels = example.labels
else:
labels = ['O'] * len(example.units)
samples = []
context, tokens, predict_mask, label_ids = [], [], [], []
for i, w in enumerate(example.units):
if w == '[MASK]':
sub_words = ['[MASK]']
else:
sub_words = tokenizer.tokenize(w)
if not sub_words:
sub_words = ['[UNK]']
tokens.extend(sub_words)
predict_mask.append(1)
predict_mask.extend([0] * (len(sub_words) - 1))
label_ids.append(label_map[labels[i]])
label_ids.extend([0] * (len(sub_words) - 1))
while len(context) + len(tokens) >= max_seq_length - 2:
l = max_seq_length - len(context) - 2
samples.append(
[['[CLS]'] + context + tokens[:l] + ['[SEP]'], [0] * (len(context) + 1) + predict_mask[:l] + [0],
[0] * (len(context) + 1) + label_ids[:l] + [0]])
if not context:
line_tags.append(1)
else:
line_tags.append(0)
context = tokens[max(0, l - max_seq_length // 2):l]
tokens, predict_mask, label_ids = tokens[l:], predict_mask[l:], label_ids[l:]
if sum(predict_mask):
samples.append([['[CLS]'] + context + tokens + ['[SEP]'], [0] * (len(
context) + 1) + predict_mask + [0], [0] * (len(context) + 1) + label_ids + [0]])
if not context:
line_tags.append(1)
else:
line_tags.append(0)
for s in samples:
input_ids = tokenizer.convert_tokens_to_ids(s[0])
input_mask = [1] * len(input_ids)
padding_length = max_seq_length - len(input_ids)
zero_padding = [0] * padding_length
input_ids += zero_padding
input_mask += zero_padding
predict_mask = s[1] + zero_padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(predict_mask) == max_seq_length
if use_label:
label_ids = s[2] + zero_padding
assert len(label_ids) == max_seq_length
one_hot_labels = np.eye(
len(label_map), dtype=np.float32)[label_ids]
else:
one_hot_labels = None
features.append(InputFeatures(input_ids=input_ids, input_mask=input_mask,
predict_mask=predict_mask, one_hot_labels=one_hot_labels))
assert len(examples) == sum(line_tags), logger.error(
'{} != {}'.format(len(examples), sum(line_tags)))
return features, line_tags | 7720a79b7404e0d4cc340ae5ea78084b64115f92 | 6,135 |
def broadcast_to_rank(t, rank, axis = -1):
"""Appends dimensions to tf.Tensor `t` at axis `axis` to match rank `rank`."""
rank_t = t.shape.rank # Assumes ranks are known at compile time (static).
for _ in range(rank - rank_t):
t = tf.expand_dims(t, axis=axis)
return t | 8a57a1d71f92aefc6015481b358b65f565af1b00 | 6,136 |
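For example (assuming TensorFlow is imported as tf), a (batch, time) mask can be padded with trailing singleton axes so it broadcasts against a rank-4 feature tensor:

import tensorflow as tf

mask = tf.ones([2, 5])                      # (batch, time)
expanded = broadcast_to_rank(mask, rank=4)  # appends axes at the end
print(expanded.shape)                       # (2, 5, 1, 1)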
from functools import wraps
def operator(func):
"""
Helper decorator that rewrites a function so that
it returns another function (a stream operator) instead.
"""
@wraps(func)
def wrapper(*args, **kwargs):
def operator(stream):
return func(stream, *args, **kwargs)
return operator
return wrapper | cd2873954ee9dff003d2481d296c5be8740675c8 | 6,137 |
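A hypothetical usage sketch: decorating a plain function over a stream turns it into a curried operator that is configured first and applied to the stream later.

@operator
def take(stream, n):
    """Keep only the first n items of the stream."""
    return stream[:n]

first_three = take(3)                # returns an operator expecting the stream
print(first_three([1, 2, 3, 4, 5]))  # [1, 2, 3]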
def json(body, charset="utf-8", **kwargs):
"""Takes JSON formatted data, converting it into native Python objects"""
return json_converter.loads(text(body, charset=charset)) | e2cabfca983abb96018f51ea3c09826e033227bb | 6,138 |
def read_corpus(file_path, encoding=ENCODING, **kwargs):
"""
Create a Linguistica object with a corpus data file.
:param file_path: path of input corpus file
:param encoding: encoding of the file at *file_path*. Default: ``'utf8'``
:param kwargs: keyword arguments for parameters and their values.
"""
return Lexicon(file_path=file_path, wordlist_file=False, encoding=encoding,
**kwargs) | 28f8303e0b94e8df9b6d9a33aca14fa62b15f6e8 | 6,139 |
import random
def sample_pagerank(corpus, damping_factor, n):
"""
Return PageRank values for each page by sampling `n` pages
according to transition model, starting with a page at random.
Return a dictionary where keys are page names, and values are
their estimated PageRank value (a value between 0 and 1). All
PageRank values should sum to 1.
"""
pageRanks = {page: 0 for page in corpus}
# Randomly select a page to start
currPage = random.choice(list(corpus.keys()))
for _ in range(n):
pageRanks[currPage] += 1
model = transition_model(corpus, currPage, damping_factor)
currPage = random.choices(list(model.keys()), weights=list(model.values()), k=1)[0]
return {page: rank / n for page, rank in pageRanks.items()} | 5c9f66aaf72c8330c2ee0fcd2402bf613c4eb9b7 | 6,140 |
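The snippet relies on a transition_model helper that is not shown; a minimal sketch of one (hypothetical: follow a link with probability damping_factor, otherwise jump uniformly) lets the sampler run end to end on a toy corpus.

import random

def transition_model(corpus, page, damping_factor):
    """Probability distribution over which page to visit next."""
    model = {p: (1 - damping_factor) / len(corpus) for p in corpus}
    links = corpus[page] or set(corpus.keys())  # dangling pages link everywhere
    for linked in links:
        model[linked] += damping_factor / len(links)
    return model

corpus = {"a": {"b"}, "b": {"a", "c"}, "c": {"a"}}
print(sample_pagerank(corpus, damping_factor=0.85, n=10_000))  # ranks sum to ~1.0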
import argparse
import sys
def parse_args() -> argparse.Namespace:
"""
Parse program arguments
:return: Parser values
"""
parser = argparse.ArgumentParser(description="")
parser.add_argument("-a", action="store_true")
parser.add_argument("-c", action="store_true")
parser.add_argument("-x", action="store_true")
parser.add_argument("-z", action="store_true")
parser.add_argument("-s", metavar="SET", nargs="*", type=str)
parser.add_argument("--skip-keys", action="store_true")
parser.add_argument("--skip-sets", metavar="SET", nargs="*", type=str)
parser.add_argument("--skip-cache", action="store_true")
# Ensure there are args
if len(sys.argv) < 2:
parser.print_usage()
sys.exit(1)
return parser.parse_args() | 9d86d37d94af5c8ff128c4da8226f15728b0da70 | 6,141 |
import networkx
def compute_participants(matches, challonge_data):
"""Compute series participants.
Iterate all matches and players to create a graph.
Apply connected components algorithm to resolve distinct
participant groups over all matches.
Sort participant groups by number of wins to correlate
with Challonge participant data (which also includes number
of wins).
Note that edge cases exist that are not covered. For example,
teams sometimes field a 1v1 player for a single match. If neither
player in the 1v1 match takes part in any other matches,
the players can't be placed in a participant group and their win
is not counted. There are two consequences:
1. Not counting a win may make the number of wins between
participants even, in which case we don't know which
participant group won the series.
2. Not grouping a player means the participant player list
will be incomplete.
"""
graph = networkx.DiGraph()
win_id = 0
platform_ids = []
name_to_user = {}
for match in matches:
# Record a win
win_id += 1
graph.add_node(win_id, type='win')
# Record platform ID
platform_ids.append(match['platform_id'])
# Add node for each player
for player in match['players']:
name_to_user[player['name']] = player['user_id']
graph.add_node(player['name'], type='player')
# Can happen for incomplete matches
if match['winning_team'] is None:
continue
# Connect winning players to recorded win
for player in match['winning_team']['players']:
graph.add_edge(player['name'], win_id)
# Connect all players on the same team
for team in match['teams']:
for i in team['players']:
for j in team['players']:
graph.add_edge(i['name'], j['name'])
mgz_data = [{
'wins': len([node for node in g if graph.nodes[node]['type'] == 'win']),
'players': [node for node in g if graph.nodes[node]['type'] == 'player']
} for g in networkx.weakly_connected_components(graph)]
return [{
'user_ids': [name_to_user[n] for n in mgz['players']],
'winner': challonge['winner'],
'name': challonge['name'],
'score': challonge['score'],
'platform_id': platform_ids[0]
} for mgz, challonge in zip(
sorted(mgz_data, key=lambda k: -1 * k['wins']),
sorted(challonge_data, key=lambda k: -1 * k['score'] if k['score'] else 0)
)] | a715773d5edd3b4d6852096c665070e64bef1165 | 6,142 |
def write_haiku(word_array, is_ipv6):
"""Return the beautiful haiku"""
# String to place in schema to show word slot.
octct = 'OCTET'
schema = get_schema(is_ipv6, octct)
# Replace each instance of 'octet' in the schema with a word from
# the encoded word array.
for i in range(len(word_array)):
for j in range(len(schema)):
if schema[j] == octct:
schema[j] = word_array[i]
break
# Capitalize appropriate words.
schema = capitalize_haiku(schema)
haiku = ''.join(schema)
return haiku | b51dc7cd1cca642eb135c48952bbc2ca74faf5e1 | 6,143 |
import pandas as pd
def import_data():
"""
Utility function to import the summary tsv ready for usage in PyMOL
col_types = {
'sift_score': float, 'sift_median': float, 'total_energy': float,
'interaction_energy': float, 'diff_interaction_energy': float,
'diff_interface_residues': float, 'freq': float
}
return pd.read_csv('data/output/summary.tsv', sep='\t', index_col=False,
dtype=col_types, low_memory=False) | 1b116d74ecba83658d05ea5dbda66b15175f3fdb | 6,144 |
import datetime
def get_current_datetime():
"""
Get the current datetime.
Note: This function is intended to be mocked in testing
Return:
time(datetime.datetime): current datetime
"""
return datetime.datetime.now(current_app.config['TIMEZONE']) | 6e7986eb6029e9c2be66019d7e9f35a79580c742 | 6,145 |
def adapt_all(iterable, to_cls):
"""
Returns a list of items from adapting each item in iterable to `cls`
If `iterable` is `None`, an empty list will be returned.
"""
if iterable is None:
return []
return [adapt(obj, to_cls) for obj in iterable] | a7c4d0adcce144223929081f47512f9d673efb28 | 6,146 |
import re
import os
import yaml
def get_config(seed, shot):
"""
Uses a given base 1-shot config to replicate it for 'shot' and 'seed'.
Changes dataset training split, cfg.OUTPUT_DIR and iteration number and steps accordingly.
"""
base_config_path: str = args.base_config
assert '1shot' in base_config_path
dataset_mode = 'novel' if '_novel' in base_config_path else 'all'
dataset_config = DatasetConfigs('coco' if args.coco else None, dataset_mode, args.method,
args.num_gpus, args.imgs_per_gpu, is_correct_train_iters='correct' in base_config_path)
seed_str = f'seed{seed}'
dataset_split = re.findall('split.', base_config_path)
assert len(dataset_split) <= 1
dataset_split = dataset_split[0] if dataset_split else ''
output_cfg_name = get_output_name_from_base(base_config_path, shot)
model_output_root = os.path.join(args.root, dataset_config.checkpoint_dir, dataset_split, seed_str)
os.makedirs(model_output_root, exist_ok=True)
output_dir = os.path.join(model_output_root,
os.path.splitext(output_cfg_name)[0])
result_config = load_yaml_file(base_config_path)
result_config = _fill_config(result_config, shot, dataset_split, seed, dataset_config, output_dir)
print(yaml.dump(result_config))
dry_run_config = not args.no_dry_run or args.skip_config_write
output_cfg_fullpath = _save_config(dataset_config.config_dir, dataset_split,
seed_str, output_cfg_name, result_config, dry_run_config)
return output_cfg_fullpath, result_config | d8cac8518a600d6f2900f63dc1320cf234341661 | 6,147 |
import torch
def log_sum_exp_vb(vec, m_size):
"""
calculate log of exp sum
args:
vec (batch_size, vanishing_dim, hidden_dim) : input tensor
m_size : hidden_dim
return:
batch_size, hidden_dim
"""
_, idx = torch.max(vec, 1) # B * 1 * M
max_score = torch.gather(vec, 1, idx.view(-1, 1, m_size)).view(-1, 1, m_size) # B * M
return max_score.view(-1, m_size) + torch.log(torch.sum(torch.exp(vec - max_score.expand_as(vec)), 1)).view(-1, m_size)
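A quick equivalence check (assuming torch is imported as in the snippet): the result should match torch.logsumexp over the vanishing dimension.

vec = torch.randn(4, 6, 8)            # (batch, vanishing_dim, hidden_dim)
out = log_sum_exp_vb(vec, m_size=8)   # (batch, hidden_dim)
print(torch.allclose(out, torch.logsumexp(vec, dim=1), atol=1e-5))  # True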
def C_fun_gen(fractions, speciesindices, y, time):
"""
Calculate the distribution of carbon functional groups as a percent of
total carbon.
Parameters
----------
fractions : list
The lumped phases that you want to include (as specified
in MW['species'][1], options are any subset of
['g','s','lt','t','char','H20','CO','CO2'] or ['initial']
for the case when you want to determine the initial
distribution before pyrolysis)
speciesindices : dict
dictionary from `load_results()` where species names are
keys and values are the index in `y` that corresponds to
that species
y : numpy array
a matrix with the concentrations of each species in the
kinetic scheme for every time in `t` (mol/L)
time : int
the index of the timepoint that you want the results for
Returns
-------
C_fun : numpy array
the distribution of carbon functional groups as a percent of total
carbon. The order of the elements in the array is:
carbonyl, aromatic C-O, aromatic C-C, aromatic C-H, aliphatic C-O,
aromatic methoxyl, aliphatic C-C
"""
C_fun = np.zeros(7)
ind = speciesindices
for species in MW:
if fractions == ['initial']:
time = 0
if y[time, speciesindices[species]] != 0:
# moles of functional group/L (order from Return docstring)
C_fun[0] += y[time, ind[species]] * MW[species][4][0]
C_fun[1] += y[time, ind[species]] * MW[species][4][1]
C_fun[2] += y[time, ind[species]] * MW[species][4][2]
C_fun[3] += y[time, ind[species]] * MW[species][4][3]
C_fun[4] += y[time, ind[species]] * MW[species][4][4]
C_fun[5] += y[time, ind[species]] * MW[species][4][5]
C_fun[6] += y[time, ind[species]] * MW[species][4][6]
else:
if MW[species][1] in set(fractions):
C_fun[0] += y[time, ind[species]] * MW[species][4][0]
C_fun[1] += y[time, ind[species]] * MW[species][4][1]
C_fun[2] += y[time, ind[species]] * MW[species][4][2]
C_fun[3] += y[time, ind[species]] * MW[species][4][3]
C_fun[4] += y[time, ind[species]] * MW[species][4][4]
C_fun[5] += y[time, ind[species]] * MW[species][4][5]
C_fun[6] += y[time, ind[species]] * MW[species][4][6]
C_fun /= C_fun.sum()
return C_fun | 28704b470fd919d998fcd8704b125827226fe151 | 6,149 |
def get_branch(repo):
""" Retrieve the current branch of a dulwich repository
"""
refnames, sha = repo.refs.follow(b"HEAD")
if len(refnames) != 2:
LOGGER.debug("Got more than two refnames for HEAD!")
for ref in refnames:
if ref != b"HEAD":
return to_utf8(ref) | d1c5dbcede16e5b1fcd1e078457efae29643b6fd | 6,150 |
def _sigmoid(x):
"""
Sigmoid function that smoothly limits values between 0.0 and 1.0
:param x: Numpy array with float values that are to be limited.
:return: Numpy array with float values between 0.0 and 1.0
"""
return 1.0 / (1.0 + np.exp(-x)) | 770875ba82df9d4ac8eb6d403527cf0fb62d3990 | 6,151 |
from typing import Dict
def inherit_n_genes_prob(n, n_father, n_mother, mutation_prob) -> Dict:
"""Returns dictionary with distribution of conditional probability of
inherited genes given that father has n_father genes and mother has
n_mother genes, taking into account probability of mutations."""
# Probabily distributions:
# key 0 or False: probability of not inheriting the gene from parent
# key 1 or True: probability of inheriting the gene from parent
probs_f: Dict[bool, float] = p_not_p(prob_inherit(n_father, mutation_prob))
probs_m: Dict[bool, float] = p_not_p(prob_inherit(n_mother, mutation_prob))
return (
# Prob to not inherit at all
probs_f[0] * probs_m[0] if n == 0
# Prob to inherit from one parent only
else probs_f[1] * probs_m[0] + probs_f[0] * probs_m[1] if n == 1
# Prob to inherit from both parents
else probs_f[1] * probs_m[1]
) | 0481244db107f6623aa109212e74be8b719f5bb8 | 6,152 |
async def get_metrics_address_counts_summary():
"""
Latest summary of address counts.
"""
qry = f"""
select col
, latest
, diff_1d
, diff_1w
, diff_4w
, diff_6m
, diff_1y
from mtr.address_counts_by_minimal_balance_change_summary;
"""
async with CONNECTION_POOL.acquire() as conn:
rows = await conn.fetch(qry)
return [dict(r) for r in rows] | c22d6c3442833743559c42e4be59a25ab073c03b | 6,153 |
from typing import Dict
from typing import Any
async def processor(db, document: Dict[str, Any]) -> Dict[str, Any]:
"""
Process a history document before it is returned to the client.
:param db: the application object
:param document: the document to process
:return: the processed document
"""
return await apply_transforms(
virtool.utils.base_processor(document),
[AttachUserTransform(db, ignore_errors=True)],
) | 89de3dd255923b3eca6444ee4410980e857aa8e1 | 6,154 |
def _unit_scale_traindata(X, xmins, xmaxs):
"""If xmax > xmin, unit-scale the training data, else do nothing
Parameters
----------
x : ndarray of shape (m, n)
xmins : ndarray of shape (n, )
xmaxs : ndarray of shape (n, )
Returns
-------
result : ndarray of shape (m, n)
Notes
-----
Training data must fit inside a rectangular box aligned with each dimension
"""
X = jnp.atleast_2d(X)
xmins = jnp.atleast_1d(xmins)
xmaxs = jnp.atleast_1d(xmaxs)
msk = xmins == xmaxs
norm = jnp.where(msk, 1.0, xmaxs - xmins)
offset = jnp.where(msk, 0.0, xmins)
return (X - offset) / norm | 2778c7a9d7b6e23775df2354b92057e6a5511dc5 | 6,155 |
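For example (assuming jax.numpy is imported as jnp, as the snippet does), a feature whose min equals its max is passed through unchanged while the other features are scaled into [0, 1]:

import jax.numpy as jnp

X = jnp.array([[0.0, 5.0], [5.0, 5.0], [10.0, 5.0]])
xmins = jnp.array([0.0, 5.0])
xmaxs = jnp.array([10.0, 5.0])
print(_unit_scale_traindata(X, xmins, xmaxs))
# column 0 becomes [0.0, 0.5, 1.0]; column 1 (min == max) stays [5.0, 5.0, 5.0]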
import subprocess
def get_port_properties(port):
"""Retrieves common port properties from its package.sh file.
Returns:
dict: keys are values from PORT_PROPERTIES, values are from the package.sh file
"""
props = {}
for prop in PORT_PROPERTIES:
res = subprocess.run(f"cd {port}; exec ./package.sh showproperty {prop}", shell=True, capture_output=True)
if res.returncode == 0:
props[prop] = res.stdout.decode('utf-8').strip()
else:
print((
f'Executing "./package.sh showproperty {prop}" script for port {port} failed with '
f'exit code {res.returncode}, output from stderr:\n{res.stderr.decode("utf-8").strip()}'
))
props[prop] = ''
return props | 116828540f37e0a3189092ed985ad0f88ed6534a | 6,156 |
import numpy as np
import hdbscan
import matplotlib.pyplot as plt
import seaborn as sns
def run_HDBSCAN_subclustering(df=None, target=None, cluster_col="Cluster", soft_clustering=True,
min_cluster_size=100, min_samples=10,
cluster_selection_epsilon=0.0, cluster_selection_method='eom',
draw_condensed_tree=True, core_dist_n_jobs=None):
"""An implement of HDBSCAN (CPU version) for further clustering of a subcluster.
Parameters
----------
df: pd.DataFrame
A DataFrame with columns X, Y, and clusters.
soft_clustering: boolean
Use soft clustering or not. Default=True.
min_cluster_size: int
min_cluster_size in HDBSCAN.
min_samples: int
min_samples in HDBSCAN
cluster_selection_epsilon: float
cluster_selection_epsilon in HDBSCAN
cluster_selection_method: str
cluster_selection_method in HDBSCAN. Should be "eom" or "leaf".
draw_condensed_tree: boolean
Draw the condensed tree of HDBSCAN or not.
core_dist_n_jobs:
core_dist_n_jobs in HDBSCAN.
Returns
-------
df: pd.DataFrame
The input DataFrame with the target cluster re-clustered into new subcluster ids.
model: hdbscan.HDBSCAN
The fitted HDBSCAN model.
df = df.copy()
max_cluster_id = df[cluster_col].max()
df1 = df[df[cluster_col]==target].copy()
X = np.stack([df1["X"], df1["Y"]], axis=1)
model = hdbscan.HDBSCAN(min_cluster_size=min_cluster_size, min_samples=min_samples, cluster_selection_method=cluster_selection_method, cluster_selection_epsilon=cluster_selection_epsilon, core_dist_n_jobs=core_dist_n_jobs, prediction_data=True)
yhat = model.fit(X)
soft_clusters = hdbscan.all_points_membership_vectors(yhat)
labels = [np.argmax(x) for x in soft_clusters]
df1[cluster_col] = [max_cluster_id + i + 1 for i in labels ] # re-number lables to make it human-readable
df.loc[df1.index, cluster_col] = df1[cluster_col].tolist()
print("HDBSCAN cluster number: {}".format(df["Cluster"].max()-1))
print(df.groupby(cluster_col)[cluster_col].count())
if draw_condensed_tree == True:
fig, ax = plt.subplots()
model.condensed_tree_.plot(select_clusters=True, selection_palette=sns.color_palette())
plt.savefig("Condensed_tree_subcluster.pdf")
return df, model | 0ce5c53a390fec6b40addd6182c9ef36ed4047fc | 6,157 |
def extractive_explanations(
dataset,
prefix='explain sentiment',
input_feature='review',
output_classes=('negative', 'positive'),
drop_explanations=False
):
"""Preprocessor to handle extractive rationale prediction datasets.
The preprocessor expects a dataset with the provided 'input_feature', a label,
and a list of evidences. E.g. the movie rationale dataset consists of the
following features.
{
review: 'This is a bad movie. Skip it.'
label: 0,
evidences: ['bad movie', 'Skip it']
}
The example will be transformed to the following format by the preprocessor:
{
inputs: 'explain sentiment review: This is a bad movie. Skip it.'
targets: 'NEG because bad movie explanation: Skip it'
}
Args:
dataset: a tf.data.Dataset to process.
prefix: str, prefix to prepend to the inputs.
input_feature: str, feature name in input dataset.
output_classes: list of output classes in the input dataset. Defaults to
['negative', 'positive'] for the movie reviews dataset.
drop_explanations: bool, whether or not to drop explanations.
Returns:
a tf.data.Dataset
"""
if output_classes is None:
output_classes = ['negative', 'positive']
def my_fn(x):
"""Helper function to transform a rationale dataset to inputs/targets."""
input_label = tf.strings.join([input_feature, ':'], separator='')
inputs = tf.strings.join(
[prefix, input_label, x[input_feature]], separator=' ')
class_label = tf.gather(output_classes, x['label'])
if drop_explanations:
targets = class_label
else:
targets = _explanation_targets(class_label, x['evidences'])
return {'inputs': inputs, 'targets': targets}
return dataset.map(my_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE) | c1549279cbb676ee45287afe99f1f94410c27b62 | 6,158 |
def corr_weighted_kendalltau(top_list_prev, top_list, use_fast=True):
"""Compute weighted Kendall's Tau correlation (based on custom implementation!).
NOTE: Lists are DataFrame columns AND they must be sorted according to their value!!!"""
# it is irrelevant whether we compute kendall for ranks or scores.
list_a, list_b = proc_corr(top_list_prev, top_list)
if len(list_a) != len(list_b):
raise RuntimeError("The length of 'list_a' and 'list_b' must be the same!")
if use_fast:
return [fast_weighted_kendall(list_a, list_b)[1]]
else:
rank_list_a = tiedrank(list_a)
rank_list_b = tiedrank(list_b)
return [computeWKendall(rank_list_a,rank_list_b,ranked_input=True)[1]] | 35b473040508561798831343d770acabd97cb76e | 6,159 |
import datetime
import random
def generate_processes_by_exposure(exposure):
""" Creates a simulated process based on an exposure.
Arguments:
exposure {object} -- Exposure model
Raises:
ValueError -- raised when there is no prior process
with the exposure's flavor.
Returns:
object -- Process model
"""
flavor = exposure.flavor
process = qlf_models.get_last_process_by_flavor(
flavor, jobs_isnull=False)
if not process:
raise ValueError(
'There is no process with {} flavor.'.format(flavor)
)
process.exposure_id = exposure.exposure_id
process.id = None
tdate = datetime.datetime.now()
tdate += datetime.timedelta(minutes=random.randint(1, 5))
process.end = tdate
process.save()
return process | a3a335184fbf9c51e47210ac22fd4d4e8a8a6aa4 | 6,160 |
import copy
def cross_val_confusion(classifier, X, y, cv=None):
"""
Evaluate confusion matrix and score from each fold of cross validation
Parameters:
----------
classifier: classifier object
The object used to fit the data.
X[ndarray]: shape=(n_sample, n_feature)
y[ndarray]: shape=(n_sample,)
cv[int]: the number of folds of the cross validation
Returns:
-------
conf_ms[list]: confusion matrices of the folds
accuracies[list]: accuracies of the folds
"""
assert getattr(classifier, "_estimator_type", None) == "classifier", \
"Estimator must be a classifier!"
# calculate CV metrics
conf_ms = []
accuracies = []
classifier = copy.deepcopy(classifier)
skf = StratifiedKFold(n_splits=cv)
for train_indices, test_indices in skf.split(X, y):
# fit and prediction
classifier.fit(X[train_indices], y[train_indices])
y_preds = classifier.predict(X[test_indices])
# calculate confusion matrix and accuracy
conf_m = confusion_matrix(y[test_indices], y_preds)
acc = np.sum(conf_m.diagonal()) / np.sum(conf_m)
# collection
conf_ms.append(conf_m)
accuracies.append(acc)
return conf_ms, accuracies | bbdbed0bc18b7ac201f2933e9cff10eab19d5a75 | 6,161 |
import asyncio
async def async_unload_entry(hass: HomeAssistantType, entry: ConfigEntry):
"""Unload Synology DSM sensors."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, platform)
for platform in PLATFORMS
]
)
)
if unload_ok:
entry_data = hass.data[DOMAIN][entry.unique_id]
entry_data[UNDO_UPDATE_LISTENER]()
await entry_data[SYNO_API].async_unload()
hass.data[DOMAIN].pop(entry.unique_id)
return unload_ok | 876aceeaa113a6275a60328f6f00c0d0c4c0f2e1 | 6,162 |
import os
def transfer_segm_labels(verts_before, mesh, dir_path, name):
"""
Save segmentation labels for mesh after scan imitation
"""
verts_after = utils.get_vertices_np(mesh)
verts_mapping = utils.match_vert_lists(verts_after, verts_before)
# print(os.path.join(dir_path, name + '_sim_segmentation.txt'))
with open(os.path.join(dir_path, name + '_sim_segmentation.txt'), 'r') as f:
vert_labels = [line.rstrip() for line in f] # remove \n
scan_labels = [vert_labels[i] for i in verts_mapping]
filepath = os.path.join(dir_path, name + '_scan_imitation_segmentation.txt')
with open(filepath, 'w') as f:
for panel_name in scan_labels:
f.write("%s\n" % panel_name)
return 0 | 2076062e084b85701c4bdd5879ca840ee736cb7b | 6,163 |
import pathlib
def confirm_control_contains(trestle_dir: pathlib.Path, control_id: str, part_label: str, seek_str: str) -> bool:
"""Confirm the text is present in the control markdown in the correct part."""
control_dir = trestle_dir / ssp_name / control_id.split('-')[0]
md_file = control_dir / f'{control_id}.md'
responses, _ = ControlIOReader.read_all_implementation_prose_and_header(md_file)
if part_label not in responses:
return False
prose = '\n'.join(responses[part_label])
return seek_str in prose | b78cd7a7ef435fcee483d98fe2199ba90c905833 | 6,164 |
import random
def describe_current_subtask(subtask, prefix=True):
"""
Make a 'natural' language description of subtask name
"""
to_verb = {"AnswerQuestion": "answering a question",
"ArmGoal": "moving my arm",
"DemoPresentation": "giving a demo",
"Find": "finding",
"Follow": "following",
"Guide": "guiding",
"GripperGoal": "moving my gripper",
"HandOver": "handing something over",
"Inspect": "inspecting",
"LookAt": "looking",
"NavigateTo": "navigating",
"PickUp": "picking up",
"Place": "placing",
"ResetWM": "resetting my world model",
"Say": "speaking",
"SendPicture": "sending a picture",
"TurnTowardSound": "turning towards a sound"}
description = to_verb.get(subtask, subtask + "ing")
if prefix:
description = random.choice(["I'm busy", "I'm"]) + " " + description
return description | 628c699201c26242bd72c6066cba07cce54b14ca | 6,165 |
def addprint(x: int, y: int):
"""Print and "added" representation of `x` and `y`."""
expr = x + y
return "base addprint(x=%r, y=%r): %r" % (x, y, expr) | e3f735afc1d4826a1af7210c3cec88c8b8c87dfe | 6,166 |
import re
def parse_date(deadline_date):
"""
Given a date in the form MM/DD/YY or MM/DD/YYYY, returns
the integers MM, DD, and YYYY (or YY) in this order.
"""
deadline_split = re.split('\\/|\\-', deadline_date)
return int(deadline_split[0]), int(deadline_split[1]), int(deadline_split[2]) | 0ded6bccce8437aad61cfa5ff121c5ed0595849b | 6,167 |
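For example, both slash- and dash-separated dates split into the same integer triple:

print(parse_date("12/31/2021"))  # (12, 31, 2021)
print(parse_date("01-05-22"))    # (1, 5, 22)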
import requests
def jyfm_tools_position_fund_direction(
trade_date="2020-02-24", indicator="期货品种资金流向排名", headers=""
):
"""
Jiaoyifamen tools - fund analysis - fund flow
https://www.jiaoyifamen.com/tools/position/fund/?day=2020-01-08
:param trade_date: the trading day to query
:type trade_date: str
:param indicator: "期货品种资金流向排名" (fund flow ranking by futures variety) or "期货主力合约资金流向排名" (fund flow ranking by dominant contract)
:type indicator: str
:param headers: headers with cookies
:type headers: dict
:return: fund flow data for the given trading day
:rtype: pandas.DataFrame
"""
params = {
"day": trade_date,
}
url = "https://www.jiaoyifamen.com/tools/position/fund/"
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
if indicator == "期货品种资金流向排名":
return pd.DataFrame(
[
[data_json["tradingDay"]] * len(data_json["flowCategory"]),
data_json["flowCategory"],
data_json["flowValue"],
],
index=["date", "symbol", "fund"],
).T
else:
return pd.DataFrame(
[
[data_json["tradingDay"]] * len(data_json["dominantFlowCategory"]),
data_json["dominantFlowCategory"],
data_json["dominantFlowValue"],
],
index=["date", "symbol", "fund"],
).T | 10cfb29f1705460916fa93542ba72a22b3cdbf70 | 6,168 |
def generate_points_in_areas(gdf, values, points_per_unit=1, seed=None):
"""
Create a GeoSeries of random points in polygons.
Parameters
----------
gdf : GeoDataFrame
The areas in which to create points
values : str or Series
The [possibly scaled] number of points to create in each area
points_per_unit : numeric, optional
The rate to scale the values in point generation.
seed : int, optional
A random seed
Returns
-------
GeoSeries
"""
geometry = gdf.geometry
if isinstance(values, str) and values in gdf.columns:
values = gdf[values]
new_values = (values / points_per_unit).astype(int)
g = gpd.GeoDataFrame(data={'vals': new_values}, geometry=geometry)
a = g.apply(lambda row: tuple(generate_random_points_in_polygon(row['geometry'], row['vals'], seed)), 1)
b = gpd.GeoSeries(a.apply(pd.Series).stack(), crs=geometry.crs)
b.name = 'geometry'
return b | 14232540c4bee8c9863b2af4f3f2f200bb261098 | 6,169 |
def montager(xi, col=None, row=None, aspect=1.4, transpose=False, isRGB=False,
flipx=False, flipy=False, flipz=False, output_grid_size=False):
""" tile a 3D or 4D image into a single 2D montage
Parameters
----------
xi : ndarray
image data to montage
col : int, optional
number of columns in the montage
row : int, optional
number of rows in the montage
aspect : float, optional
desired aspect ratio of the montage
transpose : bool, optional
transpose each image slice in the montage? (transposes first two
dimensions of the input)
isRGB : bool, optional
set True if the input is RGB
flipx : bool, optional
reverse x-axis indices?
flipy : bool, optional
reverse y-axis indices?
flipz : bool, optional
reverse z-axis indices?
output_grid_size : bool, optional
if true, the number of rows and columns will also be returned
Returns
-------
xo : ndarray
2D ndarray containing the montage
Notes
-----
Any axis flips are applied prior to transposition
added RGB support, aspect ratio, transpose flag and axis flip flags
adapted from: montager.m (Jeff Fessler's IRT toolbox)
"""
# TODO?: also allow RGBA axis to be the first rather than last
# TODO: add option to add a border between the cells
# TODO: allow >4D by stacking all remaining dimensions along the 4th
if isRGB: # call montager for R,G,B channels separately
if xi.shape[-1] < 3 or xi.shape[-1] > 4:
raise Exception(
"if isRGB=True, the last dimension must be size 3 or 4")
if xi.shape[-1] == 4:
has_alpha = True
else:
has_alpha = False
xiR = xi[..., 0]
xiG = xi[..., 1]
xiB = xi[..., 2]
xoR, row, col = montager(xiR, col=col, row=row, aspect=aspect,
transpose=transpose, isRGB=False,
flipx=flipx, flipy=flipy, flipz=flipz,
output_grid_size=True)
xoR = xoR[:, :, None]
xoG = montager(xiG, col=col, row=row, aspect=aspect,
transpose=transpose, isRGB=False, flipx=flipx,
flipy=flipy, flipz=flipz,
output_grid_size=False)
xoG = xoG[:, :, None]
xoB = montager(xiB, col=col, row=row, aspect=aspect,
transpose=transpose, isRGB=False, flipx=flipx,
flipy=flipy, flipz=flipz, output_grid_size=False)
xoB = xoB[:, :, None]
if has_alpha:
xiA = xi[..., 3]
xoA = montager(xiA, col=col, row=row, aspect=aspect,
transpose=transpose, isRGB=False, flipx=flipx,
flipy=flipy, flipz=flipz, output_grid_size=False)
xoA = xoA[:, :, None]
xo = np.concatenate((xoR, xoG, xoB, xoA), axis=2)
else:
xo = np.concatenate((xoR, xoG, xoB), axis=2)
if output_grid_size:
return (xo, row, col)
else:
return xo
if xi.ndim > 4:
print('ERROR in %s: >4D not done' % __name__)
if xi.ndim == 4:
if flipx:
xi = xi[::-1, :, :, :]
if flipy:
xi = xi[:, ::-1, :, :]
if flipz:
xi = xi[:, :, ::-1, :]
if not transpose:
xi = np.transpose(xi, axes=(1, 0, 2, 3))
        (nx, ny, n3, n4) = xi.shape
        nz = n3 * n4
        # for 4D input, tile with one column per slice of the 3rd dimension
        col = n3
        xi = np.reshape(xi, (nx, ny, nz), order='F')
elif xi.ndim == 3:
if flipx:
xi = xi[::-1, :, :]
if flipy:
xi = xi[:, ::-1, :]
if flipz:
xi = xi[:, :, ::-1]
if not transpose:
xi = np.transpose(xi, axes=(1, 0, 2))
(nx, ny, nz) = xi.shape
else: # for 1D or 2D case, just return the input, unchanged
if flipx:
xi = xi[::-1, :]
if flipy:
xi = xi[:, ::-1]
if not transpose:
xi = xi.T
if output_grid_size:
return xi, 1, 1
else:
return xi
    row, col = _calc_rows(nx, ny, nz, row=row, col=col, aspect=aspect)
xo = np.zeros((ny * row, nx * col))
for iz in range(nz):
iy = int(np.floor(iz / col))
ix = iz - iy * col
xo[iy * ny:(iy + 1) * ny, ix * nx:(ix + 1) * nx] = xi[:, :, iz].T
if output_grid_size:
return (xo, row, col)
else:
return xo | b8ded004cb0e3aef328fc953c5a0b81805646e1a | 6,170 |
def template_dict(input_dict_arg, params_dict_arg):
"""function to enable templating a dictionary"""
output_dict = input_dict_arg
for key, value in output_dict.items():
if isinstance(value, str):
output_dict[key] = params_re_str(value, params_dict_arg)
elif isinstance(value, dict):
output_dict[key] = template_dict(value, params_dict_arg)
elif isinstance(value, list):
output_dict[key] = template_list(value, params_dict_arg)
return output_dict | 3a9e2df200f52f9ec320ab3900653851dfb77fcc | 6,171 |
def _traverse_dictionaries(instance, parent="spin_systems"):
"""Parses through the instance object contained within the parent object and return
a list of attributes that are populated.
Args:
instance: An instance object from the parent object.
parent: a string object used to create the addresses of the SpinSystem
attributes.
Returns:
List Object.
"""
if isinstance(instance, list):
return [
value
for i, obj in enumerate(instance)
for value in _traverse_dictionaries(obj, _str_encode(f"{parent}[{i}]"))
]
if isinstance(instance, dict):
return [
item
for key, value in instance.items()
if key not in EXCLUDE and value is not None
for item in (
_traverse_dictionaries(value, _str_encode(f"{parent}.{key}"))
if isinstance(value, (dict, list))
else [_str_encode(f"{parent}.{key}")]
)
]
return [] | 9ecf8050e7c4d9c4f8e84f04303f0be186f594d5 | 6,172 |
def getSingleChildTextByName(rootNode, name):
"""Returns the text of a child node found by name.
Only one such named child is expected.
"""
try:
nodeList = [e.firstChild.data for e in rootNode.childNodes if e.localName == name]
if len(nodeList) > 0:
return nodeList[0]
else:
return None
except AttributeError:
return None | 48a8a4b2c3c95cac944bcb96e33e602d62499f19 | 6,173 |
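A usage sketch with xml.dom.minidom (the XML below is illustrative):
from xml.dom import minidom
doc = minidom.parseString("<book><title>Dune</title><year>1965</year></book>")
print(getSingleChildTextByName(doc.documentElement, "title"))   # -> Dune
print(getSingleChildTextByName(doc.documentElement, "author"))  # -> None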
def test_aggregate_stores_output_in_record(configured_test_manager):
"""An aggregate output should exist in the record state."""
@aggregate(["output"])
def small_aggregate(record, records):
return "hello world"
record = Record(configured_test_manager, None)
small_aggregate(record, [record]) # TODO: blank records array crashes??
assert record.state["output"] == "hello world" | 865210e1d79c1a467bc44c5a9a1cd69870ff953f | 6,174 |
import numpy as np

def _get_energy_ratio_single_wd_bin_bootstrapping(
df_binned,
df_freq,
N=1,
percentiles=[5.0, 95.0],
return_detailed_output=False,
):
"""Get the energy ratio for one particular wind direction bin and
an array of wind speed bins. This function also includes bootstrapping
functionality by increasing the number of bootstrap evaluations (N) to
larger than 1. The bootstrap percentiles default to 5 % and 95 %.
"""
# Get results excluding uncertainty
if return_detailed_output:
energy_ratio_nominal, dict_info = _get_energy_ratio_single_wd_bin_nominal(
df_binned=df_binned,
df_freq=df_freq,
return_detailed_output=return_detailed_output,
)
else:
energy_ratio_nominal = _get_energy_ratio_single_wd_bin_nominal(
df_binned=df_binned,
df_freq=df_freq,
return_detailed_output=return_detailed_output,
)
# Add bootstrapping results, if necessary
if N <= 1:
results_array = np.array([energy_ratio_nominal] * 3, dtype=float)
else:
# Get a bootstrap sample of range
bootstrap_results = np.zeros(N)
bootstrap_results[0] = energy_ratio_nominal
for i in range(1, N):
df_randomized = df_binned.sample(frac=1, replace=True).copy()
bootstrap_results[i] = _get_energy_ratio_single_wd_bin_nominal(
df_binned=df_randomized,
df_freq=df_freq,
return_detailed_output=False,
)
# Return the results in the order used in previous versions
results_array = np.array(
[
energy_ratio_nominal,
np.nanpercentile(bootstrap_results, percentiles)[0],
np.nanpercentile(bootstrap_results, percentiles)[1],
]
)
if return_detailed_output:
return results_array, dict_info
else:
return results_array | a29e1ebaa9994148e473d61d7881737b62a9082e | 6,175 |
from datacube import Datacube
from .tasks import SaveTasks
from .model import DateTimeRange
import json
import sys
def save_tasks(
grid,
year,
temporal_range,
frequency,
output,
products,
dataset_filter,
env,
complevel,
overwrite=False,
tiles=None,
debug=False,
gqa=None,
):
"""
Prepare tasks for processing (query db).
<todo more help goes here>
\b
Not yet implemented features:
- output product config
- multi-product inputs
"""
filter = {}
if dataset_filter:
filter = json.loads(dataset_filter)
if temporal_range is not None and year is not None:
print("Can only supply one of --year or --temporal_range", file=sys.stderr)
sys.exit(1)
if temporal_range is not None:
try:
temporal_range = DateTimeRange(temporal_range)
except ValueError:
print(f"Failed to parse supplied temporal_range: '{temporal_range}'")
sys.exit(1)
if year is not None:
temporal_range = DateTimeRange.year(year)
if frequency is not None:
if frequency not in ("annual", "annual-fy", "semiannual", "seasonal", "all"):
print(f"Frequency must be one of annual|annual-fy|semiannual|seasonal|all and not '{frequency}'")
sys.exit(1)
dc = Datacube(env=env)
products = products.split("+")
if len(products) == 1:
product = products[0]
dss = None
n_dss = None
else:
dss, n_dss, product, error_logger = _parse_products(dc, products, filter, temporal_range)
if output == "":
if temporal_range is not None:
output = f"{product}_{temporal_range.short}.db"
else:
output = f"{product}_all.db"
try:
tasks = SaveTasks(
output, grid, frequency=frequency, overwrite=overwrite, complevel=complevel
)
except ValueError as e:
print(str(e))
sys.exit(1)
def on_message(msg):
print(msg)
def gqa_predicate(ds):
return ds.metadata.gqa_iterative_mean_xy <= gqa
predicate = None
if gqa is not None:
predicate = gqa_predicate
try:
ok = tasks.save(
dc,
product,
dataset_filter=filter,
temporal_range=temporal_range,
tiles=tiles,
predicate=predicate,
debug=debug,
msg=on_message,
dss=dss,
n_dss=n_dss,
)
except ValueError as e:
print(str(e))
sys.exit(2)
if len(products) != 1:
for product, count in error_logger.missing_counts.items():
print(f"Product {product} has {count} missing datasets.")
if not ok:
# exit with error code, failure message was already printed
sys.exit(3) | 247fcc8208ad42a8cca2a8e43152b4b6e3f25d00 | 6,176 |
import re
def get_file_name(part):
"""get file name using regex from fragment ID"""
return re.findall(r"='(.*\-[a-z]+).*", part)[0] | 30c8867d8e14b04c593359f1c16d9bf324711ba0 | 6,177 |
def get_helping_materials(project_id, limit=100, offset=0, last_id=None):
"""Return a list of helping materials for a given project ID.
:param project_id: PYBOSSA Project ID
:type project_id: integer
:param limit: Number of returned items, default 100
:type limit: integer
    :param offset: Offset for the query, default 0
    :type offset: integer
    :param last_id: id of the last helping material, used for pagination. If provided, offset is ignored
    :type last_id: integer
    :returns: list of HelpingMaterial objects on success, otherwise the error response
"""
if last_id is not None:
params = dict(limit=limit, last_id=last_id)
else:
params = dict(limit=limit, offset=offset)
print(OFFSET_WARNING)
params['project_id'] = project_id
try:
res = _pybossa_req('get', 'helpingmaterial',
params=params)
if type(res).__name__ == 'list':
return [HelpingMaterial(helping) for helping in res]
else:
return res
except: # pragma: no cover
raise | 163436a9a09816bc18b31c9911b87db74b8aefbd | 6,178 |
import math
def generate_sphere_points(n):
"""
Returns list of 3d coordinates of points on a sphere using the
Golden Section Spiral algorithm.
"""
points = []
inc = math.pi * (3 - math.sqrt(5))
offset = 2 / float(n)
for k in range(int(n)):
y = k * offset - 1 + (offset / 2)
r = math.sqrt(1 - y*y)
phi = k * inc
points.append([math.cos(phi)*r, y, math.sin(phi)*r])
return points | bd6c7624220f7928a44f6dcb24b7112e8d803eb4 | 6,179 |
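A quick sanity check for the snippet above (the value of n is illustrative):
pts = generate_sphere_points(100)
# every generated point lies on the unit sphere: x^2 + y^2 + z^2 == 1
assert all(abs(x*x + y*y + z*z - 1.0) < 1e-9 for x, y, z in pts)
print(len(pts))  # -> 100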
def svn_repos_dir_delta2(*args):
"""
svn_repos_dir_delta2(svn_fs_root_t src_root, char src_parent_dir, char src_entry,
svn_fs_root_t tgt_root, char tgt_path,
svn_delta_editor_t editor, void edit_baton,
svn_repos_authz_func_t authz_read_func, svn_boolean_t text_deltas,
svn_depth_t depth, svn_boolean_t entry_props,
svn_boolean_t ignore_ancestry,
apr_pool_t pool) -> svn_error_t
"""
    return _repos.svn_repos_dir_delta2(*args) | c972237fee8c76a24fb9443a9607931566b642ff | 6,180
import numpy as np

def linear_r2_points(points: np.ndarray, coef: tuple, r2: R2 = R2.classic) -> float:
"""Computes the coefficient of determination (R2).
Args:
points (np.ndarray): numpy array with the points (x, y)
coef (tuple): the coefficients from the linear fit
r2 (R2): select the type of coefficient of determination
Returns:
float: coefficient of determination (R2)
"""
x = points[:, 0]
y = points[:, 1]
return linear_r2(x, y, coef, r2) | 98c33ba3354ed22ddf3ab718f2f41967c2555f18 | 6,181 |
from typing import List
from datetime import datetime
def _show_tournament_list() -> List:
"""
    Returns a list of upcoming tournaments.
"""
tournaments = []
for tournament in loop.run_until_complete(get_request('https://codeforces.com/api/contest.list?gym=false')):
if tournament['phase'] != 'BEFORE':
break
tournaments.append(tournament)
for tournament in range(len(tournaments)):
tournaments[tournament]['durationSeconds'] = datetime.utcfromtimestamp(tournaments[tournament]['durationSeconds']).strftime("%H:%M:%S")
tournaments[tournament]['startTimeSeconds'] = datetime.utcfromtimestamp(tournaments[tournament]['startTimeSeconds']).strftime("%d.%m.%Y %H:%M:%S")
return tournaments | 0815ae126671a8c85bb3311e900db48ce87fa1f0 | 6,182 |
def less_goals_scored():
"""
returns the lowest number of goals scored during one week
"""
return goals_scored('min') | fda281196148370d4639aef9dabc6ad1cb4fd339 | 6,183 |
from typing import Sequence
from typing import Union
from typing import Tuple
def compute_avgpool_output_shape(input_shape:Sequence[Union[int, None]],
kernel_size:Union[Sequence[int], int]=1,
stride:Union[Sequence[int], int]=1,
padding:Union[Sequence[int], int]=0,
channel_last:bool=False) -> Tuple[Union[int, None]]:
""" finished, cheched,
compute the output shape of a avgpool layer
input_shape: sequence of int or None,
shape of an input Tensor,
the first dimension is the batch dimension, which is allowed to be `None`
kernel_size: int, or sequence of int, default 1,
kernel size (filter size) of the layer, should be compatible with `input_shape`
stride: int, or sequence of int, default 1,
stride (down-sampling length) of the layer, should be compatible with `input_shape`
padding: int, or sequence of int, default 0,
padding length(s) of the layer, should be compatible with `input_shape`
channel_last: bool, default False,
channel dimension is the last dimension,
or the second dimension (the first is the batch dimension by convention)
    Returns
    -------
output_shape: tuple,
shape of the output Tensor
"""
output_shape = compute_output_shape(
'avgpool',
input_shape, 1, kernel_size, stride, padding, 0, 1,
channel_last,
)
return output_shape | 5116f6fdb95c1cf07d34c2193e6e08eee47a06da | 6,184 |
def _obs_intersect(segment1, segment2):
    """Check if two line segments intersect. The boundaries don't count as
    intersection."""
    (x0, y0), (x1, y1) = segment1
    (x2, y2), (x3, y3) = segment2
base1 = (x0, y0)
base2 = (x2, y2)
dir1 = (x1-x0, y1-y0)
dir2 = (x3-x2, y3-y2)
t1, t2 = _intersect(base1, dir1, base2, dir2)
eps = 0.00001
    return -eps < t1 < 1.0 + eps and -eps < t2 < 1.0 + eps | ea2b268adac5fc1156b566ea0c6cabdd2f4fe94e | 6,185
import json
import re
from subprocess import Popen, PIPE
from django.forms.models import model_to_dict
from django.http import JsonResponse
def project_configure(request, project_name):
"""
get configuration
:param request: request object
:param project_name: project name
:return: json
"""
# get configuration
if request.method == 'GET':
project = Project.objects.get(name=project_name)
project = model_to_dict(project)
project['configuration'] = json.loads(project['configuration']) if project['configuration'] else None
return JsonResponse(project)
# update configuration
elif request.method == 'POST':
project = Project.objects.filter(name=project_name)
data = json.loads(request.body)
configuration = json.dumps(data.get('configuration'), ensure_ascii=False)
project.update(**{'configuration': configuration})
# for safe protection
project_name = re.sub('[\!\@\#\$\;\&\*\~\"\'\{\}\]\[\-\+\%\^]+', '', project_name)
# execute generate cmd
cmd = ' '.join(['gerapy', 'generate', project_name])
p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
stdout, stderr = bytes2str(p.stdout.read()), bytes2str(p.stderr.read())
if not stderr:
return JsonResponse({'status': '1'})
else:
return JsonResponse({'status': '0', 'message': stderr}) | a033d7d1810cee5e5370d8d9f6562f23e3e7e64a | 6,186 |
import time
import numpy as np
def run_epoch(session, model, eval_op=None, verbose=False):
"""Runs the model on the given data."""
start_time = time.time()
costs = 0.0
iters = 0
state = session.run(model.initial_state)
fetches = {
"cost": model.cost,
"final_state": model.final_state,
}
if eval_op is not None:
fetches["eval_op"] = eval_op
for step in range(model.input.epoch_size):
feed_dict = {}
for i, h in enumerate(model.initial_state):
feed_dict[h] = state[i]
vals = session.run(fetches, feed_dict)
cost = vals["cost"]
state = vals["final_state"]
costs += cost
iters += model.input.num_steps
if verbose and step % (model.input.epoch_size // 10) == 10:
print("%.3f perplexity: %.3f speed: %.0f wps" %
(step * 1.0 / model.input.epoch_size, np.exp(costs / iters),
iters * model.input.batch_size / (time.time() - start_time)))
return np.exp(costs / iters) | 641100d0789c3841a4b3cb67e42963387d0f888d | 6,187 |
def unemployment(
token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Economic data
https://iexcloud.io/docs/api/#economic-data
Args:
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_timeseriesWrapper(timeseries_kwargs)
return timeSeries(
id="ECONOMIC",
key="UNRATE",
token=token,
version=version,
filter=filter,
format=format,
**timeseries_kwargs
) | a5412d78673f639e0d10a95bb91138da1b432221 | 6,188 |
import warnings
def splitunc(p):
"""Deprecated since Python 3.1. Please use splitdrive() instead;
it now handles UNC paths.
Split a pathname into UNC mount point and relative path specifiers.
Return a 2-tuple (unc, rest); either part may be empty.
If unc is not empty, it has the form '//host/mount' (or similar
using backslashes). unc+rest is always the input path.
Paths containing drive letters never have an UNC part.
"""
warnings.warn("ntpath.splitunc is deprecated, use ntpath.splitdrive instead",
DeprecationWarning, 2)
drive, path = splitdrive(p)
if len(drive) == 2:
# Drive letter present
return p[:0], p
return drive, path | d9748b551e6a9ba101b3817ab22c74dd30cf89d1 | 6,189 |
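A usage note: the deprecated helper above delegates to splitdrive(), which handles UNC paths directly on Python 3 (the path below is illustrative):
import ntpath
print(ntpath.splitdrive(r"\\host\mount\dir\file"))  # -> ('\\\\host\\mount', '\\dir\\file')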
def expand_locations(ctx, input, targets = []):
"""Expand location templates.
Expands all `$(execpath ...)`, `$(rootpath ...)` and deprecated `$(location ...)` templates in the
given string by replacing with the expanded path. Expansion only works for labels that point to direct dependencies
of this rule or that are explicitly listed in the optional argument targets.
See https://docs.bazel.build/versions/main/be/make-variables.html#predefined_label_variables.
Use `$(rootpath)` and `$(rootpaths)` to expand labels to the runfiles path that a built binary can use
to find its dependencies. This path is of the format:
- `./file`
- `path/to/file`
- `../external_repo/path/to/file`
Use `$(execpath)` and `$(execpaths)` to expand labels to the execroot (where Bazel runs build actions).
This is of the format:
- `./file`
- `path/to/file`
- `external/external_repo/path/to/file`
- `<bin_dir>/path/to/file`
- `<bin_dir>/external/external_repo/path/to/file`
The deprecated `$(location)` and `$(locations)` expansions returns either the execpath or rootpath depending on the context.
Args:
ctx: context
input: String to be expanded
targets: List of targets for additional lookup information.
Returns:
The expanded path or the original path
"""
return ctx.expand_location(input, targets = targets) | efa482d928484b7d6f9c8acbf81e0a3d5b4cd50f | 6,190 |
import requests
import bs4
import json
def scrape_db(test=False, write_file=True):
"""
    Function to scrape the bodybuilding.com recipe database and save results as json.
    Parameters:
    -----------
    test : bool, optional
        If True, only pull the single probe recipe instead of the full database.
    write_file : bool, optional
        If True, write the scraped recipes to 'bodybuilding_recipes.json' in the data folder.
"""
# Hacky way to get all recipes - you have to request the number. Luckily,
# this is listed at the beginning of any result you pull from DB.
# We want all of the recipes, so we'll do a quick request of one recipe to
# get the 'total' number in the DB
url_request = 'https://cms-api.bodybuilding.com/BbcomRecipe'
url_parameters = {'sort': 'publishDate', 'order': 'desc', 'limit': '1'}
fake_recipes_list = requests.get(url_request, params=url_parameters)
fake_recipes = bs4.BeautifulSoup(fake_recipes_list.content, features='html.parser')
fake = json.loads(str(fake_recipes))
# Get the total number of recipes in the db
total_recipes = fake['total']
    if test:
all_recipes = fake_recipes
else:
# Change the 'limit' on the url to the total number of recipes
url_parameters['limit'] = str(total_recipes)
all_recipes_list = requests.get(url_request, params=url_parameters)
all_recipes = bs4.BeautifulSoup(all_recipes_list.content, features='html.parser')
# Just get search results and get rid of data before.
all_recipes_list = json.loads(str(all_recipes))['_embedded']['bb-cms:search-results']
# Dump to json file - results will always be saved in 'data' folder
if write_file:
save_path = _DATA_DIR.joinpath('bodybuilding_recipes.json')
rf = open(save_path, 'w')
json.dump(all_recipes_list, rf)
rf.close()
return all_recipes_list | d9883058ac434fca861168625493467bfbcafaed | 6,191 |
import functools
def require(required):
""" Decorator for checking the required values in state.
It checks the required attributes in the passed state and stop when
any of those is missing. """
def decorator(function):
@functools.wraps(function)
def wrapper(*args, **kwargs):
for key in required:
if key not in args[0]:
raise KeyError('{0} not passed to {1}'.format(
key, function.__name__))
return function(*args, **kwargs)
return wrapper
return decorator | 9bf04a95d39b89fd10c9872dd7fe29c5c10f06a1 | 6,192 |
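A usage sketch for the decorator above (the handler name and state key are hypothetical):
@require(['user_id'])
def handler(state):
    return state['user_id']

print(handler({'user_id': 42}))  # -> 42
# handler({}) would raise KeyError: 'user_id not passed to handler'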
import re
def simplify_unicode(sentence):
"""
Most accented Latin characters are pronounced just the same as the base character.
Shrink as many extended Unicode repertoire into the Estonian alphabet as possible.
It is GOOD for machine learning to have smaller ortographic repertoire.
It is a BAD idea if we start using any proper name dictionaries for morph analysis
or pronunciations later on. You are warned.
:param sentence:
:return: str
"""
sentence = sentence.replace("Ð", "D").replace("Þ", "Th")
sentence = sentence.replace("ð", "d").replace("þ", "th")
sentence = sentence.replace("ø", "ö").replace("Ø", "Ö")
sentence = sentence.replace("ß", "ss").replace("ẞ", "Ss")
sentence = re.sub(r'S(c|C)(h|H)', r'Š', sentence)
sentence = re.sub(r'sch', r'š', sentence)
sentence = re.sub(r'[ĆČ]', r'Tš', sentence)
sentence = re.sub(r'[ćč]', r'tš', sentence)
sentence = re.sub(r'[^A-ZÄÖÜÕŽŠa-zäöüõšž ,]+', lambda m: r'{}'.format( strip_combining(m.group(0)) ), sentence)
return sentence | 291a1e002d4d428697d7b892291ad314f0000a2a | 6,193 |
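A usage sketch (the names below are illustrative and stick to characters the function handles without the external strip_combining helper):
print(simplify_unicode("Schüßler"))  # -> Šüssler
print(simplify_unicode("Øster"))     # -> Öster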
import pickle
def read_file(pickle_file_name):
"""Reads composite or non-composite novelty results from Pickle file.
:param pickle_file_name: Path to input file (created by
`write_standard_file` or `write_pmm_file`).
:return: novelty_dict: Has the following keys if not a composite...
novelty_dict['denorm_radar_matrix_baseline']: See doc for
`write_standard_file`.
novelty_dict['denorm_radar_matrix_trial']: Same.
novelty_dict['novel_indices']: Same.
novelty_dict['denorm_radar_matrix_upconv']: Same.
novelty_dict['denorm_radar_matrix_upconv_svd']: Same.
novelty_dict['percent_variance_to_keep']: Same.
novelty_dict['cnn_feature_layer_name']: Same.
novelty_dict['multipass']: Same.
novelty_dict['baseline_full_id_strings']: Same.
novelty_dict['baseline_times_unix_sec']: Same.
novelty_dict['trial_full_id_strings']: Same.
novelty_dict['trial_times_unix_sec']: Same.
novelty_dict['cnn_file_name']: Same.
novelty_dict['upconvnet_file_name']: Same.
...or the following keys if composite...
novelty_dict['mean_denorm_radar_matrix_baseline']:
See doc for `write_pmm_file`.
novelty_dict['mean_denorm_radar_matrix_novel']: Same.
novelty_dict['mean_denorm_radar_matrix_upconv']: Same.
novelty_dict['mean_denorm_radar_matrix_upconv_svd']: Same.
novelty_dict['cnn_file_name']: Same.
novelty_dict['non_pmm_file_name']: Same.
novelty_dict['pmm_max_percentile_level']: Same.
:return: pmm_flag: Boolean flag. True if `novelty_dict` contains composite,
False otherwise.
:raises: ValueError: if dictionary does not contain expected keys.
"""
pickle_file_handle = open(pickle_file_name, 'rb')
novelty_dict = pickle.load(pickle_file_handle)
pickle_file_handle.close()
pmm_flag = MEAN_BASELINE_MATRIX_KEY in novelty_dict
if pmm_flag:
missing_keys = list(
set(PMM_FILE_KEYS) - set(novelty_dict.keys())
)
else:
missing_keys = list(
set(STANDARD_FILE_KEYS) - set(novelty_dict.keys())
)
if len(missing_keys) == 0:
return novelty_dict, pmm_flag
error_string = (
'\n{0:s}\nKeys listed above were expected, but not found, in file '
'"{1:s}".'
).format(str(missing_keys), pickle_file_name)
raise ValueError(error_string) | fcc4976648bafc7e845a22552965e1f65e3ddc85 | 6,194 |
import re
def AutoscalersForMigs(migs, autoscalers, project):
"""Finds Autoscalers with target amongst given IGMs.
Args:
migs: List of triples (IGM name, scope type, scope name).
autoscalers: A list of Autoscalers to search among.
project: Project owning resources.
Returns:
A list of all Autoscalers with target on mig_names list.
"""
igm_url_regexes = []
for (name, scope_type, scope_name) in migs:
igm_url_regexes.append(
'/projects/{project}/{scopeType}/{scopeName}/'
'instanceGroupManagers/{name}$'
.format(project=project,
scopeType=(scope_type + 's'),
scopeName=scope_name,
name=name))
igm_url_regex = re.compile('(' + ')|('.join(igm_url_regexes) + ')')
result = [
autoscaler for autoscaler in autoscalers
if igm_url_regex.search(autoscaler.target)
]
return result | 12b6e10c16c7ea5324f5090cdc3027a38e1247c1 | 6,195 |
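A minimal usage sketch (project, zone, and resource names are hypothetical; SimpleNamespace stands in for the autoscaler message objects):
from types import SimpleNamespace
asc = SimpleNamespace(target=(
    "https://compute.googleapis.com/compute/v1/projects/my-proj"
    "/zones/us-central1-a/instanceGroupManagers/mig-1"))
migs = [("mig-1", "zone", "us-central1-a")]
matches = AutoscalersForMigs(migs, [asc], "my-proj")
print(len(matches))  # -> 1, because the autoscaler targets mig-1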
import jax.numpy as jnp

def log_loss(
    predictions: ArrayLike,
    targets: ArrayLike,
) -> ArrayLike:
"""Calculates the log loss of predictions wrt targets.
Args:
predictions: a vector of probabilities of arbitrary shape.
targets: a vector of probabilities of shape compatible with predictions.
Returns:
a vector of same shape of `predictions`.
"""
base.type_assert([predictions, targets], float)
return -jnp.log(likelihood(predictions, targets)) | a3d27b0229b287e32701fa80822ad1025e875a62 | 6,196 |
import json
def GetAccessTokenOrDie(options):
"""Generates a fresh access token using credentials passed into the script.
Args:
options: Flag values passed into the script.
Returns:
A fresh access token.
Raises:
ValueError: response JSON could not be parsed, or has no access_token.
"""
cred = GetDSApiCredOrDie(options)
[cid, csc, refresh_token] = cred.split(",")
query_string_template = (
"refresh_token=%s&client_id=%s&client_secret=%s"
"&grant_type=refresh_token"
)
output = RunCommand(
[
"curl",
"--data",
query_string_template % (refresh_token, cid, csc),
"https://accounts.google.com/o/oauth2/token",
]
)
json_output = json.loads(output)
if "access_token" in json_output:
return json_output["access_token"]
else:
raise ValueError("missing access_token in response: %s" % output) | 6ecbd6875931c6ef139da52578050380da4e62bd | 6,197 |
def remove_whitespace(tokens):
"""Remove any top-level whitespace and comments in a token list."""
return tuple(
token for token in tokens
if token.type not in ('whitespace', 'comment')) | 5ed78f38277487d2e05e20e10e25413b05cab8e5 | 6,198 |
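A usage sketch (SimpleNamespace stands in for parsed CSS-style token objects with a .type attribute):
from types import SimpleNamespace as Tok
tokens = [Tok(type='whitespace'), Tok(type='ident'), Tok(type='comment'), Tok(type='number')]
kept = remove_whitespace(tokens)
print([t.type for t in kept])  # -> ['ident', 'number']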
import numpy as np
import cython

def update(args):
"""
For LdaCgsMulti
"""
(docs, doc_indices, mtrand_state, dtype) = args
start, stop = docs[0][0], docs[-1][1]
global Ktype
if _K.value < 2 ** 8:
Ktype = np.uint8
elif _K.value < 2 ** 16:
Ktype = np.uint16
else:
raise NotImplementedError("Invalid Ktype. k={}".format(_K))
corpus = np.frombuffer(_corpus, dtype=dtype)[start:stop]
Z = np.frombuffer(_Z, dtype=Ktype)[start:stop].copy()
gbl_word_top = np.frombuffer(_word_top, dtype=np.float32)
gbl_word_top = gbl_word_top.reshape(_V.value, _K.value)
loc_word_top = gbl_word_top.copy()
inv_top_sums = np.frombuffer(_inv_top_sums, dtype=np.float32).copy()
top_doc = np.frombuffer(_top_doc, dtype=np.float32)
top_doc = top_doc.reshape(_K.value, int(top_doc.size/_K.value))
top_doc = top_doc[:, doc_indices[0]:doc_indices[1]].copy()
log_p = 0
log_wk = np.log(gbl_word_top * inv_top_sums[np.newaxis, :])
log_kc = np.log(top_doc / top_doc.sum(0)[np.newaxis, :])
indices = np.array([(j - start) for (i,j) in docs], dtype='i')
if dtype == np.uint16 and Ktype == np.uint8:
update_fn = cgs_update[cython.ushort,cython.uchar]
elif dtype == np.uint16 and Ktype == np.uint16:
update_fn = cgs_update[cython.ushort,cython.ushort]
elif dtype == np.uint32 and Ktype == np.uint8:
update_fn = cgs_update[cython.uint,cython.uchar]
elif dtype == np.uint32 and Ktype == np.uint16:
update_fn = cgs_update[cython.uint,cython.ushort]
else:
raise NotImplementedError
results = update_fn(_iteration.value,
corpus,
loc_word_top,
inv_top_sums,
top_doc,
Z,
indices,
mtrand_state[0],
mtrand_state[1],
mtrand_state[2],
mtrand_state[3],
mtrand_state[4])
#final_results = [np.asarray(result, dtype=dtype)
# for result,dtype in zip(results[:4],
# [Ktype, np.float32, np.float32, np.float32])]
#final_results.extend(results[4:])
(loc_word_top, inv_top_sums, top_doc, Z, log_p, mtrand_str, mtrand_keys,
mtrand_pos, mtrand_has_gauss, mtrand_cached_gaussian) = results
loc_word_top -= gbl_word_top
return (Z, top_doc, loc_word_top, log_p,
mtrand_str, mtrand_keys, mtrand_pos,
mtrand_has_gauss, mtrand_cached_gaussian) | 2dd014472c77e363fafab1f9dc22ce0267d3e3df | 6,199 |