content | sha1 | id
---|---|---|
def get_http_proxy():
"""
Get http_proxy and https_proxy from environment variables.
Username and password are not supported yet.
"""
host = conf.get_httpproxy_host()
port = conf.get_httpproxy_port()
return host, port | f04dc8580d9fdd3d867c5b28fa3694fe82a6739a | 13,035 |
def get_parser_udf(
structural=True, # structural information
blacklist=["style", "script"], # ignore tag types, default: style, script
flatten=["span", "br"], # flatten tag types, default: span, br
language="en",
lingual=True, # lingual information
lingual_parser=None,
strip=True,
replacements=[("[\u2010\u2011\u2012\u2013\u2014\u2212]", "-")],
tabular=True, # tabular information
visual=False, # visual information
visual_parser=None,
):
"""Return an instance of ParserUDF."""
parser_udf = ParserUDF(
structural=structural,
blacklist=blacklist,
flatten=flatten,
lingual=lingual,
lingual_parser=lingual_parser,
strip=strip,
replacements=replacements,
tabular=tabular,
visual=visual,
visual_parser=visual_parser,
language=language,
)
return parser_udf | cf12b36fe9219aabfd746b2ad1f1f39e62ad7fe9 | 13,036 |
def img_preprocess2(image, target_shape, bboxes=None, correct_box=False):
"""
Convert to RGB -> resize (without changing the original aspect ratio) -> normalize,
and optionally correct the bounding boxes accordingly.
:param image: the image to process
:param target_shape: desired image shape after processing, stored as (h, w)
:return: the processed image, with shape target_shape
"""
h_target, w_target = target_shape
h_org, w_org, _ = image.shape
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)
resize_ratio = min(1.0 * w_target / w_org, 1.0 * h_target / h_org)
resize_w = int(resize_ratio * w_org)
resize_h = int(resize_ratio * h_org)
image_resized = cv2.resize(image, (resize_w, resize_h))
image_paded = np.full((h_target, w_target, 3), 128.0)
dw = int((w_target - resize_w) / 2)
dh = int((h_target - resize_h) / 2)
image_paded[dh:resize_h+dh, dw:resize_w+dw,:] = image_resized
image = image_paded / 255.0
image = normalize(image)
if correct_box:
bboxes[:, [0, 2]] = bboxes[:, [0, 2]] * resize_ratio + dw
bboxes[:, [1, 3]] = bboxes[:, [1, 3]] * resize_ratio + dh
return image, bboxes
return image,resize_ratio,dw,dh | e950e0e8cca4f31449feb12203ee9a9ef74baa8c | 13,037 |
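A minimal sketch (not part of the original snippet, made-up sizes) of the letterbox arithmetic img_preprocess2 applies: the image is scaled by the smaller of the two target ratios, centered with (dw, dh) padding, and a bbox in original coordinates is mapped with the same ratio and offsets.
import numpy as np
h_org, w_org = 480, 640
h_target, w_target = 416, 416
resize_ratio = min(1.0 * w_target / w_org, 1.0 * h_target / h_org)  # 0.65
dw = int((w_target - int(resize_ratio * w_org)) / 2)  # horizontal padding
dh = int((h_target - int(resize_ratio * h_org)) / 2)  # vertical padding
bboxes = np.array([[100.0, 50.0, 300.0, 200.0]])  # x1, y1, x2, y2 in the original image
bboxes[:, [0, 2]] = bboxes[:, [0, 2]] * resize_ratio + dw
bboxes[:, [1, 3]] = bboxes[:, [1, 3]] * resize_ratio + dh
print(bboxes)  # the same box expressed in the padded target image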
def pivot_timeseries(df, var_name, timezone=None):
"""
Pivot timeseries DataFrame and shift UTC by given timezone offset
Parameters
----------
df : pandas.DataFrame
Timeseries DataFrame to be pivoted with year, month, hour columns
var_name : str
Name for new column describing data
timezone : int, optional
UTC offset to apply to DatetimeIndex, by default None
Returns
-------
pandas.DataFrame
Seaborn style long table with source, year, month, hour columns
"""
sns_df = []
for name, col in df.items():  # .iteritems() was removed in newer pandas
col = col.to_frame()
col.columns = [var_name]
col['source'] = name
col['year'] = col.index.year
col['month'] = col.index.month
col['hour'] = col.index.hour
if timezone is not None:
td = pd.to_timedelta('{:}h'.format(timezone))
col['local_hour'] = (col.index + td).hour
sns_df.append(col)
return pd.concat(sns_df) | 914ba75929caacd16da5170e98a95f2135a1682f | 13,038 |
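A hedged usage sketch (the input frame and column names here are made up): pivot_timeseries expects a DataFrame with a DatetimeIndex and one column per source series.
import numpy as np
import pandas as pd
idx = pd.date_range('2020-01-01', periods=48, freq='H')
df = pd.DataFrame({'site_a': np.random.rand(48), 'site_b': np.random.rand(48)}, index=idx)
long_df = pivot_timeseries(df, 'cf', timezone=-5)
print(long_df.columns.tolist())  # ['cf', 'source', 'year', 'month', 'hour', 'local_hour']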
def _preprocess_stored_query(query_text, config):
"""Inject some default code into each stored query."""
ws_id_text = " LET ws_ids = @ws_ids " if 'ws_ids' in query_text else ""
return '\n'.join([
config.get('query_prefix', ''),
ws_id_text,
query_text
]) | bc63391724773cd4a60f3dc9686d243d6d733b40 | 13,039 |
def handler_request_exception(response: Response):
"""
Args:
response (Response):
"""
status_code = response.status_code
data = response.json()
if "details" in data and len(data.get("details")) > 0:
data = data.get("details")[0]
kwargs = {
"error_code": data.get("error_code")
or data.get("error")
or str(data.get("status_code")),
"description": data.get("description_detail")
or data.get("description")
or data.get("error_description")
or data.get("message"),
"response": response,
}
message = "{} {} ({})".format(
kwargs.get("error_code"),
kwargs.get("description"),
response.url,
)
if status_code == 400:
return errors.BadRequest(message, **kwargs)
elif status_code == 402:
return errors.BusinessError(message, **kwargs)
elif status_code == 404:
return errors.NotFound(message, **kwargs)
elif status_code == 500:
return errors.ServerError(message, **kwargs)
elif status_code == 503:
return errors.ServiceUnavailable(message, **kwargs)
elif status_code == 504:
return errors.GatewayTimeout(message, **kwargs)
else:
return errors.RequestError(message, **kwargs) | 8847b4a1fd6f90d6e25d0ef8dc33a32e38e81617 | 13,040 |
def mlrPredict(W, data):
"""
mlrObjFunction predicts the label of data given the data and parameter W
of Logistic Regression
Input:
W: the matrix of weight of size (D + 1) x 10. Each column is the weight
vector of a Logistic Regression classifier.
X: the data matrix of size N x D
Output:
label: vector of size N x 1 representing the predicted label of
corresponding feature vector given in data matrix
"""
label = np.zeros((data.shape[0], 1))
##################
# YOUR CODE HERE #
##################
# HINT: Do not forget to add the bias term to your input data
"""
Add the bias term at the beginning
"""
n_data = data.shape[0]
bias = np.ones((n_data,1))
"""
Concatenate the bias to the training data
"""
data = np.concatenate( (bias,data),axis=1)
outputs = np.dot(data,W)
#print (outputs[0])
for i in range(n_data):
label[i][0] = np.argmax(outputs[i],axis=0)
return label | 18bf0c86195cf144eb63f5b6c440f92c57d2fe9b | 13,043 |
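A quick check with made-up numbers: W has shape (D + 1, 10) and data has shape (N, D); mlrPredict returns one class index per row.
import numpy as np
rng = np.random.default_rng(0)
W = rng.normal(size=(4, 10))    # D = 3 features plus a bias row, 10 classes
data = rng.normal(size=(5, 3))  # N = 5 samples
print(mlrPredict(W, data).ravel())  # five class indices (stored as floats) in [0, 9]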
from .error_pages import add_error_pages
from .global_variables import init_global
from .home import home_page
from .rules import rule_page
from .create_game import create_game_page, root_url_games
from .global_stats import global_stats_page, page_url
from .utils.add_dash_table import add_dash as add_dash_table
from .utils.add_dash_games import add_dash_games
from .admin import admin_page
def create_app():
"""Create Flask application."""
app = Flask(__name__, instance_relative_config=False)
app = add_error_pages(app)
app.config.from_object("config")
with app.app_context():
init_global()
# # Import parts of our application
bootstrap = Bootstrap()
app.register_blueprint(home_page)
Markdown(app)
app.register_blueprint(rule_page)
app.register_blueprint(create_game_page)
app.register_blueprint(global_stats_page)
bootstrap.init_app(app)
app = add_dash_table(app, page_url)
app = add_dash_games(app, root_url_games)
app.register_blueprint(admin_page)
return app | 665ab7beda7ff79e4b81c22d5f28409a31dc896f | 13,044 |
def process_integration(request, case_id):
"""Method to process case."""
try:
case = OVCBasicCRS.objects.get(case_id=case_id, is_void=False)
county_code = int(case.county)
const_code = int(case.constituency)
county_id, const_id = 0, 0
crs_id = str(case_id).replace('-', '')
user_counties, user_geos = get_person_geo(request)
# Get person orgs
ou_ids = get_person_orgs(request)
if request.method == 'POST':
response = handle_integration(request, case, case_id)
print(response)
check_fields = ['sex_id', 'case_category_id', 'case_reporter_id',
'family_status_id', 'household_economics',
'risk_level_id', 'mental_condition_id',
'perpetrator_status_id', 'other_condition_id',
'physical_condition_id', 'yesno_id']
vals = get_dict(field_name=check_fields)
category = OVCBasicCategory.objects.filter(
case_id=case_id, is_void=False)
person = OVCBasicPerson.objects.filter(case_id=case_id, is_void=False)
# Attached Geos and Org Units for the user
# ou_ids = []
org_id = request.session.get('ou_primary', 0)
ou_ids.append(org_id)
ou_attached = request.session.get('ou_attached', 0)
user_level = request.session.get('user_level', 0)
user_type = request.session.get('user_type', 0)
print(org_id, ou_attached, user_level, user_type)
# person_id = request.user.reg_person_id
county = SetupGeography.objects.filter(
area_code=county_code, area_type_id='GPRV')
for c in county:
county_id = c.area_id
# Get constituency
constituency = SetupGeography.objects.filter(
area_code=const_code, area_type_id='GDIS')
for c in constituency:
const_id = c.area_id
ous = RegOrgUnit.objects.filter(is_void=False)
counties = SetupGeography.objects.filter(area_type_id='GPRV')
if user_counties:
counties = counties.filter(area_id__in=user_counties)
if request.user.is_superuser:
all_ou_ids = ['TNGD']
ous = ous.filter(org_unit_type_id__in=all_ou_ids)
geos = SetupGeography.objects.filter(
area_type_id='GDIS', parent_area_id=county_id)
else:
ous = ous.filter(id__in=ou_ids)
geos = SetupGeography.objects.filter(
area_type_id='GDIS', parent_area_id=county_id)
return render(request, 'management/integration_process.html',
{'form': {}, 'case': case, 'vals': vals,
'category': category, 'person': person,
'geos': geos, 'ous': ous, 'counties': counties,
'county_id': county_id, 'const_id': const_id,
'crs_id': crs_id})
except Exception as e:
print('Error processing integration - %s' % (e))
else:
pass | bd383b624a072fec634bc28bbba71c2d635eeac2 | 13,045 |
def get_aabb(pts):
"""axis-aligned minimum bounding box"""
x, y = np.floor(pts.min(axis=0)).astype(int)
w, h = np.ceil(pts.ptp(axis=0)).astype(int)
return x, y, w, h | 68cffaf0b1cacf702a2dd3c6c22af6323d220e93 | 13,046 |
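Quick check with assumed points: get_aabb returns the integer (x, y, width, height) of the axis-aligned box enclosing the set.
import numpy as np
pts = np.array([[1.2, 2.7], [4.9, 0.3], [3.0, 5.1]])
print(get_aabb(pts))  # x=1, y=0, w=4, h=5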
from re import S
def _solve(f, *symbols, **flags):
"""Return a checked solution for f in terms of one or more of the
symbols. A list should be returned except for the case when a linear
undetermined-coefficients equation is encountered (in which case
a dictionary is returned).
If no method is implemented to solve the equation, a NotImplementedError
will be raised. In the case that conversion of an expression to a Poly
gives None a ValueError will be raised."""
not_impl_msg = "No algorithms are implemented to solve equation %s"
if len(symbols) != 1:
soln = None
free = f.free_symbols
ex = free - set(symbols)
if len(ex) != 1:
ind, dep = f.as_independent(*symbols)
ex = ind.free_symbols & dep.free_symbols
if len(ex) == 1:
ex = ex.pop()
try:
# soln may come back as dict, list of dicts or tuples, or
# tuple of symbol list and set of solution tuples
soln = solve_undetermined_coeffs(f, symbols, ex, **flags)
except NotImplementedError:
pass
if soln:
if flags.get('simplify', True):
if isinstance(soln, dict):
for k in soln:
soln[k] = simplify(soln[k])
elif isinstance(soln, list):
if isinstance(soln[0], dict):
for d in soln:
for k in d:
d[k] = simplify(d[k])
elif isinstance(soln[0], tuple):
soln = [tuple(simplify(i) for i in j) for j in soln]
else:
raise TypeError('unrecognized args in list')
elif isinstance(soln, tuple):
sym, sols = soln
soln = sym, {tuple(simplify(i) for i in j) for j in sols}
else:
raise TypeError('unrecognized solution type')
return soln
# find first successful solution
failed = []
got_s = set([])
result = []
for s in symbols:
xi, v = solve_linear(f, symbols=[s])
if xi == s:
# no need to check but we should simplify if desired
if flags.get('simplify', True):
v = simplify(v)
vfree = v.free_symbols
if got_s and any([ss in vfree for ss in got_s]):
# sol depends on previously solved symbols: discard it
continue
got_s.add(xi)
result.append({xi: v})
elif xi: # there might be a non-linear solution if xi is not 0
failed.append(s)
if not failed:
return result
for s in failed:
try:
soln = _solve(f, s, **flags)
for sol in soln:
if got_s and any([ss in sol.free_symbols for ss in got_s]):
# sol depends on previously solved symbols: discard it
continue
got_s.add(s)
result.append({s: sol})
except NotImplementedError:
continue
if got_s:
return result
else:
raise NotImplementedError(not_impl_msg % f)
symbol = symbols[0]
# /!\ capture this flag then set it to False so that no checking in
# recursive calls will be done; only the final answer is checked
flags['check'] = checkdens = check = flags.pop('check', True)
# build up solutions if f is a Mul
if f.is_Mul:
result = set()
for m in f.args:
if m in set([S.NegativeInfinity, S.ComplexInfinity, S.Infinity]):
result = set()
break
soln = _solve(m, symbol, **flags)
result.update(set(soln))
result = list(result)
if check:
# all solutions have been checked but now we must
# check that the solutions do not set denominators
# in any factor to zero
dens = flags.get('_denominators', _simple_dens(f, symbols))
result = [s for s in result if
all(not checksol(den, {symbol: s}, **flags) for den in
dens)]
# set flags for quick exit at end; solutions for each
# factor were already checked and simplified
check = False
flags['simplify'] = False
elif f.is_Piecewise:
result = set()
for i, (expr, cond) in enumerate(f.args):
if expr.is_zero:
raise NotImplementedError(
'solve cannot represent interval solutions')
candidates = _solve(expr, symbol, **flags)
# the explicit condition for this expr is the current cond
# and none of the previous conditions
args = [~c for _, c in f.args[:i]] + [cond]
cond = And(*args)
for candidate in candidates:
if candidate in result:
# an unconditional value was already there
continue
try:
v = cond.subs(symbol, candidate)
_eval_simplify = getattr(v, '_eval_simplify', None)
if _eval_simplify is not None:
# unconditionally take the simplification of v
v = _eval_simplify(ratio=2, measure=lambda x: 1)
except TypeError:
# incompatible type with condition(s)
continue
if v == False:
continue
result.add(Piecewise(
(candidate, v),
(S.NaN, True)))
# set flags for quick exit at end; solutions for each
# piece were already checked and simplified
check = False
flags['simplify'] = False
else:
# first see if it really depends on symbol and whether there
# is only a linear solution
f_num, sol = solve_linear(f, symbols=symbols)
if f_num is S.Zero or sol is S.NaN:
return []
elif f_num.is_Symbol:
# no need to check but simplify if desired
if flags.get('simplify', True):
sol = simplify(sol)
return [sol]
result = False # no solution was obtained
msg = '' # there is no failure message
# Poly is generally robust enough to convert anything to
# a polynomial and tell us the different generators that it
# contains, so we will inspect the generators identified by
# polys to figure out what to do.
# try to identify a single generator that will allow us to solve this
# as a polynomial, followed (perhaps) by a change of variables if the
# generator is not a symbol
try:
poly = Poly(f_num)
if poly is None:
raise ValueError('could not convert %s to Poly' % f_num)
except GeneratorsNeeded:
simplified_f = simplify(f_num)
if simplified_f != f_num:
return _solve(simplified_f, symbol, **flags)
raise ValueError('expression appears to be a constant')
gens = [g for g in poly.gens if g.has(symbol)]
def _as_base_q(x):
"""Return (b**e, q) for x = b**(p*e/q) where p/q is the leading
Rational of the exponent of x, e.g. exp(-2*x/3) -> (exp(x), 3)
"""
b, e = x.as_base_exp()
if e.is_Rational:
return b, e.q
if not e.is_Mul:
return x, 1
c, ee = e.as_coeff_Mul()
if c.is_Rational and c is not S.One: # c could be a Float
return b**ee, c.q
return x, 1
if len(gens) > 1:
# If there is more than one generator, it could be that the
# generators have the same base but different powers, e.g.
# >>> Poly(exp(x) + 1/exp(x))
# Poly(exp(-x) + exp(x), exp(-x), exp(x), domain='ZZ')
#
# If unrad was not disabled then there should be no rational
# exponents appearing as in
# >>> Poly(sqrt(x) + sqrt(sqrt(x)))
# Poly(sqrt(x) + x**(1/4), sqrt(x), x**(1/4), domain='ZZ')
bases, qs = list(zip(*[_as_base_q(g) for g in gens]))
bases = set(bases)
if len(bases) > 1 or not all(q == 1 for q in qs):
funcs = set(b for b in bases if b.is_Function)
trig = set([_ for _ in funcs if
isinstance(_, TrigonometricFunction)])
other = funcs - trig
if not other and len(funcs.intersection(trig)) > 1:
newf = TR1(f_num).rewrite(tan)
if newf != f_num:
# don't check the rewritten form --check
# solutions in the un-rewritten form below
flags['check'] = False
result = _solve(newf, symbol, **flags)
flags['check'] = check
# just a simple case - see if replacement of single function
# clears all symbol-dependent functions, e.g.
# log(x) - log(log(x) - 1) - 3 can be solved even though it has
# two generators.
if result is False and funcs:
funcs = list(ordered(funcs)) # put shallowest function first
f1 = funcs[0]
t = Dummy('t')
# perform the substitution
ftry = f_num.subs(f1, t)
# if no Functions left, we can proceed with usual solve
if not ftry.has(symbol):
cv_sols = _solve(ftry, t, **flags)
cv_inv = _solve(t - f1, symbol, **flags)[0]
sols = list()
for sol in cv_sols:
sols.append(cv_inv.subs(t, sol))
result = list(ordered(sols))
if result is False:
msg = 'multiple generators %s' % gens
else:
# e.g. case where gens are exp(x), exp(-x)
u = bases.pop()
t = Dummy('t')
inv = _solve(u - t, symbol, **flags)
if isinstance(u, (Pow, exp)):
# this will be resolved by factor in _tsolve but we might
# as well try a simple expansion here to get things in
# order so something like the following will work now without
# having to factor:
#
# >>> eq = (exp(I*(-x-2))+exp(I*(x+2)))
# >>> eq.subs(exp(x),y) # fails
# exp(I*(-x - 2)) + exp(I*(x + 2))
# >>> eq.expand().subs(exp(x),y) # works
# y**I*exp(2*I) + y**(-I)*exp(-2*I)
def _expand(p):
b, e = p.as_base_exp()
e = expand_mul(e)
return expand_power_exp(b**e)
ftry = f_num.replace(
lambda w: w.is_Pow or isinstance(w, exp),
_expand).subs(u, t)
if not ftry.has(symbol):
soln = _solve(ftry, t, **flags)
sols = list()
for sol in soln:
for i in inv:
sols.append(i.subs(t, sol))
result = list(ordered(sols))
elif len(gens) == 1:
# There is only one generator that we are interested in, but
# there may have been more than one generator identified by
# polys (e.g. for symbols other than the one we are interested
# in) so recast the poly in terms of our generator of interest.
# Also use composite=True with f_num since Poly won't update
# poly as documented in issue 8810.
poly = Poly(f_num, gens[0], composite=True)
# if we aren't on the tsolve-pass, use roots
if not flags.pop('tsolve', False):
soln = None
deg = poly.degree()
flags['tsolve'] = True
solvers = {k: flags.get(k, True) for k in
('cubics', 'quartics', 'quintics')}
soln = roots(poly, **solvers)
if sum(soln.values()) < deg:
# e.g. roots(32*x**5 + 400*x**4 + 2032*x**3 +
# 5000*x**2 + 6250*x + 3189) -> {}
# so all_roots is used and RootOf instances are
# returned *unless* the system is multivariate
# or high-order EX domain.
try:
soln = poly.all_roots()
except NotImplementedError:
if not flags.get('incomplete', True):
raise NotImplementedError(
filldedent('''
Neither high-order multivariate polynomials
nor sorting of EX-domain polynomials is supported.
If you want to see any results, pass keyword incomplete=True to
solve; to see numerical values of roots
for univariate expressions, use nroots.
'''))
else:
pass
else:
soln = list(soln.keys())
if soln is not None:
u = poly.gen
if u != symbol:
try:
t = Dummy('t')
iv = _solve(u - t, symbol, **flags)
soln = list(ordered({i.subs(t, s) for i in iv for s in soln}))
except NotImplementedError:
# perhaps _tsolve can handle f_num
soln = None
else:
check = False # only dens need to be checked
if soln is not None:
if len(soln) > 2:
# if the flag wasn't set then unset it since high-order
# results are quite long. Perhaps one could base this
# decision on a certain critical length of the
# roots. In addition, wester test M2 has an expression
# whose roots can be shown to be real with the
# unsimplified form of the solution whereas only one of
# the simplified forms appears to be real.
flags['simplify'] = flags.get('simplify', False)
result = soln
# fallback if above fails
# -----------------------
if result is False:
# try unrad
if flags.pop('_unrad', True):
try:
u = unrad(f_num, symbol)
except (ValueError, NotImplementedError):
u = False
if u:
eq, cov = u
if cov:
isym, ieq = cov
inv = _solve(ieq, symbol, **flags)[0]
rv = {inv.subs(isym, xi) for xi in _solve(eq, isym, **flags)}
else:
try:
rv = set(_solve(eq, symbol, **flags))
except NotImplementedError:
rv = None
if rv is not None:
result = list(ordered(rv))
# if the flag wasn't set then unset it since unrad results
# can be quite long or of very high order
flags['simplify'] = flags.get('simplify', False)
else:
pass # for coverage
# try _tsolve
if result is False:
flags.pop('tsolve', None) # allow tsolve to be used on next pass
try:
soln = _tsolve(f_num, symbol, **flags)
if soln is not None:
result = soln
except PolynomialError:
pass
# ----------- end of fallback ----------------------------
if result is False:
raise NotImplementedError('\n'.join([msg, not_impl_msg % f]))
if flags.get('simplify', True):
result = list(map(simplify, result))
# we just simplified the solution so we now set the flag to
# False so the simplification doesn't happen again in checksol()
flags['simplify'] = False
if checkdens:
# reject any result that makes any denom. affirmatively 0;
# if in doubt, keep it
dens = _simple_dens(f, symbols)
result = [s for s in result if
all(not checksol(d, {symbol: s}, **flags)
for d in dens)]
if check:
# keep only results if the check is not False
result = [r for r in result if
checksol(f_num, {symbol: r}, **flags) is not False]
return result | af2c8de5f2ee7cdfc41856ffe438c2bf0fcaee78 | 13,047 |
import numpy
def get_object_ratio(obj):
"""Calculate the ratio of the object's size in comparison to the whole image
:param obj: the binarized object image
:type obj: numpy.ndarray
:returns: float -- the ratio
"""
return numpy.count_nonzero(obj) / float(obj.size) | fd18e460be32037c73fe75c8fa5eef5ba6c1c217 | 13,048 |
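Quick check on a tiny binary mask (made-up data): 4 nonzero pixels out of 16 gives a ratio of 0.25.
import numpy
obj = numpy.zeros((4, 4), dtype=numpy.uint8)
obj[1:3, 1:3] = 1
print(get_object_ratio(obj))  # 0.25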
def get_region(ds, region):
""" Return a region from a provided DataArray or Dataset
Parameters
----------
region : xarray DataArray or list
Boolean mask of the region to keep
"""
return ds.where(region, drop=True) | 102b672f8040b722ec346435775cba1056485ae2 | 13,049 |
def read_borehole_file(path, fix_df=True):
"""Returns the df with the depths for each borehole in one single row instead
instead being each chunck a new row"""
df = pd.read_table(path,
skiprows=41,
header=None,
sep='\t',
)
df.rename(columns={1: 'x', 2: 'y', 3: 'name',
4: 'num', 5: 'z', 6: 'year', 10: 'altitude'},
inplace=True)
if fix_df:
df['name'] = df['name'] + df['num']
n_fixed_columns = 11
n_segments_per_well = 15
n_wells = df.shape[0]
# Repeat fixed rows (collar name and so)
df_fixed = df.iloc[:, :n_fixed_columns]
df_fixed = df_fixed.loc[df_fixed.index.repeat(
n_segments_per_well)]
# Add a formation to each segment
tiled_formations = pd.np.tile(formations, (n_wells))
df_fixed['formation'] = tiled_formations
# Add the segments base to the df
df_bottoms = df.iloc[:,
n_fixed_columns:n_fixed_columns + n_segments_per_well]
df_fixed['base'] = df_bottoms.values.reshape(-1, 1, order='C')
# Adding tops column from collar and base
df_fixed = ss.io.wells.add_tops_from_base_and_altitude_in_place(
df_fixed,
'name',
'base',
'altitude'
)
# Fixing boreholes that have the base higher than the top
top_base_error = df_fixed["top"] > df_fixed["base"]
df_fixed["base"][top_base_error] = df_fixed["top"] + 0.01
# Add real coord
df_fixed['z'] = df_fixed['altitude'] - df_fixed['md']
df = df_fixed
return df | 50c3df5a3d2aae2a0f58b555380efb9fd63a90e1 | 13,050 |
def cpl_parse(path):
""" Parse DCP CPL """
cpl = generic_parse(
path, "CompositionPlaylist",
("Reel", "ExtensionMetadata", "PropertyList"))
if cpl:
cpl_node = cpl['Info']['CompositionPlaylist']
cpl_dcnc_parse(cpl_node)
cpl_reels_parse(cpl_node)
return cpl | a025bf82bdeac13d6c7cfbca95d667f2ae58c8f9 | 13,051 |
def notfound():
"""Serve 404 template."""
return make_response(render_template('404.html'), 404) | d81d794bad67c8128b8f6e55dbc5383bda7a1405 | 13,052 |
from typing import Tuple
from typing import List
def read_network(file: str) -> Tuple[int, int, List[int]]:
"""
Read a Boolean network from a text file:
Line 1: number of state variables
Line 2: number of control inputs
Line 3: transition matrix of the network (linear representation of a logical matrix)
:param file: a text file
:return: (n, m, Lm), where
n: number of state variables
m: number of control inputs
Lm: network transition matrix
"""
with open(file, 'r') as f:
n = int(f.readline().strip())
m = int(f.readline().strip())
N = 2 ** n
M = 2 ** m
line = f.readline().strip()
assert line, f'network transition matrix must be provided!'
numbers = line.split()
assert len(numbers) == M * N, f'The transition matrix must have {M * N} columns'
L = [int(num) for num in numbers]
for i in L:
assert 1 <= i <= N, f'All integers in the network transition matrix must be in range [1, {N}]'
return n, m, L | 217bd86f8d00cf27cf80d1a199b76b023a374f10 | 13,053 |
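A hedged sketch of the expected file layout (a made-up 1-state, 1-input network written to a temporary path): line 1 is n, line 2 is m, line 3 holds the M*N column indices of the transition matrix.
from pathlib import Path
Path('net.txt').write_text('1\n1\n2 1 1 2\n')
n, m, L = read_network('net.txt')
print(n, m, L)  # 1 1 [2, 1, 1, 2]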
def bundle_products_list(request,id):
"""
This view renders the Bundle Products list page."""
bundle = get_object_or_404(Bundle, bundle_id=id)
bundleProd = BundleProducts.objects.filter(bundle=id)
stocks = Stock.objects.all()
context = {
"title": "Bundle Products List",
"bundle": bundle,
"bundleproducts": bundleProd,
"stocks": stocks
}
return render(request, 'bundle_products.html',context) | 3afef4fdd2886300bc2fbda306bc05b499a47d0f | 13,055 |
def rot_x(theta):
"""
Rotation matrix around X axis
:param theta: Rotation angle in radians, right-handed
:return: Rotation matrix in form of (3,3) 2D numpy array
"""
return rot_axis(0,theta) | d4a892ed5ede6ffd2353b0121bec640e81c23ec7 | 13,056 |
def ValidateEntryPointNameOrRaise(entry_point):
"""Checks if a entry point name provided by user is valid.
Args:
entry_point: Entry point name provided by user.
Returns:
Entry point name.
Raises:
ArgumentTypeError: If the entry point name provided by user is not valid.
"""
return _ValidateArgumentByRegexOrRaise(entry_point, _ENTRY_POINT_NAME_RE,
_ENTRY_POINT_NAME_ERROR) | 7175e63562b04aba430044e0898db7368b68fb23 | 13,057 |
def park2_4_z(z, x):
""" Computes the Parkd function. """
y1 = x[0][0]
y2 = x[0][1]
chooser = x[1]
y3 = (x[2] - 103.0) / 91.0
y4 = x[3] + 10.0
x = [y1, y2, y3, y4]
if chooser == 'rabbit':
ret = sub_park_1(x)
elif chooser == 'dog':
ret = sub_park_2(x)
elif chooser == 'gerbil':
ret = sub_park_3(x)
elif chooser in ['hamster', 'ferret']:
ret = sub_park_4(x)
return ret * np.exp(z - 1) | 458ba79ada010b3c93419719b68f7a953908b184 | 13,058 |
import re
def get_string_coords(line):
"""return a list of string positions (tuple (start, end)) in the line
"""
result = []
for match in re.finditer(STRING_RGX, line):
result.append( (match.start(), match.end()) )
return result | a8fd7443ce242ce4f84196947fb4d82c2ff0d20e | 13,059 |
def array_from_pixbuf(p):
"""Convert from GdkPixbuf to numpy array"
Args:
p (GdkPixbuf): The GdkPixbuf provided from some window handle
Returns:
ndarray: The numpy array arranged for the pixels in height, width, RGBA order
"""
w,h,c,r=(p.get_width(), p.get_height(), p.get_n_channels(), p.get_rowstride())
assert p.get_colorspace() == GdkPixbuf.Colorspace.RGB
assert p.get_bits_per_sample() == 8
if p.get_has_alpha():
assert c == 4
else:
assert c == 3
assert r >= w * c
a=np.frombuffer(p.get_pixels(),dtype=np.uint8)
if a.shape[0] == w*c*h:
return a.reshape( (h, w, c), order = 'C' )
else:
b=np.zeros((h,w*c),'uint8')
for j in range(h):
b[j,:]=a[r*j:r*j+w*c]
return b.reshape( (h, w, c) ) | da2e980d804c283e2993049c63e3dacf67f7f0bd | 13,060 |
def entropy(x,k=3,base=2):
""" The classic K-L k-nearest neighbor continuous entropy estimator
x should be a list of vectors, e.g. x = [[1.3],[3.7],[5.1],[2.4]]
if x is a one-dimensional scalar and we have four samples
"""
assert k <= len(x)-1, "Set k smaller than num. samples - 1"
d = len(x[0])
N = len(x)
intens = 1e-10 #small noise to break degeneracy, see doc.
x = [list(p + intens*nr.rand(len(x[0]))) for p in x]
tree = ss.cKDTree(x)
nn = [tree.query(point,k+1,p=float('inf'))[0][k] for point in x]
const = digamma(N)-digamma(k) + d*log(2)
return (const + d*np.mean(list(map(log,nn))))/log(base) | 41d55d2bef2475ece27a487afb1e54d433bad5f0 | 13,061 |
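A usage sketch that leans on the snippet's module-level globals (nr = numpy.random, ss = scipy.spatial, digamma and log in scope -- assumptions here): estimate the entropy of 1-D standard-normal samples, which should land near 0.5*log2(2*pi*e), roughly 2.05 bits.
samples = [[v] for v in nr.randn(500)]
print(entropy(samples, k=3, base=2))  # roughly 2.0-2.1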
from typing import Optional
def s3upload_start(
request: HttpRequest,
workflow: Optional[Workflow] = None,
) -> HttpResponse:
"""Upload the S3 data as first step.
The four-step process will populate the following dictionary, named
upload_data (divided by the steps in which the fields are set):
STEP 1:
initial_column_names: List of column names in the initial file.
column_types: List of column types as detected by pandas
src_is_key_column: Boolean list with src columns that are unique
step_1: URL name of the first step
:param request: Web request
:return: Creates the upload_data dictionary in the session
"""
# Bind the form with the received data
form = UploadS3FileForm(
request.POST or None,
request.FILES or None,
workflow=workflow)
if request.method == 'POST' and form.is_valid():
# Dictionary to populate gradually throughout the sequence of steps. It
# is stored in the session.
request.session['upload_data'] = {
'initial_column_names': form.frame_info[0],
'column_types': form.frame_info[1],
'src_is_key_column': form.frame_info[2],
'step_1': reverse('dataops:csvupload_start')}
return redirect('dataops:upload_s2')
return render(
request,
'dataops/upload1.html',
{
'form': form,
'wid': workflow.id,
'dtype': 'S3 CSV',
'dtype_select': _('S3 CSV file'),
'valuerange': range(5) if workflow.has_table() else range(3),
'prev_step': reverse('dataops:uploadmerge')}) | b3fc1ac6c3754df836d8c219b0fb416f9d5973ce | 13,063 |
def search_explorations(query, limit, sort=None, cursor=None):
"""Searches through the available explorations.
args:
- query_string: the query string to search for.
- sort: a string indicating how to sort results. This should be a string
of space separated values. Each value should start with a '+' or a
'-' character indicating whether to sort in ascending or descending
order respectively. This character should be followed by a field name
to sort on. When this is None, results are based on 'rank'. See
_get_search_rank to see how rank is determined.
- limit: the maximum number of results to return.
- cursor: A cursor, used to get the next page of results.
If there are more documents that match the query than 'limit', this
function will return a cursor to get the next page.
returns: a tuple:
- a list of exploration ids that match the query.
- a cursor if there are more matching explorations to fetch, None
otherwise. If a cursor is returned, it will be a web-safe string that
can be used in URLs.
"""
return search_services.search(
query, SEARCH_INDEX_EXPLORATIONS, cursor, limit, sort, ids_only=True) | bead5de6f9803a7715ad497bb1f5c22da1faf296 | 13,064 |
import pkgutil
def find_resourceadapters():
"""
Finds all resource adapter classes.
:return List[ResourceAdapter]: a list of all resource adapter classes
"""
subclasses = []
def look_for_subclass(module_name):
module = __import__(module_name)
d = module.__dict__
for m in module_name.split('.')[1:]:
d = d[m].__dict__
for key, entry in d.items():
if key == tortuga.resourceAdapter.resourceAdapter.ResourceAdapter.__name__:
continue
try:
if issubclass(entry, tortuga.resourceAdapter.resourceAdapter.ResourceAdapter):
subclasses.append(entry)
except TypeError:
continue
for _, modulename, _ in pkgutil.walk_packages(
tortuga.resourceAdapter.__path__):
look_for_subclass('tortuga.resourceAdapter.{0}'.format(modulename))
return subclasses | 3aab6e6b28fa69cf9e7b1c8bc04589c69e43a3ee | 13,065 |
def print_scale(skill, points):
"""Return TeX lines for a skill scale."""
lines = ['\\cvskill{']
lines[0] += skill
lines[0] += '}{'
lines[0] += str(points)
lines[0] += '}\n'
return lines | c88de0c6db9e7b92dbcee025f42f56817a4aa033 | 13,066 |
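Quick check: the single TeX line produced for one skill entry.
print(print_scale('Python', 4))  # ['\\cvskill{Python}{4}\n']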
def print_(fh, *args):
"""Implementation of perl $fh->print method"""
global OS_ERROR, TRACEBACK, AUTODIE
try:
print(*args, end='', file=fh)
return True
except Exception as _e:
OS_ERROR = str(_e)
if TRACEBACK:
cluck(f"print failed: {OS_ERROR}",skip=2)
if AUTODIE:
raise
return False | 8289aba67cb81b710d04da609ea63c65fa986e21 | 13,068 |
def subprocess(mocker):
""" Mock the subprocess and make sure it returns a value """
def with_return_value(value: int = 0, stdout: str = ""):
mock = mocker.patch(
"subprocess.run", return_value=CompletedProcess(None, returncode=0)
)
mock.returncode.return_value = value
mock.stdout = stdout
return mock
return with_return_value | 4b7140127eeb2d9202ed976518a121fed5fac302 | 13,070 |
def ljust(string, width):
"""
A version of ljust that considers the terminal width (see
get_terminal_width)
"""
width -= get_terminal_width(string)
return string + " " * width | e9c6ab8bbeeb268bc82f479e768be32f74fab488 | 13,071 |
import operator
def device_sort (device_set):
"""Sort a set of devices by self_id. Can't be used with PendingDevices!"""
return sorted(device_set, key = operator.attrgetter ('self_id')) | 92a22a87b5b923771cd86588180a8c6eb15b9fdf | 13,072 |
def _ontology_value(curie):
"""Get the id component of the curie, 0000001 from CL:0000001 for example."""
return curie.split(":")[1] | 7ef1f0874e698c498ccef16294c0469f67cd5233 | 13,073 |
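Quick check: the id component is everything after the colon in the CURIE.
print(_ontology_value('CL:0000001'))  # prints 0000001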
def readpacket( timeout=1000, hexdump=False ):
"""Reads a HP format packet (length, data, checksum) from device.
Handles error recovery and ACKing.
Returns data or prints hexdump if told so.
"""
data = protocol.readpacket()
if hexdump:
print(hpstr.tohexstr(data))
else:
return data | d673e61974058fc73a47bd0e5856563c9f5370bf | 13,075 |
def df_down_next_empty_pos(df, pos):
"""
Given a position `pos` at `(c, r)`, reads down column `c` from row `r` to find the next
empty cell.
Returns the position of that cell if found, or `None` otherwise.
"""
return df_down_next_matching_pos(df, pos, pd.isna) | 79fdba60e6a5846c39fb1141f3d21430230c2a31 | 13,076 |
def optimise_f2_thresholds(y, p, verbose=False, resolution=100):
"""Optimize individual thresholds one by one. Code from anokas.
Inputs
------
y: numpy array, true labels
p: numpy array, predicted labels
"""
n_labels = y.shape[1]
def mf(x):
p2 = np.zeros_like(p)
for i in range(n_labels):
p2[:, i] = (p[:, i] > x[i]).astype(int)
score = fbeta_score(y, p2, beta=2, average='samples')
return score
x = [0.2]*n_labels
for i in range(n_labels):
best_i2 = 0
best_score = 0
for i2 in range(resolution):
i2 /= resolution
x[i] = i2
score = mf(x)
if score > best_score:
best_i2 = i2
best_score = score
x[i] = best_i2
if verbose:
print(i, best_i2, best_score)
return x | 5f1ad6dda86229cffb7167f5cc3365c601048937 | 13,077 |
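A hedged usage sketch (assumes fbeta_score from scikit-learn is in scope, as the snippet does, with random toy labels): tune one threshold per label on a small multi-label problem.
import numpy as np
rng = np.random.default_rng(1)
y = (rng.random((50, 3)) > 0.5).astype(int)
p = np.clip(y * 0.7 + rng.random((50, 3)) * 0.3, 0, 1)
print(optimise_f2_thresholds(y, p, resolution=20))  # one threshold per label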
def holding_vars():
""" input
This is experimental, used to indicate unbound (free) variables in
a sum or list comprehensive.
This is inspired by Harrison's {a | b | c} set comprehension notation.
>>> pstream(holding_vars(),', holding x,y,z')
Etok(holding_vars,', holding x , y , z')
"""
def f(acc):
((_,_),cs) = acc
return Etok(name='holding_vars',etoks=cs[0::2],raw=acc)
return (comma + next_word('holding') + c.plus_comma(var())).treat(f,'holding_vars') | 5566bc97e2fa972b1ccde4d24f30fb06635bdcb7 | 13,078 |
import operator as op
import re
def select_with_several_genes(accessions, name, pattern,
description_items=None,
attribute='gene',
max_items=3):
"""
This will select the best description for databases where more than one
gene (or other attribute) map to a single URS. The idea is that if there
are several genes we should use the lowest one (RNA5S1, over RNA5S17) and
show the names of genes, if possible. This will list the genes if there are
few, otherwise provide a note that there are several.
"""
getter = op.attrgetter(attribute)
candidate = min(accessions, key=getter)
genes = set(getter(a) for a in accessions if getter(a))
if not genes or len(genes) == 1:
description = candidate.description
# Append gene name if it exists and is not present in the description
# already
if genes:
suffix = genes.pop()
if suffix not in description:
description += ' (%s)' % suffix
return description
regexp = pattern % getter(candidate)
basic = re.sub(regexp, '', candidate.description)
func = getter
if description_items is not None:
func = op.attrgetter(description_items)
items = sorted([func(a) for a in accessions if func(a)], key=item_sorter)
if not items:
return basic
return add_term_suffix(basic, items, name, max_items=max_items) | 04df56e64259aafd1e0d5b0d68839d8016514cb7 | 13,079 |
def list_messages_matching_query(service, user_id, query=''):
"""List all Messages of the user's mailbox matching the query.
Args:
service: Authorized Gmail API service instance.
user_id: User's email address. The special value "me"
can be used to indicate the authenticated user.
query: String used to filter messages returned.
Eg.- 'from:user@some_domain.com' for Messages from a particular sender.
Returns:
List of Messages that match the criteria of the query. Note that the
returned list contains Message IDs, you must use get with the
appropriate ID to get the details of a Message.
"""
try:
response = service.users().messages().list(userId=user_id,
q=query).execute()
messages = []
if 'messages' in response:
messages.extend(response['messages'])
while 'nextPageToken' in response:
page_token = response['nextPageToken']
response = service.users().messages().list(
userId=user_id, q=query, pageToken=page_token).execute()
messages.extend(response['messages'])
return messages
except errors.HttpError as error:
print('An error occurred: %s' % error) | a6ec376d7cfb4a6c724646a0e4d9ac1b86526ae7 | 13,080 |
def write_to_string(input_otio, **profile_data):
"""
:param input_otio: Timeline, Track or Clip
:param profile_data: Properties passed to the profile tag describing
the format, frame rate, colorspace and so on. If a passed Timeline has
`global_start_time` set, the frame rate will be set automatically.
Please note that numeric values must be passed as strings.
Please check MLT website for more info on profiles.
You may pass an "image_producer" argument with "pixbuf" to change
image sequence producer. The default image sequence producer is "image2"
:return: MLT formatted XML
:rtype: `str`
"""
mlt_adapter = MLTAdapter(input_otio, **profile_data)
return mlt_adapter.create_mlt() | 36a0e7fe741b4c216bd068b8544d68c63176d679 | 13,081 |
import re
def parse_IS(reply: bytes, device: str):
"""Parses the reply to the shutter IS command."""
match = re.search(b"\x00\x07IS=([0-1])([0-1])[0-1]{6}\r$", reply)
if match is None:
return False
if match.groups() == (b"1", b"0"):
if device in ["shutter", "hartmann_right"]:
return "open"
else:
return "closed"
elif match.groups() == (b"0", b"1"):
if device in ["shutter", "hartmann_right"]:
return "closed"
else:
return "open"
else:
return False | 827b5ebf5c98bcc65b823276d5ab5b8086a2c069 | 13,082 |
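Quick check with a made-up reply that matches the regex: the first two status bits decide open/closed, and the meaning flips for devices other than the shutter and right Hartmann door.
reply = b"\x00\x07IS=10000000\r"
print(parse_IS(reply, 'shutter'))        # open
print(parse_IS(reply, 'hartmann_left'))  # closed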
def quatXYZWFromRotMat(rot_mat):
"""Convert quaternion from rotation matrix"""
quatWXYZ = quaternions.mat2quat(rot_mat)
quatXYZW = quatToXYZW(quatWXYZ, 'wxyz')
return quatXYZW | 2a0a736c3950dca481c993e9801e14b362f78940 | 13,083 |
import sqlite3
def schema_is_current(db_connection: sqlite3.Connection) -> bool:
"""
Given an existing database, checks to see whether the schema version in the existing
database matches the schema version for this version of Gab Tidy Data.
"""
db = db_connection.cursor()
db.execute(
"""
select metadata_value from _gab_tidy_data
where metadata_key = 'schema_version'
"""
)
db_schema_version = db.fetchone()[0]
return db_schema_version == data_mapping.schema_version | 183502c292f9bb92e18a4ea7767028bea4e746fb | 13,084 |
def xattr_writes_supported(path):
"""
Returns True if we can write a file to the supplied
path and subsequently write an xattr to that file.
"""
try:
import xattr
except ImportError:
return False
def set_xattr(path, key, value):
xattr.setxattr(path, "user.%s" % key, value)
# We do a quick attempt to write a user xattr to a temporary file
# to check that the filesystem is even enabled to support xattrs
fake_filepath = os.path.join(path, 'testing-checkme')
result = True
with open(fake_filepath, 'wb') as fake_file:
fake_file.write(b"XXX")
fake_file.flush()
try:
set_xattr(fake_filepath, 'hits', b'1')
except IOError as e:
if e.errno == errno.EOPNOTSUPP:
result = False
else:
# Cleanup after ourselves...
if os.path.exists(fake_filepath):
os.unlink(fake_filepath)
return result | 4992f2f5808575eac1f816aa09d80ff881286368 | 13,085 |
def _lovasz_softmax(probabilities, targets, classes="present", per_image=False, ignore=None):
"""The multiclass Lovasz-Softmax loss.
Args:
probabilities: [B, C, H, W]
class probabilities at each prediction (between 0 and 1).
Interpreted as binary (sigmoid) output
with outputs of size [B, H, W].
targets: [B, H, W] ground truth targets (between 0 and C - 1)
classes: "all" for all,
"present" for classes present in targets,
or a list of classes to average.
per_image: compute the loss per image instead of per batch
ignore: void class targets
"""
if per_image:
loss = mean(
_lovasz_softmax_flat(
*_flatten_probabilities(prob.unsqueeze(0), lab.unsqueeze(0), ignore),
classes=classes
)
for prob, lab in zip(probabilities, targets)
)
else:
loss = _lovasz_softmax_flat(
*_flatten_probabilities(probabilities, targets, ignore), classes=classes
)
return loss | c46006c921d1f40b5b86ff861750a9d89ec4bbdc | 13,086 |
def encodeDERTRequest(negoTypes = [], authInfo = None, pubKeyAuth = None):
"""
@summary: create TSRequest from list of Type
@param negoTypes: {list(Type)}
@param authInfo: {str} authentication info TSCredentials encrypted with authentication protocol
@param pubKeyAuth: {str} public key encrypted with authentication protocol
@return: {str} TSRequest der encoded
"""
negoData = NegoData().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))
#fill nego data tokens
i = 0
for negoType in negoTypes:
s = Stream()
s.writeType(negoType)
negoToken = NegoToken()
negoToken.setComponentByPosition(0, s.getvalue())
negoData.setComponentByPosition(i, negoToken)
i += 1
request = TSRequest()
request.setComponentByName("version", univ.Integer(2).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
if i > 0:
request.setComponentByName("negoTokens", negoData)
if not authInfo is None:
request.setComponentByName("authInfo", univ.OctetString(authInfo).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
if not pubKeyAuth is None:
request.setComponentByName("pubKeyAuth", univ.OctetString(pubKeyAuth).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)))
return der_encoder.encode(request) | bba9ed483eec2ef39927689a8924cbcc15a2093e | 13,087 |
def hierholzer(network: Network, source=0):
""" Hierholzer's algorithm for finding an Euler cycle
Args:
network (Network): network object
source(int): node where starts (and ends) the path
Raises:
NotEulerianNetwork: if exists at least one node with odd degree
NotNetworkNode: if source is not in the network
Returns:
list of nodes that form a path visiting all edges
References:
.. [1] sanjeev2552, heruslu, Code_Mech,
Geeks For Geeks, A computer science portal for geeks
https://www.geeksforgeeks.org/hierholzers-algorithm-directed-graph/
.. [2] Reinhard Diestel,
Graph Theory,
Springer, Volume 173 of Graduate texts in mathematics, ISSN 0072-5285
"""
if source > network.n:
raise NotNetworkNode(f"Source node {source} is not in the network (N={network.n})")
path = []
temp_path = []
degrees_list = deepcopy(network.degrees_list)
edges_basket = deepcopy(network.edges_basket)
if network.n == 0:
return path
eulerian, odd_degree_nodes = is_eulerian(network)
if not eulerian:
raise NotEulerianNetwork(f"Network is not Eulerian, not all nodes are even degree: {odd_degree_nodes}")
temp_path.append(source)
temp_node = source
while len(temp_path):
if degrees_list[temp_node]:
temp_path.append(temp_node)
next_node = edges_basket[temp_node][-1]
degrees_list[temp_node] -= 1
edges_basket[temp_node].pop()
if not network.directed:
degrees_list[next_node] -= 1
i = edges_basket[next_node].index(temp_node)
del edges_basket[next_node][i]
temp_node = next_node
else:
path.append(temp_node)
temp_node = temp_path[-1]
temp_path.pop()
# If the network is directed we will revert the path
if network.directed:
return path[::-1]
return path | 9a1fb1107e9a2b086d1716cea7708dba9849fb4e | 13,089 |
def fit1d(xdata,zdata,degree=1,reject=0,ydata=None,plot=None,plot2d=False,xr=None,yr=None,zr=None,xt=None,yt=None,zt=None,pfit=None,log=False,colorbar=False,size=5) :
"""
Do a 1D polynomial fit to data set and plot if requested
Args:
xdata : independent variable
zdata : dependent variable to be fit
Keyword args:
degree: degree of polynomial to fit (default=1 for linear fit)
reject : single iteration rejection of points that deviate from initial by more than specified value (default=0, no rejection)
ydata : auxiliary variable for plots (default=None)
plot : axes to plot into (default=None)
plot2d (bool) : set to make a 2D plot with auxiliary variable, rather than 1D color-coded by auxiliary variable
xr[2] : xrange for plot
yr[2] : yrange for plot
zr[2] : zrange for plot
xt : xtitle for plot
yt : ytitle for plot
zt : ztitle for plot
Returns :
pfit : 1D polynomial fit
"""
# set up fitter and do fit
if pfit is None :
fit_p = fitting.LinearLSQFitter()
p_init = models.Polynomial1D(degree=degree)
pfit = fit_p(p_init, xdata, zdata)
# rejection of points?
if reject > 0 :
gd=np.where(abs(zdata-pfit(xdata)) < reject)[0]
bd=np.where(abs(zdata-pfit(xdata)) >= reject)[0]
print('rejected ',len(xdata)-len(gd),' of ',len(xdata),' points')
pfit = fit_p(p_init, xdata[gd], zdata[gd])
print('1D rms: ',(zdata-pfit(xdata)).std())
# plot if requested
if plot is not None :
if xr is None : xr = [xdata.min(),xdata.max()]
if yr is None and ydata is not None : yr = [ydata.min(),ydata.max()]
if log :
zplot=10.**zdata
else :
zplot=zdata
if zr is None : zr = [zplot.min(),zplot.max()]
if ydata is None :
x = np.linspace(xr[0],xr[1],200)
if log :
zfit=10.**pfit(x)
else :
zfit=pfit(x)
# straight 1D plot
plots.plotp(plot,xdata,zplot,xr=xr,yr=yr,zr=zr,
xt=xt,yt=yt,size=size)
plots.plotl(plot,x,zfit)
elif plot2d :
# 2D image plot with auxiliary variable
y, x = np.mgrid[yr[1]:yr[0]:200j, xr[1]:xr[0]:200j]
if log :
zfit=10.**pfit(x)
else :
zfit=pfit(x)
plots.plotc(plot,xdata,ydata,zplot,xr=xr,yr=yr,zr=zr,
xt=xt,yt=xt,zt=yt,colorbar=True,size=size,cmap='rainbow')
plot.imshow(zfit,extent=[xr[1],xr[0],yr[1],yr[0]],
aspect='auto',vmin=zr[0],vmax=zr[1], origin='lower',cmap='rainbow')
else :
# 1D plot color-coded by auxiliary variable
x = np.linspace(xr[0],xr[1],200)
if log :
zfit=10.**pfit(x)
else :
zfit=pfit(x)
plots.plotc(plot,xdata,zplot,ydata,xr=xr,yr=zr,zr=yr,
xt=xt,yt=yt,zt=zt,size=size,colorbar=colorbar)
plots.plotl(plot,x,zfit,color='k')
return pfit | 0c40a2b1af72c0df8523a92cd5cc80c99f631472 | 13,090 |
import torch
import torch.nn.functional as F
def nucleus_sampling(data, p, replace=0, ascending=False, above=True):
"""
:param tensor data: Input data
:param float p: Probability for filtering (or be replaced)
:param float replace: Default value is 0. If value is provided, input data will be replaced by this value
if data match criteria.
:param bool ascending: Return ascending order or descending order. Sorting will be executed if replace is None.
:param bool above: If True is passed, only value smaller than p will be kept (or not replaced)
:return: tensor Filtered result
"""
sorted_data, sorted_indices = torch.sort(data, descending=not ascending)
cum_probas = torch.cumsum(F.softmax(sorted_data, dim=-1), dim=-1)
if replace is None:
if above:
replace_idxes = cum_probas < p
else:
replace_idxes = cum_probas > p
idxes = sorted_indices[replace_idxes]
else:
if above:
replace_idxes = cum_probas > p
else:
replace_idxes = cum_probas < p
idxes = sorted_indices[~replace_idxes]
if replace is None:
sorted_data = sorted_data[replace_idxes]
else:
sorted_data[replace_idxes] = replace
return sorted_data, idxes | 6332e9f5e04fa2ec0130fa2db7dd5a8aad26caec | 13,091 |
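A hedged usage sketch for a single logit vector: with replace=None the function returns only the tokens whose cumulative softmax mass stays below p (top-p / nucleus filtering), together with their indices.
import torch
logits = torch.tensor([2.0, 1.0, 0.5, -1.0, -3.0])
kept, idxes = nucleus_sampling(logits, p=0.8, replace=None)
print(kept, idxes)  # only the highest-probability logit survives here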
from datetime import datetime
import json
def mark_ready_for_l10n_revision(request, document_slug, revision_id):
"""Mark a revision as ready for l10n."""
revision = get_object_or_404(Revision, pk=revision_id,
document__slug=document_slug)
if not revision.document.allows(request.user, 'mark_ready_for_l10n'):
raise PermissionDenied
if revision.can_be_readied_for_localization():
# We don't use update(), because that wouldn't update
# Document.latest_localizable_revision.
revision.is_ready_for_localization = True
revision.readied_for_localization = datetime.now()
revision.readied_for_localization_by = request.user
revision.save()
ReadyRevisionEvent(revision).fire(exclude=request.user)
return HttpResponse(json.dumps({'message': revision_id}))
return HttpResponseBadRequest() | 64d7d84ceab204a3d3fea98e9753fde486c4490c | 13,092 |
def is_all_maxed_out(bad_cube_counts, bad_cube_maximums):
"""Determines whether all the cubes of each type are at their maximum
amounts."""
for cube_type in CUBE_TYPES:
if bad_cube_counts[cube_type] < bad_cube_maximums[cube_type]:
return False
return True | 23332712b46d33a1a8e552ecf30389d4b0a10c90 | 13,093 |
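Quick check (CUBE_TYPES is a module-level constant in the source; a two-entry stand-in is assumed here).
CUBE_TYPES = ['red', 'blue']
print(is_all_maxed_out({'red': 3, 'blue': 2}, {'red': 3, 'blue': 2}))  # True
print(is_all_maxed_out({'red': 1, 'blue': 2}, {'red': 3, 'blue': 2}))  # False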
def get_local_vars(*args):
"""
get_local_vars(prov, ea, out) -> bool
"""
return _ida_dbg.get_local_vars(*args) | ebed21c8b90c48e76734f07a5e83c11bf5b9dd0c | 13,094 |
def gcc():
"""
getCurrentCurve
Get the last curve that was added to the last plot plot
:return: The last curve
:rtype: pg.PlotDataItem
"""
plotWin = gcf()
try:
return plotWin.plotWidget.plotItem.dataItems[-1]
except IndexError:
return None | 2f9226c51a84d39b43f1d8ef83969b94a2c308cd | 13,095 |
import requests
import json
def searchDevice(search):
"""
Method that searches the ExtraHop system for a device that
matches the specified search criteria
Parameters:
search (dict): The device search criteria
Returns:
dict: The metadata of the device that matches the criteria
"""
url = urlunparse(("https", HOST, "/api/v1/devices/search", "", "", ""))
headers = {"Authorization": "ExtraHop apikey=%s" % APIKEY}
r = requests.post(
url, headers=headers, verify=False, data=json.dumps(search)
)
return r.json()[0] | 9b65346054f099e4a2aa78035802b2de799850ac | 13,096 |
def regularmeshH8(nelx, nely, nelz, lx, ly, lz):
""" Creates a regular H8 mesh.
Args:
nelx (:obj:`int`): Number of elements on the X-axis.
nely (:obj:`int`): Number of elements on the Y-axis.
nelz (:obj:`int`): Number of elements on the Z-axis.
lx (:obj:`float`): X-axis length.
ly (:obj:`float`): Y-axis length.
lz (:obj:`float`): Z-axis length.
Returns:
Tuple with the coordinate matrix, connectivity, and the indexes of each node.
"""
x, y, z = np.linspace(0, lx, num=nelx + 1), np.linspace(0, ly, num=nely + 1), np.linspace(0, lz, num=nelz + 1)
nx, ny, nz = len(x), len(y), len(z)
mat_x = (x.reshape(nx, 1)@np.ones((1, ny*nz))).T
mat_y = y.reshape(ny, 1)@np.ones((1, nx))
mat_z = z.reshape(nz, 1)@np.ones((1, nx*ny))
x_t, y_t, z_t = mat_x.flatten(), np.tile(mat_y.flatten(), nz), mat_z.flatten()
ind_coord = np.arange(1, (nz)* nx * ny + 1, 1, dtype=int)
coord = (np.array([ind_coord, x_t, y_t, z_t])).T
# processing of connectivity matrix
ind_connect = np.arange(1, nelz * nelx * nely + 1, dtype=int)
mat_aux = ind_connect.reshape(nely, nelx, nelz)
a = np.arange(0, nely * nelz, 1)
for ind in range(nely, len(a), nely):
a[ind:] += nelx + 1
c = (a.reshape(len(a),1)@np.ones((1, nelx))).reshape(nely, nelx, nelz)
b = (mat_aux + c).flatten()
connect = np.array([ind_connect, b+(nelx+1), b, b+1, b+(nelx+2), \
b+(nelx+1)*(nely+1)+(nelx+1), b+(nelx+1)*(nely+1), \
b+1+(nelx+1)*(nely+1), b+(nelx+1)*(nely+1)+(nelx+2)], dtype=int).T
return coord, connect | 1c0050b8c48438f548e67f7776194a067c77ae39 | 13,097 |
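Usage sketch with assumed sizes (numpy imported as np, as the function body expects): a unit box split into 2x2x2 hexahedral elements gives 27 nodes and 8 elements.
coord, connect = regularmeshH8(2, 2, 2, 1.0, 1.0, 1.0)
print(coord.shape, connect.shape)  # (27, 4) and (8, 9)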
def only_t1t2(src, names):
"""
This function...
:param src:
:param names:
:return:
"""
if src.endswith("TissueClassify"):
# print "Keeping T1/T2!"
try:
names.remove("t1_average_BRAINSABC.nii.gz")
except ValueError:
pass
try:
names.remove("t2_average_BRAINSABC.nii.gz")
except ValueError:
pass
else:
names.remove("TissueClassify")
# print "Ignoring these files..."
# for name in names:
# print "\t" + name
return names | 60116fbc602bbe03f7c18776b623ef3680b9dfc1 | 13,098 |
def distanceEucl(a, b):
"""Calcul de la distance euclidienne en dimension quelconque"""
dist = np.linalg.norm(a - b)
return dist | 572d98aecf17cd1f0e34dcad9e07beb3bcf6d06d | 13,099 |
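Quick check: the 3-4-5 style distance between two 3-D points.
import numpy as np
print(distanceEucl(np.array([0, 0, 0]), np.array([1, 2, 2])))  # 3.0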
def _search(self, *query):
"""Search for a match between the query terms and a tensor's Id, Tag, or Description.
https://github.com/OpenMined/PySyft/issues/2609
Note that the query is an AND query meaning that every item in the list of strings (query*)
must be found somewhere on the tensor in order for it to be included in the results.
Args:
query: A list of strings to match against.
me: A reference to the worker calling the search.
Returns:
A list of PointerTensors.
"""
results = list()
for key, obj in self._objects.items():
found_something = True
for query_item in query:
# If deserialization produced a bytes object instead of a string,
# make sure it's turned back to a string or a fair comparison.
if isinstance(query_item, bytes):
query_item = query_item.decode("ascii")
match = False
if query_item == str(key):
match = True
if isinstance(obj, FrameworkTensor):
if obj.tags is not None:
if query_item in obj.tags:
match = True
if obj.description is not None:
if query_item in obj.description:
match = True
if not match:
found_something = False
if found_something:
# set garbage_collect_data to False because if we're searching
# for a tensor we don't own, then it's probably someone else's
# decision to decide when to delete the tensor.
ptr = obj.create_pointer(garbage_collect_data=False, owner=sy.local_worker)
results.append(ptr)
return results | 8ffd9ae2fc0eb9f5f01c9cd3d27123a316bad655 | 13,100 |
def val2str(val):
"""Writes values to a string.
Args:
val (any): Any object that should be represented by a string.
Returns:
valstr (str): String representation of `val`.
"""
# Return the input if it's a string
if isinstance(val,str ): valstr=val
# Handle types where spaces are added
elif isinstance(val,tuple): valstr=repr(val).replace(', ',',')
elif isinstance(val,list ): valstr=repr(val).replace(', ',',')
elif isinstance(val,dict ): valstr=repr(val).replace(', ',',').replace(': ',':')
# Otherwise use repr()
else: valstr=repr(val)
# Return output
return valstr | c8f26553ceeeef841239c534815f86293f91086a | 13,103 |
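Quick check: containers are rendered without spaces, strings pass through unchanged.
print(val2str([1, 2, 3]))   # [1,2,3]
print(val2str({'a': 1}))    # {'a':1}
print(val2str('plain'))     # plain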
def showItems(category_name):
"""Pulls all the Categories, the specific Category selected by the user
from the home page, all the items within that specific Category, and
then counts the number of items. All this information is displayed on the
items.html page.
"""
categories = session.query(Category).order_by(asc(Category.name))
category = session.query(Category).filter_by(name=category_name).one()
items = session.query(Item).filter_by(category_name=category_name).all()
itemscount = session.query(Item). \
filter_by(category_name=category_name). \
count()
return render_template('items.html', categories=categories, items=items,
category=category, itemscount=itemscount) | 0ef0c8dfca16a9f16a9d4a46c3d796e817710165 | 13,104 |
from datetime import datetime
import math
import calendar
def date_ranges():
"""Build date ranges for current day, month, quarter, and year.
"""
today = datetime.date.today()
quarter = math.floor((today.month - 1) / 3)
cycle = current_cycle()
return {
'month': (
today.replace(day=1),
today.replace(day=calendar.monthrange(today.year, today.month)[1]),
),
'quarter': (
today.replace(day=1, month=quarter * 3 + 1),
today.replace(
day=calendar.monthrange(today.year, quarter * 3 + 3)[1],
month=quarter * 3 + 3,
),
),
'year': (
today.replace(day=1, month=1),
today.replace(
day=calendar.monthrange(today.year, 12)[1],
month=12,
),
),
'cycle': (
datetime.date(
year=cycle - 1,
month=1,
day=1,
),
datetime.date(
year=cycle,
month=12,
day=calendar.monthrange(cycle, 12)[1],
),
),
} | 08feb47fe09d5a0d1c9e5e16bdcbd65d3e211e1e | 13,105 |
def FiskJohnsonDiscreteFuncBCKWD(r,F0,T):
"""Compute reverse Fourier-Bessel transformation via Fisk Johnson
procedure.
Compute reverse Fourier-Bessel transform (i.e. 0th order reverse Hankel
transform) using a rapidly convergent summation of a Fourier-Bessel
expansion following the metod introduced in Ref. [1] and further
detailed in Ref. [2].
Args:
r (numpy array, ndim=1): equispaced 1D grid of target coordinates.
F0 (numpy array, ndim=1): Fourier-Bessel transformed function
at discrete coordinates given by its scaled bessel zeros.
T (float): truncation threshold for objective function.
Returns:
f (numpy array, ndim=1): reverse Fourier-Bessel transform of input
function.
Notes:
- Fisk Johnson procedure for reverse Fourier-Bessel transformation.
- Implements Eq. (10) of Ref. [1].
- above truncation threshold it holds that f(r>T) = 0.
- on input F0 = F0[jm/T] for m = 0...N-1 where jm are the first
N zeros of the 0th order Bessel function in ascending order.
Refs:
[1] An Improved Method for Computing a Discrete Hankel Transform
H. Fisk Johnson
Comp. Phys. Commun. 43 (1987) 181-202
[2] Theory and operational rules for the discrete Hankel transform
N. Baddour, U. Chouinard
J. Opt. Soc. Am. A 32 (2015) 611
"""
# INITIALIZE EMPTY ARRAY FOR REVESE TRANSFORM
f = np.zeros(r.size)
# COMPUTE FIRST N ZEROS OF 0TH ORDER BESSEL FUNCTION IN ASCENDING ORDER
jm = scs.jn_zeros(0,F0.size)
# REVERSE TRANSFORM YIELDING ARBITRARY FUNCTION VALUES f(xT) FROM ITS
# FOURIER BESSEL TRANSFORM F(j[m]/T) m=0...N-1 AT SCALED BESSEL ZEROS
# j[m]/T. SEE EQ. (10) OF REF. [1].
x = r/T
f[x<1] = 2.0/T**2*np.sum(
F0*scs.j0(jm*x[x<1,np.newaxis])/scs.j1(jm)**2,
axis=1)
return f | f950323bcad980f8b0af94d5848b59cd3522adfc | 13,106 |
import plotly.graph_objs as go
def make_waterfall_horizontal(data, layout):
"""Function used to flip the figure from vertical to horizontal.
"""
    h_data = []
for i_trace, trace in enumerate(list(data)):
h_data.append(trace)
prov_x = h_data[i_trace]['x']
h_data[i_trace]['x'] = list(h_data[i_trace]['y'])[::-1]
h_data[i_trace]['y'] = list(prov_x)[::-1]
h_data[i_trace]['orientation'] = 'h'
h_data[i_trace]['hoverinfo'] = hoverinfo_horizontal_(
h_data[i_trace]['hoverinfo'])
h_annotations = []
for i_ann, annotation in enumerate(list(layout['annotations'])):
h_annotations.append(annotation)
prov_x = h_annotations[i_ann]['x']
h_annotations[i_ann]['x'] = h_annotations[i_ann]['y']
h_annotations[i_ann]['y'] = prov_x
h_annotations.reverse()
h_layout = layout
h_layout['annotations'] = h_annotations
h_layout['xaxis'] = go.layout.XAxis({'title': 'Prediction score'})
h_layout['yaxis'] = go.layout.YAxis({'title': ''})
return h_data, h_layout | 0dacdefc4e36d10dea3404e1fc5e92ce6f7326be | 13,107 |
def parse_file(producer):
"""
Given a producer name, return appropriate parse function.
:param producer: NMR machine producer.
:return: lambda function that reads file according to producer.
"""
global path_to_directory
return {
"Agilent": (lambda: ng.agilent.read(dir=path_to_directory)),
"Bruker": (lambda: ng.bruker.read(dir=path_to_directory)),
"Varian": (lambda: ng.varian.read(dir=path_to_directory)),
}.get(producer) | cdb8e5e6f506b6d393eeefc13e982f246ea527b6 | 13,108 |
from typing import Union
from typing import Optional
def get_lon_dim_name_impl(ds: Union[xr.Dataset, xr.DataArray]) -> Optional[str]:
"""
Get the name of the longitude dimension.
:param ds: An xarray Dataset
:return: the name or None
"""
return _get_dim_name(ds, ['lon', 'longitude', 'long']) | 7f063690d8835b7cdd3298e14a5c35bd32025acc | 13,109 |
def logout():
"""Log out user."""
session.pop('eventbrite_token', None)
return redirect(url_for('index')) | 449690645fc19d72ef85636776f8d853ca65f4f8 | 13,110 |
def search(query="", casesense=False, filterout=[], subscribers=0, nsfwmode=2, doreturn=False, sort=None):
"""
Search for a subreddit by name
*str query = The search query
"query" = results where "query" is in the name
"*query" = results where "query" is at the end of the name
"query*" = results where "query" is at the beginning of the name
"*query*" = results where "query" is in the middle of the name
bool casesense = is the search case sensitive
list filterout = [list, of, words] to omit from search. Follows casesense
int subscribers = minimum number of subscribers
int nsfwmode =
0 - Clean only
1 - Dirty only
2 - All
int sort = The integer representing the sql column to sort by. Defaults
to no sort.
"""
querys = ''.join([c for c in query if c in GOODCHARS])
queryx = '%%%s%%' % querys
if '!' in query:
cur.execute('SELECT * FROM subreddits WHERE name LIKE ?', [querys])
return cur.fetchone()
if nsfwmode in [0,1]:
cur.execute('SELECT * FROM subreddits WHERE name LIKE ? AND subscribers > ? AND nsfw=?', [queryx, subscribers, nsfwmode])
else:
cur.execute('SELECT * FROM subreddits WHERE name LIKE ? AND subscribers > ?', [queryx, subscribers])
results = []
if casesense is False:
querys = querys.lower()
filterout = [x.lower() for x in filterout]
if '*' in query:
positional = True
front = query[-1] == '*'
back = query[0] == '*'
if front and back:
mid = True
front = False
back = False
else:
mid = False
else:
positional = False
lenq = len(querys)
for item in fetchgenerator(cur):
name = item[SQL_NAME]
if casesense is False:
name = name.lower()
if querys not in name:
#print('%s not in %s' % (querys, name))
continue
if (positional and front) and (name[:lenq] != querys):
#print('%s not front %s (%s)' % (querys, name, name[:lenq]))
continue
if (positional and back) and (name[-lenq:] != querys):
#print('%s not back %s (%s)' % (querys, name, name[-lenq:]))
continue
if (positional and mid) and (querys not in name[1:-1]):
#print('%s not mid %s (%s)' % (querys, name, name[1:-1]))
continue
if any(filters in name for filters in filterout):
#print('%s not filter %s' % (querys, name))
continue
results.append(item)
if sort is not None:
results.sort(key=lambda x: x[sort], reverse=True)
if doreturn is True:
return results
else:
for item in results:
print(item) | c623ee11d507dbd7de84b109c2aa40866bb06dda | 13,111 |
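Illustrative calls showing the wildcard syntax (hedged: the names are hypothetical and the module-level cursor `cur` must already be connected):
exact = search("AskReddit!")                             # exact-name lookup via the "!" branch
anywhere = search("ask", doreturn=True)                  # "ask" anywhere in the name
prefix = search("ask*", doreturn=True)                   # names starting with "ask"
suffix = search("*ask", doreturn=True)                   # names ending with "ask"
middle = search("*ask*", subscribers=1000, doreturn=True)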
def is_xh(filename):
"""
Detects if the given file is an XH file.
:param filename: The file to check.
:type filename: str
"""
info = detect_format_version_and_endianness(filename)
if info is False:
return False
return True | f0c33e5eed11522210dbc64a556e77f1c68d63c1 | 13,112 |
from typing import Tuple
from typing import List
from typing import Union
from typing import Callable
from typing import Any
def validate_func_kwargs(
kwargs: dict,
) -> Tuple[List[str], List[Union[str, Callable[..., Any]]]]:
"""
Validates types of user-provided "named aggregation" kwargs.
`TypeError` is raised if aggfunc is not `str` or callable.
Parameters
----------
kwargs : dict
Returns
-------
columns : List[str]
        List of user-provided keys.
func : List[Union[str, callable[...,Any]]]
List of user-provided aggfuncs
Examples
--------
>>> validate_func_kwargs({'one': 'min', 'two': 'max'})
(['one', 'two'], ['min', 'max'])
"""
tuple_given_message = "func is expected but received {} in **kwargs."
columns = list(kwargs)
func = []
for col_func in kwargs.values():
if not (isinstance(col_func, str) or callable(col_func)):
raise TypeError(tuple_given_message.format(type(col_func).__name__))
func.append(col_func)
if not columns:
no_arg_message = "Must provide 'func' or named aggregation **kwargs."
raise TypeError(no_arg_message)
return columns, func | 81475f1467546f31a63a021c05a0c5f1adfd28a8 | 13,114 |
def mni152_to_fslr(img, fslr_density='32k', method='linear'):
"""
Projects `img` in MNI152 space to fsLR surface
Parameters
----------
img : str or os.PathLike or niimg_like
Image in MNI152 space to be projected
fslr_density : {'32k', '164k'}, optional
Desired output density of fsLR surface. Default: '32k'
method : {'nearest', 'linear'}, optional
Method for projection. Specify 'nearest' if `img` is a label image.
Default: 'linear'
Returns
-------
fsLR : (2,) tuple-of-nib.GiftiImage
Projected `img` on fsLR surface
"""
if fslr_density in ('4k', '8k'):
raise NotImplementedError('Cannot perform registration fusion to '
f'fsLR {fslr_density} space yet.')
return _vol_to_surf(img, 'fsLR', fslr_density, method) | 07129a79bc51e4655516573f517c4270d89800ed | 13,115 |
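A hedged usage sketch (the filename is hypothetical; the projection itself happens in the module's _vol_to_surf helper):
lh_gii, rh_gii = mni152_to_fslr("stat_map_mni152.nii.gz", fslr_density="32k",
                                method="linear")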
import tensorflow as tf
def parse_record(raw_record, _mode, dtype):
"""Parse CIFAR-10 image and label from a raw record."""
# Convert bytes to a vector of uint8 that is record_bytes long.
record_vector = tf.io.decode_raw(raw_record, tf.uint8)
# The first byte represents the label, which we convert from uint8 to int32
# and then to one-hot.
label = tf.cast(record_vector[0], tf.int32)
# The remaining bytes after the label represent the image, which we reshape
# from [depth * height * width] to [depth, height, width].
depth_major = tf.reshape(record_vector[1:_RECORD_BYTES],
[_NUM_CHANNELS, _HEIGHT, _WIDTH])
# Convert from [depth, height, width] to [height, width, depth], and cast
# as float32.
image = tf.cast(tf.transpose(depth_major, [1, 2, 0]), tf.float32)
# normalise images to range 0-1
image = image/255.0
image = tf.cast(image, dtype)
return image, image | 278998f8ee1a126c6c248d8124bba1a4abdf7621 | 13,116 |
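A hedged sketch of wiring the parser into a tf.data input pipeline (the filename is hypothetical and _RECORD_BYTES is the module constant referenced above):
import tensorflow as tf

dataset = tf.data.FixedLengthRecordDataset(
    "cifar-10-batches-bin/data_batch_1.bin", _RECORD_BYTES)
dataset = dataset.map(lambda rec: parse_record(rec, "train", tf.float32)).batch(32)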
def makeSSHTTPClient(paramdict):
"""Creates a SingleShotHTTPClient for the given URL. Needed for Carousel."""
# get the "url" and "postbody" keys from paramdict to use as the arguments of SingleShotHTTPClient
return SingleShotHTTPClient(paramdict.get("url", ""),
paramdict.get("postbody", ""),
extraheaders = paramdict.get("extraheaders", None),
method = paramdict.get('method', None)
) | e7172d849e9c97baf07b9d97b914bf3e05551026 | 13,117 |
import glob
def getFiles(regex, camera, mjdToIngest = None, mjdthreshold = None, days = None, atlasroot='/atlas/', options = None):
"""getFiles.
Args:
regex:
camera:
mjdToIngest:
mjdthreshold:
days:
atlasroot:
options:
"""
# If mjdToIngest is defined, ignore mjdThreshold. If neither
# are defined, grab all the files.
# Don't use find, use glob. It treats the whole argument as a regex.
# e.g. directory = "/atlas/diff/" + camera "/5[0-9][0-9][0-9][0-9]", regex = *.ddc
if mjdToIngest:
if options is not None and options.difflocation is not None:
directory = options.difflocation.replace('CAMERA', camera).replace('MJD', str(mjdToIngest))
else:
directory = atlasroot + "diff/" + camera + "/" + str(mjdToIngest)
fileList = glob.glob(directory + '/' + regex)
else:
if mjdthreshold and days:
fileList = []
for day in range(days):
if options is not None and options.difflocation is not None:
directory = options.difflocation.replace('CAMERA', camera).replace('MJD', str(mjdthreshold + day))
else:
directory = atlasroot + "diff/" + camera + "/%d" % (mjdthreshold + day)
files = glob.glob(directory + '/' + regex)
if files:
fileList += files
else:
if options is not None and options.difflocation is not None:
directory = options.difflocation.replace('CAMERA', camera).replace('MJD', '/[56][0-9][0-9][0-9][0-9]')
else:
directory = atlasroot + "diff/" + camera + "/[56][0-9][0-9][0-9][0-9]"
fileList = glob.glob(directory + '/' + regex)
fileList.sort()
return fileList | 8d61d2e1900413d55e2cfc590fb6c969dd31b441 | 13,118 |
from typing import Sequence
from typing import Tuple
def chain(*args: GradientTransformation) -> GradientTransformation:
"""Applies a list of chainable update transformations.
Given a sequence of chainable transforms, `chain` returns an `init_fn`
that constructs a `state` by concatenating the states of the individual
transforms, and returns an `update_fn` which chains the update transformations
feeding the appropriate state to each.
Args:
*args: a sequence of chainable (init_fn, update_fn) tuples.
Returns:
A single (init_fn, update_fn) tuple.
"""
init_fns, update_fns = zip(*args)
def init_fn(params: Params) -> Sequence[OptState]:
return [fn(params) for fn in init_fns]
def update_fn(updates: Updates, state: OptState, params: Params = None
) -> Tuple[Updates, Sequence[OptState]]:
new_state = []
for s, fn in zip(state, update_fns): # pytype: disable=wrong-arg-types
updates, new_s = fn(updates, s, params)
new_state.append(new_s)
return updates, new_state
return GradientTransformation(init_fn, update_fn) | 089b30a4daec8be0033567da147be6dc4fab9990 | 13,119 |
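A hedged usage sketch assuming the optax package, whose public chain() follows this pattern (clip, scale and apply_updates are optax helpers, not defined above):
import jax.numpy as jnp
import optax

tx = optax.chain(optax.clip(1.0), optax.scale(-1e-2))   # clip gradients, then scale by -lr
params = {"w": jnp.ones(3)}
opt_state = tx.init(params)
grads = {"w": jnp.array([0.5, 2.0, -3.0])}
updates, opt_state = tx.update(grads, opt_state, params)
params = optax.apply_updates(params, updates)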
def fibonacci_mult_tuple(fib0=2, fib1=3, count=10):
"""Returns a tuple with a fibonacci sequence using * instead of +."""
return tuple(fibonacci_mult_list(fib0, fib1, count)) | a43d1bd5bd2490ecbf85b305cc99929ac64a4908 | 13,120 |
import logging
from multiprocessing import Process
def execute_in_process(f):
"""
Decorator.
    Execute the decorated function in a separate process.
"""
def wrapper(*args, **kwargs):
logging.info("Se ha lanzado un nuevo proceso")
process_f = Process(target=f, args=args, kwargs=kwargs)
process_f.start()
return process_f
return wrapper | 2a002ce48e07ec4b31066c1fad51cd271eaa6230 | 13,121 |
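A minimal sketch of using the decorator; the call returns the started Process so the caller can join it (assumes a fork-style start method, since with spawn the rebound module-level name would not pickle cleanly):
import logging

@execute_in_process
def crunch(n):
    logging.info("sum of squares up to %d is %d", n, sum(i * i for i in range(n)))

if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    p = crunch(10_000)
    p.join()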
import copy
def castep_spectral_dispersion(computer, calc_doc, seed):
""" Runs a dispersion interpolation on top of a completed SCF calculation,
optionally running orbitals2bands and OptaDOS projected dispersion.
Parameters:
computer (:obj:`matador.compute.ComputeTask`): the object that will be calling CASTEP.
calc_doc (dict): the structure to run on.
seed (str): root filename of structure.
"""
LOG.info('Performing CASTEP spectral dispersion calculation...')
disp_doc = copy.deepcopy(calc_doc)
disp_doc['task'] = 'spectral'
disp_doc['spectral_task'] = 'bandstructure'
# disable checkpointing for BS/DOS by default, leaving just SCF
disp_doc['write_checkpoint'] = 'none'
disp_doc['pdos_calculate_weights'] = True
disp_doc['write_cell_structure'] = True
disp_doc['continuation'] = 'default'
required = []
forbidden = ['spectral_kpoints_mp_spacing']
computer.validate_calc_doc(disp_doc, required, forbidden)
success = computer.run_castep_singleshot(disp_doc, seed, keep=True, intermediate=True)
if disp_doc.get('write_orbitals'):
LOG.info('Planning to call orbitals2bands...')
_cache_executable = copy.deepcopy(computer.executable)
_cache_core = copy.deepcopy(computer.ncores)
computer.ncores = 1
computer.executable = 'orbitals2bands'
try:
success = computer.run_generic(intermediate=True, mv_bad_on_failure=False)
except Exception as exc:
computer.executable = _cache_executable
computer.ncores = _cache_core
LOG.warning('Failed to call orbitals2bands, with error: {}'.format(exc))
computer.ncores = _cache_core
computer.executable = _cache_executable
return success | 0f84e9b4d7a044fd50512093b51ec20425c98cbd | 13,122 |
def return_limit(x):
"""Returns the standardized values of the series"""
dizionario_limite = {'BENZENE': 5,
'NO2': 200,
'O3': 180,
'PM10': 50,
'PM2.5': 25}
return dizionario_limite[x] | 92d40eaef7b47c3a20b9bcf1f7fd72510a05d9b2 | 13,123 |
def npaths(x, y):
"""
Count paths recursively. Memoizing makes this efficient.
"""
if x>0 and y>0:
return npaths(x-1, y) + npaths(x, y-1)
if x>0:
return npaths(x-1, y)
if y>0:
return npaths(x, y-1)
return 1 | 487a1f35b1bf825ffaf6bbf1ed86eb51f6cf18e9 | 13,124 |
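The docstring relies on memoization for efficiency; one hedged way to add it without touching the body is to rebind the name through functools.lru_cache so the recursive calls hit the cache:
from functools import lru_cache

npaths = lru_cache(maxsize=None)(npaths)
print(npaths(20, 20))   # C(40, 20) = 137846528820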
import datetime
def sqlify(obj):
"""
converts `obj` to its proper SQL version
>>> sqlify(None)
'NULL'
>>> sqlify(True)
"'t'"
>>> sqlify(3)
'3'
"""
# because `1 == True and hash(1) == hash(True)`
# we have to do this the hard way...
if obj is None:
return 'NULL'
elif obj is True:
return "'t'"
elif obj is False:
return "'f'"
elif datetime and isinstance(obj, datetime.datetime):
return repr(obj.isoformat())
else:
return repr(obj) | 6342a4fc1b4450181cee5a6287036b1f4ed38883 | 13,125 |
import pandas as pd
def create_results_dataframe(
list_results,
settings,
result_classes=None,
abbreviate_name=False,
format_number=False,
):
"""
Returns a :class:`pandas.DataFrame`.
If *result_classes* is a list of :class:`Result`, only the columns from
this result classes will be returned. If ``None``, the columns from
all results will be returned.
"""
list_series = []
for results in list_results:
builder = SeriesBuilder(settings, abbreviate_name, format_number)
for result in results:
prefix = result.getname().lower() + " "
if result_classes is None: # Include all results
builder.add_entity(result, prefix)
elif type(result) in result_classes:
if len(result_classes) == 1:
builder.add_entity(result)
else:
builder.add_entity(result, prefix)
list_series.append(builder.build())
return pd.DataFrame(list_series) | 638328936ee9207777fab504021efd83379ec93c | 13,126 |
def get_first_model_each_manufacturer(cars=cars):
"""return a list of matching models (original ordering)"""
first = []
    for models in cars.values():
        first.append(models[0])
    return first | c6ec531ccc7a9bc48b404df34ec9c33066cd8717 | 13,127
import numpy as np
def white(*N, mean=0, std=1):
""" White noise.
:param N: Amount of samples.
    White noise has a constant power density, so its narrowband spectrum is flat.
    The power in white noise increases by a factor of two for each octave band,
    and therefore increases by 3 dB per octave.
"""
return std * np.random.randn(*N) + mean | 874dd75b3cd735f6b5642cd5567d7d0218af615b | 13,128 |
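A quick sanity check that the generated noise has roughly the requested statistics:
import numpy as np

noise = white(100_000, mean=0.0, std=2.0)
print(noise.shape, round(float(noise.mean()), 2), round(float(noise.std()), 2))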
import random
def random_size_crop(src, size, min_area=0.25, ratio=(3.0/4.0, 4.0/3.0)):
"""Randomly crop src with size. Randomize area and aspect ratio"""
h, w, _ = src.shape
area = w*h
for _ in range(10):
new_area = random.uniform(min_area, 1.0) * area
new_ratio = random.uniform(*ratio)
        # side lengths follow from target area = w*h and ratio = w/h, so w = sqrt(area*ratio)
        new_w = int(round((new_area * new_ratio) ** 0.5))
        new_h = int(round((new_area / new_ratio) ** 0.5))
if random.uniform(0., 1.) < 0.5:
new_w, new_h = new_h, new_w
if new_w > w or new_h > h:
continue
x0 = random.randint(0, w - new_w)
y0 = random.randint(0, h - new_h)
out = fixed_crop(src, x0, y0, new_w, new_h, size)
return out, (x0, y0, new_w, new_h)
return random_crop(src, size) | 76c64b91e03cb5cf65b164c10771bd78d13945ee | 13,129 |
import re
def joinAges(dataDict):
"""Merges columns by county, dropping ages"""
popColumns = list(dataDict.values())[0].columns.tolist()
popColumns = [re.sub("[^0-9]", "", column) for column in popColumns]
dictOut = dict()
for compartmentName, table in dataDict.items():
table.columns = popColumns
dictOut[compartmentName] = table.sum(axis=1, level=0)
return dictOut | d83ee4883ba58f7090141c131c4e111a4805f15d | 13,131 |
from matplotlib.collections import LineCollection
def plot_graph_route(G, route, bbox=None, fig_height=6, fig_width=None,
margin=0.02, bgcolor='w', axis_off=True, show=True,
save=False, close=True, file_format='png', filename='temp',
dpi=300, annotate=False, node_color='#999999',
node_size=15, node_alpha=1, node_edgecolor='none',
node_zorder=1, edge_color='#999999', edge_linewidth=1,
edge_alpha=1, use_geom=True, origin_point=None,
destination_point=None, route_color='r', route_linewidth=4,
route_alpha=0.5, orig_dest_node_alpha=0.5,
orig_dest_node_size=100, orig_dest_node_color='r',
orig_dest_point_color='b'):
"""
Plot a route along a networkx spatial graph.
Parameters
----------
G : networkx multidigraph
route : list
the route as a list of nodes
bbox : tuple
bounding box as north,south,east,west - if None will calculate from
spatial extents of data. if passing a bbox, you probably also want to
pass margin=0 to constrain it.
fig_height : int
matplotlib figure height in inches
fig_width : int
matplotlib figure width in inches
margin : float
relative margin around the figure
axis_off : bool
if True turn off the matplotlib axis
bgcolor : string
the background color of the figure and axis
show : bool
if True, show the figure
save : bool
if True, save the figure as an image file to disk
close : bool
close the figure (only if show equals False) to prevent display
file_format : string
the format of the file to save (e.g., 'jpg', 'png', 'svg')
filename : string
the name of the file if saving
dpi : int
the resolution of the image file if saving
annotate : bool
if True, annotate the nodes in the figure
node_color : string
the color of the nodes
node_size : int
the size of the nodes
node_alpha : float
the opacity of the nodes
node_edgecolor : string
the color of the node's marker's border
node_zorder : int
zorder to plot nodes, edges are always 2, so make node_zorder 1 to plot
nodes beneath them or 3 to plot nodes atop them
edge_color : string
the color of the edges' lines
edge_linewidth : float
the width of the edges' lines
edge_alpha : float
the opacity of the edges' lines
use_geom : bool
if True, use the spatial geometry attribute of the edges to draw
geographically accurate edges, rather than just lines straight from node
to node
origin_point : tuple
optional, an origin (lat, lon) point to plot instead of the origin node
destination_point : tuple
optional, a destination (lat, lon) point to plot instead of the
destination node
route_color : string
the color of the route
route_linewidth : int
the width of the route line
route_alpha : float
the opacity of the route line
orig_dest_node_alpha : float
the opacity of the origin and destination nodes
orig_dest_node_size : int
the size of the origin and destination nodes
orig_dest_node_color : string
the color of the origin and destination nodes
orig_dest_point_color : string
the color of the origin and destination points if being plotted instead
of nodes
Returns
-------
fig, ax : tuple
"""
# plot the graph but not the route
fig, ax = plot_graph(G, bbox=bbox, fig_height=fig_height, fig_width=fig_width,
margin=margin, axis_off=axis_off, bgcolor=bgcolor,
show=False, save=False, close=False, filename=filename,
dpi=dpi, annotate=annotate, node_color=node_color,
node_size=node_size, node_alpha=node_alpha,
node_edgecolor=node_edgecolor, node_zorder=node_zorder,
edge_color=edge_color, edge_linewidth=edge_linewidth,
edge_alpha=edge_alpha, use_geom=use_geom)
# the origin and destination nodes are the first and last nodes in the route
origin_node = route[0]
destination_node = route[-1]
if origin_point is None or destination_point is None:
# if caller didn't pass points, use the first and last node in route as
# origin/destination
origin_destination_lats = (G.nodes[origin_node]['y'], G.nodes[destination_node]['y'])
origin_destination_lons = (G.nodes[origin_node]['x'], G.nodes[destination_node]['x'])
else:
# otherwise, use the passed points as origin/destination
origin_destination_lats = (origin_point[0], destination_point[0])
origin_destination_lons = (origin_point[1], destination_point[1])
orig_dest_node_color = orig_dest_point_color
# scatter the origin and destination points
ax.scatter(origin_destination_lons, origin_destination_lats, s=orig_dest_node_size,
c=orig_dest_node_color, alpha=orig_dest_node_alpha, edgecolor=node_edgecolor, zorder=4)
# plot the route lines
edge_nodes = list(zip(route[:-1], route[1:]))
lines = []
for u, v in edge_nodes:
# if there are parallel edges, select the shortest in length
data = min(G.get_edge_data(u, v).values(), key=lambda x: x['length'])
# if it has a geometry attribute (ie, a list of line segments)
if 'geometry' in data and use_geom:
# add them to the list of lines to plot
xs, ys = data['geometry'].xy
lines.append(list(zip(xs, ys)))
else:
# if it doesn't have a geometry attribute, the edge is a straight
# line from node to node
x1 = G.nodes[u]['x']
y1 = G.nodes[u]['y']
x2 = G.nodes[v]['x']
y2 = G.nodes[v]['y']
line = [(x1, y1), (x2, y2)]
lines.append(line)
# add the lines to the axis as a linecollection
lc = LineCollection(lines, colors=route_color, linewidths=route_linewidth, alpha=route_alpha, zorder=3)
ax.add_collection(lc)
# save and show the figure as specified
fig, ax = save_and_show(fig, ax, save, show, close, filename, file_format, dpi, axis_off)
return fig, ax | 19483338300d2f0fe9426942b5e0a196178cc036 | 13,132 |
from typing import Dict
def random_polynomialvector(
secpar: int, lp: LatticeParameters, distribution: str, dist_pars: Dict[str, int], num_coefs: int,
bti: int, btd: int, const_time_flag: bool = True
) -> PolynomialVector:
"""
Generate a random PolynomialVector with bounded Polynomial entries. Essentially just instantiates a PolynomialVector
object with a list of random Polynomial objects as entries, which are in turn generated by random_polynomial
:param secpar: Input security parameter
:type secpar: int
:param lp: Lattice parameters
:type lp: LatticeParameters
:param distribution: String code describing which distribution to use
:type distribution: str
:param dist_pars: Distribution parameters
:type dist_pars: dict
:param num_coefs: Number of coefficients to generate
:type num_coefs: int
:param bti: Number of bits required to unbiasedly sample indices without replacement.
:type bti: int
:param btd: Number of bits required to unbiasedly sample an integer modulo the modulus in lp
:type btd: int
:param const_time_flag: Indicates whether arithmetic should be constant time.
:type const_time_flag: bool
:return:
:rtype: PolynomialVector
"""
if secpar < 1:
raise ValueError('Cannot random_polynomialvector without an integer security parameter.')
elif distribution == UNIFORM_INFINITY_WEIGHT:
return random_polynomial_vector_inf_wt_unif(
secpar=secpar, lp=lp, dist_pars=dist_pars, num_coefs=num_coefs,
bti=bti, btd=btd, const_time_flag=const_time_flag
)
raise ValueError('Tried to random_polynomialvector with a distribution that is not supported.') | 43d059c69f74f2ba91fec690cc6d9a86ca51cf2a | 13,133 |
import cv2
def get_glare_value(gray):
"""
:param gray: cv2.imread(image_path) grayscale image
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    :return: numerical value between 0 and 255 indicating the amount of glare
"""
blur = cv2.blur(gray, (3, 3)) # With kernel size depending upon image size
mean_blur = cv2.mean(blur)
return mean_blur[0] | c019d79f47949a061e74129b56bfb3d413d03314 | 13,135 |
import numpy as np
def generate_n_clusters(object_generator, n_clusters, n_objects_per_cluster, *, rng=None):
""" Creates n_clusters of random objects """
rng = np.random.default_rng(rng)
object_clusters = []
for i in range(n_clusters):
cluster_objects = generate_random_object_cluster(n_objects_per_cluster, object_generator,
max_cluster_trans=0.5, max_cluster_rot=np.pi / 16, rng=rng)
object_clusters.append(cluster_objects)
all_objects = [item for sublist in object_clusters for item in sublist]
return all_objects, object_clusters | 1de8c3793abaf635e182b6b4640ddd8bd7d1ed28 | 13,137 |
import numpy as np
import astropy.units as u
def disp2vel(wrange, velscale):
""" Returns a log-rebinned wavelength dispersion with constant velocity.
This code is an adaptation of pPXF's log_rebin routine, simplified to
deal with the wavelength dispersion only.
Parameters
----------
wrange: list, np.array or astropy.Quantity
Input wavelength dispersion range with two elements.
velscale: float or astropy.Quantity
Desired output velocity scale. Units are assumed to be km/s unless
specified as an astropy.Quantity.
"""
c = 299792.458 # Speed of light in km/s
if isinstance(wrange, list):
wrange = np.array(wrange)
wunits = wrange.unit if hasattr(wrange, "unit") else 1
if hasattr(velscale, "unit"):
velscale = velscale.to(u.km/u.s).value
veldiff = np.log(np.max(wrange) / np.min(wrange)) * c
n = veldiff / velscale
m = int(n)
dv = 0.5 * (n-m) * velscale
v = np.arange(0, m * velscale, velscale) + dv
w = wrange[0] * np.exp(v / c)
return w * wunits | c15d5cf8dc3f26969f38e4f678441adeae710e77 | 13,138 |
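A short sketch using plain floats (wavelengths in Angstrom, velocity scale in km/s), which sidesteps the astropy-Quantity path:
wave = disp2vel([4000.0, 7000.0], 30.0)
print(wave.size, wave[0], wave[-1])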
from operator import itemgetter
import numpy as np
def relabel(labels):
    """
    Remaps integer labels so that the most frequent label becomes 0, the next 1, and so on.
    """
uni_labels, uni_inv, uni_counts = np.unique(
labels, return_inverse=True, return_counts=True
)
sort_inds = np.argsort(uni_counts)[::-1]
new_labels = range(len(uni_labels))
uni_labels_sorted = uni_labels[sort_inds]
relabel_map = dict(zip(uni_labels_sorted, new_labels))
new_labels = np.array(itemgetter(*labels)(relabel_map))
return new_labels | bc809781968387ec9de9de05f8d5cd990ede4c62 | 13,139 |
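A small illustration: the most frequent input label maps to 0, the next most frequent to 1, and so on:
import numpy as np

labels = np.array([7, 7, 7, 3, 3, 9])
print(relabel(labels))   # [0 0 0 1 1 2]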
from typing import List
from typing import Tuple
import numpy as np
def precision_at_threshold(
weighted_actual_names: List[Tuple[str, float, int]],
candidates: np.ndarray,
threshold: float,
distances: bool = False,
) -> float:
"""
Return the precision at a threshold for the given weighted-actuals and candidates
:param weighted_actual_names: list of [name, weight, ?] - weight and ? are ignored
:param candidates: array of [name, score]
:param threshold: threshold
:param distances: if True, score must be <= threshold; if False, score must be >= threshold; defaults to False
"""
matches = _get_matches(candidates, threshold, distances)
num_matches = len(matches)
if num_matches == 0:
return 1.0
return len(set(name for name, weight, _ in weighted_actual_names).intersection(matches)) / num_matches | 40c99830339418acee59c5364f1a70dc5639a475 | 13,140 |
def alias(*alias):
"""Select a (list of) alias(es)."""
valias = [t for t in alias]
return {"alias": valias} | b2ff51f33b601468b1ba4d371bd5abd6d013a188 | 13,141 |
import pathlib
import traceback
import xarray as xr
import cf_xarray  # noqa: F401 -- registers the .cf accessor used below
def parse_smyle(file):
"""Parser for CESM2 Seasonal-to-Multiyear Large Ensemble (SMYLE)"""
try:
with xr.open_dataset(file, chunks={}, decode_times=False) as ds:
file = pathlib.Path(file)
parts = file.parts
# Case
case = parts[-6]
# Extract the component from the file string
component = parts[-5]
# Extract the frequency
frequency = parts[-2]
date_regex = r'\d{10}-\d{10}|\d{8}-\d{8}|\d{6}-\d{6}|\d{4}-\d{4}'
date_range = extract_attr_with_regex(parts[-1], date_regex)
# Pull out the start and end time
start_time, end_time = date_range.split('-')
# Extract variable and stream
y = parts[-1].split(date_range)[0].strip('.').split('.')
variable = y[-1]
stream = '.'.join(y[-3:-1])
# Extract init_year, init_month, member_id
z = extract_attr_with_regex(case, r'\d{4}-\d{2}.\d{3}').split('.')
inits = z[0].split('-')
init_year = int(inits[0])
init_month = int(inits[1])
member_id = z[-1]
x = case.split(z[0])[0].strip('.').split('.')
experiment = x[-2]
grid = x[-1]
# Get the long name from dataset
long_name = ds[variable].attrs.get('long_name')
# Grab the units of the variable
units = ds[variable].attrs.get('units')
# Set the default of # of vertical levels to 1
vertical_levels = 1
try:
vertical_levels = ds[ds.cf['vertical'].name].size
except (KeyError, AttributeError, ValueError):
pass
# Use standard region names
regions = {
'atm': 'global',
'ocn': 'global_ocean',
'lnd': 'global_land',
'ice': 'global',
}
spatial_domain = regions.get(component, 'global')
return {
'component': component,
'case': case,
'experiment': experiment,
'variable': variable,
'long_name': long_name.lower(),
'frequency': frequency,
'stream': stream,
'member_id': member_id,
'init_year': init_year,
'init_month': init_month,
'vertical_levels': vertical_levels,
'units': units,
'spatial_domain': spatial_domain,
'grid': grid,
'start_time': parse_date(start_time),
'end_time': parse_date(end_time),
'path': str(file),
}
except Exception:
return {INVALID_ASSET: file, TRACEBACK: traceback.format_exc()} | 791ecf41e4bc1b44ababbf35a021b4a48b46bc24 | 13,143 |
import numpy as np
def get_shape(grid, major_ticks=False):
"""
Infer shape from grid
Parameters
----------
grid : ndarray
Minor grid nodes array
major_ticks : bool, default False
If true, infer shape of majr grid nodes
Returns
-------
shape : tuple
Shape of grid ndarray
"""
shape = tuple(len(np.unique(g)) for g in grid.T)
if major_ticks:
shape = tuple(np.max(grid + 1, axis=0).astype(int))
return shape | 57f487260ca19257bd3f9891ce87c52c1eafe3bc | 13,144 |
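An illustration with a 3 x 4 grid of nodes laid out as an (N, 2) coordinate array:
import numpy as np

xx, yy = np.meshgrid(np.arange(3), np.arange(4))
grid = np.column_stack([xx.ravel(), yy.ravel()])
print(get_shape(grid))                     # (3, 4)
print(get_shape(grid, major_ticks=True))   # also (3, 4) for this integer grid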
import numpy as np
def lorentz_force_derivative(t, X, qm, Efield, Bfield):
"""
Useful when using generic integration schemes, such
as RK4, which can be compared to Boris-Bunemann
"""
v = X[3:]
E = Efield(X)
B = Bfield(X)
# Newton-Lorentz acceleration
a = qm*E + qm*np.cross(v,B)
ydot = np.concatenate((v,a))
return ydot | 7a7aade5ece2363e177002bac0c18c4a0b59174f | 13,145 |
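A hedged sketch: integrate a proton-like particle in a uniform magnetic field with SciPy's generic RK45 solver, as the docstring suggests (field values and charge-to-mass ratio are illustrative):
import numpy as np
from scipy.integrate import solve_ivp

qm = 9.58e7                                    # proton charge-to-mass ratio, C/kg
Efield = lambda X: np.zeros(3)
Bfield = lambda X: np.array([0.0, 0.0, 1e-4])  # uniform 0.1 mT field along z
X0 = np.array([0.0, 0.0, 0.0, 1e5, 0.0, 0.0])  # [x, y, z, vx, vy, vz]
sol = solve_ivp(lorentz_force_derivative, (0.0, 1e-3), X0,
                args=(qm, Efield, Bfield), max_step=1e-6)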
def copy_rate(source, target, tokenize=False):
"""
Compute copy rate
:param source:
:param target:
:return:
"""
if tokenize:
source = toktok(source)
target = toktok(target)
source_set = set(source)
target_set = set(target)
if len(source_set) == 0 or len(target_set) == 0:
return 0.
return set_overlap(source_set, target_set) | 80b94e90ab43df2f33869660f4b83f41721826f0 | 13,146 |
import json
def read_json_info(fname):
"""
Parse info from the video information file.
Returns: Dictionary containing information on podcast episode.
"""
with open(fname) as fin:
return json.load(fin) | 1eed945ce2917cbca1fb807a807ab57229622374 | 13,147 |
def check_subman_version(required_version):
"""
Verify that the command 'subscription-manager' isn't too old.
"""
status, _ = check_package_version('subscription-manager', required_version)
return status | 33e14fd5cf68e170f5804ae393cb2a45878d19a6 | 13,148 |
import random
def bigsegment_twocolor(rows, cols, seed=None):
"""
Form a map from intersecting line segments.
"""
if seed is not None:
random.seed(seed)
possible_nhseg = [3,5]
possible_nvseg = [1,3,5]
gap_probability = random.random() * 0.10
maxdim = max(rows, cols)
nhseg = 0
nvseg = 0
while (nhseg == 0 and nvseg == 0) or (nhseg % 2 != 0 and nvseg == 0):
nhseg = random.choice(possible_nhseg)
nvseg = random.choice(possible_nvseg)
jitterx = 15
jittery = 15
team1_pattern, team2_pattern = segment_pattern(
rows,
cols,
seed,
colormode="classic",
nhseg=nhseg,
nvseg=nvseg,
jitterx=jitterx,
jittery=jittery,
gap_probability=gap_probability,
)
pattern1_url = pattern2url(team1_pattern)
pattern2_url = pattern2url(team2_pattern)
return pattern1_url, pattern2_url | 1df4861434b19d6bdebe926baad57e3a11f6a64b | 13,150 |