content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---|
def create_rollout_policy(domain: Simulator, rollout_descr: str) -> Policy:
"""returns, if available, a domain specific rollout policy
Currently only supported by grid-verse environment:
- "default" -- default "informed" rollout policy
- "gridverse-extra" -- straight if possible, otherwise turn
:param domain: environment
:param rollout_descr: "default" or "gridverse-extra"
"""
if isinstance(domain, gridverse_domain.GridverseDomain):
if rollout_descr == "default":
pol = partial(
gridverse_domain.default_rollout_policy,
encoding=domain._state_encoding, # pylint: disable=protected-access
)
elif rollout_descr == "gridverse-extra":
pol = partial(
gridverse_domain.straight_or_turn_policy,
encoding=domain._state_encoding, # pylint: disable=protected-access
)
else:
if rollout_descr:
raise ValueError(
f"{rollout_descr} not accepted as rollout policy for domain {domain}"
)
pol = partial(random_policy, action_space=domain.action_space)
def rollout(augmented_state: BADDr.AugmentedState) -> int:
"""
So normally PO-UCT expects states to be numpy arrays and everything is
dandy, but we are planning in augmented space here in secret. So the
typical rollout policy of the environment will not work: it does not
expect an `AugmentedState`. So here we gently provide it the underlying
state and all is well
:param augmented_state:
"""
return pol(augmented_state.domain_state)
return RolloutPolicyForPlanning(rollout) | 563363589b4ecc6c75306773a6de6320dd697c29 | 12,683 |
def get_event_details(event):
"""Extract event image and timestamp - image with no tag will be tagged as latest.
:param dict event: start container event dictionary.
:return tuple: (container image, last use timestamp).
"""
image = str(event['from'] if ":" in event['from'] else event['from'] + ":latest")
timestamp = event['time']
return image, timestamp | c9b4ded7f343f0d9486c298b9a6f2d96dde58b8c | 12,684 |
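# Hedged usage sketch for get_event_details above: the dict mimics the shape of a Docker
# "start" container event ('from' holds the image reference, 'time' a unix timestamp);
# the exact event keys are an assumption based only on the fields the function reads.
example_event = {"from": "nginx", "time": 1700000000}
image, timestamp = get_event_details(example_event)
print(image, timestamp)  # -> nginx:latest 1700000000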
def cvCreateMemStorage(*args):
"""cvCreateMemStorage(int block_size=0) -> CvMemStorage"""
return _cv.cvCreateMemStorage(*args) | b6ced2d030345b5500daa051601a20bd19e01825 | 12,686 |
import json
def copyJSONable(obj):
"""
Creates a copy of obj and ensures it is JSONable.
:return: copy of obj.
:raises:
TypeError: if the obj is not JSONable.
"""
return json.loads(json.dumps(obj)) | 1cc3c63893c7716a4c3a8333e725bb518b925923 | 12,687 |
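# Hedged usage sketch for copyJSONable above: the round-trip through json yields a deep,
# independent copy for JSON-serializable data and raises TypeError for anything else.
original = {"a": [1, 2, 3], "b": {"c": True}}
clone = copyJSONable(original)
clone["a"].append(4)
print(original["a"])  # -> [1, 2, 3]; the copy is independent of the original
try:
    copyJSONable({"bad": {1, 2, 3}})  # a set is not JSONable
except TypeError as err:
    print("not JSONable:", err)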
from typing import List

import geopandas as gpd
import pandas as pd
def get_eez_and_land_union_shapes(iso2_codes: List[str]) -> pd.Series:
"""
Return Marineregions.org EEZ and land union geographical shapes for a list of countries.
Parameters
----------
iso2_codes: List[str]
List of ISO2 codes.
Returns
-------
    shapes: pd.Series
        Shapes of the union of EEZ and land for each country.
Notes
-----
    Union shapes are divided based on their territorial ISO codes. For example, the shapes
    for French Guiana and France are associated with different entries.
"""
shape_fn = f"{data_path}geographics/source/EEZ_land_union/EEZ_Land_v3_202030.shp"
shapes = gpd.read_file(shape_fn)
# Convert country ISO2 codes to ISO3
iso3_codes = convert_country_codes(iso2_codes, 'alpha_2', 'alpha_3', throw_error=True)
# Get 'union' polygons associated with each code
shapes = shapes.set_index("ISO_TER1")["geometry"]
missing_codes = set(iso3_codes) - set(shapes.index)
assert not missing_codes, f"Error: Shapes not available for codes {sorted(list(missing_codes))}"
shapes = shapes.loc[iso3_codes]
shapes.index = convert_country_codes(list(shapes.index), 'alpha_3', 'alpha_2', throw_error=True)
return shapes | 1cebbbbc4d8d962776bbd17e8d2053e1c3dcf5ef | 12,688 |
def putrowstride(a,s):
"""
Put the stride of a matrix view object
"""
t=getType(a)
f={'mview_f':vsip_mputrowstride_f,
'mview_d':vsip_mputrowstride_d,
'mview_i':vsip_mputrowstride_i,
'mview_si':vsip_mputrowstride_si,
'mview_uc':vsip_mputrowstride_uc,
'mview_bl':vsip_mputrowstride_bl,
'cmview_f':vsip_cmputrowstride_f,
'cmview_d':vsip_cmputrowstride_d }
    assert t[0] and t[1] in f,'Type <:%s:> not a supported type for putrowstride'%t[1]
return f[t[1]](a,s) | ad15cc8c0f3e7d88849aed6cfef5408013700a01 | 12,689 |
def list_tracked_stocks():
"""Returns a list of all stock symbols for the stocks being tracker"""
data = read_json("stockJSON/tracked_stocks.json")
return list(data.keys()) | 96c1eeb4fb728d447eb4e4ae055e0ae065df6466 | 12,690 |
def min_max_two(first, second):
"""Pomocna funkce, vrati dvojici:
(mensi ze zadanych prvku, vetsi ze zadanych prvku).
K tomu potrebuje pouze jedno porovnani."""
return (first, second) if first < second else (second, first) | 7ddda1ad69056c22d9ba890e19e62464f56c08e1 | 12,691 |
def multi_ways_balance_merge_sort(a):
"""
多路平衡归并排序
- 多用于外部排序
- 使用多维数组模拟外部存储归并段
- 使用loser tree来实现多路归并
- 归并的趟数跟路数k成反比,增加路数k可以调高效率
:param a:
:return:
"""
SENTRY = float('inf') # 哨兵,作为归并段的结尾
leaves = [] # 每个归并段中的一个元素构成loser tree的原始序列
b = [] # 输出归并段,此实现中简化为以为数组。实际情况下也需要对输出分段。
for v in a:
merge_sort(v) # 归并段内排序,采用归并排序
v.append(SENTRY) # 每个归并段追加哨兵
leaves.append(v[0]) # 每个归并段的首元素构成初始化loser tree的原始序列
del v[0] # 删除各归并段的首元素
lt = LoserTree(leaves) # 构建loser tree
# 循环获取winner
while True:
i, v = lt.winner # winner
if v == SENTRY:
# 排序结束
break
b.append(v) # 将winner写入输出归并段
lt.modify_key(i, a[i][0]) # winner所在的归并段的下一个元素更新入loser tree
del a[i][0] # 删除已处理数据
return b | 4abeceb361f662e2ae8bba1a2cd94c9f598bdc9d | 12,693 |
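# The function above relies on external LoserTree and merge_sort helpers that are not shown.
# As a self-contained illustration of the same k-way merge idea, the sketch below uses
# heapq.merge instead of a loser tree (a different but equivalent selection structure);
# it only shows what the merge phase produces and is not the original implementation.
import heapq

def k_way_merge_sketch(runs):
    # sort each run, then merge all runs in a single pass
    return list(heapq.merge(*(sorted(run) for run in runs)))

print(k_way_merge_sketch([[9, 3, 7], [4, 1], [8, 2, 6]]))  # -> [1, 2, 3, 4, 6, 7, 8, 9]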
def check_instance_of(value, types, message = None):
"""
Raises a #TypeError if *value* is not an instance of the specified *types*. If no message is
provided, it will be auto-generated for the given *types*.
"""
if not isinstance(value, types):
if message is None:
message = f'expected {_repr_types(types)}, got {type(value).__name__} instead'
raise TypeError(_get_message(message))
return value | 4d62fea56a3c33bf1a6bd5921ff982544e9fbe29 | 12,694 |
def create_input_metadatav1():
"""Factory pattern for the input to the marshmallow.json.MetadataSchemaV1.
"""
def _create_input_metadatav1(data={}):
data_to_use = {
'title': 'A title',
'authors': [
{
'first_name': 'An',
'last_name': 'author'
}
],
'description': 'A description',
'resource_type': {
'general': 'other',
'specific': 'other'
},
'license': 'mit-license',
'permissions': 'all_view',
}
data_to_use.update(data)
return data_to_use
return _create_input_metadatav1 | 3149876217b01864215d4de411acb18eb578f1a9 | 12,695 |
import logging
def create_job(title: str = Body(None, description='The title of the codingjob'),
codebook: dict = Body(None, description='The codebook'),
units: list = Body(None, description='The units'),
rules: dict = Body(None, description='The rules'),
debriefing: dict = Body(None, description='Debriefing information'),
jobsets: list = Body(None, description='A list of codingjob jobsets. An array of objects, with keys: name, codebook, unit_set'),
               authorization: dict = Body(None, description='A dictionary containing authorization settings'),
provenance: dict = Body(None, description='A dictionary containing any information about the units'),
user: User = Depends(auth_user),
db: Session = Depends(get_db)):
"""
Create a new codingjob. Body should be json structured as follows:
{
"title": <string>,
"codebook": {.. blob ..}, # required, but can be omitted if specified in every jobset
"units": [
{"id": <string> # An id string. Needs to be unique within a codingjob (not necessarily across codingjobs)
"unit": {.. blob ..},
"gold": {.. blob ..}, # optional, include correct answer here for gold questions
}
..
],
"rules": {
"ruleset": <string>,
"authorization": "open"|"restricted", # optional, default: open
.. additional ruleset parameters ..
},
"debriefing": {
"message": <string>,
"link": <string> (url)
}
"jobsets": [ # optional
{"name": <string>,
"codebook": <codebook>, ## optional
"unit_set": [<external_id>] ## optional
}
]
"authorization": { # optional, default: {'restricted': False}
restricted: boolean,
users: [emails]
},
"provenance": {.. blob ..}, # optional
}
    Where ..blob.. indicates that this part is not processed by the backend, so it can be annotator specific.
    See the annotator documentation for additional information.
    The rules determine how units are distributed, how to deal with quality control, etc.
The ruleset name specifies the class of rules to be used (currently "crowd" or "expert").
Depending on the ruleset, additional options can be given.
See the rules documentation for additional information
"""
check_admin(user)
if not title or not codebook or not units or not rules:
raise HTTPException(status_code=400, detail='Codingjob is missing keys')
try:
job = crud_codingjob.create_codingjob(db, title=title, codebook=codebook, jobsets=jobsets, provenance=provenance, rules=rules, debriefing=debriefing, creator=user, units=units, authorization=authorization)
except Exception as e:
logging.error(e)
raise HTTPException(status_code=400, detail='Could not create codingjob')
return dict(id=job.id) | 3b8fbeee052fc17c4490f7b51c57b9a83b878bf0 | 12,696 |
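# Hedged sketch of a minimal request body for create_job above, following the structure
# documented in its docstring; all field contents are illustrative placeholders only.
example_body = {
    "title": "Sentiment pilot",
    "codebook": {"type": "annotate", "questions": []},
    "units": [
        {"id": "unit-1", "unit": {"text": "example text"}},
        {"id": "unit-2", "unit": {"text": "another text"}, "gold": {"answer": "positive"}},
    ],
    "rules": {"ruleset": "crowd"},
    "authorization": {"restricted": False},
}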
from typing import Dict
async def finalize(
db,
pg: AsyncEngine,
subtraction_id: str,
gc: Dict[str, float],
count: int,
) -> dict:
"""
Finalize a subtraction by setting `ready` to True and updating the `gc` and `files`
fields.
:param db: the application database client
:param pg: the PostgreSQL AsyncEngine object
:param subtraction_id: the id of the subtraction
    :param gc: a dict containing GC data
    :param count: the count value to store on the subtraction document
    :return: the updated subtraction document
"""
updated_document = await db.subtraction.find_one_and_update(
{"_id": subtraction_id},
{
"$set": {
"gc": gc,
"ready": True,
"count": count,
}
},
)
return updated_document | f9f0a498f5c4345bf9a61cb65ff64d05874caee7 | 12,697 |
def find_path(a, b, is_open):
"""
:param a: Start Point
:param b: Finish Point
:param is_open: Function returning True if the Point argument is an open square
:return: A list of Points containing the moves needed to get from a to b
"""
if a == b:
return []
if not is_open(b):
return None
moves = rectilinear_path(a, b, is_open) or direct_path(a, b, is_open) or find_path_using_a_star(a, b, is_open)
return moves | e42be77beb59ec9ef230c8f30abab33f4bfcd12b | 12,698 |
def view_skill_api():
""" General API for skills and posts """
dbsess = get_session()
action = request.form["action"]
kind = request.form["kind"]
if kind == "post":
if action == "read":
post = models.Post.get_by_id(dbsess, int(request.form["post-id"]))
if not post:
return "", 404
return jsonify({
"title": post.title,
"content": post.body,
})
if action == "create":
skills = request.form.getlist("skill-ids[]")
post = models.Post(title=request.form["title"],
body=request.form["content"])
dbsess.add(post)
dbsess.commit()
for skill_id in skills:
postskill = models.PostSkill(post_id=post.id, skill_id=skill_id)
dbsess.add(postskill)
dbsess.commit()
return jsonify({"new-id": post.id}), 201
if action == "modify":
skills = [int(_id) for _id in request.form.getlist("skill-ids[]")]
post = models.Post.get_by_id(dbsess, int(request.form["post-id"]))
post.title = request.form["title"]
post.body = request.form["content"]
dbsess.query(models.PostSkill).filter_by(post_id=post.id).delete()
for skill_id in skills:
postskill = models.PostSkill(post_id=post.id, skill_id=skill_id)
dbsess.add(postskill)
dbsess.commit()
dbsess.add(post)
dbsess.commit()
return "", 202
if action == "delete":
pass
if kind == "skill":
if action == "read":
send_skills = []
skills = dbsess.query(models.Skill).all()
post = models.Post.get_by_id(dbsess, int(request.form["post-id"]))
for skill in skills:
send_skills.append({
"name": skill.name,
"id": skill.id,
"selected": skill in [skl.skill for skl in post.skills] if post else False,
})
return jsonify({"skills": send_skills}), 200
return "", 400
return "", 400 | 622c4d7eadac7abf23f0706fc49389bd1453846c | 12,699 |
import queue as _queue
def read_event(suppress=False):
"""
Blocks until a keyboard event happens, then returns that event.
"""
queue = _queue.Queue(maxsize=1)
hooked = hook(queue.put, suppress=suppress)
while True:
event = queue.get()
unhook(hooked)
return event | 2ec5159c6fb886a71f19c8f5a8f81f36b0fe8442 | 12,700 |
import re
import requests
from bs4 import BeautifulSoup
def searchCVE(service, version):
"""Return a list of strings"""
re.search
url = "https://cve.mitre.org/cgi-bin/cvekey.cgi?keyword="+service+"+"+version
res = requests.get(url)
soup = BeautifulSoup(res.content, "lxml")
listCVE = []
for elt in soup.find_all('a', attrs={'href' : re.compile("^/cgi-bin/")}):
listCVE.append(elt.get_text())
return url, listCVE | f9daf52e0c508496273c8a07b279051ebf662198 | 12,701 |
def config_from_args(args) -> config.TestConfig:
"""Convert args read from cli to config"""
return config.TestConfig(program=args.p,
test_dir=args.d,
verifier=args.v,
break_on_error=args.b == 'true',
groups=args.g or ['.*'],
timeout=args.t,
timer=args.timer,
sha=args.sha) | 664b55566839fe6eb80ba99c3ecdb9c8fd082485 | 12,702 |
def build_json(
spec_filename: str,
package_name: str,
dist_path: str,
format_: PackageFormat = PackageFormat.NONE,
) -> None:
"""
Create an OpenAlchemy distribution package with the SQLAlchemy models.
The package can be uploaded to, for example, PyPI or a private repository for
distribution.
The formats can be combined with the bitwise operator or (``|``), for
instance, building both sdist and wheel packages can be specified like that:
    .. code-block:: python
format_ = PackageFormat.SDIST|PackageFormat.WHEEL
Args:
spec_filename: filename of an OpenAPI spec in JSON format
package_name: The name of the package.
dist_path: The directory to output the package to.
format_: (optional) The format(s) of the archive(s) to build.
"""
# Most OpenAPI specs are YAML, so, for efficiency, we only import json if we
# need it:
import json # pylint: disable=import-outside-toplevel
with open(spec_filename) as spec_file:
spec = json.load(spec_file)
return _build_module.execute(
spec=spec, name=package_name, path=dist_path, format_=format_
) | 714cf4c71136b50ad75bf9d46066b8c17dfee9c1 | 12,703 |
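# Hedged usage sketch for build_json above: "openapi.json", "models_pkg" and "dist/" are
# placeholder arguments, and the call assumes PackageFormat is importable from the same
# module as the function signature implies.
build_json(
    spec_filename="openapi.json",
    package_name="models_pkg",
    dist_path="dist/",
    format_=PackageFormat.SDIST | PackageFormat.WHEEL,
)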
def importFromDotSpec(spec):
"""
Import an object from an arbitrary dotted sequence of packages, e.g.,
"a.b.c.x" by splitting this into "a.b.c" and "x" and calling importFrom().
:param spec: (str) a specification of the form package.module.object
    :return: the imported object
:raises PygcamException: if the import fails
"""
modname, objname = spec.rsplit('.', 1)
try:
return importFrom(modname, objname)
except ImportError:
raise PygcamException("Can't import '%s' from '%s'" % (objname, modname)) | d33eef3a086bec399ab82d2a3b28acffadc9b8dc | 12,705 |
import io
import pandas as pd
def usgs_graphite_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[5:9]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
    if len(df_data.columns) == 13:
df_data.columns = ["Production", "space_1", "Unit", "space_6",
"year_1", "space_2", "year_2", "space_3",
"year_3", "space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['graphite'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data | e2077e05096fd304f5c5f60e497f5c0b797135b8 | 12,706 |
import types
import numpy
import typing
def _gen_np_divide(arg1, arg2, out_ir, typemap):
"""generate np.divide() instead of / for array_expr to get numpy error model
like inf for division by zero (test_division_by_zero).
"""
scope = arg1.scope
loc = arg1.loc
# g_np_var = Global(numpy)
g_np_var = ir.Var(scope, mk_unique_var("$np_g_var"), loc)
typemap[g_np_var.name] = types.misc.Module(numpy)
g_np = ir.Global('np', numpy, loc)
g_np_assign = ir.Assign(g_np, g_np_var, loc)
# attr call: div_attr = getattr(g_np_var, divide)
div_attr_call = ir.Expr.getattr(g_np_var, "divide", loc)
attr_var = ir.Var(scope, mk_unique_var("$div_attr"), loc)
func_var_typ = get_np_ufunc_typ(numpy.divide)
typemap[attr_var.name] = func_var_typ
attr_assign = ir.Assign(div_attr_call, attr_var, loc)
# divide call: div_attr(arg1, arg2)
div_call = ir.Expr.call(attr_var, [arg1, arg2], (), loc)
func_typ = func_var_typ.get_call_type(
typing.Context(), [typemap[arg1.name], typemap[arg2.name]], {})
out_ir.extend([g_np_assign, attr_assign])
return func_typ, div_call | 8caa3a26fd240b7b247ccf78ae7c4c0adcbf8b3e | 12,707 |
import math
def pseudo_volume_watson(eos, r, temp, press_eos, rho_eos, a_mix, b_mix, desired_phase):
"""
Calculates a pseudo volume based on the algorithm described by Watson (2018)
in thesis "Robust Simulation and Optimization Methods for Natural Gas Liquefaction Processes"
Available at https://dspace.mit.edu/handle/1721.1/115702
"""
if eos == 0:
u, w = 1, 0
elif eos == 1:
u, w = 2, -1
else:
return '', 0, 0
# kappa is a tuning parameter whose details are given by Watson.
# Remains untouched for most cases
kappa = 0.9
solution_found, rho_mc, rho_lo, rho_hi, rho_omega, temp_mc = pseudo_root_search_mathias(eos, r,
temp, a_mix, b_mix,
desired_phase, kappa)
if desired_phase == 'liq':
rho_L_omega = rho_omega
if not solution_found:
rho_L_star = rho_mc
else:
rho_L_star = mid(rho_mc, rho_L_omega, rho_hi)
rho_test = rho_L_star
press_star = r * temp / (-b_mix + 1 / rho_test) - a_mix / (
w * b_mix ** 2 + u * b_mix / rho_test + rho_test ** (-2))
d_press_d_rho_star = r * temp / (rho_test ** 2 * (-b_mix + 1 / rho_test) ** 2) - (
u * b_mix / rho_test ** 2 + 2 / rho_test ** 3) * a_mix / (
w * b_mix ** 2 + u * b_mix / rho_test + rho_test ** (-2)) ** 2
B_L = d_press_d_rho_star * (rho_L_star - 0.7 * rho_mc)
A_L = (press_star - B_L * math.log(rho_L_star - 0.7 * rho_mc))
rho_L_extrap = min(math.exp((press_eos - A_L) / B_L) + 0.7 * rho_mc, rho_hi)
rho_L = mid(rho_eos, rho_L_star, rho_L_extrap)
rho_test = rho_L
press_calc = r * temp / (-b_mix + 1 / rho_test) - a_mix / (
w * b_mix ** 2 + u * b_mix / rho_test + rho_test ** (-2))
return desired_phase, 1 / rho_L, abs(press_calc)
elif desired_phase == 'vap':
rho_V_omega = rho_omega
if not solution_found:
rho_V_star = kappa * rho_mc
else:
rho_V_star = mid(rho_lo, rho_V_omega, kappa * rho_mc)
rho_V_bound = mid(rho_lo, rho_V_omega, kappa * rho_mc)
rho_test = rho_V_star
press_star = r * temp / (-b_mix + 1 / rho_test) - a_mix / (
w * b_mix ** 2 + u * b_mix / rho_test + rho_test ** (-2))
# Derivative of the EOS in terms of rho_test
d_press_d_rho_star = r * temp / (rho_test ** 2 * (-b_mix + 1 / rho_test) ** 2) - (
u * b_mix / rho_test ** 2 + 2 / rho_test ** 3) * a_mix / (
w * b_mix ** 2 + u * b_mix / rho_test + rho_test ** (-2)) ** 2
A_V = 1 / press_star
B_V = -d_press_d_rho_star / (press_star ** 2)
C_V = -abs(A_V + 0.5 * B_V * (rho_mc - rho_V_star)) / ((0.5 * (rho_mc - rho_V_star)) ** 2)
term2 = (-B_V - math.sqrt(B_V ** 2 - 4 * C_V * max(0, (A_V - 1 / press_eos)))) / (2 * C_V)
rho_test = rho_V_omega
d_press_d_rho_omega = r * temp / (rho_test ** 2 * (-b_mix + 1 / rho_test) ** 2) - (
u * b_mix / rho_test ** 2 + 2 / rho_test ** 3) * a_mix / (
w * b_mix ** 2 + u * b_mix / rho_test + rho_test ** (-2)) ** 2
term3 = min(0, press_eos - press_star) / d_press_d_rho_omega + term2 + max(0, temp - temp_mc) * max(0,
d_press_d_rho_star - d_press_d_rho_omega)
rho_V_extrap = mid(0, rho_hi, rho_V_bound + term3)
rho_V = mid(rho_eos, rho_V_star, rho_V_extrap)
# Do we need to correct the vapor fugacity coefficients?
# rho_test = rho_V
# press_calc = r*temp/(-b_mix + 1/rho_test) - a_mix/(w*b_mix**2 + u*b_mix/rho_test + rho_test**(-2))
return desired_phase, 1 / rho_V, press_eos
else:
return '', 0, 0 | dc45d0318cf12784571c1d0a43cf63715713efd1 | 12,708 |
def layer_norm(x, axes=1, initial_bias_value=0.0, epsilon=1e-3, name="var"):
"""
Apply layer normalization to x
    Args:
        x: input variable.
        axes: axis or list of axes over which the mean and variance are computed.
        initial_bias_value: initial value for the LN bias.
        epsilon: small constant value to avoid division by zero.
        name: variable scope name for the LN op.
Returns:
LN(x) with same shape as x
"""
if not isinstance(axes, list):
axes = [axes]
scope = tf.get_variable_scope()
with tf.variable_scope(scope):
with tf.variable_scope(name):
mean = tf.reduce_mean(x, axes, keep_dims=True)
variance = tf.sqrt(tf.reduce_mean(tf.square(x - mean), axes, keep_dims=True))
with tf.device('/cpu:0'):
gain = tf.get_variable('gain', x.get_shape().as_list()[1:],
initializer=tf.constant_initializer(1.0))
bias = tf.get_variable('bias', x.get_shape().as_list()[1:],
initializer=tf.constant_initializer(initial_bias_value))
return gain * (x - mean) / (variance + epsilon) + bias | 786715f6e43e89fa2372421cf3e8c5ef11c63949 | 12,709 |
def P2D_p(df, attr):
"""
Calcul de la probabilité conditionnelle P(target | attribut).
*les parametres:
df: dataframe avec les données. Doit contenir une colonne nommée "target".
attr: attribut à utiliser, nom d'une colonne du dataframe.
*le return:
de type dictionnaire de dictionnaire, dictionnaire_proba. dictionnaire_proba[t][a] contient P(target = t | attribut = a).
"""
list_cle = np.unique(df[attr].values) #Valeurs possibles de l'attribut.
dictionnaire_proba = dict.fromkeys(list_cle)
for cle in dictionnaire_proba:
dictionnaire_proba[cle] = dict.fromkeys([0,1], 0) #Target a toujours pour valeur soit 0 soit 1.
group = df.groupby(["target", attr]).groups
for t, val in group:
dictionnaire_proba[val][t] = len(group[(t, val)])
for cle in dictionnaire_proba:
taille = (df[attr] == cle).sum()
for i in range (2):
dictionnaire_proba[cle][i] = dictionnaire_proba[cle][i] / taille
return dictionnaire_proba | 4d76ef75f0fd821d60b0364de8e5f337fe19039e | 12,710 |
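# Hedged usage sketch for P2D_p above with a tiny toy dataframe; any real dataframe only
# needs a binary "target" column plus the attribute column being conditioned on.
import numpy as np
import pandas as pd

toy_df = pd.DataFrame({"target": [0, 1, 1, 0, 1], "sex": [0, 0, 1, 1, 1]})
print(P2D_p(toy_df, "sex"))  # -> {0: {0: 0.5, 1: 0.5}, 1: {0: 0.333..., 1: 0.666...}}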
def has_entries(*keys_valuematchers, **kv_args):
"""Matches if dictionary contains entries satisfying a dictionary of keys
and corresponding value matchers.
:param matcher_dict: A dictionary mapping keys to associated value matchers,
or to expected values for
:py:func:`~hamcrest.core.core.isequal.equal_to` matching.
Note that the keys must be actual keys, not matchers. Any value argument
that is not a matcher is implicitly wrapped in an
:py:func:`~hamcrest.core.core.isequal.equal_to` matcher to check for
equality.
Examples::
has_entries({'foo':equal_to(1), 'bar':equal_to(2)})
has_entries({'foo':1, 'bar':2})
``has_entries`` also accepts a list of keyword arguments:
.. function:: has_entries(keyword1=value_matcher1[, keyword2=value_matcher2[, ...]])
:param keyword1: A keyword to look up.
:param valueMatcher1: The matcher to satisfy for the value, or an expected
value for :py:func:`~hamcrest.core.core.isequal.equal_to` matching.
Examples::
has_entries(foo=equal_to(1), bar=equal_to(2))
has_entries(foo=1, bar=2)
Finally, ``has_entries`` also accepts a list of alternating keys and their
value matchers:
.. function:: has_entries(key1, value_matcher1[, ...])
:param key1: A key (not a matcher) to look up.
:param valueMatcher1: The matcher to satisfy for the value, or an expected
value for :py:func:`~hamcrest.core.core.isequal.equal_to` matching.
Examples::
has_entries('foo', equal_to(1), 'bar', equal_to(2))
has_entries('foo', 1, 'bar', 2)
"""
if len(keys_valuematchers) == 1:
try:
base_dict = keys_valuematchers[0].copy()
for key in base_dict:
base_dict[key] = wrap_matcher(base_dict[key])
except AttributeError:
raise ValueError(
"single-argument calls to has_entries must pass a dict as the argument"
)
else:
if len(keys_valuematchers) % 2:
raise ValueError("has_entries requires key-value pairs")
base_dict = {}
for index in range(int(len(keys_valuematchers) / 2)):
base_dict[keys_valuematchers[2 * index]] = wrap_matcher(
keys_valuematchers[2 * index + 1]
)
for key, value in kv_args.items():
base_dict[key] = wrap_matcher(value)
return IsDictContainingEntries(base_dict) | 20892046695b337483b768fa3e3cf3ae6fcf7cad | 12,711 |
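# Hedged usage sketch for has_entries above, as it is typically combined with PyHamcrest's
# assert_that / equal_to; this assumes the full hamcrest package (which also provides the
# wrap_matcher and IsDictContainingEntries helpers used above) is installed.
from hamcrest import assert_that, equal_to

assert_that({"foo": 1, "bar": 2}, has_entries({"foo": equal_to(1), "bar": 2}))
assert_that({"foo": 1, "bar": 2}, has_entries(foo=1))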
def giou_loss(y_true: TensorLike,
y_pred: TensorLike,
mode: str = 'giou') -> tf.Tensor:
"""
Args:
y_true: true targets tensor. The coordinates of the each bounding
box in boxes are encoded as [y_min, x_min, y_max, x_max].
y_pred: predictions tensor. The coordinates of the each bounding
box in boxes are encoded as [y_min, x_min, y_max, x_max].
mode: one of ['giou', 'iou'], decided to calculate GIoU or IoU loss.
Returns:
GIoU loss float `Tensor`.
"""
if mode not in ['giou', 'iou']:
raise ValueError("Value of mode should be 'iou' or 'giou'")
y_pred = tf.convert_to_tensor(y_pred)
if not y_pred.dtype.is_floating:
y_pred = tf.cast(y_pred, tf.float32)
y_true = tf.cast(y_true, y_pred.dtype)
giou = _calculate_giou(y_pred, y_true, mode)
return 1 - giou | e12e51fa11cf7e89eb4cf99f0342a34d5b068a8c | 12,712 |
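# Hedged usage sketch for giou_loss above: two unit boxes offset by 0.1 in
# [y_min, x_min, y_max, x_max] order. It assumes the _calculate_giou helper used
# internally is available in the same module (it is not shown in this snippet).
import tensorflow as tf

boxes_true = tf.constant([[0.0, 0.0, 1.0, 1.0]])
boxes_pred = tf.constant([[0.1, 0.1, 1.1, 1.1]])
print(giou_loss(boxes_true, boxes_pred, mode="giou"))  # small positive loss; 0 for a perfect match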
from typing import List
from typing import Dict
def generate_one_frame(
df_dict: dict,
tag_list: list,
fig,
up_to_index,
time_column,
batch_ids_to_animate: list,
animation_colour_assignment,
show_legend=False,
hovertemplate: str = "",
max_columns=0,
) -> List[Dict]:
"""
Returns a list of dictionaries.
Each entry in the list is for each subplot; in the order of the subplots.
Since each subplot is a tag, we need the `tag_list` as input.
"""
output = []
row = col = 1
for tag in tag_list:
for batch_id in batch_ids_to_animate:
# These 4 lines are duplicated from the outside function
if time_column in df_dict[batch_id].columns:
time_data = df_dict[batch_id][time_column]
else:
time_data = list(range(df_dict[batch_id].shape[0]))
output.append(
go.Scatter(
x=time_data[0:up_to_index],
y=df_dict[batch_id][tag][0:up_to_index],
name=batch_id,
mode="lines",
hovertemplate=hovertemplate,
line=animation_colour_assignment[batch_id],
legendgroup=batch_id,
showlegend=show_legend if tag == tag_list[0] else False,
xaxis=fig.get_subplot(row, col)[1]["anchor"],
yaxis=fig.get_subplot(row, col)[0]["anchor"],
)
)
        # One level outdented: in the loop over the tags, not in the loop over
        # the `batch_ids_to_animate`!
col += 1
if col > max_columns:
row += 1
col = 1
return output | b16c41725d24f292a06e352af5a051f78492efe6 | 12,713 |
def invite_contributor_post(node, **kwargs):
"""API view for inviting an unregistered user. Performs validation, but does not actually invite the user.
Expects JSON arguments with 'fullname' (required) and email (not required).
"""
fullname = request.json.get('fullname').strip()
email = request.json.get('email')
# Validate and sanitize inputs as needed. Email will raise error if invalid.
fullname = sanitize.strip_html(fullname)
if email:
email = email.lower().strip()
try:
validate_email(email)
except ValidationError as e:
return {'status': 400, 'message': e.message}, 400
if not fullname:
return {'status': 400, 'message': 'Full name field cannot be empty'}, 400
# Check if email is in the database
user = get_user(email=email)
if user:
if user.is_registered:
msg = 'User is already in database. Please go back and try your search again.'
return {'status': 400, 'message': msg}, 400
elif node.is_contributor(user):
msg = 'User with this email address is already a contributor to this project.'
return {'status': 400, 'message': msg}, 400
elif not user.is_confirmed:
serialized = profile_utils.serialize_unregistered(fullname, email)
else:
serialized = profile_utils.add_contributor_json(user)
# use correct display name
serialized['fullname'] = fullname
serialized['email'] = email
else:
# Create a placeholder
serialized = profile_utils.serialize_unregistered(fullname, email)
return {'status': 'success', 'contributor': serialized} | 67793d5dc18705a6133d5e9c29354de724ca6d47 | 12,714 |
def requires_roles(*roles):
""" """
def wrapper(f):
@wraps(f)
def wrapped(*args, **kwargs):
try:
if current_user.role.name not in roles:
abort(403)
except AttributeError:
pass
return f(*args, **kwargs)
return wrapped
return wrapper | b1d3f55fed3a5075a4e63a27712fc531aba84064 | 12,715 |
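# Hedged usage sketch for requires_roles above, protecting a Flask view; ``app`` is an
# assumed Flask application object and the role names are placeholders. current_user and
# abort are assumed to come from flask_login and flask, as in the decorator itself.
@app.route("/admin")
@requires_roles("admin", "moderator")
def admin_dashboard():
    return "only admins and moderators get here"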
def home(request):
"""
Homepage, user must login to view
"""
context = {
'posts': BlogPost.objects.all().order_by('-date'), #Get all event announcement blog posts
'master': MasterControl.objects.get(identifier="MASTER") #Get the master control object
}
return render(request, 'blog/home.html', context) | 023b788363e769c323f5625dcf08bc8953b1e802 | 12,716 |
def user_login_success(request):
""" Success login page """
return core.render(request, 'login/login-conf.html') | df58c15630e6d82232eea372b13abe12b3a75db1 | 12,717 |
def use_scope() -> Scope:
"""Get the current ASGI scope dictionary"""
return use_websocket().scope | a819b1c2dc8707cecd6298e73835e35ebb7c232c | 12,718 |
def set_start_stop_from_input(spiketrains):
"""
    Sets the start :attr:`t_start` and stop :attr:`t_stop` point
    from the given input.
    If a single neo.SpikeTrain object is given, the start :attr:`t_start` and stop
    :attr:`t_stop` of that spike train are returned.
    Otherwise the aligned times are returned, which are the maximal start point
    and minimal stop point.
Parameters
----------
spiketrains: neo.SpikeTrain object, list or array of neo.core.SpikeTrain
objects
List of neo.core SpikeTrain objects to extract `t_start` and
`t_stop` from.
Returns
-------
start : quantities.Quantity
Start point extracted from input :attr:`spiketrains`
stop : quantities.Quantity
Stop point extracted from input :attr:`spiketrains`
"""
if isinstance(spiketrains, neo.SpikeTrain):
return spiketrains.t_start, spiketrains.t_stop
else:
start = max([elem.t_start for elem in spiketrains])
stop = min([elem.t_stop for elem in spiketrains])
return start, stop | 243ec7be75a11959cb3d9802536e10b7add356ee | 12,720 |
from subprocess import PIPE, Popen
def less(data_str):
"""Pretty print JSON and pipe to less."""
p = Popen('less', stdin=PIPE)
p.stdin.write(data_str.encode())
p.stdin.close()
p.wait()
return True | 3a1af46386d80c1c0f6082c019b27d9fb55554c3 | 12,721 |
def get_extensions(f, v):
"""
Get a dictionary which maps each extension name to a bool whether it is
enabled in the file
Parameters
----------
f : an h5py.File or h5py.Group object
The object in which to find claimed extensions
v : bool
Verbose option
Returns
-------
A dictionary {string:bool} where the keys are the extension names and the
bool states whether it is enabled or not
"""
valid, extensionIDs = get_attr(f, "openPMDextension")
result = {ext: False for ext in ext_list.keys()}
if valid:
enabledExtMask = 0
for extension, bitmask in ext_list.items():
# This uses a bitmask to identify activated extensions
if (bitmask & extensionIDs) == bitmask:
result[extension] = True
enabledExtMask |= bitmask
if v:
print("Info: Found extension '%s'." % extension)
# Mask out the extension bits we have already detected so only
# unknown ones are left
excessIDs = extensionIDs & ~enabledExtMask
if excessIDs:
print("Warning: Unknown extension Mask left: %s" % excessIDs)
return result | 0d62788b01732c32b2f227a7c8d5ffe87a810d9e | 12,722 |
async def remove_device(ws_client, device_id, config_entry_id):
"""Remove config entry from a device."""
await ws_client.send_json(
{
"id": 5,
"type": "config/device_registry/remove_config_entry",
"config_entry_id": config_entry_id,
"device_id": device_id,
}
)
response = await ws_client.receive_json()
return response["success"] | 59fc29f8fd7672ea9ea4cd1a3cf9300f4feaf539 | 12,724 |
def deco_inside_ctx_method_self(target):
"""decorator: wrap a class method inside a `with self: ...` context"""
def tgt(self, *args, **kwargs):
with self:
return target(self, *args, **kwargs)
return tgt | 6a29ad468840229c026e6abf87556018a3e16718 | 12,725 |
def c_get_mechanism_info(slot, mechanism_type):
"""Gets a mechanism's info
:param slot: The slot to query
:param mechanism_type: The type of the mechanism to get the information for
:returns: The result code, The mechanism info
"""
mech_info = CK_MECHANISM_INFO()
ret = C_GetMechanismInfo(CK_ULONG(slot), CK_MECHANISM_TYPE(mechanism_type), byref(mech_info))
return ret, mech_info | c2570874d29bdb48ee4b3146137e19133afdbff4 | 12,726 |
from matplotlib.patches import Patch
def make_legend(names, colors):
"""
Make a list of legend handles and colours
:param names: list of names
:param colors: list of colors
:return: list of matplotlib.patches.Patch objects for legend
"""
legend_elements = []
for idx, name in enumerate(names):
el = Patch(color=colors[idx], label=name)
legend_elements.append(el)
return legend_elements | 99d4ea06b70b8b4cf1a868179ca2218aa446296d | 12,727 |
def create_visitor_id(visitor_id, options):
"""Creates new VisitorId"""
if not visitor_id:
visitor_id = VisitorId()
if not options:
options = {}
device_id = options.get("device_id")
visitor = options.get("visitor")
if not visitor_id.tnt_id:
visitor_id.tnt_id = device_id
if not visitor_id.marketing_cloud_visitor_id:
visitor_id.marketing_cloud_visitor_id = get_marketing_cloud_visitor_id(visitor)
visitor_id.customer_ids = get_customer_ids(visitor_id.customer_ids, visitor)
return visitor_id | 168c31347334a51bcc75506c22e5846dfc26ec0f | 12,728 |
def validate_crc(response: str, candidate: str) -> bool:
"""Calculates and validates the response CRC against expected"""
expected_crc = '{:04X}'.format(crc(response))
return expected_crc == candidate.replace('*', '') | 387581b4aa42d6dedfde32b374071369d9423b20 | 12,729 |
import torch
def z_gate():
"""
Pauli z
"""
return torch.tensor([[1, 0], [0, -1]]) + 0j | 9bd6276a1d60b260f3ad41b609f29e0414c0dc95 | 12,730 |
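# Hedged usage sketch for z_gate above: applying Pauli-Z leaves |0> unchanged and flips
# the sign of |1>.
ket0 = torch.tensor([1, 0]) + 0j
ket1 = torch.tensor([0, 1]) + 0j
print(z_gate() @ ket0)  # -> tensor([1.+0.j, 0.+0.j])
print(z_gate() @ ket1)  # -> tensor([0.+0.j, -1.+0.j])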
import numpy as np
def numpy_ewma(data, window):
    """
    Exponentially weighted moving average (adjusted weighting), vectorized with numpy.
    :param data: 1-D numpy array of values
    :param window: window size; the smoothing factor is alpha = 1 / window
    :return: array of the same length containing the EWMA values
    """
    alpha = 1 / window
n = data.shape[0]
scale_arr = (1 - alpha) ** (-1 * np.arange(n))
weights = (1 - alpha) ** np.arange(n)
pw0 = (1 - alpha) ** (n - 1)
mult = data * pw0 * scale_arr
cumsums = mult.cumsum()
out = cumsums * scale_arr[::-1] / weights.cumsum()
return out | e612e510ffb1b2feb726559cf6c1f3676f4aa0b8 | 12,731 |
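# Hedged check for numpy_ewma above: for a 1-D array it should match pandas'
# adjusted EWMA with alpha = 1/window up to floating-point error (pandas is used
# here only for comparison and is not required by the function itself).
import pandas as pd

data = np.arange(10, dtype=float)
fast = numpy_ewma(data, window=5)
reference = pd.Series(data).ewm(alpha=1 / 5, adjust=True).mean().to_numpy()
print(np.allclose(fast, reference))  # -> True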
import ctypes
def tagged_sha256(tag: bytes, msg: bytes) -> bytes:
"""
Compute a tagged hash as defined in BIP-340.
This is useful for creating a message hash and achieving domain separation
through an application-specific tag. This function returns
SHA256(SHA256(tag)||SHA256(tag)||msg).
:param tag: tag
:param msg: message
:return: 32-byte hash
:raises ValueError: if arguments are invalid type
:raises Libsecp256k1Exception: arguments are invalid
"""
hash32 = ctypes.create_string_buffer(HASH32)
result = lib.secp256k1_tagged_sha256(
secp256k1_context_verify, hash32, tag, len(tag), msg, len(msg)
)
if result != 1:
assert_zero_return_code(result)
raise Libsecp256k1Exception("invalid arguments")
return hash32.raw[:HASH32] | b3bea5c80f09740c63a5b896e487684a0a15f6f9 | 12,733 |
def validate_blacklist(password):
""" It does not contain the strings ab, cd, pq, or xy """
for blacklisted in ['ab', 'cd', 'pq', 'xy']:
if blacklisted in password:
return False
return True | 93ad092d5622e0567171f487522c2db824089eb9 | 12,734 |
def get_mobilenet(model, method, num_classes):
"""Returns the requested model, ready for training/pruning with the specified method.
:param model: str
:param method: full or prune
:param num_classes: int, num classes in the dataset
:return: A prunable MobileNet model
"""
ModuleInjection.pruning_method = method
ModuleInjection.prunable_modules = []
if model == 'mobilenetv2':
net = MobileNetv2(num_classes)
net.prunable_modules = ModuleInjection.prunable_modules
return net | f7ee264dc5db6bddc41242a9fbd90a3d6059755b | 12,735 |
import time
def f(x):
"""Squares something"""
time.sleep(10)
return x * x | 6c1ab07ebaaeca6258601ec33f181e75086a355a | 12,736 |
def get_added_after(
fetch_full_feed, initial_interval, last_fetch_time=None, filter_args=None
):
"""
Creates the added_after param, or extracts it from the filter_args
    :param fetch_full_feed: when set to true, always start from the initial interval
    :param initial_interval: interval to fall back on when no last fetch time or filter is available
    :param last_fetch_time: last fetch time value (str)
:param filter_args: set of filter_args defined by the user to be merged with added_after
:return: added_after
"""
if fetch_full_feed:
return initial_interval
if not filter_args or "added_after" not in filter_args:
return last_fetch_time or initial_interval
return filter_args["added_after"] | 281cb7d7429071bf8dca0d04eedee9130a29b28d | 12,737 |
def gdf_lineStrings():
"""Construct a gdf that contains two LineStrings."""
ls_short = LineString([(13.476808430, 48.573711823), (13.506804, 48.939008), (13.4664690, 48.5706414)])
ls_long = LineString([(13.476808430, 48.573711823), (11.5675446, 48.1485459), (8.5067847, 47.4084269)])
a_list = [(0, ls_short), (1, ls_long)]
gdf = gpd.GeoDataFrame(a_list, columns=["id", "geometry"]).set_geometry("geometry")
gdf = gdf.set_crs("wgs84")
return gdf | d14318ed1765b20151e28718938d611d6359bfd5 | 12,738 |
from typing import Callable
from typing import Coroutine
from typing import Any
from typing import Optional
from typing import Dict
import functools
import anyio
def runnify(
async_function: Callable[T_ParamSpec, Coroutine[Any, Any, T_Retval]],
backend: str = "asyncio",
backend_options: Optional[Dict[str, Any]] = None,
) -> Callable[T_ParamSpec, T_Retval]:
"""
Take an async function and create a regular (blocking) function that receives the
same keyword and positional arguments for the original async function, and that when
called will create an event loop and use it to run the original `async_function`
with those arguments.
That function returns the return value from the original `async_function`.
The current thread must not be already running an event loop.
This calls `anyio.run()` underneath.
Use it like this:
```Python
async def program(name: str) -> str:
return f"Hello {name}"
result = asyncer.runnify(program)(name="World")
print(result)
```
## Arguments
`async_function`: an async function to call
`backend` name of the asynchronous event loop implementation - currently either
`asyncio` or `trio`
`backend_options` keyword arguments to call the backend `run()` implementation with
## Return
The return value of the async function
## Raises
`RuntimeError`: if an asynchronous event loop is already running in this thread
`LookupError`: if the named backend is not found
"""
@functools.wraps(async_function)
def wrapper(*args: T_ParamSpec.args, **kwargs: T_ParamSpec.kwargs) -> T_Retval:
partial_f = functools.partial(async_function, *args, **kwargs)
return anyio.run(partial_f, backend=backend, backend_options=backend_options)
return wrapper | 64f8e826dce8d35b5b7307289346eb5bfdb1bbee | 12,740 |
from typing import Dict
from typing import Any
from typing import Iterable
def _get_properties(rsp: Dict[Text, Any]) -> Iterable[CdProperty]:
""" Retrieve key properties to be passed onto dynatrace server. """
return [
CdProperty("Status", rsp.get("status", "N/A")),
CdProperty("Entry point", rsp.get("entryPoint", "N/A")),
CdProperty("Available memory Mb", rsp.get("availableMemoryMb", "N/A")),
CdProperty("Runtime", rsp.get("runtime", "")),
CdProperty("Ingress settings", rsp.get("ingressSettings", "")),
] | e409ae450ca948f63d4588714cc09f17efe8525a | 12,742 |
def photo_fit_bprior(time, ptime, nflux, flux_err, guess_transit, guess_ew, rho_star, e, w, directory, nwalk, nsteps, ndiscard, plot_transit=True, plot_burnin=True, plot_corner=True, plot_Tburnin=True, plot_Tcorner=True):
"""Fit eccentricity for a planet.
Applies Bayesian beta-dist prior from Kipping 2014
Parameters
----------
time: np.array
Light curve time
nflux: np.array
Light curve flux
flux_err: np.array
Light curve flux errors
guess_transit: np.array (length 4)
Initial guess for MCMC transit fitting. Passed into mcmc_fitter().
guess_ew: np.array (length 2)
Initial guess for MCMC e and w fitting. [e guess, w guess]
rho_star: np.array
"True" stellar density distribution
e: float
True eccentricity (just to name plots)
w: float
True longitude of periastron (just to name plots)
directory: str
Directory to save plots
nwalk: int
Number of walkers
nsteps: int
Number of steps to run in MCMC. Passed into mcmc_fitter().
ndiscard: int
Number of steps to discard in MCMC. Passed into mcmc_fitter().
plot_transit: boolean, default True
Save transit light curve plot + fit in specified directory.
    Returns
    -------
    p_f, rprs_f, a_f, i_f: floats
        Best-fit period, Rp/Rs, a/Rs and inclination (modes of the transit MCMC distributions)
    fite: float
        Best-fit eccentricity (median of MCMC distribution)
    fitw: float
        Best-fit longitude of periastron (median of MCMC distribution)
    edist, wdist: np.array
        Posterior samples of e and w
    gs: np.array
        "g" distribution for planet
    g_mean: float
        Mode of g distribution
    g_sigmas: list (length 2)
        [(-) sigma, (+) sigma] of g distribution
    T14dist, T23dist: np.array
        Distributions of the total (T14) and full (T23) transit durations
    """
# EMCEE Transit Model Fitting
_, _, pdist, rdist, adist, idist, t0dist = mcmc_fitter(guess_transit, time, ptime, nflux, flux_err, nwalk, nsteps, ndiscard, e, w, directory, plot_Tburnin=True, plot_Tcorner=True)
p_f, perr_f = mode(pdist), get_sigmas(pdist)
rprs_f, rprserr_f = mode(rdist), get_sigmas(rdist)
a_f, aerr_f = mode(adist), get_sigmas(adist)
i_f, ierr_f = mode(idist), get_sigmas(idist)
t0_f, t0err_f = mode(t0dist), get_sigmas(t0dist)
# Create a light curve with the fit parameters
fit = integratedlc_fitter(time, p_f, rprs_f, a_f, i_f, t0_f)
if plot_transit==True:
plt.cla()
plt.errorbar(time, nflux, yerr=flux_err, c='blue', fmt='o', alpha=0.5, label='Original LC')
plt.scatter(time, fit, c='red', alpha=1.0)
plt.plot(time, fit, c='red', alpha=1.0, label='Fit LC')
#plt.xlim(-0.1, 0.1)
plt.legend()
plt.savefig(directory + 'lightcurve_fitp' + str(p_f) + '_fitrprs' + str(rprs_f) + '_fitars' + str(a_f) + '_fiti' + str(i_f) + '.png')
plt.close()
print('Fit params:')
print('Period (days): ', p_f)
print('Rp/Rs: ', rprs_f)
print('a/Rs: ', a_f)
print('i (deg): ', i_f)
T14dist = get_T14(pdist, rdist, adist, idist)
T23dist = get_T23(pdist, rdist, adist, idist)
gs, rho_c = get_g_distribution(rho_star, pdist, rdist, T14dist, T23dist)
g_mean = mode(gs)
g_sigma_min, g_sigma_plus = get_sigmas(gs)
g_sigmas = [g_sigma_min, g_sigma_plus]
#Guesses
w_guess = guess_ew[1]
e_guess = guess_ew[0]
solnx = (w_guess, e_guess)
pos = solnx + 1e-4 * np.random.randn(32, 2)
nwalkers, ndim = pos.shape
sampler = emcee.EnsembleSampler(nwalkers, ndim, bprior_log_probability, args=(g_mean, np.nanmean(g_sigmas)), threads=4)
print('-------MCMC------')
sampler.run_mcmc(pos, 5000, progress=True);
    flat_samples_e = sampler.get_chain(discard=1000, thin=15, flat=True)
    labels = ["w", "e"]  # defined here so the corner plot works even when plot_burnin is False
    if plot_burnin==True:
        fig, axes = plt.subplots(2, figsize=(10, 7), sharex=True)
        samples = sampler.get_chain()
for i in range(ndim):
ax = axes[i]
ax.plot(samples[:, :, i], "k", alpha=0.3)
ax.set_xlim(0, len(samples))
ax.set_ylabel(labels[i])
ax.yaxis.set_label_coords(-0.1, 0.5)
axes[-1].set_xlabel("step number");
fig.savefig(directory + 'e_g_burnin.png')
plt.close(fig)
edist = flat_samples_e[:,1]
wdist = flat_samples_e[:,0]
fite = np.percentile(edist, 50)
fitw = np.percentile(wdist, 50)
mcmc_e = np.percentile(edist, [16, 50, 84])
q_e = np.diff(mcmc_e)
mcmc_w = np.percentile(wdist, [16, 50, 84])
q_w = np.diff(mcmc_w)
if plot_corner==True:
fig = corner.corner(flat_samples_e, labels=labels, show_titles=True, title_kwargs={"fontsize": 12}, truths=[w, e], quantiles=[0.16, 0.5, 0.84], plot_contours=True);
fig.savefig(directory + 'corner_fit_e' + str(fite) + '_fit_w' + str(fitw) + '_fit_g' + str(g_mean) + '.png')
plt.close(fig)
return p_f, rprs_f, a_f, i_f, fite, fitw, edist, wdist, gs, g_mean, g_sigmas, T14dist, T23dist | 1ede4401a1f0b486cfa30bfc31fe5696e0fa4551 | 12,743 |
def canny_edges(image, minedges=5000, maxedges=15000, low_thresh=50, minEdgeRadius=20, maxEdgeRadius=None):
"""
Compute Canny edge detection on an image
"""
t0 = time.time()
dx = ndimage.sobel(image,0)
dy = ndimage.sobel(image,1)
mag = numpy.hypot(dx, dy)
mag = mag / mag.max()
ort = numpy.arctan2(dy, dx)
edge_map = non_maximal_edge_suppresion(mag, ort, minEdgeRadius, maxEdgeRadius)
edge_map = numpy.logical_and(edge_map, mag > low_thresh)
labels, numlabels = ndimage.measurements.label(edge_map, numpy.ones((3,3)))
#print "labels", len(labels)
#print maxs
maxs = ndimage.measurements.maximum(mag, labels, range(1,numlabels+1))
maxs = numpy.array(maxs, dtype=numpy.float64)
high_thresh = maxs.mean()
minThresh = maxs.min()
#print time.time() - t0
edge_count = edge_map.sum()
count = 0
while count < 25:
t0 = time.time()
count += 1
maxs = ndimage.measurements.maximum(mag, labels, range(1,numlabels+1))
maxs = numpy.array(maxs, dtype=numpy.float64)
good_label = (maxs > high_thresh)
good_label = numpy.append([False, ], good_label)
numgood = good_label.sum()
if numgood == numlabels and high_thresh > minThresh:
print "ERROR"
maxs.sort()
print high_thresh
print maxs[:3], maxs[-3:]
print maxs[0], ">", high_thresh, "=", maxs[0] > high_thresh
good_label = numpy.zeros((numlabels+1,), dtype=numpy.bool)
good_label[1:] = maxs > high_thresh
print good_label[:3], good_label[-3:]
time.sleep(10)
newedge_map = good_label[labels]
#for i in range(len(maxs)):
# #if max(mag[labels==i]) < high_thresh:
# if maxs[i] < high_thresh:
# edge_map[labels==i] = False
edge_count = newedge_map.sum()
print "canny edges=%d, (thresh=%.3f) time=%.6f"%(edge_count, high_thresh, time.time() - t0)
if edge_count > maxedges:
rand = math.sqrt(random.random())
new_thresh = high_thresh / rand
# fix for too large values
#print rand, new_thresh
if new_thresh < 1.0:
high_thresh = new_thresh
else:
high_thresh = math.sqrt(high_thresh)
elif edge_count < minedges and high_thresh > minThresh:
rand = math.sqrt(random.random())
new_thresh = high_thresh * rand
#print rand, new_thresh, minThresh
high_thresh = new_thresh
else:
break
#print time.time() - t0
return newedge_map | 06984375fa2bebf362136c00870b88d0753ed25c | 12,746 |
def scale_matrix(t):
"""
Given a d-dim vector t, returns (d+1)x(d+1) matrix M such that
left multiplication by M on a homogenuous (d+1)-dim vector v
scales v by t (assuming the last coordinate of v is 1).
"""
t = asarray(t).ravel()
d = len(t)
m = identity(d+1)
for i in xrange(d):
m[i,i] = t[i]
return asmatrix(m) | 6688ac945b0da7228edf3e54e1310655434e8c93 | 12,747 |
def validate_rule_paths(sched: schedule.Schedule) -> schedule.Schedule:
"""A validator to be run after schedule creation to ensure
each path contains at least one rule with a temperature expression.
A ValueError is raised when this check fails."""
for path in sched.unfold():
if path.is_final and not list(path.rules_with_temp):
raise ValueError(
"No temperature specified for any rule along the path {}."
.format(path)
)
return sched | 99cbb35083f6bcd10b58fe320379d074f3a6fa3f | 12,748 |
def check_input_array(xarr,shape=None,chunks=None,\
grid_location=None,ndims=None):
"""Return true if arr is a dataarray with expected shape, chunks at
grid_location attribute. Raise an error if one of the tests fails.
Parameters
----------
xarr : xarray.DataArray
xarray dataarray which attributes should be tested.
shape : tuple
expected shape of the xarray dataarray xarr
chunks : list-like of list-like object
expected chunks of the xarray dataarray xarr
grid_location : str
string describing the expected grid location : eg 'u','v','t','f'...
ndims : int
number of dimensions over which chunks should be compared.
Returns
-------
test : bool
boolean value of the test.
"""
if hasattr(xarr,'name'):
arrayname = xarr.name
else:
arrayname = 'array'
if not(is_xarray(xarr)):
raise TypeError(arrayname + 'is expected to be a xarray.DataArray')
if not(_chunks_are_compatible(xarr.chunks,chunks,ndims=ndims)):
raise ChunkError()
if not(_grid_location_equals(xarr,grid_location)):
raise GridLocationError()
return True | acc589dcd757c34362ed46e4944f9cee58e08e47 | 12,749 |
def ancestor_width(circ, supp, verbose=False):
"""
Args:
circ(list(list(tuple))): Circuit
supp(list): List of integers
Returns:
int: Width of the past causal cone of supp
"""
circ_rev= circ[::-1]
supp_coded = 0
for s in supp:
supp_coded |= (1<<s)
for unitcirc in circ_rev:
for gate in unitcirc:
if verbose:
print("gate={}".format(gate))
if (1<<gate[0]) & supp_coded:
if not ((1<<gate[1]) & supp_coded):
supp_coded |= (1<<gate[1])
elif (1<<gate[1]) & supp_coded:
supp_coded |= (1<<gate[0])
return bin(supp_coded).count('1') | 6284b80bc2aefa22cf96198e3f27f885d7074273 | 12,750 |
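# Hedged worked example for ancestor_width above: a two-layer circuit where gate (0, 1)
# acts before gate (1, 2); the past causal cone of qubit 2 then touches qubits 0, 1 and 2.
example_circ = [[(0, 1)], [(1, 2)]]
print(ancestor_width(example_circ, [2]))  # -> 3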
def get_bprop_argmax(self):
"""Generate bprop for Argmax"""
def bprop(x, out, dout):
return (zeros_like(x),)
return bprop | a92f3fc1fd31428d4097ec9a185475343432e7f4 | 12,752 |
import logging
from datetime import datetime
def register_extensions(app):
"""Register models."""
db.init_app(app)
login_manager.init_app(app)
# flask-admin configs
admin.init_app(app)
admin.add_view(ModelView(User))
admin.add_view(ModelView(Role))
login_manager.login_view = 'auth.login'
@login_manager.user_loader
def load_user(user_id):
return User.objects(id=user_id).first()
# jwt config
def jwt_authenticate(username, password):
logging.info("username:{}\npassword:{}\n".format(username, password))
user = User.objects(name=username, password=password).first()
return user
def jwt_identity(payload):
logging.info("payload:{}".format(payload))
user_id = payload['identity']
return User.objects(id=user_id).first()
def make_payload(identity):
iat = datetime.utcnow()
exp = iat + current_app.config.get('JWT_EXPIRATION_DELTA')
nbf = iat + current_app.config.get('JWT_NOT_BEFORE_DELTA')
identity = str(identity.id)
return {'exp': exp, 'iat': iat, 'nbf': nbf, 'identity': identity}
jwt.authentication_handler(jwt_authenticate)
jwt.identity_handler(jwt_identity)
jwt.jwt_payload_handler(make_payload)
jwt.init_app(app) | 342d396cc40292d5a1412b8acd5f218bd3451b8a | 12,753 |
def get_description():
""" Return a dict describing how to call this plotter """
desc = dict()
desc['cache'] = 86400
desc['description'] = """This plot presents a histogram of the change
in some observed variable over a given number of hours."""
desc['arguments'] = [
dict(type='zstation', name='zstation', default='DSM',
label='Select Station:', network='IA_ASOS'),
dict(
type='select', options=PDICT, default='tmpf', name='var',
label='Select Variable'
),
dict(type='int', name='hours', default=24,
label='Hours:'),
dict(type='float', name='interval', default=1,
label="Histogram Binning Width (unit of variable)"),
]
return desc | 08a4fad8df13f89e5764f36ec7c2e6b7e3697cbe | 12,754 |
def read_format_from_metadata(text, ext):
"""Return the format of the file, when that information is available from the metadata"""
metadata = read_metadata(text, ext)
rearrange_jupytext_metadata(metadata)
return format_name_for_ext(metadata, ext, explicit_default=False) | 6b57dbbc7cf9763d1623aa37a57911eb82e7e24c | 12,756 |
def request_requires_retry(err: Exception) -> bool:
"""Does the error mean that a retry should be performed?"""
if not isinstance(err, ClientError):
return False
code = err.response.get('Error', {}).get('Code', '').lower()
message = err.response.get('Error', {}).get('Message', '')
# This covers:
# ExpiredToken
# OperationAborted
# RequestTimeout
# SlowDown
# Busy
# RequestLimitExceeded
# It might need to cover these, but it doesn't.
# RestoreAlreadyInProgress
m_low = message.lower()
if (
'exceeded' in m_low or 'exceeded' in code
or 'expire' in m_low or 'expire' in code
or 'aborted' in m_low or 'aborted' in code
or 'timeout' in m_low or 'timeout' in code
or 'slow' in m_low or 'slow' in code
or 'busy' in m_low or 'busy' in code
):
log('INFO', "Reporting error {msg} as requiring a retry", msg=message)
return True
return False | 3b3805820d3d009e6a52482b90afeacb3db5233b | 12,757 |
def to_jd(y, m, d, method=None):
"""Convert Armenian date to Julian day count. Use the method of Sarkawag if requested."""
# Sanity check values
legal_date(y, m, d, method)
yeardays = (m - 1) * 30 + d
if method == "sarkawag":
# Calculate things
yeardelta = y - 533
leapdays = trunc(yeardelta / 4)
return EPOCH_SARKAWAG + (365 * yeardelta) + leapdays + yeardays
else:
return EPOCH + (365 * y) + yeardays | f39cfa83a02bac9273fc92fb7b1a2725cb5ff8cc | 12,758 |
def unitVector(vector):
"""
Returns the unit vector of a given input vector.
Params:
vector -> input vector.
Returns:
numpy.array().
"""
# Divide the input vector by its magnitude.
return vector / np.linalg.norm(vector) | 840d184febbc1ebbb753278705649d649be900d6 | 12,759 |
def find_poly_intervals(p):
"""
Find the intervals of 1D-polynomial (numpy.polynomial) where the polynomial is negative.
"""
assert(np.abs(p.coef[-1]) > 1e-14)
r=p.roots()
# remove imaginary roots, multiple roots, and sort
r=np.unique(np.extract(np.abs(r.imag)<1e-14, r).real)
ints = []
for ii in range(r.size-1):
rmean = 0.5*(r[ii]+r[ii+1])
if p(rmean)<0:
ints.append([r[ii],r[ii+1]])
sign_pinf = np.sign(p.coef[-1])
if p.coef[-1] < 0: # polynomial sign at plus infinity
ints.append([r[-1], np.inf])
if (-1)**p.degree()*sign_pinf<0: # polynomial sign at minus infinity
ints.append([-np.inf, r[0]])
return np.array(ints) | a3c9d7622b1ab142b92da61a75febf6c46151a81 | 12,760 |
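# Hedged usage sketch for find_poly_intervals above: p(x) = x**2 - 1 is negative exactly
# on (-1, 1), so a single interval is returned.
import numpy as np

p = np.polynomial.Polynomial([-1.0, 0.0, 1.0])  # coefficients of -1 + 0*x + 1*x**2
print(find_poly_intervals(p))  # -> [[-1.  1.]]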
def user_numforms_next(*args):
"""
user_numforms_next(p) -> user_numforms_iterator_t
Move to the next element.
@param p (C++: user_numforms_iterator_t)
"""
return _ida_hexrays.user_numforms_next(*args) | 85bd741496befa0fc57e8d20325fa1a7b1b89c0b | 12,761 |
from typing import Dict
import logging
def list_cms() -> Dict[Text, CalculationModule]:
"""List all cms available on a celery queue."""
app = get_celery_app()
try:
app_inspector = app.control.inspect()
nodes = app_inspector.registered("cm_info")
except (redis.exceptions.ConnectionError, kombu.exceptions.OperationalError) as err:
# If redis is down, we just don't expose any calculation module
logging.error("Connection to celery broker failed with error: %s", err)
return {}
if not nodes:
return {}
cms = {}
for node in nodes.values():
for entry in node:
try:
cm = from_registration_string(entry)
except InvalidRegistrationString as e:
# invalid cm was encountered, skip it
logging.error(e)
continue
cms[cm.name] = cm
return cms | a6ce2cc5392d6b793c52cb32a8abff2967eebb18 | 12,762 |
def _a_in_b(first, second):
"""Check if interval a is inside interval b."""
return first.start >= second.start and first.stop <= second.stop | e4ca21e1861b691510252eb3be53eed16c8bc8cf | 12,763 |
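# Hedged usage sketch for _a_in_b above: any objects exposing .start and .stop work,
# e.g. Python range objects standing in for intervals.
print(_a_in_b(range(2, 5), range(0, 10)))   # -> True (2..5 lies inside 0..10)
print(_a_in_b(range(0, 12), range(0, 10)))  # -> False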
import time
import torch
def validate(val_loader, c_model, r_model, c_criterion, r_criterion):
"""
One epoch's validation.
: param val_loader: DataLoader for validation data
    : param c_model: classification model
    : param r_model: regression model
    : param c_criterion: classification loss criterion
    : param r_criterion: regression loss criterion
    : return: average classification and regression validation losses
"""
c_model.eval() # eval mode disables dropout
r_model.eval() # eval mode disables dropout
batch_time = AverageMeter()
losses = AverageMeter()
losses2 = AverageMeter()
start = time.time()
# Prohibit gradient computation explicity because I had some problems with memory
with torch.no_grad():
# Batches
        for i_batch, (images, labels, coords) in enumerate(val_loader):
# Move to default device
images = images.to(device)
labels = labels.to(device)
coords = coords.to(device)
# CLASSIFICATION Eval
predicted_class, all_crops, cropCoords = c_model(images)
loss1 = c_criterion(predicted_class, labels)
all_crops = all_crops.to(device)
cropCoords = cropCoords.to(device)
# REGRESSION Eval
for i in range(9):
batchcrop = all_crops[:, i, :, :]
batchcrop.unsqueeze_(1)
offset = cropCoords[i]
offset = offset.repeat(all_crops.size(0), 1)
offset = torch.cat((offset, torch.zeros((all_crops.size(0),
1)).to(device)),
dim=1)
center_truth = coords[:, i, :]
center_est = r_model(batchcrop).to(device)
center_est = center_est + offset
                loss2 = r_criterion(center_truth, center_est)
losses2.update(loss2.item())
losses.update(loss1.item())
batch_time.update(time.time() - start)
start = time.time()
# Print status
if i_batch % print_freq == 0:
print(
"[{0}/{1}]\t"
"Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t"
"Loss {loss.val:.4f} ({loss.avg:.4f})\t".format(i_batch, len(val_loader),
batch_time=batch_time, loss=losses),
"Regr Loss {loss.val:.4f} ({loss.avg:.4f})\t".format(i_batch, len(val_loader),
batch_time=batch_time, loss=losses2)
)
print("\n * LOSS - {loss.avg:.3f}\n".format(loss=losses))
print(" * REGR LOSS - {loss.avg:.3f}\n".format(loss=losses2))
return losses.avg, losses2.avg | af90f709c73364865b5c3610b0b5058f699203fb | 12,764 |
def radixsort(list, k=10, d=0):
""" Sort the list.
This method has been used to sort punched cards.
@param k: number different characters in a number (base)
@param d: maximum number of digits of list elements
"""
if len(list) == 0:
return []
elif d == 0:
d = max(map(lambda x : len(str(abs(x))), list))
for x in range(d):
# create an empty bin for each possible digit
bins = [[] for i in xrange(k)]
# sort the number according to the digits in the bins
for el in list:
bins[(el / 10**x ) % k].append(el)
# merge all bins to one list
list = []
for section in bins:
list.extend(section)
return list | a437032df03a5a3c97f976a9b0d5804175b7c9c6 | 12,765 |
def feature_stat_str(x, y, delimiter='~', n_lines=40, width=20):
"""Compute the input feature's sample distribution in string format for printing.
The distribution table returned (in string format) concains the sample sizes,
event sizes and event proportions of each feature value.
Parameters
----------
x: numpy.array, shape (number of examples,)
        The discretized feature array. Each value represents a right-closed interval
of the input feature. e.g. '1~8'
y: numpy.array, shape (number of examples,)
The binary dependent variable with 1 represents the target event (positive class).
delimiter: python string. Default is '~'
The symbol that separates the boundaries of a interval in array x.
n_lines: integer. Default is 40.
The number of '- ' used. This Controls the length of horizontal lines in the table.
width: integer. Default is 20.
This controls the width of each column.
Returns
-------
table_string: python string
The feature distribution table in string format
"""
res = feature_stat(x,y,delimiter) # Compute the feature distrition table
list_str = [] # String table will be constructed line by line
# Table header
for i in range(res.shape[1]):
list_str.extend([str(res.columns[i]),' '*(width-len(res.columns[i].encode('gbk')))])
list_str.append('\n(right-closed)')
list_str.extend(['\n','- '*n_lines,'\n'])
# Table body
for i in range(res.shape[0]):
for j in range(res.shape[1]):
list_str.extend([str(res.iloc[i,j]),' '*(width-len(str(res.iloc[i,j])))])
list_str.extend(['\n','- '*n_lines,'\n'])
# Put everything together
table_string = ''.join(list_str)
return table_string | 947886e5cb8210cda4fcdc6c91d29d7a569939be | 12,767 |
import torch
def dice_similarity_u(output, target):
"""Computes the Dice similarity"""
#batch_size = target.size(0)
total_dice = 0
output = output.clone()
target = target.clone()
# print('target:',target.sum())
for i in range(1, output.shape[1]):
target_i = torch.zeros(target.shape)
target_i = target_i.cuda().clone()
target_i[target == i] = 1
output_i = output[:, i:i+1].clone()
dice_i = dice_similarity(output_i, target_i)
# print('dice_: ',i,dice_i.data)
# print('target_i: ',target_i.sum())
# print('output_i: ',output_i.sum())
total_dice += dice_i
total_dice = total_dice / (output.shape[1] - 1)
#print(intersection, union, dice)
return total_dice | ce21637f33306adb13aea10e04f40bdac9d5d441 | 12,768 |
import time
import json
def get_meaning(searchterm):
"""
Fetches the meaning of the word specified
:param searchterm: the word for which you want to fetch the meaning
:return: json object of the meaning
"""
# finds the input field by id in the webpage
sbox = driver.find_element_by_id('word')
sbox.clear() # clears the input field
sbox.send_keys(searchterm) # enters the word specified in the input field
# find the 'CALL THE API' button
submit = driver.find_element_by_id("getWord")
submit.click() # invoking the click event on the button
# waiting for the results to come
time.sleep(1)
# find the code tag in the webpage where the meaning of the word (result) is present
code = driver.find_element_by_tag_name("code")
# condition if the meaning is not found
if code.text == "No results for that word.":
return {
'word':searchterm
}
# converting the meaning of the word from string to json
meaning = json.loads(code.text)
# returning the meaning of word in json formart
return meaning | 6a0608d59598a8f75fb30d17b172e48bef548031 | 12,770 |
def get_state(entity_id):
"""
Return the state of an entity
"""
try:
entity_state = ''
entity_state = Gb.hass.states.get(entity_id).state
if entity_state in IOS_TRIGGER_ABBREVIATIONS:
state = IOS_TRIGGER_ABBREVIATIONS[entity_state]
else:
state = Gb.state_to_zone.get(entity_state, entity_state.lower())
# _trace(f"{entity_id=} {entity_state=}-->{state=} ")
except Exception as err:
#When starting iCloud3, the device_tracker for the iosapp might
#not have been set up yet. Catch the entity_id error here.
#_LOGGER.exception(err)
state = NOT_SET
#if Gb.log_rawdata_flag:
# _trace(f" > {entity_id} > {entity_state=} {state=}")
return state | 4f5e3e09b2a263fd4176d11e2ae23964fa8b804e | 12,771 |
import numpy as np
def loss_eval(net, data, labels, numclass=2, rs=40):
"""Evaluate the network performance on test samples"""
loss = np.zeros([labels.shape[0]])
for i in range(len(loss)):
label_input = condition_reshape(
label=labels[i,np.newaxis], numclass=numclass, imgshape=(rs, rs))
img_est = net.sess.run(
net.output_flatten_de,
feed_dict={
net.inputs: data[i].reshape(-1, rs, rs, 1),
net.conditions: labels[i].reshape([1, labels[i].shape[0]]),
net.conditions_input: label_input,
net.is_training: False,
net.keep_prob: 1.0})
img_est = (img_est - img_est.min()) / (img_est.max() - img_est.min())
loss[i] = np.mean((data[i] - img_est)**2)
# loss[i] = np.sum((data[i] - img_est)**2) / (40**2)
# print(data[i].shape)
# evaluation
loss_mean = np.mean(loss)
loss_std = np.std(loss)
return loss,loss_mean,loss_std | 69df9b25c56439e7fd7472e7ec7318db6a0bd51b | 12,772 |
def wide_resnet101(input_shape, num_classes, dense_classifier=False, pretrained=False):
""" return a ResNet 101 object
"""
return _resnet('resnet101', BottleNeck, [3, 4, 23, 3], 64 * 2, num_classes, dense_classifier, pretrained) | e1a2e42ee432bd44d43b4091fcda60ce73e4f2d3 | 12,773 |
def is_empty(ir: irast.Base) -> bool:
"""Return True if the given *ir* expression is an empty set
or an empty array.
"""
return (
isinstance(ir, irast.EmptySet) or
(isinstance(ir, irast.Array) and not ir.elements) or
(
isinstance(ir, irast.Set)
and ir.expr is not None
and is_empty(ir.expr)
)
) | b70d58149d0818dc09b7c5e2a2bf13001580b11e | 12,774 |
import numpy as np
from scipy.io import loadmat
def read_official_corner_lut(filename, y_grid='lat_grid', x_grid='lon_grid',
x_corners = ['nwlon', 'swlon', 'selon', 'nelon'],
y_corners = ['nwlat', 'swlat', 'selat', 'nelat']):
"""
Read a MATLAB file containing corner point lookup data.
Returns lons, lats, corner_lut.
lons, lats: arrays, shape (N,M), of longitude and latitude giving the
locations of the corresponding offsets in corner_points
corner_lut: array, shape (N,M,4,2)
Corners of the pixel quadrilateral are given in order along the
third dimension. Longitude and latitudes are indexes 0 and 1 in the
trailing dimension, respectively.
Latitudes, longitudes, and offsets are defined with east and north positive
"""
nav = loadmat(filename)
lats = nav[y_grid]
lons = nav[x_grid]
corner_lut = np.zeros((lats.shape[0], lats.shape[1], 4, 2), dtype='f8')
corner_lut[:,:,0,0] = nav[x_corners[0]]
corner_lut[:,:,1,0] = nav[x_corners[1]]
corner_lut[:,:,2,0] = nav[x_corners[2]]
corner_lut[:,:,3,0] = nav[x_corners[3]]
corner_lut[:,:,0,1] = nav[y_corners[0]]
corner_lut[:,:,1,1] = nav[y_corners[1]]
corner_lut[:,:,2,1] = nav[y_corners[2]]
corner_lut[:,:,3,1] = nav[y_corners[3]]
return lons, lats, corner_lut | 755455a3f2bfb1bedff634776367bf0a825ff36b | 12,775 |
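# Example usage (illustrative; 'corner_lut.mat' is a hypothetical navigation file in the expected MATLAB format):
# lons, lats, corners = read_official_corner_lut('corner_lut.mat')
# corners.shape  # -> (lats.shape[0], lats.shape[1], 4, 2)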
import re
def get_component_name(job_type):
"""Gets component name for a job type."""
job = data_types.Job.query(data_types.Job.name == job_type).get()
if not job:
return ''
match = re.match(r'.*BUCKET_PATH[^\r\n]*-([a-zA-Z0-9]+)-component',
job.get_environment_string(), re.DOTALL)
if not match:
return ''
component_name = match.group(1)
return component_name | e0cc2db3b81c7bc729c87165dadc7da88156e21c | 12,776 |
def group_by(source: ObservableBase, key_mapper, element_mapper=None) -> ObservableBase:
"""Groups the elements of an observable sequence according to a
specified key mapper function and comparer and selects the resulting
elements by using a specified function.
1 - observable.group_by(lambda x: x.id)
2 - observable.group_by(lambda x: x.id, lambda x: x.name)
3 - observable.group_by(
lambda x: x.id,
lambda x: x.name,
lambda x: str(x))
Keyword arguments:
key_mapper -- A function to extract the key for each element.
element_mapper -- [Optional] A function to map each source element to
an element in an observable group.
Returns a sequence of observable groups, each of which corresponds to a
unique key value, containing all elements that share that same key
value.
"""
def duration_mapper(_):
return Observable.never()
return source.group_by_until(key_mapper, element_mapper, duration_mapper) | 12c32c22a1077fa171135aabeb0dd7d8e759a0a3 | 12,777 |
import numpy as np
import scipy.stats
def gauss_pdf(x, norm, mu, sigma):
"""
The method calculates the value of the Gaussian probability
density function (using matplotlib routines) for a value/array x.
for a given mean (mu) and standard deviation (sigma). The results
is normalized (multiplied by) 'norm', and so 'norm' should equal
1.000 unless you have a reason for it to be otherwise.
"""
if any(np.isnan([norm, mu, sigma])) or any(np.isnan(x)):
return np.NaN
GaussPdf = norm * scipy.stats.norm.pdf(x, mu, sigma)
return GaussPdf | 076b6a5141e1262e826b7a62b280023f2166bae6 | 12,778 |
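# Example usage of gauss_pdf (illustrative): the standard normal density at x = 0 is ~0.3989.
print(gauss_pdf(np.array([0.0, 1.0]), norm=1.0, mu=0.0, sigma=1.0))  # -> [0.39894228 0.24197072]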
def max_dbfs(sample_data: np.ndarray):
"""Peak dBFS based on the maximum energy sample.
Args:
sample_data ([np.ndarray]): float array, [-1, 1].
Returns:
float: dBFS
"""
# Peak dBFS based on the maximum energy sample. Will prevent overdrive if used for normalization.
return rms_to_dbfs(max(abs(np.min(sample_data)), abs(np.max(sample_data)))) | e88fd81b0bff2b1e08f611587ed8f4e7276e2d3e | 12,779 |
import numpy as np
import h5py
from scipy.ndimage import gaussian_filter1d
def get_deltaF_v2(specfile, res, spec_res, addpix=int(10), CNR=None, CE=None, MF_corr=True, domask=True):
"""Get the optical depth and return the realistic mock spectra
specfile : Address to the spectra. It should be in the foramt as the fale_spectra outputs
spec_res : spectral resolution in units of voxels along the spectrum
addpix : make a coarser spectrum by averaging this number of consecutive pixels along the line-of-sight
CNR : Continuum to Nosie ratio
CE : Continumm error
MF_corr : If true, correct the mean flux of the spectra
domask : If true, mask strong absorbtions along the spectrum
"""
ps = PS(res=res, num = 1, base='./', savedir='', savefile=specfile)
spec_file = h5py.File(specfile, 'r')
if MF_corr:
try :
# If HI density is recorded, do not use the high column density
# sightlines for fixing the mean flux.
NHI = spec_file['colden/H/1'][:]
ind = np.where(np.sum(NHI,axis=1)<10**19)
except (KeyError, np.AxisError, AttributeError):
# It is only for FGPA spectra, as we do not know the exact HI density
ind = np.ones_like(spec_file['tau/H/1/1215'][:], dtype=bool)
mean_flux_desired = get_mean_flux(z=spec_file['Header'].attrs['redshift'])
flux = correct_mean_flux(tau=spec_file['tau/H/1/1215'][:], mean_flux_desired=mean_flux_desired, ind=ind)
flux = gaussian_filter1d(flux, spec_res, axis=-1, mode='wrap')
L = np.shape(flux)[1]
# Check if the last pixel is fixed
t = np.arange(0,L+1,addpix)
new_flux = np.zeros(shape=(np.shape(flux)[0], t.size-1))
#new_NHI = np.zeros(shape=(np.shape(NHI)[0], t.size))
# Averaging over the flux within a pixel
for i in range(t.size-1) :
new_flux[:,i] = (np.sum(flux[:,t[i]:t[i+1]], axis=1))/addpix
if CE is not None:
if CNR is not None:
# the order below is important
(new_flux, delta) = ps.add_cont_error(CE=CE, flux=new_flux)
# A bit of hack, solve it later
ps.nbins = int(L/addpix)
(new_flux,noise_array) = ps.add_noise(snr=CNR, flux=new_flux)
else:
(new_flux, delta) = ps.add_cont_error(CE=CE, flux=new_flux)
else:
if CNR is not None:
ps.nbins = int(L/addpix)
(new_flux, noise_array) = ps.add_noise(snr=CNR, flux=new_flux)
if domask :
mask = np.zeros_like(new_flux,dtype=bool)
for i in range(new_flux.shape[0]):
mask[i,:] = mask_strong_absb_v2(deltav=addpix*ps.dvbin, Fnorm=new_flux[i,:], CNR=CNR[i]*np.ones(shape=(new_flux.shape[1],)), maxdv=1000, Fm=np.mean(new_flux), ewmin=5)
else :
mask = np.zeros(shape=new_flux.shape, dtype=bool)
new_flux = np.ravel(new_flux)
current_mean_flux = np.mean(np.ravel(new_flux))
print('mean flux after noise =', current_mean_flux)
print ("*** Error on mean flux :*** ", current_mean_flux-mean_flux_desired)
# flux contrast for each pixel
#deltaF = (new_flux/(1.0*np.mean(new_flux))) - 1
deltaF = (new_flux/current_mean_flux) - 1
return (deltaF, current_mean_flux, mask) | b3520eaccb209107bffd119d8fea4421e1c9411b | 12,780 |
import numpy as np
def quick_amplitude(x, y, x_err, y_err):
"""
Assume y = ax
Calculate the amplitude only.
"""
#x[x<0] = 1E-5
#y[y<0] = 1E-5
xy = x*y
xx = x*x
xy[xy<0] = 1E-10
A = np.ones(x.shape[0])
for i in np.arange(5):
weight = 1./(np.square(y_err)+np.square(A).reshape(A.size,1)*np.square(x_err))
#weight = 1./(np.square(y_err)+np.square(A)*np.square(x_err))
A = np.einsum('ij, ij->i', xy, weight)/np.einsum('ij, ij->i', xx, weight)
chi2 = np.einsum('ij, ij->i', np.square(A.reshape(A.size,1)*x - y), weight)
#chi2 = np.einsum('ij, ij->i', np.square(A*x - y), weight)
return (A, chi2) | 2331bc4ce9f258d9c0972de56abb2becd9d979e1 | 12,781 |
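# Example usage of quick_amplitude (illustrative): recover the amplitude of y ~= 2 * x.
x = np.linspace(1.0, 10.0, 50).reshape(1, -1)
y = 2.0 * x
x_err = 0.1 * np.ones_like(x)
y_err = 0.1 * np.ones_like(y)
A, chi2 = quick_amplitude(x, y, x_err, y_err)
print(A, chi2)  # -> approximately [2.] and chi2 ~ 0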
def get_password(config, name):
"""Read password"""
passfile = config.passstore / name
with open(passfile, 'r') as fd:
return fd.read() | caae733030077eedc4428555eb0b106cfe586e50 | 12,782 |
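# Example usage of get_password (illustrative): read a password file from a temporary pass-store.
from pathlib import Path
from types import SimpleNamespace
import tempfile
store = Path(tempfile.mkdtemp())
(store / "db").write_text("s3cret")
print(get_password(SimpleNamespace(passstore=store), "db"))  # -> s3cret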
def attribute_string(s):
"""return a python code string for a string variable"""
if s is None:
return "\"\""
    # escape backslashes and embedded double quotes so the result stays a valid literal
    s = s.replace("\\", "\\\\").replace("\"", "\\\"")
    return "\"%s\"" % s | 9ed9d4f26e797119a339d2a3827772b945e29839 | 12,783
import json
def poi(request, id=None):
"""
*/entry/pois/<id>*, */entry/pois/new*
The entry interface's edit/add/delete poi view. This view creates
the edit page for a given poi, or the "new poi" page if it
is not passed an ID. It also accepts POST requests to create or edit
pois.
If called with DELETE, it will return a 200 upon success or a 404 upon
failure. This is to be used as part of an AJAX call, or some other API
call.
"""
if request.method == 'DELETE':
poi = get_object_or_404(PointOfInterest, pk=id)
poi.delete()
return HttpResponse()
if request.method == 'POST':
message = ''
post_data = request.POST.copy()
errors = []
try:
try:
post_data['location'] = fromstr(
'POINT(%s %s)' % (post_data['longitude'],
post_data['latitude']), srid=4326)
except:
coordinates = coordinates_from_address(
post_data['street'], post_data['city'], post_data['state'],
post_data['zip'])
post_data['location'] = fromstr(
'POINT(%s %s)' % (coordinates[1], coordinates[0]),
srid=4326)
# Bad Address will be thrown if Google does not return coordinates for
# the address, and MultiValueDictKeyError will be thrown if the POST
# data being passed in is empty.
except (MultiValueDictKeyError, BadAddressException):
errors.append("Full address is required.")
try:
categories = [Category.objects.get(
pk=int(c)) for c in post_data.get(
'category_ids', None).split(',')]
except:
errors.append("You must choose at least one category.")
poi_form = PointOfInterestForm(post_data)
if poi_form.is_valid() and not errors:
image_keys = post_data.get('image_ids', None)
images = []
if image_keys:
images = [Image.objects.get(
pk=int(i)) for i in image_keys.split(',')]
video_keys = post_data.get('video_ids', None)
videos = []
if video_keys:
videos = [Video.objects.get(
pk=int(v)) for v in video_keys.split(',')]
hazard_keys = post_data.get('hazard_ids', None)
hazards = []
if hazard_keys:
hazards = [Hazard.objects.get(
pk=int(h)) for h in hazard_keys.split(',')]
if id:
poi = PointOfInterest.objects.get(id=id)
# process images
existing_images = poi.images.all()
for image in existing_images:
if image not in images:
poi.images.remove(image)
for image in images:
if image not in existing_images:
poi.images.add(image)
# process videos
existing_videos = poi.videos.all()
for video in existing_videos:
if video not in videos:
poi.videos.remove(video)
for video in videos:
if video not in existing_videos:
poi.videos.add(video)
# process hazards
existing_hazards = poi.hazards.all()
for hazard in existing_hazards:
if hazard not in hazards:
poi.hazards.remove(hazard)
for hazard in hazards:
if hazard not in existing_hazards:
poi.hazards.add(hazard)
# process categories
existing_categories = poi.categories.all()
for category in existing_categories:
if category not in categories:
poi.categories.remove(category)
for category in categories:
if category not in existing_categories:
poi.categories.add(category)
poi.__dict__.update(**poi_form.cleaned_data)
poi.save()
else:
poi = poi_form.save()
for image in images:
poi.images.add(image)
for video in videos:
poi.videos.add(video)
for hazard in hazards:
poi.hazards.add(hazard)
for category in categories:
poi.categories.add(category)
return HttpResponseRedirect(
"%s?saved=true" % reverse('entry-list-pois'))
else:
errors = []
message = ''
if id:
poi = PointOfInterest.objects.get(id=id)
poi.latitude = poi.location[1]
poi.longitude = poi.location[0]
title = "Edit {0}".format(poi.name)
post_url = reverse('edit-poi', kwargs={'id': id})
poi_form = PointOfInterestForm(
instance=poi,
initial={'latitude': poi.latitude, 'longitude': poi.longitude})
existing_images = poi.images.all()
existing_videos = poi.videos.all()
existing_categories = poi.categories.all()
existing_hazards = poi.hazards.all()
if request.GET.get('success') == 'true':
message = "Item saved successfully!"
elif request.method != 'POST':
poi_form = PointOfInterestForm()
post_url = reverse('new-poi')
title = "New Item"
existing_images = []
existing_videos = []
existing_categories = []
existing_hazards = []
else:
post_url = reverse('new-poi')
title = "New Item"
existing_images = []
existing_videos = []
existing_categories = []
existing_hazards = []
data = {'images': [], 'videos': [], 'categories': [], 'hazards': []}
for image in Image.objects.all():
data['images'].append({
'id': image.id,
'name': image.name
})
for video in Video.objects.all():
data['videos'].append({
'id': video.id,
'name': video.name
})
for hazard in Hazard.objects.all():
data['hazards'].append({
'id': hazard.id,
'name': hazard.name
})
for category in Category.objects.all():
data['categories'].append({
'id': category.id,
'category': category.category
})
return render(request, 'poi.html', {
'parent_url': [
{'url': reverse('home'), 'name': 'Home'},
{'url': reverse('entry-list-pois'),
             'name': 'Points Of Interest'}
],
'existing_images': existing_images,
'existing_videos': existing_videos,
'existing_hazards': existing_hazards,
'existing_categories': existing_categories,
'data_json': json.dumps(data),
'data_dict': data,
'title': title,
'message': message,
'post_url': post_url,
'errors': errors,
'poi_form': poi_form,
}) | cce95c37bfee855adab817f50d50d53324f00ae1 | 12,786 |
def relativeScope(fromScope, destScope):
"""relativeScope variant that handles invented fromScopes"""
rs = idlutil.relativeScope(fromScope, destScope)
if rs[0] is None:
try:
rd = idlast.findDecl(destScope)
except idlast.DeclNotFound:
return rs
new_rs = rs
while new_rs[0] is None and len(fromScope) > 1:
fromScope = fromScope[:-1]
new_rs = idlutil.relativeScope(fromScope, destScope)
if new_rs[0] is not None:
return new_rs
return rs | f2cdfa67bcedbbe4bcb9f2fb1b89a167df66ae13 | 12,787 |
def srbt(peer, pkts, inter=0.1, *args, **kargs):
"""send and receive using a bluetooth socket"""
s = conf.BTsocket(peer=peer)
a,b = sndrcv(s,pkts,inter=inter,*args,**kargs)
s.close()
return a,b | 77b087b31a6fc23b7b0122ebe827d7b868fc66a8 | 12,788 |
import numpy as np
def rotation_matrix_from_vectors(vector1, vector2):
"""
Finds a rotation matrix that can rotate vector1 to align with vector 2
Args:
vector1: np.narray (3)
Vector we would apply the rotation to
vector2: np.narray (3)
Vector that will be aligned to
Returns:
rotation_matrix: np.narray (3,3)
Rotation matrix that when applied to vector1 will turn it to the same direction as vector2
"""
    a, b = (vector1 / np.linalg.norm(vector1)).reshape(3), (vector2 / np.linalg.norm(vector2)).reshape(3)
    v = np.cross(a, b)
    c = np.dot(a, b)
    s = np.linalg.norm(v)
    if np.isclose(s, 0.0):
        # Degenerate case: the vectors are parallel (c ~ 1) or anti-parallel (c ~ -1),
        # so the cross-product construction below would divide by zero.
        if c > 0:
            return np.eye(3)
        # Anti-parallel: rotate 180 degrees about any axis perpendicular to `a`.
        axis = np.cross(a, np.array([1.0, 0.0, 0.0]))
        if np.linalg.norm(axis) < 1e-8:
            axis = np.cross(a, np.array([0.0, 1.0, 0.0]))
        axis /= np.linalg.norm(axis)
        return 2.0 * np.outer(axis, axis) - np.eye(3)
    matrix = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])
    rotation_matrix = np.eye(3) + matrix + matrix.dot(matrix) * ((1 - c) / (s ** 2))
    return rotation_matrix | a36c9b2a77bc3be91538e7768fd1688465d7cd4f | 12,789
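# Example usage of rotation_matrix_from_vectors (illustrative):
R = rotation_matrix_from_vectors(np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0]))
print(np.round(R @ np.array([1.0, 0.0, 0.0]), 6))  # -> [0. 1. 0.]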
def jaxpr_replicas(jaxpr) -> int:
"""The number of replicas needed for a jaxpr.
For a eqn, multiply the `axis_size` with the `jaxpr_replicas` of the
subjaxprs. For a list of eqns, take the maximum number of replicas.
"""
if isinstance(jaxpr, core.ClosedJaxpr):
jaxpr = jaxpr.jaxpr
return max(unsafe_map(eqn_replicas, jaxpr.eqns), default=1) | e4664a06ef778976fa31ce40906dbb561c2812cd | 12,791 |
import torch
from allennlp.modules.elmo import batch_to_ids
def tokens2ELMOids(tokens, sent_length):
"""
Transform input tokens to elmo ids.
:param tokens: a list of words.
:param sent_length: padded sent length.
:return: numpy array of elmo ids, sent_length * 50
"""
elmo_ids = batch_to_ids([tokens]).squeeze(0)
pad_c = (0, 0, 0, sent_length - elmo_ids.size(0)) # assume PAD_id = 0
elmo_ids = torch.nn.functional.pad(elmo_ids, pad_c, value=0)
elmo_ids = elmo_ids.data.cpu().numpy()
return elmo_ids | 8343a05ec250f0d2ea607fb1db27ee11c42d1975 | 12,792 |
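# Example usage (illustrative; requires the `allennlp` package):
# ids = tokens2ELMOids(["the", "cat", "sat"], sent_length=8)
# ids.shape  # -> (8, 50)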
import warnings
def get_section_endpoints(section_name):
"""Get the [lon, lat] endpoints associated with a pre-defined section
e.g.
>> pt1, pt2 = get_section_endpoints('Drake Passage')
pt1 = [-68, -54]
pt2 = [-63, -66]
These sections mirror the gcmfaces definitions, see
gcmfaces/gcmfaces_calc/gcmfaces_lines_pairs.m
Parameters
----------
section_name : str
name of the section to compute transport across
Returns
-------
pt1, pt2 : array_like
array with two values, [lon, lat] of each endpoint
or
None
if section_name is not in the pre-defined list of sections
"""
# Set to input lower case and remove spaces/tabs
section_name = ''.join(section_name.lower().split())
# Test to see if name exists in list
section_list = get_available_sections()
section_list = [''.join(name.lower().split()) for name in section_list]
if section_name not in section_list:
warnings.warn('\nSection name %s unavailable as pre-defined section' % section_name)
return None
if section_name == 'drakepassage':
pt1 = [-68, -54]
pt2 = [-63, -66]
elif section_name == 'beringstrait':
pt1 = [-173, 65.5]
pt2 = [-164, 65.5]
elif section_name == 'gibraltar':
pt1 = [-5, 34]
pt2 = [-5, 40]
elif section_name == 'floridastrait':
pt1 = [-81, 28]
pt2 = [-77, 26]
elif section_name == 'floridastraitw1':
pt1 = [-81, 28]
pt2 = [-79, 22]
elif section_name == 'floridastraits1':
pt1 = [-76, 21]
pt2 = [-76, 8]
elif section_name == 'floridastraite1':
pt1 = [-77, 26]
pt2 = [-77, 24]
elif section_name == 'floridastraite2':
pt1 = [-77, 24]
pt2 = [-77, 22]
elif section_name == 'floridastraite3':
pt1 = [-76, 21]
pt2 = [-72, 18.5]
elif section_name == 'floridastraite4':
pt1 = [-72, 18.5]
pt2 = [-72, 10]
elif section_name == 'davisstrait':
pt1 = [-65, 66]
pt2 = [-50, 66]
elif section_name == 'denmarkstrait':
pt1 = [-35, 67]
pt2 = [-20, 65]
elif section_name == 'icelandfaroe':
pt1 = [-16, 65]
pt2 = [ -7, 62.5]
elif section_name == 'scotlandnorway':
pt1 = [-4, 57]
pt2 = [ 8, 62]
elif section_name == 'indonesiaw1':
pt1 = [103, 4]
pt2 = [103,-1]
elif section_name == 'indonesiaw2':
pt1 = [104, -3]
pt2 = [109, -8]
elif section_name == 'indonesiaw3':
pt1 = [113, -8.5]
pt2 = [118, -8.5]
elif section_name == 'indonesiaw4':
pt1 = [118, -8.5]
pt2 = [127, -15]
elif section_name == 'australiaantarctica':
pt1 = [127, -25]
pt2 = [127, -68]
elif section_name == 'madagascarchannel':
pt1 = [38, -10]
pt2 = [46, -22]
elif section_name == 'madagascarantarctica':
pt1 = [46, -22]
pt2 = [46, -69]
elif section_name == 'southafricaantarctica':
pt1 = [20, -30]
pt2 = [20, -69.5]
return pt1, pt2 | 505e098c9e0a7b7612ab08299e8009525a2125ed | 12,793 |
from datetime import datetime, timedelta
def get_vm_metrics(monitor_client, resource_id):
"""Get metrics for the given vm. Returns row of cpu, disk, network activity"""
today = datetime.utcnow().date()
last_week = today - timedelta(days=7)
metrics_data = monitor_client.metrics.list(
resource_id,
timespan="{}/{}".format(last_week, today),
interval="PT12H",
metricnames="Percentage CPU,Disk Read Bytes,Disk Write Bytes,Network In Total,Network Out Total",
aggregation="Minimum,Average,Maximum",
)
row = {}
ave_cpu = []
min_cpu = []
max_cpu = []
ave_disk_read = []
ave_disk_write = []
ave_network_in = []
ave_network_out = []
for item in metrics_data.value:
if item.name.value == "Percentage CPU":
for timeserie in item.timeseries:
for data in timeserie.data:
if data.average:
ave_cpu.append(data.average)
if data.minimum:
min_cpu.append(data.minimum)
if data.maximum:
max_cpu.append(data.maximum)
if item.name.value == "Disk Read Bytes":
for timeserie in item.timeseries:
for data in timeserie.data:
if data.average:
ave_disk_read.append(data.average)
if item.name.value == "Disk Write Bytes":
for timeserie in item.timeseries:
for data in timeserie.data:
if data.average:
ave_disk_write.append(data.average)
if item.name.value == "Network In Total":
for timeserie in item.timeseries:
for data in timeserie.data:
if data.average:
ave_network_in.append(data.average)
if item.name.value == "Network Out Total":
for timeserie in item.timeseries:
for data in timeserie.data:
if data.average:
ave_network_out.append(data.average)
row = (
get_mean(ave_cpu),
get_min(min_cpu),
get_max(max_cpu),
get_mean(ave_disk_read),
get_mean(ave_disk_write),
get_mean(ave_network_in),
get_mean(ave_network_out),
)
return row | a9cca3cf8f823bdff58a9b3b8bae5e12e79b2cdc | 12,794 |
import time
def current_time_id():
""" Returns the current time ID in milliseconds """
return int(round(time.time() * 1000)) | c59c02151b7575804039f64292f0aed5ca4ebc44 | 12,796 |
import sqlite3
from flask import g
def get_db():
"""
Connects to the database.
Returns a database object that can be queried.
"""
if 'db' not in g:
g.db = sqlite3.connect(
'chemprop.sqlite3',
detect_types=sqlite3.PARSE_DECLTYPES
)
g.db.row_factory = sqlite3.Row
return g.db | 87267da29d562d5b66e528f0a5d58031bf666c65 | 12,797 |
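# Example usage of get_db (illustrative): query the database inside a Flask application context.
from flask import Flask
app = Flask(__name__)
with app.app_context():
    db = get_db()
    tables = db.execute("SELECT name FROM sqlite_master WHERE type='table'").fetchall()
    print([row["name"] for row in tables])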
import numpy as np
def computePatientConfusionMatrix(patient_prediction_location, patient_ground_truth_location, labels_names_file):
"""
@brief: Compute the patient confusion matrix given the location of its prediction and ground truth.
@param patient_prediction_location : folder containing the prediction data
@param patient_ground_truth_location : folder containing the ground truth data
@param labels_names_file : file containing the name of the labels (stored as integer)
We define the confusion matrix as the length confusion matrix with column normalization.
It represents the repartition (ratio) of predicted labels for a given GT label.
As for the length confusion matrix, it is defined with the following convention:
- each line correspond to a given prediction class
- each column correspond to a given ground truth class
Both folders are assumed to have a particular hierarchy:
- The folder patient_ground_truth_location:
* all branches named "branch????.txt"
* a "branch_labels.txt" file
-The folder patient_prediction_location:
* all branches named "branch????.txt"
* a file "recomputed_labels.txt"
N.B. It is assumed that the number of branches in both folder are identical and that the files storing labels have the same number lines.
"""
# compute the patient length confusion matrix:
(resulting_confusion_matrix, label_legend) = computePatientLengthConfusionMatrix(patient_prediction_location, patient_ground_truth_location, labels_names_file)
# normalize each column:
    totalColumnLength = np.sum(resulting_confusion_matrix, axis=0)
    totalColumnLength = np.maximum(totalColumnLength, MY_EPSILON)  # prevent 0-division (MY_EPSILON is a module-level constant)
resulting_confusion_matrix /= totalColumnLength
# return the confusion matrix with legend
return (resulting_confusion_matrix, label_legend) | 6aa97a98eddddd7ff79d8ddf099d81673ca2fd61 | 12,798 |
async def ping_server():
"""
Ping Server
===========
Returns the message "The Optuna-server is alive!" if the server is running.
Parameters
----------
None
Returns
-------
msg : str
A message witnessing that the server is running.
"""
msg = 'The Optuna-server is alive!'
return msg | 2098f2167a14f08105824490824d62dd34b4c49e | 12,799 |
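# Example usage of ping_server (illustrative): await the coroutine directly, e.g. in a test.
import asyncio
print(asyncio.run(ping_server()))  # -> The Optuna-server is alive!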
def treynor(rp: np.ndarray, rb: np.ndarray, rf: np.ndarray) -> np.ndarray:
"""Returns the treynor ratios for all pairs of p portfolios and b benchmarks
Args:
rp (np.ndarray): p-by-n matrix where the (i, j) entry corresponds to the
j-th return of the i-th portfolio
rb (np.ndarray): b-by-n matrix where the (i, j) entry corresponds to the
j-th return of the i-th benchmark
rf (np.ndarray): Scalar risk-free rate (as a 0-D tensor)
Returns:
np.ndarray: p-by-b matrix where the (i, j) entry corresponds to the
treynor ratio for the i-th portfolio and j-th benchmark
"""
__expect_rp_rb_rf(rp, rb, rf)
return kernels.treynor(rp, rb, rf) | dd247f7ea1c710939ac27e626b3f863f49818fec | 12,800 |