content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---
def opensearch_plugin(request):
"""Render an OpenSearch Plugin."""
host = "%s://%s" % ("https" if request.is_secure() else "http", request.get_host())
    # Use `render_to_response` here instead of `render` because `render`
    # includes the request in the context of the response. Requests
    # often include the session, which can include picklable things.
    # `render_to_response` doesn't include the request in the context.
return render_to_response(
"search/plugin.html",
{"host": host, "locale": request.LANGUAGE_CODE},
content_type="application/opensearchdescription+xml",
) | 5df7e8a8bb89ff5e83b51f1bc4b634db9dea6930 | 7,685 |
import datetime
def serialize_time(output_value: datetime.time) -> str:
""" Serializes an internal value to include in a response. """
return output_value.isoformat() | 81fc648eaf27efc47531f9895a9523aa5f012cf6 | 7,686 |
def decode_binary(state_int):
"""
Decode binary representation into the list view
:param state_int: integer representing the field
:return: list of GAME_COLS lists
"""
assert isinstance(state_int, int)
bits = int_to_bits(state_int, bits=GAME_COLS*GAME_ROWS + GAME_COLS*BITS_IN_LEN)
res = []
len_bits = bits[GAME_COLS*GAME_ROWS:]
for col in range(GAME_COLS):
vals = bits[col*GAME_ROWS:(col+1)*GAME_ROWS]
lens = bits_to_int(len_bits[col*BITS_IN_LEN:(col+1)*BITS_IN_LEN])
if lens > 0:
vals = vals[:-lens]
res.append(vals)
return res | a2dd5462031eeb82d9e3b59565d41b2b06e8e2d8 | 7,688 |
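The `decode_binary` snippet above relies on `int_to_bits` and `bits_to_int` helpers that are not part of this record. A minimal sketch of what they are assumed to do (the bit ordering is a guess, not necessarily the project's actual convention):

```python
def int_to_bits(value: int, bits: int) -> list:
    # Most-significant bit first, padded/truncated to `bits` positions.
    return [(value >> shift) & 1 for shift in range(bits - 1, -1, -1)]


def bits_to_int(bits: list) -> int:
    # Inverse of int_to_bits for the same bit ordering.
    result = 0
    for bit in bits:
        result = (result << 1) | bit
    return result
```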
def clean_features(vgsales):
"""
This function cleans up some of the dataset's features. The dataset is
quite messy as many values are missing from both categorical and numerical
features. Many of these features are difficult to impute in a reasonable
manner.
<class 'pandas.core.frame.DataFrame'>
Index: 16719 entries, Wii Sports to Winning Post 8 2016
Data columns (total 9 columns):
Platform 16719 non-null category
Release 16450 non-null Int64
Genre 16717 non-null category
Publisher 16665 non-null category
Sales 16719 non-null float64
Metacritic 8137 non-null float64
Metacritic_N 8137 non-null Int64
Developer 10096 non-null category
ESRB 9950 non-null category
dtypes: Int64(2), category(5), float64(2)
memory usage: 1.5+ MB
Some of the hardest features to impute (genre or platform, for example)
don't have many nulls. Others, like the review averages, can be imputed.
    :param vgsales: A DataFrame loaded from a Video_Games_Sales_as_at_22_Dec_2016.csv
        compatible dataset.
"""
    # A few of the release years are set to 2020 or other years past 2016.
    # Just clamp them to 2016 here; there aren't a lot of them anyway.
    vgsales.loc[vgsales.Release > 2016, "Release"] = 2016
# =============================================================================
# https://en.wikipedia.org/wiki/Entertainment_Software_Rating_Board
#
# The ESRB feature will be converted to an ordinal variable for machine
# learning during preprocessing later. Thus, we organize them here and
# add an NA for missing values.
# =============================================================================
esrb_ordinal = ["NA", "RP", "EC", "E", "E10+", "T", "M", "AO"]
    vgsales["ESRB"] = vgsales.ESRB.cat.set_categories(esrb_ordinal, ordered=True)
return vgsales | ffcae20af436d4012381c4933c841c3689fbbca0 | 7,689 |
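A hedged sketch of how the ordered `ESRB` categorical produced above might be turned into an ordinal feature later in preprocessing; the column and variable names mirror the snippet, but this encoding step is an assumption rather than part of the original pipeline:

```python
# After clean_features(), ESRB is an ordered categorical, so .cat.codes yields
# a monotone integer encoding: "NA" -> 0, "RP" -> 1, ..., "AO" -> 7.
vgsales = clean_features(vgsales)
vgsales["ESRB_code"] = vgsales["ESRB"].cat.codes
```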
def get_object(proposition):
"""[75]
Returns the object of a given proposition
"""
return proposition[2][0] | dc9d5fe007bb66ee92cddd964bb29b897a561c8c | 7,690 |
import hmac
def __verify_hmac(data: bytes, ohmac: bytes, key: bytes) -> bool:
"""
This function verifies that a provided HMAC matches a computed HMAC for
the data given a key.
Args:
data: the data to HMAC and verify
ohmac: the original HMAC, normally appended to the data
key: the key to HMAC with for verification
Returns:
        a boolean value denoting whether or not the HMACs match
"""
    return hmac.compare_digest(ohmac, hmac.new(key, data, HMAC_HS).digest()) | 6381bb70e35cccafdb3dcafc2428ca5ca850364a | 7,691 |
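A usage sketch of the same verification pattern using only the standard library, assuming `HMAC_HS` resolves to a hash constructor such as `hashlib.sha256`:

```python
import hashlib
import hmac

key = b"shared-secret"
payload = b"important message"
tag = hmac.new(key, payload, hashlib.sha256).digest()   # sender appends this to the data

# Receiver recomputes the HMAC; compare_digest gives a constant-time comparison.
assert hmac.compare_digest(tag, hmac.new(key, payload, hashlib.sha256).digest())
```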
def create_block_option_from_template(text: str, value: str):
"""Helper function which generates the option block for modals / views"""
return {"text": {"type": "plain_text", "text": str(text), "emoji": True}, "value": str(value)} | 23f0cf455e659eddeca0b4eda732995feeac6341 | 7,692 |
from typing import Any
import json
def get_token_payload(token: str) -> Any:
"""Extract the payload from the token.
Args:
token (str):
A JWT token containing the session_id and other data.
Returns:
dict
"""
decoded = json.loads(_base64_decode(token.split('.')[0]))
del decoded['session_id']
return decoded | 1b9b03f8e9db6940cc44725025c1ed2ccf751e89 | 7,693 |
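`get_token_payload` calls a `_base64_decode` helper that is not shown in this record. A plausible sketch, assuming the token segments are URL-safe base64 with the padding stripped (as in standard JWTs):

```python
import base64


def _base64_decode(segment: str) -> bytes:
    # Restore the stripped "=" padding before decoding.
    padded = segment + "=" * (-len(segment) % 4)
    return base64.urlsafe_b64decode(padded)
```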
import numpy as np
import torch
def create_mock_target(number_of_nodes, number_of_classes):
"""
Creating a mock target vector.
"""
return torch.LongTensor([np.random.randint(0, number_of_classes-1) for node in range(number_of_nodes)]) | e226d9e7d1944b0736952d5952e8ef3438a1e54b | 7,694 |
def initFindAndFit(parameters):
"""
Initialize and return a SplinerFISTAFinderFitter object.
"""
# Create spline object.
spline_fn = splineToPSF.loadSpline(parameters.getAttr("spline"))
# Create peak finder.
finder = SplinerFISTAPeakFinder(parameters = parameters,
psf_object = spline_fn)
# Create cubicFitC.CSplineFit object.
mfitter = findPeaksStd.initFitter(finder, parameters, spline_fn)
# Create peak fitter.
fitter = fitting.PeakFitterArbitraryPSF(mfitter = mfitter,
parameters = parameters)
# Specify which properties we want from the analysis.
properties = ["background", "error", "height", "sum", "x", "y", "z"]
return fitting.PeakFinderFitter(peak_finder = finder,
peak_fitter = fitter,
properties = properties) | 6f045b664157437fb33ab3804b84fe1c7d1deb4e | 7,695 |
def UpdateDatabase(asset, images, status):
"""Update the database entries of the given asset with the given data."""
return {'asset': asset} | 1d7d42355410be7481e706e47d7810755974dadc | 7,696 |
def get_max_word_length(days: dict, keys: list) -> int:
"""
    Finds the length of the longest word.
"""
max_word_len = 0
for key in keys:
if days.get(key):
for _, data in days.get(key).items():
value = data.split(" ")
for word in value:
if len(word) > max_word_len:
max_word_len = len(word)
return int(max_word_len) | 8a98c7384839f10fdfa713c535b3bf7765416b4c | 7,697 |
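A small usage sketch with a made-up `days` mapping of time slots to free-text entries:

```python
days = {
    "monday": {"09:00": "team standup, planning"},
    "tuesday": {"10:00": "review"},
}
# Words are split on spaces only, so "standup," and "planning" both count as
# 8 characters and set the maximum.
print(get_max_word_length(days, ["monday", "tuesday"]))   # -> 8
```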
def rateCBuf(co2: float, par: float, params: dict,
rates: dict, states: dict) -> float:
"""
Rate of increase of carbohydrates in the buffer
During the light period, carbohydrates produced by
photosynthesis are stored in the buffer and, whenever
carbohydrates are available in the buffer, carbohydrates flow
to the plant organs. This carbohydrate flow stops when the
buffer approaches its lower limit. When the buffer approaches
its upper limit, further carbohydrates cannot be stored and
photosynthesis will be inhibited.
Parameters
----------
co2 : float
        Carbon dioxide concentration in the air [μmol {CO2} mol-1 {air}]
par : float
Photosynthetic active radiation [μmol {photons} m-2 s-1]
params : dict
Parameters saved as model constants
rates : dict
Auxiliary variable including rates and
flows for the different fruit development stages
states : dict
State variables of the model
Returns
-------
float
Rate of accumulation of carbohydrates in the buffer [mg m-2 s-1]
"""
# These rates relate to the carbs available in the buffer by the maximum
# value available for the buffer. So in theory even if all of them
# are maximum, they would be compatible. However, the buffer is not always
# in the maximum. So they could reach their potential and demand more
# carbs than are available in the buffer.
# If there are not enough, there is the inhibition phenomena, but right
# now they don't seem compatible, as there is growth without
# enough carbs because of the different treatment of the first fruit
# stage.
rates["MCBufLeaf"] = mcBufOrg(organ="leaf", params=params, states=states)
rates["MCBufFruit"] = mcBufOrg(organ="fruit", params=params, states=states)
rates["MCBufStem"] = mcBufOrg(organ="stem", params=params, states=states)
co2_st = co2Stom(co2=co2, params=params)
# Photosynthesis Rate
mcAirBuf_ = mcAirBuf(co2=co2_st, par=par, params=params, states=states)
# Growth respiration
mcBufAir_ = mcBufAir(params=params, states=states)
cBuf_ = (mcAirBuf_ - rates["MCBufLeaf"] - rates["MCBufFruit"] - rates["MCBufStem"] -
mcBufAir_)
return cBuf_ | 33a6c5fcc6d9d1a0641d197dffa1ee5fd6afd038 | 7,698 |
import hashlib
import json
def get_config_tag(config):
"""Get configuration tag.
Whenever configuration changes making the intermediate representation
incompatible the tag value will change as well.
"""
# Configuration attributes that affect representation value
config_attributes = dict(frame_sampling=config.proc.frame_sampling)
sha256 = hashlib.sha256()
sha256.update(json.dumps(config_attributes).encode("utf-8"))
return sha256.hexdigest()[:40] | 2cab6e9473822d0176e878114ceb3fda94d1e0f7 | 7,699 |
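A usage sketch with a minimal stand-in for the config object (the real `config` class is not shown in this record); any change to `proc.frame_sampling` yields a different tag:

```python
from types import SimpleNamespace

config_a = SimpleNamespace(proc=SimpleNamespace(frame_sampling=1))
config_b = SimpleNamespace(proc=SimpleNamespace(frame_sampling=2))

assert get_config_tag(config_a) != get_config_tag(config_b)
print(len(get_config_tag(config_a)))   # 40 hex characters
```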
def ctf_to_pickle(trace_directory: str, target: Pickler) -> int:
"""
Load CTF trace, convert events, and dump to a pickle file.
:param trace_directory: the trace directory
:param target: the target file to write to
:return: the number of events written
"""
ctf_events = get_trace_ctf_events(trace_directory)
count = 0
count_written = 0
for event in ctf_events:
count += 1
pod = event_to_dict(event)
target.dump(pod)
count_written += 1
return count_written | e317be9d5577c8f85e02945d9ae95e63be9e76ef | 7,700 |
def list_lines(lines):
"""Returns the list of trimmed lines.
@param lines Multi-line string
"""
return list(filter(None, (x.strip() for x in lines.splitlines()))) | 293610d17e1fe8a27ab6bb5c35a349059e0179f3 | 7,701 |
def carbon_offset_cost(kWh):
"""
    Donation to Cool Earth (in USD) needed to offset carbon emissions.
"""
return KG_CO2_PER_KWH * USD_PER_KG_CO2 * kWh | 6bbb9cfd3c058d4148fe3286defe75ade0fddb62 | 7,703 |
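A worked example with illustrative constants; the real module-level `KG_CO2_PER_KWH` and `USD_PER_KG_CO2` values are not shown in this record:

```python
KG_CO2_PER_KWH = 0.4     # assumed kg of CO2 emitted per kWh (illustrative only)
USD_PER_KG_CO2 = 0.01    # assumed donation in USD per kg of CO2 (illustrative only)

kWh = 250
print(KG_CO2_PER_KWH * USD_PER_KG_CO2 * kWh)   # 0.4 * 0.01 * 250 = 1.0 USD
```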
from typing import List
from typing import Tuple
from typing import Union
import time
def run(
duration: int, runtime_mode: str, connection_mode: str
) -> List[Tuple[str, Union[int, float]]]:
"""Test memory usage."""
# pylint: disable=import-outside-toplevel,unused-import
# import manually due to some lazy imports in decision_maker
resources = Resources()
if connection_mode not in CONNECTION_MODES:
raise ValueError(
f"bad connection mode {connection_mode}. valid is one of {list(CONNECTION_MODES.keys())}"
)
base_cls = CONNECTION_MODES[connection_mode]
conn_cls = type("conn_cls", (TestConnectionMixIn, base_cls), {})
connection = conn_cls.make() # type: ignore # pylint: disable=no-member
resources.add_connection(connection)
agent = make_agent(runtime_mode=runtime_mode, resources=resources)
agent.resources.add_skill(make_skill(agent, handlers={"test": TestHandler}))
t = Thread(target=agent.start, daemon=True)
t.start()
wait_for_condition(lambda: agent.is_running, timeout=5)
connection.enable()
time.sleep(duration)
connection.disable()
time.sleep(0.2) # possible race condition in stop?
agent.stop()
t.join(5)
latency = mean(
map(
lambda x: x[1] - x[0],
zip(
connection.sends,
connection.recvs,
),
)
)
total_amount = len(connection.recvs)
rate = total_amount / duration
return [
("envelopes received", len(connection.recvs)),
("envelopes sent", len(connection.sends)),
("latency(ms)", 10**6 * latency),
("rate(envelopes/second)", rate),
] | 677bdb5cb73cfc4ccc38d813bf875d506905512e | 7,704 |
def tf_fermion_massmatrix(t_A3, t_potential, tc_masses_factor):
"""Computes the spin-1/2 mass matrix from the A3-tensor."""
# The extra factor 2.0 relative to https://arxiv.org/abs/1906.00207
# makes the fermion masses align with the way particle states are
# grouped into SUSY multiplets in appendix (B.2) of:
# https://arxiv.org/abs/1909.10969
return mu.tfc128(2.0) * tf.einsum(
'ij,ik->jk',
t_A3, tf.math.conj(t_A3)) * (
tc_masses_factor /
tf.cast(t_potential, tf.complex128)) | 934c606fd55f93bdfa91a1e4d23fb7b6b5df8703 | 7,705 |
def filter_nsa_catalog_to_approximate_sky_area(nsa, bricks, visualise=False):
"""
DECALS is only in a well-defined portion of sky (which depends on the data release version). Filter the NSA catalog
so that it only includes galaxies in that approximate area. This saves time matching later.
Args:
nsa (astropy.Table): NSA catalog of SDSS galaxies
bricks (astropy.Table): catalog of DECALS imaging bricks
visualise (bool): if True, plot and save sky footprint of NSA catalog
Returns:
(astropy.Table) NSA catalog filtered to galaxies within the approximate sky area imaged by DECALS
"""
if visualise:
fig, ((ul, ur), (ll, lr)) = plt.subplots(2, 2)
ul.hist(bricks['dec'])
ul.set_title('brick dec')
ur.hist(nsa['dec'])
ur.set_title('nsa dec')
ll.hist(bricks['ra'])
ll.set_title('brick ra')
lr.hist(nsa['ra'])
lr.set_title('nsa ra')
plt.tight_layout()
plt.savefig('nsa_catalog_sky_coverage.png')
brick_maxdec = max(bricks['dec2'])
brick_mindec = min(bricks['dec1'])
# ra spans 0 through 360, do not filter
declim = (nsa['dec'] >= brick_mindec) & (nsa['dec'] <= brick_maxdec) # approximately -25 to +30 degrees
nsa_in_decals_area = nsa[declim]
return nsa_in_decals_area | 3f961cab16a58e7323f1f0730497beaf15f5db18 | 7,706 |
from typing import Dict
from typing import Union
from typing import List
from typing import Optional
from typing import Any
from typing import Tuple
def apply_variants(variants: Dict[Union[str, List[str]], int], parameters: Optional[Dict[Any, Any]] = None, variant=DEFAULT_VARIANT_VARIANTS) -> Tuple[PetriNet, Marking, Marking]:
"""
Apply the chosen IM algorithm to a dictionary/list/set of variants obtaining a Petri net along with an initial and final marking
Parameters
-----------
variants
Dictionary/list/set of variants in the log
variant
Variant of the algorithm to apply, possible values:
- Variants.IMd
parameters
Parameters of the algorithm, including:
Parameters.ACTIVITY_KEY -> attribute of the log to use as activity name
(default concept:name)
Returns
-----------
net
Petri net
initial_marking
Initial marking
final_marking
Final marking
"""
return exec_utils.get_variant(variant).apply_variants(variants, parameters=parameters) | 13a2466c1c7921fe5f6ccbf4fe819e2ac19ee87f | 7,707 |
def query_update(request: HttpRequest, **kwargs: str) -> str:
"""Update the query string with new values."""
updated = request.GET.copy()
for key, value in kwargs.items():
updated[key] = value
return updated.urlencode() | 43d60853f53fec4e696c2c6010b6e3b3db0da389 | 7,708 |
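A usage sketch inside a Django test (settings configured); existing parameters are preserved and any keyword passed in overrides the corresponding key:

```python
from django.test import RequestFactory

request = RequestFactory().get("/items/?page=2&sort=name")
print(query_update(request, page="3"))   # -> "page=3&sort=name"
```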
def get_user_info(user_id):
""" Fetches User Info Based On User ID
:param user_id:
:return: user
"""
user = session.query(User).filter_by(id=user_id).one_or_none()
return user | e1134f9305bd6df1b650bc3362c0e85f6dc10ccf | 7,709 |
def gatk_version(request) -> GATKVersion:
"""Given a version number, return a GATKVersion."""
return GATKVersion(request.param) | ec05d5f34f45454bb7c0b8c562851c3691d01ace | 7,710 |
def load_objs(name_obj_dat, sim, obj_ids, auto_sleep=True):
"""
- name_obj_dat: List[(str, List[
transformation as a 4x4 list of lists of floats,
int representing the motion type
])
"""
static_obj_ids = []
for i, (name, obj_dat) in enumerate(name_obj_dat):
if len(obj_ids) == 0:
obj_id = add_obj(name, sim)
else:
obj_id = obj_ids[i]
trans = obj_dat[0]
obj_type = obj_dat[1]
use_trans = mn.Matrix4(trans)
sim.set_transformation(use_trans, obj_id)
sim.set_linear_velocity(mn.Vector3(0, 0, 0), obj_id)
sim.set_angular_velocity(mn.Vector3(0, 0, 0), obj_id)
sim.set_object_motion_type(MotionType(obj_type), obj_id)
static_obj_ids.append(obj_id)
if len(obj_ids) != 0:
return obj_ids
return static_obj_ids | 899670f7ff63ef124dd51575ff59560b27b6e974 | 7,711 |
def get_mod_metadata(module: Module):
"""
Get descriptions for produced dependencies.
"""
meta = {}
has_meta = hasattr(module, 'prod_meta')
for prod in module.produces:
prod = prod.replace('?', '').replace('!', '')
if not has_meta:
            meta[prod] = '<no description>'
continue
prod_meta = module.prod_meta.get(prod)
meta[prod] = prod_meta if prod_meta else '<no description>'
return meta | b0000c555cc22f5d81f31241bc3eaa3aee7d99ad | 7,712 |
def register_module():
"""Registers this module in the registry."""
dashboard.dashboard.DashboardRegistry.add_analytics_section(
dashboard.analytics.QuestionScoreHandler)
global_handlers = []
for path, handler_class in mapreduce_main.create_handlers_map():
# The mapreduce and pipeline libraries are pretty casual about
# mixing up their UI support in with their functional paths.
# Here, we separate things and give them different prefixes
# so that the only-admin-access patterns we define in app.yaml
# can be reasonably clean.
if path.startswith('.*/pipeline'):
if 'pipeline/rpc/' in path or path == '.*/pipeline(/.+)':
path = path.replace('.*/pipeline', '/mapreduce/ui/pipeline')
else:
path = path.replace('.*/pipeline', '/mapreduce/worker/pipeline')
else:
if '_callback' in path:
path = path.replace('.*', '/mapreduce/worker', 1)
elif '/list_configs' in path:
# This needs mapreduce.yaml, which we don't distribute. Not
# having this prevents part of the mapreduce UI front page
# from loading, but we don't care, because we don't want
# people using the M/R front page to relaunch jobs anyhow.
continue
else:
path = path.replace('.*', '/mapreduce/ui', 1)
# The UI needs to be guarded by a config so that casual users aren't
# exposed to the internals, but advanced users can investigate issues.
if '/ui/' in path or path.endswith('/ui'):
if (hasattr(handler_class, 'dispatch') and
not hasattr(handler_class, 'real_dispatch')):
handler_class.real_dispatch = handler_class.dispatch
handler_class.dispatch = ui_access_wrapper
global_handlers.append((path, handler_class))
# Wrap worker handlers with check that request really is coming
# from task queue.
else:
if (hasattr(handler_class, 'dispatch') and
not hasattr(handler_class, 'real_dispatch')):
handler_class.real_dispatch = handler_class.dispatch
handler_class.dispatch = authorization_wrapper
global_handlers.append((path, handler_class))
# Tell map/reduce internals that this is now the base path to use.
mapreduce_parameters.config.BASE_PATH = '/mapreduce/worker'
global custom_module
custom_module = custom_modules.Module(
MODULE_NAME,
'Provides support for analysis jobs based on map/reduce',
global_handlers, [])
return custom_module | 7e711f6e67e7a9bcd118dc304bd99073b25a8049 | 7,713 |
import warnings
import numpy as np
def theta_b(wlen, d, n=1):
    """return the Bragg angle, $\theta_{B}$, (deg) for a given wavelength
    (\AA) and d-spacing (\AA)"""
    if d != 0:
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
_thb = np.rad2deg(np.arcsin(((wlen * n) / (2 * d))))
return _thb
except Exception:
return 0
else:
return 0 | 89080b455744bab1e94aa47eb53a3a2935985d32 | 7,714 |
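A worked example of Bragg's law, nλ = 2d·sin(θ_B): Cu Kα radiation (λ ≈ 1.5406 Å) on the Si(111) reflection (d ≈ 3.1356 Å) gives θ_B ≈ 14.2°:

```python
print(theta_b(1.5406, 3.1356))   # ~14.22 degrees
```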
def replace_newlines(s, replacement=' / ', newlines=(u"\n", u"\r")):
"""
Used by the status message display on the buddy list to replace newline
characters.
"""
    # turn all carriage returns into newlines
for newline in newlines[1:]:
s = s.replace(newline, newlines[0])
# while there are pairs of newlines, turn them into one
while s.find(newlines[0] * 2) != -1:
s = s.replace( newlines[0] * 2, newlines[0])
# replace newlines with the newline_replacement above
return s.strip().replace(newlines[0], replacement) | d7b42ad67a3732c1ecac5bbfd7b9920b0215aa13 | 7,715 |
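A usage sketch: carriage returns are normalised to newlines, runs of blank lines collapse, and the remaining newlines become the replacement string:

```python
print(replace_newlines(u"away\r\n\r\nback at 5pm"))   # -> "away / back at 5pm"
```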
def default_bucket_name():
"""Obtain the default Google Storage bucket name for this application.
Returns:
A string that is the name of the default bucket.
"""
return files._default_gs_bucket_name() | 01cad1b881217849ff55af6f1b67da624b584810 | 7,717 |
def LineGaussSeidel_i(Uo, Beta):
"""Return the numerical solution of dependent variable in the model eq.
This routine uses the Line-Gauss Seidel method along constant i
direction (parallel to y-axis)
to obtain the solution of the Poisson's equation.
Call signature:
LineGaussSeidel_i(Uo, Beta)
Parameters
----------
Uo : 2D array
The dependent variable obtained from the previous iteration
level, n.
Beta : float
        Coefficient in the Poisson's finite difference approximation.
Beta = dX/dY
Returns
-------
U : 2D array
The dependent variable calculated at time level (n+1) within the
entire domain.
"""
shapeU = Uo.shape # Obtain Dimension
if len(shapeU) == 1:
raise DimensionError("1D", "POISSONS")
# Proceed to numerical solution
U = Uo.copy() # Initialize U
iMax, jMax = shapeU
B2 = Beta*Beta
A = [B2 for j in range(jMax)]
B = [-2.0*(1.0 + B2) for j in range(jMax)]
C = [B2 for j in range(jMax)]
D = [0 for j in range(jMax)]
UU = [0 for j in range(jMax)]
    # NOTE that in the POISSON'S SOLVERS formulation, the dependent variable U
    # is used on the RHS of the discretized eqn instead of Uo as in other
    # MODELS, because the formulation requires values of the dependent
    # variable from advanced time steps (k+1) at points (i-1,j) or (i,j-1).
for i in range(1, iMax-1):
UU[0] = U[i][0] # Convert U to 1-D array for Tridiagonal solver
UU[-1] = U[i][jMax-1]
for j in range(1, jMax-1):
D[j] = -(U[i+1][j] + U[i-1][j])
UU = TridiagonalSolver(jMax, A, B, C, D, UU)
for j in range(1, jMax-1):
U[i][j] = UU[j]
return U | 2fd2fda54169bc0f1e686781b26823a8f1a29b49 | 7,718 |
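`LineGaussSeidel_i` depends on a `TridiagonalSolver` that is not shown. A hypothetical sketch of the Thomas algorithm it is assumed to implement, with `A`, `B`, `C` as the sub-, main- and super-diagonal coefficients, `D` the right-hand side, and the Dirichlet boundary values already stored in `UU[0]` and `UU[-1]` (the real solver may differ):

```python
def TridiagonalSolver(n, A, B, C, D, UU):
    U = list(UU)
    P = [0.0] * n
    Q = [0.0] * n
    Q[0] = U[0]                                # fold the lower boundary value into the sweep
    for j in range(1, n - 1):                  # forward elimination
        denom = B[j] - A[j] * P[j - 1]
        P[j] = C[j] / denom
        Q[j] = (D[j] - A[j] * Q[j - 1]) / denom
    for j in range(n - 2, 0, -1):              # back substitution from the known upper boundary
        U[j] = Q[j] - P[j] * U[j + 1]
    return U
```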
def add_padding_to_grid(
component,
grid_size=127,
x=10,
y=10,
bottom_padding=5,
layers=[pp.LAYER.PADDING],
suffix="p",
):
""" returns component width a padding layer on each side
matches a minimum size
"""
c = pp.Component(name=f"{component.name}_{suffix}")
c << component
c.ports = component.ports
if c.size_info.height < grid_size:
y_padding = grid_size - c.size_info.height
else:
n_grids = np.ceil(c.size_info.height / grid_size)
y_padding = n_grids * grid_size - c.size_info.height
if c.size_info.width < grid_size:
x_padding = grid_size - c.size_info.width
else:
n_grids = np.ceil(c.size_info.width / grid_size)
x_padding = n_grids * grid_size - c.size_info.width
x_padding -= x
y_padding -= y
points = [
[c.xmin - x_padding / 2, c.ymin - bottom_padding],
[c.xmax + x_padding / 2, c.ymin - bottom_padding],
[c.xmax + x_padding / 2, c.ymax + y_padding - bottom_padding],
[c.xmin - x_padding / 2, c.ymax + y_padding - bottom_padding],
]
for layer in layers:
c.add_polygon(points, layer=layer)
return c | 5f886f7f5cec874eda10675580954ad46cbb2200 | 7,719 |
def _find_role(oneandone_conn, role):
"""
Given a name, validates that the role exists
whether it is a proper ID or a name.
Returns the role if one was found, else None.
"""
for _role in oneandone_conn.list_roles(per_page=1000):
if role in (_role['id'], _role['name']):
return _role | b8e2e93b13c9595e40dd61b2e9bbda1f89f23cca | 7,720 |
import numpy as np
def created_median_mask(disparity_map, valid_depth_mask, rect=None):
    """Generate a mask whose value is 0 in the unwanted regions of the rectangle and 1 in the wanted regions."""
    if rect is not None:
        x, y, w, h = rect
        disparity_map = disparity_map[y:y + h, x:x + w]
        valid_depth_mask = valid_depth_mask[y:y + h, x:x + w]
    # Get the median disparity value.
    median = np.median(disparity_map)
    # A pixel whose valid disparity differs from the median by 12 or more can be treated as noise; the value 12 was chosen empirically.
    return np.where((valid_depth_mask == 0) | (abs(disparity_map - median) < 12), 1.0, 0.0) | e57a990d250564c4e8d2b59aa27522115c9791e2 | 7,722 |
import torch
def l2_loss(pred_traj, pred_traj_gt, mode='sum'):
"""
Input:
- pred_traj: Tensor of shape (seq_len, batch, 2). Predicted trajectory.
    - pred_traj_gt: Tensor of shape (seq_len, batch, 2). Ground truth
predictions.
- mode: Can be one of sum, average, raw
Output:
- loss: l2 loss depending on mode
"""
seq_len, batch, _ = pred_traj.size()
loss = (pred_traj_gt.permute(1, 0, 2) - pred_traj.permute(1, 0, 2))**2
if mode == 'sum':
return torch.sum(loss)
elif mode == 'raw':
return loss.sum(dim=2).sum(dim=1) | f9e98e30d4299c79a93de6905c65dcb23da65ac1 | 7,723 |
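A usage sketch with random trajectories of length 8 for a batch of 4 agents:

```python
import torch

pred = torch.randn(8, 4, 2)
gt = torch.randn(8, 4, 2)
print(l2_loss(pred, gt, mode='sum'))         # scalar tensor
print(l2_loss(pred, gt, mode='raw').shape)   # torch.Size([4]), one value per batch element
```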
import requests
import re
def api_wowlight_version_check(version: str) -> bool:
"""
Checks incoming wow-lite wallet version, returns False when the version is too old and needs to be upgraded.
:param version:
:return: bool
"""
url = "https://raw.githubusercontent.com/wownero/wow-lite-wallet/master/src/renderer/components/Landing/LandingPage.vue"
try:
resp = requests.get(url, headers={"User-Agent": "Mozilla 5.0"})
resp.raise_for_status()
content = resp.content.decode()
    except Exception:
return True # default to true
# parse latest version
current = next(re.finditer(r"wowlite\?version=(\d+.\d+.\d+)", content), None)
if not current:
return False
return version == current.group(1) | 470f8580df357c206b595c1145e04e33fd897058 | 7,725 |
import math
def fruit_growth(jth: int, last_24_canopy_t):
"""
Equations 9.38
fruit_growth_rate_j = POTENTIAL_FRUIT_DRY_WEIGHT*math.exp(-math.exp(-curve_steepness*(days_after_fruit_set - fruit_development_time)))
Returns: fruit growth rate [mg {CH2O} fruit^-1 d^-1]
"""
fruit_development_rate = fruit_development(last_24_canopy_t)
Fruit_Growth_Period = 1/(fruit_development_rate*86400)
fruit_development_time = -93.4 + 548.0 * Fruit_Growth_Period
curve_steepness = 1/(2.44 + 403.0 * fruit_development_time)
days_after_fruit_set = ((jth-1)+0.5)*Fruit_Growth_Period/FRUIT_DEVELOPMENT_STAGES_NUM
return POTENTIAL_FRUIT_DRY_WEIGHT*math.exp(-math.exp(-curve_steepness*(days_after_fruit_set - fruit_development_time))) | 78d1ee0e9ea8d364b6282466fb9ee27dc9cbb602 | 7,726 |
from typing import Callable
def _pickled_cache_s(filepath: str) -> Callable[[Callable], Callable]:
"""Store the last result of the function call
in a pickled file (string version)
Args:
filepath (str): The path of the file to read/write
Returns:
Callable[[Callable], Callable]: function decorator.
The decorated function will also have an attribute
function 'forced', that calls the function forcing
cache overwriting"""
return _pickled_cache_m(lambda *args, **kwargs: filepath) | b95703fc90275ba06d3816b442d07b14e4854eaf | 7,727 |
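`_pickled_cache_s` delegates to a `_pickled_cache_m` decorator that is not shown; it apparently maps the call arguments to a cache path. A hypothetical sketch of what it might look like (the real implementation may differ):

```python
import functools
import os
import pickle
from typing import Callable


def _pickled_cache_m(path_fn: Callable[..., str]) -> Callable[[Callable], Callable]:
    def decorator(func: Callable) -> Callable:
        def _call_and_store(*args, **kwargs):
            # Always call the function and overwrite the cache file.
            result = func(*args, **kwargs)
            with open(path_fn(*args, **kwargs), "wb") as fh:
                pickle.dump(result, fh)
            return result

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            filepath = path_fn(*args, **kwargs)
            if os.path.exists(filepath):
                with open(filepath, "rb") as fh:
                    return pickle.load(fh)
            return _call_and_store(*args, **kwargs)

        wrapper.forced = _call_and_store   # matches the 'forced' attribute described above
        return wrapper
    return decorator
```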
import datetime
def home(request):
"""Index page view
:param request: HTTP request
:return: index page render
"""
today = datetime.date.today()
return render(request, 'taskbuster/index.html',
{'today': today, 'now': now()}) | cccfa91a728ce4f5dd482bbbd9418ec94f102844 | 7,728 |
from typing import Tuple
from typing import Iterable
def get_trials_for_drug(
drug: Tuple[str, str], *, client: Neo4jClient
) -> Iterable[Node]:
"""Return the trials for the given drug.
Parameters
----------
client :
The Neo4j client.
drug :
The drug to query.
Returns
-------
:
The trials for the given drug.
"""
return client.get_targets(
drug,
relation="tested_in",
source_type="BioEntity",
target_type="ClinicalTrial",
) | 64641e52468d46a3b4071d58cbdbff3167ff3fa6 | 7,729 |
from typing import List
import torch
def convert_features_to_dataset(all_features: List[InputFeaturesTC],
dataset_type: str = 'pytorch'
) -> TensorDataset:
"""Converts a list of features into a dataset.
Args:
all_features (:obj:`list` of :obj:`InputFeatureTC`): the list of
``InputFeatureTC`` originating from a list of ``InputExampleTC``
that will constitute the dataset.
        dataset_type (str): the type of dataset, currently only `pytorch` is
supported.
Returns:
A pytorch TensorDataset.
Raises:
ValueError if `dataset_type` is not supported.
"""
if dataset_type == 'pytorch':
all_input_ids = torch.tensor([x.input_ids for x in all_features],
dtype=torch.long)
all_attention_mask = torch.tensor([x.attention_mask
for x in all_features],
dtype=torch.long)
all_token_type_ids = torch.tensor([x.token_type_ids
for x in all_features],
dtype=torch.long)
all_label_ids = torch.tensor([x.label_ids
for x in all_features],
dtype=torch.long)
# Create Tensor dataset
dataset = TensorDataset(all_input_ids, all_attention_mask,
all_token_type_ids, all_label_ids)
else:
raise ValueError(f'Invalid return dataset type: {dataset_type}')
return dataset | 88e892effbc60569d35d8f14e1a8032837d409e0 | 7,730 |
import requests
from bs4 import BeautifulSoup
def soup_from_name(username):
""" Grabs bs4 object from html page """
# html_source = urlopen('https://www.instagram.com/'+ str(username) + '/')
url = 'https://www.instagram.com/'+ str(username) + '/'
headers = {"User-Agent" : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0)" \
"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"}
html_source = requests.get(url, headers=headers).text
return BeautifulSoup(html_source, 'html.parser')
#react-root > section > main > div > div.Nd_Rl._2z6nI > article > div._4Kbb_ > div > h2
# print(soup.body.span.section.main.div.div.article.div.div.h2) | 442c6e9fa036fef59b82246462bf0e992384fd15 | 7,731 |
def SectionMenu(rating_key, title=None, base_title=None, section_title=None, ignore_options=True,
section_items_key="all"):
"""
displays the contents of a section
:param section_items_key:
:param rating_key:
:param title:
:param base_title:
:param section_title:
:param ignore_options:
:return:
"""
items = get_all_items(key=section_items_key, value=rating_key, base="library/sections")
kind, deeper = get_items_info(items)
title = unicode(title)
section_title = title
title = base_title + " > " + title
oc = SubFolderObjectContainer(title2=title, no_cache=True, no_history=True)
if ignore_options:
add_ignore_options(oc, "sections", title=section_title, rating_key=rating_key, callback_menu=IgnoreMenu)
return dig_tree(oc, items, MetadataMenu,
pass_kwargs={"base_title": title, "display_items": deeper, "previous_item_type": "section",
"previous_rating_key": rating_key}) | 3ba91e054de81c4d8eb32d2feaeb9ab99125683e | 7,732 |
def round_floats_for_json(obj, ndigits=2, key_ndigits=None):
"""
Tries to round all floats in obj in order to reduce json size.
ndigits is the default number of digits to round to,
key_ndigits allows you to override this for specific dictionary keys,
though there is no concept of nested keys.
It converts numpy arrays and iterables to lists,
so it should only be used when serializing to json
"""
if key_ndigits is None:
key_ndigits = {}
if isinstance(obj, np.floating):
obj = float(obj)
elif isinstance(obj, np.ndarray):
obj = obj.tolist()
if isinstance(obj, float):
obj = round(obj, ndigits)
elif isinstance(obj, dict):
new_obj = {}
for k, v in obj.items():
this_ndigits = key_ndigits.get(k, ndigits)
new_obj[k] = round_floats_for_json(v, this_ndigits, key_ndigits)
return new_obj
elif isinstance(obj, str):
return obj
else:
try:
return [round_floats_for_json(x, ndigits, key_ndigits) for x in obj]
except TypeError:
pass
return obj | 8143a3a063e45b6a501ca2de6f1bb5dd1b64e843 | 7,733 |
def read_shared(function_name, verb, request, local_variables=None):
"""all the shared code for each of thse read functions"""
command = function_name.split('_')[1] # assumes fn name is query_<command>
command_args, verb_args = create_filters(function_name, command, request,
local_variables)
verb = cleanup_verb(verb)
columns = local_variables.get('columns', None)
format = local_variables.get('format', None)
ret, svc_inst = run_command_verb(
command, verb, command_args, verb_args, columns, format)
return ret | c60feac9c16cfcd2d503032826aedced20d2959d | 7,734 |
def construct_SN_default_rows(timestamps, ants, nif, gain=1.0):
""" Construct list of ants dicts for each
timestamp with REAL, IMAG, WEIGHT = gains
"""
default_nif = [gain] * nif
rows = []
for ts in timestamps:
rows += [{'TIME': [ts],
'TIME INTERVAL': [0.1],
'ANTENNA NO.': [antn],
'REAL1': default_nif,
'REAL2': default_nif,
'IMAG1': default_nif,
'IMAG2': default_nif,
'WEIGHT 1': default_nif,
'WEIGHT 2': default_nif}
for antn in ants]
return rows | b81e45d2d5299042b3332a2386a0fd4d2d6d59d7 | 7,736 |
import aiohttp
async def test_disable(aresponses):
"""Test disabling AdGuard Home query log."""
async def response_handler(request):
data = await request.json()
assert data == {"enabled": False, "interval": 1}
return aresponses.Response(status=200)
aresponses.add(
"example.com:3000",
"/control/querylog_info",
"GET",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text='{"interval": 1}',
),
)
aresponses.add(
"example.com:3000", "/control/querylog_config", "POST", response_handler
)
aresponses.add(
"example.com:3000",
"/control/querylog_info",
"GET",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text='{"interval": 1}',
),
)
aresponses.add(
"example.com:3000",
"/control/querylog_config",
"POST",
aresponses.Response(status=500),
)
async with aiohttp.ClientSession() as session:
adguard = AdGuardHome("example.com", session=session)
await adguard.querylog.disable()
with pytest.raises(AdGuardHomeError):
await adguard.querylog.disable() | a9c211d5bf9a0c2842ae835215718f4f81430c69 | 7,737 |
def kron_diag(*lts):
"""Compute diagonal of a KroneckerProductLazyTensor from the diagonals of the constituiting tensors"""
lead_diag = lts[0].diag()
if len(lts) == 1: # base case:
return lead_diag
trail_diag = kron_diag(*lts[1:])
diag = lead_diag.unsqueeze(-2) * trail_diag.unsqueeze(-1)
return diag.transpose(-1, -2).reshape(*diag.shape[:-2], -1) | d57bb679dede93ababb2d164cfc85132acef60db | 7,739 |
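A sanity check of the identity the recursion relies on, written with dense tensors instead of lazy tensors: the diagonal of A ⊗ B is the outer product of the two diagonals, flattened in the same order `torch.kron` uses:

```python
import torch

A, B = torch.randn(3, 3), torch.randn(4, 4)
expected = torch.kron(A, B).diagonal()
got = (A.diagonal().unsqueeze(-1) * B.diagonal().unsqueeze(-2)).reshape(-1)
assert torch.allclose(expected, got)
```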
import time
def makeBundleObj(config_fname, getPackage, getPackageLength):
"""Given a description of a thandy bundle in config_fname,
return a new unsigned bundle object. getPackage must be a function
returning a package object for every package the bundle requires
    when given the package's name as input. getPackageLength
must be a function returning the length of the package file.
"""
packages = []
def ShortGloss(lang, val): packages[-1]['gloss'][lang] = val
def LongGloss(lang, val): packages[-1]['longgloss'][lang] = val
def Package(name, order, version=None, path=None, optional=False):
packages.append({'name' : name,
'version' : version,
'path' : path,
'order' : order,
'optional' : optional,
'gloss' : {},
'longgloss' : {} })
preload = { 'ShortGloss' : ShortGloss, 'LongGloss' : LongGloss,
'Package' : Package }
r = readConfigFile(config_fname,
['name',
'os',
'version',
'location',
], ['arch'], preload)
result = { '_type' : "Bundle",
'at' : formatTime(time.time()),
'name' : r['name'],
'os' : r['os'],
'version' : r['version'],
'location' : r['location'],
'packages' : packages }
if r.has_key('arch'):
result['arch'] = r['arch']
for p in packages:
try:
pkginfo = getPackage(p['name'])
except KeyError:
raise thandy.FormatException("No such package as %s"%p['name'])
p['hash'] = formatHash(getDigest(pkginfo))
p['length'] = getPackageLength(p['name'])
if p['path'] == None:
p['path'] = pkginfo['location']
if p['version'] == None:
p['version'] = pkginfo['version']
BUNDLE_SCHEMA.checkMatch(result)
return result | 4579cab99a2e1f7f52bc49bfd12c001aee06de21 | 7,740 |
import re
def find_English_term(term: list) -> tuple:
"""
    Find the English words and numbers in a term list
    and remove them from the terms
    :param term: the term list
    :return term: the term list with the English words and numbers removed
    :return Eng_terms: the removed English words and numbers
"""
temp_terms = []
Eng_terms = []
for i in range(len(term)):
string = term[i]
result = re.findall(r'[a-zA-Z0-9]+', string)
for j in result:
temp_terms.append(j)
term[i] = re.sub(pattern=j, repl='', string=term[i])
temp_terms = set(temp_terms)
for k in temp_terms:
Eng_terms.append(k)
return term, Eng_terms | 69507970eb226d2379bb11e121bc224b1ce741ad | 7,741 |
def add_target_variable(df: pd.DataFrame) -> pd.DataFrame:
"""Add column with the target variable to the given dataframe."""
return df.assign(y=df.rent + df.admin_fee) | 236f16bab38d36625173640d5223f9fed48f34fe | 7,743 |
def get_address_from_public_key(public_key):
""" Get bytes from public key object and call method that expect bytes
:param public_key: Public key object
:param public_key: ec.EllipticCurvePublicKey
:return: address in bytes
:rtype: bytes
"""
public_key_bytes = get_public_key_bytes_compressed(public_key)
return get_address_from_public_key_bytes(public_key_bytes) | 775701261e07b9153807d9b5ae08f02050ecc51e | 7,744 |
import re
import math
def read_ORIGEN_gamma_spectrum(output_filename, cooling_time_string):
"""
Function for reading a gamma spectrum from an ORIGEN output file.
"""
    #Text that is too long may cause problems, so check for it.
if len(cooling_time_string) >= 10:
print("The cooling time could not be found in the input, the header text \"" + cooling_time_string + "\" is too long.")
return 0,0
found_spectrum = False
bin_count = [0]
bin_edges = [0]
f = open(output_filename, 'r')
ORIGEN = f.read()
if len(ORIGEN) < 1:
#Did not read anything, or read an empty file. Return empty arrays
print("Failed to open ORIGEN output file " + output_filename)
return bin_edges, bin_count
#get the gamma spectra form the output
#The header we are looking for starts with this string, and ends with a total row, the data we want is in between.
spectrumpattern = re.compile("gamma spectra, photons\/sec\/basis(.*?)\s*totals", re.DOTALL)
if re.search(spectrumpattern, ORIGEN):
spectrum_list = re.findall(spectrumpattern, ORIGEN)
else:
#Failed to find any gamma spectrum, return empty arrays
print("Failed to find a gamma spectrum in ORIGEN output file " + output_filename)
return bin_edges, bin_count
for spectrum in spectrum_list:
spectrum_textlines = spectrum.splitlines()
#Get the spectrum table header, search for cooling_time_string in the header
headers = spectrum_textlines[3]
#after removing the 23 first characters, each column header should start with a space, followed
#by possibly more spaces for right-alignmnet, and then the cooling time string.
#Each such header is 10 characters long.
header_columns = headers[23:]
#Column headers are padded with spaces at the beginning to be 10 characters wide.
header_string = cooling_time_string.strip()
while len(header_string ) < 10:
header_string = ' ' + header_string
if header_columns.find(header_string) != -1:
column = math.ceil(header_columns.find(header_string)/10)
found_spectrum = True
#allocate memory
bin_count = [0] * (len(spectrum_textlines)-4)
bin_edges = [0] * (len(spectrum_textlines)-3)
#Table should start at row 4.
for i in range(4,len(spectrum_textlines)):
#read the gamma spectrum
line = spectrum_textlines[i].strip()
split_line = line.split(" ")
#The split lines should have the following format:
# <line number> <low bin edge> <hyphen> <high bin edge>
#<first cooling time bin count> <second cooling time bin count> <third...>
bin_count[i-4] = float(split_line[column + 3])
bin_edges[i-4] = float(split_line[1])
#Final upper bin edge.
bin_edges[len(spectrum_textlines)-4] = float(split_line[3])
if found_spectrum == False:
#Did not find the requested spectra in the file, return empty arrays.
print("Unable to find a gamma spectrum with cooling time " + cooling_time_string +
" in ORIGEN output file " + output_filename)
bin_count = [0]
bin_edges = [0]
return bin_edges, bin_count
else:
#Found the requested gamma spectrum, return it.
#If several are found, this will return the last one, which is typically the one of interest.
return bin_edges, bin_count | 1e722bea88e9947f7c297a07bb4f0c5cb5ec4419 | 7,745 |
def process_cases(list_):
"""Process cases and determine whether group flag or empty line."""
# Get information
is_empty = (len(list_) == 0)
if not is_empty:
is_group = list_[0].isupper()
is_comment = list_[0][0] == '#'
else:
is_group = False
is_comment = False
# Finishing
return is_empty, is_group, is_comment | 5a0dda6873417cfcd813efe30b64c9e0a71b9b11 | 7,746 |
def updateNestedDicts(d1, d2):
"""Updates two dictionaries, assuming they have the same entries"""
finalDict = createDictionary()
for key in d1:
#print(key)
newDict = updateDicts(d1[key], d2[key])
finalDict[key] = newDict
return finalDict | 29fa5218cb4bca67f8e358aebf742025dd541789 | 7,747 |
def page_not_found(e):
"""error handler for page not found"""
flash(e.description, 'danger')
return render_template('main/404.html'), 404 | a64941bca6bd9e90d35286e3d2474c2841ecb112 | 7,748 |
def get_cifar10_raw_data():
"""
Gets raw CIFAR10 data from http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz.
Returns:
X_train: CIFAR10 train data in numpy array with shape (50000, 32, 32, 3).
Y_train: CIFAR10 train labels in numpy array with shape (50000, ).
X_test: CIFAR10 test data in numpy array with shape (10000, 32, 32, 3).
Y_test: CIFAR10 test labels in numpy array with shape (10000, ).
"""
X_train, Y_train, X_test, Y_test = load_cifar10(CIFAR10_FOLDER)
return X_train, Y_train, X_test, Y_test | 1606c8aa00729a6fb9dca8fbd5663e78d5c93503 | 7,749 |
from collections import OrderedDict
def MovieMaker(images, dpath, site, scheck, coords, bandlist, datelist, bands):
""" Function to build the movie """
failed = 0
while failed <2:
spath = dpath + "UoL/FIREFLIES/VideoExports/%s" % coords["name"]
# for bands in bandcombo:
print("\n starting %s at:" % bands, pd.Timestamp.now())
# ========== Create a single dataarray for the raster images ===========
sets = OrderedDict()
if type(bands) == str:
imstack = images[bands]
sets[bands] = xr.concat(imstack, dim="time")
fnout = "%s/LANDSAT_5_7_8_%s_%s.mp4" % (spath, coords["name"], bands)
elif type(bands) == list:
bndnm = "multi_" + "_".join(bands)
for bnd in bands:
imstack = images[bnd]
sets[bnd] = xr.concat(imstack, dim="time")
fnout = "%s/LANDSAT_5_7_8_%s_%s.mp4" % (spath, coords["name"], bndnm)
else:
ipdb.set_trace()
# ========== Loop over each frame of the video ==========
nx = []
def frame_maker(index):
            # ========== Pull the information from the pandas part of the loop ==========
indx = int(index)
info = datelist.iloc[int(indx)] #rowinfo[1]
# # ========== Check the dates i'm exporting ==========
# nx.append(frame.time.values)
# ========== create and internal subplot ==========
def _subplotmaker(ax, bnds, spt):
# ========== Get the data for the frame ==========
frame = sets[bnds].isel(time=int(indx))
# ========== Set the colors ==========
# if bnds == "NRG":
color = "blue"
# else:
# color = "purple"
# ========== Grab the data ==========
frame.plot.imshow(ax=ax, rgb="band")# , transform=ccrs.PlateCarree())
                ## =========== Set up the animation ===========
ax.set_title(spt)
ax.scatter(coords.lon, coords.lat, 5, c=color, marker='+')#, transform=ccrs.PlateCarree())
# ========== Set up the box ==========
blonO = np.min([coords["lonb_COP_min"], coords["lonb_MOD_min"]])
blatO = np.min([coords["latb_COP_min"], coords["latb_MOD_min"]])
blonM = np.max([coords["lonb_COP_max"], coords["lonb_MOD_max"]])
blatM = np.max([coords["latb_COP_max"], coords["latb_MOD_max"]])
rect = mpl.patches.Rectangle(
(blonO,blatO),
blonM-blonO,
blatM-blatO,linewidth=2,edgecolor=color,facecolor='none')
ax.add_patch(rect)
                # +++++ change the number of ticks
ax.xaxis.set_major_locator(plt.MaxNLocator(5))
# ========== Build the plots ==========
if type(bands) == str:
# Set up the figure
fig, axs = plt.subplots(1, figsize=(11,10))
# create the title
spt = "%s %s %s frame %d" % (bands, info.satellite, info.date.split(" ")[0], datelist.iloc[indx]["index"])
# make the figure
_subplotmaker(axs, bands, spt)
plt.axis('scaled')
else:
# Set up the figure
fig, axs = plt.subplots(1,len(bands), sharey=True, figsize=(20,8),)
# +++++ Loop over the figure combo +++++
for ax, bnds, in zip(axs, bands):
# make the figure
_subplotmaker(ax, bnds, bnds)
ax.set_aspect('equal')
# Get rid of the excess lats
for ax in axs.flat:
ax.label_outer()
# ========== Change parms for the entire plot =========
fig.suptitle("%s %s - Frame%03d" % (
info.satellite, pd.Timestamp(info.date).strftime('%d-%m-%Y'), datelist.iloc[indx]["index"]))
# ipdb.set_trace()
plt.axis('scaled')
            # +++++ Make the images bigger by eliminating space +++++
fig.subplots_adjust(left=0.1, right=0.9, top=1, bottom=0, wspace=0, hspace=0) #top = 1, bottom = 1, right = 1, left = 1,
plt.tight_layout()
plt.margins(0,0)
return mplfig_to_npimage(fig)
        # ========== Export the videos ==========
mov = mpe.VideoClip(frame_maker, duration=int(datelist.shape[0]))
# plays the clip (and its mask and sound) twice faster
# newclip = clip.fl_time(lambda: 2*t, apply_to=['mask','audio'])
# fnout = "%s/LANDSAT_5_7_8_%s_complete.txt" % (spath, coords["name"])
print("Starting Write of the data at:", pd.Timestamp.now())
try:
mov.write_videofile(fnout, fps=1)
return
except Exception as ex:
warn.warn(str(ex))
print("Movie making failed. This will need to be redone")
failed +=1
raise ValueError | 3daba6917329e1be8d58c36bfe709488db34d430 | 7,751 |
def TypeUrlToMessage(type_url):
"""Returns a message instance corresponding to a given type URL."""
if not type_url.startswith(TYPE_URL_PREFIX):
raise ValueError("Type URL has to start with a prefix %s: %s" %
(TYPE_URL_PREFIX, type_url))
full_name = type_url[len(TYPE_URL_PREFIX):]
try:
return symbol_database.Default().GetSymbol(full_name)()
except KeyError as e:
raise ProtobufTypeNotFound(e.message) | 03727fd60bdebed6b47768f2ec489c68b0a8a45b | 7,752 |
def encode_sentence(tokenized_sentence, max_word_len):
"""
Encode sentence as one-hot tensor of shape [None, MAX_WORD_LENGTH,
CHARSET_SIZE].
"""
encoded_sentence = []
sentence_len = len(tokenized_sentence)
for word in tokenized_sentence:
# Encode every word as matrix of shape [MAX_WORD_LENGTH,
# CHARSET_SIZE] where each valid character gets encoded as one-hot
# row vector of word matrix.
encoded_word = np.zeros([max_word_len, len(CHARSET)])
for char, encoded_char in zip(word, encoded_word):
if char in CHARSET:
encoded_char[ENCODER[char]] = 1.0
encoded_sentence.append(encoded_word)
return np.array(encoded_sentence), sentence_len | 77cadac1b4d29976883cc4d8f7540992b997c381 | 7,753 |
def footnote_ref(key, index):
"""Renders a footnote
:returns: list of `urwid Text markup <http://urwid.org/manual/displayattributes.html#text-markup>`_
tuples.
"""
return render_no_change(key) | 52182e90a73f2b0fb4499b919b3ebf71b562dcbf | 7,754 |
def mconcat(*args):
"""
Apply monoidal concat operation in arguments.
This function infers the monoid from value, hence it requires at least
one argument to operate.
"""
values = args[0] if len(args) == 1 else args
instance = semigroup[type(values[0])]
return instance(*values) | 0c939ab0da77843b96c11dcf523557351a602a65 | 7,755 |
def parallelMeasurements(filename='CCD204_05325-03-02_Hopkinson_EPER_data_200kHz_one-output-mode_1.6e10-50MeV.txt',
datafolder='/Users/sammy/EUCLID/CTItesting/data/', gain1=1.17, limit=105, returnScale=False):
"""
:param filename:
:param datafolder:
:param gain1:
:param limit:
:return:
"""
tmp = np.loadtxt(datafolder + filename, usecols=(0, 5)) #5 = 152.55K
ind = tmp[:, 0]
values = tmp[:, 1]
values *= gain1
if returnScale:
return ind, values
else:
values = values[ind > -5.]
values = np.abs(values[:limit])
return values | e40dedd715d76a52729c218623a90b53123f4c27 | 7,756 |
from typing import Union
def get_all_urls(the_json: str) -> list:
"""
Extract all URLs and title from Bookmark files
Args:
the_json (str): All Bookmarks read from file
Returns:
        list(tuple): List of tuples with bookmark title and URL
"""
def extract_data(data: dict):
if isinstance(data, dict) and data.get('type') == 'url':
urls.append({'name': data.get('name'), 'url': data.get('url')})
if isinstance(data, dict) and data.get('type') == 'folder':
the_children = data.get('children')
get_container(the_children)
def get_container(o: Union[list, dict]):
if isinstance(o, list):
for i in o:
extract_data(i)
if isinstance(o, dict):
for k, i in o.items():
extract_data(i)
urls = list()
get_container(the_json)
s_list_dict = sorted(urls, key=lambda k: k['name'], reverse=False)
ret_list = [(l.get('name'), l.get('url')) for l in s_list_dict]
return ret_list | 3a76a42fd303e603709c7703fbe877bb47a64a5f | 7,758 |
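A usage sketch with a Chrome-style bookmarks structure (heavily trimmed); the result is sorted by bookmark name:

```python
sample = {
    "bookmark_bar": {
        "type": "folder",
        "children": [
            {"type": "url", "name": "Python", "url": "https://www.python.org"},
            {"type": "url", "name": "Mozilla", "url": "https://www.mozilla.org"},
        ],
    }
}
print(get_all_urls(sample))
# [('Mozilla', 'https://www.mozilla.org'), ('Python', 'https://www.python.org')]
```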
from typing import Callable
from typing import Iterable
def _goertzel(
block_size: int,
sample_rate: float,
freq: float
) -> Callable[[Iterable[float]], float]:
"""
Goertzel algorithm info:
https://www.ti.com/lit/an/spra066/spra066.pdf
"""
k = round(block_size * (freq / sample_rate))
omega = (2 * pi * k) / block_size
cos_omega = 2 * cos(omega)
def _filter(samples: Iterable[float]) -> float:
s_0 = 0
s_1 = 0
s_2 = 0
for x_n in samples:
s_0 = x_n + cos_omega * s_1 - s_2
s_2 = s_1
s_1 = s_0
return s_0 - exp(-1.0 * omega) * s_1
return _filter | 4e9a039435ccc63cfa1506730c89c915f8cc14c4 | 7,759 |
def rotate_xyz(x,y,z,angles=None,inverse=False):
""" Rotate a set of vectors pointing in the direction x,y,z
angles is a list of longitude and latitude angles to rotate by.
First the longitude rotation is applied (about z axis), then the
latitude angle (about y axis).
"""
    if angles is None:
return x,y,z
xyz = np.array([x,y,z])
for dphi,dlon,dlat in angles:
dphi*=c
dlon*=c
dlat*=c
m0 = np.array([[1,0,0],
[0, np.cos(dphi),np.sin(dphi)],
[0, -np.sin(dphi), np.cos(dphi)]])
m1 = np.array([[np.cos(dlon),-np.sin(dlon),0],
[np.sin(dlon), np.cos(dlon),0],
[0,0,1]])
m2 = np.array([[np.cos(dlat),0,-np.sin(dlat)],
[0,1,0],
[np.sin(dlat), 0, np.cos(dlat)]])
m = np.dot(np.dot(m1,m2),m0)
if inverse:
m = np.linalg.inv(m)
xyz2 = np.dot(m,xyz)
return xyz2 | 803668619f1ad46f0a48db88f2aba05800f85487 | 7,760 |
def indented_open(Filename, Indentation = 3):
"""Opens a file but indents all the lines in it. In fact, a temporary
file is created with all lines of the original file indented. The filehandle
returned points to the temporary file."""
IndentString = " " * Indentation
try:
fh = open(Filename, "rb")
except:
print "%s:error: indented opening of file '%s' " % (this_name, Filename)
sys.exit(-1)
new_content = ""
for line in fh.readlines():
new_content += IndentString + line
fh.close()
tmp_filename = Filename + ".tmp"
if tmp_filename not in temporary_files:
temporary_files.append(copy(tmp_filename))
fh = open(tmp_filename, "wb")
fh.write(new_content)
fh.close()
fh = open(tmp_filename)
return fh | 26ba2213c5e9c8fd7932c92f4a162e68e642a01e | 7,761 |
def gan_loss(
gan_model: tfgan.GANModel,
generator_loss_fn=tfgan.losses.modified_generator_loss,
discriminator_loss_fn=tfgan.losses.modified_discriminator_loss,
gradient_penalty_weight=None,
gradient_penalty_epsilon=1e-10,
gradient_penalty_target=1.0,
feature_matching=False,
add_summaries=False):
""" Create A GAN loss set, with support for feature matching.
Args:
bigan_model: the model
feature_matching: Whether to add a feature matching loss to the encoder
and generator.
"""
gan_loss = tfgan.gan_loss(
gan_model,
generator_loss_fn=generator_loss_fn,
discriminator_loss_fn=discriminator_loss_fn,
gradient_penalty_weight=gradient_penalty_weight,
gradient_penalty_target=1.0,
add_summaries=add_summaries)
if feature_matching:
fm_loss = feature_matching_loss(scope=gan_model.discriminator_scope.name)
if add_summaries:
tf.summary.scalar("feature_matching_loss", fm_loss)
# or combine the original adversarial loss with FM
gen_loss = gan_loss.generator_loss + fm_loss
disc_loss = gan_loss.discriminator_loss
gan_loss = tfgan.GANLoss(gen_loss, disc_loss)
return gan_loss | dfa1639e049737f943a70ea1d5dcdfe9463b0102 | 7,762 |
def list_subjects():
"""
List all subjects
"""
check_admin()
subjects = Subject.query.all()
return render_template('admin/subjects/subjects.html', subjects=subjects, title="Subjects") | aef910bbae3d25a573b23646ca849a2b790be680 | 7,763 |
async def async_setup(opp, config):
"""Set up the Tibber component."""
opp.data[DATA_OPP_CONFIG] = config
if DOMAIN not in config:
return True
opp.async_create_task(
opp.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data=config[DOMAIN],
)
)
return True | 4186491c248d862186a93954ed47e301c4526aea | 7,764 |
def beautifyValue(v):
"""
Converts an object to a better version for printing, in particular:
- if the object converts to float, then its float value is used
- if the object can be rounded to int, then the int value is preferred
Parameters
----------
v : object
the object to try to beautify
Returns
-------
object or float or int
the beautified value
"""
try:
v = float(v)
if v.is_integer():
return int(v)
return v
except:
if type(v) == np.str_:
v = v.replace('\n', '').replace(' ', '_')
return v | aa0a8881989cbfde7a7b5f506c4f45b844df0753 | 7,765 |
def english_to_french(english_text):
""" A function written using ibm api to translate from english to french"""
translation = LT.translate(text=english_text, model_id='en-fr').get_result()
french_text = translation['translations'][0]['translation']
return french_text | 0f93fe02f8f0898b0d62c6ce4880b9eae4303459 | 7,766 |
def get_responsibilities():
"""Returns a list of the rooms in the approvers responsibility."""
email = get_jwt_identity()
# Checks if the reader is an approver
approver = Approver.query.filter_by(email=email).first()
if not approver:
return bad_request("This user does not have the approver role!")
room_list = get_responsibilites_helper(approver)
return ok({"responsibilities": room_list}) | 22ca15c30c5dc5bf5c528e2e19b96af8ab8f2d53 | 7,767 |
import datasets
def get_test_loader(dataset):
"""
Get test dataloader of source domain or target domain
:return: dataloader
"""
if dataset == 'MNIST':
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Lambda(lambda x: x.repeat(3, 1, 1)),
transforms.Normalize(mean= params.dataset_mean, std= params.dataset_std)
])
data = datasets.MNIST(root= params.mnist_path, train= False, transform= transform,
download= True)
dataloader = DataLoader(dataset= data, batch_size= 1, shuffle= False)
elif dataset == 'MNIST_M':
transform = transforms.Compose([
# transforms.RandomCrop((28)),
transforms.CenterCrop((28)),
transforms.ToTensor(),
transforms.Normalize(mean= params.dataset_mean, std= params.dataset_std)
])
data = datasets.ImageFolder(root=params.mnistm_path + '/test', transform= transform)
dataloader = DataLoader(dataset = data, batch_size= 1, shuffle= False)
elif dataset == 'MNIST_M_5':
transform = transforms.Compose([
# transforms.RandomCrop((28)),
transforms.CenterCrop((28)),
transforms.ToTensor(),
transforms.Normalize(mean= params.dataset_mean, std= params.dataset_std)
])
data = datasets.ImageFolder(root=params.mnistm_5_path + '/test', transform= transform)
dataloader = DataLoader(dataset = data, batch_size= 1, shuffle= False)
elif dataset == 'SVHN':
transform = transforms.Compose([
transforms.CenterCrop((28)),
transforms.ToTensor(),
transforms.Normalize(mean= params.dataset_mean, std = params.dataset_std)
])
data = datasets.SVHN(root= params.svhn_path, split= 'test', transform = transform, download= True)
dataloader = DataLoader(dataset = data, batch_size= 1, shuffle= False)
#elif dataset == 'SynDig':
# transform = transforms.Compose([
# transforms.CenterCrop((28)),
# transforms.ToTensor(),
# transforms.Normalize(mean=params.dataset_mean, std=params.dataset_std)
# ])
#
# data = SynDig.SynDig(root= params.syndig_path, split= 'test', transform= transform, download= False)
#
# dataloader = DataLoader(dataset= data, batch_size= 1, shuffle= False)
elif dataset == 'dslr':
transform = transforms.Compose([
transforms.RandomCrop((224)),
transforms.ToTensor(),
transforms.Normalize(mean=params.dataset_mean, std=params.dataset_std)
])
data = datasets.ImageFolder(params.dslr_path + '/test', transform=transform)
dataloader = DataLoader(dataset=data, batch_size=params.batch_size, shuffle=True)
elif dataset == "art":
transform = transforms.Compose([
ResizeImage(256),
transforms.RandomCrop((224)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485,0.456,0.406], std=[0.229,0.224,0.225])
])
data = datasets.ImageFolder(params.art_path + '/test', transform=transform)
dataloader = DataLoader(dataset=data, batch_size=params.batch_size, shuffle=True)
elif dataset == "clipart":
transform = transforms.Compose([
ResizeImage(256),
transforms.RandomCrop((224)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
data = datasets.ImageFolder(params.clipart_path + '/test', transform=transform)
dataloader = DataLoader(dataset=data, batch_size=params.batch_size, shuffle=True)
else:
raise Exception('There is no dataset named {}'.format(str(dataset)))
return dataloader | 52e1570a7911bf9234e76c27b2156b1a7f358164 | 7,768 |
def push(
message,
user: str = None,
api_token: str = None,
device: str = None,
title: str = None,
url: str = None,
url_title: str = None,
priority: str = None,
timestamp: str = None,
sound: str = None,
) -> typing.Union[http.client.HTTPResponse, typing.BinaryIO]:
"""Pushes the notification.
API Reference: https://pushover.net/api
Args:
message: Your message
user: The user/group key (not e-mail address) of your user (or you),
viewable when logged into our dashboard (often referred to as
USER_KEY in our documentation and code examples)
api_token: Your application's API token
device: Your user's device name to send the message directly to that
device, rather than all of the user's devices
title: Your message's title, otherwise your app's name is used
url: A supplementary URL to show with your message
url_title: A title for your supplementary URL, otherwise just the URL
is shown
        priority: Send as -1 to always send as a quiet notification, 1 to
            display as high-priority and bypass the user's quiet
            hours, or 2 to also require confirmation from the user
timestamp: A Unix timestamp of your message's date and time to
display to the user, rather than the time your message is
received by our API
sound: The name of one of the sounds supported by device clients to
override the user's default sound choice
Returns:
HTTP response from API call
"""
if user is None or api_token is None:
user, api_token = get_credentials()
api_url = "https://api.pushover.net/1/messages.json"
if title is None:
if getattr(__main__, "__file__", None):
title = os.path.basename(__main__.__file__)
else:
title = "n8scripts"
payload_dict = {
"token": api_token,
"user": user,
"message": message,
"device": device,
"title": title,
"url": url,
"url_title": url_title,
"priority": priority,
"timestamp": timestamp,
"sound": sound,
}
payload = urllib.parse.urlencode({k: v for k, v in payload_dict.items() if v})
with urllib.request.urlopen(api_url, data=payload.encode()) as resp:
return resp | 3166be0bae5d21313cecaedf2fa8cf11c7bab0c5 | 7,770 |
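# Illustrative use of push() above (a sketch only: the user key, API token and
# message below are placeholders, not real credentials, and the call performs a
# live HTTP request to the Pushover API):
#
#     resp = push(
#         "Nightly backup finished",
#         user="<PUSHOVER_USER_KEY>",
#         api_token="<PUSHOVER_API_TOKEN>",
#         title="backup-job",
#         priority="1",
#     )
#     print(resp.status)  # 200 on success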
def add_init_or_construct(template, variable_slot, new_data, scope, add_location=-1):
"""Add init or construct statement."""
if isinstance(new_data, list):
template[variable_slot][scope].extend(new_data)
return template
if add_location < 0:
template[variable_slot][scope].append(new_data)
else:
template[variable_slot][scope].insert(add_location, new_data)
return template | 125bc4e34dff837372dbbdc70c69a08a1e83e176 | 7,772 |
import numpy as np
def im2col_indices(x, field_height, field_width, padding=1, stride=1):
""" An implementation of im2col based on some fancy indexing """
# Zero-pad the input
p = padding
x_padded = np.pad(x, ((0, 0), (0, 0), (p, p), (p, p)), mode='constant')
k, i, j = get_im2col_indices(x.shape, field_height, field_width, padding,
stride)
cols = x_padded[:, k, i, j]
C = x.shape[1]
cols = cols.transpose(1, 2, 0).reshape(field_height * field_width * C, -1)
return cols | b2f3f24b3a03ea70efbf5f41cbbfd61fe2bf8cbd | 7,773 |
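# Shape sketch for im2col_indices() above (assumes the companion
# get_im2col_indices() helper from the same module is available):
#
#     x = np.random.randn(2, 3, 4, 4)                      # (N, C, H, W)
#     cols = im2col_indices(x, 3, 3, padding=1, stride=1)
#     cols.shape                                           # -> (27, 32), i.e. (3*3*3, 2*4*4)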
import numpy as np
def nms_1d(src, win_size, file_duration):
"""1D Non maximum suppression
src: vector of length N
"""
pos = []
src_cnt = 0
max_ind = 0
ii = 0
ee = 0
width = src.shape[0]-1
while ii <= width:
if max_ind < (ii - win_size):
max_ind = ii - win_size
ee = np.minimum(ii + win_size, width)
while max_ind <= ee:
src_cnt += 1
if src[int(max_ind)] > src[int(ii)]:
break
max_ind += 1
if max_ind > ee:
pos.append(ii)
max_ind = ii+1
ii += win_size
ii += 1
    pos = np.asarray(pos).astype(int)
val = src[pos]
# remove peaks near the end
inds = (pos + win_size) < src.shape[0]
pos = pos[inds]
val = val[inds]
# set output to between 0 and 1, then put it in the correct time range
pos = pos / float(src.shape[0])
pos = pos*file_duration
return pos, val | 70f2e15ce4044095d74d02fd87e779a2d3b206c2 | 7,775 |
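# Minimal sketch of nms_1d() above on a made-up detection score vector: the
# local maxima at indices 1 and 4 survive suppression and are mapped into the
# [0, file_duration) time range.
_scores = np.array([0.1, 0.9, 0.2, 0.1, 0.8, 0.3, 0.05, 0.0])
_pos, _val = nms_1d(_scores, win_size=2, file_duration=1.0)
assert _pos.tolist() == [0.125, 0.5] and _val.tolist() == [0.9, 0.8]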
import numpy as np
import torch
import torchvision.utils as vutils
def tensor_text_to_canvas(image, text=None, col=8, scale=False):
"""
:param image: Tensor / numpy in shape of (N, C, H, W)
:param text: [str, ] * N
:param col:
:return: uint8 numpy of (H, W, C), in scale [0, 255]
"""
if scale:
image = image / 2 + 0.5
if torch.is_tensor(image):
image = image.cpu().detach().numpy()
image = write_text_on_image(image, text) # numpy (N, C, H, W) in scale [0, 1]
image = vutils.make_grid(torch.from_numpy(image), nrow=col) # (C, H, W)
image = image.numpy().transpose([1, 2, 0])
image = np.clip(255 * image, 0, 255).astype(np.uint8)
return image | 5c37d9b3e72d5df14d71fa88aff429081c1f5469 | 7,776 |
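# Shape sketch for tensor_text_to_canvas() above (assumes the companion
# write_text_on_image() helper is available): a batch of shape
# (N, C, H, W) = (8, 3, 64, 64) with col=8 is tiled into one row and returned
# as a single uint8 canvas of shape (H', W', 3) in the [0, 255] range.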
import six
def is_sequence(input):
"""Return a bool indicating whether input is a sequence.
Parameters
----------
input
The input object.
Returns
-------
bool
``True`` if input is a sequence otherwise ``False``.
"""
return (isinstance(input, six.collections_abc.Sequence) and
not isinstance(input, six.string_types)) | 1b11275843adaf32618a09d77ec6053039085b54 | 7,777 |
def auto_prefetch_relationship(name, prepare_related_queryset=noop, to_attr=None):
"""
Given the name of a relationship, return a prepare function which introspects the
relationship to discover its type and generates the correct set of
`select_related` and `include_fields` calls to apply to efficiently load it. A
queryset function may also be passed, which will be applied to the related
queryset.
This is by far the most complicated part of the entire library. The reason
it's so complicated is because Django's related object descriptors are
inconsistent: each type has a slightly different way of accessing its related
queryset, the name of the field on the other side of the relationship, etc.
"""
def prepare(queryset):
related_descriptor = getattr(queryset.model, name)
if type(related_descriptor) in (
ForwardOneToOneDescriptor,
ForwardManyToOneDescriptor,
):
return prefetch_forward_relationship(
name,
related_descriptor.field.related_model.objects.all(),
prepare_related_queryset,
to_attr,
)(queryset)
if type(related_descriptor) is ReverseOneToOneDescriptor:
return prefetch_reverse_relationship(
name,
related_descriptor.related.field.name,
related_descriptor.related.field.model.objects.all(),
prepare_related_queryset,
to_attr,
)(queryset)
if type(related_descriptor) is ReverseManyToOneDescriptor:
return prefetch_reverse_relationship(
name,
related_descriptor.rel.field.name,
related_descriptor.rel.field.model.objects.all(),
prepare_related_queryset,
to_attr,
)(queryset)
if type(related_descriptor) is ManyToManyDescriptor:
field = related_descriptor.rel.field
if related_descriptor.reverse:
related_queryset = field.model.objects.all()
else:
related_queryset = field.target_field.model.objects.all()
return prefetch_many_to_many_relationship(
name,
related_queryset,
prepare_related_queryset,
to_attr,
)(queryset)
return prepare | baac4ed7215c89311badb22bafe8365fd7be2263 | 7,779 |
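# Hypothetical usage sketch (the Book model and "author" relationship are
# made-up names for illustration, not from the original project):
#
#     prepare = auto_prefetch_relationship(
#         "author",
#         prepare_related_queryset=lambda qs: qs.only("id", "name"),
#     )
#     books = prepare(Book.objects.all())  # related authors load in one extra query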
def no_conjugate_member(magic_flag):
"""should not raise E1101 on something.conjugate"""
if magic_flag:
something = 1.0
else:
something = 1.0j
if isinstance(something, float):
return something
return something.conjugate() | 5e32d31aa907ac9de2bd153bbe61354207262409 | 7,780 |
def sub_ntt(f_ntt, g_ntt):
"""Substraction of two polynomials (NTT representation)."""
return sub_zq(f_ntt, g_ntt) | b2e538a00bb4b46e52258080ad9007358c82bc71 | 7,781 |
import numpy as np
def is_pareto_efficient(costs):
"""
Find the pareto-efficient points given an array of costs.
Parameters
----------
costs : np.ndarray
Array of shape (n_points, n_costs).
Returns
-------
    is_efficient_mask : np.ndarray (dtype: bool)
Array of which elements in costs are pareto-efficient.
"""
is_efficient = np.arange(costs.shape[0])
n_points = costs.shape[0]
next_point_index = 0 # Next index in the is_efficient array to search for
while next_point_index<len(costs):
nondominated_point_mask = np.any(costs<costs[next_point_index], axis=1)
nondominated_point_mask[next_point_index] = True
is_efficient = is_efficient[nondominated_point_mask] # Remove dominated points
costs = costs[nondominated_point_mask]
next_point_index = np.sum(nondominated_point_mask[:next_point_index])+1
is_efficient_mask = np.zeros(n_points, dtype=bool)
is_efficient_mask[is_efficient] = True
return is_efficient_mask | c7564cab171b833b84bf16a24242666f05022eb2 | 7,782 |
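# Quick check of is_pareto_efficient() above on three made-up 2-D cost points:
# (1, 2) and (2, 1) are mutually non-dominated, while (2, 3) is dominated by (1, 2).
_costs = np.array([[1.0, 2.0], [2.0, 1.0], [2.0, 3.0]])
assert is_pareto_efficient(_costs).tolist() == [True, True, False]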
def merge_dict(base, delta, merge_lists=False, skip_empty=False,
no_dupes=True, new_only=False):
"""
Recursively merges two dictionaries
including dictionaries within dictionaries.
Args:
base: Target for merge
delta: Dictionary to merge into base
merge_lists: if a list is found merge contents instead of replacing
        skip_empty: if an item in delta is empty, don't overwrite base
no_dupes: when merging lists deduplicate entries
new_only: only merge keys not yet in base
"""
for k, d in delta.items():
b = base.get(k)
if isinstance(d, dict) and isinstance(b, dict):
merge_dict(b, d, merge_lists, skip_empty, no_dupes, new_only)
else:
if new_only and k in base:
continue
if skip_empty and not d:
                # don't replace if new entry is empty
pass
elif all((isinstance(b, list), isinstance(d, list), merge_lists)):
if no_dupes:
base[k] += [item for item in d if item not in base[k]]
else:
base[k] += d
else:
base[k] = d
return base | 74b9d29a3137826319f10dbf6f86e65015c53659 | 7,783 |
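# Minimal sketch of merge_dict() above on two made-up config dicts, merging a
# nested value and deduplicating list entries.
_base = {"a": 1, "b": {"x": [1]}}
merge_dict(_base, {"b": {"x": [1, 2]}, "c": 3}, merge_lists=True)
assert _base == {"a": 1, "b": {"x": [1, 2]}, "c": 3}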
def redirect_or_error(opt, key, override=''):
"""
Tests if a redirect URL is available and redirects, or raises a
MissingRequiredSetting exception.
"""
r = (override or opt)
if r:
return redirect(r)
raise MissingRequiredSetting('%s.%s' % (
options.KEY_DATA_DICT, key)) | 2dbe71b8332b79c242108cc133fc51bf195fac8a | 7,784 |
import numpy as np
def stdev(df):
    """Calculate the standard deviation of the difference between the 'rate' and 'w1_rate' columns."""
return np.std(df['rate'] - df['w1_rate']) | 8f7d49548dac617855c9232af6aed7ec04e9b64c | 7,785 |
def add_to_cart(listing_id):
"""Adds listing to cart with specified quantity"""
listing = Listing.query.filter_by(id=listing_id, available=True).first()
if not listing:
abort(404)
if not request.json:
abort(400)
if ('quantity' not in request.json or
type(request.json['quantity']) is not int):
abort(400)
cart_item = CartItem.query.filter_by(
merchant_id=current_user.id,
listing_id=listing_id
).first()
new_quantity = request.json['quantity']
is_currently_incart = cart_item is not None
if new_quantity == 0 and is_currently_incart:
db.session.delete(cart_item)
elif new_quantity != 0 and is_currently_incart:
cart_item.quantity = new_quantity
elif new_quantity != 0 and not is_currently_incart:
db.session.add(
CartItem(
merchant_id=current_user.id,
listing_id=listing_id,
quantity=new_quantity
)
)
db.session.commit()
name = Listing.query.filter_by(id=listing_id).first().name
return jsonify({'quantity': new_quantity, 'name': name}) | ee21468f432374c81f4fe8aada92a6ff757d8d38 | 7,786 |
import pandas as pd
def get_zip_code_prefixes(df_geolocation: pd.DataFrame) -> pd.DataFrame:
    """
    Gets the first one, two, three and four digits of the zip codes as separate prefix columns.
    """
df = df_geolocation.copy()
df['geolocation_zip_code_prefix_1_digits'] = df['geolocation_zip_code_prefix'].str[0:1]
df['geolocation_zip_code_prefix_2_digits'] = df['geolocation_zip_code_prefix'].str[0:2]
df['geolocation_zip_code_prefix_3_digits'] = df['geolocation_zip_code_prefix'].str[0:3]
df['geolocation_zip_code_prefix_4_digits'] = df['geolocation_zip_code_prefix'].str[0:4]
return df | 8ba9ae223ba76871363b6c3ed452f157b8a848b0 | 7,787 |
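# Sketch with a single made-up zip code prefix: "01409" yields the 1- to
# 4-digit prefixes "0", "01", "014" and "0140" in the new columns.
_geo = pd.DataFrame({"geolocation_zip_code_prefix": ["01409"]})
_prefixes = get_zip_code_prefixes(_geo)
assert _prefixes["geolocation_zip_code_prefix_4_digits"].iloc[0] == "0140"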
def elina_scalar_infty(scalar):
"""
Return -1 if an ElinaScalar is -infinity, 0 if it is finite and 1 if it is +infinity.
Parameters
-----------
scalar : ElinaScalarPtr
Pointer to the ElinaScalar that needs to be tested for infinity.
Returns
-------
result : c_int
Integer stating the result of the testing.
"""
result = None
try:
elina_scalar_infty_c = elina_auxiliary_api.elina_scalar_infty
elina_scalar_infty_c.restype = c_int
elina_scalar_infty_c.argtypes = [ElinaScalarPtr]
result = elina_scalar_infty_c(scalar)
except:
print('Problem with loading/calling "elina_scalar_infty" from "libelinaux.so"')
print('Make sure you are passing ElinaScalarPtr to the function')
return result | 73d5dd7e552e94ce11f739386f225c3b2dbad741 | 7,788 |
import math
import numpy as np
def get_target_grid(return_type, **kwargs):
"""
    Function: get polar or cartesian coordinates of targets
Inputs:
    - return_type: str. "cart" for cartesian coordinates; "polar" for polar coordinates.
- kwargs: additional params.
- rel_points: dictionary. relative length for target positions and heel positions
Outputs:
    - if return cartesian coordinates: numpy array. x and y coordinates of targets in cartesian coordinates.
- if return polar coordinates: dictionary {type('c', 'l', 'h'):numpy array}. polar coordinates of target centers ('c')/lower bounds ('l')/upper bounds ('h')
"""
### unravel params.
if('rel_points' in kwargs.keys()):
rel_points = kwargs['rel_points']
### calculate ideal grid
#### before standardization
##### distance: normal
dT0T2 = dT0T5 = dT2T4 = dT4T5 = 1
dT0T4 = dT2T3 = (dT0T5 ** 2 + dT4T5 ** 2 -2*dT4T5*dT0T5*math.cos(math.radians(100)))**0.5
dT2T5 = dT3T7 = (dT0T5 ** 2 + dT4T5 ** 2 -2*dT0T2*dT0T5*math.cos(math.radians(80)))**0.5
dT0T3 = dT0T7 = ((dT2T5/2) ** 2 + (dT2T3*1.5) ** 2) ** 0.5
##### angles: normal
aT0T2 = math.radians(80)/2
aT0T5 = - math.radians(80)/2
aT0T3 = math.acos((dT0T3 ** 2 + dT0T7 ** 2 - dT3T7 ** 2)/(2*dT0T3*dT0T7))/2
aT0T7 = - aT0T3
aT0T4 = 0
##### target coordinates
T0 = np.array((0,0))
T2 = np.array((aT0T2, dT0T2))
T3 = np.array((aT0T3, dT0T3))
T4 = np.array((aT0T4, dT0T4))
T5 = np.array((aT0T5, dT0T2))
T7 = np.array((aT0T7, dT0T7))
target_grid_polar = np.stack((T0, T2, T3, T4, T5, T7), axis = 0)
target_grid_cart = np.zeros((6,2))
for i in range(6):
target_grid_cart[i,:] = polar_to_cartesian(target_grid_polar[i,1], target_grid_polar[i,0])
##### heel coordinates
alpha = 0.2354
a = 0.2957
b = 0.5
r_heels_cart = np.zeros((6,2))
r_heels_polar = np.zeros((6,2))
for n in range(1,7):
phi_n = -(alpha + (n-1)*(np.pi - 2*alpha)/5)
x = a*np.cos(phi_n)
y = b*np.sin(phi_n)
r, theta = cartesian_to_polar(-y, x)
r_heels_cart[n-1, :] = [-y,x]
r_heels_polar[n-1, :] = [theta, r]
##### intersect
c = my_help.line_intersection((r_heels_cart[2,:], target_grid_cart[2,:]),(r_heels_cart[3,:], target_grid_cart[5,:]))
#### after standardization
dTiC = np.zeros((6,1))
for i in range(1,6):
dTiC[i] = np.linalg.norm(target_grid_cart[i,:] - c)
dTiC = dTiC/dTiC[3]
aTiCT4 = np.zeros((6,1))
for i in range(1,6):
aTiCT4[i] = my_int.inner_angle(target_grid_cart[i,:] - c, target_grid_cart[3,:] - c, True)
if(i in [4,5]):
aTiCT4[i] = - aTiCT4[i]
### calculate output values
if(return_type == 'cart'):
grid_cart = np.zeros((6,2))
for i in range(1,6):
grid_cart[i,0],grid_cart[i,1] = polar_to_cartesian(dTiC[i][0], aTiCT4[i][0])
return grid_cart
elif(return_type == 'polar'):
target_grid_polar = {}
for t in ['c', 'l', 'h']:
T0 = np.array((aTiCT4[0], -rel_points[f'T0{t}']))
T2 = np.array((aTiCT4[1], rel_points[f'T2{t}']))
T3 = np.array((aTiCT4[2], rel_points[f'T3{t}']))
T4 = np.array((aTiCT4[3], rel_points[f'T4{t}']))
T5 = np.array((aTiCT4[4], rel_points[f'T5{t}']))
T3_ = np.array((aTiCT4[5], rel_points[f'T7{t}']))
C0 = np.array((aTiCT4[0], rel_points['center']))
target_grid_polar[t] = np.stack((T0, T2, T3, T4, T5, T3_, C0), axis = 0)
return target_grid_polar | d69bd81912502d0dde04fce0dc4a57201810f9df | 7,790 |
from datetime import datetime
def s2_filename_to_md(filename):
"""
This function converts the S2 filename into a small dict of metadata
:param filename:
:return: dict
"""
basename = system.basename(filename)
metadata = dict()
splits = basename.split("_")
if len(splits) < 4:
raise Exception("{} might not be a S2 product".format(filename))
metadata["tile"] = splits[3]
datestr = splits[1]
metadata["date"] = datetime.datetime.strptime(datestr[:-1], '%Y%m%d-%H%M%S-%f')
return metadata | 86c40009f915fd250091a68d52f79f5701f64270 | 7,791 |
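# Illustrative call of s2_filename_to_md() above (a sketch only: the product
# name below follows an assumed "<MISSION>_<DATE-TIME-MS>_<LEVEL>_<TILE>_..."
# underscore layout, and the helper system.basename() must be available):
#
#     md = s2_filename_to_md("SENTINEL2A_20200601-105917-511_L2A_T31TCJ_C_V2-2")
#     md["tile"]  # -> 'T31TCJ'; md["date"] is the parsed datetime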
from typing import Optional
def get_rest_api(id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRestApiResult:
"""
Resource Type definition for AWS::ApiGateway::RestApi
"""
__args__ = dict()
__args__['id'] = id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws-native:apigateway:getRestApi', __args__, opts=opts, typ=GetRestApiResult).value
return AwaitableGetRestApiResult(
api_key_source_type=__ret__.api_key_source_type,
binary_media_types=__ret__.binary_media_types,
body=__ret__.body,
body_s3_location=__ret__.body_s3_location,
clone_from=__ret__.clone_from,
description=__ret__.description,
disable_execute_api_endpoint=__ret__.disable_execute_api_endpoint,
endpoint_configuration=__ret__.endpoint_configuration,
fail_on_warnings=__ret__.fail_on_warnings,
id=__ret__.id,
minimum_compression_size=__ret__.minimum_compression_size,
mode=__ret__.mode,
name=__ret__.name,
parameters=__ret__.parameters,
policy=__ret__.policy,
root_resource_id=__ret__.root_resource_id,
tags=__ret__.tags) | 86edad14cb2ccd8b5da5316db8ac9b66a25dbddd | 7,793 |
def regroup_if_changed(group, op_list, name=None):
"""Creates a new group for op_list if it has changed.
Args:
group: The current group. It is returned if op_list is unchanged.
op_list: The list of operations to check.
name: The name to use if a new group is created.
Returns:
Either group or a new group (or if op_list is empty then no_op).
"""
has_deltas = isinstance(op_list, sequence_with_deltas.SequenceWithDeltas)
if (group is None or len(group.control_inputs) != len(op_list) or
(has_deltas and op_list.has_changed())):
if has_deltas:
op_list.mark()
if op_list:
return tf.group(*op_list, name=name)
else:
return tf.no_op(name=name)
else:
return group | f6a811e34ac79d2563906c4971fa23b7316a0976 | 7,795 |
def spike_train_order_profile(*args, **kwargs):
""" Computes the spike train order profile :math:`E(t)` of the given
spike trains. Returns the profile as a DiscreteFunction object.
Valid call structures::
spike_train_order_profile(st1, st2) # returns the bi-variate profile
spike_train_order_profile(st1, st2, st3) # multi-variate profile of 3
# spike trains
spike_trains = [st1, st2, st3, st4] # list of spike trains
spike_train_order_profile(spike_trains) # profile of the list of spike trains
spike_train_order_profile(spike_trains, indices=[0, 1]) # use only the spike trains
# given by the indices
    Additional arguments:
:param max_tau: Upper bound for coincidence window, `default=None`.
:param indices: list of indices defining which spike trains to use,
if None all given spike trains are used (default=None)
:returns: The spike train order profile :math:`E(t)`
:rtype: :class:`.DiscreteFunction`
"""
if len(args) == 1:
return spike_train_order_profile_multi(args[0], **kwargs)
elif len(args) == 2:
return spike_train_order_profile_bi(args[0], args[1], **kwargs)
else:
return spike_train_order_profile_multi(args, **kwargs) | ab57e5de52c0064ad691501d131e66ed5b230093 | 7,796 |
def home():
"""Home page."""
form = LoginForm(request.form)
with open("POSCAR", "r") as samplefile:
sample_input = samplefile.read()
inputs = InputForm()
current_app.logger.info("Hello from the home page!")
# Handle logging in
if request.method == "POST":
if form.validate_on_submit():
login_user(form.user)
flash("You are logged in.", "success")
redirect_url = request.args.get("next") or url_for("user.members")
return redirect(redirect_url)
else:
            flash_errors(form)
return render_template("public/home.html", form=form, inputs=inputs) | 0494bc54040677d6ae09992280afe8922141a93a | 7,797 |
def isUniqueSeq(objlist):
"""Check that list contains items only once"""
return len(set(objlist)) == len(objlist) | 4522c43967615dd54e261a229b05c742676c7f99 | 7,798 |
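# e.g. a quick sanity check of isUniqueSeq() above:
assert isUniqueSeq(["a", "b", "c"])
assert not isUniqueSeq(["a", "b", "a"])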
import torch
def compute_kld(confidences: torch.Tensor, reduction="mean") -> torch.Tensor:
"""
Args:
confidences (Tensor): a tensor of shape [N, M, K] of predicted confidences from ensembles.
reduction (str): specifies the reduction to apply to the output.
- none: no reduction will be applied,
- mean: the sum of the output will be divided by
the number of elements in the output.
Returns:
kld (Tensor): KL divergences for given confidences from ensembles.
- a tensor of shape [N,] when reduction is "none",
- a tensor of shape [,] when reduction is "mean".
"""
assert reduction in [
"none", "mean",
], f"Unknown reduction = \"{reduction}\""
kld = torch.zeros(confidences.size(0), device=confidences.device) # [N,]
ensemble_size = confidences.size(1)
if ensemble_size > 1:
pairs = []
for i in range(ensemble_size):
for j in range(ensemble_size):
pairs.append((i, j))
for (i, j) in pairs:
if i == j:
continue
kld += torch.nn.functional.kl_div(
confidences[:, i, :].log(),
confidences[:, j, :],
reduction="none", log_target=False,
).sum(1) # [N,]
kld = kld / (ensemble_size * (ensemble_size - 1))
if reduction == "mean":
kld = kld.mean() # [,]
return kld | 4fb57a18fdfae56dc04a2502c4cd21590bc31c93 | 7,799 |
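# Tiny sketch of compute_kld() above: one sample, two ensemble members over
# three classes (made-up probabilities). Identical members give zero divergence.
_conf = torch.tensor([[[0.7, 0.2, 0.1], [0.7, 0.2, 0.1]]])  # [N=1, M=2, K=3]
assert compute_kld(_conf).item() == 0.0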
def get_crypto_quote(symbol, info=None):
"""Gets information about a crypto including low price, high price, and open price
:param symbol: The crypto ticker.
:type symbol: str
:param info: Will filter the results to have a list of the values that correspond to key that matches info.
:type info: Optional[str]
:returns: [dict] If info parameter is left as None then the list will contain a dictionary of key/value pairs for each ticker. \
Otherwise, it will be a list of strings where the strings are the values of the key that corresponds to info.
:Dictionary Keys: * asset_currency
* display_only
* id
* max_order_size
* min_order_size
* min_order_price_increment
* min_order_quantity_increment
* name
* quote_currency
* symbol
* tradability
"""
id = get_crypto_info(symbol, info='id')
url = urls.crypto_quote(id)
data = helper.request_get(url)
return(helper.filter(data, info)) | db10e7493ef6e98ad0876d30357c6b5be8269776 | 7,800 |
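# Illustrative calls of get_crypto_quote() above (require an authenticated
# robin_stocks session; "BTC" is used only as an example symbol):
#
#     get_crypto_quote("BTC")                 # full quote dictionary
#     get_crypto_quote("BTC", info="symbol")  # just the symbol field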
import numpy as np
def data_split(*args, **kwargs):
"""A function to split a dataset into train, test, and optionally
validation datasets.
**Arguments**
- ***args** : arbitrary _numpy.ndarray_ datasets
- An arbitrary number of datasets, each required to have
the same number of elements, as numpy arrays.
- **train** : {_int_, _float_}
- If a float, the fraction of elements to include in the training
set. If an integer, the number of elements to include in the
training set. The value `-1` is special and means include the
remaining part of the dataset in the training dataset after
the test and (optionally) val parts have been removed
- **val** : {_int_, _float_}
- If a float, the fraction of elements to include in the validation
set. If an integer, the number of elements to include in the
validation set. The value `0` is special and means do not form
a validation set.
- **test** : {_int_, _float_}
- If a float, the fraction of elements to include in the test
set. If an integer, the number of elements to include in the
test set.
- **shuffle** : _bool_
- A flag to control whether the dataset is shuffled prior to
being split into parts.
**Returns**
- _list_
- A list of the split datasets in train, [val], test order. If
datasets `X`, `Y`, and `Z` were given as `args` (and assuming a
non-zero `val`), then [`X_train`, `X_val`, `X_test`, `Y_train`,
`Y_val`, `Y_test`, `Z_train`, `Z_val`, `Z_test`] will be returned.
"""
# handle valid kwargs
train, val, test = kwargs.pop('train', -1), kwargs.pop('val', 0.0), kwargs.pop('test', 0.1)
shuffle = kwargs.pop('shuffle', True)
if len(kwargs):
raise TypeError('following kwargs are invalid: {}'.format(kwargs))
# validity checks
if len(args) == 0:
raise RuntimeError('Need to pass at least one argument to data_split')
# check for consistent length
n_samples = len(args[0])
for arg in args[1:]:
assert len(arg) == n_samples, 'args to data_split have different length'
# determine numbers
num_val = int(n_samples*val) if val<=1 else val
num_test = int(n_samples*test) if test <=1 else test
num_train = n_samples - num_val - num_test if train==-1 else (int(n_samples*train) if train<=1 else train)
assert num_train >= 0, 'bad parameters: negative num_train'
assert num_train + num_val + num_test <= n_samples, 'too few samples for requested data split'
# calculate masks
perm = np.random.permutation(n_samples) if shuffle else np.arange(n_samples)
train_mask = perm[:num_train]
val_mask = perm[-num_val:]
test_mask = perm[num_train:num_train+num_test]
# apply masks
masks = [train_mask, val_mask, test_mask] if num_val > 0 else [train_mask, test_mask]
# return list of new datasets
return [arg[mask] for arg in args for mask in masks] | e2ad6e52d2868020cc0bcb960533348a1f93ed31 | 7,802 |
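# Quick sketch of data_split() above on two toy arrays of 10 samples: with
# val=0.2 and test=0.2 the split is 6 train / 2 val / 2 test for each array.
_X, _Y = np.arange(10), np.arange(10) * 2
_parts = data_split(_X, _Y, val=0.2, test=0.2)
assert [len(p) for p in _parts] == [6, 2, 2, 6, 2, 2]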