content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---|
import numpy as np

def get_transform_dest_array(output_size):
"""
Returns a destination array of the desired size. This is also used to define the
order of points necessary for cv2.getPerspectiveTransform: the order can change, but
it must remain consistent between these two arrays.
:param output_size: The size to make the output image ((width, height) tuple)
:return: The destination array, suitable to feed into cv2.getPerspectiveTransform
"""
bottom_right = [output_size[0] - 1, output_size[1] - 1]
bottom_left = [0, output_size[1] - 1]
top_left = [0, 0]
top_right = [output_size[0] - 1, 0]
return np.array(
[bottom_right, bottom_left, top_left, top_right],
dtype="float32") | 84f092b5f263f3dd65ea9dfb18890454666e982d | 3,648,931 |
from urllib.request import urlopen

def fetch(url):
    """
    Fetch the web page at the URL given by the url argument.
    The page encoding is taken from the Content-Type header.
    Returns: the HTML as a str
    """
    f = urlopen(url)
    # Get the encoding from the HTTP headers (default to utf-8 when not specified).
    encoding = f.info().get_content_charset(failobj="utf-8")
    html = f.read().decode(encoding)  # Decode to str using the detected encoding.
    return html | 31b69019f35e983a7a6c9d60b4367502b6540c56 | 3,648,932 |
def get_rounded_coordinates(point):
"""Helper to round coordinates for use in permalinks"""
return str(round(point.x, COORDINATE_ROUND)) + '%2C' + str(round(point.y, COORDINATE_ROUND)) | a707864e4b62a91e609b3674bdfe0de7fdddf154 | 3,648,934 |
def rgb_to_hls(image: np.ndarray, eps: float = 1e-8) -> np.ndarray:
"""Convert a RGB image to HLS. Image data is assumed to be in the range
of [0.0, 1.0].
Args:
image (np.ndarray[B, 3, H, W]):
RGB image to be converted to HLS.
eps (float):
Epsilon value to avoid div by zero.
Returns:
hls (np.ndarray[B, 3, H, W]):
HLS version of the image.
"""
return cv2.cvtColor(image, cv2.COLOR_RGB2HLS) | 841379110bd273a7a6239e3598656c46acbde583 | 3,648,935 |
def array_max_dynamic_range(arr):
"""
Returns an array scaled to a minimum value of 0 and a maximum value of 1.
"""
finite_arr = arr[np.isfinite(arr)]
low = np.nanmin(finite_arr)
high = np.nanmax(finite_arr)
return (arr - low)/(high - low) | b2182c43dea2981b3759119cf1381a82a9e168b1 | 3,648,936 |
def production(*args):
"""Creates a production rule or list of rules from the input.
Supports two kinds of input:
A parsed string of form "S->ABC" where S is a single character, and
ABC is a string of characters. S is the input symbol, ABC is the output
symbols.
Neither S nor ABC can be any of the characters "-", ">" for obvious
reasons.
A tuple of type (S, Seq, ...) where S is the symbol of some hashable
    type and Seq is a finite iterable representing the output symbols.
Naturally if you don't want to use characters/strings to represent
symbols then you'll typically need to use the second form.
    You can pass multiple inputs to generate multiple production rules;
    in that case the result is a list of rules, not a single rule.
    If you pass multiple inputs the symbols must differ since a simple
L-System only supports one production rule per symbol.
Example:
>>> production("F->Ab[]")
('F', ['A', 'b', '[', ']'])
>>> production("F->Ab[]", ("P", "bAz"), (1, (0,1)))
[('F', ['A', 'b', '[', ']']), ('P', ['b', 'A', 'z']), (1, [0, 1])]
"""
if len(args) < 1:
raise ValueError("missing arguments")
res = []
for a in args:
        if isinstance(a, str):
parts = a.split(sep="->", maxsplit=1)
if len(parts) < 2:
raise ValueError("couldn't parse invalid string \"{}\"".format(a))
res.append((parts[0], list(parts[1])))
        elif isinstance(a, tuple):
s, to, *vals = a
res.append((s, list(to)))
else:
raise TypeError("sorry don't know what to do with " + str(type(a)))
if len(res) == 1:
return res[0]
return res | bcb3e415a283f654ab65e0656a3c7e3912eeb53b | 3,648,937 |
def _unpack_compute(input_place, num, axis):
"""Unpack a tensor into `num` tensors along axis dimension."""
input_shape = get_shape(input_place)
for index, _ in enumerate(input_shape):
input_shape[index] = input_shape[index] if index != axis else 1
output_shape_list = [input_shape for i in range(num)]
offset = 0
out_tensor_list = []
for i, t_shape in enumerate(output_shape_list):
out_tensor = tvm.compute(
t_shape,
lambda *index, t_shape=t_shape:
input_place(*_index_offset(t_shape, axis, offset, *index)),
name='tensor' + str(i))
out_tensor_list.append(out_tensor)
offset = offset + 1
return tuple(out_tensor_list) | 89972e932d3c0bf3b5cbc548633dd2b2a6173c85 | 3,648,938 |
def flatten(items):
"""Convert a sequence of sequences to a single flat sequence.
    Works on nested lists and tuples.
"""
result = []
for item in items:
        if isinstance(item, (list, tuple)):
result += flatten(item)
else:
result.append(item)
return result | d44e3391f791dfd2ec9b323c37c510a415bb23bf | 3,648,939 |
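# Quick check of the flattening behaviour (strings are left intact):
print(flatten([1, (2, [3, 4]), 5]))   # [1, 2, 3, 4, 5]
print(flatten([[["a"], "b"], "cd"]))  # ['a', 'b', 'cd']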
from typing import Dict
def _datum_to_cap(datum: Dict) -> float:
"""Cap value of a datum."""
return _cap_str_to_mln_float(datum["cap"]) | 4554cb021f077e3b69495a6266a2596a968ee79d | 3,648,940 |
def add_eval_to_game(game: chess.pgn.Game, engine: chess.engine.SimpleEngine, analysis_time: float,
should_re_add_analysis: bool = False) -> chess.pgn.Game:
"""
MODIFIES "game" IN PLACE
"""
current_move = game
while len(current_move.variations):
if "eval" in current_move.comment and not should_re_add_analysis:
continue
score, actual_eval = get_score(current_move.board(), engine, analysis_time=analysis_time)
current_move.comment += f'[%eval {score}]'
if current_move.eval().pov(chess.WHITE) != actual_eval:
# assert not rounding error
assert abs(current_move.eval().pov(chess.WHITE).score() - actual_eval.score()) == 1, \
f"eval's not equal, not rounding error: {current_move.eval().pov(chess.WHITE)} != {actual_eval}"
current_move = current_move.variations[0]
return game | 72c677fc9f71cafca6af5b86cca69d896547835d | 3,648,941 |
def MC_no(a,b,N,pi,mp):
""" Monte Carlo simulation drawn from beta distribution for the uninsured agents
Args:
        a (integer): beta distribution parameter
        b (integer): beta distribution parameter
        N (integer): number of draws
        pi (float): premium (unused for the uninsured agents)
        mp (dict): model parameters; must contain income 'y'
Returns:
(numpy float): Monte Carlo integration that computes expected utility for given gamma and premium
"""
x = np.random.beta(a,b,N)
return np.mean(utility(mp['y']-x,mp)) | 7910a1894839eaac89af9df61a3f64fbb1eaf933 | 3,648,942 |
def get_conflicting_types(type, typedef_dict):
"""Finds typedefs defined in the same class that conflict. General algo
is: Find a type definition that is identical to type but for a
    different key. If the type definition is coming from a different
class, neglect it. This is a pretty slow function for large dictionaries."""
conflicting_types = []
if type in typedef_dict:
typedef = typedef_dict[type] # Look for an identical typedef mapped under a different key.
for key, value in typedef_dict.items():
if((typedef == value) and (type != key) and (type.rpartition("::")[0] == key.rpartition("::")[0])):
conflicting_types.append(key)
return conflicting_types | 5270ddfbf8a1f887de7ea9fcf2dcd32511ce6a32 | 3,648,943 |
from typing import Tuple
def extract_entity_type_and_name_from_uri(uri: str) -> Tuple[str, str]:
"""
    Extract the entity's type and name from an entity URI.
    :param uri: e.g. http://www.kg.com/kg/ontoligies/ifa#Firm/百度
:return: ('Firm', '百度')
"""
name_separator = uri.rfind('/')
type_separator = uri.rfind('#')
return uri[type_separator + 1: name_separator], uri[name_separator + 1:] | a70b1fdb5490f029cc6a88bee53eee048731a709 | 3,648,944 |
def ret_dict() -> dict:
"""
    Returns
    -------
    dict
        An empty dictionary.
    """
return {} | 79a29f69f5d0389d266f917500d25d696415c25a | 3,648,946 |
def load_rokdoc_well_markers(infile):
"""
Function to load well markers exported from RokDoc in ASCII format.
"""
with open(infile, 'r') as fd:
buf = fd.readlines()
marker = []
well = []
md = []
tvdkb = []
twt = []
tvdss = []
x = []
y = []
for line in buf[5:]:
c1, c2, c3, c4, c5 = line.split("'")
c6, c7, c8, c9, c10, c11 = c5.strip().split()
marker.append(c2)
well.append(c4)
md.append(float(c6))
tvdkb.append(float(c7))
twt.append(float(c8))
tvdss.append(float(c9))
x.append(float(c10))
y.append(float(c11))
markers = {}
for each in list(set(well)):
markers[each] = {}
for i in range(len(marker)):
cur_well = well[i]
cur_marker = marker[i]
cur_md = md[i]
cur_tvdkb = tvdkb[i]
cur_tvdss = tvdss[i]
cur_twt = twt[i]
cur_x = x[i]
cur_y = y[i]
markers[cur_well][cur_marker] = {'md': cur_md, 'tvdkb': cur_tvdkb,
'tvdss': cur_tvdss, 'twt': cur_twt,
'x': cur_x, 'y': cur_y}
return markers | f3a781accdd84ff2f5aff12e59aeff05aa428d6a | 3,648,947 |
def get_fees():
"""
Returns all information related to fees configured for the institution.
:returns: String containing xml or an lxml element.
"""
return get_anonymous('getFees') | 17d16c65d8aefa5989f0c371ed2db2527691ccf9 | 3,648,948 |
import torch
import torch.nn.functional as F
from typing import Tuple
def resample_uv_to_bbox(
predictor_output: DensePoseChartPredictorOutput,
labels: torch.Tensor,
box_xywh_abs: Tuple[int, int, int, int],
) -> torch.Tensor:
"""
Resamples U and V coordinate estimates for the given bounding box
Args:
predictor_output (DensePoseChartPredictorOutput): DensePose predictor
output to be resampled
labels (tensor [H, W] of uint8): labels obtained by resampling segmentation
outputs for the given bounding box
box_xywh_abs (tuple of 4 int): bounding box that corresponds to predictor outputs
Return:
Resampled U and V coordinates - a tensor [2, H, W] of float
"""
x, y, w, h = box_xywh_abs
w = max(int(w), 1)
h = max(int(h), 1)
u_bbox = F.interpolate(predictor_output.u, (h, w), mode="bilinear", align_corners=False)
v_bbox = F.interpolate(predictor_output.v, (h, w), mode="bilinear", align_corners=False)
uv = torch.zeros([2, h, w], dtype=torch.float32, device=predictor_output.u.device)
for part_id in range(1, u_bbox.size(1)):
uv[0][labels == part_id] = u_bbox[0, part_id][labels == part_id]
uv[1][labels == part_id] = v_bbox[0, part_id][labels == part_id]
return uv | 655fa330a0fb68d0a6f94084b1a4fde2e1368792 | 3,648,949 |
from typing import List
def get_error_code(output: int,
program: List[int]
) -> int:
"""
Determine what pair of inputs, "noun" and "verb", produces the output.
The inputs should be provided to the program by replacing the values
at addresses 1 and 2. The value placed in address 1 is called the "noun",
and the value placed in address 2 is called the "verb".
It returns the error code: 100 * noun + verb
Implementation options:
- By brute force, looping twice over 0-99
- Looping over the noun linearly, and using binary search for the verb,
      since all the values of the program are non-negative integers,
      making the output monotonic in the verb (IMPLEMENTED)
- Optimize the possible value intervals for both noun and verb checking
the possible min and max outputs for each pair
"""
# Reset the memory
program = program.copy()
# Linear loop over the noun
for noun in range(0, 100):
program[1] = noun
# Binary search over the verb
verb = binary_search_code(program, output)
# Return the code if found
if verb != -1:
return (100 * noun + verb)
raise ValueError('Code not found!') | a3ff93557217197c3988b1f2ffde0a114ec6de81 | 3,648,951 |
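# `binary_search_code` is not included above; a minimal sketch of what it could
# look like, assuming a hypothetical `run_program` Intcode interpreter whose
# output is monotonically non-decreasing in the verb stored at address 2:
from typing import List

def binary_search_code(program: List[int], output: int) -> int:
    """Hypothetical helper: binary-search the verb in [0, 99]."""
    lo, hi = 0, 99
    while lo <= hi:
        mid = (lo + hi) // 2
        candidate = program.copy()
        candidate[2] = mid
        result = run_program(candidate)  # assumed interpreter, not shown here
        if result == output:
            return mid
        if result < output:
            lo = mid + 1
        else:
            hi = mid - 1
    return -1  # not found for this noun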
def page(page_id):
"""Gets one page from the database."""
page = Page.objects.get(id=page_id)
return render_template('page.html', page=page) | 011d0d96564e328674c9e919eed3647c41ebb0a4 | 3,648,952 |
def compute_Csigma_from_alphaandC(TT,minT,alphaT,CT,ibrav=4):
"""
This function calculate the difference between the constant stress heat capacity
:math:`C_{\sigma}` and the constant strain heat capacity :math:`C_{\epsilon}`
from the *V* (obtained from the input lattice parameters *minT*, the thermal
expansion tensor *alphaT* and the elastic constant tensor *CT*, all as a function
of temperature. This is essentially the anisotropic equivalent of the equation
:math:`Cp - Cv = T V beta^2 B0` for the isotropic case (volume only)
and it avoids a further numerical derivation to obtain :math:`C_{\sigma}`.
It is however more complex in the anisotropic case since *minT*, *alphaT* and
in particul the elastic constant tensor *CT* must me known in principle
including their temperature dependence.
.. Warning::
Still very experimental...
"""
CT = CT / RY_KBAR
Csigma = np.zeros(len(TT))
for i in range(1,len(TT)):
V = compute_volume(minT[i],ibrav)
for l in range(0,6):
for m in range(0,6):
temp = alphaT[i,l] * CT[l,m] * alphaT[i,m]
Csigma[i] = V * TT[i] * temp # this is C_sigma-C_epsilon at a given T
return Csigma | 66159810aeeadd4614f66b6c7bc43a11a5ebf28d | 3,648,953 |
def services(request):
"""
"""
context = {}
services = Service.objects.filter(active=True, hidden=False)
context["services"] = services
context["services_nav"] = True
return render(request, "services.html", context) | 45792f2032236a74f8edd2141bf10a2dd7b6c075 | 3,648,954 |
def _create_presigned_url(method, object_name, duration_in_seconds=600):
"""
Create presigned S3 URL
"""
s3_client = boto3.client('s3',
endpoint_url=CONFIG.get('s3', 'url'),
aws_access_key_id=CONFIG.get('s3', 'access_key_id'),
aws_secret_access_key=CONFIG.get('s3', 'secret_access_key'))
if method == 'get':
try:
response = s3_client.generate_presigned_url('get_object',
Params={'Bucket':CONFIG.get('s3', 'bucket'), 'Key': object_name},
ExpiresIn=duration_in_seconds)
except Exception:
logger.critical('Unable to generate presigned url for get')
return None
else:
try:
response = s3_client.generate_presigned_url('put_object',
Params={'Bucket':CONFIG.get('s3', 'bucket'), 'Key':object_name},
ExpiresIn=duration_in_seconds,
HttpMethod='PUT')
except Exception:
logger.critical('Unable to generate presigned url for put')
return None
return response | f285d90058d3f6d450e82917d883f97100b78889 | 3,648,955 |
def read_data(model_parameters, ARGS):
"""Read the data from provided paths and assign it into lists"""
data = pd.read_pickle(ARGS.path_data)
y = pd.read_pickle(ARGS.path_target)['target'].values
data_output = [data['codes'].values]
if model_parameters.numeric_size:
data_output.append(data['numerics'].values)
if model_parameters.use_time:
data_output.append(data['to_event'].values)
return (data_output, y) | 66f89d87b22579f3d06a1d8f0faf0db4bc0fd13d | 3,648,956 |
def _is_src(file):
""" Returns true if the file is a source file
Bazel allows for headers in the srcs attributes, we need to filter them out.
Args:
file (File): The file to check.
"""
if file.extension in ["c", "cc", "cpp", "cxx", "C", "c++", "C++"] and \
file.is_source:
return True
return False | b0466073d4d1b05c5cab37946fb6ca8432dc752d | 3,648,957 |
def constructResponseObject(responsePassed):
"""
    constructs an error response object, even if the response passed in is None
    """
    if responsePassed is not None:
temp_resp = Response()
temp_resp.status_code = responsePassed.status_code or 404
if((temp_resp.status_code >= 200) and (temp_resp.status_code < 300)):
temp_resp.status_code = 404
temp_resp.reason = 'Bad Request'
details = 'UnexpectedError'
temp_resp.headers = {'Content-Type': 'text/html', 'Warning': details}
else:
temp_resp.reason = responsePassed.reason or 'Bad Request'
details = responsePassed.content or 'UnexpectedError'
temp_resp.headers = {'Content-Type': 'text/html', 'WWW-Authenticate': details}
else:
temp_resp = Response()
temp_resp.reason = 'Bad Request'
temp_resp.status_code = 404
details = 'UnexpectedError'
temp_resp.headers = {'Content-Type': 'text/html', 'WWW-Authenticate': details}
return temp_resp | e5f5aa0f87db30598e85fe66e8bf3062eac4388c | 3,648,958 |
def calculate_signal_strength(rssi):
# type: (int) -> int
"""Calculate the signal strength of access point."""
signal_strength = 0
if rssi >= -50:
signal_strength = 100
else:
signal_strength = 2 * (rssi + 100)
return signal_strength | d5a0955446e0fe0548639ddd1a849f7e7901c36b | 3,648,959 |
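# A few worked values: the formula maps RSSI in [-100, -50] dBm linearly onto [0, 100]:
assert calculate_signal_strength(-40) == 100   # anything >= -50 dBm saturates at 100
assert calculate_signal_strength(-75) == 50    # 2 * (-75 + 100)
assert calculate_signal_strength(-100) == 0    # 2 * (-100 + 100)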
from datetime import datetime, timezone
async def verify_email(token: str, auth: AuthJWT = Depends()):
"""Verify the user's email with the supplied token"""
# Manually assign the token value
auth._token = token # pylint: disable=protected-access
user = await User.by_email(auth.get_jwt_subject())
if user.email_confirmed_at is not None:
raise HTTPException(400, "Email is already verified")
if user.disabled:
raise HTTPException(400, "Your account is disabled")
user.email_confirmed_at = datetime.now(tz=timezone.utc)
await user.save()
return Response(status_code=200) | 4e6eebd22b6206fa20f9c03230c09b470c414740 | 3,648,960 |
def lookAtThisMethod(
first_parameter,
    second_parameter=None,
third_parameter=32,
fourth_parameter="a short string as default argument",
**kwargs
):
"""The point of this is see how it reformats parameters
It might be fun to see what goes on
Here I guess it should respect this spacing, since we are in a comment.
We are done!
"""
return kwargs["whatever"](
first_parameter * third_parameter,
        second_parameter,
fourth_parameter,
"extra string because I want to",
) | 8dab028b40184bb7cf686c524d5abd452cee2bc3 | 3,648,962 |
from typing import Sequence
from typing import Callable
from typing import List
from typing import Set
def data_incremental_benchmark(
benchmark_instance: GenericCLScenario,
experience_size: int,
shuffle: bool = False,
drop_last: bool = False,
split_streams: Sequence[str] = ("train",),
custom_split_strategy: Callable[
[ClassificationExperience], Sequence[AvalancheDataset]
] = None,
experience_factory: Callable[
[ClassificationStream, int], ClassificationExperience
] = None,
):
"""
High-level benchmark generator for a Data Incremental setup.
This generator accepts an existing benchmark instance and returns a version
of it in which experiences have been split in order to produce a
Data Incremental stream.
    In its base form this generator will split train experiences into experiences
    of a fixed, configurable size. The split can also be performed on other
streams (like the test one) if needed.
The `custom_split_strategy` parameter can be used if a more specific
splitting is required.
Beware that experience splitting is NOT executed in a lazy way. This
means that the splitting process takes place immediately. Consider
optimizing the split process for speed when using a custom splitting
strategy.
Please note that each mini-experience will have a task labels field
equal to the one of the originating experience.
The `complete_test_set_only` field of the resulting benchmark instance
will be `True` only if the same field of original benchmark instance is
`True` and if the resulting test stream contains exactly one experience.
:param benchmark_instance: The benchmark to split.
:param experience_size: The size of the experience, as an int. Ignored
if `custom_split_strategy` is used.
:param shuffle: If True, experiences will be split by first shuffling
instances in each experience. This will use the default PyTorch
random number generator at its current state. Defaults to False.
Ignored if `custom_split_strategy` is used.
:param drop_last: If True, if the last experience doesn't contain
`experience_size` instances, then the last experience will be dropped.
Defaults to False. Ignored if `custom_split_strategy` is used.
:param split_streams: The list of streams to split. By default only the
"train" stream will be split.
:param custom_split_strategy: A function that implements a custom splitting
strategy. The function must accept an experience and return a list
of datasets each describing an experience. Defaults to None, which means
that the standard splitting strategy will be used (which creates
experiences of size `experience_size`).
        A good starting point to understand the mechanism is to look at the
implementation of the standard splitting function
:func:`fixed_size_experience_split_strategy`.
:param experience_factory: The experience factory.
Defaults to :class:`GenericExperience`.
:return: The Data Incremental benchmark instance.
"""
split_strategy = custom_split_strategy
if split_strategy is None:
split_strategy = partial(
fixed_size_experience_split_strategy,
experience_size,
shuffle,
drop_last,
)
stream_definitions: TStreamsUserDict = dict(
benchmark_instance.stream_definitions
)
for stream_name in split_streams:
if stream_name not in stream_definitions:
raise ValueError(
f"Stream {stream_name} could not be found in the "
f"benchmark instance"
)
stream = getattr(benchmark_instance, f"{stream_name}_stream")
split_datasets: List[AvalancheDataset] = []
split_task_labels: List[Set[int]] = []
exp: ClassificationExperience
for exp in stream:
experiences = split_strategy(exp)
split_datasets += experiences
for _ in range(len(experiences)):
split_task_labels.append(set(exp.task_labels))
stream_def = StreamUserDef(
split_datasets,
split_task_labels,
stream_definitions[stream_name].origin_dataset,
False,
)
stream_definitions[stream_name] = stream_def
complete_test_set_only = (
benchmark_instance.complete_test_set_only
and len(stream_definitions["test"].exps_data) == 1
)
return GenericCLScenario(
stream_definitions=stream_definitions,
complete_test_set_only=complete_test_set_only,
experience_factory=experience_factory,
) | e24756245b3d6b5126d32fb66541e4cd23a993c2 | 3,648,963 |
import typing
def generate_doc_from_endpoints(
routes: typing.List[tornado.web.URLSpec],
*,
api_base_url,
description,
api_version,
title,
contact,
schemes,
security_definitions,
security
):
"""Generate doc based on routes"""
from tornado_swagger.model import export_swagger_models # pylint: disable=C0415
swagger_spec = {
"openapi": "3.0.0",
"info": {
"title": title,
"description": _clean_description(description),
"version": api_version,
},
"basePath": api_base_url,
"schemes": schemes,
"components": {
"schemas": export_swagger_models(),
},
"paths": _extract_paths(routes),
}
if contact:
swagger_spec["info"]["contact"] = {"name": contact}
if security_definitions:
swagger_spec["securityDefinitions"] = security_definitions
if security:
swagger_spec["security"] = security
return swagger_spec | 943a3c7a8bdd71bce92089c9dd89a7c262124dc0 | 3,648,964 |
def _filter_builds(build: Build) -> bool:
"""
Determine if build should be filtered.
:param build: Build to check.
    :return: True if the build should not be filtered out, i.e. its display name starts with "!".
"""
if build.display_name.startswith("!"):
return True
return False | c3bbc91595752b92b77034afcdd35d4a0b70f737 | 3,648,965 |
def load_transformers(model_name, skip_model=False):
"""Loads transformers config, tokenizer, and model."""
config = AutoConfig.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(
model_name,
add_prefix_space=True,
additional_special_tokens=('[T]', '[P]'),
)
    model = None
    if not skip_model:
        model = AutoModelForMaskedLM.from_pretrained(model_name, config=config)
return config, tokenizer, model | 49b8809745ba70b2a2a8ccd6063bbe2ea3acbae0 | 3,648,966 |
def build_test_data(data):
"""
Generates various features needed to predict
the class of the news.
Input: DataFrame
Returns Array of generated features.
"""
data = process(data)
generators = [
CountFeatureGenerator,
TfidfFeatureGenerator,
Word2VecFeatureGenerator,
SentimentFeatureGenerator,
ReadabilityFeatureGenerator
]
    # Call generators one by one to generate features
features = [feature for generator in generators for feature in generator(data)]
print("Total number of raw features: {}".format(len(features)))
# Stack and return the features
return np.hstack(features) | a4bd3af16deff190471ffbd6028cb47b314d498f | 3,648,967 |
def set_password_for_sub_account(account_id, password):
"""
Create a message to set the password for a given sub-account.
:param account_id: Integer representing the ID of the account
:param password: String representing the password for the sub-account
:return: Message (dict)
"""
data = sanitize(account_id=account_id, password=password)
msg = message(method=ACCOUNT_GET_SUB_ACCOUNTS)
params = {"sid": data["account_id"], "password": data["password"]}
return add_params_to_message(params, msg) | 02887e73bd11c551f472c033e256882206e9042a | 3,648,968 |
import random

def generate_batch(n, batch_size):
""" Generates a set of batch indices
Args:
n: total number of samples in set
batch_size: size of batch
Returns:
batch_index: a list of length batch_size containing randomly sampled indices
"""
    batch_index = random.sample(range(n), batch_size)
return batch_index | a8fe7d9356b30824210c89e2defa4b6bae697ffd | 3,648,970 |
def solve(si, y, infile):
"""Conducts the solution step, based on the dopri5 integrator in scipy
:param si: the simulation info object
:type si: SimInfo
:param y: the solution vector
:type y: np.ndarray
:param infile: the imported infile module
:type infile: imported module
"""
n = ode(f_n).set_integrator('dopri5')
    n.set_initial_value(y0_n(si), si.timer.t0.magnitude)
n.set_f_params(si)
th = ode(f_th).set_integrator('dopri5', nsteps=infile.nsteps)
th.set_initial_value(y0_th(si), si.timer.t0.magnitude)
th.set_f_params(si)
while (n.successful() and
n.t < si.timer.tf.magnitude and
th.t < si.timer.tf.magnitude):
si.timer.advance_one_timestep()
si.db.record_all()
n.integrate(si.timer.current_time().magnitude)
update_n(n.t, n.y, si)
th.integrate(si.timer.current_time().magnitude)
update_th(th.t, n.y, th.y, si)
return si.y | 079e97394befb39d6c65dc0c8a7eb2d57cf37920 | 3,648,971 |
def base(request, format=None):
"""Informational version endpoint."""
message = f"Welcome to {VERSION} of the Cannlytics API. Available endpoints:\n\n"
for endpoint in ENDPOINTS:
message += f"{endpoint}\n"
return Response({ "message": message}, content_type="application/json") | 51d8deaa6b5fda2b69fc8674e0d9d927d910c0ba | 3,648,972 |
def discover(discover_system: bool = True) -> Discovery:
"""
Discover Reliably capabilities from this extension.
"""
logger.info("Discovering capabilities from chaostoolkit-reliably")
discovery = initialize_discovery_result(
"chaostoolkit-reliably", __version__, "reliably"
)
discovery["activities"].extend(load_exported_activities())
return discovery | 78bb7bcb086d08099f5585c3e27452647ecb8d64 | 3,648,973 |
from collections import Counter
def frequent_word(message: str) -> str:
"""get frequent word."""
words = Counter(message.split())
result = max(words, key=words.get)
print(result)
return result | 86af88287a8874d824b96e1a96e430555db64f2e | 3,648,974 |
def parse_bjobs_nodes(output):
"""Parse and return the bjobs command run with
options to obtain node list, i.e. with `-w`.
This function parses and returns the nodes of
a job in a list with the duplicates removed.
:param output: output of the `bjobs -w` command
:type output: str
:return: compute nodes of the allocation or job
:rtype: list of str
"""
nodes = []
lines = output.split("\n")
nodes_str = lines[1].split()[5]
nodes = nodes_str.split(":")
return list(dict.fromkeys(nodes)) | a582307d0d869d2dbde454928571246320cb6e31 | 3,648,975 |
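# Sketch with made-up `bjobs -w` output (one header line plus one job row;
# EXEC_HOST is the colon-separated sixth whitespace-delimited column):
sample = (
    "JOBID USER STAT QUEUE FROM_HOST EXEC_HOST JOB_NAME SUBMIT_TIME\n"
    "1234 alice RUN normal login1 node1:node2:node1 myjob Oct1"
)
print(parse_bjobs_nodes(sample))  # ['node1', 'node2'] -- duplicates removed, order kept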
def find_nearest_array(array, array_comparison, tol = 1e-4):
"""
Find nearest array
@ In, array, array-like, the array to compare from
@ In, array_comparison, array-like, the array to compare to
    @ In, tol, float, the tolerance
    @ Out, indeces, np.ndarray(bool), mask of the entries of array that were matched
    @ Out, found, bool, True if every value in array_comparison was matched within tolerance
    """
array_comparison = np.asarray(array_comparison)
indeces = np.zeros(len(array), dtype=bool)
notFound = np.zeros(len(array), dtype=bool)
for val in array_comparison:
idx, diff = find_nearest(array, val)
        rel = np.abs(diff / val) if val != 0 else np.abs(diff)
if rel <= tol:
indeces[idx] = True
else:
notFound[idx] = True
return indeces, not np.any(notFound) | 5c15cb58d50eef03ae7bcffed18bc60587fec0fc | 3,648,976 |
from datetime import datetime
def create_block_statistics_on_addition(
block_hash: str,
block_hash_parent: str,
chain_name: str,
deploy_cost_total: int,
deploy_count: int,
deploy_gas_price_avg: int,
era_id: int,
height: int,
is_switch_block: bool,
network: str,
size_bytes: str,
state_root_hash: str,
status: int,
timestamp: datetime,
proposer: str,
) -> BlockStatistics:
"""Returns a domain object instance: BlockStatistics.
"""
return BlockStatistics(
block_hash = block_hash,
block_hash_parent = block_hash_parent,
chain_name = chain_name,
deploy_cost_total = deploy_cost_total,
deploy_count = deploy_count,
deploy_gas_price_avg = deploy_gas_price_avg,
era_id = era_id,
height = height,
is_switch_block = is_switch_block,
network = network,
size_bytes = size_bytes,
state_root_hash = state_root_hash,
status = status,
timestamp = timestamp,
proposer = proposer,
) | 921e7045ff0df080a3769d0e62753e0e5a13e4af | 3,648,978 |
def text(title='Text Request', label='', parent=None, **kwargs):
"""
Quick and easy access for getting text input. You do not have to have a
QApplication instance, as this will look for one.
:return: str, or None
"""
# -- Ensure we have a QApplication instance
q_app = qApp()
# -- Get the text
name, ok = Qt.QtWidgets.QInputDialog.getText(
parent,
title,
label,
**kwargs
)
if not ok:
return None
return name | c3d0c4fab15f6882fea614f5eec252738abc3e1c | 3,648,980 |
def get_key_score(chroma_vector, keys, key_index):
"""Returns the score of an approximated key, given the index of the key weights to try out"""
chroma_vector = np.rot90(chroma_vector,3)
chroma_vector = chroma_vector[0,:]
key_vector = keys[key_index,:]
score = np.dot(key_vector,chroma_vector)
return score | b41d4f3af4d621ba46b8786a5c906c470454fcc1 | 3,648,981 |
def app():
"""Create the test application."""
return flask_app | 01fbd44671a342be38560bc4c5b089a55214caf3 | 3,648,982 |
def coord_for(n, a=0, b=1):
"""Function that takes 3 parameters or arguments, listed above, and returns a list of the interval division coordinates."""
a=float(a)
b=float(b)
coords = []
inc = (b-a)/ n
for x in range(n+1):
coords.append(a+inc*x)
return coords | 57e12200dcc113786c9deeb4865d7906d74c763f | 3,648,983 |
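# For example, dividing [0, 1] into four equal pieces:
print(coord_for(4))        # [0.0, 0.25, 0.5, 0.75, 1.0]
print(coord_for(2, 1, 3))  # [1.0, 2.0, 3.0]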
def find_indeces_vector(transect_lons, transect_lats, model_lons, model_lats,
tols={
'NEMO': {'tol_lon': 0.104, 'tol_lat': 0.0388},
'GEM2.5': {'tol_lon': 0.016, 'tol_lat': 0.012},
}):
"""Find all indeces for the given vector
:arg transect_lons: Longitude of point 1.
:type lon1: float or :py:class:`numpy.ndarray`
:arg transect_lats: Latitude of point 1.
:type lat1: float or :py:class:`numpy.ndarray`
:arg model_lons: Longitude of point 2.
:type lon2: float or :py:class:`numpy.ndarray`
:arg model_lats: Latitude of point 2.
:type lat2: float or :py:class:`numpy.ndarray`
:returns: vector of i and j indices associated with the input lons and lats
:rtype: float or :py:class:`numpy.ndarray`
"""
transect_i = np.array([])
transect_j = np.array([])
for k in range(0,len(transect_lons)):
i, j = find_closest_model_point(transect_lons[k], transect_lats[k], model_lons, model_lats,tols=tols)
try:
transect_i = np.append(transect_i, int(i))
transect_j = np.append(transect_j, int(j))
        except (TypeError, ValueError):
transect_i = np.append(transect_i, np.nan)
transect_j = np.append(transect_j, np.nan)
return transect_i, transect_j | 93ad7a8cd16e154069618e606293e64ee3266501 | 3,648,984 |
def _make_prediction_ops(features, hparams, mode, num_output_classes):
"""Returns (predictions, predictions_for_loss)."""
del hparams, mode
logits = tf.layers.dense(
features, num_output_classes, name='logits')
confidences = tf.nn.softmax(logits)
confidence_of_max_prediction = tf.reduce_max(confidences, axis=-1)
predicted_index = tf.argmax(confidences, axis=-1)
predictions = {
'label': predicted_index,
'logits': logits,
'confidences': confidences,
'confidence_of_max_prediction': confidence_of_max_prediction
}
predictions_for_loss = logits
return predictions, predictions_for_loss | 0a58ab8753a39b3e9da67dfc7988383585e6562e | 3,648,985 |
def manhattan_loadings(
iteration,
gtf,
loadings,
title=None,
size=4,
hover_fields=None,
collect_all=False,
n_divisions=500,
):
"""modify hail manhattan plot"""
palette = [
'#1f77b4',
'#ff7f0e',
'#2ca02c',
'#d62728',
'#9467bd',
'#8c564b',
'#e377c2',
'#7f7f7f',
'#bcbd22',
'#17becf',
]
# add gene names, p-values, and locus info
loadings = loadings.annotate(gene_names=gtf[loadings.locus].gene_name)
pvals = hl.abs(loadings.loadings[iteration])
locus = loadings.locus
if hover_fields is None:
hover_fields = {}
hover_fields['locus'] = hl.str(locus)
hover_fields['gene'] = hl.str(loadings.gene_names)
source_pd = (
hl.plot.plots._collect_scatter_plot_data( # pylint: disable=protected-access
('_global_locus', locus.global_position()),
('_pval', pvals),
fields=hover_fields,
n_divisions=None if collect_all else n_divisions,
)
)
source_pd['p_value'] = source_pd['_pval']
source_pd['_contig'] = [locus.split(':')[0] for locus in source_pd['locus']]
observed_contigs = set(source_pd['_contig'])
ref = locus.dtype.reference_genome
observed_contigs = [
contig for contig in ref.contigs.copy() if contig in observed_contigs
]
contig_ticks = [
ref._contig_global_position(contig) # pylint: disable=protected-access
+ ref.contig_length(contig) // 2
for contig in observed_contigs
]
color_mapper = CategoricalColorMapper(
factors=ref.contigs, palette=palette[:2] * int((len(ref.contigs) + 1) / 2)
)
p = figure(
title=title, x_axis_label='Chromosome', y_axis_label='Loadings', width=1000
)
(
p,
_,
legend,
_,
_,
_,
) = hl.plot.plots._get_scatter_plot_elements( # pylint: disable=protected-access
p,
source_pd,
x_col='_global_locus',
y_col='_pval',
label_cols=['_contig'],
colors={'_contig': color_mapper},
size=size,
)
legend.visible = False
p.xaxis.ticker = contig_ticks
p.xaxis.major_label_overrides = dict(zip(contig_ticks, observed_contigs))
p.select_one(HoverTool).tooltips = [
t for t in p.select_one(HoverTool).tooltips if not t[0].startswith('_')
]
return p | 5f99c1f5a16ee35c056ef019870d2d89a31ba988 | 3,648,986 |
def preprocess_point(p, C):
"""Preprocess a single point (a clip).
WARN: NAN-preserving
Arguments:
p {ndarray} -- shape = (variable, C.joint_n, C.joint_d)
C {DDNetConfig} -- A Config object
Returns:
ndarray, ndarray -- X0, X1 to input to the net
"""
assert p.shape[1:] == (C.joint_n, C.joint_d)
    p = zoom(p, target_l=C.frame_l, joints_num=C.joint_n, joints_dim=C.joint_d)
    # interpolate to the right number of frames
assert p.shape == (C.frame_l, C.joint_n, C.joint_d)
M = get_CG(p, C)
return M, p | a1f2c1eda877562439c0b194490a6ac50df6bd81 | 3,648,987 |
def link_to_existing_user_by_email_if_backend_is_trusted(backend, details, user=None, *args, **kwargs):
"""Return user entry with same email address as one returned on details."""
if user or not _is_trusted_email_backend(backend):
return
email = details.get('email')
if email:
# try to link accounts registered with the same email address,
# only if it's a single object. AuthException is raised if multiple
# objects are returned
try:
return {'user': EmailAddress.objects.get(email=email).user}
except MultipleObjectsReturned:
raise AuthException(kwargs['backend'], 'Not unique email address.')
except ObjectDoesNotExist:
pass | 58f2363030ae0b8f8d3533dedbae6c7af304136c | 3,648,988 |
def get_global_threshold(image_gray, threshold_value=130):
""" 이미지에 Global Threshold 를 적용해서 흑백(Binary) 이미지객체를 반환합니다.
하나의 값(threshold_value)을 기준으로 이미지 전체에 적용하여 Threshold 를 적용합니다.
픽셀의 밝기 값이 기준 값 이상이면 흰색, 기준 값 이하이면 검정색을 적용합니다.
이 때 인자로 입력되는 이미지는 Gray-scale 이 적용된 2차원 이미지여야 합니다.
:param image_gray:
:param threshold_value: 이미지 전체에 Threshold 를 적용할 기준 값.
:return: Global Threshold 를 적용한 흑백(Binary) 이미지
"""
copy = image_gray.copy() # copy the image to be processed
_, binary_image = cv2.threshold(copy, threshold_value, 255, cv2.THRESH_BINARY)
return binary_image | 03c344a40c5a84027d790ddf404106efe29716e9 | 3,648,990 |
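# Minimal usage sketch (the file name is hypothetical):
import cv2

img_gray = cv2.imread("scanned_page.png", cv2.IMREAD_GRAYSCALE)
binary = get_global_threshold(img_gray, threshold_value=130)
cv2.imwrite("scanned_page_binary.png", binary)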
import torch
def get_batch(src_gen, trgt_gen, batch_size=10):
"""
    Return a batch of batch_size results as in get_rotated_src_target_spirals.
    Args:
        src_gen: generator producing source samples
        trgt_gen: generator producing target samples
        batch_size (int): number of samples in the batch
Return:
[torch.tensor,torch.tensor]: src and target batches
"""
batch_points = [src_gen.generate() for _ in range(batch_size)]
batch_targets = [trgt_gen.generate() for _ in range(batch_size)]
batch_points = [to_torch_tensor(i) for i in batch_points]
batch_targets = [to_torch_tensor(i) for i in batch_targets]
return torch.cat(batch_points), torch.cat(batch_targets) | 6e0e4549c12fb252c54496f7947e5787970b64eb | 3,648,991 |
def compute_crop_parameters(image_size, bbox, image_center=None):
"""
Computes the principal point and scaling factor for focal length given a square
bounding box crop of an image.
These intrinsic parameters are used to preserve the original principal point even
after cropping the image.
Args:
image_size (int or array): Size of image, either length of longer dimension or
            the array shape (H, W, C).
bbox: Square bounding box in xyxy (4,).
image_center: Center of projection/principal point (2,).
Returns:
principal_point: Coordinates in NDC using Pytorch3D convention with (1, 1)
as upper-left (2,).
crop_scale (float): Scaling factor for focal length.
"""
bbox = np.array(bbox)
b = max(bbox[2:] - bbox[:2])
if isinstance(image_size, int):
h = w = image_size
else:
h, w, *c = image_size
image_size = max(image_size)
if image_center is None:
image_center = np.array([w / 2, h / 2])
bbox_center = (bbox[:2] + bbox[2:]) / 2
crop_scale = b / image_size
principal_point = 2 * (bbox_center - image_center) / b
return principal_point, crop_scale | 18ca5822bf86fb01ff8652fc9239ce6ae4d2801f | 3,648,992 |
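# Worked example with made-up numbers: a 640x480 image (shape (480, 640, 3)) and
# the bounding box [100, 50, 300, 250], so b = 200 and bbox_center = (200, 150):
principal_point, crop_scale = compute_crop_parameters((480, 640, 3), [100, 50, 300, 250])
print(principal_point)  # [-1.2 -0.9] = 2 * ((200, 150) - (320, 240)) / 200
print(crop_scale)       # 0.3125     = 200 / 640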
def input_fn_builder(input_file, seq_length, num_labels, is_training,
drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
name_to_features = {
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
"label_ids": tf.FixedLenFeature([num_labels], tf.int64),
"is_real_example": tf.FixedLenFeature([], tf.int64),
}
def _cast_features(features):
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in features:
t = features[name]
if t.dtype == tf.int64:
t = tf.cast(t, tf.int32)
features[name] = t
return features
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
features = tf.parse_single_example(record, name_to_features)
return _cast_features(features)
def file_based_input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(
tf.data.experimental.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
def serving_input_receiver_fn():
"""An input_fn that expects a serialized tf.Example."""
serialized_tf_example = tf.placeholder(
dtype=tf.string,
name='input_example_tensor')
receiver_tensors = {'examples': serialized_tf_example}
features = tf.parse_example(serialized_tf_example, name_to_features)
features = _cast_features(features)
return tf.estimator.export.ServingInputReceiver(features, receiver_tensors)
if input_file is not None:
return file_based_input_fn
else:
return serving_input_receiver_fn | ad82ded8691561eb8f60b645497200cd36d94a21 | 3,648,993 |
def get_user_by_username(username):
"""Return User by username"""
try:
return User.objects.get(username=username)
except User.DoesNotExist:
return None | b6c676d22c7ef586392b20a072d2239c2dfce7e6 | 3,648,994 |
def get_xyz_to_rgb_matrix(name):
"""
    Compute the XYZ-to-RGB matrix.
    Made an internal function so that, for DCI-P3, the coefficients
    for a D65 white point can be returned.
"""
if name != "DCI-P3":
xyz_to_rgb_matrix = RGB_COLOURSPACES[name].XYZ_to_RGB_matrix
else:
rgb_to_xyz_matrix\
= calc_rgb_to_xyz_matrix(RGB_COLOURSPACES[DCI_P3].primaries,
xy_to_XYZ(ILLUMINANTS[CMFS_NAME]['D65']))
xyz_to_rgb_matrix = linalg.inv(rgb_to_xyz_matrix)
return xyz_to_rgb_matrix | 007b71f52af4e23ada073c712a840e05c0ac33a5 | 3,648,995 |
def find_bordering_snapnums(
snap_times_gyr,
dGyr=.005,
tmin=None,
tmax=None):
""" """
## handle default maximum time
tmax = snap_times_gyr[-1] if tmax is None else tmax
## handle default minimum time
if tmin is None:
tmin = snap_times_gyr[0]
## remove dGyr so that tmin is included in arange below
elif tmin - dGyr > 0:
tmin = tmin-dGyr
## create list of times, -1e-9 to avoid landing exactly on a snapshot number
times_gyr = np.arange(tmax,tmin,-dGyr)[::-1]-1e-9
inds_next = np.argmax((times_gyr - snap_times_gyr[:,None]) < 0 ,axis=0)
inds_prev = inds_next-1
return (
times_gyr,
np.array(list(zip(inds_prev,inds_next))),
np.array(list(zip(snap_times_gyr[inds_prev],snap_times_gyr[inds_next])))) | 23a58ecce4036d9b7a6a91991de237dd30b87129 | 3,648,996 |
def maxIterationComb(N,k,l):
"""
title::
maxIterationComb
description::
Compute N!/k!l!(N-k-l)! (max iterations).
attributes::
N
Number of targets (graph size)
k
Number of human patrollers
l
Number of drones
returns::
Resulting maximum iterations (integer).
author::
Elizabeth Bondi ([email protected])
Hoon Oh, Haifeng Xu, Kai Wang
disclaimer::
This source code is provided "as is" and without warranties as to
performance or merchantability. The author and/or distributors of
this source code may have made statements about this source code.
Any such statements do not constitute warranties and shall not be
relied on by the user in deciding whether to use this source code.
This source code is provided without any express or implied warranties
whatsoever. Because of the diversity of conditions and hardware under
which this source code may be used, no warranty of fitness for a
particular purpose is offered. The user is advised to test the source
code thoroughly before relying on it. The user must assume the entire
risk of using the source code.
"""
return int(comb(N,k)*comb(N-k,l)) | d0019a1498a50f3733474fb0212627d1b061484d | 3,648,997 |
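# Quick numerical check (assuming `comb` above is scipy.special.comb, which
# returns a float and would explain the int() cast): 10 targets, 2 patrollers,
# 1 drone gives C(10,2) * C(8,1) = 45 * 8 = 360 = 10! / (2! * 1! * 7!).
from scipy.special import comb  # assumed source of comb

assert int(comb(10, 2) * comb(10 - 2, 1)) == 360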
def create_project_details_list (project):
"""makes a projects details section for the html
Parameters
----------
project: HeatRecovery
A HeatRecovery object thats run function has been called
Returns
-------
dict
with values used by summary
"""
try:
costs = '${:,.0f}'.format(project.get_NPV_costs())
except ValueError:
costs = project.get_NPV_costs()
try:
benefits = '${:,.0f}'.format(project.get_NPV_benefits())
except ValueError:
benefits = project.get_NPV_benefits()
try:
net_benefits = '${:,.0f}'.format(project.get_NPV_net_benefit())
except ValueError:
net_benefits = project.get_NPV_net_benefit()
try:
BC = '{:,.1f}'.format(project.get_BC_ratio())
except ValueError:
BC = project.get_BC_ratio()
try:
source = "<a href='" + \
project.comp_specs['link'] + "'> link </a>"
except StandardError as e:
source = "unknown"
try:
notes = project.comp_specs['notes']
except StandardError as e:
notes = "N/a"
try:
potential_hr = '{:,.0f} gallons'.format(float(
project.comp_specs[
'proposed gallons diesel offset']))
except ValueError:
potential_hr =\
str(project.comp_specs[
'proposed gallons diesel offset'])
try:
dist = \
'{:,.0f} ft'.format(\
float(project.comp_specs['total feet piping needed']))
except ValueError:
dist = 'Unknown'
#~ print dist
return [
{'words':'Capital cost',
'value': costs},
{'words':'Lifetime energy cost savings',
'value': benefits},
{'words':'Net lifetime savings',
'value': net_benefits},
{'words':'Benefit-cost ratio',
'value': BC},
{'words':'Est. potential annual heating fuel gallons displaced',
'value': potential_hr},
{'words':'Number of buildings to be connected',
'value': str(project.comp_specs['estimate buildings to heat'])},
{'words':'Round-trip distance of piping',
'value': dist},
{'words':'Source',
'value': source},
{'words':'Notes',
'value': notes},
] | 74c68b0592939cc819091ac2d30bee44b455f27b | 3,648,998 |
def compute_cluster_top_objects_by_distance(precomputed_distances,
max_top_number=10,
object_clusters=None):
"""
Compute the most representative objects for each cluster
using the precomputed_distances.
Parameters
----------
precomputed_distances : np.array
array of shape (n_topics, n_objects) -
a matrix of pairwise distances: distance from ith cluster centroid to the jth object
max_top_number : int
maximum number of top objects of cluster (resulting number can be less than it)
(Default value = 10)
    object_clusters : np.array
        array of shape n_objects - precomputed clusters for objects
        (Default value = None)
    Returns
    -------
    clusters_top_objects : list of list of indexes
    """  # noqa: W291
# prediction for objects
if object_clusters is None:
object_clusters = predict_cluster_by_precomputed_distances(precomputed_distances)
# transformation from list to dict
clusters = transform_cluster_objects_list_to_dict(object_clusters)
n_topics = precomputed_distances.shape[0]
clusters_top_objects = []
for cluster_label in range(n_topics):
# cluster is empty
if cluster_label not in clusters.keys():
clusters_top_objects.append([])
continue
cluster_objects = np.array(clusters[cluster_label])
cluster_objects_to_center_distances = (
precomputed_distances[cluster_label][cluster_objects]
)
if max_top_number >= cluster_objects.shape[0]:
# cluster is too small; grab all objects
indexes_of_top_objects = np.arange(0, cluster_objects.shape[0])
else:
# filter by distance with partition
indexes_of_top_objects = np.argpartition(
cluster_objects_to_center_distances,
kth=max_top_number
)[:max_top_number]
distances_of_top_objects = cluster_objects_to_center_distances[indexes_of_top_objects]
top_objects = cluster_objects[indexes_of_top_objects]
# sorted partitioned array
indexes_of_top_objects_sorted_by_distance = np.argsort(distances_of_top_objects)
sorted_top_objects = top_objects[indexes_of_top_objects_sorted_by_distance]
clusters_top_objects.append(sorted_top_objects.tolist())
return clusters_top_objects | 2ff29d6b59d2db3d9e169a44c0addf42d9abea9b | 3,648,999 |
def validate_ints(*args):
""" validates that inputs are ints only """
for value in args:
if not isinstance(value, int):
return False
return True | e56ebf78e072731188b2c8282289d307fcfaabdf | 3,649,001 |
def smooth_l1_loss(y_true, y_pred):
"""
Computes the smooth-L1 loss.
Parameters
----------
y_true : tensor
Ground-truth targets of any shape.
y_pred : tensor
Estimates of same shape as y_true.
Returns
-------
loss : tensor
        The loss, summed over all elements from the last dim of y_true, i.e.,
same shape as y_true without the last dim.
"""
y_pred = tf.convert_to_tensor(y_pred)
y_true = tf.convert_to_tensor(y_true, dtype=y_pred.dtype)
diff = tf.math.abs(y_true - y_pred)
less_than_one = tf.cast(tf.math.less(diff, 1.0), y_pred.dtype)
# smooth l1 loss, loss.shape == y_true.shape
loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5)
# sum over all elements in the last dim
loss = tf.math.reduce_sum(loss, axis=-1)
return loss | dcf18b7d14feecdbc8b6637ccc3d35ff1880f9d2 | 3,649,002 |
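# Worked example: with y_true = [0, 0] and y_pred = [0.5, 2.0] the element losses
# are 0.5 * 0.5**2 = 0.125 (|diff| < 1) and 2.0 - 0.5 = 1.5 (|diff| >= 1),
# summing to 1.625 over the last dim:
import tensorflow as tf

loss = smooth_l1_loss(tf.constant([0.0, 0.0]), tf.constant([0.5, 2.0]))
print(float(loss))  # 1.625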
def get_class_occurrences(layer_types):
"""
    Takes in a numpy.ndarray of size (nb_points, 10) describing, for each point of
    the track, the type of cloud identified at each of the 10 heights, and returns
    a binary occurrence (cloud present/absent) as the label for each point.
    The height and cloud type information is then lost.
    """
    layer_types = np.array(layer_types)
    occurrences = np.zeros((layer_types.shape[0], 1))
    for i, labels in enumerate(layer_types):
        total = 0  # reset the count for every point
        for l in labels:
            # keep only cloud types (no 0 or -9)
            if l > 0:
                total += 1
        if total > 0:
            occurrences[i] = 1  # write into the array, not a rebound loop variable
return occurrences | 20c611089a50751e6a6b1d0ebaedc02474b04443 | 3,649,003 |
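# Quick check: the first point has one cloud layer (type 2), the second has none:
print(get_class_occurrences([[0, -9, 2, 0], [0, 0, 0, -9]]))  # [[1.] [0.]]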
def parse_duration(datestring):
"""
Parses an ISO 8601 durations into a float value containing seconds.
The following duration formats are supported:
-PnnW duration in weeks
-PnnYnnMnnDTnnHnnMnnS complete duration specification
    Years and months are not supported, values must be zero!
"""
if not isinstance(datestring, str):
raise TypeError("Expecting a string %r" % datestring)
match = ISO8601_PERIOD_REGEX.match(datestring)
if not match:
raise ValueError("Unable to parse duration string %r" % datestring)
groups = match.groupdict()
for key, val in groups.items():
if key not in ('separator', 'sign'):
if val is None:
groups[key] = "0n"
if key in ('years', 'months'):
groups[key] = Decimal(groups[key][:-1].replace(',', '.'))
else:
# these values are passed into a timedelta object,
# which works with floats.
groups[key] = float(groups[key][:-1].replace(',', '.'))
if groups["years"] != 0 or groups["months"] != 0:
raise ValueError("Unable to parse duration string %r (Non zero year or month)" % datestring)
else:
ret = timedelta(days=groups["days"], hours=groups["hours"],
minutes=groups["minutes"], seconds=groups["seconds"],
weeks=groups["weeks"])
if groups["sign"] == '-':
ret = timedelta(0) - ret
return ret.total_seconds() | 4fce243684fd2305198ae49693327a110f781a1d | 3,649,004 |
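# Example values for the supported formats:
print(parse_duration("PT1H30M"))   # 5400.0 seconds
print(parse_duration("P2W"))       # 1209600.0 seconds
print(parse_duration("-P1DT12H"))  # -129600.0 seconds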
import math
from functools import reduce
import operator
from typing import Counter
def knn(position, data_set, labels, k):
    """
    k-nearest-neighbours algorithm
    :param position: the point to classify
    :param data_set: the data samples
    :param labels: the collection of labels
    :param k: the value of k
    :return: the predicted label
    """
    distance_list = []
    for index, item in enumerate(data_set):
        distance_list.append((
            labels[index],
            math.sqrt(reduce(operator.add, [(v - position[i]) ** 2 for i, v in enumerate(item)]))
        ))
    # sort ascending by distance so the k nearest neighbours come first
    distance_list.sort(key=lambda x: x[1])
    result = Counter(val[0] for val in distance_list[:k])
    return result.most_common(1)[0][0] | 137f87c6a63fbafd3140694386dfd418108ad5b9 | 3,649,005 |
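# Small usage check: the three nearest neighbours of (1.1, 0.9) are A, A, B:
data_set = [[1.0, 1.0], [1.2, 0.8], [8.0, 8.0], [7.5, 8.2]]
labels = ['A', 'A', 'B', 'B']
print(knn([1.1, 0.9], data_set, labels, k=3))  # 'A'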
def _cals(raw):
"""Helper to deal with the .cals->._cals attribute change."""
try:
return raw._cals
except AttributeError:
return raw.cals | a08273a559b780022c04fe5d5d60a71c600fd481 | 3,649,006 |
def irnn_data_iterator(X, y, batch_size, math_engine):
"""Slices numpy arrays into batches and wraps them in blobs"""
def make_blob(data, math_engine):
"""Wraps numpy data into neoml blob"""
shape = data.shape
if len(shape) == 2: # data
# Wrap 2-D array into blob of (BatchWidth, Channels) shape
return neoml.Blob.asblob(math_engine, data,
(1, shape[0], 1, 1, 1, 1, shape[1]))
elif len(shape) == 1: # dense labels
# Wrap 1-D array into blob of (BatchWidth,) shape
return neoml.Blob.asblob(math_engine, data,
(1, shape[0], 1, 1, 1, 1, 1))
else:
assert(False)
start = 0
data_size = y.shape[0]
while start < data_size:
yield (make_blob(X[start : start+batch_size], math_engine),
make_blob(y[start : start+batch_size], math_engine))
start += batch_size | 5a6b6726d3d0f78929b2551b8720bfbbb39471eb | 3,649,007 |
def naive_forecast(series, steps_ahead=3, freq='D', series_name='naive'):
"""
    Naive forecast: extrapolates the series by repeating the last available observation.
INPUT:
:param series: pandas Series of data,
:param steps_ahead: number of steps into the future to predict, default is 3,
:param freq: (str) representation of a time frequency,
:param series_name: str
OUTPUT:
:return: series with extrapolated values equal to the last observation.
"""
obs = [series.iloc[-1] for _ in range(steps_ahead)]
future = set_future_series(forecasted_values=obs,
series_name=series_name,
last_date=series.index[-1],
steps_ahead=steps_ahead,
frequency=freq)
return future | 1b64edb39ab986d2e850ec5329fb2ed8ae5cd136 | 3,649,008 |
def show_current_task():
"""
    Show the task currently running in the backend.
:return:
"""
try:
current_user_name = session["user_name"]
current_user = RedisService.get_user(current_user_name)
current_task = TaskService.get_working_tasks(user_id=current_user.id)[0]
if current_task:
hook_rule = RedisService.get_task(current_task.id)["hook_rule"]
unscaned_url_num = UrlService.count(where=(Url.task_id == current_task.id, Url.status != TaskStatus.DONE))
scaned_url_num = UrlService.count(where=(Url.task_id == current_task.id, Url.status == TaskStatus.DONE))
total_url_num = unscaned_url_num + scaned_url_num
if current_task.task_status in [TaskStatus.KILLED, TaskStatus.DONE]:
percent = 100
else:
percent = 0 if total_url_num == 0 else (scaned_url_num / total_url_num) * 100
response_data = jsonify(status=200, message="查询成功",
data={'receiver_emails': current_task.receivers_email,
'task_name': current_task.task_name,
'create_time': current_task.created_time.strftime("%Y-%m-%d %H:%M"),
'percent': percent,
'unscaned_url_num': unscaned_url_num, 'scaned_url_num': scaned_url_num,
'total_url_num': total_url_num, 'hook_rule': hook_rule,
'task_id': current_task.id, "task_access_key": current_task.access_key,
'task_status': current_task.task_status, 'user_name': current_user_name})
return response_data
except Exception as e:
if isinstance(e, IndexError):
return jsonify(status=400, message="获取失败", data={"extra_info": "后台无正在运行任务,请登录后台并创建任务"})
logger.exception("show_current_task rasie error")
return jsonify(status=500, message="获取失败", data={"extra_info": "未知异常,可以联系管理员到后台查看"}) | 09e0077232343e46db606b9a3642bfb5d6b17a69 | 3,649,011 |
from rsc.service.ImageService import ImageService
def access_image(access_code:str):
"""
    Download the image
post header : {
Content-Type: application/json,
access_token: access_token from vans-token-manager
client_id: client_id from vans-token-manager conf. create by developers.
}
:return:
"""
try:
        # Fetch the image
service = ImageService()
image_data, mime = service.get_image(access_code)
data = json_res_success({"image":image_data, "mime":mime})
return render_json(data)
except Exception as e:
return E400(str(e)) | d1ac9efd9e4ba7c7fb9f63d2f0d188e09367fee9 | 3,649,012 |
def workaround_issue_20(handler):
"""
Workaround for
https://github.com/pytest-dev/pytest-services/issues/20,
disabling installation of a broken handler.
"""
return hasattr(handler, 'socket') | 20d688aedad9e771362d97ad9cac391e7dbfac32 | 3,649,013 |
def item_count(sequences, sequence_column_name):
"""
    Return the largest item id in the given sequence column.
    :param sequences: DataFrame whose sequence_column_name column holds lists of item ids
"""
item_max_id = sequences[sequence_column_name].map(max).max()
return int(item_max_id) | 9bcb64ff3389ef34ed297bca4f55b4de66ac5966 | 3,649,014 |
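# For example (hypothetical column name):
import pandas as pd

df = pd.DataFrame({"item_ids": [[1, 5, 3], [2, 7]]})
print(item_count(df, "item_ids"))  # 7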
def bare_stft(x: Tensor, padded_window: Tensor, hop_size: int) -> Tensor:
"""Compute STFT of real 1D signal.
This function does not handle padding of x, and the window tensor.
This function assumes fft_size = window_size.
Args:
x: [..., n_sample]
padded_window: [fft_size], a window padded to fft_size.
hop_size: Also referred to as the frame shift.
Returns:
n_frame: see frame_signal definition.
X: [..., n_frame, fft_size],
where n_frame = n_sample // hop_size
"""
fft_size = len(padded_window)
# Squash x's batch_sizes
batch_size = x.shape[:-1]
n_sample = x.size(-1)
squashed_x = x.reshape(-1, 1, n_sample)
# shape: [prod(batch_size), 1, n_sample]
framed_squashed_x = frame_signal(squashed_x, fft_size, hop_size)
# shape: [prod(batch_size), fft_size, n_frame]
windowed_framed_squashed_x = \
framed_squashed_x * padded_window.unsqueeze(-1)
squashed_X = fft(
windowed_framed_squashed_x.transpose(-1, -2), dim=-1
) # shape: [prod(batch_size), n_frame, fft_size]
X = squashed_X.reshape(*batch_size, *(squashed_X.shape[1:]))
# shape: [*batch_size, n_frame, fft_size]
return X | ccefbdc55478de91640a322735dd02f87a185578 | 3,649,015 |
def IsDragResultOk(*args, **kwargs):
"""IsDragResultOk(int res) -> bool"""
return _misc_.IsDragResultOk(*args, **kwargs) | 87e4c1968b3e5d7adbdc4cfb393481fca647beba | 3,649,016 |
def element_wise(counter_method):
"""This is a decorator function allowing multi-process/thread input.
Note that this decorator should always follow the decorator 'tag_maker'.
"""
def _make_iterator(*args):
"""Make a compound iterator from a process iterator and
a thread one.
Note that 'Application' case should not execute this
function."""
monitor_level = args[1]
arg_pid = args[2]
if hasattr(arg_pid, '__iter__'):
pid_itr = (i for i in xrange(arg_pid[0], arg_pid[1]))
else:
pid_itr = (arg_pid,)
if monitor_level == 'Thread':
arg_tid = args[3]
if hasattr(arg_tid, '__iter__'):
tid_itr = (i for i in xrange(arg_tid[0], arg_tid[1]))
else:
tid_itr = (arg_tid,)
if monitor_level == 'Process':
return_itr = pid_itr
elif monitor_level == 'Thread':
return_itr = (pid_itr, tid_itr)
return return_itr
@wraps(counter_method)
def _element_wise(*args):
"""Distribute multi-process/thread input"""
if args[1] == 'Thread':
pid_itr, tid_itr = _make_iterator(*args)
retval = [counter_method(args[0], args[1], pid, tid)
for pid, tid in product(pid_itr, tid_itr)]
return np.array(retval)
elif args[1] == 'Process':
pid_itr = _make_iterator(*args)
retval = [counter_method(args[0], args[1], pid) for pid in pid_itr]
return np.array(retval)
elif args[1] == 'Application':
return np.array(counter_method(*args))
else:
print 'Unknown monitor level'
return _element_wise | 44c00b9a40b7dba53dcfaac52bc866341a924d01 | 3,649,020 |
import requests
def get_datacite_dates(prefix):
"""Get sumbitted date for DataCite DOIs with specific prefix"""
doi_dates = {}
doi_urls = {}
url = (
"https://api.datacite.org/dois?query=prefix:"
+ prefix
+ "&page[cursor]=1&page[size]=500"
)
next_link = url
meta = requests.get(next_link).json()["meta"]
for j in progressbar(range(meta["totalPages"])):
r = requests.get(next_link)
data = r.json()
for doi in data["data"]:
date = doi["attributes"]["registered"].split("T")[0]
doi_dates[doi["id"]] = date
doi_urls[doi["id"]] = doi["attributes"]["url"]
if "next" in data["links"]:
next_link = data["links"]["next"]
else:
next_link = None
return doi_dates, doi_urls | 2b75cdfbb7c5f7085ab95f22ec601fbccdac07ea | 3,649,021 |
def rate_answer():
"""
**Rates an already given answer**
**Args:**
* json:
* {"insight" : String with the name of the Insight
* "paper_id" : String with the paper_id which is in our case the completet link to the paper
* "upvote" : Boolean if the answer was upvoted(= true) or downvoted (= false)
* "answer" : String with the Answer}
**Returns:**
* json:
* {'status': 'success'}
"""
response_object = {'status': 'success'}
#fetch data from request
post_data = request.get_json()
in_insight_name = post_data.get('insight')
in_paper_id = post_data.get('paper_id')
in_paper_id = url_checker(in_paper_id)
in_upvote = post_data.get('upvote')
in_answer = post_data.get('answer')
#query 'information'
inf = Information.query.filter(Information.paper_id == in_paper_id).filter(Information.insight_name==str(in_insight_name)).first()
#query 'answers'
ans = Answers.query.filter(Answers.information_id==inf.information_id).all()
#upvote correct answer
if (in_upvote):
for a in ans:
if (a.answer==in_answer):
a.answer_upvotes = a.answer_upvotes + 1
a.answer_score = a.answer_score + 1
    # downvote correct answer (a downvote counts once but weighs double in the score)
    else:
        for a in ans:
            if (a.answer == in_answer):
                a.answer_downvotes = a.answer_downvotes + 1
                a.answer_score = a.answer_score - 2
db.session.commit()
return jsonify(response_object) | 5efc00e015d2127f91462348050f2d445530690d | 3,649,022 |
from requests.exceptions import RequestException

# ConnectionError and ServiceError are assumed to be this package's own
# exception types, defined elsewhere in the module.

def get_ip():
"""
Query the ipify service (https://www.ipify.org) to retrieve this machine's
public IP address.
:rtype: string
:returns: The public IP address of this machine as a string.
:raises: ConnectionError if the request couldn't reach the ipify service,
or ServiceError if there was a problem getting the IP address from
ipify's service.
"""
try:
resp = _get_ip_resp()
except RequestException:
raise ConnectionError("The request failed because it wasn't able to reach the ipify service. This is most likely due to a networking error of some sort.")
    if resp.status_code != 200:
        raise ServiceError('Received an invalid status code from ipify: '
                           + str(resp.status_code) +
                           '. The service might be experiencing issues.')
return resp.text | d560c90986cc99be3ad07c0099743821104e514a | 3,649,023 |
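# Usage sketch, assuming this module defines the ConnectionError and
# ServiceError types referenced in the docstring:
try:
    print(get_ip())
except (ConnectionError, ServiceError) as exc:
    print("IP lookup failed:", exc)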
import ast

import matplotlib.pyplot as plt
from matplotlib.patches import Circle, YAArrow  # YAArrow exists only in older matplotlib releases
def update_plot(p1, p2, arrow, txt, ax, fig, reset_points, line):
"""
Given a line with an agent's move and the current plot, update
the plot based on the agent's move.
"""
l = line.strip()
if 'Agent score' in l:
txt.remove()
txt = plt.text(2, 33, 'Agent Score: {0:.2f}'.format(float(l.split()[2])),
fontsize=8)
reset_points = True
else:
p = ast.literal_eval(l[l.find('('):])
if 'actually at point' in l:
p1 = Circle(p, radius=0.2, facecolor='yellow')
ax.add_patch(p1)
elif 'actually attempting to reach point' in l:
p2 = Circle(p, radius=0.2, facecolor='green')
ax.add_patch(p2)
elif 'now at point' in l:
arrow = YAArrow(fig, p2.center, p1.center, width=0.1,
headwidth=0.5, facecolor='red')
ax.add_patch(arrow)
elif 'Resetting agent to point' in l:
p2 = Circle(p, radius=1, facecolor='green')
ax.add_patch(p2)
arrow = YAArrow(fig, p2.center, p1.center, width=0.25,
headwidth=1, facecolor='red')
ax.add_patch(arrow)
return p1, p2, arrow, txt, ax, fig, reset_points | 579b4f218fc17eae9b8595e916571e24eba27cb5 | 3,649,024 |
import csv

from django.contrib import messages
from django.core.files.storage import FileSystemStorage
from django.shortcuts import redirect, render

from .models import FilesStatus  # assumed app-local import for the model used below

ROW_COUNT = 0  # module-level counter mutated by the view
def upload_file_view(request):
"""Upload file page and retrieve headers"""
data = {}
global ROW_COUNT
if request.method == "GET":
return render(request, "pages/upload-file.html", data)
try:
if request.FILES:
csv_file = request.FILES['csv_file']
request.session['csv'] = str(csv_file)
if not csv_file.name.endswith('.csv'):
messages.error(request, 'File is not CSV type')
return redirect('upload-file')
decoded_file = csv_file.read().decode('utf-8').splitlines()
reader = csv.DictReader(decoded_file)
data['fieldnames'] = reader.fieldnames
data['filename'] = csv_file.name
fs = FileSystemStorage()
fs.save(csv_file.name, csv_file)
file = FilesStatus.objects.create(
user=request.user,
file_name=csv_file.name,
)
ROW_COUNT = sum(1 for row in reader)
request.session['file_status'] = file.id
else:
messages.error(request, 'No file was selected.')
return redirect('upload-file')
    except IOError:
        messages.error(request, 'Could not read file')  # messages.error returns None, so don't return it
        return redirect('upload-file')
    return render(request, 'pages/upload-file.html', data)
from typing import Optional
def get_user_by_login_identifier(user_login_identifier) -> Optional[UserSchema]:
"""Get a user by their login identifier.
:param str user_login_identifier: The user's login identifier, either their \
``email`` or ``display_name`` are valid inputs
:return: The discovered user if they exist
:rtype: Optional[UserSchema]
"""
user = get_user_by_email(user_email=user_login_identifier)
if not user:
return get_user_by_display_name(user_display_name=user_login_identifier)
return user | 821ba79ed56ec9b918dbec60af91e9b472cdb689 | 3,649,026 |
def decode_fixed64(buf, pos):
"""Decode a single 64 bit fixed-size value"""
return decode_struct(_fixed64_fmt, buf, pos) | 298df6d28f77132bac6d9924b9628af5efa940b5 | 3,649,027 |
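import struct

# Usage sketch (assumptions: _fixed64_fmt is a little-endian uint64 struct
# format such as '<Q', and decode_struct returns the unpacked value plus the
# new buffer position, per the usual unpack_from wrapper pattern):
buf = struct.pack("<Q", 300)
decode_fixed64(buf, 0)  # expected under those assumptions: value 300, pos 8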
def from_xfr(xfr, zone_factory=Zone, relativize=True):
"""Convert the output of a zone transfer generator into a zone object.
@param xfr: The xfr generator
@type xfr: generator of dns.message.Message objects
@param relativize: should names be relativized? The default is True.
It is essential that the relativize setting matches the one specified
to dns.query.xfr().
@type relativize: bool
@raises dns.zone.NoSOA: No SOA RR was found at the zone origin
@raises dns.zone.NoNS: No NS RRset was found at the zone origin
@rtype: dns.zone.Zone object
"""
z = None
for r in xfr:
if z is None:
if relativize:
origin = r.origin
else:
origin = r.answer[0].name
rdclass = r.answer[0].rdclass
z = zone_factory(origin, rdclass, relativize=relativize)
for rrset in r.answer:
znode = z.nodes.get(rrset.name)
if not znode:
znode = z.node_factory()
z.nodes[rrset.name] = znode
zrds = znode.find_rdataset(rrset.rdclass, rrset.rdtype,
rrset.covers, True)
zrds.update_ttl(rrset.ttl)
for rd in rrset:
rd.choose_relativity(z.origin, relativize)
zrds.add(rd)
z.check_origin()
return z | cc3aa11a8ff3dff6cf0609c3527daa193602b522 | 3,649,028 |
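import dns.query

# Usage sketch pairing from_xfr with dns.query.xfr; the server address and
# zone name below are placeholders:
zone = from_xfr(dns.query.xfr("10.0.0.1", "example.com"))
print(zone.origin)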
import datetime  # the module-style import matches datetime.datetime / datetime.timedelta below
def compare_sql_datetime_with_string(filter_on, date_string):
"""Filter an SQL query by a date or range of dates
Returns an SQLAlchemy `BinaryExpression` that can be used in a call to
`filter`.
`filter_on` should be an SQLAlchemy column expression that has a date or
datetime value.
`date_string` is a string that includes date(s) in format
`YYYY-MM-DD` and a range operator such as `>` or `<=`.
In full:
====================== ========================================
Query Description
====================== ========================================
YYYY-MM-DD Matches dates on day
>YYYY-MM-DD Matches dates after day
>=YYYY-MM-DD Matches dates on or after day
<YYYY-MM-DD Matches dates before day
<=YYYY-MM-DD Matches dates on or before day
YYYY-MM-DD..YYYY-MM-DD Matches dates between days (inclusively)
====================== ========================================
Examples:
>>> from app.models.main import AuditEvent
>>> # Equivalent to AuditEvent.created_at >= datetime.date(2012, 1, 1)
>>> compare_sql_datetime_with_string(AuditEvent.created_at, ">=2012-01-01")
<sqlalchemy.sql.elements.BinaryExpression object ...>
>>> # Equivalent to AuditEvent.created_at.between(datetime.date(2010, 1, 1), datetime.date(2019-01-31))
>>> AuditEvent.query.filter(
compare_sql_datetime_with_string(AuditEvent.created_at, "2010-01-01..2019-01-31"))
<app.models.main.AuditEvent.query_class object ...>
"""
filter_test = None
def parse_date(s):
return datetime.datetime.strptime(s, DATE_FORMAT)
if date_string.startswith(">="):
date = parse_date(date_string[2:])
filter_test = (filter_on >= date)
elif date_string.startswith(">"):
date = parse_date(date_string[1:])
filter_test = (filter_on > date)
elif date_string.startswith("<="):
date = parse_date(date_string[2:])
filter_test = (filter_on <= date)
elif date_string.startswith("<"):
date = parse_date(date_string[1:])
filter_test = (filter_on < date)
elif ".." in date_string:
args = date_string.partition("..")
from_ = parse_date(args[0])
to_ = parse_date(args[2])
filter_test = filter_on.between(from_, to_)
else:
date = parse_date(date_string)
filter_test = filter_on.between(date, date + datetime.timedelta(days=1))
return filter_test | 2938872ffdd3e30c2d364ae30e3a93fa560e55ef | 3,649,029 |
def get_users():
"""get_users() -> Fetch all users in the database"""
connect() # Connect
cursor.execute("SELECT * FROM users") # Select all users
item = cursor.fetchall()
users = []
for user in item:
users.append(format_user(user)) # Format the users
disconnect()
return users | e294406af8abbd0fa813beeadcd8b01552e4d206 | 3,649,030 |
def get_decoder_self_attention_bias(length):
"""Calculate bias for decoder that maintains model's autoregressive property.
Creates a tensor that masks out locations that correspond to illegal
connections, so prediction at position i cannot draw information from future
positions.
Args:
length: int length of sequences in batch.
Returns:
float tensor of shape [1, 1, length, length]
"""
with tf.name_scope("decoder_self_attention_bias"):
valid_locs = tf.linalg.band_part(tf.ones([length, length]), -1, 0)
valid_locs = tf.reshape(valid_locs, [1, 1, length, length])
decoder_bias = _NEG_INF * (1.0 - valid_locs)
return decoder_bias | a5de0984715cbc07c16c0ab5a5dc24edb7ca7602 | 3,649,031 |
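# Small worked example: for length=3 the bias is 0 on and below the diagonal
# and _NEG_INF (a large negative constant assumed to be defined elsewhere in
# the module) above it, so position i cannot attend to positions > i:
bias = get_decoder_self_attention_bias(3)
# bias[0, 0] ~ [[0, -inf, -inf],
#               [0,    0, -inf],
#               [0,    0,    0]]   (with -inf standing in for _NEG_INF)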
import math
from copy import deepcopy
from typing import Tuple, Union

import numpy as np
import torch
from torch import Tensor

# BoTorch pieces assumed by this snippet; exact module paths vary across
# BoTorch releases (e.g. SobolQMCNormalSampler has moved between versions).
from botorch.acquisition import ExpectedImprovement
from botorch.models.model import Model
from botorch.sampling import SobolQMCNormalSampler
def rollout_discrete(
x_grid: Tensor,
idx: Union[int, Tensor],
model: Model,
best_f: Union[float, Tensor],
bounds: Tensor,
quadrature: Union[str, Tuple] = "qmc",
horizon: int = 4,
num_y_samples: int = 10,
):
"""
continuous domain rollout, expectation estimated using (quasi) Monte Carlo or Gaussian-Hermite quadrature
EI_rollout(x) = E_y[ max(y-y0,0) + EI_rollout(x'| x, y) ], where x'=argmax EI(x' | x, y)
define f(y) = max(y-y0,0) + EI_rollout(x'|x,y)
then
EI_rollout(x) = \int w(y) f(y) dy
where the weight function w(y) is a Gaussian density function N(mu, sigma^2)
We can estimate this integral using quasi Monte Carlo samples from w(y)
or use Gauss-Hermite quadrature, as in Lam et al. (2016):
such a integration can be transformed into the standard Gaussian-Hermite quadrature formulation
EI_rollout(x) = 1/sqrt(pi) \int exp(-t^2) f(sqrt(2)*sigma*t+mu) dt, where t = (y-mu)/sqrt(2)/sigma
We first generate Gauss-Hermite quadrature sample locations t_i and weights w_i using numpy.polynomial.hermite.hermgauss
then estimate the expectation by
EI_rollout(x) \approx 1/sqrt(pi) \sum_i w_i f(sqrt(2)*sigma*t_i +mu)
:param x: a single point
:param model: the GP model
:param best_f: current best observed value
:param bounds: bounds of the domain, shape (2, d)
:param base_acquisition:
:param quadrature: Monte Carlo or Quasi Monte Carlo
:param horizon: rollout horizon
:param num_y_samples: number of (quasi) Monte Carlo samples for estimating the integral
:return:
"""
    x = x_grid[idx]
    if horizon == 1:
        acq_func = ExpectedImprovement(model=model, best_f=best_f)
        return acq_func(x).item()
# compute posterior
posterior = model.posterior(x)
    if isinstance(quadrature, str) and quadrature == "qmc":  # quasi-Monte Carlo
        with torch.no_grad():
            sampler = SobolQMCNormalSampler(num_samples=num_y_samples)
            samples = sampler(posterior).squeeze().numpy()
        # numpy weights, so they can be dotted with the numpy results below
        weights = np.full(num_y_samples, 1.0 / num_y_samples)
    elif isinstance(quadrature, tuple):  # Gauss-Hermite quadrature
        mu = posterior.mean.item()
        sigma = torch.sqrt(posterior.variance).item()
        samples, weights = np.polynomial.hermite.hermgauss(num_y_samples)
        samples = np.sqrt(2.0) * sigma * samples + mu
        weights /= np.sqrt(math.pi)
    else:
        raise ValueError("quadrature must be 'qmc' or a Gauss-Hermite spec tuple")
improvement_of_samples = np.zeros(num_y_samples)
for i in range(num_y_samples):
y_sample = samples[i]
one_step_improvement = max(y_sample - best_f, 0)
fake_model: Model = deepcopy(model)
x0 = model.train_inputs[0]
y0 = model.train_targets
train_x = torch.cat([x0, x.unsqueeze(0)], -2)
train_y = torch.cat([y0, Tensor([y_sample])])
fake_model.reinitialize(train_X=train_x, train_Y=train_y)
best_f_new = max(best_f, y_sample) # maximization problem
acq_func = ExpectedImprovement(model=fake_model, best_f=best_f_new)
        # evaluate EI over the whole candidate grid (the original `X` here was
        # an unresolved name); the extra q-batch dim assumes x_grid has shape
        # (n, d)
        ei_values = acq_func(x_grid.unsqueeze(1))
        idx_next = torch.argmax(ei_values)
        future_reward = rollout_discrete(
            x_grid,
            idx_next,
model=fake_model,
best_f=best_f_new,
bounds=bounds,
quadrature=quadrature,
horizon=horizon - 1,
num_y_samples=num_y_samples,
)
improvement_of_samples[i] = one_step_improvement + future_reward
return improvement_of_samples.dot(weights) | c400e0042a4ec236030d7e5d19615441b4cc0456 | 3,649,032 |
import itertools

import numpy as np
import torch
def get_accumulative_accuracies(test_loaders, taskcla, result_file, network_cls='resnet32'):
""" Confusion matrix with progressively more classes considered """
iter_model = iter_task_models(network_cls, taskcla, result_file)
accuracies = np.zeros((len(taskcla), len(taskcla)))
classes_so_far = 0.
for task_model, model in enumerate(iter_model):
for task_eval in range(0, task_model+1):
full_test_loader = itertools.chain.from_iterable(test_loaders[:task_eval+1])
with torch.no_grad():
totals = 0.
correct = 0.
logits_mask = np.arange(sum([taskcla[i][1] for i in range(0, task_eval+1)]))
for inputs, targets in full_test_loader:
inputs = inputs.to(device)
targets = targets.to(device)
outputs = torch.cat(model(inputs), dim=1)
outputs = outputs[:, logits_mask]
preds = outputs.argmax(dim=1)
correct += (preds == targets).double().sum()
totals += len(targets)
accuracies[task_model, task_eval] = correct/totals
return accuracies | 3b10f89b80318c3bb2776af3d336c2b87d2a623e | 3,649,033 |
import copy

import paddle
import paddle.nn as nn

def readout_oper(config):
"""get the layer to process the feature asnd the cls token
"""
class Drop(object):
"""drop class
just drop the cls token
"""
def __init__(self, config):
if 'ViT' in config.MODEL.ENCODER.TYPE:
self.token_num = 1
elif 'DeiT' in config.MODEL.ENCODER.TYPE:
self.token_num = 2
self.feature_size = (config.DATA.CROP_SIZE[0] // config.MODEL.TRANS.PATCH_SIZE,
config.DATA.CROP_SIZE[1] // config.MODEL.TRANS.PATCH_SIZE)
def __call__(self, x):
x = x[:, self.token_num:]
x = x.transpose((0, 2, 1))
x = x.reshape((x.shape[0], x.shape[1], self.feature_size[0], self.feature_size[1]))
return x
class Add(object):
"""add class
add the cls token
"""
def __init__(self, config):
if 'ViT' in config.MODEL.ENCODER.TYPE:
self.token_num = 1
elif 'DeiT' in config.MODEL.ENCODER.TYPE:
self.token_num = 2
self.feature_size = (config.DATA.CROP_SIZE[0] // config.MODEL.TRANS.PATCH_SIZE,
config.DATA.CROP_SIZE[1] // config.MODEL.TRANS.PATCH_SIZE)
def __call__(self, x):
token = x[:, :self.token_num]
token = paddle.sum(token, axis=1).unsqueeze(1)
x = x[:, self.token_num:]
x = x + token
x = x.transpose((0, 2, 1))
x = x.reshape((x.shape[0], x.shape[1], self.feature_size[0], self.feature_size[1]))
return x
class Proj(nn.Layer):
"""porject class
use a linear layer to confuse the feature and the cls token
"""
def __init__(self, config):
super(Proj, self).__init__()
if 'ViT' in config.MODEL.ENCODER.TYPE:
self.token_num = 1
elif 'DeiT' in config.MODEL.ENCODER.TYPE:
self.token_num = 2
self.feature_size = (config.DATA.CROP_SIZE[0] // config.MODEL.TRANS.PATCH_SIZE,
config.DATA.CROP_SIZE[1] // config.MODEL.TRANS.PATCH_SIZE)
self.proj = nn.Sequential(
nn.Linear(2 * config.MODEL.TRANS.HIDDEN_SIZE, config.MODEL.TRANS.HIDDEN_SIZE),
nn.GELU()
)
def forward(self, x):
token = x[:, :self.token_num]
token = paddle.sum(token, axis=1).unsqueeze(1)
x = x[:, self.token_num:]
token = token.expand_as(x)
x = paddle.concat([x, token], axis=-1)
x = self.proj(x)
x = x.transpose((0, 2, 1))
x = x.reshape((x.shape[0], x.shape[1], self.feature_size[0], self.feature_size[1]))
return x
if config.MODEL.DPT.READOUT_PROCESS == 'drop':
return [copy.deepcopy(Drop(config)) for _ in range(4)]
if config.MODEL.DPT.READOUT_PROCESS == 'add':
return [copy.deepcopy(Add(config)) for _ in range(4)]
if config.MODEL.DPT.READOUT_PROCESS =='project':
return nn.LayerList([copy.deepcopy(Proj(config)) for _ in range(4)])
return None | 36d682851e24535b7b000ae1b343bb90ca2077d6 | 3,649,034 |
import json
def stream_n_messages(request, n):
"""Stream n JSON messages"""
    n = min(int(n), 100)  # cap the stream at 100 messages
    response = get_dict(request, 'url', 'args', 'headers', 'origin')
def generate_stream():
for i in range(n):
response['id'] = i
yield json.dumps(response, default=json_dumps_default) + '\n'
return Response(generate_stream(), headers={
"Content-Type": "application/json",
}) | ea8ec1dd939cc43baa3367696f44956b2aafa780 | 3,649,035 |
import pandas as pd

def read_covid():
"""Read parsed covid table"""
return pd.read_csv(_COVID_FILE, parse_dates=["date"]) | fd99256808be1772106260b1da47850de0584adb | 3,649,036 |
def define_network(*addr):
"""gives all network related data or host addresses if requested
addr = tuple of arguments netaddr/mask[nb of requested hosts]
"""
if len(addr) == 2:
# provides list of host-addresses for this subnet
# we do this by calling the generator host_g
host_g = addr[0].hosts()
return [next(host_g).exploded for i in range(addr[1])]
else:
netdef = [(' Network Address:',
addr[0].network_address.exploded),
(' Broadcast Address:',
addr[0].broadcast_address.exploded),
(' Valid Hosts:', 2 ** (32 - addr[0].prefixlen)-2),
(' Wildcard Mask:', addr[0].hostmask.exploded),
(' Mask bits:', addr[0].prefixlen),
]
return [(' '+addr[0].network_address.exploded+'/32', '')] \
if addr[0].prefixlen == 32 else netdef | 905cf702fda005645c608b9dadb84f3659d991c1 | 3,649,038 |
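import ipaddress

# Usage sketch with a sample subnet:
net = ipaddress.ip_network("192.168.1.0/28")
define_network(net)      # [(' Network Address:', '192.168.1.0'), ...]
define_network(net, 3)   # ['192.168.1.1', '192.168.1.2', '192.168.1.3']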
from typing import List
def init_anim() -> List:
"""Initialize the animation."""
return [] | 121fff8b4102c2961449d970307e762bd983bdbe | 3,649,039 |
def keep_digits(txt: str) -> str:
"""Discard from ``txt`` all non-numeric characters."""
return "".join(filter(str.isdigit, txt)) | 34387003ea03651dd2582b3c49f1095c5589167b | 3,649,040 |
import re
def camel_case_split(identifier):
"""Split camelCase function names to tokens.
Args:
identifier (str): Identifier to split
Returns:
        (list): split tokens with original casing preserved, e.g. ['camel', 'Case']
"""
matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', identifier)
return [m.group(0) for m in matches] | f212bbe5cc33cb31bea023f726abf60a1491b7df | 3,649,041 |
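# Example: the lookarounds split before each case transition while keeping
# the original casing of every token.
assert camel_case_split("camelCaseHTTPSplit") == ["camel", "Case", "HTTP", "Split"]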
from typing import Any
def _ensure_meadowrun_sqs_access_policy(iam_client: Any) -> str:
"""
Creates a policy that gives permission to read/write SQS queues for use with
grid_task_queue.py
"""
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/iam.html#IAM.Client.create_policy
ignore_boto3_error_code(
lambda: iam_client.create_policy(
PolicyName=_MEADOWRUN_SQS_ACCESS_POLICY_NAME,
PolicyDocument=_MEADOWRUN_SQS_ACCESS_POLICY_DOCUMENT,
),
"EntityAlreadyExists",
)
return (
f"arn:aws:iam::{_get_account_number()}:policy/"
f"{_MEADOWRUN_SQS_ACCESS_POLICY_NAME}"
) | 6bafad83dcf0ed82eac626ad36b9d73416757f37 | 3,649,042 |
import numpy as np
import pandas as pd

def get_subset(dframe, strata, subsetno):
"""This function extracts a subset of the data"""
df_subset = pd.DataFrame(columns=list(dframe)) #initialize
df_real = dframe.dropna() #get rid of nans
edges = np.linspace(0, 1, strata+1) #edges of data strata
for i in range(0, strata):
df_temp = df_real[(df_real['gender diversity score'] > edges[i]) &
(df_real['gender diversity score'] < edges[i+1])]
            # integer division + cast: np.linspace needs an int sample count
            # and .iloc needs integer positions
            temp_ind = np.round(np.linspace(0, len(df_temp)-1,
                                            subsetno // strata)).astype(int)
df_subset = pd.concat([df_subset,
df_temp.sort_values(by=['gender diversity score']).
iloc[temp_ind, :].reset_index(drop=True)], ignore_index=True)
return df_subset | c1657cbce23bb222f68bc5b7efe3fe54dfcc26bd | 3,649,043 |
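# Usage sketch (hypothetical frame): 5 strata and a 20-row subset give
# 4 rows per stratum, spread evenly across each score band.
df = pd.DataFrame({"gender diversity score": np.random.rand(100)})
subset = get_subset(df, strata=5, subsetno=20)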
import logging
def create_logger(name: str) -> logging.Logger:
"""Create logger, adding the common handler."""
if name is None:
raise TypeError("name is None")
logger = logging.getLogger(name)
# Should be unique
logger.addHandler(_LOGGING_HANDLER)
return logger | c4c888345586718f8b476368ef118656d9650469 | 3,649,044 |
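# Usage sketch; _LOGGING_HANDLER is module-level state assumed to be
# configured elsewhere:
log = create_logger(__name__)
log.warning("wired to the shared handler")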
def say_hello():
""" Say hello """
return utils.jsonify_success({
'message': 'Hello {}! You are logged in.'.format(current_user.email)
}) | 52c0f572c0bd521a3ab12f1781d39c37130a78d3 | 3,649,045 |