content (stringlengths 35-762k) | sha1 (stringlengths 40-40) | id (int64 0-3.66M)
---|---|---|
import io
import torch
from PIL import Image
from torchvision import transforms
def load_image_buffer_to_tensor(image_buf, device):
"""Maps image bytes buffer to tensor
Args:
image_buf (bytes buffer): The image bytes buffer
device (object): The pytorch device object
Returns:
py_tensor tensor: Pytorch tensor
"""
image = Image.open(io.BytesIO(image_buf))
preprocess = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
),
])
input_tensor = preprocess(image)
input_batch = input_tensor.unsqueeze(0)
return input_batch.to(device, torch.float) | 43740d2f9b7eec64f54111e85e0a54787afc8100 | 7,200 |
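A minimal usage sketch for the loader above, assuming the same PIL/torchvision environment; the file name is purely illustrative:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
with open("example.jpg", "rb") as f:  # hypothetical image file
    image_buf = f.read()
batch = load_image_buffer_to_tensor(image_buf, device)
print(batch.shape)  # torch.Size([1, 3, H, W]), float tensor on the chosen device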
def alpha2tand(freq, a, b, n):
"""Convert Halpern's 'a' and 'b' from an absorption coefficient
of the form `a*freq**b` to a (frequency-dependent) loss tangent.
Parameters
----------
freq : numpy array or float
The frequency (Hz) (or frequencies) at which to calculate the loss
tangent.
a : float
Halpern's 'a' coefficient
b : float
Halpern's 'b' coefficient
n : float
The real part of the material's refractive index
Returns
-------
tand : numpy array
The loss tangent of the material at the given frequency and
Halpern coefficients.
"""
imagn = alpha2imagn(freq, a, b, n)
# The complex index of refraction of a material is related to the
# complex (relative) permittivity by the relation:
# e_r = e' + i*e'' = n^2 = (n + i*k)^2 = n^2 - k^2 + i*2nk
# By equating the real and imaginary parts we are left with:
# e' = (n^2 - k^2); e'' = 2nk
# With this information we can find the loss tangent, which is simply
# the ratio of the real and imaginary parts of the relative
# permittivity:
# tand = (e''/e')
ep = n**2 - imagn**2
epp = 2 * n * imagn
tand = epp / ep
return tand | 2acf658e7d18a0e115ba557698cc4efd591ed26d | 7,201 |
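A minimal usage sketch (not from the source), assuming `alpha2imagn` from the same module is available, frequencies are given in Hz, and the coefficients below are purely illustrative:
import numpy as np

freq = np.array([90e9, 150e9, 220e9])  # hypothetical band centres, Hz
a, b, n = 1.0e-26, 2.0, 3.1            # illustrative Halpern coefficients and real index
tand = alpha2tand(freq, a, b, n)       # loss tangent = 2*n*k / (n**2 - k**2)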
def convert_path_to_pixels(path):
"""
Purpose:
---
This function should convert the obtained path (list of tuples) to pixels.
Teams are free to choose the number of points and logic for this conversion.
Input Arguments:
---
`path` : [ list ]
Path returned from task_4a.find_path() function.
Returns:
---
`pixel_path` : [ type can be decided by teams ]
Example call:
---
pixel_path = convert_path_to_pixels(path)
"""
############## ADD YOUR CODE HERE ##############
    tmp = 64
    # Build a new list of [x, y] pixel coordinates so the input
    # (a list of tuples) is not mutated in place.
    pixel_path = []
    for grid_x, grid_y in path:
        pixel_path.append([grid_x * tmp * 2 + tmp, grid_y * tmp * 2 + tmp])
##################################################
print("Pixel path is : ", pixel_path)
return pixel_path | a50557f252d43f9c3df1b3781c1203dd518d3797 | 7,202 |
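A small worked example of the grid-to-pixel mapping above (cell half-width tmp = 64, so pixel = grid * 128 + 64); the input path is made up:
path = [(0, 0), (1, 2), (3, 1)]      # hypothetical grid cells from task_4a.find_path()
print(convert_path_to_pixels(path))  # [[64, 64], [192, 320], [448, 192]]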
def uniform_prob(*args, prob=None, inside=None, pscale=1.):
""" Uniform probability function for discrete and continuous vtypes. """
# Detect ptype, default to prob if no values, otherwise detect vtype
assert len(args) >= 1, "Minimum of a single positional argument"
pscale = eval_pscale(pscale)
use_logs = iscomplex(pscale)
if prob is None:
prob = 0. if use_logs else 1.
vals = args[0]
if vals is None:
return prob
vtype = eval_vtype(vals) if callable(inside) else eval_vtype(inside)
# Set inside function by vtype if not specified
    if not callable(inside):
        inside_vals = inside  # capture the original bounds/set before rebinding the name
        if vtype in VTYPES[float]:
            inside = lambda x, v=inside_vals: np.logical_and(x >= min(v), x <= max(v))
        else:
            inside = lambda x, v=inside_vals: np.isin(x, v)
# If scalar, check within variable set
p_zero = NEARLY_NEGATIVE_INF if use_logs else 0.
if isscalar(vals):
prob = prob if inside(vals) else p_zero
# Otherwise treat as uniform within range
else:
p_true = prob
prob = np.tile(p_zero, vals.shape)
prob[inside(vals)] = p_true
# This section below is there just to play nicely with conditionals
if len(args) > 1:
for arg in args[1:]:
if use_logs:
prob = prob + uniform_prob(arg, inside=inside, pscale=0.j)
else:
prob = prob * uniform_prob(arg, inside=inside)
return prob | 75cd547fc2845cb94f5733310be0d7761ba379fb | 7,203 |
import glob
def obtenerListaArchivos(path: str):
""" genera una lista de los archivos alojados en str """
lista = glob.glob(path, recursive=True)
return lista | 3b9582dbf086a2af673cc75277041f32d001e215 | 7,204 |
def is_equal_to(amount: float) -> Predicate:
"""Says that a field is exactly equal to some constant amount."""
return is_nearly_equal_to(amount, tolerance=0, taper=0) | c2c9b795d7bb089834e8e11e980b9d794e69d97a | 7,205 |
from sys import version_info
def get_version():
"""Return the current version info.
The first call to this function will call version_info.load() and cache the
result for later calls.
"""
global _version
if _version is None:
_version = version_info.load()
return _version | 4ced584e4155bc5926c6d28e28a090d332a7387b | 7,206 |
def load_yaml(fname):
"""Load a YAML file."""
yaml = YAML(typ="safe")
# Compat with HASS
yaml.allow_duplicate_keys = True
# Stub HASS constructors
HassSafeConstructor.name = fname
yaml.Constructor = HassSafeConstructor
with open(fname, encoding="utf-8") as conf_file:
# If configuration file is empty YAML returns None
# We convert that to an empty dict
return yaml.load(conf_file) or {} | 957a5d171568592da89cfa58a69c746ffcf67d33 | 7,207 |
import numpy as np
from cvxopt import matrix, solvers
def unmix_cvxopt(data, endmembers, gammaConst=0, P=None):
"""
******************************************************************
unmix finds an accurate estimation of the proportions of each endmember
Syntax: P2 = unmix(data, endmembers, gammaConst, P)
This product is Copyright (c) 2013 University of Missouri and University
of Florida
All rights reserved.
    CVXOPT package is used here. Parameters H, F, L, K, Aeq, beq correspond to
    P, q, G, h, A, b, respectively. lb and ub are element-wise bound constraints which
    are added to matrices G and h respectively.
Inputs:
data = DxN matrix of N data points of dimensionality D
endmembers = DxM matrix of M endmembers with D spectral bands
gammaConst = Gamma Constant for SPT term
P = NxM matrix of abundances corresponding to N input pixels and M endmembers
Returns:
P2 = NxM matrix of new abundances corresponding to N input pixels and M endmembers
******************************************************************
"""
solvers.options['show_progress'] = False
X = data
M = endmembers.shape[1] # number of endmembers # endmembers should be column vectors
N = X.shape[1] # number of pixels
# Equation constraint Aeq*x = beq
# All values must sum to 1 (X1+X2+...+XM = 1)
Aeq = np.ones((1, M))
beq = np.ones((1, 1))
# Boundary Constraints ub >= x >= lb
    # All values must be greater than or equal to 0 (0 <= X1, 0 <= X2, ..., 0 <= XM)
lb = 0
ub = 1
g_lb = np.eye(M) * -1
g_ub = np.eye(M)
# import pdb; pdb.set_trace()
G = np.concatenate((g_lb, g_ub), axis=0)
h_lb = np.ones((M, 1)) * lb
h_ub = np.ones((M, 1)) * ub
h = np.concatenate((h_lb, h_ub), axis=0)
if P is None:
P = np.ones((M, 1)) / M
gammaVecs = np.divide(gammaConst, sum(P))
H = 2 * (endmembers.T @ endmembers)
cvxarr = np.zeros((N,M))
for i in range(N):
F = ((np.transpose(-2 * X[:, i]) @ endmembers) + gammaVecs).T
cvxopt_ans = solvers.qp(P=matrix(H.astype(np.double)), q=matrix(F.astype(np.double)), G=matrix(G.astype(np.double)), h=matrix(h.astype(np.double)), A=matrix(Aeq.astype(np.double)), b=matrix(beq.astype(np.double)))
cvxarr[i, :] = np.array(cvxopt_ans['x']).T
cvxarr[cvxarr < 0] = 0
return cvxarr | d529b412afde7a7eb35a02d5d039ec271285829f | 7,208 |
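A shape-level usage sketch with synthetic data (sizes and the random mixing are purely illustrative), assuming numpy and cvxopt are installed:
rng = np.random.default_rng(0)
D, N, M = 50, 10, 3                              # bands, pixels, endmembers (hypothetical sizes)
endmembers = rng.random((D, M))
abund_true = rng.dirichlet(np.ones(M), size=N)   # N x M, rows sum to 1
data = endmembers @ abund_true.T                 # D x N mixed pixels
P2 = unmix_cvxopt(data, endmembers)              # N x M estimated abundances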
import logging
import dm_env
def _accumulate_reward(
timestep: dm_env.TimeStep,
episode_return: float) -> float:
"""Accumulates rewards collected over the course of an episode."""
if timestep.reward and timestep.reward != 0:
logging.info('Reward: %s', timestep.reward)
episode_return += timestep.reward
if timestep.first():
episode_return = 0
elif timestep.last():
logging.info('Episode return: %s', episode_return)
return episode_return | 8f96e9a5bbeb4babfd43283b6da8a7984e53f02b | 7,209 |
def unsafe_load(stream):
"""
Parse the first YAML document in a stream
and produce the corresponding Python object.
Resolve all tags, even those known to be
unsafe on untrusted input.
"""
return load(stream, UnsafeLoader) | ff74beb13746504508832cc9b658a8faf672d2ca | 7,210 |
import pickle
def load_tl_gan_model():
"""
Load the linear model (matrix) which maps the feature space
to the GAN's latent space.
"""
with open(FEATURE_DIRECTION_FILE, 'rb') as f:
feature_direction_name = pickle.load(f)
# Pick apart the feature_direction_name data structure.
feature_direction = feature_direction_name['direction']
feature_names = feature_direction_name['name']
num_feature = feature_direction.shape[1]
feature_lock_status = np.zeros(num_feature).astype('bool')
# Rearrange feature directions using Shaobo's library function.
feature_direction_disentangled = \
feature_axis.disentangle_feature_axis_by_idx(
feature_direction,
idx_base=np.flatnonzero(feature_lock_status))
return feature_direction_disentangled, feature_names | aeb0bd329e4c9f8c91ded7c80385c30e1fb69773 | 7,211 |
from typing import Optional
from pathlib import Path
import os
def _find_test_file_from_report_file(base_path: str, report: str) -> Optional[Path]:
"""
Find test file from cucumber report file path format
e.g) Test-features-foo-hoge.xml -> features/foo/hoge.feature or features/foo-hoge.feature
"""
report_file = os.path.basename(report)
    # str.lstrip() strips a set of characters, not a prefix, so remove the prefix explicitly
    if report_file.startswith(REPORT_FILE_PREFIX):
        report_file = report_file[len(REPORT_FILE_PREFIX):]
    report_file = os.path.splitext(report_file)[0]
    candidates = _create_file_candidate_list(report_file)
    for candidate in candidates:
        f = Path(base_path, candidate + ".feature")
if f.exists():
return f
return None | ebe53bc0fe5fefde6133d32a7ec6801810026626 | 7,212 |
def luminance(qcolor):
""" Gives the pseudo-equivalent greyscale value of this color """
r,g,b = qcolor.red(), qcolor.green(), qcolor.blue()
return int(0.2*r + 0.6*g + 0.2*b) | 9e1821da2c0c6e8d76aefe56d6ed659a728737bb | 7,213 |
def read_info(path, layer=None, encoding=None):
"""Read information about an OGR data source.
`crs` and `geometry` will be `None` and `features` will be 0 for a
nonspatial layer.
Parameters
----------
path : str or pathlib.Path
    layer : int or str, optional
        Name or index of layer in data source. Reads the first layer by default.
    encoding : str, optional (default: None)
If present, will be used as the encoding for reading string values from
the data source, unless encoding can be inferred directly from the data
source.
Returns
-------
dict
{
"crs": "<crs>",
"fields": <ndarray of field names>,
"encoding": "<encoding>",
"geometry": "<geometry type>",
"features": <feature count>
}
"""
return ogr_read_info(str(path), layer=layer, encoding=encoding) | 7479c63223288c94ed4756350756473866d7b2b3 | 7,214 |
import time
import requests
import json
import pandas as pd
def _macro_cons_opec_month():
"""
    OPEC report - monthly, data available from 2017-01-18 to the present.
    The data returned for a given index date are the previous month's figures; since
    some countries have missing data, only countries with data are returned.
:return: pandas.Series
阿尔及利亚 安哥拉 厄瓜多尔 加蓬 伊朗 伊拉克 科威特 利比亚 尼日利亚 \
2017-01-18 108.0 172.4 54.5 21.3 372.0 463.2 281.2 60.8 154.2
2017-02-13 104.5 165.1 52.7 19.9 377.5 447.6 271.8 67.5 157.6
2017-03-14 105.3 164.1 52.6 19.4 381.4 441.4 270.9 66.9 160.8
2017-04-12 105.6 161.4 52.6 19.8 379.0 440.2 270.2 62.2 154.5
2017-05-11 104.7 169.2 52.4 20.6 375.9 437.3 270.2 55.0 150.8
2017-06-13 105.9 161.3 52.8 20.4 379.5 442.4 270.5 73.0 168.0
2017-07-12 106.0 166.8 52.7 19.7 379.0 450.2 270.9 85.2 173.3
2017-08-10 105.9 164.6 53.6 20.5 382.4 446.8 270.3 100.1 174.8
2017-09-12 106.5 164.6 53.7 17.3 382.8 444.8 270.2 89.0 186.1
2017-10-11 104.6 164.1 53.6 20.1 382.7 449.4 270.0 92.3 185.5
2017-11-13 101.2 171.1 54.1 20.3 382.3 438.3 270.8 96.2 173.8
2017-12-13 101.3 158.1 53.3 19.7 381.8 439.6 270.3 97.3 179.0
2018-01-18 103.7 163.3 52.6 19.7 382.9 440.5 270.0 96.2 186.1
2018-04-12 98.4 152.4 51.8 18.3 381.4 442.6 270.4 96.8 181.0
2018-05-14 99.7 151.5 52.0 18.3 382.3 442.9 270.5 98.2 179.1
2018-06-12 103.1 152.5 51.9 18.9 382.9 445.5 270.1 95.5 171.1
2018-07-11 103.9 143.1 51.9 19.0 379.9 453.3 273.1 70.8 166.0
2018-08-13 106.2 145.6 52.5 18.8 373.7 455.6 279.1 66.4 166.7
2018-09-12 104.5 144.8 52.9 18.7 358.4 464.9 280.2 92.6 172.5
2018-10-11 104.9 151.9 53.1 18.7 344.7 465.0 281.2 105.3 174.8
2018-11-13 105.4 153.3 52.5 18.6 329.6 465.4 276.4 111.4 175.1
2018-12-12 105.2 152.1 52.5 17.6 295.4 463.1 280.9 110.4 173.6
2019-03-14 102.6 145.7 52.2 20.3 274.3 463.3 270.9 90.6 174.1
2019-04-10 101.8 145.4 52.4 21.4 269.8 452.2 270.9 109.8 173.3
2019-06-13 102.9 147.1 52.9 21.1 237.0 472.4 271.0 117.4 173.3
沙特 阿联酋 委内瑞拉 欧佩克产量
2017-01-18 1047.4 307.1 202.1 3308.5
2017-02-13 994.6 293.1 200.4 3213.9
2017-03-14 979.7 292.5 198.7 3195.8
2017-04-12 999.4 289.5 197.2 3192.8
2017-05-11 995.4 284.2 195.6 3173.2
2017-06-13 994.0 288.5 196.3 3213.9
2017-07-12 995.0 289.8 193.8 3261.1
2017-08-10 1006.7 290.5 193.2 3286.9
2017-09-12 1002.2 290.1 191.8 3275.5
2017-10-11 997.5 290.5 189.0 3274.8
2017-11-13 1000.0 291.1 186.3 3258.9
2017-12-13 999.6 288.3 183.4 3244.8
2018-01-18 991.8 287.8 174.5 3241.6
2018-04-12 993.4 286.4 148.8 3195.8
2018-05-14 995.9 287.2 143.6 3193.0
2018-06-12 998.7 286.5 139.2 3186.9
2018-07-11 1042.0 289.7 134.0 3232.7
2018-08-13 1038.7 295.9 127.8 3232.3
2018-09-12 1040.1 297.2 123.5 3256.5
2018-10-11 1051.2 300.4 119.7 3276.1
2018-11-13 1063.0 316.0 117.1 3290.0
2018-12-12 1101.6 324.6 113.7 3296.5
2019-03-14 1008.7 307.2 100.8 3054.9
2019-04-10 979.4 305.9 73.2 3002.2
2019-06-13 969.0 306.1 74.1 2987.6
"""
t = time.time()
res = requests.get(
JS_CONS_OPEC_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
big_df = pd.DataFrame()
for country in [item["datas"] for item in json_data["list"]][0].keys():
try:
value_list = [item["datas"][country] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["上个月"]
temp_df.name = country
big_df = big_df.append(temp_df)
except:
continue
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_opec_report",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
res = requests.get(f"https://datacenter-api.jin10.com/reports/dates?category=opec&_={str(int(round(t * 1000)))}",
                       headers=headers)  # date sequence
all_date_list = res.json()["data"]
need_date_list = [item for item in all_date_list if
item.split("-")[0] + item.split("-")[1] + item.split("-")[2] not in date_list]
for item in reversed(need_date_list):
res = requests.get(
f"https://datacenter-api.jin10.com/reports/list?category=opec&date={item}&_={str(int(round(t * 1000)))}",
headers=headers)
temp_df = pd.DataFrame(res.json()["data"]["values"],
columns=pd.DataFrame(res.json()["data"]["keys"])["name"].tolist()).T
temp_df.columns = temp_df.iloc[0, :]
temp_df = temp_df[['阿尔及利亚', '安哥拉', '厄瓜多尔', '加蓬', '伊朗', '伊拉克', '科威特', '利比亚', '尼日利亚', '沙特',
'阿联酋', '委内瑞拉', '欧佩克产量']].iloc[-2, :]
big_df[item] = temp_df
return big_df.T | 5ae77b64b5d66e14027d757ea840385d0fc96033 | 7,215 |
import argparse
def createparser():
"""Create an :class:`argparse.ArgumentParser` instance
:return: parser instance
:rtype: :class:`argparse.ArgumentParser`
"""
parser = argparse.ArgumentParser(prog=__package__,
description=__doc__)
s = parser.add_subparsers()
# create compare subcommand
parser_compare = s.add_parser("compare",
help="Compare two versions"
)
parser_compare.set_defaults(which="compare")
parser_compare.add_argument("version1",
help="First version"
)
parser_compare.add_argument("version2",
help="Second version"
)
# create bump subcommand
parser_bump = s.add_parser("bump",
help="Bumps a version"
)
parser_bump.set_defaults(which="bump")
sb = parser_bump.add_subparsers(title="Bump commands",
dest="bump")
# Create subparsers for the bump subparser:
for p in (sb.add_parser("major",
help="Bump the major part of the version"),
sb.add_parser("minor",
help="Bump the minor part of the version"),
sb.add_parser("patch",
help="Bump the patch part of the version"),
sb.add_parser("prerelease",
help="Bump the prerelease part of the version"),
sb.add_parser("build",
help="Bump the build part of the version")):
p.add_argument("version",
help="Version to raise"
)
return parser | d5aa807be432d9e1aaa5d155b12fa1366c5fe050 | 7,216 |
def get_activation(preact_dict, param_name, hook_type):
"""
    Hooks used in sensitivity schedulers (LOBSTER, Neuron-LOBSTER, SERENE).
    :param preact_dict: Dictionary in which the parameters' information is saved.
    :param param_name: Name of the layer, used as a dictionary key.
:param hook_type: Hook type.
:return: Returns a forward_hook if $hook_type$ is forward, else a backward_hook.
"""
def forward_hook(model, inp, output):
preact_dict[param_name] = output
def backward_hook(module, grad_input, grad_output):
preact_dict[param_name] = None
preact_dict[param_name] = grad_output[0].detach().cpu()
return forward_hook if hook_type == "forward" else backward_hook | 8d5766178ef972e010b5be3a3826774f051dd3bd | 7,217 |
def createAbsorption(cfgstr):
"""Construct Absorption object based on provided configuration (using available factories)"""
return Absorption(cfgstr) | 587b0e12f845171ffd61d5d04c37b4ff98865216 | 7,218 |
def get_optimizer_config():
"""Gets configuration for optimizer."""
optimizer_config = configdict.ConfigDict()
# Learning rate scheduling. One of: ["fixed", "exponential_decay"]
optimizer_config.learning_rate_scheduling = "exponential_decay"
# Optimization algorithm. One of: ["SGD", "Adam", "RMSprop"].
optimizer_config.optim_type = "Adam"
# Adam beta1.
optimizer_config.beta1 = 0.9
# Adam beta2.
optimizer_config.beta2 = 0.999
# Norm clipping threshold applied for rnn cells (no clip if 0).
optimizer_config.norm_clip = 0.0
# Learning rate.
optimizer_config.initial_learning_rate = 0.001
# The learning rate decay 'epoch' length.
optimizer_config.lr_decay_steps = 12000
# The learning rate decay base, applied per epoch.
optimizer_config.lr_decay_base = 0.85
# RMSprop decay.
optimizer_config.decay = 0.9
# RMSprop moment.
optimizer_config.mom = 0.0
return optimizer_config | 1918cd8aa9ff8446dec8cb90ff529de97f05d5aa | 7,219 |
def flat2seq(x: Tensor, num_features: int) -> Tensor:
"""Reshapes tensor from flat format to sequence format.
Flat format: (batch, sequence x features)
Sequence format: (batch, sequence, features)
Args:
x (Tensor): a tensor in the flat format (batch, sequence x features).
num_features (int): number of features (last dimension) of the output tensor.
Returns:
Tensor: the transformed tensor in sequence format (batch, seq, features).
"""
if not is_flat(x):
raise ValueError(
            'attempt to reshape tensor from flat format to sequence format failed. '
            f'Expected input tensor with 2 dimensions, got {x.ndim}.'
)
return x.view(x.shape[0], -1, num_features) | d8bace4548d82352ebae28dc9be665b862b744d0 | 7,220 |
def run_results(results_data, time_column, pathway_column, table_letters, letters,
dataframe_T1, dataframe_T2, dataframe_T3, dataframe_T4,
original_transitions, simulation_transitions,
intervention_codes, target, individuals,
save_location, simulation_name, listed_times,
last_arrival, period):
"""Fill the four results tables."""
Table1_results = T1_results(results_data, time_column, pathway_column, dataframe_T1,
original_transitions, simulation_transitions,
intervention_codes, target, individuals,
save_location, simulation_name,
last_arrival, period)
Table2_results = T2_results(results_data, pathway_column, letters, dataframe_T2, simulation_name)
Table3_results = T3_results(results_data, pathway_column, dataframe_T3, save_location, simulation_name)
Table4_results = T4_results(results_data, table_letters, dataframe_T4, listed_times, simulation_name)
return(Table1_results, Table2_results, Table3_results, Table4_results) | 5292328a1f74d2ecb89daae465bace1a95eff538 | 7,221 |
from typing import Optional
def get_spatial_anchors_account(name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSpatialAnchorsAccountResult:
"""
Get information about an Azure Spatial Anchors Account.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.mixedreality.get_spatial_anchors_account(name="example",
resource_group_name=azurerm_resource_group["example"]["name"])
pulumi.export("accountDomain", data["azurerm_spatial_anchors_account"]["account_domain"])
```
:param str name: Specifies the name of the Spatial Anchors Account. Changing this forces a new resource to be created. Must be globally unique.
:param str resource_group_name: The name of the resource group in which to create the Spatial Anchors Account.
"""
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure:mixedreality/getSpatialAnchorsAccount:getSpatialAnchorsAccount', __args__, opts=opts, typ=GetSpatialAnchorsAccountResult).value
return AwaitableGetSpatialAnchorsAccountResult(
account_domain=__ret__.account_domain,
account_id=__ret__.account_id,
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
resource_group_name=__ret__.resource_group_name) | db94c210af0c46cea2e3a95573f339ec65f7e7fe | 7,222 |
import re
def format_query(str_sql):
"""Strips all newlines, excess whitespace, and spaces around commas"""
stage1 = str_sql.replace("\n", " ")
stage2 = re.sub(r"\s+", " ", stage1).strip()
stage3 = re.sub(r"(\s*,\s*)", ",", stage2)
return stage3 | 5adb0f9c3314ba04bbf92c88e3ef17802b2afeb0 | 7,223 |
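A small illustrative example of the normalisation performed above (the query text is made up):
sql = """
    SELECT id ,  name
    FROM   users
    WHERE  active = 1
"""
print(format_query(sql))  # SELECT id,name FROM users WHERE active = 1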
def make_ytick_labels(current_ticks, n, numstring = ""):
"""
"""
new_ticks = []
for item in current_ticks:
if int(item) == item:
new_ticks.append(f"{int(item)}{numstring}")
else:
new_ticks.append(f"{item:.1f}{numstring}")
return new_ticks | 2685126dc72305ccb7b4bf652fe645e9a39affd3 | 7,224 |
import re
def check_token(token):
"""
Returns `True` if *token* is a valid XML token, as defined by XML
Schema Part 2.
"""
return (token == '' or
re.match(
"[^\r\n\t ]?([^\r\n\t ]| [^\r\n\t ])*[^\r\n\t ]?$", token)
is not None) | b4e1d313fb64aad4c1c244cb18d3629e13b1c3af | 7,225 |
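A few illustrative checks (not from the source): the regex accepts single internal spaces but rejects leading/trailing whitespace, runs of spaces, tabs, and line breaks.
assert check_token("hello world")     # single internal spaces are fine
assert check_token("")                # the empty string is a valid token
assert not check_token(" padded ")    # leading/trailing whitespace is rejected
assert not check_token("tab\tbreak")  # tabs, CR and LF are rejected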
from numpy.random import rand
def generate_random_data(n=10):
    """Generate n random data points."""
    return rand(n) | 872591efc14d28282b24138f80e19c92487bde6d | 7,226 |
import re
import os
def get_basenames(root, path, remove='.py'):
"""Get file basenames of a folder.
Args:
root (str): Root path
path (str): Path to folder
remove (str, optional): Defaults to '.py'. Part to remove from filename.
Returns:
list: list of names
"""
regex = re.compile(remove, re.IGNORECASE)
files = find_files(root, path, remove=remove)
return list(map(
lambda file: re.sub(regex, '', os.path.basename(file)),
files
)) | cdce93c20b8938f2c6176e82d6fde2d061da0d2c | 7,227 |
def get_phoible_feature_list(var_to_index):
"""
Function that takes a var_to_index object and return a list of Phoible segment features
:param var_to_index: a dictionary mapping variable name to index(column) number in Phoible data
:return :
"""
return list(var_to_index.keys())[11:] | a53995cd927d1cdc66fadb2a8e6af3f5e2effff0 | 7,228 |
from sklearn.model_selection import train_test_split
def split_data(dataset):
"""Split pandas dataframe to data and labels."""
data_predictors = [
"Steps_taken",
"Minutes_sitting",
"Minutes_physical_activity",
"HR",
"BP",
]
X = dataset[data_predictors]
y = dataset.Health
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
return X, y, x_train, x_test, y_train, y_test | b15db522ff45dee825d64d7daf6604fb400bc677 | 7,229 |
def add_header(response):
"""
Add headers to both force latest IE rendering engine or Chrome Frame.
"""
response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'
return response | 6e755f47bd12095d80a941338c451e677f80bcc6 | 7,230 |
def recursive_isomorphism_counter(smp, matching, *,
unspec_cover, verbose, init_changed_cands, tmplt_equivalence=False,
world_equivalence=False):
"""
Recursive routine for solving subgraph isomorphism.
Parameters
----------
smp : MatchingProblem
A subgraph matching problem
matching : list
A list of tuples which designate what each template vertex is matched to
unspec_cover : np.array
Array of the indices of the nodes with more than 1 candidate
verbose : bool
Flag for verbose output
init_changed_cands : np.array
A binary array where element i is 1 if vertex i's candidates have
changed since the function was last called. The first time it is called,
this will be all zeros
tmplt_equivalence : bool
Flag indicating whether to use template equivalence
world_equivalence : bool
Flag indicating whether to use world equivalence
Returns
-------
int
The number of isomorphisms
"""
iterate_to_convergence(smp)
candidates = smp.candidates()
# If the node cover is empty, the unspec nodes are disconnected. Thus, we
# can skip straight to counting solutions to the alldiff constraint problem
if len(unspec_cover) == 0:
# Elimination filter is not needed here and would be a waste of time
node_to_cands = {node: smp.world.nodes[candidates[idx]]
for idx, node in enumerate(smp.tmplt.nodes)}
return count_alldiffs(node_to_cands)
# Since the node cover is not empty, we first choose some valid
# assignment of the unspecified nodes one at a time until the remaining
# unspecified nodes are disconnected.
n_isomorphisms = 0
unspec_cover_cands = candidates[unspec_cover,:]
node_idx = pick_minimum_domain_vertex(unspec_cover_cands)
cand_idxs = np.argwhere(candidates[node_idx]).flat
for i, cand_idx in enumerate(cand_idxs):
smp_copy = smp.copy()
# candidates_copy[node_idx] = one_hot(cand_idx, world.n_nodes)
smp_copy.add_match(node_idx, cand_idx)
matching.append((node_idx, cand_idx))
# Remove matched node from the unspecified list
new_unspec_cover = unspec_cover[:node_idx] + unspec_cover[node_idx+1:]
# recurse to make assignment for the next node in the unspecified cover
n_isomorphisms += recursive_isomorphism_counter(
smp_copy, matching, unspec_cover=new_unspec_cover,
verbose=verbose,
init_changed_cands=one_hot(node_idx, smp.tmplt.n_nodes))
# Unmatch template vertex
matching.pop()
# TODO: more useful progress summary
if verbose:
print("depth {}: {} of {}".format(len(unspec_cover), i,
len(cand_idxs)), n_isomorphisms)
# If we are using template equivalence, we can mark for all equivalent
        # template vertices that cand_idx cannot be a candidate.
if tmplt_equivalence:
for eq_t_vert in smp.tmplt.eq_classes[node_idx]:
smp.prevent_match(eq_t_vert, cand_idx)
return n_isomorphisms | 3bc46c76569b17e05484ba3e31ecb3e5207c86cc | 7,231 |
import numpy as np
import cv2
def draw_lane_on_unwarped_frame(frame, left_line, right_line, trsf_mtx_inv):
    """ Draw the unwarped lane lines and lane area onto the current frame.
    Args:
        frame: current (undistorted) video frame
        left_line: left Line instance
        right_line: right Line instance
        trsf_mtx_inv: inverse of the perspective transformation matrix
    Returns:
        The frame with the lane area and lane lines overlaid.
    """
# Frame dimensions
height, width = frame.shape[0:2]
# Generate x and y values for plotting
y = np.linspace(0, frame.shape[0] - 1, frame.shape[0])
left_x = left_line.evaluate_average_polynomial(y)
right_x = right_line.evaluate_average_polynomial(y)
# Create a green lane area between the left and right lane lines
warped_lane_area = np.zeros_like(frame) # Warped at first
left_points = np.column_stack((left_x, y)).reshape((1, -1, 2)).astype(int)
right_points = np.flipud(
np.column_stack((right_x, y))).reshape((1, -1, 2)).astype(int)
vertices = np.hstack((left_points, right_points))
cv2.fillPoly(warped_lane_area, [vertices], (0, 255, 0))
# Unwarp the lane area
unwarped_lane = cv2.warpPerspective(
warped_lane_area, trsf_mtx_inv, (width, height))
# Overlay the unwarped lane area onto the frame
green_lane_on_frame = cv2.addWeighted(frame, 1., unwarped_lane, 0.3, 0)
# Draw the left and right lane polynomials into an empty and warped image
warped_lanes = np.zeros_like(frame)
left_points = np.column_stack((left_x, y)).reshape(-1, 1, 2)
right_points = np.column_stack((right_x, y)).reshape(-1, 1, 2)
warped_lanes = cv2.polylines(warped_lanes,
[left_points.astype(np.int32)],
isClosed=False, color=(255, 0, 0),
thickness=30)
warped_lanes = cv2.polylines(warped_lanes,
[right_points.astype(np.int32)],
isClosed=False, color=(0, 0, 255),
thickness=30)
# Unwarp the lane lines plot
lane_lines = cv2.warpPerspective(
warped_lanes, trsf_mtx_inv, (width, height))
# Create a mask of the unwarped lane lines to shadow the frame background
# a bit
gray = cv2.cvtColor(lane_lines, cv2.COLOR_BGR2GRAY)
_, mask = cv2.threshold(gray, 1, 255, cv2.THRESH_BINARY_INV)
# Black-out the area of the lane lines in the frame
frame_bg = cv2.bitwise_and(
green_lane_on_frame, green_lane_on_frame, mask=mask)
# Combine with complete frame to shadow the area of the lane lines a bit
shadowed_frame = cv2.addWeighted(frame_bg, 0.6, green_lane_on_frame, 0.4, 0)
return cv2.addWeighted(shadowed_frame, 1.0, lane_lines, 1.0, 0) | b59b3d99a17ba241aff0a9ac7e6d33497f1db803 | 7,232 |
import os
def _init_buffer_file() -> str:
"""Returns file path to the temporary buffer file. Creates the
temp directory and temp buffer file.
"""
if not os.path.exists(".git"):
raise NotAGitRepoException(f"No .git folder found. {os.getcwd()} is not a git repo!")
file_path = os.path.join(".git", "MKCOMMIT_BUFFER")
open(file_path, "w").close()
return file_path | 871346ec0f6960befa5365b9f63eca8d6adc6c62 | 7,233 |
def n_states_of_vec(l, nval):
""" Returns the amount of different states a vector of length 'l' can be
in, given that each index can be in 'nval' different configurations.
"""
if type(l) != int or type(nval) != int or l < 1 or nval < 1:
raise ValueError("Both arguments must be positive integers.")
return nval ** l | 98770fa5a5e62501bf365a4a5a40a932b2ba2450 | 7,234 |
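Two quick worked examples: a length-3 binary vector has 2**3 = 8 possible states, and a length-2 decimal vector has 10**2 = 100.
print(n_states_of_vec(3, 2))   # 8
print(n_states_of_vec(2, 10))  # 100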
def remove_items_from_dict(a_dict, bad_keys):
"""
Remove every item from a_dict whose key is in bad_keys.
:param a_dict: The dict to have keys removed from.
:param bad_keys: The keys to remove from a_dict.
:return: A copy of a_dict with the bad_keys items removed.
"""
new_dict = {}
for k in a_dict.keys():
if k not in bad_keys:
new_dict[k] = a_dict[k]
return new_dict | 7c665e372c2099441f8a661f1194a76a21edf01c | 7,235 |
def writeObject(img_array, obj_array, bbox):
"""Writes depression objects to the original image.
Args:
img_array (np.array): The output image array.
obj_array (np.array): The numpy array containing depression objects.
bbox (list): The bounding box of the depression object.
Returns:
np.array: The numpy array containing the depression objects.
"""
min_row, min_col, max_row, max_col = bbox
roi = img_array[min_row:max_row, min_col:max_col]
roi[obj_array > 0] = obj_array[obj_array > 0]
return img_array | 141cf9c3f47766a4020d737e743215db04761f54 | 7,236 |
def process_model(current_val):
"""
    :param current_val: model generated by the SAT solver; an atom is satisfied if it is in the model.
:return tuple of sets comprising true and false atoms.
"""
true_atoms, false_atoms = set(), set()
for atom in current_val:
if current_val[atom]:
true_atoms.add(str(atom))
else:
false_atoms.add(str(atom))
return true_atoms, false_atoms | 9cf90aec097091841c0f0ac820317f373a92e4c1 | 7,237 |
import re
def filter_strace_output(lines):
"""
a function to filter QEMU logs returning only the strace entries
Parameters
----------
lines : list
a list of strings representing the lines from a QEMU log/trace.
Returns
-------
list
a list of strings representing only the strace log entries
the entries will also be cleaned up if a page dump occurs in the middle of them
"""
#we only want the strace lines, so remove/ignore lines that start with the following:
line_starts= ['^[\d,a-f]{16}-', # pylint: disable=anomalous-backslash-in-string
'^page',
'^start',
'^host',
'^Locating',
'^guest_base',
'^end_',
'^brk',
'^entry',
'^argv_',
'^env_',
'^auxv_',
'^Trace',
'^--- SIGSEGV',
'^qemu'
]
filter_string = '|'.join(line_starts)
filtered = []
prev_line = ""
for line in lines:
if re.match(filter_string,line):
continue
# workaround for https://gitlab.com/qemu-project/qemu/-/issues/654
if re.search("page layout changed following target_mmap",line):
prev_line = line.replace("page layout changed following target_mmap","")
continue
if re.match('^ = |^= ', line):
line = prev_line+line
filtered.append(line)
return filtered | 01b6c048ebdf890e9124c387fc744e56cc6b7f4d | 7,238 |
def export_gmf_xml(key, dest, sitecol, imts, ruptures, rlz,
investigation_time):
"""
:param key: output_type and export_type
:param dest: name of the exported file
:param sitecol: the full site collection
:param imts: the list of intensity measure types
:param ruptures: an ordered list of ruptures
:param rlz: a realization object
:param investigation_time: investigation time (None for scenario)
"""
if hasattr(rlz, 'gsim_rlz'): # event based
smltpath = '_'.join(rlz.sm_lt_path)
gsimpath = rlz.gsim_rlz.uid
else: # scenario
smltpath = ''
gsimpath = rlz.uid
writer = hazard_writers.EventBasedGMFXMLWriter(
dest, sm_lt_path=smltpath, gsim_lt_path=gsimpath)
writer.serialize(
GmfCollection(sitecol, imts, ruptures, investigation_time))
return {key: [dest]} | 139e24feb476ab10c0f1192fd47f80b7bbe29ccb | 7,239 |
import functools
def track_state_change(entity_ids, from_state=None, to_state=None):
"""Decorator factory to track state changes for entity id."""
def track_state_change_decorator(action):
"""Decorator to track state changes."""
event.track_state_change(HASS, entity_ids,
functools.partial(action, HASS),
from_state, to_state)
return action
return track_state_change_decorator | 08f6e7f8354f51dfa54d233156585c84d9b811b3 | 7,240 |
def phrase():
"""Generate and return random phrase."""
return models.PhraseDescription(text=random_phrase.make_random_text()) | 458529df5d6dbd92b7a6545d92b836763a8411a6 | 7,241 |
def classify_tweets(text):
"""
classify tweets for tweets about car accidents and others
:param text: tweet text
:return: boolean, true if tweet is about car accident, false for others
"""
return text.startswith(u'בשעה') and (
(u'הולך רגל' in text or
u'הולכת רגל' in text or
u'נהג' in text or
u'אדם' in text)
and
(u'רכב' in text or
u'מכונית' in text or
u'אופנוע' in text or
u"ג'יפ" in text or
u'טרקטור' in text or
u'משאית' in text or
u'אופניים' in text or
u'קורקינט' in text)) | b34991a36febf2648cd83f2782ba4a8631e65a1a | 7,242 |
def _build_results(drift_type, raw_metrics):
"""Generate all results for queried time window or run id of some a datadriftdetector.
:param raw_metrics: origin data diff calculation results.
:return: a list of result dict.
"""
results = []
for metric in raw_metrics:
ep = _properties(metric.get_extended_properties())
if metric.name == OUTPUT_METRIC_DRIFT_COEFFICIENT:
# Overall drift coefficient; add to results return object
create_new_component = True
if create_new_component:
res = {KEY_NAME_Drift_TYPE: drift_type}
# attach result content
result_list = []
result_list.append(
_build_single_result_content(drift_type, metric.value, ep)
)
res["result"] = result_list
results.append(res)
return results | 2938257d54d0be0ac012c4ddc39952c6767b8a38 | 7,243 |
def no_test_server_credentials():
"""
Helper function that returns true when TEST_INTEGRATION_*
credentials are undefined or empty.
"""
client_id = getattr(settings, 'TEST_INTEGRATION_CLIENT_ID', None)
username = getattr(settings, 'TEST_INTEGRATION_USERNAME', None)
password = getattr(settings, 'TEST_INTEGRATION_PASSWORD', None)
app_read = getattr(settings, 'TEST_INTEGRATION_READ_CLIENT_ID', None)
app_write = getattr(settings, 'TEST_INTEGRATION_WRITE_CLIENT_ID', None)
return not (client_id and username and password and app_read and app_write) | a98f249e15f9d1c42aacd62a22024af577332275 | 7,244 |
from typing import Tuple
from typing import Any
def skip_spaces(st: ST) -> Tuple[ST, Any]:
"""
    Skip whitespace characters.
"""
pos, src = st
while pos < len(src) and src[pos].isspace():
pos += 1
return (pos, src), None | df0c549c8af18a66a6e2d1704991592516e62ffb | 7,245 |
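A small illustrative call (the input string is made up): the parser state is a (position, source) pair and the returned position points at the first non-space character.
state = (0, "   foo")
new_state, value = skip_spaces(state)
print(new_state)  # (3, '   foo')
print(value)      # None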
def mixed_phone_list():
"""Return mixed phone number list."""
return _MIXED_PHONE_LIST_ | e607eb5778d8f4999fcbeb85ea5a3bb0ca04ee40 | 7,246 |
def bootstrap(config):
"""
Configure the existing account for subsequent deployer runs.
Create S3 buckets & folders, upload artifacts required by
infrastructure to them.
Args:
config: dictionary containing all variable settings required
to run terraform with
Returns:
config dict.
"""
config['project_config'] = config.get('project_config',
s3.get_bucket_name(config, 'data'))
config['tf_state_bucket'] = config.get('tf_state_bucket',
s3.get_bucket_name(config,'tfstate'))
logmsg = "{}: Creating S3 project bucket: {}"
logger.debug(logmsg.format(__name__, config['project_config']))
s3.create_bucket(config['project_config'])
logmsg = "{}: Creating S3 project bucket: {}"
logger.debug(logmsg.format(__name__, config['tf_state_bucket']))
s3.create_bucket(config['tf_state_bucket'])
upload_staged_artifacts(config)
return config | c06101d64113a9e2cf647d29cad32fa923ffbcb2 | 7,247 |
def get_report_summary(report):
"""
Retrieve the docstring summary content for the given report module.
:param report: The report module object
:returns: the first line of the docstring for the given report module
"""
summary = None
details = get_report_details(report)
if not details:
return
details = details.split('\n')
while details and not summary:
summary = details.pop(0)
return summary | ea350c527cfab62496110ae08eedd9841db10492 | 7,248 |
def load_dataset(dataset_identifier, train_portion='75%', test_portion='25%', partial=None):
"""
    :param dataset_identifier: name of the TFDS dataset to load.
    :param train_portion: requested training split portion (currently unused; splits are not always supported).
    :param test_portion: requested test split portion (currently unused).
    :param partial: if given, only take this many examples.
    :return: dataset with (image, label)
"""
# splits are not always supported
# split = ['train[:{0}]'.format(train_portion), 'test[{0}:]'.format(test_portion)]
ds = tfds.load(dataset_identifier, split='train', shuffle_files=True)
if partial is not None:
ds = ds.take(partial)
return ds | f836236f56ba8359194c21c20ea6767d296a0ee4 | 7,249 |
from typing import List
import functools
def stop(ids: List[str]):
"""Stop one or more instances"""
return functools.partial(ec2.stop_instances, InstanceIds=ids) | fdf6db088323e5874c01662cf931aa85143ac2aa | 7,250 |
import os
import re
def search_names(word, archive=TAXDB_NAME, name="names.dmp", limit=None):
"""
Processes the names.dmp component of the taxdump.
"""
# Needs a taxdump to work.
if not os.path.isfile(archive):
utils.error("taxdump file not found (download and build it first)")
# Open stream into the tarfile.
stream = open_tarfile(archive=archive, filename=name, limit=limit)
# The pattern may be regular expression.
patt = re.compile(word, re.IGNORECASE)
# Labels that will be searched.
valid = {'scientific name', 'equivalent name', 'genbank common name'}
def select(row):
taxid, name, label = row[0], row[2], row[6]
return label in valid and patt.search(name)
# Apply the selector.
stream = filter(select, stream)
for elems in stream:
taxid, name, label = elems[0], elems[2], elems[6]
yield taxid, name | 127f65d713df58a15bf01f498123dde8550727a3 | 7,251 |
def simple_parse(config_file):
"""
    Do simple parsing and home-brewed type inference.
"""
config = ConfigObj(config_file, raise_errors=True)
config.walk(string_to_python_type)
# Now, parse input and output in the Step definition by hand.
_step_io_fix(config)
return(config) | 85a406125a644fb75a5d8986778de6ea9b8af52a | 7,252 |
import pickle
def deserialize_columns(headers, frames):
"""
Construct a list of Columns from a list of headers
and frames.
"""
columns = []
for meta in headers:
col_frame_count = meta["frame_count"]
col_typ = pickle.loads(meta["type-serialized"])
colobj = col_typ.deserialize(meta, frames[:col_frame_count])
columns.append(colobj)
# Advance frames
frames = frames[col_frame_count:]
return columns | 176d936a6019669f15049f11df00e14ad62238d7 | 7,253 |
import os
import sys
def get_site_config(sites_path=None, site_path=None):
"""Returns `site_config.json` combined with `sites/common_site_config.json`.
`site_config` is a set of site wide settings like database name, password, email etc."""
config = {}
sites_path = sites_path or getattr(local, "sites_path", None)
site_path = site_path or getattr(local, "site_path", None)
if sites_path:
common_site_config = os.path.join(sites_path, "common_site_config.json")
if os.path.exists(common_site_config):
config.update(get_file_json(common_site_config))
if site_path:
site_config = os.path.join(site_path, "site_config.json")
if os.path.exists(site_config):
config.update(get_file_json(site_config))
elif local.site and not local.flags.new_site:
print("{0} does not exist".format(local.site))
sys.exit(1)
#raise IncorrectSitePath, "{0} does not exist".format(site_config)
return _dict(config) | 84346f0307f3e4b278c5dfe7fb9662b9d59f8a87 | 7,254 |
def words_with_joiner(joiner):
"""Pass through words unchanged, but add a separator between them."""
def formatter_function(i, word, _):
return word if i == 0 else joiner + word
return (NOSEP, formatter_function) | 9f24b2e7d202663902da0bfccd8e9b96faebc152 | 7,255 |
def magerr2Ivar(flux, magErr):
"""
Estimate the inverse variance given flux and magnitude error.
The reason for this is that we need to correct the magnitude or
flux for Galactic extinction.
Parameters
----------
flux : scalar or array of float
        Flux of the object.
magErr : scalar or array of float
Error of magnitude measurements.
"""
fluxErr = flux * ((10.0 ** (magErr/2.5)) - 1.0)
return 1.0 / (fluxErr ** 2.0) | 37c48c26f1b876ca4d77dc141b1728daaea24944 | 7,256 |
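A quick numeric check of the relation used above, fluxErr = flux * (10**(magErr/2.5) - 1) and ivar = 1 / fluxErr**2; the values are illustrative.
flux, mag_err = 100.0, 0.1
ivar = magerr2Ivar(flux, mag_err)
# fluxErr = 100 * (10**0.04 - 1) is about 9.65, so ivar is about 1 / 9.65**2 = 0.0107
print(ivar)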
def create_policy_work_item_linking(repository_id, branch,
blocking, enabled,
branch_match_type='exact',
organization=None, project=None, detect=None):
"""Create work item linking policy.
"""
organization, project = resolve_instance_and_project(
detect=detect, organization=organization, project=project)
policy_client = get_policy_client(organization)
configuration = create_configuration_object(repository_id, branch, blocking, enabled,
'40e92b44-2fe1-4dd6-b3d8-74a9c21d0c6e', [], [], branch_match_type)
return policy_client.create_policy_configuration(configuration=configuration, project=project) | 230604606ba47c29386027503f45d30577cb5edf | 7,257 |
import numbers
def center_data(X, y, fit_intercept, normalize=False, copy=True,
sample_weight=None):
"""
Centers data to have mean zero along axis 0. This is here because
nearly all linear models will want their data to be centered.
If sample_weight is not None, then the weighted mean of X and y
is zero, and not the mean itself
"""
X = as_float_array(X, copy)
if fit_intercept:
if isinstance(sample_weight, numbers.Number):
sample_weight = None
if sp.issparse(X):
X_offset = np.zeros(X.shape[1])
X_std = np.ones(X.shape[1])
else:
X_offset = np.average(X, axis=0, weights=sample_weight)
X -= X_offset
# XXX: currently scaled to variance=n_samples
if normalize:
X_std = np.sqrt(np.sum(X ** 2, axis=0))
X_std[X_std == 0] = 1
X /= X_std
else:
X_std = np.ones(X.shape[1])
y_offset = np.average(y, axis=0, weights=sample_weight)
y = y - y_offset
else:
X_offset = np.zeros(X.shape[1])
X_std = np.ones(X.shape[1])
y_offset = 0. if y.ndim == 1 else np.zeros(y.shape[1], dtype=X.dtype)
return X, y, X_offset, y_offset, X_std | d31b27868a6f1ee21ef4019df9954fc1136d73eb | 7,258 |
import socket
async def get_ipv4_internet_reachability(host, port, timeout):
"""
Host: 8.8.8.8 (google-public-dns-a.google.com)
OpenPort: 53/tcp
Service: domain (DNS/TCP)
"""
    try:
        socket.setdefaulttimeout(timeout)
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((host, port))
        # close the socket object; module-level socket.close() expects a file descriptor
        sock.close()
        return True
    except socket.error:
        return False | ff1d335dec568810431c58b0bc1a72eb10e65372 | 7,259 |
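A minimal usage sketch (host, port and timeout are the ones named in the docstring); although the coroutine never awaits, it can still be driven with asyncio:
import asyncio

reachable = asyncio.run(get_ipv4_internet_reachability("8.8.8.8", 53, 3))
print("online" if reachable else "offline")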
def cov_pen(h_j, h_no_j):
"""
Personal implementation of covariance matrix with penalization.
    :param h_j: 2-D array (samples x dimensions).
    :param h_no_j: 2-D array of the same shape as h_j, used as the penalization term.
    :return: penalized covariance matrix of shape (dimensions, dimensions).
"""
final_dim = h_j.shape[1]
cov_matrix = np.empty((final_dim, final_dim))
for row in range(final_dim):
for column in range(final_dim):
h_d = h_j[:, row]
h_d_no_j = h_no_j[:, row]
a = h_d - np.mean(h_d)
if row == column: # Diag
value = np.dot(a.T, a) + np.dot(h_d_no_j.T, h_d_no_j)
else:
h_i = h_j[:, column]
h_i_no_j = h_no_j[:, column]
b = h_i - np.mean(h_i)
value = np.dot(a.T, b) + np.dot(h_d_no_j.T, h_i_no_j)
cov_matrix[row, column] = value
return cov_matrix | 4a3ef366a072fd84d198597213ea544d88032ac5 | 7,260 |
def get_last_timestamp():
"""
    Get the timestamp for 23:59:59 of the current day.
    :return: the last timestamp of today
    """
    # Get the timestamp for tomorrow at 00:00
    future_timestamp = get_timestamp(-1)
    # Tomorrow's 00:00 timestamp minus 1 second
last_timestamp = future_timestamp - 1
return last_timestamp | 7f4c07309f9be1437c1743f691402bae58a7ec34 | 7,261 |
from typing import List
from typing import Dict
import os
import timeit
def crawl(folder: str, search: str, maxnum: int, num_threads: int, crawlers: List[str] = ['GOOGLE', 'BING', 'BAIDU']) -> Dict[str, str]:
"""Crawl web sites for images"""
print('(1) Crawling ...')
# prepare folders
os.makedirs(folder, exist_ok=True)
sources = {}
for c in crawlers:
print(f' -> {c}', end='', flush=True)
run_command = lambda : crawl_run(c, folder, search, maxnum, num_threads)
runtime = timeit.timeit(run_command, 'gc.enable()', number=1)# / float((10**6))
print(f' ({runtime:.2f} sec)')
return {k: v for k, v in CustomDownloader.registry.items() if k is not None} | 409a338ad0cd49641c0a690434d5706664bc7c6e | 7,262 |
def _get_all_subclasses(typ, # type: Type[T]
recursive=True, # type: bool
_memo=None # type: Set[Type[Any]]
):
# type: (...) -> Iterable[Type[T]]
"""
Returns all subclasses of `typ`
Warning this does not support generic types.
See parsyfiles.get_all_subclasses() if one day generic types are needed (commented lines below)
:param typ:
:param recursive: a boolean indicating whether recursion is needed
:param _memo: internal variable used in recursion to avoid exploring subclasses that were already explored
:return:
"""
_memo = _memo or set()
# if we have collected the subclasses for this already, return
if typ in _memo:
return []
# else remember that we have collected them, and collect them
_memo.add(typ)
# if is_generic_type(typ):
# # We now use get_origin() to also find all the concrete subclasses in case the desired type is a generic
# sub_list = get_origin(typ).__subclasses__()
# else:
sub_list = typ.__subclasses__()
# recurse
result = [] # type: List[Type[T]]
for t in sub_list:
# only keep the origins in the list
# to = get_origin(t) or t
to = t
# noinspection PyBroadException
try:
if to is not typ and to not in result and issubclass(to, typ): # is_subtype(to, typ, bound_typevars={}):
result.append(to)
except Exception:
# catching an error with is_subtype(Dict, Dict[str, int], bound_typevars={})
pass
# recurse
if recursive:
for typpp in sub_list:
for t in _get_all_subclasses(typpp, recursive=True, _memo=_memo):
# unfortunately we have to check 't not in sub_list' because with generics strange things happen
# also is_subtype returns false when the parent is a generic
if t not in sub_list and issubclass(t, typ): # is_subtype(t, typ, bound_typevars={}):
result.append(t)
return result | a9a9c1186e195347f937961b928159c605b64ffe | 7,263 |
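An illustrative call on a throwaway class hierarchy (not from the source):
class Base: ...
class Child(Base): ...
class GrandChild(Child): ...

print(_get_all_subclasses(Base))                   # [Child, GrandChild]
print(_get_all_subclasses(Base, recursive=False))  # [Child]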
import logging
def conditions_summary(conditions):
"""
Return a dict of consumer-level observations, say, for display on a
smart mirror or tablet.
"""
keys = ['timestamp', 'dewpoint', 'barometricPressure', 'windDirection',
'windSpeed', 'windGust', 'precipitationLastHour', 'temperature',
'relativeHumidity', 'heatIndex']
summary = dict()
for key in keys:
try:
summary[key] = conditions['properties'][key]
except Exception as exc:
summary[key] = 'none'
            logging.error('Error trying to read summary for key %s: %s', key, exc)
return summary | aa4c95fd892c63bd05abd24188b8931375973bc0 | 7,264 |
def InsertOrganisation(cur, con, entity_name: str = "Organisation") -> int:
""" Inserts a new Organisation into the database """
# Get information about the video game
print(f"Enter new {entity_name}'s details:")
row = {}
row["Name"] = input(f"Enter the name of the {entity_name}: ") or None
row["Headquarters"] = input(
f"Enter the headquarters of {entity_name} (Optional): ") or None
row["Founded"] = input(
f"Enter the date when the {entity_name} was founded in YYYY-MM-DD format: ") or None
row["Earnings"] = input(
f"Enter earnings of {entity_name} in USD (Optional): ") or 0
# Query to be executed
query = """INSERT INTO Organisations (Name, Headquarters,
Founded, Earnings)
VALUES (%(Name)s, %(Headquarters)s,
%(Founded)s, %(Earnings)s)
"""
print("\nExecuting")
print(query)
# Execute query
cur.execute(query, row)
# Get ID of last inserted organisation
cur.execute("SELECT LAST_INSERT_ID() AS OrganisationID")
return cur.fetchone()["OrganisationID"] | de22b6eeb446efab58a2124f1b26da1e9edb12ed | 7,265 |
def _rgb_to_hsv(rgbs):
"""Convert Nx3 or Nx4 rgb to hsv"""
rgbs, n_dim = _check_color_dim(rgbs)
hsvs = list()
for rgb in rgbs:
rgb = rgb[:3] # don't use alpha here
idx = np.argmax(rgb)
val = rgb[idx]
c = val - np.min(rgb)
if c == 0:
hue = 0
sat = 0
else:
if idx == 0: # R == max
hue = ((rgb[1] - rgb[2]) / c) % 6
elif idx == 1: # G == max
hue = (rgb[2] - rgb[0]) / c + 2
else: # B == max
hue = (rgb[0] - rgb[1]) / c + 4
hue *= 60
sat = c / val
hsv = [hue, sat, val]
hsvs.append(hsv)
hsvs = np.array(hsvs, dtype=np.float32)
if n_dim == 4:
        # keep the alpha channel; slice with 3:4 so the column stays 2-D for concatenation
        hsvs = np.concatenate((hsvs, rgbs[:, 3:4]), axis=1)
return hsvs | ee4a2d9867351e61bf9b14de4ef2d05425285879 | 7,266 |
def find_correlation(convergence_data, lens_data, plot_correlation=False, plot_radii=False, impact=False, key=None):
"""Finds the value of the slope for plotting residuals against convergence. Magnitude of slope and error
quantify correlation between the two.
    Inputs:
        convergence_data -- per-radius convergence data.
        lens_data -- per-radius lensing/SNe data used to compute the residuals (mu_diff).
"""
correlations = []
correlation_errs = []
for cone_radius in RADII[29::2]:
SNe_data = find_mu_diff(lens_data, cone_radius=cone_radius, impact=impact, key=key)
redshift_cut = np.logical_or(SNe_data['z'] > 0.2, SNe_data['z'] > 0.4)
mu_diff = SNe_data["mu_diff"][redshift_cut]
if impact:
if key is None:
conv = np.array(convergence_data[f"Radius{str(cone_radius)}"]["SNkappa"])[redshift_cut]
else:
conv = np.array(convergence_data[key][f"Radius{str(cone_radius)}"]["SNkappa"])[redshift_cut]
else:
conv = np.array(convergence_data[f"Radius{str(cone_radius)}"]["SNkappa"])[redshift_cut]
conv_rank = rankdata(conv)
mu_rank = rankdata(mu_diff)
diff = np.abs(conv_rank - mu_rank)
rho = 1 - 6 / (len(conv) * (len(conv) ** 2 - 1)) * np.sum(diff ** 2)
rho_err = np.sqrt((1 - rho ** 2) / (len(conv) - 1))
correlations.append(rho)
correlation_errs.append(rho_err)
if plot_correlation:
edges = np.linspace(-0.0065, 0.011, 6)
bins = (edges[1:] + edges[:-1]) / 2
mean_dmu = []
standard_error = []
for bin in bins:
dmus = []
for kappa, dmu in zip(conv, mu_diff):
if bin - 0.007 / 4 < kappa <= bin + 0.0007 / 4:
dmus.append(dmu)
mean_dmu.append(np.mean(dmus))
standard_error.append(np.std(dmus) / np.sqrt(len(dmus)))
plt.plot([min(conv), max(conv)], [0, 0], color=grey, linestyle='--')
plt.plot(conv, mu_diff, linestyle='', marker='o', markersize=2, color=colours[0])
plt.errorbar(bins, mean_dmu, standard_error, marker='s', color='r', markersize=3, capsize=3, linestyle='')
plt.xlabel('$\kappa$')
plt.ylabel('$\Delta\mu$')
# plt.xlim([-0.008, 0.011])
# plt.legend(frameon=0, loc='lower right')
# plt.ylim([-0.3, 0.3])
plt.text(0.0038, -0.19, f'$\\rho$ = {round(rho, 3)} $\pm$ {round(rho_err, 3)}', fontsize=16)
# print([convergence_cut[cuts2][i] for i in range(len(convergence_cut[cuts2]))])
# print([mu_diff_cut[cuts2][i] for i in range(len(convergence_cut[cuts2]))])
# print([SNmu_err_cut[cuts2][i] for i in range(len(convergence_cut[cuts2]))])
plt.show()
u_err = [correlations[i] + correlation_errs[i] for i in range(len(correlations))]
d_err = [correlations[i] - correlation_errs[i] for i in range(len(correlations))]
smooth_corr = savgol_filter([correlations[i] for i in range(len(correlations))], 11, 4)
smooth_u_err = savgol_filter(u_err, 11, 4)
smooth_d_err = savgol_filter(d_err, 11, 4)
if plot_radii:
plt.plot([6, 30], [0, 0], color=grey, linestyle='--')
plt.plot(RADII[29::2], smooth_corr, color=colours[0])
plt.plot(RADII[29::2], [correlations[i] for i in range(len(correlations))], marker='x', color=colours[1],
linestyle='')
plt.fill_between(RADII[29::2], smooth_u_err, smooth_d_err, color=colours[0], alpha=0.4)
plt.xlabel('Cone Radius (arcmin)')
plt.ylabel("Spearman's Rank Coefficient")
# plt.xlim([5, 30.1])
# plt.ylim([-0.18, 0.02])
plt.gca().invert_yaxis()
plt.show()
return [correlations, smooth_corr, smooth_u_err, smooth_d_err, np.array(u_err) - np.array(correlations)] | d507cd256a555b442fff1cd2a00862a5afdf0661 | 7,267 |
def ELCE2_null_estimator(p_err, K, rng):
"""
Compute the ELCE^2_u for one bootstrap realization.
Parameters
----------
p_err: numpy-array
one-dimensional probability error vector.
K: numpy-array
evaluated kernel function.
rng: type(np.random.RandomState())
a numpy random function
    Returns
    -------
float: an unbiased estimate of ELCE^2_u
"""
idx = rng.permutation(len(p_err))
return ELCE2_estimator(K, p_err[idx]) | 5b42e36ade4aba416bb8cbf6790d22fd8e4913b1 | 7,268 |
import curses
def select_from(stdscr, x, y, value, slist, redraw):
"""
Allows user to select from a list of valid options
:param stdscr: The current screen
:param x: The start x position to begin printing
    :param y: The start y position to begin printing
:param value: The current value chosen
:param slist: A list of values to choose from
    :return: A value from slist
"""
k = 0
padwidth = 100
pad = curses.newpad(1, padwidth)
height, width = stdscr.getmaxyx()
try:
idx = slist.index(value)
except ValueError:
stdscr.clear()
stdscr.refresh()
curses_safe_addstr(stdscr, 0, 0, str(value))
curses_safe_addstr(stdscr, 1, 0, str(type(value)))
curses_safe_addstr(stdscr, 2, 0, ','.join(map(str, slist)))
curses_safe_addstr(stdscr, 3, 0, ','.join(
list(map(str, map(type, slist)))))
stdscr.getch()
stdscr.clear()
stdscr.refresh()
draw_status_bar(stdscr,
"Press 'q' to exit and 'UP' or 'DOWN' to select a value")
while k != KEY_ENTER and k != ord('q'):
pad.clear()
value = str(slist[idx])
if len(value) + x >= width:
value = value[:width - x - 1]
if len(value) > padwidth:
padwidth = len(value) * 2
pad = curses.newpad(1, padwidth)
pad.addstr(0, 0, str(value))
stdscr.move(y, x + len(str(value)))
pad.refresh(0, 0, y, x, y, width - x)
k = stdscr.getch()
if k == curses.KEY_UP and idx > 0:
idx -= 1
elif k == curses.KEY_DOWN and idx < len(slist) - 1:
idx += 1
elif k == curses.KEY_RESIZE:
stdscr.erase()
height, width = stdscr.getmaxyx()
redraw(stdscr)
draw_status_bar(
stdscr,
"Press 'q' to exit and 'UP' or 'DOWN' to select a value")
return slist[idx] | 208283731317418bbe5ae1d16386584eaebcd626 | 7,269 |
def describe(r):
"""Return a dictionary with various statistics computed on r:
    mean, variance, skew, kurtosis, entropy, median, mode.
"""
stats = {}
stats['mean'] = r.mean()
stats['variance'] = r.var()
stats['skew'] = skew(r)
stats['kurtosis'] = kurtosis(r)
stats['median'] = np.median(r)
stats['entropy'] = entropy(r)
stats['mode'] = mode(r)
return stats | 7ed070110ac327ef69cf1c05eefdda16c21f7f0d | 7,270 |
def value_iteration(env,maxiter):
"""
Just like policy_iteration, this employs a similar approach.
Steps (to iterate over):
1) Find your optimum state_value_function, V(s).
2) Keep iterating until convergence
3) Calculate your optimized policy
Outputs:
- Your final state_value_function, V(s)
- Optimal policy 'pi'
- Average reward vector (see note below)
- List of all value functions for all iterations
NOTE: In order to produce the graph showing average reward over each
iteration, the policy was calculated at each iteration. This is not
normally done for Value Iteration. This will slow down the computation
time for Value iteration. To return to traditional value iteration,
comment out the respective lines and remove the appropriate output
"""
# intialize the state-Value function
V = np.zeros(nS)
V_hm = np.copy(V)
V_hm.resize((1,V_hm.size))
V_hm = V_hm.tolist()
# intialize a random policy. Comment out for traditional Value_Iteration
policy = np.random.randint(0, 4, nS)
avg_r_VI_mat = []
n_episode = 100
# Iterate over your optimized function, breaking if not changing or difference < tolerance.
for i in range(maxiter):
prev_V = np.copy(V)
# evaluate given policy
difference, V = Optimum_V(env, prev_V, maxiter, gamma)
# improve policy. Comment out to return to traditional Value Iteration
policy = policy_update(env, policy, V, gamma)
#Play episodes based on the current policy. Comment out to return to traditional Value Iteration
wins_VI, total_reward_VI, avg_reward_VI = play_episodes(env, n_episode, policy, random = False)
avg_r_VI_mat.append(avg_reward_VI)
# save value function to list for animation
V_tmp = np.copy(V)
V_tmp = V_tmp.tolist()
V_hm.append(V_tmp)
# if State Value function has not changed over 10 iterations, it has converged.
if i % 10 == 0:
# if values of 'V' not changing after one iteration
if (np.all(np.isclose(V, prev_V))):
print("")
print('No Changes for 10 iterations. Value converged at iteration %d' %(i+1))
break
elif difference < tol:
print('Tolerance reached. Value converged at iteration %d' %(i+1))
break
# Initialize Optimal Policy
optimal_policy = np.zeros(nS, dtype = 'int8')
# Update your optimal policy based on optimal value function 'V'
optimal_policy = policy_update(env, optimal_policy, V, gamma)
return V, optimal_policy, avg_r_VI_mat, V_hm | c116d0408d6edfa82763febb030633b815d69812 | 7,271 |
def is_excluded(branch_name):
"""
    We may want to explicitly exclude some BRANCHES from the list
    of BRANCHES to be merged. Check whether the supplied branch name
    is excluded; if it is, do not perform merging into it.
    Args:
        branch_name: The branch to check against the exclusion list.
    Returns:
        True if the branch should be excluded (no merges will be
        performed into it), otherwise False.
"""
return branch_name in BRANCHES_TO_EXCLUDE | d38923a84e7a3f9a40ebd101de5c542156fff7aa | 7,272 |
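Illustrative usage; BRANCHES_TO_EXCLUDE is module-level configuration defined elsewhere, so the value below is an assumption.
BRANCHES_TO_EXCLUDE = ['master', 'release/1.0']   # assumed configuration
print(is_excluded('master'))                      # True  -> skip merging into this branch
print(is_excluded('feature/login'))               # False -> merges allowed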
def schedule_contrib_conv2d_winograd_without_weight_transform(attrs, outs, target):
"""Schedule definition of conv2d_winograd_without_weight_transform"""
with target:
return topi.generic.schedule_conv2d_winograd_without_weight_transform(outs) | e186a727ccf69c3292d8807abd003485308754db | 7,273 |
import os
import sys
import mne
from mne.preprocessing import read_ica
from nipype.utils.filemanip import split_filename as split_f
from ephypype.preproc import create_ts
def preprocess_set_ica_comp_fif_to_ts(fif_file, subject_id, n_comp_exclude,
is_sensor_space):
"""Preprocess ICA fif to ts."""
subj_path, basename, ext = split_f(fif_file)
(data_path, sbj_name) = os.path.split(subj_path)
print(('*** SBJ %s' % subject_id + '***'))
# Read raw
current_dir = os.getcwd()
if os.path.exists(os.path.join(current_dir, '../ica',
basename + '_ica' + ext)):
raw_ica_file = os.path.join(
current_dir, '../ica', basename + '_ica' + ext)
elif os.path.exists(os.path.join(current_dir, '../ica',
basename + '_filt_ica' + ext)):
raw_ica_file = os.path.join(
current_dir, '../ica', basename + '_filt_ica' + ext)
elif os.path.exists(os.path.join(current_dir, '../ica',
basename + '_filt_dsamp_ica' + ext)):
raw_ica_file = os.path.join(
current_dir, '../ica', basename + '_filt_dsamp_ica' + ext)
print(('*** raw_ica_file %s' % raw_ica_file + '***'))
raw = mne.io.read_raw_fif(raw_ica_file, preload=True)
# load ICA
if os.path.exists(os.path.join(current_dir, '../ica',
basename + '_ica_solution.fif')):
ica_sol_file = os.path.join(
current_dir, '../ica', basename + '_ica_solution.fif')
elif os.path.exists(os.path.join(current_dir, '../ica',
basename + '_filt_ica_solution.fif')):
ica_sol_file = os.path.join(
current_dir, '../ica', basename + '_filt_ica_solution.fif')
elif os.path.exists(os.path.join(current_dir, '../ica',
basename + "_filt_dsamp_ica_solution."
"fif")):
ica_sol_file = os.path.join(
current_dir, '../ica', basename + '_filt_dsamp_ica_solution.fif')
if os.path.exists(ica_sol_file) is False:
print(('$$$ Warning, no %s found' % ica_sol_file))
sys.exit()
else:
ica = read_ica(ica_sol_file)
print(('\n *** ica.exclude before set components= ', ica.exclude))
if subject_id in n_comp_exclude:
print(('*** ICA to be excluded for sbj %s ' % subject_id))
print((' ' + str(n_comp_exclude[subject_id]) + '***'))
session_dict = n_comp_exclude[subject_id]
session_names = list(session_dict.keys())
componentes = []
for s in session_names:
componentes = session_dict[s]
if len(componentes) == 0:
print('\n no ICA to be excluded \n')
else:
print(('\n *** ICA to be excluded for session %s ' % s +
' ' + str(componentes) + ' *** \n'))
ica.exclude = componentes
print(('\n *** ica.exclude after set components = ', ica.exclude))
# apply ICA to raw data
new_raw_ica_file = os.path.join(subj_path, basename + '_ica-raw.fif')
raw_ica = ica.apply(raw)
raw_ica.save(new_raw_ica_file, overwrite=True)
# save ICA solution
print(ica_sol_file)
ica.save(ica_sol_file)
(ts_file, channel_coords_file, channel_names_file,
raw.info['sfreq']) = create_ts(new_raw_ica_file)
if is_sensor_space:
return (ts_file, channel_coords_file, channel_names_file,
raw.info['sfreq'])
else:
return (raw_ica, channel_coords_file, channel_names_file,
raw.info['sfreq']) | f70cd29a4a1e189dfe46e2713dfce95c8a21e8f9 | 7,274 |
import tensorflow as tf
from tensorflow_addons.utils.types import FloatTensorLike
def _phi(r: FloatTensorLike, order: int) -> FloatTensorLike:
"""Coordinate-wise nonlinearity used to define the order of the
interpolation.
See https://en.wikipedia.org/wiki/Polyharmonic_spline for the definition.
Args:
r: input op.
order: interpolation order.
Returns:
`phi_k` evaluated coordinate-wise on `r`, for `k = r`.
"""
    # using EPSILON prevents log(0), sqrt(0), etc.
# sqrt(0) is well-defined, but its gradient is not
with tf.name_scope("phi"):
if order == 1:
r = tf.maximum(r, EPSILON)
r = tf.sqrt(r)
return r
elif order == 2:
return 0.5 * r * tf.math.log(tf.maximum(r, EPSILON))
elif order == 4:
return 0.5 * tf.square(r) * tf.math.log(tf.maximum(r, EPSILON))
elif order % 2 == 0:
r = tf.maximum(r, EPSILON)
return 0.5 * tf.pow(r, 0.5 * order) * tf.math.log(r)
else:
r = tf.maximum(r, EPSILON)
return tf.pow(r, 0.5 * order) | 80a41c99a4ef8b396d16b02a6217eaa9191105f6 | 7,275 |
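A hedged call sketch for _phi(); EPSILON is normally a module-level constant, so the value below is an assumption.
EPSILON = 1e-10                          # assumed; the real module defines its own constant
r = tf.constant([0.0, 1.0, 4.0])
print(_phi(r, order=2).numpy())          # elementwise 0.5 * r * log(max(r, EPSILON))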
import numpy as np
def linbin(n, nbin=None, nmin=None):
"""Given a number of points to bin and the number of approximately
equal-sized bins to generate, returns [nbin_out,{from,to}].
nbin_out may be smaller than nbin. The nmin argument specifies
the minimum number of points per bin, but it is not implemented yet.
nbin defaults to the square root of n if not specified."""
if not nbin: nbin = int(np.round(n**0.5))
tmp = np.arange(nbin+1)*n//nbin
return np.vstack((tmp[:-1],tmp[1:])).T | 2d537131ad8d13e32375b74b9fa3e77088d046dd | 7,276 |
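For example, splitting 10 points into 3 roughly equal bins:
print(linbin(10, nbin=3))
# [[ 0  3]
#  [ 3  6]
#  [ 6 10]]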
import urllib.request as urllib_req
from bs4 import BeautifulSoup
def get_soup(url):
"""Gets the soup of the given URL.
:param url: (str) URL the get the soup from.
:return: Soup of given URL.
"""
header = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36'}
return BeautifulSoup(urllib_req.urlopen(urllib_req.Request(url, headers=header)), 'html.parser') | 69982a75a5c329b0d9e0ca7638c0ffffdc3ac21e | 7,277 |
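Usage sketch (requires network access and the beautifulsoup4 package); the URL is illustrative.
soup = get_soup('https://example.com')
print(soup.title.string if soup.title else 'no <title> found')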
import os.path as op
def msd_id_to_dirs(msd_id):
"""Given an MSD ID, generate the path prefix.
e.g. TRABCD12345678 -> A/B/C/TRABCD12345678"""
return op.join(msd_id[2], msd_id[3], msd_id[4], msd_id) | e210ae919de4fc8b037a7e8d7aabb6858b6e07f9 | 7,278 |
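For example, matching the docstring:
print(msd_id_to_dirs('TRABCD12345678'))   # A/B/C/TRABCD12345678 (OS-specific separators)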
from os.path import join
def get_data_path(sub_path):
"""Returns path to file in data folder."""
return join(_data_folder_path, sub_path) | 847b59cfea7f4d42b65f166230c032e71bb92ecd | 7,279 |
import io
import cyavro
def read_avro_bytes(URL, open_with, start_byte, length, header, nrows=None):
"""Pass a specific file/bytechunk and convert to dataframe with cyavro
Both a python dict version of the header, and the original bytes that
define it, are required. The bytes are prepended to the data, so that the
C avro reader can interpret them.
"""
with open_with(URL, 'rb') as f:
f.seek(start_byte)
if start_byte == 0:
header = read_header(f)
f.seek(header['header_size'])
data = header['head_bytes'] + f.read(length)
if nrows is None:
b = io.BytesIO(data)
header['blocks'] = []
scan_blocks(b, header, len(data))
nrows = sum(b['nrows'] for b in header['blocks'])
f = cyavro.AvroReader()
f.init_bytes(data)
df, arrs = empty(header['dtypes'].values(), nrows, cols=header['dtypes'])
f.init_reader()
f.init_buffers(10000)
for i in range(0, nrows, 10000):
d = f.read_chunk()
for c in d:
s = [f for f in header['schema']['fields'] if f['name'] == c][0]
if 'logicalType' in s:
df[c].values[i:i + 10000] = time_convert(d[c], s)
else:
df[c].values[i:i + 10000] = d[c]
return df | 9542eb13c1247de35f00a1fa370aba721f8657cd | 7,280 |
def get_launches(method="", **query):
"""Gets launches based on query strings
Gets launches based on query strings from
the API
Parameters
----------
method : str (optional)
the method used for the request
query : keyword args
keyword args based on the API query strings
Returns
-------
list
a list of the launches
"""
return _get("launches", method, query) | ca162affdd7aef187985e0d2c75e153ad75db162 | 7,281 |
def state(obj):
"""Gets the UnitOfWork state of a mapped object"""
return obj.__ming__.state | 1072265fe175ffcd581d14af5d4ee85f2941a5e4 | 7,282 |
def save_file_in_path(file_path, content):
"""Write the content in a file
"""
try:
with open(file_path, 'w', encoding="utf-8") as f:
f.write(content)
except Exception as err:
print(err)
return None
return file_path | 7b1e453a9b2a8c1211e111a6e8db432811d84a7a | 7,283 |
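For example:
path = save_file_in_path('/tmp/example.txt', 'hello world')
if path is None:
    print('write failed')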
import json
import datetime
def export_entity_for_model_and_options(request):
"""
Export entity list in a list of 'format' type.
@note EntityModelClass.export_list() must return a list of results.
User of the request is used to check for permissions.
"""
limit = int_arg(request.GET.get('limit', 100000))
app_label = request.GET['app_label']
validictory.validate(app_label, Entity.NAME_VALIDATOR)
model = request.GET['model']
validictory.validate(model, Entity.NAME_VALIDATOR)
    columns = request.GET.getlist('columns[]', ['id'])
    validictory.validate(columns, COLUMNS_VALIDATOR)
    file_format = request.GET['format']
    validictory.validate(file_format, {"type": "string"})
content_type = ContentType.objects.get_by_natural_key(app_label, model)
entity_model = content_type.model_class()
sort_by = json.loads(request.GET.get('sort_by', '[]'))
if not len(sort_by) or sort_by[-1] not in ('id', '+id', '-id'):
order_by = sort_by + ['id']
else:
order_by = sort_by
if request.GET.get('search'):
search = json.loads(request.GET['search'])
else:
search = None
if request.GET.get('filters'):
filters = json.loads(request.GET['filters'])
else:
filters = None
export_list = getattr(entity_model, 'export_list')
if export_list and callable(export_list):
cursor = None
columns, items = export_list(columns, cursor, search, filters, order_by, limit, request.user)
else:
# nothing to export
columns, items = [], []
exporter = DataExporter(columns, items)
if file_format == 'csv':
data = exporter.export_data_as_csv()
elif file_format == 'xlsx':
data = exporter.export_data_as_xslx()
else:
raise SuspiciousOperation("Invalid format")
timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H-%M-%S")
file_name = "%s-%s-%s" % (app_label, model, timestamp) + exporter.file_ext
response = StreamingHttpResponse(data, content_type=exporter.mime_type)
response['Content-Disposition'] = 'attachment; filename="' + file_name + '"'
response['Content-Length'] = exporter.size
return response | 5539d3fe66dd3163044acf7073e40e55cc1c3b5c | 7,284 |
import numpy as np
from scipy.ndimage import measurements
from skimage import morphology as morph
def gen_instance_hv_map(ann, crop_shape):
"""Input annotation must be of original shape.
The map is calculated only for instances within the crop portion
but based on the original shape in original image.
Perform following operation:
Obtain the horizontal and vertical distance maps for each
nuclear instance.
"""
orig_ann = ann.copy() # instance ID map
fixed_ann = fix_mirror_padding(orig_ann)
# re-cropping with fixed instance id map
crop_ann = cropping_center(fixed_ann, crop_shape)
# TODO: deal with 1 label warning
crop_ann = morph.remove_small_objects(crop_ann, min_size=30)
x_map = np.zeros(orig_ann.shape[:2], dtype=np.float32)
y_map = np.zeros(orig_ann.shape[:2], dtype=np.float32)
inst_list = list(np.unique(crop_ann))
inst_list.remove(0) # 0 is background
for inst_id in inst_list:
inst_map = np.array(fixed_ann == inst_id, np.uint8)
inst_box = get_bounding_box(inst_map)
# expand the box by 2px
# Because we first pad the ann at line 207, the bboxes
# will remain valid after expansion
inst_box[0] -= 2
inst_box[2] -= 2
inst_box[1] += 2
inst_box[3] += 2
inst_map = inst_map[inst_box[0] : inst_box[1], inst_box[2] : inst_box[3]]
if inst_map.shape[0] < 2 or inst_map.shape[1] < 2:
continue
# instance center of mass, rounded to nearest pixel
inst_com = list(measurements.center_of_mass(inst_map))
inst_com[0] = int(inst_com[0] + 0.5)
inst_com[1] = int(inst_com[1] + 0.5)
inst_x_range = np.arange(1, inst_map.shape[1] + 1)
inst_y_range = np.arange(1, inst_map.shape[0] + 1)
# shifting center of pixels grid to instance center of mass
inst_x_range -= inst_com[1]
inst_y_range -= inst_com[0]
inst_x, inst_y = np.meshgrid(inst_x_range, inst_y_range)
# remove coord outside of instance
inst_x[inst_map == 0] = 0
inst_y[inst_map == 0] = 0
inst_x = inst_x.astype("float32")
inst_y = inst_y.astype("float32")
# normalize min into -1 scale
if np.min(inst_x) < 0:
inst_x[inst_x < 0] /= -np.amin(inst_x[inst_x < 0])
if np.min(inst_y) < 0:
inst_y[inst_y < 0] /= -np.amin(inst_y[inst_y < 0])
# normalize max into +1 scale
if np.max(inst_x) > 0:
inst_x[inst_x > 0] /= np.amax(inst_x[inst_x > 0])
if np.max(inst_y) > 0:
inst_y[inst_y > 0] /= np.amax(inst_y[inst_y > 0])
####
x_map_box = x_map[inst_box[0] : inst_box[1], inst_box[2] : inst_box[3]]
x_map_box[inst_map > 0] = inst_x[inst_map > 0]
y_map_box = y_map[inst_box[0] : inst_box[1], inst_box[2] : inst_box[3]]
y_map_box[inst_map > 0] = inst_y[inst_map > 0]
hv_map = np.dstack([x_map, y_map])
return x_map, y_map, hv_map | e6a5d6f50d91e0e6b7cf27cb05568b6526608ff9 | 7,285 |
import torch
import torch.nn.functional as F
def jaccard_loss(true, logits, eps=1e-7):
"""Computes the Jaccard loss, a.k.a the IoU loss.
Note that PyTorch optimizers minimize a loss. In this
case, we would like to maximize the jaccard loss so we
return the negated jaccard loss.
Args:
true: a tensor of shape [B, H, W] or [B, 1, H, W].
logits: a tensor of shape [B, C, H, W]. Corresponds to
the raw output or logits of the model.
eps: added to the denominator for numerical stability.
Returns:
jacc_loss: the Jaccard loss.
"""
num_classes = logits.shape[1]
if num_classes == 1:
true_1_hot = torch.eye(num_classes + 1)[true.squeeze(1)]
true_1_hot = true_1_hot.permute(0, 3, 1, 2).float()
true_1_hot_f = true_1_hot[:, 0:1, :, :]
true_1_hot_s = true_1_hot[:, 1:2, :, :]
true_1_hot = torch.cat([true_1_hot_s, true_1_hot_f], dim=1)
pos_prob = torch.sigmoid(logits)
neg_prob = 1 - pos_prob
probas = torch.cat([pos_prob, neg_prob], dim=1)
else:
true_1_hot = torch.eye(num_classes)[true.squeeze(1)]
true_1_hot = true_1_hot.permute(0, 3, 1, 2).float()
probas = F.softmax(logits, dim=1)
true_1_hot = true_1_hot.type(logits.type())
dims = (0,) + tuple(range(2, true.ndimension()))
intersection = torch.sum(probas * true_1_hot, dims)
cardinality = torch.sum(probas + true_1_hot, dims)
union = cardinality - intersection
jacc_loss = (intersection / (union + eps)).mean()
return (1 - jacc_loss) | ae6c8f94662f48be81abf60c8a8fcd88f7ff7d81 | 7,286 |
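A minimal multi-class sketch (batch of 2, 3 classes, 4x4 maps); shapes follow the docstring, with true given as [B, 1, H, W].
logits = torch.randn(2, 3, 4, 4, requires_grad=True)
true = torch.randint(0, 3, (2, 1, 4, 4))
loss = jaccard_loss(true, logits)
loss.backward()
print(float(loss))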
def _get_distribution_schema():
""" get the schema for distribution type """
return schemas.load(_DISTRIBUTION_KEY) | 32af1d9547d978a8a57a799ba74723f21e05c756 | 7,287 |
import nudged
def compute_transforms(rmf_coordinates, mir_coordinates, node=None):
"""Get transforms between RMF and MIR coordinates."""
transforms = {
'rmf_to_mir': nudged.estimate(rmf_coordinates, mir_coordinates),
'mir_to_rmf': nudged.estimate(mir_coordinates, rmf_coordinates)
}
if node:
mse = nudged.estimate_error(transforms['rmf_to_mir'],
rmf_coordinates,
mir_coordinates)
node.get_logger().info(f"Transformation estimate error: {mse}")
return transforms | 3190a94cc406bb1199df3480202df4a3258912f9 | 7,288 |
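Usage sketch with two 2-D point sets related by a pure translation (requires the nudged package); the coordinates are illustrative.
rmf_pts = [[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]]
mir_pts = [[2.0, 3.0], [3.0, 3.0], [2.0, 4.0]]
tfs = compute_transforms(rmf_pts, mir_pts)
print(tfs['rmf_to_mir'].transform([1.0, 1.0]))   # approximately [3.0, 4.0]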
def merge_dicts(*list_of_dicts):
"""Merge a list of dictionaries and combine common keys into a list of values.
args:
list_of_dicts: a list of dictionaries. values within the dicts must be lists
dict = {key: [values]}
"""
output = {}
for dikt in list_of_dicts:
for k, v in dikt.items():
if not output.get(k):
output[k] = v
else:
output[k].extend(v)
output[k] = list(set(output[k]))
return output | 3d629bb9bc6af2a637a622fea158447b24c00bd0 | 7,289 |
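For example (duplicates are dropped via set(), so order within merged lists is not guaranteed, and the first dict's lists may be mutated in place):
a = {'fruits': ['apple', 'pear'], 'veg': ['kale']}
b = {'fruits': ['apple', 'plum']}
print(merge_dicts(a, b))
# e.g. {'fruits': ['plum', 'apple', 'pear'], 'veg': ['kale']}   (list order varies)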
def highpass_filter(src, size):
"""
    highpass_filter(src, size)
    High-pass filter.
    Parameters
    ----------
    src : image in AfmImg format
    size : int
        The size of the filter.
    Returns
    -------
    dst : image in AfmImg format
        The filtered image.
"""
def highpass(dft_img_src, *args):
dft_img = dft_img_src.copy()
        # build the filter mask
        mask = __make_filter(dft_img.shape, args[0], True)
        # apply the mask to the real and imaginary parts
dft_img = dft_img.real*mask + dft_img.imag*mask * 1j
return dft_img
dst = __dft_filter(src, highpass, size)
return dst | 7945e3556cdd2eb4fd1e2303cbba00052a4a5900 | 7,290 |
import re
import socket
import tldextract
from urllib.parse import urlparse
from IPy import IP
def parse_target(target):
"""
    Resolve the target into IP form.
    :param str target: the target to be parsed
    :return tuple: the resolved IP, the main domain and the matched domain
"""
scan_ip = ''
domain_result = ''
main_domain = ''
try:
url_result = re.findall('https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+', target)
if url_result == []:
ip_result = re.findall(r"\b(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\b", target)
if ip_result == []:
result = tldextract.extract(target)
main_domain = result.domain + '.' + result.suffix
domain_regex = re.compile(r'(?:[A-Z0-9_](?:[A-Z0-9-_]{0,247}[A-Z0-9])?\.)+(?:[A-Z]{2,6}|[A-Z0-9-]{2,}(?<!-))\Z', re.IGNORECASE)
domain_result = domain_regex.findall(target)
if domain_result:
scan_ip = socket.gethostbyname(domain_result[0])
else:
net = IP(target)
#print(net.len())
scan_ip = net
else:
scan_ip = ip_result[0]
else:
url_parse = urlparse(target)
result = tldextract.extract(target)
main_domain = result.domain + '.' + result.suffix
domain_regex = re.compile(r'(?:[A-Z0-9_](?:[A-Z0-9-_]{0,247}[A-Z0-9])?\.)+(?:[A-Z]{2,6}|[A-Z0-9-]{2,}(?<!-))\Z', re.IGNORECASE)
domain_result = domain_regex.findall(url_parse.netloc)
scan_ip = socket.gethostbyname(url_parse.hostname)
except Exception as e:
print(e)
finally:
pass
if domain_result:
domain_result = domain_result[0]
return scan_ip, main_domain, domain_result | 292d90eebefb8da5289b20914dfbcd9c294ee5b7 | 7,291 |
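Usage sketch (requires DNS/network access plus the tldextract and IPy packages); the resolved IP will vary over time.
scan_ip, main_domain, domain = parse_target('https://www.example.com/path')
print(scan_ip, main_domain, domain)   # e.g. 93.184.216.34 example.com www.example.com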
from torchgen.model import BaseTy, BaseType, ListType, OptionalType, Type
def isWrappedScalarType(typ: Type) -> bool:
"""
Given a type, determine if it is a c10::scalar which we will wrap in a lazy Value.
Since we literally change the type from scalarT to valueT, information is lost.
This function helps build a list of wrapped scalars to save that information
"""
if isinstance(typ, BaseType):
# I am regretting my naming conventions, but now we are wrapping at::scalar in
# lazy value, while preserving other 'scalar' types as scalars in the IR
return typ.name == BaseTy.Scalar
elif isinstance(typ, (OptionalType, ListType)):
return isWrappedScalarType(typ.elem)
return False | 0d854c734ddf3441dd2524c56d1d84d85fc7ac22 | 7,292 |
import numpy as np
def assign_topic(data, doc_topic_distr):
""" Assigns dominant topic to documents of corpus.
:param data: DF of preprocessed and filtered text data
:type data: pd.DataFrame
:param doc_topic_distr: Array of topic distribution per doc of corpus
:type doc_topic_distr: np.array
    :return: DF including the assigned topics
:rtype: pd.DataFrame
"""
data["topic_distribution"] = doc_topic_distr.tolist()
data["topic"] = np.argmax(doc_topic_distr, axis=1) + 1
return data | d53661831d9ee1431f989b290396be3ebde0582a | 7,293 |
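For example, with three documents and three topics (pandas assumed to be installed):
import pandas as pd
docs = pd.DataFrame({'text': ['doc one', 'doc two', 'doc three']})
distr = np.array([[0.7, 0.2, 0.1],
                  [0.1, 0.8, 0.1],
                  [0.2, 0.3, 0.5]])
print(assign_topic(docs, distr)['topic'].tolist())   # [1, 2, 3] (topics are 1-based)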
import bytecode
def _enable_scan_single_bytecode(code, name):
"""
Part of the ``_enable_scan`` that applies the scan behavior on a single
given list/set comprehension or generator expression code.
"""
bc = bytecode.Bytecode.from_code(code)
Instr = bytecode.Instr
# Updates LOAD_GLOBAL to LOAD_FAST when arg is name
for instr in bc:
if isinstance(instr, Instr) \
and instr.name == "LOAD_GLOBAL" and instr.arg == name:
instr.set("LOAD_FAST", name)
# Some needed information from the first/main FOR_ITER and the heading
# "filter" part of the generator expression or list/set comprehension
for_idx = next(idx for idx, instr in enumerate(bc)
if getattr(instr, "name", None) == "FOR_ITER")
for_instr = bc[for_idx]
begin_label_idx = for_idx - 1
try:
filter_last_idx = last(idx for idx, instr in enumerate(bc)
if isinstance(instr, Instr)
and instr.is_cond_jump()
and instr.arg == begin_label_idx)
except StopIteration:
filter_last_idx = for_idx
# Adds the block before the loop (i.e., first label) to append/add/yield
# the first input directly from FOR_ITER and save the first "prev"
# accordingly
heading_instructions = [("DUP_TOP",),
("STORE_FAST", name)] + {
"<listcomp>": [("LIST_APPEND", 2)],
"<setcomp>": [("SET_ADD", 2)],
"<genexpr>": [("YIELD_VALUE",),
("POP_TOP",)]
}[bc.name]
bc[begin_label_idx:begin_label_idx] = (
[instr.copy() for instr in bc[for_idx:filter_last_idx + 1]] +
[Instr(*args) for args in heading_instructions]
)
# Adds ending block that stores the result to prev before a new iteration
loop_instructions = ["SET_ADD", "LIST_APPEND", "YIELD_VALUE"]
ending_idx = next(-idx for idx, instr in enumerate(reversed(bc), 1)
if isinstance(instr, Instr)
and instr.name in loop_instructions)
ending_instructions = [("DUP_TOP",),
("STORE_FAST", name)]
bc[ending_idx:ending_idx] = \
[Instr(*args) for args in ending_instructions]
return bc.to_code() | d1bd12f50961869e09d4cbdc121e01abbe34232a | 7,294 |
def composite_rotation(r, p1=qt.QH([1, 0, 0, 0]), p2=qt.QH([1, 0, 0, 0])):
"""A composite function of next_rotation."""
return next_rotation(next_rotation(r, p1), p2) | 335363a18efb36d28c87b0266bd4dcdd27b6b85a | 7,295 |
import numpy as np
def extract_vectors_ped_feature(residues, conformations, key=None, peds=None, features=None, indexes=False, index_slices=False):
"""
    This function allows you to extract information about the PED features from the data structure. In particular, it allows extracting:
    - all rows, or a specific subset of them, containing a certain feature (i.e., RD, EN, MED_ASA, etc.)
    - the interval extremes for a certain feature (i.e., RD, EN, MED_ASA, etc.)
    - all the feature intervals as slices
    :param residues: number of residues in the model
    :param conformations: maximum number of conformations available
    :param key: the key of the feature, or None to consider all of them, default: None
    :param peds: the PED id, or None to consider all of them, default: None
    :param features: matrix of features, or None if extracting only the indexes, default: None
    :param indexes: return the (begin, end) indexes of a feature if True, default: False
    :param index_slices: return all the feature intervals as slices if True, default: False
    :return: begin/end indexes, slices, or features
"""
begin = end = -1
residues = int(residues)
conformations = int(conformations)
slices = []
if key == 'PED_ID' or index_slices:
begin = 0
end = 1
slices.append(slice(begin, end))
if key == 'RD' or index_slices:
begin = 1
end = conformations + 1
slices.append(slice(begin, end))
if key == 'EN' or index_slices:
begin = conformations + 1
end = conformations + residues + 1
slices.append(slice(begin, end))
if key == 'MED_ASA' or index_slices:
begin = conformations + residues + 1
end = conformations + 2 * residues + 1
slices.append(slice(begin, end))
if key == 'MED_RMSD' or index_slices:
begin = conformations + 2 * residues + 1
end = conformations + 3 * residues + 1
slices.append(slice(begin, end))
if key == 'MED_DIST' or index_slices:
begin = conformations + 3 * residues + 1
end = int(conformations + 3 * residues + 1 + residues * (residues - 1) / 2)
slices.append(slice(begin, end))
if key == 'STD_DIST' or index_slices:
begin = int(conformations + 3 * residues + 1 + residues * (residues - 1) / 2)
end = None
slices.append(slice(begin, end))
begin = int(begin)
if end is not None:
end = int(end)
if begin == -1:
return None
if index_slices:
return slices
if indexes is True or features is None:
return begin, end
if peds is None:
return features[:, begin:end]
else:
if isinstance(peds, int):
return np.array(features[peds][begin:end])
else:
return features[peds, begin:end] | 3d0efb833ffd80303e2494d017c12e1d06d10bcc | 7,296 |
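For example, with 5 residues and 2 conformations the feature layout and the MED_ASA index range can be inspected without a feature matrix:
print(extract_vectors_ped_feature(5, 2, index_slices=True))
# [slice(0, 1), slice(1, 3), slice(3, 8), slice(8, 13), slice(13, 18), slice(18, 28), slice(28, None)]
print(extract_vectors_ped_feature(5, 2, key='MED_ASA', indexes=True))   # (8, 13)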
def Load_File(filename):
"""
Loads a data file
"""
with open(filename) as file:
data = file.readlines()
return data | f49aa4474d9af0b8a778b9575e282eb579c103ab | 7,297 |
from scipy.ndimage import morphology
import numpy
def massage_isig_and_dim(isig, im, flag, band, nm, nu, fac=None):
"""Construct a WISE inverse sigma image and add saturation to flag.
unWISE provides nice inverse variance maps. These however have no
contribution from Poisson noise from sources, and so underestimate
the uncertainties dramatically in bright regions. This can pull the
whole fit awry in bright areas, since the sky model means that every
pixel feels every other pixel.
It's not clear what the best solution is. We make a goofy inverse
sigma image from the original image and the inverse variance image. It
is intended to be sqrt(ivar) for the low count regime and grow like
sqrt(1/im) for the high count regime. The constant of proportionality
should in principle be worked out; here I set it to 0.15, which worked
once, and it doesn't seem like this should depend much on which
WISE exposure the image came from? It's ultimately something like the gain
or zero point...
"""
if fac is None:
bandfacs = {1: 0.15, 2: 0.3}
bandfloors = {1: 0.5, 2: 2}
fac = bandfacs[band]
floor = bandfloors[band]
satbit = 16 if band == 1 else 32
satlimit = 85000 # if band == 1 else 130000
msat = ((flag & satbit) != 0) | (im > satlimit) | ((nm == 0) & (nu > 1))
# dilate = morphology.iterate_structure(
# morphology.generate_binary_structure(2, 1), 3)
xx, yy = numpy.mgrid[-3:3+1, -3:3+1]
dilate = xx**2+yy**2 <= 3**2
msat = morphology.binary_dilation(msat, dilate)
isig[msat] = 0
flag = flag.astype('i8')
# zero out these bits; we claim them for our own purposes.
massagebits = (extrabits['crowdsat'] | crowdsource.nodeblend_maskbit |
crowdsource.sharp_maskbit | extrabits['nebulosity'])
flag &= ~massagebits
flag[msat] |= extrabits['crowdsat']
flag[(flag & nodeblend_bits) != 0] |= crowdsource.nodeblend_maskbit
flag[(flag & sharp_bits) != 0] |= crowdsource.sharp_maskbit
sigma = numpy.sqrt(1./(isig + (isig == 0))**2 + floor**2 +
fac**2*numpy.clip(im, 0, numpy.inf))
sigma[msat] = numpy.inf
sigma[isig == 0] = numpy.inf
return (1./sigma).astype('f4'), flag | b0bf70ddfff3a6b0a48005b9e1069c5c5f670dac | 7,298 |
import subprocess
def sh(arg):
"""
Execute command in a background shell.
Args:
arg (str or list): shell command, or a list of shell commands.
"""
if isinstance(arg, list):
return [sh(a) for a in arg]
else:
return subprocess.check_output(arg, shell=True).decode("utf-8").strip() | bfde2eaca0b25a0c8012f5541b72a6f142d1180f | 7,299 |
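For example (commands run through the system shell, so output depends on the platform):
print(sh('echo hello'))               # 'hello'
print(sh(['echo one', 'echo two']))   # ['one', 'two']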