content (string, lengths 35-762k) | sha1 (string, length 40) | id (int64, 0-3.66M)
---|---|---|
import urllib.parse
import urllib.request
import json
def fetch_object(object_id: int, url: str):
"""
    Fetch a single object from a feature layer. We have to fetch objects one by one, because they
    can get pretty big. Big enough that if you ask for more than one at a time, you're likely to
    encounter 500 errors.
object_id: object id to fetch (e.g. 1)
url: layer url to fetch (e.g. https://maps.gov.bc.ca/arcserver/rest/services/whse/bcgw_pub_whse_legal_admin_boundaries/MapServer/2)
"""
print(f'fetching object {object_id}')
params = {
'where': f'objectid={object_id}',
'geometryType': 'esriGeometryEnvelope',
'spatialRel': 'esriSpatialRelIntersects',
# 'outSR': '102100',
'outFields': '*',
'returnGeometry': 'true',
'returnIdsOnly': 'false',
'f': 'geojson'
}
encode_params = urllib.parse.urlencode(params).encode("utf-8")
print(f'{url}/query?{encode_params.decode()}')
with urllib.request.urlopen(f'{url}/query?', encode_params) as response:
json_data = json.loads(response.read())
return json_data | d193d9368eec79028beeb545a3fe411fa0c131bc | 14,000 |
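A minimal usage sketch, assuming network access; the layer URL is the one given in the docstring.
layer_url = ('https://maps.gov.bc.ca/arcserver/rest/services/whse/'
             'bcgw_pub_whse_legal_admin_boundaries/MapServer/2')
feature_collection = fetch_object(1, layer_url)   # GeoJSON for objectid=1
print(feature_collection.get('type'), len(feature_collection.get('features', [])))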
import numpy as np
import scipy.stats as st
def density_forecast_param(Yp, sigma, _, rankmatrix, errordist_normed, dof):
    """creates a density forecast for Yp with the Schaake Shuffle
Parameters
----------
Yp: numpy.array
24-dimensional array with point-predictions of day ahead prices
sigma: numpy.array
Variance prediction for each hour
    _ :
        unused argument (ignored)
rankmatrix: numpy.array
Matrix with rank positions of forecast samples
errordist_normed: numpy.array
Realized normed prediction errors
dof: int
Degrees of Freedom of parametric margins
0: Normal distribution
>0: t-distribution
Returns
-------
newdataarray: numpy.array
Array containing the density predictions of day ahead price
"""
# Initialize
errordist=errordist_normed.copy()
nzero=np.size(rankmatrix,axis=0)
n_sample=np.size(errordist, axis=0)
sqrtsigma = np.sqrt(sigma)
#
for h in range(24):
# Assume Normal distribution for dof==0
if dof[0]==0:
errordist[:, h]=np.linspace(st.norm(Yp[0, h], sqrtsigma[h]).ppf(1 / (n_sample + 1)), st.norm(Yp[0, h], sqrtsigma[h]).ppf(n_sample / (n_sample + 1)), n_sample)
# Assume t-distribution with given degrees of freedom
else:
errordist[:, h] = np.linspace(st.t(loc=Yp[0, h], scale=sqrtsigma[h],df=dof[h]).ppf(1 / (n_sample + 1)),
st.t(loc=Yp[0, h], scale=sqrtsigma[h],df=dof[h]).ppf(n_sample / (n_sample + 1)), n_sample)
Yt = np.zeros(shape=(nzero, 24))
u_new = np.arange(1, nzero + 1) / (nzero + 1)
std_error = np.zeros(shape=(nzero, 24))
for h in range(24):
helper = np.sort(errordist[:, h])
std_error_pos = np.array(np.floor(u_new * np.size(errordist, axis=0)), dtype='int')
std_error[:, h] = helper[std_error_pos]
for i in range(nzero):
Yt[i, :] = std_error[i, :]
# order newdata according to rank-matrix
newdataarray = np.zeros(shape=(nzero, 24))
for col in range(24):
for i in range(0, nzero):
            rank_idx = int(rankmatrix[i, col] - 1)
            newdataarray[i, col] = Yt[rank_idx, col]
return newdataarray | 809458a7d3de0ae2997f392e52f91a9b4c02e181 | 14,001 |
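A minimal usage sketch with synthetic inputs; the shapes are assumptions inferred from the docstring (24 hourly prices, `rankmatrix` holding ranks 1..n per hour, `dof` of zeros selecting Normal margins).
Yp = np.random.randn(1, 24)                                   # point forecast per hour
sigma = np.abs(np.random.randn(24)) + 0.1                     # variance forecast per hour
rankmatrix = np.argsort(np.random.randn(50, 24), axis=0) + 1  # ranks 1..50 per hour
errordist_normed = np.random.randn(100, 24)                   # realized normed errors
dof = np.zeros(24, dtype=int)                                 # 0 -> Normal margins
scenarios = density_forecast_param(Yp, sigma, None, rankmatrix, errordist_normed, dof)
print(scenarios.shape)                                        # (50, 24)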
import cv2
import numpy as np
def gaussian_blur(img: np.ndarray, kernel_size: int) -> np.ndarray:
    """Applies a Gaussian blur with a kernel_size x kernel_size kernel."""
if not is_valid_kernel_size(kernel_size):
raise ValueError(
"kernel_size must either be 0 or a positive, odd integer")
return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0) | 6bedc6b15848c18ed52c8348f3bec1b4181f74d7 | 14,002 |
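`is_valid_kernel_size` is not defined above; a plausible stand-in matching the error message, plus a quick usage example (both are assumptions, not part of the original):
def is_valid_kernel_size(kernel_size: int) -> bool:
    # 0 or a positive odd integer, per the ValueError text above
    return kernel_size == 0 or (kernel_size > 0 and kernel_size % 2 == 1)

img = (np.random.rand(64, 64) * 255).astype(np.uint8)
blurred = gaussian_blur(img, 5)   # smooth with a 5x5 Gaussian kernel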
def get_controls_snapshots_count(selenium, src_obj):
"""Return dictionary with controls snapshots actual count and count taken
from tab title."""
controls_ui_service = webui_service.ControlsService(selenium)
return {
"controls_tab_count": controls_ui_service.get_count_objs_from_tab(
src_obj=src_obj),
"controls_count": len(controls_ui_service.get_list_objs_from_tree_view(
src_obj=src_obj))} | 5e6a11a2a94093850f810e0ec6c93037a9f40bca | 14,003 |
import random
import math
import networkx as nx
from networkx import empty_graph
def fast_gnp_random_graph(n, p, seed=None, directed=False):
"""Returns a `G_{n,p}` random graph, also known as an Erdős-Rényi graph or
a binomial graph.
Parameters
----------
n : int
The number of nodes.
p : float
Probability for edge creation.
seed : int, optional
Seed for random number generator (default=None).
directed : bool, optional (default=False)
If ``True``, this function returns a directed graph.
Notes
-----
The `G_{n,p}` graph algorithm chooses each of the `[n (n - 1)] / 2`
(undirected) or `n (n - 1)` (directed) possible edges with probability `p`.
This algorithm runs in `O(n + m)` time, where `m` is the expected number of
edges, which equals `p n (n - 1) / 2`. This should be faster than
:func:`gnp_random_graph` when `p` is small and the expected number of edges
is small (that is, the graph is sparse).
See Also
--------
gnp_random_graph
References
----------
.. [1] Vladimir Batagelj and Ulrik Brandes,
"Efficient generation of large random networks",
Phys. Rev. E, 71, 036113, 2005.
"""
G = empty_graph(n)
G.name="fast_gnp_random_graph(%s,%s)"%(n,p)
    if seed is not None:
random.seed(seed)
if p <= 0 or p >= 1:
return nx.gnp_random_graph(n,p,directed=directed)
w = -1
lp = math.log(1.0 - p)
if directed:
G = nx.DiGraph(G)
# Nodes in graph are from 0,n-1 (start with v as the first node index).
v = 0
while v < n:
lr = math.log(1.0 - random.random())
w = w + 1 + int(lr/lp)
if v == w: # avoid self loops
w = w + 1
while w >= n and v < n:
w = w - n
v = v + 1
if v == w: # avoid self loops
w = w + 1
if v < n:
G.add_edge(v, w)
else:
# Nodes in graph are from 0,n-1 (start with v as the second node index).
v = 1
while v < n:
lr = math.log(1.0 - random.random())
w = w + 1 + int(lr/lp)
while w >= v and v < n:
w = w - v
v = v + 1
if v < n:
G.add_edge(v, w)
return G | f84c577a4f575186913980c8d9a5dcc16d771291 | 14,004 |
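A small usage sketch (assumes networkx is installed); with n=1000 and p=0.002 the expected edge count is p*n*(n-1)/2, roughly 1000.
G = fast_gnp_random_graph(1000, 0.002, seed=42)
print(G.number_of_nodes(), G.number_of_edges())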
import math
def round_to(f: float, p: int = 0) -> float:
"""Round to the specified precision using "half up" rounding."""
# Do no rounding, just return a float with full precision
if p == -1:
return float(f)
# Integer rounding
elif p == 0:
return round_half_up(f)
# Round to the specified precision
else:
whole = int(f)
digits = 0 if whole == 0 else int(math.log10(-whole if whole < 0 else whole)) + 1
return round_half_up(whole if digits > p else f, p - digits) | ad464bced2e2b1b87208f61e7ca73b42d5e31fa5 | 14,005 |
def get_interface_type(interface):
"""Gets the type of interface
"""
if interface.upper().startswith('GI'):
return 'GigabitEthernet'
elif interface.upper().startswith('TE'):
return 'TenGigabitEthernet'
elif interface.upper().startswith('FA'):
return 'FastEthernet'
elif interface.upper().startswith('FO'):
return 'FortyGigabitEthernet'
elif interface.upper().startswith('LON'):
return 'LongReachEthernet'
elif interface.upper().startswith('ET'):
return 'Ethernet'
elif interface.upper().startswith('VL'):
return 'Vlan'
elif interface.upper().startswith('LO'):
return 'loopback'
elif interface.upper().startswith('PO'):
return 'Port-channel'
elif interface.upper().startswith('NV'):
return 'nve'
elif interface.upper().startswith('TWE'):
return 'TwentyFiveGigE'
elif interface.upper().startswith('HU'):
return 'HundredGigE'
else:
return 'unknown' | 8a898f75e0e05715e0ced7258b8e8d4bf9905377 | 14,006 |
def __get_global_options(cmd_line_options, conf_file_options=None):
""" Get all global options
:type cmd_line_options: dict
:param cmd_line_options: Dictionary with all command line options
:type conf_file_options: dict
:param conf_file_options: Dictionary with all config file options
:returns: dict
"""
options = {}
for option in DEFAULT_OPTIONS['global'].keys():
options[option] = DEFAULT_OPTIONS['global'][option]
if conf_file_options and option in conf_file_options:
options[option] = conf_file_options[option]
if cmd_line_options and option in cmd_line_options:
options[option] = cmd_line_options[option]
return options | 3c9880616ae274f4254cdd29558f1022fdfc6ff4 | 14,007 |
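A hypothetical illustration of the precedence the loop implements: defaults are overridden by the config file, which is overridden by the command line. The DEFAULT_OPTIONS value below is an assumption, not the module's real one.
DEFAULT_OPTIONS = {'global': {'region': 'us-east-1', 'dry-run': False}}
opts = __get_global_options(
    cmd_line_options={'region': 'eu-west-1'},
    conf_file_options={'region': 'ap-south-1', 'dry-run': True})
print(opts)   # {'region': 'eu-west-1', 'dry-run': True}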
def download_file(service, drive_file):
"""Download a file's content.
Args:
service: Drive API service instance.
drive_file: Drive File instance.
Returns:
File's content if successful, None otherwise.
"""
download_url = drive_file.get('downloadUrl')
if download_url:
resp, content = service._http.request(download_url)
if resp.status == 200:
#print 'Status: %s' % resp
return content
else:
#print 'An error occurred: %s' % resp
return None
else:
# The file doesn't have any content stored on Drive.
return None | fa8ad859e47dbaec0cb9a4eea0be5497239e359e | 14,008 |
def get_include_file_end_before(block: Block) -> str:
"""
>>> # test end-before set to 'end-marker'
>>> block = lib_test.get_test_block_ok()
>>> get_include_file_end_before(block)
'# end-marker'
>>> assert block.include_file_end_before == '# end-marker'
>>> # test end-before not set
>>> block = lib_test.get_test_block_end_before_not_set()
>>> get_include_file_end_before(block)
''
>>> # test end-before invalid
>>> block = lib_test.get_test_block_end_before_invalid()
>>> get_include_file_end_before(block) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: Error in File ".../README.template.rst", Line 47106: option "end-before" has no value
"""
include_file_end_before = block.include_file_end_before
if lib_block_options.is_option_in_block('end-before', block):
include_file_end_before = lib_block_options.get_option_value_from_block_or_raise_if_empty_or_invalid('end-before', block)
block.include_file_end_before = include_file_end_before
return include_file_end_before | c8ae330fb24a2a7e5304d8a5e5c5438bf9346c63 | 14,009 |
import torch
import random
def add_random_circles(tensor: torch.Tensor, n_circles: int, equalize_overlaps: bool = True):
"""Adds n_circles random circles onto the image."""
height, width = tensor.shape
circle_img = torch.zeros_like(tensor)
for _ in range(n_circles):
circle_img = add_circle(circle_img, {'x': random.randint(0, width), 'y': random.randint(0, height)}, random.randint(1, int(max(height, width) / 30)))
tensor += (circle_img != 0)
if equalize_overlaps:
tensor = (tensor != 0)
return tensor.type(torch.FloatTensor) | 17e0cf8d53cf0f8b3c542f0fd0f49151c6842ba9 | 14,010 |
def sample_quadric_surface(quadric, center, samples):
"""Samples the algebraic distance to the input quadric at sparse locations.
Args:
quadric: Tensor with shape [..., 4, 4]. Contains the matrix of the quadric
surface.
center: Tensor with shape [..., 3]. Contains the [x,y,z] coordinates of the
center of the coordinate frame of the quadric surface in NIC space with a
top-left origin.
samples: Tensor with shape [..., N, 3], where N is the number of samples to
evaluate. These are the sample locations in the same space in which the
quadric surface center is defined. Supports broadcasting the batching
dimensions.
Returns:
distances: Tensor with shape [..., N, 1]. Contains the algebraic distance
to the surface at each sample.
"""
with tf.name_scope('sample_quadric_surface'):
batching_dimensions = quadric.get_shape().as_list()[:-2]
batching_rank = len(batching_dimensions)
tf_util.assert_shape(quadric, batching_dimensions + [4, 4],
'sample_quadric_surface:quadric')
tf_util.assert_shape(center, batching_dimensions + [-1],
'sample_quadric_surface:center')
tf_util.assert_shape(samples, batching_rank * [-1] + [-1, 3],
'sample_quadric_surface:samples')
# We want to transform the coordinates so that they are in the coordinate
# frame of the conic section matrix, so we subtract the center of the
# conic.
samples = samples - tf.expand_dims(center, axis=batching_rank)
sample_count = samples.get_shape().as_list()[-2]
homogeneous_sample_ones = tf.ones(
samples.get_shape().as_list()[:-1] + [1], dtype=tf.float32)
homogeneous_sample_coords = tf.concat([samples, homogeneous_sample_ones],
axis=-1)
# When we transform the coordinates per-image, we broadcast on both sides-
# the batching dimensions broadcast up the coordinate grid, and the
# coordinate center broadcasts up along the height and width.
# Per-pixel, the algebraic distance is v^T * M * v, where M is the matrix
# of the conic section, and v is the homogeneous column vector [x y z 1]^T.
half_distance = tf.matmul(
quadric, homogeneous_sample_coords, transpose_b=True)
rank = batching_rank + 2
half_distance = tf.transpose(
half_distance, perm=list(range(rank - 2)) + [rank - 1, rank - 2])
algebraic_distance = tf.reduce_sum(
tf.multiply(homogeneous_sample_coords, half_distance), axis=-1)
return tf.reshape(algebraic_distance,
batching_dimensions + [sample_count, 1]) | e4448be0058f4a8010a72eaf9506e95695b1b35e | 14,011 |
def mol2df(mols: Mols[pd.DataFrame], multiindex=False) -> pd.DataFrame:
"""
flattens a mol into a dataframe with the columns containing the start, stop and price
:param mols: mols to transform
:return:
"""
if multiindex:
flat = {
((start, stop), price): series
for (start, stop), mol in mols.items()
for price, series in mol.items()
}
else:
flat = {
f"{start} -> {stop}: {price}": series
for (start, stop), mol in mols.items()
for price, series in mol.items()
}
return pd.concat(flat, axis="columns") | 63b16fa99a9c76a29cbef8755cf29928f05637f6 | 14,012 |
from typing import Tuple
def load_sequence_classifier_configs(args) -> Tuple[WrapperConfig, pet.TrainConfig, pet.EvalConfig]:
"""
Load the model, training and evaluation configs for a regular sequence classifier from the given command line
arguments. This classifier can either be used as a standalone model or as the final classifier for PET/iPET.
"""
model_cfg = WrapperConfig(
model_type=args.model_type,
model_name_or_path=args.model_name_or_path,
wrapper_type=SEQUENCE_CLASSIFIER_WRAPPER,
task_name=args.task_name,
label_list=args.label_list,
max_seq_length=args.sc_max_seq_length,
verbalizer_file=args.verbalizer_file,
cache_dir=args.cache_dir,
)
train_cfg = pet.TrainConfig(
device=args.device,
per_gpu_train_batch_size=args.sc_per_gpu_train_batch_size,
per_gpu_unlabeled_batch_size=args.sc_per_gpu_unlabeled_batch_size,
n_gpu=args.n_gpu,
num_train_epochs=args.sc_num_train_epochs,
max_steps=args.sc_max_steps,
min_steps=args.sc_min_steps,
temperature=args.temperature,
gradient_accumulation_steps=args.sc_gradient_accumulation_steps,
weight_decay=args.weight_decay,
learning_rate=args.learning_rate,
adam_epsilon=args.adam_epsilon,
warmup_steps=args.warmup_steps,
logging_steps=args.logging_steps,
logging_number=args.logging_number,
max_grad_norm=args.max_grad_norm,
use_logits=args.method != "sequence_classifier",
local_rank=args.local_rank,
)
eval_cfg = pet.EvalConfig(
device=args.device,
n_gpu=args.n_gpu,
metrics=args.metrics,
per_gpu_eval_batch_size=args.sc_per_gpu_eval_batch_size,
local_rank=args.local_rank,
)
return model_cfg, train_cfg, eval_cfg | 8729851faae06ed7c0331960db4f933283e7278e | 14,013 |
def gender(word):
""" Returns the gender for the given word, either:
MALE, FEMALE, (MALE, FEMALE), (MALE, PLURAL) or (FEMALE, PLURAL).
"""
w = word.lower()
# Adjectives ending in -e: cruciale, difficile, ...
if w.endswith(("ale", "ile", "ese", "nte")):
return (MALE, FEMALE)
# Most nouns ending in -a (-e) are feminine, -o (-i) masculine:
if w.endswith(("ore", "ista", "mma")):
return MALE
if w.endswith(("a", u"tà", u"tù", "ione", "rice")):
return FEMALE
if w.endswith(("e", "oni")):
return (FEMALE, PLURAL)
if w.endswith("i"):
return (MALE, PLURAL)
if w.endswith("o"):
return MALE
return MALE | 7a8384d778b9aec9fcc5eb32f26c282805cdfa0b | 14,014 |
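Assuming the MALE, FEMALE and PLURAL constants defined by the surrounding module, a few illustrative calls:
print(gender("libertà"))    # ends in -tà  -> FEMALE
print(gender("difficile"))  # ends in -ile -> (MALE, FEMALE)
print(gender("crisi"))      # ends in -i   -> (MALE, PLURAL)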
from typing import Counter
def fcmp(d,r):
"""
Compares two files, d and r, cell by cell. Float comparisons
are made to 4 decimal places. Extending this function could
be a project in and of itself.
"""
# we need to compare the files
    dh = open(d, 'r')
    rh = open(r, 'r')
    dlines = dh.readlines()
    rlines = rh.readlines()
    boolCounter = Counter()
    for dline, rline in zip(dlines, rlines):
        for dc, rc in zip(dline.split(','), rline.split(',')):
            if _isfloat(dc):
                # numeric cells: compare to 4 decimal places
                if round(float(dc), 4) != round(float(rc), 4):
                    boolCounter[False] += 1
                else:
                    boolCounter[True] += 1
            else:
                # non-numeric cells: compare as strings
                if dc != rc:
                    boolCounter[False] += 1
                else:
                    boolCounter[True] += 1
dh.close()
rh.close()
if all(boolCounter):
return True
else:
return False | 9f6f24314316fbef26ce0fb404a88d34c3049b2b | 14,015 |
import numpy as np
def is_vector_equal(vec1, vec2, tolerance=1e-10):
"""Compare if two vectors are equal (L1-norm) according to a tolerance"""
return np.all(np.abs(vec1 - vec2) <= tolerance) | 9bb42fa3bc2cbb25edd6eabeddb2aa2d8d93e5c8 | 14,016 |
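Quick illustration of the element-wise tolerance check:
print(is_vector_equal(np.array([1.0, 2.0]), np.array([1.0, 2.0 + 1e-12])))  # True
print(is_vector_equal(np.array([1.0, 2.0]), np.array([1.0, 2.1])))          # False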
def partition_pair(bif_point):
"""Calculate the partition pairs at a bifurcation point.
    The number of nodes in each child subtree is counted; the partition
    pair at the branch point is that pair of counts.
    """
n = float(sum(1 for _ in bif_point.children[0].ipreorder()))
m = float(sum(1 for _ in bif_point.children[1].ipreorder()))
return (n, m) | 7889eb95a0ac3b2a7d1138061a4651b1e79427c0 | 14,017 |
def readPyCorrFit(file):
"""
Read header and data of .csv PyCorrFit output file
========== ===============================================================
Input Meaning
---------- ---------------------------------------------------------------
file String with path to .csv file
========== ===============================================================
========== ===============================================================
Output Meaning
---------- ---------------------------------------------------------------
outputdata Object with the tau, G, Gfit, Gres in data field, and separate
fields for the fitted values n, SP, offset, and chi2
========== ===============================================================
"""
# create object
outputdata = PyCorrFitData()
# read .csv header
f = open(file, "r")
if f.mode == "r":
contents = f.read()
start = contents.find("Parameters:")
[n, start] = readPyCorrFitSingleParam(contents, "# n\t", "\n", start)
[tauD, start] = readPyCorrFitSingleParam(contents, "_diff [ms]\t", "\n", start)
[SP, start] = readPyCorrFitSingleParam(contents, "# SP\t", "\n", start)
[offset, start] = readPyCorrFitSingleParam(contents, "# offset\t", "\n", start)
start = contents.find("Fitting:", start)
[chi2, start] = readPyCorrFitSingleParam(contents, "\t", "\n", start)
[Gfitstart, start] = readPyCorrFitSingleParam(contents, "# Ival start [ms]\t", "\n", start)
[Gfitstop, start] = readPyCorrFitSingleParam(contents, "# Ival end [ms]\t", "\n", start)
outputdata.n = n
outputdata.tauD = tauD
outputdata.SP = SP
outputdata.offset = offset
outputdata.chi2 = chi2
outputdata.Gfitstart = Gfitstart
outputdata.Gfitstop = Gfitstop
# load .csv file
data = csv2array(file)
# extract data
tau = data[:,0]
G = data[:,1]
Gfit = data[:,2]
Gres = G - Gfit
outputdata.data = np.stack((tau, G, Gfit, Gres), axis=1)
return outputdata | 0dcaa26c0ef2f8270748241cbd03bc6aaa750672 | 14,018 |
from datetime import timedelta
def end_of_time(t):
""" Return the next hour of the passed time. e.g, 18:25:36 --> 19:00:00 """
return t + timedelta(minutes=60) - timedelta(minutes=t.minute) - timedelta(seconds=t.second) | dce1f0cde67c834580edb349e0dfbcdee0b4d171 | 14,019 |
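The docstring's own example, checked:
from datetime import datetime
print(end_of_time(datetime(2021, 1, 1, 18, 25, 36)))   # 2021-01-01 19:00:00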
def modf(x):
"""modf(x)
Return the fractional and integer parts of x. Both results carry the sign
of x.
"""
signx = sign(x)
absx = Abs(x)
return (signx * Mod(absx, 1), signx * floor(absx)) | 18f4e9aca22591f2960bb6ddf28fcf677bedee65 | 14,020 |
from typing import Tuple
def get_user_from_request(request, available_query_params: list) -> Tuple[User, GeneralApiResponse]:
    """
    Takes the request from the view and a list of user query params that may be queried.
    Returns a user if it is the requester themselves, or if the requester has permission
    to access other users.
    Returns an error response if the request cannot be completed.
    """
    get_keys = list(request.GET.keys())
    if len(get_keys) != 0 or (len(get_keys) > 1 and 'page' in get_keys):
        if request.user.is_superuser:
            if any(query_param not in available_query_params for query_param in get_keys):
                return None, GeneralApiResponse.bad_request()  # some query param in the user request is invalid
            ignore_page_query_params = {key: v for key, v in request.GET.dict().items() if key not in ['page']}
            users = User.objects.filter(**ignore_page_query_params)  # ignore the "page" query param
            if not users.exists():
                return None, GeneralApiResponse.not_found()  # no user matched the query
            elif len(users) > 1:
                return None, GeneralApiResponse.bad_request('the query returns more than one user')
            else:
                return users[0], None
        else:
            return None, GeneralApiResponse.unauthorized()
    else:
        return request.user, None | b9d0274ac5ea8e0cbc210b1f4f5e8d46398e8e6d | 14,021 |
def longest_CD(values):
"""
Return the sequence range for the longest continuous
disorder (CDl) subsequence.
"""
# Filter residues with score equal or greater than 0.5
# and store its position index
dis_res = [index for index, res in enumerate(values)
if float(res) >= 0.5]
# Initialize longest CD region
CDl = []
# Counter to store partial results of each continuous region
c = []
    # Iterate over consecutive pairs of disordered residue indices
    for i, j in zip(dis_res, dis_res[1:]):
        # Check if residues are consecutive
        if j - i == 1:
            # Update counter
            c.append(i)
        # Not consecutive
        else:
            # Add last residue of the interval
            c.append(i)
            # Update CDl
            if len(c) > len(CDl):
                CDl = c
            # Reset counter for the next interval
            c = []
    # Close the final interval: add the last disordered residue and compare,
    # otherwise a run reaching the end of the sequence is never considered
    if dis_res:
        c.append(dis_res[-1])
        if len(c) > len(CDl):
            CDl = c
    return CDl | f07b74b9553c156d2d4b62e17ea02b466a16fe74 | 14,022 |
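A small illustration: with these per-residue scores the longest run of consecutive residues scoring >= 0.5 is residues 5..8 (0-based indices).
scores = [0.1, 0.2, 0.9, 0.3, 0.1, 0.8, 0.7, 0.9, 0.6, 0.2]
print(longest_CD(scores))   # [5, 6, 7, 8]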
def get_read_length(filename):
""" Return the first read length of fastq file.
:param str filename: fastq file.
"""
with FastqReader(filename) as filin:
read_len = len(next(iter(filin)))
return read_len | 961af7ff12c422c68349dabee064acd465a1a090 | 14,023 |
import numpy as np
def no_outliers_estimator(base_estimator, x, alpha=0.01):
""" Calculate base_estimator function after removal of extreme quantiles
from the sample
"""
x = np.array(x)
if len(x.shape) < 3:
x = np.expand_dims(x, -1)
low_value = np.quantile(x, alpha, axis=(0, 1))
high_value = np.quantile(x, 1 - alpha, axis=(0, 1))
result = np.zeros(x.shape[2], x.dtype)
for i in range(x.shape[2]):
x_ch = x[:, :, i]
x_ch = x_ch[(x_ch >= low_value[i]) & (x_ch <= high_value[i])]
result[i] = base_estimator(x_ch)
return result | 3c23f9cacc1108d6ecb24b690ff731e3a3554b44 | 14,024 |
from server.forms import FormNotCompleteError, FormValidationError
from typing import cast
from typing import Tuple
def error_state_to_dict(err: ErrorState) -> ErrorDict:
"""Return an ErrorDict based on the exception, string or tuple in the ErrorState.
Args:
err: ErrorState from a api error state
Returns:
An ErrorDict containing the error message a status_code and a traceback if available
"""
# Import here to prevent cyclic imports
if isinstance(err, FormValidationError):
return {
"class": type(err).__name__,
"error": str(err),
"traceback": err,
"validation_errors": err.errors, # type:ignore
"status_code": HTTPStatus.BAD_REQUEST,
}
elif isinstance(err, FormNotCompleteError):
return {
"class": type(err).__name__,
"error": str(err),
"traceback": err,
"form": err.form,
"status_code": HTTPStatus.NOT_EXTENDED,
}
elif isinstance(err, Exception):
if is_api_exception(err):
err = cast(ApiException, err)
return {
"class": type(err).__name__,
"error": err.reason,
"status_code": err.status,
"body": err.body,
"headers": "\n".join(f"{k}: {v}" for k, v in err.headers.items()),
"traceback": err,
}
return {
"class": type(err).__name__,
"error": str(err),
"traceback": show_ex(err),
}
elif isinstance(err, tuple):
cast(Tuple, err)
error, status_code = err
return {"error": str(error), "status_code": int(status_code)}
elif isinstance(err, str):
return {"error": err}
elif isinstance(err, dict) and "error" in err: # type: ignore
return err
else:
raise TypeError("ErrorState should be a tuple, exception or string") | 79cf9a971886241c8760bf0091af0c91a4d80ade | 14,025 |
import nltk
from typing import Set
def english_words() -> Set[str]:
"""Return a set of english words from the nltk corpus "words".
Returns:
Set of english words.
"""
nltk_resource("corpora/words")
return set(nltk.corpus.words.words()) | 2cda38fb0026805c7792bcf45727492b09b38a89 | 14,026 |
def bipartite_matching_wrapper(a, b, score_func, symmetric=False):
"""A wrapper to `bipartite_matching()` that returns `(matches, unmatched_in_a, unmatched_in_b)`
The list of `matches` contains tuples of `(score, a_element, b_element)`. The two unmatched
lists are elements from each of the respective input lists.
"""
found_a, found_b = set(), set()
matches = []
for score, i, j in bipartite_matching(a, b, score_func, symmetric=symmetric):
matches.append((score, i, j))
found_a.add(i)
found_b.add(j)
unmatched_in_a = set(a) - found_a
unmatched_in_b = set(b) - found_b
return matches, unmatched_in_a, unmatched_in_b | 702c290b6874b595fb0249c865c5723c84d485ba | 14,027 |
import itertools
import numpy as np
def gen_seldicts(
da,
dims=None,
check_empty=True,
unstack=True
):
"""
TODO: improve documentation
generates a list of dictionaries to be passed into dataarray selection
functions.
Parameters
----------
da : xr.DataArray
datarray to generate selection dicts for
dims
dimensions to generate seldicts over, if None then use all dimensions
check_empty : bool
only generate seldicts that give values that are not all nan
    unstack : bool
        unstack a multi-indexed DataArray before generating seldicts
Returns
-------
seldicts : List[Dict]
"""
if unstack:
#unstacks in case of multiindex. using unstacked seldict on stacked multindex da seems to work
da = da.unstack()
if dims is None:
dims = list(da.dims)
idxs = {dim: da.indexes[dim] for dim in dims}
seldicts = [dict(zip(idxs, x)) for x in itertools.product(*idxs.values())]
seldicts_red = []
if check_empty:
# checks to see if the seldict produces all nans and only appends the
# seldict to the list if that is not true
for i, seldict in enumerate(seldicts):
sel = da.sel(seldict).values
t = (sel != sel) # test for nan
if type(t) == np.ndarray:
t = t.all()
if not t:
seldicts_red.append(seldict)
seldicts = seldicts_red
return seldicts | 2bd46bcf9d94ab64593d889bbae89f4e07d689b2 | 14,028 |
def get_interface_type(interface):
"""Gets the type of interface
Args:
interface (str): full name of interface, i.e. Ethernet1/1, loopback10,
port-channel20, vlan20
Returns:
type of interface: ethernet, svi, loopback, management, portchannel,
or unknown
"""
if interface.upper().startswith('ET'):
return 'ethernet'
elif interface.upper().startswith('VL'):
return 'svi'
elif interface.upper().startswith('LO'):
return 'loopback'
elif interface.upper().startswith('MG'):
return 'management'
elif interface.upper().startswith('MA'):
return 'management'
elif interface.upper().startswith('PO'):
return 'portchannel'
else:
return 'unknown' | 8196bfa37ef25f0fa1c08577d215329ecc977c4a | 14,029 |
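A few illustrative calls matching the docstring:
print(get_interface_type("Ethernet1/1"))      # 'ethernet'
print(get_interface_type("vlan20"))           # 'svi'
print(get_interface_type("port-channel20"))   # 'portchannel'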
def create_int_feature_list(name, key, prefix="", module_dict=None):
"""Creates accessor functions for bytes feature lists.
The provided functions are has_${NAME}, get_${NAME}_size, get_${NAME}_at,
clear_${NAME}, and add_${NAME}.
example = tensorflow.train.SequenceExample()
add_image_timestamp(1000000, example)
add_image_timestamp(2000000, example)
if has_image_timestamp:
for i in range(get_image_timestamp_size(example):
timestamp = get_image_timestamp_at(i, example)
clear_image_timestamp(example)
Args:
name: the name of the feature to use in function names.
key: the key for this feature in the SequenceExample.
prefix: a prefix to append to the key in the SequenceExample
module_dict: adds the functions to the corresponding module dict.
"""
def _has(sequence_example, prefix=prefix):
return has_feature_list(key, sequence_example, prefix=prefix)
def _get_size(sequence_example, prefix=prefix):
return get_feature_list_size(key, sequence_example, prefix=prefix)
def _get_at(index, sequence_example, prefix=prefix):
return get_int_at(key, index, sequence_example, prefix=prefix)
def _clear(sequence_example, prefix=prefix):
clear_feature_list(key, sequence_example, prefix=prefix)
def _add(value, sequence_example, prefix=prefix):
add_int(key, value, sequence_example, prefix=prefix)
def _get_key(prefix=prefix):
return merge_prefix(prefix, key)
def _get_default_parser():
return tf.io.FixedLenSequenceFeature((), tf.int64)
function_dict = {
"has_" + name: _has,
"get_" + name + "_size": _get_size,
"get_" + name + "_at": _get_at,
"clear_" + name: _clear,
"add_" + name: _add,
"get_" + name + "_key": _get_key,
"get_" + name + "_default_parser": _get_default_parser,
}
add_functions_to_module(function_dict, module_dict) | 58b08f518050a67db72f0572a78f7dab5a68d468 | 14,030 |
import numpy as np
import matplotlib.pyplot as plt
def ROC(y_pred, y_true, positive_column = 0, draw = True):
    """
    Compute true/false positive rates over thresholds 0.0..1.0 in steps of 0.1,
    and optionally draw the resulting ROC curve.
    """
    y_pred = y_pred[:, positive_column]
    y_true = y_true[:, positive_column]
    # sort by y_pred
    sort_index = np.argsort(-y_pred)
    y_pred = y_pred[sort_index]
    y_true = y_true[sort_index]
    tprs = []
    fprs = []
    positive_num = (y_true == 1.0).sum()
    negative_num = len(y_true) - positive_num
    for threshold in np.arange(0, 1 + 0.1, 0.1):
        t = ((y_true == 1.0) & (y_pred >= threshold)).sum()
        f = ((y_true == 0.0) & (y_pred >= threshold)).sum()
        tprs.append(t * 1.0 / positive_num)
        fprs.append(f * 1.0 / negative_num)
    if draw:
        plt.plot(fprs, tprs, c='r')
        plt.show()
    return tprs, fprs | efeefbd570988c83f912345794cbd19e15ec67a2 | 14,031 |
def ignore_check(self, channel: discord.TextChannel, ignore_dm: bool = False, from_main: bool = False):
"""
A function that checks whether or not that channel allows command.
Args:
self: instance of the class this command calls or this can be commands.Bot
channel (discord.TextChannel): the channel the command call happened in
ignore_dm (bool): whether or not the command is being ignored in direct messages
from_main (bool): indicator for whether or not this call is from Main.py, which switches changes how self is
read
Returns:
True: if channel needs to be ignored
False: if channel is fine
"""
if ignore_dm:
if channel.type is discord.ChannelType.private:
return True
try:
if from_main:
ignore = self.get_cog("Ignores").find(channel.guild.id, channel.id)
else:
ignore = self.bot.get_cog('Ignores').find(channel.guild.id, channel.id)
except AttributeError:
return False
if ignore:
return True
return False | 284ba6432d792a3382383cf9b53a5932897b5e53 | 14,032 |
def network_count_allocated_ips(context, network_id):
"""Return the number of allocated non-reserved ips in the network."""
return IMPL.network_count_allocated_ips(context, network_id) | 33f7ce340d222c3843962e6e64a06440e5dfd526 | 14,033 |
def _parse_transform_spec( transform_spec ):
"""
Parses a transform specification into its name and parameters dictionary.
Raises ValueError if the specification is invalid, it represents an unknown
transform, or if the encoded parameters do not match the transform's expected
types.
Takes 1 argument:
transform_spec - Transform specification string. See lookup_transform() for
details.
Returns 2 values:
transform_name - Name of the specified transform.
transform_parameters - Dictionary of parameters for the specified transform.
Dictionary values are cast to the types expected
the transform.
"""
try:
# break the "<name>:<parameters>" string. make sure we don't break
# the <parameters> into multiple components so it can contain colons
# in the (key, value) pairs.
(transform_name,
transform_parameters_spec) = transform_spec.split( ":",
maxsplit=1 )
except ValueError:
raise ValueError( "Failed to get a transform name and parameters "
"specification from '{:s}'.".format(
transform_spec ) )
# make sure this is a known transform.
if transform_name not in _transform_map:
raise ValueError( "Unknown transform '{:s}'!".format(
transform_name ) )
# get the associated parameter parser for this transform.
_, parameter_parser = _transform_map[transform_name]
try:
# split the remaining <parameters> into (key, value) pairs. each
# (key, value) set is colon-delimited, and each set equal
# sign-delimited.
#
# e.g. "parameter1=value1:parameter2=value2a,value2b,value2c"
#
transform_parameters = dict( map( lambda key_value: key_value.split( "=" ),
transform_parameters_spec.split( ":" ) ) )
# map individual parameters to their expected data types.
transform_parameters = parameter_parser( transform_parameters )
except ValueError as e:
raise ValueError( "<parameters> -> (<key>, <value>) ({:s})".format(
str( e ) ) )
return (transform_name, transform_parameters) | b914d96d9ad1e8da3deb10f1c6500c2ee58b4928 | 14,034 |
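The inner (key, value) split on its own, using the example string from the comment above:
spec = "parameter1=value1:parameter2=value2a,value2b,value2c"
params = dict(kv.split("=") for kv in spec.split(":"))
print(params)   # {'parameter1': 'value1', 'parameter2': 'value2a,value2b,value2c'}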
from typing import Union
from typing import List
def parse_text_multiline(data: Union[str, List[str]]) -> str:
"""Parse the text in multiline mode."""
if isinstance(data, str):
return data
elif isinstance(data, list) and all(map(is_str, data)):
return '\n'.join(data)
else:
raise ValueError(data) | ba8e50422a89de14a464d4917138c5faa051124d | 14,035 |
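`is_str` is an external helper not shown here; with a plausible stand-in the behavior looks like this:
is_str = lambda s: isinstance(s, str)
print(parse_text_multiline("single line"))           # single line
print(parse_text_multiline(["line 1", "line 2"]))    # joined with a newline, printed on two lines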
def get_latest_episode_release(series, downloaded=True, season=None):
"""
    :param Series series: Series instance to find the latest release for
:param downloaded: find only downloaded releases
:param season: season to find newest release for
:return: Instance of Episode or None if not found.
"""
session = Session.object_session(series)
releases = (
session.query(Episode)
.join(Episode.releases, Episode.series)
.filter(Series.id == series.id)
)
if downloaded:
releases = releases.filter(EpisodeRelease.downloaded == True)
if season is not None:
releases = releases.filter(Episode.season == season)
if series.identified_by and series.identified_by != 'auto':
releases = releases.filter(Episode.identified_by == series.identified_by)
if series.identified_by in ['ep', 'sequence']:
latest_episode_release = releases.order_by(
desc(Episode.season), desc(Episode.number)
).first()
elif series.identified_by == 'date':
latest_episode_release = releases.order_by(desc(Episode.identifier)).first()
else:
# We have to label the order_by clause to disambiguate from Release.first_seen #3055
latest_episode_release = releases.order_by(
desc(Episode.first_seen.label('ep_first_seen'))
).first()
if not latest_episode_release:
log.debug(
'no episodes found for series `%s` with parameters season: %s, downloaded: %s',
series.name,
season,
downloaded,
)
return
log.debug(
'latest episode for series %s, with downloaded set to %s and season set to %s',
series,
downloaded,
season,
)
return latest_episode_release | bcf01b5f3af00bb9bc8ddfb0609be771c9350a79 | 14,036 |
def _set_user_permissions_for_volumes(users, volumes):
"""
Returns the section of the user data script to create a Linux
user group and grant the group permission to access the mounted
volumes on the EC2 instance.
"""
group_name = 'volumes'
user_data_script_section = f"""
groupadd {group_name}
"""
for user in users:
user_data_script_section += f"""
usermod -a -G {group_name} {user.login}
"""
for volume in volumes:
user_data_script_section += f"""
chgrp -R {group_name} {volume.mount}
chmod -R 2775 {volume.mount}
"""
return user_data_script_section | 2d262a52cfa2f3e142da3dd7767dcc6cff14c929 | 14,037 |
def cached_examples():
"""This view should be cached for 60 sec"""
examples = ExampleModel.query()
return render_template('list_examples_cached.html', examples=examples) | f598589967f82daaf3c7e9cb88f7679786e5bf18 | 14,038 |
def corona_surface_integral(solution, E, candidate_attach_pts, corona_elem, phys_param, debug_flag=False):
"""
Surface integral around the points that are marked as possible attachment candidates
"""
pcg_idx_vec = np.zeros((len(candidate_attach_pts.keys())), dtype=np.int64)
Q_vec = np.zeros((len(candidate_attach_pts.keys())))
for i, pt in enumerate(candidate_attach_pts):
pcg_idx_vec[i] = pt
elem_above_thresh_in_radius = np.intersect1d(candidate_attach_pts[pt], corona_elem)
# if debug_flag:
# pts_list = [229096, 229099, 229129, 229132, 229155, 229167, 229168, 229171, 229176, 229189, 229190, 229191, 229195, 229196, 229201, 229213, 229214, 229217, 229218, 229220, 229233, 229234, 229238, 229241, 229244, 229245, 229261, 229270, 229286, 229419]
# elem_indicator = np.zeros((solution['surf_mesh']['t'].shape[0], len(pts_list)))
# if pt in pts_list:
# logger.info('visualizing')
# logger.info(pt)
# elem_indicator = np.zeros((solution['surf_mesh']['t'].shape[0], 1))
# # elem_indicator[candidate_attach_pts[pt]] = 1
# elem_indicator[elem_above_thresh_in_radius] = 1
# viz.generate_vtu(solution['surf_mesh']['p'], solution['surf_mesh']['t'], None, None, {'cell_data': {0: 'E threshold'}}, 'test_radius{}'.format(pt), False, cell_data=elem_indicator)
Q_vec[i] = phys_param['eps0']*quadrature.surface_integral(solution['surf_mesh'], solution['master'], E, elem_above_thresh_in_radius)
return Q_vec, pcg_idx_vec | 72f16e1f9afbd35f6f877263abe1af9d0ebbf6d0 | 14,039 |
from typing import Callable
import datasets
def librispeech_adversarial(
split_type: str = "adversarial",
epochs: int = 1,
batch_size: int = 1,
dataset_dir: str = None,
preprocessing_fn: Callable = None,
cache_dataset: bool = True,
framework: str = "numpy",
clean_key: str = "clean",
adversarial_key: str = "adversarial",
) -> datasets.ArmoryDataGenerator:
"""
Adversarial dataset based on Librispeech-dev-clean using Universal
Perturbation with PGD.
split_type - one of ("adversarial")
returns:
Generator
"""
if clean_key != "clean":
raise ValueError(f"{clean_key} != 'clean'")
if adversarial_key != "adversarial":
raise ValueError(f"{adversarial_key} != 'adversarial'")
return datasets._generator_from_tfds(
"librispeech_adversarial:1.0.0",
split_type=split_type,
batch_size=batch_size,
epochs=epochs,
dataset_dir=dataset_dir,
preprocessing_fn=preprocessing_fn,
as_supervised=False,
supervised_xy_keys=("audio", "label"),
variable_length=bool(batch_size > 1),
cache_dataset=cache_dataset,
framework=framework,
lambda_map=lambda x, y: ((x[clean_key], x[adversarial_key]), y),
) | 2ab2da4f56194dada3cd361371ef32b1f2fd6194 | 14,040 |
def search4letters(phrase, letters='aeiou'):
"""
    Return a set of the 'letters' found in 'phrase'.
    :param phrase: phrase where the search will be made
    :param letters: set of letters that will be searched for in the phrase
    :return: a set of the letters from 'letters' that occur in 'phrase'
"""
return set(letters).intersection(set(phrase)) | e58d0863aa090ac3644cd7bf26e783efe2956d35 | 14,041 |
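Two quick calls:
print(search4letters('hitch-hiker'))             # {'i', 'e'}
print(search4letters('galaxy', letters='xyz'))   # {'x', 'y'}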
import argparse
def handle_cmdline_args():
"""
Return an object with attributes 'infile' and
'outfile', after handling the command line arguments
"""
parser = argparse.ArgumentParser(
description='Generate synthetic data from a specification in a json '
'file using the "synth-method" described in the json file. ')
parser.add_argument(
'-i', dest='infile', required=True,
help='The input json file. Must contain a "synth-method" property')
parser.add_argument(
'-o', dest='outfile_prefix', required=True,
help='The prefix of the output paths (data json and csv), '
'relative to the QUIPP-pipeline root directory')
args = parser.parse_args()
return args | 434316ca9333182b370e4863b6cda5fe2fb37b25 | 14,042 |
import gc
import pandas as pd
from uuid import UUID
def merge_flights(prev_flights_filename, next_flights_filename, ids_df, log):
"""
Gets the next days flights that are the continuation of the previous days
flights and merges them with the previous days flights.
It writes the new next days and previous days flights to files prepended
with new.
it returns True if successful, False otherwise.
"""
new_items_df = get_next_day_items(next_flights_filename, ids_df, log)
# free memory used by get_next_day_items
gc.collect()
prev_flights_df = pd.DataFrame()
try:
prev_flights_df = pd.read_csv(prev_flights_filename,
index_col='FLIGHT_ID',
converters={'FLIGHT_ID': lambda x: UUID(x)},
memory_map=True)
log.info('%s read ok', prev_flights_filename)
except EnvironmentError:
log.error('could not read file: %s', prev_flights_filename)
return False
# merge next days flight data with the previous days flight data
update_flight_data(prev_flights_df, new_items_df)
# Output the new previous flights
new_prev_flights_filename = 'new_' + prev_flights_filename
try:
is_bz2 = has_bz2_extension(prev_flights_filename)
if is_bz2:
new_prev_flights_filename = new_prev_flights_filename[:-BZ2_LENGTH]
prev_flights_df.to_csv(new_prev_flights_filename, index=True,
date_format=ISO8601_DATETIME_FORMAT)
log.info('written file: %s', new_prev_flights_filename)
except EnvironmentError:
log.error('could not write file: %s', new_prev_flights_filename)
return False
return True | 6d0cec2c8cc66d04facdde01e24ce0b3aa57dc55 | 14,043 |
def findClusters( peaks, thresh ):
"""Since the peaks are in sequence, this method follows a very simplistic
approach. For each peak it checks its distance from the previous peak. If
it is less than threshold, it clusters that peak with the previous one.
Note that in each of the clusters, input order is maintained."""
clusters, cluster = [], []
cluster.append(peaks[0])
for peak in peaks[1:]:
if euclideanDistance( cluster[-1], peak ) < thresh:
cluster.append( peak )
else:
clusters.append(cluster)
cluster = [peak]
clusters.append( cluster )
print( clusters )
return clusters | f74e504557e7c7e796d29290dccabe043ac70dc0 | 14,044 |
import os
def docker_compose_file(pytestconfig):
"""Get docker compose file"""
return os.path.join(str(pytestconfig.rootdir), "docker-compose.yml") | 8f723f0a6144bf687567c6e998ae54495dcd936d | 14,045 |
import os
def sample():
"""
Returns the path to the sample of the given name.
"""
def inner(name):
return os.path.join(
os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'samples'
), name
)
return inner | e042b6da15ee85d818ac830e6cfd74a1f11745a2 | 14,046 |
import numpy as np
from scipy.optimize import minimize
def acq_max_single_seed(ac, gp, y_max, bounds):
"""
A function to find the maximum of the acquisition function using
the 'L-BFGS-B' method.
Input Parameters
----------
ac: The acquisition function object that return its point-wise value.
gp: A gaussian process fitted to the relevant data.
y_max: The current maximum known value of the target function.
bounds: The variables bounds to limit the search of the acq max.
Returns
-------
:return: x_max, The arg max of the acquisition function.
"""
# Start with the lower bound as the argmax
x_max = bounds[:, 0]
#max_acq = None
dim=bounds.shape[0]
x_tries = np.random.uniform(bounds[:, 0], bounds[:, 1],size=(50*dim, dim))
# evaluate
y_tries=ac(x_tries,gp=gp, y_max=y_max)
#find x optimal for init
idx_max=np.argmax(y_tries)
x_init_max=x_tries[idx_max]
#x_try=np.array(bounds[:, 0])
# Find the minimum of minus the acquisition function
res = minimize(lambda x: -ac(x.reshape(1, -1), gp=gp, y_max=y_max),
x_init_max.reshape(1, -1),
bounds=bounds,
method="L-BFGS-B")
x_max = res.x
#max_acq = -res.fun
# Clip output to make sure it lies within the bounds. Due to floating
# point technicalities this is not always the case.
return np.clip(x_max, bounds[:, 0], bounds[:, 1]) | 5a705e15e41be8063f476a40b1cfae9385b98af7 | 14,047 |
import pandas as pd
def futures_pig_rank(symbol: str = "外三元") -> pd.DataFrame:
    """
    Price ranking list (价格排行榜)
    https://zhujia.zhuwang.cc/lists.shtml
    :param symbol: choice of {"外三元", "内三元", "土杂猪", "玉米", "豆粕"}
    :type symbol: str
    :return: price ranking list
    :rtype: pandas.DataFrame
    """
    # each symbol maps to its own ranking page; the scraping steps are identical
    url_map = {
        "外三元": "https://zhujia.zhuwang.cc/lists.shtml",
        "内三元": "https://zhujia.zhuwang.cc/lists-1.shtml",
        "土杂猪": "https://zhujia.zhuwang.cc/lists-2.shtml",
        "玉米": "https://zhujia.zhuwang.cc/lists-3.shtml",
        "豆粕": "https://zhujia.zhuwang.cc/lists-4.shtml",
    }
    if symbol not in url_map:
        return None
    temp_df = pd.read_html(url_map[symbol])[0]
    temp_df.columns = [
        '排名',
        '品种',
        '省份',
        '价格-公斤',
        '价格-斤',
    ]
    temp_df['价格-公斤'] = pd.to_numeric(temp_df['价格-公斤'].str.strip("元"))
    temp_df['价格-斤'] = pd.to_numeric(temp_df['价格-斤'].str.strip("元"))
    return temp_df | 9afc155021afc2b8ffbef4a0e778f1ab6360219f | 14,048 |
import math
def psubl_T(T):
"""
EQ 6 / Sublimation Pressure
"""
T_star = 273.16
p_star = 611.657E-6
a = (-0.212144006E2, 0.273203819E2, -0.610598130E1)
b = ( 0.333333333E-2, 0.120666667E1, 0.170333333E1)
theta = T / T_star
sum = 0
for i in range(0, 3):
sum += a[i] * theta ** b[i]
pi_subl = math.exp((theta ** -1) * sum)
return pi_subl * p_star | 0e3f875fc2d249c78a5db6268dcc0df31213a7ff | 14,049 |
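A quick sanity check at the triple point, where theta = 1 and the coefficient sum vanishes, so the function should return p_star itself (the constant suggests pressures are expressed in MPa):
print(psubl_T(273.16))   # ~611.657e-6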
def map_key_values(f, dct):
"""
Like map_with_obj but expects a key value pair returned from f and uses it to form a new dict
:param f: Called with a key and value
:param dct:
:return:
"""
return from_pairs(values(map_with_obj(f, dct))) | 0918ff4ff9ab994b10fe2543dce305f99b7278fb | 14,050 |
def plot_ppc(
ax,
length_plotters,
rows,
cols,
figsize,
animated,
obs_plotters,
pp_plotters,
posterior_predictive,
pp_sample_ix,
kind,
alpha,
linewidth,
mean,
xt_labelsize,
ax_labelsize,
jitter,
total_pp_samples,
legend,
markersize,
animation_kwargs,
num_pp_samples,
):
"""Matplotlib ppc plot."""
if ax is None:
fig, axes = _create_axes_grid(length_plotters, rows, cols, figsize=figsize)
else:
axes = np.ravel(ax)
if len(axes) != length_plotters:
raise ValueError(
"Found {} variables to plot but {} axes instances. They must be equal.".format(
length_plotters, len(axes)
)
)
if animated:
fig = axes[0].get_figure()
if not all([ax.get_figure() is fig for ax in axes]):
raise ValueError("All axes must be on the same figure for animation to work")
for i, ax_i in enumerate(axes):
var_name, selection, obs_vals = obs_plotters[i]
pp_var_name, _, pp_vals = pp_plotters[i]
dtype = posterior_predictive[pp_var_name].dtype.kind
# flatten non-specified dimensions
obs_vals = obs_vals.flatten()
pp_vals = pp_vals.reshape(total_pp_samples, -1)
pp_sampled_vals = pp_vals[pp_sample_ix]
if kind == "kde":
plot_kwargs = {"color": "C5", "alpha": alpha, "linewidth": 0.5 * linewidth}
if dtype == "i":
plot_kwargs["drawstyle"] = "steps-pre"
ax_i.plot([], color="C5", label="Posterior predictive {}".format(pp_var_name))
if dtype == "f":
plot_kde(
obs_vals,
label="Observed {}".format(var_name),
plot_kwargs={"color": "k", "linewidth": linewidth, "zorder": 3},
fill_kwargs={"alpha": 0},
ax=ax_i,
legend=legend,
)
else:
bins = get_bins(obs_vals)
_, hist, bin_edges = histogram(obs_vals, bins=bins)
hist = np.concatenate((hist[:1], hist))
ax_i.plot(
bin_edges,
hist,
label="Observed {}".format(var_name),
color="k",
linewidth=linewidth,
zorder=3,
drawstyle=plot_kwargs["drawstyle"],
)
pp_densities = []
pp_xs = []
for vals in pp_sampled_vals:
vals = np.array([vals]).flatten()
if dtype == "f":
pp_density, lower, upper = _fast_kde(vals)
pp_x = np.linspace(lower, upper, len(pp_density))
pp_densities.append(pp_density)
pp_xs.append(pp_x)
else:
bins = get_bins(vals)
_, hist, bin_edges = histogram(vals, bins=bins)
hist = np.concatenate((hist[:1], hist))
pp_densities.append(hist)
pp_xs.append(bin_edges)
if animated:
animate, init = _set_animation(
pp_sampled_vals, ax_i, dtype=dtype, kind=kind, plot_kwargs=plot_kwargs
)
else:
if dtype == "f":
ax_i.plot(np.transpose(pp_xs), np.transpose(pp_densities), **plot_kwargs)
else:
for x_s, y_s in zip(pp_xs, pp_densities):
ax_i.plot(x_s, y_s, **plot_kwargs)
if mean:
if dtype == "f":
rep = len(pp_densities)
len_density = len(pp_densities[0])
new_x = np.linspace(np.min(pp_xs), np.max(pp_xs), len_density)
new_d = np.zeros((rep, len_density))
bins = np.digitize(pp_xs, new_x, right=True)
new_x -= (new_x[1] - new_x[0]) / 2
for irep in range(rep):
new_d[irep][bins[irep]] = pp_densities[irep]
ax_i.plot(
new_x,
new_d.mean(0),
color="C0",
linestyle="--",
linewidth=linewidth,
zorder=2,
label="Posterior predictive mean {}".format(pp_var_name),
)
else:
vals = pp_vals.flatten()
bins = get_bins(vals)
_, hist, bin_edges = histogram(vals, bins=bins)
hist = np.concatenate((hist[:1], hist))
ax_i.plot(
bin_edges,
hist,
color="C0",
linewidth=linewidth,
label="Posterior predictive mean {}".format(pp_var_name),
zorder=2,
linestyle="--",
drawstyle=plot_kwargs["drawstyle"],
)
ax_i.tick_params(labelsize=xt_labelsize)
ax_i.set_yticks([])
elif kind == "cumulative":
drawstyle = "default" if dtype == "f" else "steps-pre"
ax_i.plot(
*_empirical_cdf(obs_vals),
color="k",
linewidth=linewidth,
label="Observed {}".format(var_name),
drawstyle=drawstyle,
zorder=3
)
if animated:
animate, init = _set_animation(
pp_sampled_vals,
ax_i,
kind=kind,
alpha=alpha,
drawstyle=drawstyle,
linewidth=linewidth,
)
else:
pp_densities = np.empty((2 * len(pp_sampled_vals), pp_sampled_vals[0].size))
for idx, vals in enumerate(pp_sampled_vals):
vals = np.array([vals]).flatten()
pp_x, pp_density = _empirical_cdf(vals)
pp_densities[2 * idx] = pp_x
pp_densities[2 * idx + 1] = pp_density
ax_i.plot(
*pp_densities, alpha=alpha, color="C5", drawstyle=drawstyle, linewidth=linewidth
)
ax_i.plot([], color="C5", label="Posterior predictive {}".format(pp_var_name))
if mean:
ax_i.plot(
*_empirical_cdf(pp_vals.flatten()),
color="C0",
linestyle="--",
linewidth=linewidth,
drawstyle=drawstyle,
label="Posterior predictive mean {}".format(pp_var_name)
)
ax_i.set_yticks([0, 0.5, 1])
elif kind == "scatter":
if mean:
if dtype == "f":
plot_kde(
pp_vals.flatten(),
plot_kwargs={
"color": "C0",
"linestyle": "--",
"linewidth": linewidth,
"zorder": 3,
},
label="Posterior predictive mean {}".format(pp_var_name),
ax=ax_i,
legend=legend,
)
else:
vals = pp_vals.flatten()
bins = get_bins(vals)
_, hist, bin_edges = histogram(vals, bins=bins)
hist = np.concatenate((hist[:1], hist))
ax_i.plot(
bin_edges,
hist,
color="C0",
linewidth=linewidth,
label="Posterior predictive mean {}".format(pp_var_name),
zorder=3,
linestyle="--",
drawstyle="steps-pre",
)
_, limit = ax_i.get_ylim()
limit *= 1.05
y_rows = np.linspace(0, limit, num_pp_samples + 1)
jitter_scale = y_rows[1] - y_rows[0]
scale_low = 0
scale_high = jitter_scale * jitter
obs_yvals = np.zeros_like(obs_vals, dtype=np.float64)
if jitter:
obs_yvals += np.random.uniform(low=scale_low, high=scale_high, size=len(obs_vals))
ax_i.plot(
obs_vals,
obs_yvals,
"o",
color="C0",
markersize=markersize,
alpha=alpha,
label="Observed {}".format(var_name),
zorder=4,
)
if animated:
animate, init = _set_animation(
pp_sampled_vals,
ax_i,
kind=kind,
height=y_rows.mean() * 0.5,
markersize=markersize,
)
else:
for vals, y in zip(pp_sampled_vals, y_rows[1:]):
vals = np.ravel(vals)
yvals = np.full_like(vals, y, dtype=np.float64)
if jitter:
yvals += np.random.uniform(low=scale_low, high=scale_high, size=len(vals))
ax_i.plot(
vals, yvals, "o", zorder=2, color="C5", markersize=markersize, alpha=alpha
)
ax_i.plot([], "C5o", label="Posterior predictive {}".format(pp_var_name))
ax_i.set_yticks([])
if var_name != pp_var_name:
xlabel = "{} / {}".format(var_name, pp_var_name)
else:
xlabel = var_name
ax_i.set_xlabel(make_label(xlabel, selection), fontsize=ax_labelsize)
if legend:
if i == 0:
ax_i.legend(fontsize=xt_labelsize * 0.75)
else:
ax_i.legend([])
if animated:
ani = animation.FuncAnimation(
fig, animate, np.arange(0, num_pp_samples), init_func=init, **animation_kwargs
)
return axes, ani
else:
return axes | 83d01e6b9f9f170b9e8dc2ff3cf95916106196c5 | 14,051 |
import importlib
def load_module(name):
"""Load the named module without registering it in ``sys.modules``.
Parameters
----------
name : string
Module name
Returns
-------
mod : module
Loaded module
"""
spec = importlib.util.find_spec(name)
mod = importlib.util.module_from_spec(spec)
mod.__spec__ = spec
mod.__loader__ = spec.loader
spec.loader.exec_module(mod)
return mod | 762c99efcc17f9f1d1659cdae52989c9cfa9423a | 14,052 |
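Minimal usage: load a stdlib module by name; the returned module works normally but was not registered by this call.
mod = load_module("json")
print(mod.dumps({"ok": True}))   # {"ok": true}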
def make_no_graph_input_fn(graph_data, args, treatments, outcomes, filter_test=False):
"""
A dataset w/ all the label processing, but no graph structure.
Used at evaluation and prediction time
"""
def input_fn():
vertex_dataset = tf.data.Dataset.from_tensor_slices(
({'vertex_index': np.expand_dims(np.array(range(graph_data.num_vertices)), 1),
'is_positive': np.expand_dims(np.array(range(graph_data.num_vertices)), 1)},))
data_processing = adapters.compose(
adapters.append_vertex_labels(treatments, 'treatment'),
adapters.append_vertex_labels(outcomes, 'outcome'),
adapters.make_split_vertex_labels(
graph_data.num_vertices, args.proportion_censored,
np.random.RandomState(args.seed)),
adapters.format_features_labels())
dataset = vertex_dataset.map(data_processing, 8)
if filter_test:
def filter_test_fn(features, labels):
return tf.equal(tf.squeeze(features['in_test']), 1)
dataset = dataset.filter(filter_test_fn)
batch_size = args.batch_size
dataset = dataset.batch(batch_size=batch_size, drop_remainder=False)
return dataset
return input_fn | 8526a64b55608f986ef4b000b2cb75a99160e1a0 | 14,053 |
import numpy as np
import torch
from torch import autograd
from torch.autograd import Variable
def compute_gradient_penalty(D, real_samples, fake_samples):
"""Calculates the gradient penalty loss for WGAN GP"""
# Random weight term for interpolation between real and fake samples
alpha = torch.tensor(np.random.random((real_samples.size(0), 1, 1, 1,1)), dtype = real_samples.dtype, device = real_samples.device)
# Get random interpolation between real and fake samples
#print(alpha.shape, fake_samples.shape)
interpolates = (alpha * real_samples + ((1 - alpha) * fake_samples)).requires_grad_(True)
d_interpolates = D(interpolates)
fake = Variable(Tensor(d_interpolates.shape[0], 1).fill_(1.0), requires_grad=False).view(-1)
#print(d_interpolates.shape, interpolates.shape, fake.shape)
# Get gradient w.r.t. interpolates
gradients = autograd.grad(
outputs=d_interpolates,
inputs=interpolates,
grad_outputs=fake,
create_graph=True,
retain_graph=True,
only_inputs=True,
)[0]
gradients = gradients.view(gradients.size(0), -1)
gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()
return gradient_penalty | 110e4854284be694c0813fd5fc71d2ff51d3b6d8 | 14,054 |
import copy
import json
def _perform_aggregation(resource, pipeline, options):
"""
.. versionadded:: 0.7
"""
# TODO move most of this down to the Mongo layer?
# TODO experiment with cursor.batch_size as alternative pagination
# implementation
def parse_aggregation_stage(d, key, value):
for st_key, st_value in d.items():
if isinstance(st_value, dict):
parse_aggregation_stage(st_value, key, value)
if key == st_value:
d[st_key] = value
response = {}
documents = []
req = parse_request(resource)
req_pipeline = copy.deepcopy(pipeline)
if req.aggregation:
try:
query = json.loads(req.aggregation)
except ValueError:
abort(400, description='Aggregation query could not be parsed.')
for key, value in query.items():
if key[0] != '$':
pass
for stage in req_pipeline:
parse_aggregation_stage(stage, key, value)
if req.max_results > 1:
limit = {"$limit": req.max_results}
skip = {"$skip": (req.page - 1) * req.max_results}
req_pipeline.append(skip)
req_pipeline.append(limit)
cursor = app.data.aggregate(resource, req_pipeline, options)
for document in cursor:
documents.append(document)
response[config.ITEMS] = documents
# PyMongo's CommandCursor does not return a count, so we cannot
    # provide pagination/total count info as we do with a normal (non-aggregate)
# GET request.
return response, None, None, 200, [] | f0c8bbd35dbc8f40c1dcd2a7851fa18585387e5f | 14,055 |
def tab(num):
"""
Get tab indentation.
Parameters
----------
num : int
indentation depth
"""
return num * 4 * " " | 39311a9f28aa70f105271432916745dddeb0b46a | 14,056 |
def merge_sort(lst):
"""Sorts the input list into ascending order."""
if len(lst) < 2:
return lst
half = len(lst) // 2
# This variant of merge sort uses O(N * log N) memory, since list slicing in Python 3 creates a copy.
return merge(merge_sort(lst[:half]), merge_sort(lst[half:])) | e8cada6428fde5aa430497c3c562dc4361c11c1e | 14,057 |
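`merge` is assumed to be defined elsewhere; a standard two-pointer merge for completeness, plus a usage example:
def merge(left, right):
    out, i, j = [], 0, 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            out.append(left[i]); i += 1
        else:
            out.append(right[j]); j += 1
    return out + left[i:] + right[j:]

print(merge_sort([5, 2, 9, 1, 5, 6]))   # [1, 2, 5, 5, 6, 9]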
from typing import Optional
def get_top_experts_per_item_dispatcher(gates: Array, name: str,
num_selected_experts: int,
batch_priority: bool,
capacity: Optional[int] = None,
capacity_factor: Optional[float] = None,
**dispatcher_kwargs) -> BaseDispatcher:
"""Returns a dispatcher implementing Top-Experts-Per-Item routing.
For each item, the `num_selected_experts` experts with the largest gating
score are selected in a greedy fashion. However, because each expert has a
fixed `capacity`, if more items than `capacity` select a given expert some of
the assignments will be ignored. All top-1 choices have priority over top-2
choices and so on. In addition, the choices that are ignored also depend on
`batch_priority`. If it is False, the "Vanilla" algorithm is used, meaning
that items in earlier positions of the array have priority. If it is True, the
"Batch Priority Routing" algorithm (see https://arxiv.org/abs/2106.05974) is
used, which gives more priority to the items whose largest score is greater.
Args:
gates: (S, E) array with the gating values for each (item, expert).
These values will also be used as combine_weights for the selected pairs.
name: String with the type of dispatcher to use (supported values are
"einsum" and "indices").
num_selected_experts: Maximum number of experts to select per each item (K).
batch_priority: Whether to use batch priority routing or not.
capacity: If given, maximum number of items processed by each expert.
Either this or `capacity_factor` must be given.
capacity_factor: If given, sets the `capacity` to this factor of S * K / E.
Either this or `capacity` must be given.
**dispatcher_kwargs: Additional arguments for the dispatcher object.
Returns:
A dispatcher.
"""
if (capacity is None) == (capacity_factor is None):
raise ValueError(
"You must specify either 'capacity' or 'capacity_factor', and not both."
f" Current values are capacity = {capacity!r}, "
f"capacity_factor = {capacity_factor!r}")
if not capacity:
group_size, num_experts = gates.shape
capacity = _compute_capacity(
# Target number of tokens to split among the `num_experts` experts.
num_tokens=group_size * num_selected_experts,
num_experts=num_experts,
capacity_factor=capacity_factor)
fn_map = {
"einsum": _get_top_experts_per_item_einsum_dispatcher,
"indices": _get_top_experts_per_item_expert_indices_dispatcher,
}
if name not in fn_map:
raise ValueError(f"Unknown dispatcher type: {name!r}")
return fn_map[name](gates, num_selected_experts, capacity, batch_priority,
**dispatcher_kwargs) | 94e090bc3de59fd03903151fa2e34b2daca50198 | 14,058 |
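The _compute_capacity helper called above is not shown; below is a hedged sketch matching the docstring's description (capacity ≈ capacity_factor * S * K / E, rounded up) — the actual implementation may round or clamp differently.
import math

def _compute_capacity_sketch(num_tokens: int, num_experts: int,
                             capacity_factor: float) -> int:
    # capacity_factor * (tokens to route) / (number of experts), rounded up
    return int(math.ceil(capacity_factor * num_tokens / num_experts))

# e.g. a group of 2048 items with top-2 routing over 64 experts, factor 1.0:
print(_compute_capacity_sketch(num_tokens=2048 * 2, num_experts=64,
                               capacity_factor=1.0))  # 64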
import subprocess
def nix_prefetch_url(url, algo='sha256'):
"""Prefetches the content of the given URL."""
print(f'nix-prefetch-url {url}')
out = subprocess.check_output(['nix-prefetch-url', '--type', algo, url])
return out.decode('utf-8').rstrip() | 9aed687bed1a015a4da03836ce6438b0cf9b55ec | 14,059 |
def find_files_list(*args, **kwargs):
""" Returns a list of find_files generator"""
return list(find_files(*args, **kwargs)) | b51595dbc75308c583b75c3151c41ea84aafaeaf | 14,060 |
def bool_from_string(subject, strict=False, default=False):
"""Interpret a subject as a boolean.
A subject can be a boolean, a string or an integer. Boolean type value
will be returned directly, otherwise the subject will be converted to
a string. A case-insensitive match is performed such that strings
matching 't','true', 'on', 'y', 'yes', or '1' are considered True and,
when `strict=False`, anything else returns the value specified by
'default'.
Useful for JSON-decoded stuff and config file parsing.
If `strict=True`, unrecognized values, including None, will raise a
ValueError which is useful when parsing values passed in from an API call.
Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'.
"""
if isinstance(subject, bool):
return subject
if not isinstance(subject, str):
subject = str(subject)
lowered = subject.strip().lower()
if lowered in TRUE_STRINGS:
return True
elif lowered in FALSE_STRINGS:
return False
elif strict:
acceptable = ", ".join(
"'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS))
msg = ("Unrecognized value '%(val)s', acceptable values are:"
" %(acceptable)s") % {"val": subject,
"acceptable": acceptable}
raise ValueError(msg)
else:
return default | b3f7728eb5fdd4c660144279200daabd25034bf3 | 14,061 |
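Expected behaviour of bool_from_string(), assuming TRUE_STRINGS and FALSE_STRINGS hold the values listed in the docstring:
print(bool_from_string('YES'))               # True  ('yes' is a TRUE_STRING)
print(bool_from_string(0))                   # False ('0' is a FALSE_STRING)
print(bool_from_string('banana'))            # False (unrecognized -> default)
print(bool_from_string(None, default=True))  # True  (unrecognized -> default)
# bool_from_string('banana', strict=True) raises ValueError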
from typing import Optional
from typing import Tuple
def scan_quality_check(label: str,
pivots: list,
energies: list,
scan_res: float = rotor_scan_resolution,
used_methods: Optional[list] = None,
log_file: Optional[str] = None,
species: Optional[ARCSpecies] = None,
preserve_params: Optional[list] = None,
trajectory: Optional[list] = None,
original_xyz: Optional[dict] = None,
) -> Tuple[bool, str, str, dict]:
"""
Checks the scan's quality:
1. Based on intermediate conformers if available:
- whether the initial and final points are consistent
- whether it is relatively "smooth"
2. Based on the PES curve (if intermediate conformers are unavailable):
- whether the initial and final points are consistent
- whether it is relatively "smooth"
3. Common:
- whether the optimized geometry indeed represents the minimum energy conformer (for a non-TS species)
- whether the barrier height is reasonable
4. Based on requested parameters to preserve:
- whether specified atom distances to preserve criteria aren't violated
Args:
label (str): The species label.
pivots (list): The rotor pivots.
energies (list): The scan energies in kJ/mol.
scan_res (float, optional): The scan resolution in degrees.
used_methods (list, optional): Troubleshooting methods already tried out.
log_file (str, optional): The path to the output file.
species (ARCSpecies, optional): The ARCSpecies this scan is related to.
preserve_params (list, optional): Entries are length 2 lists of atom indices (1-indexed) between which the
distance as well as a torsion dihedral angle with these atoms as its pivots
must be preserved throughout the scan to a tolerance.
trajectory (list, optional): Entries are Cartesian coordinates along the scan trajectory.
original_xyz (dict, optional): The optimized coordinated for the species.
Returns: Tuple[bool, str, str, dict]
- Whether to invalidate this rotor, ``True`` to invalidate.
- Reason for invalidating this rotor.
- Error or warning message.
- Troubleshooting methods to apply, including conformational changes.
Todo:
- adjust to ND
"""
message, invalidation_reason = '', ''
invalidate = False
actions = dict()
used_methods = used_methods or list()
energies = np.array(energies, np.float64)
scan_conformers = None
# Check if the conformer based method is valid
if log_file:
try:
scan_conformers = parse_scan_conformers(log_file)
except NotImplementedError:
message = f'Rotor scan quality check using conformer internal coordinates ' \
f'has not been implemented for current ESS. Using PES curve based ' \
f'check for rotor scan of {label} between pivots {pivots}.'
logger.warning(message)
# 1. Check based on intermediate conformers
if scan_conformers is not None and (species is None or not species.is_ts):
bonds = scan_conformers[scan_conformers['type'] == 'R']
angles = scan_conformers[scan_conformers['type'] == 'A']
non_scan_rotor = scan_conformers[(scan_conformers['type'] == 'D') \
& (scan_conformers['scan'] == False)]
scan_rotor = scan_conformers[scan_conformers['scan'] == True]
# 1.1 Find significant changes of internal coordinates
expected_step_num = int(360 / scan_res)
# 5 below refers to type, atoms, scan, redundant and initial guess
actual_step_num = scan_conformers.shape[1] - 5
step_num = min(expected_step_num, actual_step_num)
changed_ic_dict = {}
for index_1 in range(step_num + 1):
if index_1 != 0:
# Compare the 'adjacent' conformers
index_2 = index_1 - 1
delta = scan_res # scan[index_1] - scan[index_2] = scan_res
elif step_num == expected_step_num:
# Compare the first and the last conformer
index_2 = step_num
delta = 0
else:
# When the scan is not finished as desired
continue
# Identify changes by type
bond_change = (2 * (bonds[index_1] - bonds[index_2]) /
(bonds[index_1] + bonds[index_2])).abs() > preserve_params_in_scan['bond']
angle_change = (angles[index_1] - angles[index_2]).abs() > preserve_params_in_scan['angle']
non_scan_rotor_change = check_torsion_change(torsions=non_scan_rotor,
index_1=index_1,
index_2=index_2,
threshold=preserve_params_in_scan['dihedral'])
scan_rotor_change = check_torsion_change(torsions=scan_rotor,
index_1=index_1,
index_2=index_2,
threshold=preserve_params_in_scan['dihedral'],
delta=delta)
# Summarize changes
change_sum = pd.concat([bond_change,
angle_change,
non_scan_rotor_change,
scan_rotor_change])
changed_ics = change_sum[change_sum == True].index.to_list()
# Save changes in the format of {conformer index: problematic ics}
if changed_ics:
invalidate = True
changed_ic_dict.update({index_1: changed_ics})
# 1.2 Check broken bond and any lowest conformation
# Exclude those with broken bonds (different species)
# Better to just freeze the broken bond when bond changing first happens
for conf_index, ics in changed_ic_dict.items():
# R(X,Y) refers to bonds in ics
broken_bonds = [ic for ic in ics if 'R' in ic]
if broken_bonds and conf_index != 0:
# Find the bond that changes the most, to avoid accompanied changes, like C-O transforms
# to C=O, which we don't want to freeze. If other bonds need to be frozen as well,
# it can be done in the following troubleshooting.
bonds = scan_conformers.loc[broken_bonds, :]
bond_change = (2 * (bonds[conf_index] - bonds[conf_index - 1]) /
(bonds[conf_index] + bonds[conf_index - 1])).abs()
broken_bond_label = bond_change.sort_values().index[-1] # the largest change
# Freeze the bonds, no further freezing other ics to prevent over-constraining
broken_bonds = [scan_conformers['atoms'][broken_bond_label]]
invalidate = True
invalidation_reason = f'Bond ({broken_bonds}) broke during the scan.'
message = f'Rotor scan of {label} between pivots {pivots} has broken bonds: ' \
f'{broken_bonds}. ARC will attempt to troubleshoot this rotor scan.'
logger.error(message)
actions = {'freeze': broken_bonds}
return invalidate, invalidation_reason, message, actions
# If no bond broke, ideally all conformers should be isomorphic.
# Switch to the lowest conformer
energy_diff = energies[0] - np.min(energies)
# Use tighter threshold to find lower conformer
if energy_diff >= 0.5 or energy_diff > 0.5 * (max(energies) - min(energies)) \
and (species is None or not species.is_ts):
invalidate = True
invalidation_reason = f'Another conformer for {label} exists which is ' \
f'{energy_diff:.2f} kJ/mol lower.'
message = f'Species {label} is not oriented correctly around pivots {pivots}, ' \
f'searching for a better conformation...'
logger.info(message)
# Find the dihedrals in degrees of the lowest conformer:
min_index = np.argmin(energies)
conf_xyzs = parse_1d_scan_coords(log_file)
actions = {'change conformer': conf_xyzs[min_index]}
return invalidate, invalidation_reason, message, actions
# 1.3 Check consistency
if 0 in changed_ic_dict.keys() and len(changed_ic_dict) == 1:
# A smooth scan with different initial and final conformer.
invalidate = True
invalidation_reason = 'Inconsistent initial and final conformers'
message = f'Rotor scan of {label} between pivots {pivots} has inconsistent initial ' \
f'and final conformers.\nInternal coordinates {changed_ic_dict[0]} are different. ' \
f'ARC will attempt to troubleshoot this rotor scan.'
logger.error(message)
actions = {'freeze': [scan_conformers['atoms'][ic_label]
for ic_label in changed_ic_dict[0]]}
return invalidate, invalidation_reason, message, actions
elif len(changed_ic_dict) > 0:
# Not a smooth scan.
invalidate = True
invalidation_reason = 'Significant difference observed between consecutive conformers'
message = f'Rotor scan of {label} between pivots {pivots} is inconsistent between ' \
f'two consecutive conformers.\nInconsistent consecutive conformers and problematic ' \
f'internal coordinates:'
changed_ic_label = []
for index, ics in changed_ic_dict.items():
if index > 0: # Do not include the initial/final differences which may include more ics
message += f'\nconformer #{index:>3d} / #{index+1:>3d} '
message += ', '.join(ics)
changed_ic_label += ics
message += '\nARC will attempt to troubleshoot this rotor scan.'
# list(set()) is used to remove duplicate labels
changed_ic_label = list(set(changed_ic_label))
logger.error(message)
actions = {'freeze': [scan_conformers['atoms'][ic_label]
for ic_label in changed_ic_label]}
return invalidate, invalidation_reason, message, actions
else:
# 2. Check rotor scan quality according to the PES curve
# 2.1. Check consistency between initial and final points
if abs(energies[-1] - energies[0]) > inconsistency_az:
# initial and final points differ by more than `inconsistency_az` kJ/mol.
# seems like this rotor broke the conformer. Invalidate
invalidate = True
invalidation_reason = f'initial and final points are inconsistent by more than {inconsistency_az:.2f} kJ/mol'
message = f'Rotor scan of {label} between pivots {pivots} is inconsistent by more ' \
f'than {inconsistency_az:.2f} kJ/mol between initial and final positions. ' \
f'Initial energy = {energies[0]}, final energy = {energies[-1]}. ARC will ' \
f'attempt to troubleshoot this rotor scan.'
logger.error(message)
actions = {'inc_res': None, 'freeze': 'all'}
return invalidate, invalidation_reason, message, actions
# 2.2. Check consistency between consecutive points
for j in range(len(energies) - 1):
if abs(energies[j] - energies[j + 1]) > inconsistency_ab * np.max(energies):
# Two consecutive points on the scan differ by more than `inconsistency_ab` kJ/mol.
# This is a serious inconsistency. Invalidate
invalidate = True
invalidation_reason = f'Two consecutive points are inconsistent by more than ' \
f'{inconsistency_ab * max(energies):.2f} kJ/mol'
message = f'Rotor scan of {label} between pivots {pivots} is inconsistent by' \
f'more than {inconsistency_ab * max(energies):.2f} kJ/mol between ' \
f'two consecutive points. ARC will attempt to troubleshoot this rotor scan.'
logger.error(message)
# Propose a method
# Try increasing resolution firstly, and try increasing res. and freezing all
# torsions jointly, afterwards.
# TODO: If we figure out that solely increasing res. is not effective,
# we can simplify the process to actions = {'inc_res': None, 'freeze': 'all'}
if any(['scan_res' in used_method for used_method in used_methods]):
# Check if increasing scan resolution is ever applied
if not any([used_method['scan_trsh'] != '' for used_method in used_methods]):
# Case where freezing torsions has not been applied
actions = {'inc_res': None, 'freeze': 'all'}
else:
# Since all torsions are frozen, there's not much we can do except increasing
# scan resolution. But it is not that effective either. So stop and do nothing.
pass
else:
# Case where neither increasing scan resolution nor freezing
# torsions has been applied
actions = {'inc_res': None}
return invalidate, invalidation_reason, message, actions
# 2.3 Check energy and change conformation if needed:
energy_diff = energies[0] - np.min(energies)
if energy_diff >= 2 or energy_diff > 0.5 * (max(energies) - min(energies)) \
and (species is None or not species.is_ts):
invalidate = True
invalidation_reason = f'Another conformer for {label} exists which is {energy_diff:.2f} kJ/mol lower.'
message = f'Species {label} is not oriented correctly around pivots {pivots}. ' \
f'Another conformer exists which is {energy_diff:.2f} kJ/mol lower. ' \
f'searching for a better conformation...'
logger.info(message)
# Find the lowest conformer, and use the new conformer for further jobs.
# Since at this point, the scan has passed previous checks, the possibility
# to switch to a non-isomorphic conformer is low.
min_index = np.argmin(energies)
conf_xyzs = parse_1d_scan_coords(log_file)
actions = {'change conformer': conf_xyzs[min_index]}
return invalidate, invalidation_reason, message, actions
# 3. Check the barrier height
if (np.max(energies) - np.min(energies)) > maximum_barrier:
# The barrier for the internal rotation is higher than `maximum_barrier`
num_wells = determine_rotor_symmetry(label=label,
pivots=pivots,
rotor_path='',
energies=energies,
return_num_wells=True,
log=False,
)[-1]
if num_wells == 1:
invalidate = True
invalidation_reason = f'The rotor scan has a barrier of {np.max(energies) - np.min(energies):.2f} ' \
f'kJ/mol, which is higher than the maximal barrier for rotation ' \
f'({maximum_barrier:.2f} kJ/mol)'
message = f'Rotor scan of {label} between pivots {pivots} has a barrier ' \
f'larger than {maximum_barrier:.2f} kJ/mol. Invalidating rotor.'
logger.warning(message)
return invalidate, invalidation_reason, message, actions
else:
logger.warning(f'The maximal barrier for rotor {pivots} of {label} is '
f'{(np.max(energies) - np.min(energies)):.2f} kJ/mol, which is higher than the set threshold '
f'of {maximum_barrier} kJ/mol. Since this mode when treated as torsion has {num_wells}, '
f'this mode is not invalidated: treating it as a vibrational mode will be less accurate than '
f'the hindered rotor treatment, since the entropy contribution from the population of '
f'this species at the higher wells will not be taken into account. NOT invalidating this '
f'torsional mode.')
# 4. Check requested atom constraints are preserved (particularly useful for TSs)
if preserve_params is not None:
success = True
pivots = list()
for atoms in preserve_params:
for i, xyz in enumerate(trajectory):
if i != 0:
# check that the distance between this atom pair is preserved relative to the previous entry
# in the trajectory, as well as relative to the original value (final_xyz).
current_distance = calculate_distance(coords=xyz, atoms=atoms, index=1)
previous_distance = calculate_distance(coords=trajectory[i - 1], atoms=atoms, index=1)
original_distance = calculate_distance(coords=original_xyz, atoms=atoms, index=1)
if previous_distance * (1.0 - preserve_params_in_scan['bond']) < \
current_distance < \
previous_distance * (1.0 + preserve_params_in_scan['bond']) \
or original_distance * (1.0 - preserve_params_in_scan['bond']) < \
current_distance < \
original_distance * (1.0 + preserve_params_in_scan['bond']):
success = False
pivots.append(atoms)
message = f"The rotor breaks the TS around pivots {pivots}: In trajectory {i}, the distance " \
f"between pivots is {current_distance} Angstroms, which is " \
f"{current_distance / previous_distance:.2f} of the previous frame, and " \
f"{current_distance / original_distance:.2f} of the original geometry."
break
if species.mol is not None:
scan = [determine_smallest_atom_index_in_scan(atom1=species.mol.atoms.index(atoms[0]),
atom2=species.mol.atoms.index(atoms[1]),
mol=species.mol)]
scan.extend(atoms)
scan.append(
determine_smallest_atom_index_in_scan(atom1=species.mol.atoms.index(atoms[1]),
atom2=species.mol.atoms.index(atoms[0]),
mol=species.mol))
# check that a dihedral angle with this atom pair as its pivots is preserved relative to the
# previous entry in the trajectory, as well as relative to the original value (final_xyz).
current_dihedral = calculate_dihedral_angle(coords=xyz, torsion=scan, index=1)
previous_dihedral = calculate_dihedral_angle(coords=trajectory[i - 1], torsion=scan, index=1)
original_dihedral = calculate_dihedral_angle(coords=original_xyz, torsion=scan, index=1)
if abs(current_dihedral - previous_dihedral) < preserve_params_in_scan['dihedral'] \
or abs(current_dihedral - original_dihedral) < preserve_params_in_scan['dihedral']:
success = False
pivots.append(atoms)
message = f"The rotor breaks the TS around pivots {pivots}: In trajectory {i}, the " \
f"dihedral angle is {current_dihedral} degrees, a " \
f"{abs(current_dihedral - previous_dihedral)} change relative to the previous " \
f"frame, and a {abs(current_dihedral - original_dihedral)} change relative to " \
f"the original geometry."
break
if species.mol is None:
logger.warning(
f'Cannot check that the dihedral angle of {species.label} is consistent throughout rotor '
f'scans without a .mol attribute')
if not success:
invalidate = True
invalidation_reason = message
logger.info(message)
actions = dict()
return invalidate, invalidation_reason, message, actions
return invalidate, invalidation_reason, message, actions | 3b14e0d576d06c03c34a0e800e9fb0449d3a1428 | 14,062 |
import logging
def get_msg_timeout(options):
"""Reads the configured sbd message timeout from each device.
Key arguments:
options -- options dictionary
Return Value:
msg_timeout (integer, seconds)
"""
# get the defined msg_timeout
msg_timeout = -1 # default sbd msg timeout
cmd = generate_sbd_command(options, "dump")
(return_code, out, err) = run_command(options, cmd)
for line in out.split("\n"):
if len(line) == 0:
continue
if "msgwait" in line:
tmp_msg_timeout = int(line.split(':')[1])
if -1 != msg_timeout and tmp_msg_timeout != msg_timeout:
logging.warn(\
"sbd message timeouts differ in different devices")
# we only save the highest timeout
if tmp_msg_timeout > msg_timeout:
msg_timeout = tmp_msg_timeout
return msg_timeout | 4b2df955ac796da38b5b9fa176477fec3c0470a2 | 14,063 |
import requests
import logging
def odata_getone(url, headers):
"""
Get a single object from Odata
"""
r = requests.get(url, headers=headers)
if not r.ok:
logging.warning(f"Fetch url {url} hit {r.status_code}")
return None
rjson = r.json()
if 'error' in rjson:
logging.warning(f"Fetching of {url} returned error {r.text}")
return None
return rjson | 5d6c668845132d821f175a2e8c1a924492a9eb2f | 14,064 |
import json
def _tokenizer_from_json(json_string):
"""Parses a JSON tokenizer configuration file and returns a
tokenizer instance.
# Arguments
json_string: JSON string encoding a tokenizer configuration.
# Returns
A Keras Tokenizer instance
"""
tokenizer_config = json.loads(json_string)
config = tokenizer_config.get('config')
word_counts = json.loads(config.pop('word_counts'))
word_docs = json.loads(config.pop('word_docs'))
index_docs = json.loads(config.pop('index_docs'))
# Integer indexing gets converted to strings with json.dumps()
index_docs = {int(k): v for k, v in index_docs.items()}
index_word = json.loads(config.pop('index_word'))
index_word = {int(k): v for k, v in index_word.items()}
word_index = json.loads(config.pop('word_index'))
tokenizer = tf.keras.preprocessing.text.Tokenizer(**config)
tokenizer.word_counts = word_counts
tokenizer.word_docs = word_docs
tokenizer.index_docs = index_docs
tokenizer.word_index = word_index
tokenizer.index_word = index_word
return tokenizer | 665485d9faad1352927879e81c381dd81b77b5c5 | 14,065 |
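A round-trip usage sketch (assumes TensorFlow is installed): Keras tokenizers expose to_json(), which produces the JSON this helper parses.
import tensorflow as tf

tokenizer = tf.keras.preprocessing.text.Tokenizer(num_words=100)
tokenizer.fit_on_texts(["the cat sat", "the dog ran"])
restored = _tokenizer_from_json(tokenizer.to_json())
# the restored tokenizer maps text to the same ids as the original
print(restored.texts_to_sequences(["the cat ran"]))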
from typing import List
from pathlib import Path
def get_all_pip_requirements_files() -> List[Path]:
"""
If the root level hi-ml directory is available (e.g. it has been installed as a submodule or
downloaded directly into a parent repo) then we must add its pip requirements to any environment
definition. This function returns a list of the necessary pip requirements files. If the hi-ml
root directory does not exist (e.g. hi-ml has been installed as a pip package), this is not necessary
and so this function returns an empty list.
:return: A list of pip requirements files in the hi-ml and hi-ml-azure packages if relevant,
or else an empty list
"""
files = []
if paths.is_himl_used_from_git_repo():
git_root = paths.git_repo_root_folder()
for folder in [Path("hi-ml") / "run_requirements.txt", Path("hi-ml-azure") / "run_requirements.txt"]:
files.append(git_root / folder)
return files | 7ce5a327af6961ad23555ba5334246b75d8bd782 | 14,066 |
def load_data(dataset_name: str, split: str) -> object:
"""
Load the data from the datasets library
Parameters
----------
dataset_name : str
name of the dataset to be downloaded.
split : str
type of split (train or test).
Returns
-------
object
the loaded dataset split.
"""
data = load_dataset(dataset_name, split=split)
logger.info(split + " dataset downloaded!")
return data | f6dc374d8c12fa74b9f390a1766af369791bc3b2 | 14,067 |
def horizontal_south_link_neighbor(shape, horizontal_ids, bad_index_value=-1):
"""ID of south horizontal link neighbor.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
horizontal_ids : array of int
Array of all horizontal link ids *must be of len(horizontal_links)*.
bad_index_value: int, optional
Value assigned to inactive indicies in the array.
Returns
-------
ndarray :
Link IDs of south horizontal neighbor links. Length of
number_of_horizontal_links.
Examples
--------
The following example uses this grid::
*--27-->*--28-->*--29-->*--30-->*
*--18-->*--19-->*--20-->*--21-->*
*---9-->*--10-->*--11-->*--12-->*
*---0-->*---1-->*---2-->*---3-->*
.. note::
Only horizontal links are shown. When no neighbor is found,
bad_index_value is returned.
``*`` indicates nodes
Numeric values correspond to the horizontal IDs.
>>> from landlab import RasterModelGrid
>>> from landlab.components.overland_flow._links import (horizontal_link_ids,
... horizontal_south_link_neighbor)
>>> rmg = RasterModelGrid((4, 5))
>>> horizontal_links = horizontal_link_ids(rmg.shape).flatten()
>>> horizontal_south_link_neighbor(rmg.shape, horizontal_links)
array([-1, -1, -1, -1, 0, 1, 2, 3, 9, 10, 11, 12, 18, 19, 20, 21])
"""
links = np.roll(horizontal_ids.reshape((shape[0], shape[1] - 1)), 1, axis=0)
links[0, :] = bad_index_value
return links.reshape(-1) | 413fdd5a4af8a0e77b0c3ab191bac60f2ba2cc26 | 14,068 |
def _get_output(algorithm, iport=0, iconnection=0, oport=0, active_scalar=None,
active_scalar_field='point'):
"""A helper to get the algorithm's output and copy input's vtki meta info"""
ido = algorithm.GetInputDataObject(iport, iconnection)
data = wrap(algorithm.GetOutputDataObject(oport))
data.copy_meta_from(ido)
if active_scalar is not None:
data.set_active_scalar(active_scalar, preference=active_scalar_field)
return data | dd70cbb1ee6c2d6ed085fc589c24e88fc62a17ab | 14,069 |
def read_cesar_out(cesar_line):
"""Return ref and query sequence."""
cesar_content = cesar_line.split("\n")
# del cesar_content[0]
fractions = parts(cesar_content, 4)
cesar_fractions = []
for fraction in fractions:
if len(fraction) == 1:
continue
ref_seq = fraction[1]
query_name = fraction[2][1:]
query_seq = fraction[3]
if len(ref_seq) != len(query_seq):
die("Error! Ref and query sequences must have the same length!")
elif len(ref_seq) == 0:
die("Error! The input is empty!")
fraction = (query_name, ref_seq, query_seq)
cesar_fractions.append(fraction)
return cesar_fractions | fb1a1a66647fb6d3e6fec1b27d26836067c6b023 | 14,070 |
def aa_i2c_slave_write_stats (aardvark):
"""usage: int return = aa_i2c_slave_write_stats(Aardvark aardvark)"""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# Call API function
return api.py_aa_i2c_slave_write_stats(aardvark) | 87e64465b1bc79c7ab48e39274e39cab17f74755 | 14,071 |
import json
def get_aliases_user(request):
"""
Returns all the Aliases
API_ENDPOINT:api/v1/aliases
----------
payload
{
"email":"[email protected]"
}
"""
alias_array = []
payload = {}
print("came to get_aliases_user()")
data_received = json.loads(request.body)
email = data_received["email"]
print(f"Email body:{email}")
db_data = Aliases.objects.filter(user__email=email)
print(f"QuerySet->{db_data}")
for x in db_data:
alias_array.append(x.alias)
return JsonResponse({"alias":alias_array}, safe=False) | 2501fa15bafc2214585bd1e7d568a9a685725020 | 14,072 |
def _sorted_attributes(features, attrs, attribute):
"""
When the attributes configuration is a dictionary, use its
'sort_key' parameter to order the feature attributes and
return them as a sorted list. If attrs is not in the right
format (i.e. not a dict), None is returned.
"""
sort_key = attrs.get('sort_key')
reverse = attrs.get('reverse')
assert sort_key is not None, "Configuration " + \
"parameter 'sort_key' is missing, please " + \
"check your configuration."
# first, we find the _minimum_ ordering over the
# group of key values. this is because we only do
# the intersection in groups by the cutting
# attribute, so can only sort in accordance with
# that.
group = dict()
for feature in features:
val = feature[1].get(sort_key)
key = feature[1].get(attribute)
val = _no_none_min(val, group.get(key))
group[key] = val
# extract the sorted list of attributes from the
# grouped (attribute, order) pairs, ordering by
# the order.
all_attrs = sorted(group.iteritems(),
key=lambda x: x[1], reverse=bool(reverse))
# strip out the sort key in return
return [x[0] for x in all_attrs] | 473c3d30c4fde5f00932adfb50c4d34c08324d54 | 14,073 |
import subprocess
import time
import random
def get_gpus(num_gpu=1, worker_index=-1, format=AS_STRING):
"""Get list of free GPUs according to nvidia-smi.
This will retry for ``MAX_RETRIES`` times until the requested number of GPUs are available.
Args:
:num_gpu: number of GPUs desired.
:worker_index: index "hint" for allocation of available GPUs.
Returns:
Comma-delimited string of GPU ids, or raises an Exception if the requested number of GPUs could not be found.
"""
# get list of gpus (index, uuid)
list_gpus = subprocess.check_output(["nvidia-smi", "--list-gpus"]).decode()
logger.debug("all GPUs:\n{0}".format(list_gpus))
# parse index and guid
gpus = [x for x in list_gpus.split('\n') if len(x) > 0]
def parse_gpu(gpu_str):
cols = gpu_str.split(' ')
return cols[5].split(')')[0], cols[1].split(':')[0]
gpu_list = [parse_gpu(gpu) for gpu in gpus]
free_gpus = []
retries = 0
while len(free_gpus) < num_gpu and retries < MAX_RETRIES:
smi_output = subprocess.check_output(["nvidia-smi", "--format=csv,noheader,nounits", "--query-compute-apps=gpu_uuid"]).decode()
logger.debug("busy GPUs:\n{0}".format(smi_output))
busy_uuids = [x for x in smi_output.split('\n') if len(x) > 0]
for uuid, index in gpu_list:
if uuid not in busy_uuids:
free_gpus.append(index)
if len(free_gpus) < num_gpu:
logger.warn("Unable to find available GPUs: requested={0}, available={1}".format(num_gpu, len(free_gpus)))
retries += 1
time.sleep(30 * retries)
free_gpus = []
logger.info("Available GPUs: {}".format(free_gpus))
# if still can't find available GPUs, raise exception
if len(free_gpus) < num_gpu:
smi_output = subprocess.check_output(["nvidia-smi", "--format=csv", "--query-compute-apps=gpu_uuid,pid,process_name,used_gpu_memory"]).decode()
logger.info(": {0}".format(smi_output))
raise Exception("Unable to find {} free GPU(s)\n{}".format(num_gpu, smi_output))
# Get logical placement
num_available = len(free_gpus)
if worker_index == -1:
# use original random placement
random.shuffle(free_gpus)
proposed_gpus = free_gpus[:num_gpu]
else:
# ordered by worker index
if worker_index * num_gpu + num_gpu > num_available:
worker_index = worker_index * num_gpu % num_available
proposed_gpus = free_gpus[worker_index * num_gpu:(worker_index * num_gpu + num_gpu)]
logger.info("Proposed GPUs: {}".format(proposed_gpus))
if format == AS_STRING:
return ','.join(str(x) for x in proposed_gpus)
elif format == AS_LIST:
return proposed_gpus
else:
raise Exception("Unknown GPU format") | 97956eaffa16514e34737d0733a69a1b176f7067 | 14,074 |
def ldensity_laplace_uniform_dist(prob_laplace, location, scale, low, high,
val):
"""
A mixture of a Laplace and a uniform distribution
"""
return np.log((prob_laplace * np.exp(-np.abs(val - location) / scale) / (2 * scale))
+ ((1 - prob_laplace) / (high - low))) | b069b2de4c2da3c69245b6a5507b61e918d5bb76 | 14,075 |
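Two quick sanity checks of the mixture log-density; in the limiting cases it reduces to the pure uniform and pure Laplace densities.
import numpy as np

# prob_laplace = 0: density is 1 / (high - low) = 0.25, so the result is log(0.25) ~ -1.386
print(ldensity_laplace_uniform_dist(0.0, 0.0, 1.0, -2.0, 2.0, 0.5))
# prob_laplace = 1, val at the location: density is 1 / (2 * scale) = 0.5, log(0.5) ~ -0.693
print(ldensity_laplace_uniform_dist(1.0, 0.0, 1.0, -2.0, 2.0, 0.0))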
def readConfirmInput():
"""asks user for confirmation
Returns:
bool: True if user confirms, False if doesn't
"""
try:
result = readUserInput("(y/n): ") # UnrecognisedSelectionException
return 'y' in result[0].lower() # IndexError
except (UnrecognisedSelectionException, IndexError) as e:
return False | 007fe5e0002711db7cd0bcb1869dcbef9c667213 | 14,076 |
def linkElectron(inLep, inLepIdx, lepCollection, genPartCollection):
"""process input Electron, find lineage within gen particles
pass "find" as inLepIdx of particle to trigger finding within the method"""
linkChain = []
lepIdx = -1
if inLepIdx == "find":
for Idx, lep in enumerate(lepCollection):
if inLep == lep:
lepIdx = Idx
break
elif -1 < inLepIdx < len(lepCollection):
lepIdx = inLepIdx
else:
lepIdx = -999
tmpMoth = inLep.genPartIdx
#temporary deltaR with a default (only stored under logic error) and a calculation against the 'head' of the chain
tmpDeltaR = -9999.786
if len(linkChain) > 0:
tmpDeltaR = deltaR(inLep, linkChain[0][6])
elif len(linkChain) == 0:
tmpDeltaR = 0.0
linkChain.append( ("Electron", lepIdx, tmpMoth, inLep.pdgId, tmpDeltaR, inLep.genPartFlav, inLep) )
if -1 < tmpMoth < len(genPartCollection):
__ = linkGenPart(genPartCollection[tmpMoth], tmpMoth, genPartCollection, linkChain=linkChain)
return linkChain | 87747414f5e086f16a455dbc732f86ddcb0db630 | 14,077 |
def status():
"""Determines whether or not if CrowdStrike Falcon is loaded.
:return: A Boolean on whether or not crowdstrike is loaded.
:rtype: bool
.. code-block:: bash
salt '*' crowdstrike.status
"""
if not __salt__['crowdstrike.system_extension']():
# if we should be using a kext, just check the kext as falconctl stats
# can take a long time to run if falcon is already unloaded.
if not __salt__['kext.running']('com.crowdstrike.sensor'):
return False
try:
__salt__['crowdstrike.falconctl']('stats', timeout=5)
return True
except CommandExecutionError:
return False | e9bdbce3e290967b95d58ddf75c2054e06542043 | 14,078 |
def sparse_search(arr, s):
""" 10.5 Sparse Search: Given a sorted array of strings that is interspersed
with empty strings, write a method to find the location of a given string.
EXAMPLE:
Input: find "ball" in {"at", "", "", "" , "ball", "", "", "car", "" , "" , "dad", ""}
Output: 4
"""
def spread(arr, middle, left, right):
k = 1
while middle - k >= left and middle + k <= right:
if arr[middle - k] != "":
return middle - k
if arr[middle + k] != "":
return middle + k
k += 1
return middle
def rec_sparse_search(arr, s, left, right):
if left > right:
return None
middle = (left + right) // 2  # integer division keeps the index an int
if arr[middle] == "":
new_middle = spread(arr, middle, left, right)
if new_middle == middle:
return None
middle = new_middle
if arr[middle] == s:
return middle
if arr[middle] < s:
# the target sorts after arr[middle]; search the right half
return rec_sparse_search(arr, s, middle + 1, right)
return rec_sparse_search(arr, s, left, middle - 1)
return rec_sparse_search(arr, s, 0, len(arr) - 1) | 605a56c518539117a83382c9e73d37d5e56b535f | 14,079 |
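Running the docstring's example with the fixed search direction:
lst = ["at", "", "", "", "ball", "", "", "car", "", "", "dad", ""]
print(sparse_search(lst, "ball"))     # 4
print(sparse_search(lst, "missing"))  # None (not present)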
import uuid
def uuid_pk():
"""
Generate uuid1 and cut it to 12.
UUID default size is 32 chars.
"""
return uuid.uuid1().hex[:12] | 9efb12a6e72b02adcd4a64ca721ceab8c688055a | 14,080 |
def infected_symptomatic_00x80():
"""
Real Name: b'Infected symptomatic 00x80'
Original Eqn: b'Infected symptomatic 00+Infected symptomatic 80'
Units: b'person'
Limits: (None, None)
Type: component
b''
"""
return infected_symptomatic_00() + infected_symptomatic_80() | 0a3500659fad466c92fcd3d073003094c56efe9d | 14,081 |
def stencilCompare(firstElem, secondElem):
"""
stencilCompare(const std::pair< int, FP_PRECISION > &firstElem, const std::pair< int,
FP_PRECISION > &secondElem) -> bool
Comparitor for sorting k-nearest stencil std::pair objects
"""
return _openmoc.stencilCompare(firstElem, secondElem) | 3eda1a57e521134e77ba55ae38771f151699fdfd | 14,082 |
import random
def bisect_profiles_wrapper(decider, good, bad, perform_check=True):
"""Wrapper for recursive profile bisection."""
# Validate good and bad profiles are such, otherwise bisection reports noise
# Note that while decider is a random mock, these assertions may fail.
if perform_check:
if decider.run(good, save_run=False) != StatusEnum.GOOD_STATUS:
raise ValueError('Supplied good profile is not actually GOOD')
if decider.run(bad, save_run=False) != StatusEnum.BAD_STATUS:
raise ValueError('Supplied bad profile is not actually BAD')
common_funcs = sorted(func for func in good if func in bad)
if not common_funcs:
return {'ranges': [], 'individuals': []}
# shuffle because the results of our analysis can be quite order-dependent
# but this list has no inherent ordering. By shuffling each time, the chances
# of finding new, potentially interesting results are increased each time
# the program is run
random.shuffle(common_funcs)
results = bisect_profiles(decider, good, bad, common_funcs, 0,
len(common_funcs))
results['ranges'].sort()
results['individuals'].sort()
return results | 8fbe2f018c7dfb7fdeb71dd5080993a9773a41d7 | 14,083 |
import typing
def rolling_median_with_nan_forward_fill(vector: typing.List[float], window_length: int) -> typing.List[float]:
"""Computes a rolling median of a vector of floats and returns the results. NaNs will be forward filled."""
forward_fill(vector)
return rolling_median_no_nan(vector, window_length) | 708cd1f6371846ea3b7acb4d7b59a7a61f85de7c | 14,084 |
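The forward_fill() helper used above is not shown; a minimal in-place sketch of the assumed behaviour (each NaN takes the most recent non-NaN value, leading NaNs stay NaN) follows.
import math
import typing

def forward_fill(vector: typing.List[float]) -> None:
    last_valid = math.nan
    for i, value in enumerate(vector):
        if math.isnan(value):
            vector[i] = last_valid  # stays NaN if nothing valid was seen yet
        else:
            last_valid = value

v = [math.nan, 1.0, math.nan, 3.0]
forward_fill(v)
print(v)  # [nan, 1.0, 1.0, 3.0]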
def build_class_docstring(class_to_doc: ClassToDocument, formatter: Formatter) -> str:
"""A function to build the docstring of a class
Parameters
----------
class_to_doc : ClassToDocument
The class to document
formatter : Formatter
The formatter to use
Returns
-------
docstring : str
The docstring for this class
"""
_logger.debug(f"Build class docstring for '{class_to_doc.name}'...")
return formatter.format_docstring(nb_base_tab=class_to_doc.nb_base_tab,
description=class_to_doc.description,
fields={
'Attributes': class_to_doc.attributes,
'Public methods': class_to_doc.public_methods,
'Protected methods': class_to_doc.protected_methods,
}) | ab9c487cd0c3059675e476fbab541f95fb912d2b | 14,085 |
from typing import Optional
from typing import Dict
def Subprocess(
identifier: Optional[str] = None, variables: Optional[Dict] = None,
env: Optional[Dict] = None, volume: Optional[str] = None
) -> Dict:
"""Get base configuration for a subprocess worker with the given optional
arguments.
Parameters
----------
identifier: string, default=None
Unique worker identifier. If no identifier is given, a new unique
identifier will be generated.
variables: dict, default=None
Mapping with default values for placeholders in command template
strings.
env: dict, default=None
Default settings for environment variables when executing workflow
steps. These settings can get overridden by step-specific settings.
volume: string, default=None
Identifier for the storage volume that the worker has access to.
Returns
-------
dict
"""
return WorkerSpec(
worker_type=SUBPROCESS_WORKER,
variables=variables,
env=env,
identifier=identifier,
volume=volume
) | 97a90179c91ec862c6008e12ae6c12368ec301c5 | 14,086 |
def get_var(name):
"""
Returns the value of a settings variable.
The full name is CONTROLLED_VOCABULARY_ + name.
First look into django settings.
If not found there, use the value defined in this file.
"""
full_name = "CONTROLLED_VOCABULARY_" + name
ret = globals().get(full_name, None)
ret = getattr(settings, full_name, ret)
return ret | 3c7b5507a387917b9639510023948571160b5973 | 14,087 |
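Usage sketch, assuming a configured Django project and a module-level default with the full prefix; CONTROLLED_VOCABULARY_DEFAULT_PREFIX below is a hypothetical name.
# module-level default, found via globals() when settings do not override it
CONTROLLED_VOCABULARY_DEFAULT_PREFIX = "voc"

value = get_var("DEFAULT_PREFIX")
# "voc" unless settings.CONTROLLED_VOCABULARY_DEFAULT_PREFIX is defined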
def get_schema_from_dataset_url_carbon(dataset_url,
key=None,
secret=None,
endpoint=None,
proxy=None,
proxy_port=None,
filesystem=None):
"""Returns a :class:`petastorm.unischema.Unischema` object loaded from a dataset specified by a url.
:param dataset_url: A dataset URL
:param key: access key
:param secret: secret key
:param endpoint: endpoint_url
:param proxy: proxy
:param proxy_port: proxy_port
:param filesystem: filesystem
:return: A :class:`petastorm.unischema.Unischema` object
"""
# Get a unischema stored in the dataset metadata.
stored_schema = get_schema_carbon(CarbonDataset(dataset_url,
key=key,
secret=secret,
endpoint=endpoint,
proxy=proxy,
proxy_port=proxy_port,
filesystem=filesystem))
return stored_schema | 2c562e39232dfbe1ac7359d0c88bd2a1efa5a334 | 14,088 |
import time
def get_all_metrics(model, epoch, val_x, val_y, start_time, loss_fn):
"""每个epoch结束后在发展集上预测,得到一些指标
:param model: tf.keras.Model, epoch训练后的模型
:param epoch: int, 轮数
:param val_x: tf.data.Dataset, 发展集的输入, 和val_y一样的sample_size
:param val_y: tf.data.Dataset, 发展集的标签
:param start_time: time.time, 开始时间
:param loss_fn: 损失函数
:return: 模型在发展集上的损失
"""
y_pred_val, y_true_val = [], []
loss_val = 0
sample_size_val = 0
for x_tmp, y_tmp in zip(val_x.as_numpy_iterator(), val_y.as_numpy_iterator()):
pred_tmp = model.predict(x_tmp)
y_pred_val.append(pred_tmp)
y_true_val.append(y_tmp)
loss_tmp = loss_fn(y_tmp, pred_tmp)
loss_val += np.sum(loss_tmp)
sample_size_val += x_tmp[0].shape[0]
# compute the loss
loss_val /= sample_size_val
# compute the AUC
y_pred = np.concatenate(y_pred_val).astype(dtype=float)
y_true = np.concatenate(y_true_val).astype(dtype=float)
roc_auc_val = roc_auc_score(y_true, y_pred)
# convert predicted probabilities to class labels
y_pred = np.where(y_pred > 0.5, np.ones_like(y_pred), np.zeros_like(y_pred))
# compute confusion-matrix-based metrics
recall = recall_score(y_true=y_true, y_pred=y_pred)
precision = precision_score(y_true=y_true, y_pred=y_pred)
accuracy = accuracy_score(y_true=y_true, y_pred=y_pred)
line = f"""For epoch {epoch}, on val set loss is {round(loss_val, 5)}, auc is {round(roc_auc_val, 4)},
recall is {round(recall, 4)}, precision is {round(precision, 4)}, accuracy is {round(accuracy, 4)},
confusion_matrix is {confusion_matrix(y_true=y_true, y_pred=y_pred)}"""
line += f", time elapsed {(time.time() - start_time) / 60} mins"
print("HZJ info: ", line)
return loss_val | 24025cbdcc702ca32c5887f1ec2ccf424d492e69 | 14,089 |
def get_labels(decode_steps: DecodeSteps) -> LabelsDict:
"""Returns labels dict given DecodeSteps."""
return {
"target_action_types": decode_steps.action_types,
"target_action_ids": decode_steps.action_ids,
} | 66047e41b3d173e53b676a60e48647e3862aac16 | 14,090 |
def get_body_barycentric_posvel(body, time, ephemeris=None):
"""Calculate the barycentric position and velocity of a solar system body.
Parameters
----------
body : str or other
The solar system body for which to calculate positions. Can also be a
kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL
kernel.
time : `~astropy.time.Time`
Time of observation.
ephemeris : str, optional
Ephemeris to use. By default, use the one set with
``astropy.coordinates.solar_system_ephemeris.set``
Returns
-------
position, velocity : tuple of `~astropy.coordinates.CartesianRepresentation`
Tuple of barycentric (ICRS) position and velocity.
See also
--------
get_body_barycentric : to calculate position only.
This is faster by about a factor two for JPL kernels, but has no
speed advantage for the built-in ephemeris.
Notes
-----
The velocity cannot be calculated for the Moon. To just get the position,
use :func:`~astropy.coordinates.get_body_barycentric`.
"""
return _get_body_barycentric_posvel(body, time, ephemeris) | 41be03294a5cd21163afae9650f556fc64257110 | 14,091 |
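Usage sketch of the public astropy API this function backs (the default built-in ephemeris works offline; a JPL kernel would have to be downloaded first).
from astropy.time import Time
from astropy.coordinates import get_body_barycentric_posvel

t = Time("2024-01-01T00:00:00")
pos, vel = get_body_barycentric_posvel("earth", t)
print(pos.xyz)  # barycentric ICRS position (with units)
print(vel.xyz)  # barycentric ICRS velocity (with units)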
def recurDraw(num, data):
"""
Purpose: to draw polygons
Parameters: num - indicator of what layer the program is on, data - instance
of the Data class
Returns: data - instance of the data class
Calls: recurDraw - itself, Data - data processing class, toDraw - drawing
intermediary function
"""
if num == 0:
return num
num -= 1
data = recurDraw(num, data)
data = Data(num, data)
toDraw(data)
return data | d94d2f250396b6acfcf02306fd78b180f070aa92 | 14,092 |
def cont4():
"""
Two clusters, namely <cont1> (5 contours) and <cont3> 4 contours).
The enclosing contours of the clusters have a different value.
Contains 3 minima.
"""
cont_min = [
cncc(5, (6.00, 3.00), 0.2, (1, 1)),
cncc(2, (7.00, 4.00), 0.1, (4, 1), rmin=0.15),
cncc(2, (6.25, 3.25), 0.3, (6, 1), rmin=1.6, no_min=True),
cncc(5, (3.00, 3.00), 0.2, (1, 1)),
]
cont = [e for lst in cont_min for e in lst[0]]
min = [e for lst in cont_min for e in lst[1]]
return cont, min | c83cb48c3bc257dcf1ead50312d186464acdd57d | 14,093 |
def predict(test_data, qrnn, add_noise = False):
"""
predict the posterior mean and median
"""
if add_noise:
x_noise = test_data.add_noise(test_data.x, test_data.index)
x = (x_noise - test_data.mean)/test_data.std
y_prior = x_noise
y = test_data.y_noise
y0 = test_data.y
else:
x = (test_data.x - test_data.mean)/test_data.std
y_prior = test_data.x
y = test_data.y
y0 = test_data.y0
y_pre = qrnn.predict(x.data)
y_pos_mean = qrnn.posterior_mean(x.data)
return y_pre, y_prior, y0, y, y_pos_mean, x.data | d45e843d529babb99baa160ad976c0c9753da42d | 14,094 |
def handle_login_GET():
"""
Displays the index (the login page).
"""
if request.args.get('next'):
url_kwargs = dict(next=request.args.get('next'))
else:
url_kwargs = {}
try:
weblab_api.api.check_user_session()
except SessionNotFoundError:
pass # Expected behavior
else:
# User is already logged in, send him to the next url
return redirect(get_next_url())
return render_template("webclient/login.html", url_kwargs = url_kwargs) | f496519518b5d3b8a71ff4a8e60be2a2fe2110f3 | 14,095 |
import copy
def get_role_actions():
"""Returns the possible role to actions items in the application.
Returns:
dict(str, list(str)). A dict presenting key as role and values as list
of actions corresponding to the given role.
"""
return copy.deepcopy(_ROLE_ACTIONS) | 79b53e4003b1dc9264d9210f03395ce32d737c1e | 14,096 |
import json
def jsons_str_tuple_to_jsons_tuple(ctx, param, value):
"""
Converts a sequence of JSON strings into a list of parsed Python objects
"""
if value is None:
return []
else:
return [json.loads(a) for a in value] | 8b6f03650d566d74b0400868f12b59c2fa37bc3e | 14,097 |
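Quick check; the ctx/param arguments normally come from Click and are ignored here, so None is fine for a standalone call.
print(jsons_str_tuple_to_jsons_tuple(None, None, ['{"a": 1}', '[1, 2, 3]']))
# [{'a': 1}, [1, 2, 3]]
print(jsons_str_tuple_to_jsons_tuple(None, None, None))  # []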
def get_webelements_in_active_area(xpath, **kwargs):
"""Find element under another element.
If ${ACTIVE_AREA_FUNC} returns an element then the xpath is searched from
that element. Otherwise the element is searched under body element.
Parameters
----------
xpath : str
Xpath expression without xpath= prefix.
Returns
-------
:obj:`list` of :obj:`WebElement`
List of visible WebElements.
"""
active_area_xpath = CONFIG["ActiveAreaXpath"]
if ACTIVE_AREA_FUNCTION is not None:
active_area = ACTIVE_AREA_FUNCTION()
if active_area:
xpath = xpath.replace('//', './/', 1)
else:
driver = browser.get_current_browser()
active_area = driver.find_element_by_xpath(active_area_xpath)
else:
driver = browser.get_current_browser()
try:
active_area = driver.find_element_by_xpath(active_area_xpath)
if active_area is None:
logger.debug('Got None for active area. Is page still loading '
'or is it missing body tag?')
return None
# //body not found, is page still loading? Return None to continue looping
except NoSuchElementException:
logger.debug("Cannot locate //body element. Is page still loading?")
return None
try:
webelements = active_area.find_elements_by_xpath(xpath)
logger.trace('XPath {} matched {} webelements'
.format(xpath, len(webelements)))
webelements = get_visible_elements_from_elements(webelements, **kwargs)
except StaleElementReferenceException:
raise QWebStalingElementError('Got StaleElementException')
except (JavascriptException, InvalidSelectorException) as e:
logger.debug('Got {}, returning None'.format(e))
webelements = None
return webelements | 91579a7195c865734b4119e33d0668a4951eb3f4 | 14,098 |
import sys
def create_double_group():
"""
Returns: Create two simple control for all object under selected
"""
selections = cm.ls(selection=True)
if len(selections) < 1:
return om.MGlobal.displayError("This function needs at least two objects to work with")
for selection in selections:
if "End" in str(selection):
continue
else:
for each_name in list_tail_name:
if str(each_name) in selection:
base_name = selection.replace(str(each_name), "")
else:
base_name = selection
parent = cm.listRelatives(selection, parent=True)
group_orient = cm.group(empty=True, world=True, name="{0}_orient".format(base_name))
group_offset = cm.group(empty=True, world=True, name="{0}_offset".format(base_name))
cm.parent(group_offset, group_orient)
if parent is not None:
cm.parent(group_orient, parent)
cm.matchTransform(group_orient, selection)
cm.makeIdentity(group_orient, apply=True, scale=True)
cm.parent(selection, group_offset)
sys.stdout.write("Create double group completed.\n") | a240c5f7220e4e9270db1b6e84204ebf399e77b0 | 14,099 |