content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M) |
---|---|---|
import os
from monty.serialization import loadfn, dumpfn
def fetch_preset(output_filename=None, nproc=8, add_structure=True):
"""
    Fetches a preset list of docs determined via trial and error.
    An initial query via the frontend on 06/28/2019 showed 12870,
    and subsequent sampling of ids from 8000-25000 yielded all 12820.
    Successful query ids were stored in indices.json, from which this
    function should be able to extract all of the relevant data.
Args:
output_filename (str): output filename for all collected docs
nproc (int): number of processes to use
Returns:
(List): list of isotherm documents
"""
# Load indices from json doc
iso_ids = loadfn(os.path.join(MOF_TDA_PATH, "ingest", "indices.json"))
# Fetch all docs from ids
isotherms = fetch_many_docs(iso_ids, nproc=nproc)
# Dump to json if output specified
if output_filename is not None:
dumpfn(isotherms, output_filename)
return isotherms | ca2073d5f12f2c09f0aea5596c7d9919277a17ce | 16,800 |
import sys
def parse_args():
"""Function to read CCB-ID command line arguments
Args:
None - reads from sys.argv
Returns:
an argparse object
"""
    # create the argument parser (`args` here is the CCB-ID package's argument-helper module)
    parser = args.create_parser(description='Apply a CCB-ID species classification model to csv or image data.')
# set up the arguments for dealing with file i/o
args.input(parser)
args.mask(parser)
args.output(parser)
args.ecodse(parser)
args.models(parser, help='path to the ccbid model to apply', default=None, required=True)
    # arguments to turn on certain flags or set specific parameters
args.remove_outliers(parser)
args.aggregate(parser)
args.labels(parser)
args.cpus(parser) # maybe add function to model object to update the n_cpus in each model
args.verbose(parser)
# parse the inputs from sys.argv
return parser.parse_args(sys.argv[1:]) | a2a7ff8a7f1fa4cd757bbd0cc0bd6a33a84e1032 | 16,801 |
import typing  # the project's local typing module (provides wpm/accuracy), not the stdlib typing
def compute_accuracy(data):
"""Return [wpm, accuracy]."""
prompted_text = data["promptedText"][0]
typed_text = data.get("typedText", [""])[0]
start_time = float(data["startTime"][0])
end_time = float(data["endTime"][0])
return [typing.wpm(typed_text, end_time - start_time),
typing.accuracy(typed_text, prompted_text)] | c10b5d681392c71967b86f12d33be3edc1361446 | 16,802 |
from typing import IO
def write_file(filename: str, content: str, mode: str = "w") -> IO:
"""Save content to a file, overwriting it by default."""
with open(filename, mode) as file:
        file.write(content)
    # Note: the handle is closed when the `with` block exits, so the returned IO object is closed.
    return file
import maya.cmds
def get_minimum_integer_attribute_value(node, attribute_name):
"""
Returns the minimum value that a specific integer attribute has set
:param node: str
:param attribute_name: str
:return: float
"""
return maya.cmds.attributeQuery(attribute_name, min=True, node=node)[0] | ce36c252478e9cb5d5e5ade3e2d70716d206748a | 16,804 |
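A minimal usage sketch for the helper above, assuming a running Maya session; the node and attribute names are hypothetical, and the queried attribute must actually define a minimum value:

import maya.cmds as cmds

cmds.polyCube(name='myCube')
cmds.addAttr('myCube', longName='level', attributeType='long', minValue=0, maxValue=10)
print(get_minimum_integer_attribute_value('myCube', 'level'))  # prints the configured minimum (0)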
import numpy as np
import yt
def get_star_locs(plotfile):
"""Given a plotfile, return the location of the primary and the secondary."""
ds = yt.load(plotfile)
# Get a numpy array corresponding to the density.
problo = ds.domain_left_edge.v
probhi = ds.domain_right_edge.v
dim = ds.domain_dimensions
dx = (probhi - problo) / dim
dens = (ds.covering_grid(level=0, left_edge=[0.0, 0.0, 0.0], dims=ds.domain_dimensions)['density']).v
# Calculate the orbital parameters
M_solar = 1.99e33
Gconst = 6.67e-8
M_P = 0.90
M_S = 0.60
M_P = M_P * M_solar
M_S = M_S * M_solar
    # Read the rotational period from the job_info file.
    with open(plotfile + '/job_info', 'r') as f:
        for line in f:
            if line.find("rotational_period") > 0:
                rot_period = float(line.split("= ")[1])
                break
    # Orbital separation for a circular Keplerian orbit with this period.
    a = (Gconst * (M_P + M_S) * rot_period**2 / (4.0 * np.pi**2))**(1.0/3.0)
    a_2 = a / (1 + M_S / M_P)
    a_1 = (M_S / M_P) * a_2
    # Guess the locations of the stars based on perfect circular rotation
t = (ds.current_time).v
center = (probhi + problo) / 2.0
loc_P = [-a_1 * np.cos(2 * np.pi * t / rot_period) + center[0], -a_1 * np.sin(2 * np.pi * t / rot_period) + center[1], 0.0 + center[2]]
loc_S = [ a_2 * np.cos(2 * np.pi * t / rot_period) + center[0], a_2 * np.sin(2 * np.pi * t / rot_period) + center[1], 0.0 + center[2]]
loc_P = np.array(loc_P)
loc_S = np.array(loc_S)
# Create an array of the zone positions
x = problo[0] + dx[0] * (np.arange(dim[0]) + 0.5e0)
y = problo[1] + dx[1] * (np.arange(dim[1]) + 0.5e0)
z = problo[2] + dx[2] * (np.arange(dim[2]) + 0.5e0)
xx, yy, zz = np.meshgrid(x, y, z, indexing="ij")
rr = (xx**2 + yy**2 + zz**2)**0.5
# Now what we'll do is to split up the grid into two parts.
# zones that are closer to the primary's expected location and
# zones that are closer to the secondary's expected location.
rr_P = ( (xx - loc_P[0])**2 + (yy - loc_P[1])**2 + (zz - loc_P[2])**2 )**0.5
rr_S = ( (xx - loc_S[0])**2 + (yy - loc_S[1])**2 + (zz - loc_S[2])**2 )**0.5
P_idx = np.where( rr_P < rr_S )
S_idx = np.where( rr_S < rr_P )
# Now, do a center of mass sum on each star.
xx_P_com = np.sum( dens[P_idx] * xx[P_idx] ) / np.sum(dens[P_idx])
yy_P_com = np.sum( dens[P_idx] * yy[P_idx] ) / np.sum(dens[P_idx])
zz_P_com = np.sum( dens[P_idx] * zz[P_idx] ) / np.sum(dens[P_idx])
xx_S_com = np.sum( dens[S_idx] * xx[S_idx] ) / np.sum(dens[S_idx])
yy_S_com = np.sum( dens[S_idx] * yy[S_idx] ) / np.sum(dens[S_idx])
zz_S_com = np.sum( dens[S_idx] * zz[S_idx] ) / np.sum(dens[S_idx])
return [xx_P_com, yy_P_com, zz_P_com, xx_S_com, yy_S_com, zz_S_com] | 429758abd92d4eff7a1948278bbe8c348ba83862 | 16,805 |
def get_list(_list, persistent_attributes):
"""
    Check if the user supplied a list and if it's a custom list; also check for any saved lists
:param _list: User supplied list
:param persistent_attributes: The persistent attribs from the app
:return: The list name , If list is custom or not
"""
if _list is not None and (_list.lower() != 'watchlist' and _list.lower() != 'watch list'):
return _list, True
else:
        # if a default isn't set, use watchlist
if "list" in persistent_attributes:
if persistent_attributes["list"] != 'watchlist' and persistent_attributes["list"] != 'watch list':
_list = persistent_attributes["list"]
_usecustomlist = True
else:
_list = 'watchlist'
_usecustomlist = False
else:
_list = 'watchlist'
_usecustomlist = False
return _list, _usecustomlist | 497fa8427660bafa3cc3023abf0132973693dc6e | 16,806 |
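A quick usage sketch for get_list above (plain Python; the attribute values shown are illustrative):

print(get_list(None, {}))                      # ('watchlist', False)
print(get_list('Groceries', {}))               # ('Groceries', True)
print(get_list(None, {'list': 'Groceries'}))   # ('Groceries', True)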
import socket
import re
def inode_for_pid_sock(pid, addr, port):
"""
Given a pid that is inside a network namespace, and the address/port of a LISTEN socket,
find the inode of the socket regardless of which pid in the ns it's attached to.
"""
expected_laddr = '%02X%02X%02X%02X:%04X' % (addr[3], addr[2], addr[1], addr[0], socket.htons(port))
for line in open('/proc/{}/net/tcp'.format(pid), 'r').readlines():
parts = re.split(r'\s+', line.strip())
local_addr = parts[1]
remote_addr = parts[2]
if remote_addr != '00000000:0000': continue # not a listen socket
if local_addr == expected_laddr:
return int(parts[9]) | 4d47d9de118caa87854b96bf759a75520b8409cb | 16,807 |
from typing import List
from typing import Tuple
import logging
import numpy as np
def get_edges_from_route_matrix(route_matrix: Matrix) -> List[Tuple]:
"""Returns a list of the edges used in a route according to the route matrix
:param route_matrix: A matrix indicating which edges contain the optimal route
:type route_matrix: Matrix
:return: The row and column for the edge in the matrix
:rtype: Tuple
:yield: List of tuples for each edge connecting two nodes
:rtype: List[Tuple]
"""
def get_first_row(route_matrix):
for row in range(len(route_matrix)):
nodes_in_row = sum(route_matrix[row])
if nodes_in_row == 1:
return row
elif nodes_in_row == 0:
continue
else:
raise ValueError(f'Invalid number of nodes in row: {nodes_in_row}')
def get_next_node_from_row(i, route_matrix):
for j in range(len(route_matrix)):
if route_matrix[i][j] == 1:
return (i, j)
raise ValueError(f"Node {i} is not connected to another node.")
edges = []
route_length = np.sum(route_matrix)
row = get_first_row(route_matrix)
while len(edges) < route_length:
try:
to_node = get_next_node_from_row(row, route_matrix)
row = to_node[1]
edges.append(to_node)
except ValueError:
logging.info('End of open route found.')
# transpose the matrix
route_matrix = [[route_matrix[j][i] for j in range(len(route_matrix))] for i in range(len(route_matrix))]
# reverse the edges
edges = [(edges[-1][1], edges[-1][0])]
row = edges[0][1]
return edges | 32e84bc782cdf3939affa881f0c2cf23ff81eeee | 16,808 |
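A small usage sketch for get_edges_from_route_matrix above, assuming Matrix is simply an alias for a nested list of 0/1 ints (e.g. Matrix = List[List[int]] in the original module) and numpy is imported as np:

# A closed 3-node route: 0 -> 1 -> 2 -> 0.
route_matrix = [
    [0, 1, 0],
    [0, 0, 1],
    [1, 0, 0],
]
print(get_edges_from_route_matrix(route_matrix))  # [(0, 1), (1, 2), (2, 0)]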
def nicer(string):
"""
>>> nicer("qjhvhtzxzqqjkmpb")
True
>>> nicer("xxyxx")
True
>>> nicer("uurcxstgmygtbstg")
False
>>> nicer("ieodomkazucvgmuy")
False
"""
pair = False
for i in range(0, len(string) - 3):
for j in range(i + 2, len(string) - 1):
if string[i:i + 2] == string[j:j + 2]:
pair = True
break
if not pair:
return False
for i in range(0, len(string) - 2):
if string[i] == string[i + 2]:
return True
return False | 7c543bbd39730046b1ab3892727cca3a9e027662 | 16,809 |
from typing import Union
def multiple_choice(value: Union[list, str]):
""" Handle a single string or list of strings """
if isinstance(value, list):
# account for this odd [None] value for empty multi-select fields
if value == [None]:
return None
# we use string formatting to handle the possibility that the list contains ints
return ", ".join([f"{val}" for val in value])
return value | aae54f84bc1ccc29ad9ad7ae205e130f66601131 | 16,810 |
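Illustrative calls for multiple_choice above:

print(multiple_choice(['red', 'green', 3]))   # red, green, 3
print(multiple_choice([None]))                # None (empty multi-select)
print(multiple_choice('just a string'))       # just a string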
import astropy.units as au
def Jnu_vD82(wav):
"""Estimate of ISRF at optical wavelengths by van Dishoeck & Black (1982)
see Fig 1 in Heays et al. (2017)
Parameters
----------
wav : array of float
wavelength in angstrom
Returns
-------
Jnu : array of float
Mean intensity Jnu in cgs units
"""
if wav is not None and not isinstance(wav, au.quantity.Quantity):
wav = (wav*au.angstrom).to(au.angstrom)
else:
wav = wav.to(au.angstrom)
w = wav.value
return 2.44e-16*w**2.7/au.cm**2/au.s/au.Hz | 287dbf88d7a5ba58ca8792cd78ff61393df3aae2 | 16,811 |
def _coexp_ufunc(m0, exp0, m1, exp1):
""" Returns a co-exp couple of couples """
# Implementation for real
if (m0 in numba_float_types) and (m1 in numba_float_types):
def impl(m0, exp0, m1, exp1):
co_m0, co_m1 = m0, m1
d_exp = exp0 - exp1
if m0 == 0.:
exp = exp1
elif m1 == 0.:
exp = exp0
elif (exp1 > exp0):
co_m0 = _exp2_shift(co_m0, d_exp)
exp = exp1
elif (exp0 > exp1):
co_m1 = _exp2_shift(co_m1, -d_exp)
exp = exp0
else: # exp0 == exp1
exp = exp0
return (co_m0, co_m1, exp)
# Implementation for complex
elif (m0 in numba_complex_types) or (m1 in numba_complex_types):
def impl(m0, exp0, m1, exp1):
co_m0, co_m1 = m0, m1
d_exp = exp0 - exp1
if m0 == 0.:
exp = exp1
elif m1 == 0.:
exp = exp0
elif (exp1 > exp0):
co_m0 = (_exp2_shift(co_m0.real, d_exp)
+ 1j * _exp2_shift(co_m0.imag, d_exp))
exp = exp1
elif (exp0 > exp1):
co_m1 = (_exp2_shift(co_m1.real, -d_exp)
+ 1j * _exp2_shift(co_m1.imag, -d_exp))
exp = exp0
else: # exp0 == exp1
exp = exp0
return (co_m0, co_m1, exp)
else:
raise TypingError("datatype not accepted {}{}".format(m0, m1))
return impl | 11df0f4c06edb758945b7a86940edd4975c47c85 | 16,812 |
def get_lorem(length=None, **kwargs):
""" Get a text (based on lorem ipsum.
:return str:
::
print get_lorem() # -> atque rerum et aut reiciendis...
"""
lorem = ' '.join(g.get_choices(LOREM_CHOICES))
if length:
lorem = lorem[:length]
return lorem | a3ece5c011d69e0a532bcb4b91fa6583dd028c1d | 16,813 |
import warnings
def try_get_graphql_scalar_type(property_name, property_type_id):
"""Return the matching GraphQLScalarType for the property type id or None if none exists."""
maybe_graphql_type = ORIENTDB_TO_GRAPHQL_SCALARS.get(property_type_id, None)
if not maybe_graphql_type:
warnings.warn(
'Ignoring property "{}" with unsupported property type: '
"{}".format(property_name, PROPERTY_TYPE_ID_TO_NAME[property_type_id])
)
return maybe_graphql_type | 70c4406b9cd08b3de6e48a473e62869470f579b1 | 16,814 |
import requests
def get(path):
"""Get GCE metadata value."""
attribute_url = (
'http://{}/computeMetadata/v1/'.format(_METADATA_SERVER) + path)
headers = {'Metadata-Flavor': 'Google'}
operations_timeout = environment.get_value('URL_BLOCKING_OPERATIONS_TIMEOUT')
response = requests.get(
attribute_url, headers=headers, timeout=operations_timeout)
response.raise_for_status()
return response.text | 044db931369de13e6c16db9007fe4bad28a940a8 | 16,815 |
def greedy_helper(hyper_list, node_dict, fib_heap, total_weight, weight=None):
"""
Greedy peeling algorithm. Peel nodes iteratively based on their current degree.
Parameters
----------
    hyper_list: list of hyperedges, each an iterable of node ids.
    node_dict: dict, node id as key, tuple (incident edge-index list, heap node) as value. Here heap node is a
        pointer to the corresponding node in fib_heap.
    fib_heap: FibonacciHeap, supports fast extraction of the min degree node and key changes.
    total_weight: edge weight sum.
    weight: str that specifies the edge attribute name of the edge weight; None if the graph is unweighted.
Returns
----------
H: list, subset of nodes corresponding to densest subgraph.
max_avg: float, density of H induced subgraph.
new_loads: dict, new loads for nodes, only used for the flowless algorithm when T>1.
"""
n = len(node_dict.keys())
avg_degree = total_weight / n
H = list(node_dict.keys())
max_avg = avg_degree
new_loads = dict()
for i in range(n - 1):
# find min node from graph (remove from heap)
to_remove = fib_heap.extract_min()
node_to_remove = to_remove.value
degree_to_remove = to_remove.key
new_loads[node_to_remove] = degree_to_remove
for e_index in node_dict[node_to_remove][0]:
e = hyper_list[e_index]
for neighbor in e:
if neighbor != node_to_remove:
fib_heap.decrease_key(node_dict[neighbor][1], node_dict[neighbor][1].key - 1)
node_dict[neighbor][0].remove(e_index)
total_weight -= 1
del node_dict[node_to_remove]
avg_degree = total_weight / (n - i - 1)
if max_avg < avg_degree:
max_avg = avg_degree
H = list(node_dict.keys())
return H, max_avg, new_loads | b2c0f3e91e6c9a80a8396dc104abc804af8875e5 | 16,816 |
def CleanFloat(number, locale = 'en'):
"""\
Return number without decimal points if .0, otherwise with .x)
"""
try:
if number % 1 == 0:
return str(int(number))
else:
return str(float(number))
except:
return number | 03ccc3bfe407becf047515b618621058acff37e7 | 16,817 |
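A few illustrative calls for CleanFloat above:

print(CleanFloat(3.0))     # 3
print(CleanFloat(3.25))    # 3.25
print(CleanFloat('oops'))  # oops (non-numeric input is returned unchanged)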
import numpy as np
def ssd_bboxes_encode(boxes):
"""
Labels anchors with ground truth inputs.
Args:
        boxes: ground truth with shape [N, 5], for each row, it stores [y, x, h, w, cls].
Returns:
gt_loc: location ground truth with shape [num_anchors, 4].
gt_label: class ground truth with shape [num_anchors, 1].
num_matched_boxes: number of positives in an image.
"""
def jaccard_with_anchors(bbox):
"""Compute jaccard score a box and the anchors."""
# Intersection bbox and volume.
ymin = np.maximum(y1, bbox[0])
xmin = np.maximum(x1, bbox[1])
ymax = np.minimum(y2, bbox[2])
xmax = np.minimum(x2, bbox[3])
w = np.maximum(xmax - xmin, 0.)
h = np.maximum(ymax - ymin, 0.)
# Volumes.
inter_vol = h * w
union_vol = vol_anchors + (bbox[2] - bbox[0]) * (bbox[3] - bbox[1]) - inter_vol
jaccard = inter_vol / union_vol
return np.squeeze(jaccard)
pre_scores = np.zeros((config.num_ssd_boxes), dtype=np.float32)
t_boxes = np.zeros((config.num_ssd_boxes, 4), dtype=np.float32)
t_label = np.zeros((config.num_ssd_boxes), dtype=np.int64)
for bbox in boxes:
label = int(bbox[4])
scores = jaccard_with_anchors(bbox)
idx = np.argmax(scores)
scores[idx] = 2.0
mask = (scores > matching_threshold)
mask = mask & (scores > pre_scores)
pre_scores = np.maximum(pre_scores, scores * mask)
t_label = mask * label + (1 - mask) * t_label
for i in range(4):
t_boxes[:, i] = mask * bbox[i] + (1 - mask) * t_boxes[:, i]
index = np.nonzero(t_label)
# Transform to tlbr.
bboxes = np.zeros((config.num_ssd_boxes, 4), dtype=np.float32)
bboxes[:, [0, 1]] = (t_boxes[:, [0, 1]] + t_boxes[:, [2, 3]]) / 2
bboxes[:, [2, 3]] = t_boxes[:, [2, 3]] - t_boxes[:, [0, 1]]
# Encode features.
bboxes_t = bboxes[index]
default_boxes_t = default_boxes[index]
bboxes_t[:, :2] = (bboxes_t[:, :2] - default_boxes_t[:, :2]) / (default_boxes_t[:, 2:] * config.prior_scaling[0])
tmp = np.maximum(bboxes_t[:, 2:4] / default_boxes_t[:, 2:4], 0.000001)
bboxes_t[:, 2:4] = np.log(tmp) / config.prior_scaling[1]
bboxes[index] = bboxes_t
num_match = np.array([len(np.nonzero(t_label)[0])], dtype=np.int32)
return bboxes, t_label.astype(np.int32), num_match | 1e0a07c1305fe2b1ba99f535609d2d52d72befa8 | 16,818 |
def _get_partial_prediction(input_data: dt.BatchedTrainTocopoData,
target_data_token_ids: dt.NDArrayIntBO,
target_data_is_target_copy: dt.NDArrayBoolBOV,
target_data_is_target_pointer: dt.NDArrayBoolBOV
) -> dt.BatchedTrainTocopoData:
"""Create BatchedTrainTocopoData that contains the latest predictions.
This function creates BatchedTrainTocopoData for the autoregressive
prediction. The returned batched_partial_prediction contains the prediction
    made so far by the autoregressive prediction, notably
BatchedTrainTocopoTargetData.token_ids,
BatchedTrainTocopoTargetData.is_target_copy and
BatchedTrainTocopoTargetData.is_target_pointer. batched_partial_prediction
should be used by the autoregressive prediction to generate the next
prediction.
Args:
input_data: The input data that we generate the autoregressive prediction.
We used it copy the BatchedTrainGraphNodeData and
BatchedTrainGraphEdgeData. But BatchedTrainTocopoTargetData should not be
copied from the input data since it contains the ground truth.
target_data_token_ids: Token ids that the autoregressive prediction
predicted so far.
target_data_is_target_copy: is_target_copy matrix that the
autoregressive prediction predicted so far.
target_data_is_target_pointer: is_target_pointer that the
autoregressive prediction predicted so far.
Returns:
A instance of BatchedTrainTocopoData, where the BatchedTrainGraphNodeData
and BatchedTrainGraphEdgeData is the same as input_data. But
BatchedTrainTocopoTargetData holds the prediction made so far.
"""
# BatchedTrainTocopoTargetData contains the latest prediction.
# We must not copy from input_data, but rather use the target_data_token_ids,
# target_data_is_target_copy and target_data_is_target_pointer that are
# predicted by the autoregressive prediction.
batched_partial_prediction_tocopo_target_data = (
dt.BatchedTrainTocopoTargetData(
token_ids=target_data_token_ids,
is_target_copy=target_data_is_target_copy,
is_target_pointer=target_data_is_target_pointer))
# BatchedTrainGraphNodeData and BatchedTrainGraphEdgeData is the same as the
# input_data.
batched_partial_prediction_graph_node_data = dt.BatchedTrainGraphNodeData(
token_ids=input_data.node_data.token_ids,
type_ids=input_data.node_data.type_ids,
token_positions=input_data.node_data.token_positions,
pointer_candidates=input_data.node_data.pointer_candidates
)
batched_partial_prediction_graph_edge_data = dt.BatchedTrainGraphEdgeData(
edges=input_data.edge_data.edges,
time_edges=input_data.edge_data.time_edges)
batched_partial_prediction = dt.BatchedTrainTocopoData(
node_data=batched_partial_prediction_graph_node_data,
edge_data=batched_partial_prediction_graph_edge_data,
target_data=batched_partial_prediction_tocopo_target_data
)
return batched_partial_prediction | 1a0fdc53e4e49bf3d0c0824eca6ba381d7a72f1f | 16,819 |
import numpy as np
import numpy.ma as ma
from scipy.stats import binned_statistic
from tqdm import tqdm
def get_energy_spectrum_old(udata, x0=0, x1=None, y0=0, y1=None,
z0=0, z1=None, dx=None, dy=None, dz=None, nkout=None,
window=None, correct_signal_loss=True, remove_undersampled_region=True,
cc=1.75, notebook=True):
"""
DEPRECATED: TM cleaned up the code, and improved the literacy and transparency of the algorithm- TM (Sep 2020)
Returns 1D energy spectrum from velocity field data
... The algorithm implemented in this function is VERY QUICK because it does not use the two-point autorcorrelation tensor.
... Instead, it converts u(kx, ky, kz)u*(kx, ky, kz) into u(kr)u*(kr). (here * dentoes the complex conjugate)
... CAUTION: Must provide udata with aspect ratio ~ 1
...... The conversion process induces unnecessary error IF the dimension of u(kx, ky, kz) is skewed.
...... i.e. Make udata.shape like (800, 800), (1024, 1024), (512, 512) for accurate results.
... KNOWN ISSUES:
...... This function returns a bad result for udata with shape like (800, 800, 2)
Parameters
----------
udata: nd array
epsilon: nd array or float, default: None
dissipation rate used for scaling energy spectrum
If not given, it uses the values estimated using the rate-of-strain tensor
nu: flaot, viscosity
x0: int
index to specify a portion of data in which autocorrelation funciton is computed. Use data u[y0:y1, x0:x1, t0:t1].
x1: int
index to specify a portion of data in which autocorrelation funciton is computed. Use data u[y0:y1, x0:x1, t0:t1].
y0: int
index to specify a portion of data in which autocorrelation funciton is computed. Use data u[y0:y1, x0:x1, t0:t1].
y1: int
index to specify a portion of data in which autocorrelation funciton is computed. Use data u[y0:y1, x0:x1, t0:t1].
t0: int
index to specify a portion of data in which autocorrelation funciton is computed. Use data u[y0:y1, x0:x1, t0:t1].
t1: int
index to specify a portion of data in which autocorrelation funciton is computed. Use data u[y0:y1, x0:x1, t0:t1].
dx: float
spacing in x
dy: float
spacing in y
dz: float
spacing in z
nkout: int, default: None
number of bins to compute energy/dissipation spectrum
notebook: bool, default: True
Use tqdm.tqdm_notebook if True. Use tqdm.tqdm otherwise
window: str
Windowing reduces undesirable effects due to the discreteness of the data.
A wideband window such as 'flattop' is recommended for turbulent energy spectra.
For the type of applying window function, choose from below:
boxcar, triang, blackman, hamming, hann, bartlett, flattop, parzen, bohman, blackmanharris, nuttall, barthann,
kaiser (needs beta), gaussian (needs standard deviation), general_gaussian (needs power, width),
slepian (needs width), chebwin (needs attenuation), exponential (needs decay scale),
tukey (needs taper fraction)
correct_signal_loss: bool, default: True
If True, it would compensate for the loss of the signals due to windowing.
Always recommended to obtain accurate spectral densities.
remove_undersampled_region: bool, default: True
If True, it will not sample the region with less statistics.
cc: float, default: 1.75
A numerical factor to compensate for the signal loss due to approximations.
... cc=1.75 was obtained from the JHTD data.
Returns
-------
e_k: numpy array
Energy spectrum with shape (number of data points, duration)
e_k_err: numpy array
Energy spectrum error with shape (number of data points, duration)
kk: numpy array
Wavenumber with shape (number of data points, duration)
"""
print('get_energy_spectrum_old(): is DEPRECATED since 09/01/20')
print('... Still works perfectly. Yet, TM highly recommends to use the updated function: get_energy_spectrum()')
    if notebook:
        from tqdm import tqdm_notebook as tqdm
        print('Using tqdm_notebook. If this is a mistake, set notebook=False')
    else:
        from tqdm import tqdm
def delete_masked_elements(data, mask):
"""
Deletes elements of data using mask, and returns a 1d array
Parameters
----------
data: N-d array
mask: N-d array, bool
Returns
-------
compressed_data
"""
data_masked = ma.array(data, mask=mask)
compressed_data = data_masked.compressed()
'...Reduced data using a given mask'
return compressed_data
def convert_nd_spec_to_1d(e_ks, ks, nkout=None, cc=1.75):
"""
Convert the results of get_energy_spectrum_nd() into a 1D spectrum
... This is actually a tricky problem.
Importantly, this will output the SPECTRAL DENSITY
not power which is integrated spectral density (i.e.- spectral density * delta_kx * delta_ky * delta_ky.)
... Ask Takumi for derivation. The derivation goes like this.
...... 1. Start with the Parseval's theorem.
...... 2. Write the discretized equation about the TKE: Average TKE = sum deltak * E(k)
...... 3. Using 1, write down the avg TKE
...... 4. Equate 2 and 3. You get e_k1d * jacobian / (n_samples * deltak)
...... IF deltak = deltakr where deltakr = np.sqrt(deltakx**2 + deltaky**2) for 2D
...... where e_k1d is just a histogram value obtained from the DFT result (i.e. POWER- spectral density integrated over a px)
...... 5. Finally, convert this into the SPECTRAL DENSITY. This is two-fold.
...... 5.1.
...... e_k1d * jacobian / (n_samples * deltak) is not necessarily the correct density
...... if deltak is not equal to deltakr.
...... This is because e_k1d comes from the histogram of the input velocity field.
...... One can show that the correction is just (deltak / deltakr) ** dim
...... 5.2
...... After 5.1, this is finally the integrated power between k and k + deltak
...... Now divide this by deltak to get the spectral density.
Parameters
----------
e_ks
ks
nkout
d: int/float, DIMENSION OF THE FLOW (NOT DIMENSION OF AVAILABLE VELOCITY FIELD)
... For 3D turbulence, d = 3
... d is equal to 3 even if udata is an 2D field embedded in an actual 3D field,
... For 2D turbulence, d = 2
Returns
-------
"""
dim = ks.shape[0]
duration = e_ks.shape[-1]
if dim == 2:
deltakx, deltaky = ks[0, 0, 1] - ks[0, 0, 0], \
ks[1, 1, 0] - ks[1, 0, 0]
e_ks *= deltakx * deltaky # use the raw DFT outputs (power=integrated density over a px)
deltakr = np.sqrt(deltakx ** 2 + deltaky ** 2) # radial k spacing of the velocity field
dx, dy = 2.*np.pi / ks[0, 0, 0] * -0.5, 2.*np.pi / ks[1, 0, 0] * -0.5
if dim == 3:
deltakx, deltaky, deltakz = ks[0, 0, 1, 0] - ks[0, 0, 0, 0], \
ks[1, 1, 0, 0] - ks[1, 0, 0, 0], \
ks[2, 0, 0, 1] - ks[2, 0, 0, 0]
e_ks *= deltakx * deltaky * deltakz # use the raw DFT outputs (power=integrated density over a px)
deltakr = np.sqrt(deltakx ** 2 + deltaky ** 2 + deltakz ** 2) # radial k spacing of the velocity field
dx, dy, dz = 2.*np.pi / ks[0, 0, 0] * -0.5, 2.*np.pi / ks[1, 0, 0] * -0.5, 2.*np.pi / ks[2, 0, 0] * -0.5
kk = np.zeros((ks.shape[1:]))
for i in range(dim):
kk += ks[i, ...] ** 2
kk = np.sqrt(kk) # radial k
if nkout is None:
nkout = int(np.max(ks.shape[1:]) * 0.8)
shape = (nkout, duration)
e_k1ds = np.empty(shape)
e_k1d_errs = np.empty(shape)
k1ds = np.empty(shape)
if remove_undersampled_region:
kx_max, ky_max = np.nanmax(ks[0, ...]), np.nanmax(ks[1, ...])
k_max = np.nanmin([kx_max, ky_max])
if dim == 3:
kz_max = np.nanmax(ks[2, ...])
k_max = np.nanmin([k_max, kz_max])
for t in range(duration):
# flatten arrays to feed to binned_statistic\
kk_flatten, e_knd_flatten = kk.flatten(), e_ks[..., t].flatten()
if remove_undersampled_region:
mask = np.abs(kk_flatten) > k_max
kk_flatten = delete_masked_elements(kk_flatten, mask)
e_knd_flatten = delete_masked_elements(e_knd_flatten, mask)
# get a histogram
k_means, k_edges, binnumber = binned_statistic(kk_flatten, kk_flatten, statistic='mean', bins=nkout)
k_binwidth = (k_edges[1] - k_edges[0])
k1d = k_edges[1:] - k_binwidth / 2
e_k1d, _, _ = binned_statistic(kk_flatten, e_knd_flatten, statistic='mean', bins=nkout)
e_k1d_err, _, _ = binned_statistic(kk_flatten, e_knd_flatten, statistic='std', bins=nkout)
# # WEIGHTED AVERAGE
# ke_k1d, _, _ = binned_statistic(kk_flatten, kk_flatten * e_knd_flatten, statistic='mean', bins=nkout)
# e_k1d = ke_k1d / k1d
# ke_k1d_err, _, _ = binned_statistic(kk_flatten, kk_flatten * e_knd_flatten, statistic='std', bins=nkout)
# e_k1d_err = ke_k1d_err / k1d
# One must fix the power by some numerical factor due to the DFT and the definition of E(k)
n_samples = len(kk_flatten)
deltak = k1d[1] - k1d[0]
if dim == 2:
jacobian = 2 * np.pi * k1d
elif dim == 3:
jacobian = 4 * np.pi * k1d ** 2
# Insert to a big array
# ... A quick derivation of this math is given in the docstring.
k1ds[..., t] = k1d
# OLD stuff
# e_k1ds[..., t] = e_k1d * jacobian / (n_samples * deltak)
# e_k1d_errs[..., t] = e_k1d_err * jacobian / (n_samples * deltak)
# print deltak
# Old stuff 2: scaling that works?
# e_k1ds[..., t] = e_k1d * jacobian / (n_samples * deltak) * (deltak / deltakr) ** dim / deltak
# e_k1d_errs[..., t] = e_k1d_err * jacobian / (n_samples * deltak) * (deltak / deltakr) ** dim / deltak
# print(dx, dy, deltakr, deltakx * dx * ks.shape[2])
print(deltakr, deltak)
# 2019-2020 August
# e_k1ds[..., t] = e_k1d * jacobian / (n_samples * deltakr ** 2) * cc
# e_k1d_errs[..., t] = e_k1d_err * jacobian / (n_samples * deltakr ** 2) * cc
# # Update in Aug, 2020- TM
e_k1ds[..., t] = e_k1d * jacobian / (n_samples * deltakr ** 2) * cc
e_k1d_errs[..., t] = e_k1d_err * jacobian / (n_samples * deltakr ** 2) * cc
return e_k1ds, e_k1d_errs, k1ds
dim, duration = len(udata), udata.shape[-1]
e_ks, ks = get_energy_spectrum_nd_old(udata, x0=x0, x1=x1, y0=y0, y1=y1, z0=z0, z1=z1, dx=dx, dy=dy, dz=dz,
window=window, correct_signal_loss=correct_signal_loss)
e_k, e_k_err, kk = convert_nd_spec_to_1d(e_ks, ks, nkout=nkout, cc=cc)
# #### NORMALIZATION IS NO LONGER NEEDED #### - Takumi, Apr 2019
# # normalization
# energy_avg, energy_avg_err = get_spatial_avg_energy(udata, x0=x0, x1=x1, y0=y0, y1=y1, z0=z0, z1=z1)
#
# for t in range(duration):
# I = np.trapz(e_k[0:, t], kk[0:, t])
# print I
# N = I / energy_avg[t] # normalizing factor
# e_k[:, t] /= N
# e_k_err[:, t] /= N
    return e_k, e_k_err, kk | aa29358215897f3bcb630d2c62b679d2b6ebef88 | 16,820 |
def createDefaultClasses(datasetTXT):
"""
:param datasetTXT: dict with text from txt files indexed by filename
:return: Dict with key:filename, value:list of lists with classes per sentence in the document
"""
classesDict = {}
for fileName in datasetTXT:
classesDict[fileName] = []
sentences = nltkSentenceSplit(datasetTXT[fileName], verbose=False)
for sentence in sentences:
sentence = nltkTokenize(sentence)
classesDict[fileName].append([int(0) for _ in sentence])
return classesDict | 8bec5768710a929c21f75fa70865e25f340409f6 | 16,821 |
def getGlobals():
"""
:return: (dict)
"""
return globals() | 0fa230d341ba5435b33c9e6a9d9f793f99a74238 | 16,822 |
from typing import Iterable
from typing import List
def split_text_to_words(words: Iterable[str]) -> List[Word]:
"""Transform split text into list of Word."""
return [Word(word, len(word)) for word in words] | 6317e794a5397da44be96216308573ae9d5a788f | 16,823 |
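The snippet above relies on a project-defined Word type that is not shown. A minimal stand-in (the field names are a guess) must exist before the function definition is evaluated; usage then looks like:

from typing import NamedTuple

class Word(NamedTuple):
    text: str
    length: int

print(split_text_to_words('the quick fox'.split()))
# [Word(text='the', length=3), Word(text='quick', length=5), Word(text='fox', length=3)]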
import khorosjx
def init_module_operation():
"""This function imports the primary modules for the package and returns ``True`` when successful."""
khorosjx.init_module('admin', 'content', 'groups', 'spaces', 'users')
return True | d6cbc3b94d4b4005d301d9b597bb7086e211bfa2 | 16,824 |
def connect_to_rds(aws, region):
"""
Return boto connection to the RDS in the specified environment's region.
"""
set_progress('Connecting to AWS RDS in region {0}.'.format(region))
wrapper = aws.get_api_wrapper()
client = wrapper.get_boto3_client(
'rds',
aws.serviceaccount,
aws.servicepasswd,
region
)
return client | cdfaa984c6795c7e03f0d8b3e3620f6de757fcbb | 16,825 |
def export_graphviz(DecisionTreeClassificationModel, featureNames=None, categoryNames=None, classNames=None,
filled=True, roundedCorners=True, roundLeaves=True):
"""
Generates a DOT string out of a Spark's fitted DecisionTreeClassificationModel, which
can be drawn with any library capable of handling the DOT format.
If you want to plot in a single step, please use the function plot_tree().
Arguments:
DecisionTreeClassificationModel -- a pyspark.ml.classification.DecisionTreeClassificationModel
instance
featureNames -- a list with the feature names. This
is probably the same list you usually
pass to your VectorAssembler constructor
categoryNames -- a dictionary with the featureNames that
are categorical as the keys, and the
different categories as the values.
This is probably the featureNames as key,
StringIndexerModel.labels attribute as value
for each categorical feature
classNames -- a list with the class names for your target
column. This is probably the StringIndexerModel.labels
for your target column
filled -- boolean which indicates whether to fill nodes with colour
or not. Color gamma will be the prediction class for each
node, and color intensity the impurity at such node
roundedCorners -- boolean which indicates whether to round
rectangle corners for the nodes
roundLeaves -- boolean which indicates whether to represent leaf
nodes as ellipses rather than rectangles
Returns:
a DOT string ready to be processed by any DOT handling library
"""
tree_dict = loads(generate_tree_json(DecisionTreeClassificationModel, withNodeIDs=False))
num_classes = get_num_classes(tree_dict)
color_brew = generate_color_brew(num_classes)
node_list = []
tree_dict_with_id = add_node_ids(tree_dict)
graph = relations_to_str(tree_dict_with_id,
featureNames=featureNames,
categoryNames=categoryNames,
classNames=classNames,
numClasses=num_classes,
nodeList=node_list,
filled=filled,
roundLeaves=roundLeaves,
color_brew=color_brew)
node_properties = "\n".join(node_list)
filled_and_rounded = []
if filled:
filled_and_rounded.append("filled")
if roundedCorners:
filled_and_rounded.append("rounded")
dot_string = """digraph Tree {
node [shape=box style="%s"]
subgraph body {
%s
%s}
}""" % (",".join(filled_and_rounded), "".join(graph), node_properties)
return dot_string | eb4484136fbbe92537a3f030375f6ac80081befd | 16,826 |
import sqlalchemy
def _get_next_sequence_values(session, base_mapper, num_values):
"""Fetches the next `num_values` ids from the `id` sequence on the `base_mapper` table.
For example, if the next id in the `model_id_seq` sequence is 12, then
`_get_next_sequence_values(session, Model.__mapper__, 5)` will return [12, 13, 14, 15, 16].
"""
assert _has_normal_id_primary_key(
base_mapper
), "_get_next_sequence_values assumes that the sequence produces integer values"
id_seq_name = _get_id_sequence_name(base_mapper)
# Table.schema is the canonical place to get the name of the schema.
# See https://docs.sqlalchemy.org/en/13/core/metadata.html#sqlalchemy.schema.Table.params.schema
schema = base_mapper.entity.__table__.schema
sequence = sqlalchemy.Sequence(id_seq_name, schema=schema)
# Select the next num_values from `sequence`
raw_ids = tuples_to_scalar_list(
session.connection().execute(
sqlalchemy.select([sequence.next_value()]).select_from(
sqlalchemy.text("generate_series(1, :num_values)")
),
{"num_values": num_values},
)
)
assert len(raw_ids) == num_values, u"Expected to get {} new ids, instead got {}".format(
num_values, len(raw_ids)
)
# session.execute returns `long`s since Postgres sequences use `bigint` by default.
# However, we need ints since the column type for our primary key is `integer`.
return [int(id_) for id_ in raw_ids] | 63ad9e5e55228dd873ee2c5d9080d223c89e1bc6 | 16,827 |
def overview(request):
"""
Dashboard: Process overview page.
"""
responses_dict = get_data_for_user(request.user)
responses_dict_by_step = get_step_responses(responses_dict)
# Add step status dictionary
step_status = get_step_completeness(responses_dict_by_step)
responses_dict_by_step['step_status'] = step_status
responses_dict_by_step['active_page'] = 'overview'
responses_dict_by_step['derived'] = get_derived_data(responses_dict)
# Dashnav needs filing option to determine which steps to show
for question in responses_dict_by_step['signing_filing']:
responses_dict_by_step[question['question_id']] = question['value']
response = render(request, 'overview.html', context=responses_dict_by_step)
# set this session variable after the page is already rendered
request.session['viewed_dashboard_during_session'] = True
return response | 4ac165cf5b4bf7de6f060d6649935f25fcf5a0a9 | 16,828 |
def _guess_os():
"""Try to guess the current OS"""
try:
abi_name = ida_typeinf.get_abi_name()
except:
abi_name = ida_nalt.get_abi_name()
if "OSX" == abi_name:
return "macos"
inf = ida_idaapi.get_inf_structure()
file_type = inf.filetype
if file_type in (ida_ida.f_ELF, ida_ida.f_AOUT, ida_ida.f_COFF):
return "linux"
elif file_type == ida_ida.f_MACHO:
return "macos"
elif file_type in (
ida_ida.f_PE,
ida_ida.f_EXE,
ida_ida.f_EXE_old,
ida_ida.f_COM,
ida_ida.f_COM_old,
):
return "windows"
else:
# Default
return "linux"
#raise UnhandledOSException("Unrecognized OS type") | bb2cb2f0c294f2554ec419ee1bdea665abaf6957 | 16,829 |
def create_conf(name, address, *services):
"""Create an Apple TV configuration."""
atv = conf.AppleTV(name, address)
for service in services:
atv.add_service(service)
return atv | 0326a4c21b39ef12fe916f3a3fbee34af52c12a2 | 16,830 |
import numpy as np
def log_transform(x):
""" Log transformation from total precipitation in mm/day"""
tp_max = 23.40308390557766
y = np.log(x*(np.e-1)/tp_max + 1)
return y | 61783d103db36ed668e494f557550caef611b84a | 16,831 |
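For reference, a sketch of the inverse of log_transform above; tp_max is copied from the function and assumed to be the dataset maximum in mm/day:

import numpy as np

def inverse_log_transform(y):
    """Map a transformed value back to total precipitation in mm/day."""
    tp_max = 23.40308390557766
    return (np.exp(y) - 1) * tp_max / (np.e - 1)

x = 5.0
assert np.isclose(inverse_log_transform(log_transform(x)), x)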
from datetime import datetime
import requests
import json
def get_flight(arguments):
"""
connects to skypicker servive and get most optimal flight base on search criteria
:param arguments: inputs arguments from parse_arg
:return dict: flight
"""
api_url = 'https://api.skypicker.com/flights?v=3&'
adults = '1'
# convert time format 2018-04-13 -> 13/04/2018
    date = datetime.strptime(arguments.date, "%Y-%m-%d").strftime("%d/%m/%Y")
fly_from = arguments.origin
fly_to = arguments.to
sort = arguments.sort
if arguments.days_in_destination == 'oneway':
# constructing search query for ONEWAY flight
type_flight = 'oneway'
query_string = '&flyFrom=' + fly_from + \
'&to=' + fly_to + \
'&dateFrom=' + date + \
'&dateTo=' + date + \
'&typeFlight=' + type_flight + \
'&adults=' + adults + \
'&sort=' + sort + \
'&asc=1'
else:
# constructing search query for RETURN flight
days_in_destination = arguments.days_in_destination
type_flight = 'round'
query_string = 'daysInDestinationFrom=' + days_in_destination + \
'&daysInDestinationTo=' + days_in_destination + \
'&flyFrom=' + fly_from + \
'&to=' + fly_to + \
'&dateFrom=' + date + \
'&dateTo=' + date + \
'&typeFlight=' + type_flight + \
'&adults=' + adults + \
'&sort=' + sort + \
'&asc=1'
if arguments.verbose:
print(query_string)
get_data = requests.get(api_url + query_string)
json_data = json.loads(get_data.content)
flights = json_data['data']
# return first flight in the sorted list
if arguments.verbose:
print(flights[0])
return flights[0] | 690b7bd170b8b83f4b83f5c0ce98da919134107c | 16,832 |
def use_ip_alt(request):
"""
Fixture that gives back 2 instances of UseIpAddrWrapper
1) use ip4, dont use ip6
2) dont use ip4, use ip6
"""
use_ipv4, use_ipv6 = request.param
return UseIPAddrWrapper(use_ipv4, use_ipv6) | c33d74b6888124413d1430e4873140475db4748e | 16,833 |
import torch
def radius_gaussian(sq_r, sig, eps=1e-9):
"""Compute a radius gaussian (gaussian of distance)
Args:
sq_r: input radiuses [dn, ..., d1, d0]
sig: extents of gaussians [d1, d0] or [d0] or float
Returns:
gaussian of sq_r [dn, ..., d1, d0]
"""
return torch.exp(-sq_r / (2 * sig**2 + eps)) | cd5bb2bb85641b1200ce67cb7eb52bc1705cd0a1 | 16,834 |
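A tiny usage sketch for radius_gaussian above (requires PyTorch):

import torch

sq_r = torch.tensor([0.0, 1.0, 4.0])  # squared distances
print(radius_gaussian(sq_r, 1.0))     # approximately tensor([1.0000, 0.6065, 0.1353])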
from typing import List
from typing import Dict
from typing import Any
def index_papers_to_geodata(papers: List[Paper]) -> Dict[str, Any]:
"""
:param papers: list of Paper
:return: object
"""
geodata = {}
for paper in papers:
for file in paper.all_files():
for location in file.locations.all():
if location.id not in geodata:
geodata[location.id] = {
"id": location.id,
"name": location.description,
"coordinates": location.geometry,
"papers": {},
}
if paper.id not in geodata[location.id]["papers"]:
if paper.paper_type:
paper_type = paper.paper_type.paper_type
else:
paper_type = _("Paper")
geodata[location.id]["papers"][paper.id] = {
"id": paper.id,
"name": paper.name,
"type": paper_type,
"url": reverse("paper", args=[paper.id]),
"files": [],
}
geodata[location.id]["papers"][paper.id]["files"].append(
{
"id": file.id,
"name": file.name,
"url": reverse("file", args=[file.id]),
}
)
return geodata | f892d84e3dc8f239885b5c4110c931b088922bcc | 16,835 |
def _get_all_prefixed_mtds(
prefix: str,
groups: t.Tuple[str, ...],
update_groups_by: t.Optional[t.Union[t.FrozenSet[str],
t.Set[str]]] = None,
prefix_removal: bool = False,
custom_class_: t.Any = None,
) -> t.Dict[str, t.Tuple]:
"""Get all methods prefixed with ``prefix`` in predefined feature ``groups``.
The predefined metafeature groups are inside ``VALID_GROUPS`` attribute.
Args:
prefix (:obj:`str`): gather methods prefixed with this value.
groups (:obj:`Tuple` of :obj:`str`): a tuple of feature group names.
It can assume value :obj:`NoneType`, which is interpreted as ``no
filter`` (i.e. all features of all groups will be returned).
return_groups (:obj:`bool`, optional): if True, then the returned value
will be a :obj:`dict` (instead of a :obj:`tuple`) which maps each
group (as keys) with its correspondent values (as :obj:`tuple`s).
update_groups_by (:obj:`set` of :obj:`str`, optional): values to filter
``groups``. This function also returns a new version of ``groups``
with all its elements that do not contribute with any new method
for the final output. It other words, it is removed any group which
do not contribute to the output of this function. This is particu-
larly useful for precomputations, as it helps avoiding unecessary
precomputation methods from feature groups not related with user
selected features.
prefix_removal (:obj:`bool`, optional): if True, then the returned
method names will not have the ``prefix``.
custom_class_ (Class, optional): used for inner testing purposes. If
not None, the given class will be used as reference to extract
the prefixed methods.
Returns:
If ``filter_groups_by`` argument is :obj:`NoneType` or empty:
tuple: with all filtered methods by ``group``.
Else:
tuple(tuple, tuple): the first field is the output described above,
the second field is a new version of ``groups``, with all ele-
ments that do not contribute with any element listed in the set
``update_groups_by`` removed.
"""
groups = tuple(set(VALID_GROUPS).intersection(groups))
if not groups and custom_class_ is None:
return {"methods": tuple(), "groups": tuple()}
if custom_class_ is None:
verify_groups = tuple(VALID_GROUPS)
verify_classes = tuple(VALID_MFECLASSES)
else:
verify_groups = ("test_methods", )
verify_classes = (custom_class_, )
methods_by_group = {
ft_type_id: get_prefixed_mtds_from_class(
class_obj=mfe_class,
prefix=prefix,
prefix_removal=prefix_removal)
for ft_type_id, mfe_class in zip(verify_groups, verify_classes)
if ft_type_id in groups or custom_class_ is not None
}
gathered_methods = [] # type: t.List[t.Union[str, TypeMtdTuple]]
new_groups = [] # type: t.List[str]
for group_name in methods_by_group:
group_mtds = methods_by_group[group_name]
gathered_methods += group_mtds
if update_groups_by:
group_mtds_names = {
remove_prefix(mtd_pack[0], prefix=MTF_PREFIX)
if not prefix_removal
else mtd_pack[0]
for mtd_pack in group_mtds
}
if not update_groups_by.isdisjoint(group_mtds_names):
new_groups.append(group_name)
ret_val = {
"methods": tuple(gathered_methods),
} # type: t.Dict[str, t.Tuple]
if update_groups_by:
ret_val["groups"] = tuple(new_groups)
return ret_val | 2387fb3f2aa0416ad9837f6c1b4c27488d406fea | 16,836 |
import hashlib
def _extract_values_from_certificate(cert):
"""
Gets Serial Number, DN and Public Key Hashes. Currently SHA1 is used
to generate hashes for DN and Public Key.
"""
logger = getLogger(__name__)
# cert and serial number
data = {
u'cert': cert,
u'issuer': cert.get_issuer().der(),
u'serial_number': cert.get_serial_number(),
u'algorithm': rfc2437.id_sha1,
u'algorithm_parameter': univ.Any(hexValue='0500') # magic number
}
# DN Hash
data[u'name'] = cert.get_subject()
cert_der = data[u'name'].der()
sha1_hash = hashlib.sha1()
sha1_hash.update(cert_der)
data[u'name_hash'] = sha1_hash.hexdigest()
# public key Hash
data['key_hash'] = _get_pubickey_sha1_hash(cert).hexdigest()
# CRL and OCSP
data['crl'] = None
ocsp_uris0 = []
for idx in range(cert.get_extension_count()):
e = cert.get_extension(idx)
if e.get_short_name() == b'authorityInfoAccess':
for line in str(e).split(u"\n"):
m = OCSP_RE.match(line)
if m:
logger.debug(u'OCSP URL: %s', m.group(1))
ocsp_uris0.append(m.group(1))
elif e.get_short_name() == b'crlDistributionPoints':
for line in str(e).split(u"\n"):
m = CRL_RE.match(line)
if m:
logger.debug(u"CRL: %s", m.group(1))
data['crl'] = m.group(1)
if len(ocsp_uris0) == 1:
data['ocsp_uri'] = ocsp_uris0[0]
elif len(ocsp_uris0) == 0:
data['ocsp_uri'] = u''
else:
raise OperationalError(
msg=u'More than one OCSP URI entries are specified in '
u'the certificate',
errno=ER_FAILED_TO_GET_OCSP_URI,
)
data[u'is_root_ca'] = cert.get_subject() == cert.get_issuer()
return data | caa22f85fa26a3f386b33c52fff6562c8e9714ea | 16,837 |
import numpy as np
from functools import reduce
def cartesian_product(arrays):
"""Create a cartesian product array from a list of arrays.
It is used to create x-y coordinates array from x and y arrays.
Stolen from stackoverflow
http://stackoverflow.com/a/11146645
"""
broadcastable = np.ix_(*arrays)
broadcasted = np.broadcast_arrays(*broadcastable)
rows, cols = reduce(np.multiply, broadcasted[0].shape), len(broadcasted)
out = np.empty(rows * cols, dtype=broadcasted[0].dtype)
start, end = 0, rows
for a in broadcasted:
out[start:end] = a.reshape(-1)
start, end = end, end + rows
return out.reshape(cols, rows).T | 552b898a9187df637cc5f10b49e6a1fe004af95c | 16,838 |
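Example of the x-y coordinate use case mentioned in the docstring above:

x = np.array([0, 1])
y = np.array([10, 20, 30])
print(cartesian_product([x, y]))
# [[ 0 10]
#  [ 0 20]
#  [ 0 30]
#  [ 1 10]
#  [ 1 20]
#  [ 1 30]]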
def advanced_split(string, *symbols, contain=False, linked='right'):
"""
Split a string by symbols
If contain is True, the result will contain symbols
The choice of linked decides symbols link to which adjacent part of the result
"""
if not isinstance(string, str):
raise Exception('String must be str!')
for each in symbols:
if not isinstance(each, str):
raise Exception('Symbol must be str!')
linked = linked.lower()
if linked not in ['left', 'right']:
raise Exception('Linked must be left or right!')
if not len(symbols):
return [string]
result = []
symbols_len = tuple([len(each) for each in symbols])
if contain:
tail = ''
while 1:
index = len(string)
num = -1
for _num, each in enumerate(symbols):
_index = string.find(each)
if _index < index and _index + 1:
index = _index
num = _num
if num == -1:
temp = tail + string if contain and linked == 'right' and tail else string
if temp:
result.append(temp)
break
temp = string[:index]
if contain and linked == 'left':
tail = symbols[num]
if contain:
if tail:
if linked == 'left':
temp = temp + tail
if linked == 'right':
temp = tail + temp
if contain and linked == 'right':
tail = symbols[num]
string = string[index+symbols_len[num]:]
if temp:
result.append(temp)
return result | 3e46fcc0c3fa6ab99b9d4d45cf950d9ad3f03ac1 | 16,839 |
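Two illustrative calls for advanced_split above; the expected outputs come from a manual trace of the function:

print(advanced_split('a,b;c', ',', ';'))                                # ['a', 'b', 'c']
print(advanced_split('a,b;c', ',', ';', contain=True, linked='right'))  # ['a', ',b', ';c']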
def _get_resource_info(
resource_type="pod",
labels={},
json_path=".items[0].metadata.name",
errors_to_ignore=("array index out of bounds: index 0",),
verbose=False,
):
"""Runs 'kubectl get <resource_type>' command to retrieve info about this resource.
Args:
resource_type (string): "pod", "service", etc.
labels (dict): (eg. {'name': 'phenotips'})
json_path (string): a json path query string (eg. ".items[0].metadata.name")
errors_to_ignore (list):
verbose (bool):
Returns:
(string) resource value (eg. "postgres-410765475-1vtkn")
"""
l_arg = ""
if labels:
l_arg = "-l" + ",".join(["%s=%s" % (key, value) for key, value in labels.items()])
output = run(
"kubectl get %(resource_type)s %(l_arg)s -o jsonpath={%(json_path)s}" % locals(),
errors_to_ignore=errors_to_ignore,
print_command=False,
verbose=verbose,
)
return output.strip('\n') if output is not None else None | b9a98fe469eb7aa5fcfb606db0948cb53410ddec | 16,840 |
from math import sqrt, cos, sin, pi
def rotate_line_about_point(line, point, degrees):
"""
added 161205
This takes a line and rotates it about a point a certain number of degrees.
For use with clustering veins.
:param line: tuple contain two pairs of x,y values
:param point: tuple of x, y
:param degrees: number of degrees to rotate by
:return: line (now rotated)
"""
# point will serve as axis
axis = point
# unpack line
p0, p1 = line
# and get the line's degrees and length
line_deg = line_to_angle(line)
d = (abs(p0[0] - p1[0]), abs(p0[1] - p1[1]))
    line_length = sqrt(d[0] ** 2 + d[1] ** 2)
    # calculate radius between points and axis
    d = (abs(p0[0] - axis[0]), abs(p0[1] - axis[1]))
    r0 = sqrt(d[0] ** 2 + d[1] ** 2)
# r1 = float((p1[0] - axis[0]) ^ 2 + (p1[1] - axis[1]) ^ 2) ^ 0.5
# find degrees that first line is above x-axis
p0_deg = line_to_angle((axis, p0))
# now rotate line one to be level to degrees
p0_cos = cos(degrees * (pi / 180.0))
p0_sin = sin(degrees * (pi / 180.0))
p0_n = (r0 * p0_cos, r0 * p0_sin)
# and move p1 to be in respect to p0
new_deg = line_deg - p0_deg
# normalize degrees
while new_deg > 360:
new_deg -= 360
while new_deg < 0:
new_deg += 360
# get second point of line now since all variables are known
p1_cos = cos(new_deg * (pi / 180.0))
p1_sin = sin(new_deg * (pi / 180.0))
# get new p1
p1_n = (p1_cos * line_length + p0_n[0], p1_sin * line_length + p0_n[1])
# return new line
return p0_n, p1_n | c5954604d6f7852e66fe7b19f53193271582619d | 16,841 |
def arith_relop(a, t, b):
"""
arith_relop(a, t, b)
This is (arguably) a hack.
Represents each function as an integer 0..5.
"""
return [(t == 0).implies(a < b),
(t == 1).implies(a <= b),
(t == 2).implies(a == b),
(t == 3).implies(a >= b),
(t == 4).implies(a > b),
(t == 5).implies(a != b)
] | 8b06d545e8d651803683b36facafb647f38fb2ff | 16,842 |
import logging
def initialise_framework(options):
"""This function initializes the entire framework
:param options: Additional arguments for the component initializer
:type options: `dict`
:return: True if all commands do not fail
:rtype: `bool`
"""
logging.info("Loading framework please wait..")
# No processing required, just list available modules.
if options["list_plugins"]:
show_plugin_list(db, options["list_plugins"])
finish()
target_urls = load_targets(session=db, options=options)
load_works(session=db, target_urls=target_urls, options=options)
start_proxy()
start_transaction_logger()
return True | e62b34189e330fdaea7ec6c81084616bd015a587 | 16,843 |
def get_registration_form() -> ConvertedDocument:
"""
    Return the form parameters for registration
    :return: Profile form data plus login and password
"""
form = [
gen_field_row('Логин', 'login', 'text', validate_rule='string'),
gen_field_row('Пароль', 'password', 'password'),
gen_field_row('Токен', 'token', 'text', validate_rule='token')
] + convert_mongo_model(Profile)
return form | 76bcab98d840523e94234c456cb1ccbd2b1f9129 | 16,844 |
def get_docker_stats(dut):
"""
Get docker ps
:param dut:
:return:
"""
command = 'docker stats -a --no-stream'
output = st.show(dut, command)
return output | cd994701c622ce9ea1f6f123f24b9913aa02698d | 16,845 |
import argparse
import os
def parse_commandline_arguments():
"""Parses command line arguments and adjusts internal data structures."""
# Define script command line arguments
parser = argparse.ArgumentParser(description='Run object detection inference on input image.')
parser.add_argument('-w', '--workspace_dir',
help='sample workspace directory')
parser.add_argument('-d', '--data',
help="Specify the data directory where it is saved in. $TRT_DATA_DIR will be overwritten by this argument.")
args, _ = parser.parse_known_args()
data_dir = os.environ.get('TRT_DATA_DIR', None) if args.data is None else args.data
if data_dir is None:
raise ValueError("Data directory must be specified by either `-d $DATA` or environment variable $TRT_DATA_DIR.")
PATHS.set_data_dir_path(data_dir)
# Set workspace dir path if passed by user
if args.workspace_dir:
PATHS.set_workspace_dir_path(args.workspace_dir)
try:
os.makedirs(PATHS.get_workspace_dir_path())
except:
pass
# Verify Paths after adjustments. This also exits script if verification fails
PATHS.verify_all_paths()
return args | fdefe92824917b18b5aff89c13362ed5dbca0be5 | 16,846 |
import os
def fetch_latency(d: str, csim: bool = False):
"""Fetch the simulated latency, measured in cycles."""
tb_sim_report_dir = os.path.join(
d, "tb" if not csim else "tb.csim", "solution1", "sim", "report"
)
if not os.path.isdir(tb_sim_report_dir):
return None
tb_sim_report = get_single_file_with_ext(tb_sim_report_dir, "rpt")
if not tb_sim_report:
return None
tb_sim_report = os.path.join(tb_sim_report_dir, tb_sim_report)
if not os.path.isfile(tb_sim_report):
return None
latency = None
with open(tb_sim_report, "r") as f:
for line in reversed(f.readlines()):
if latency:
break
comps = [x.strip() for x in line.strip().split("|")]
# there are 9 columns, +2 before & after |
# the 2nd column should give PASS.
if len(comps) == 11 and comps[2].upper() == "PASS":
latency = comps[-2] # from the last column.
# The report is malformed.
if not latency:
return None
try:
# Will raise error if latency is not an integer.
return int(latency)
except:
return None | 881078f338f5dee611725ab8a06331f09e1ca45c | 16,847 |
def enthalpyvap(temp=None,pres=None,dvap=None,chkvals=False,
chktol=_CHKTOL,temp0=None,pres0=None,dvap0=None,chkbnd=False,
mathargs=None):
"""Calculate ice-vapour vapour enthalpy.
Calculate the specific enthalpy of water vapour for ice and water
vapour in equilibrium.
:arg temp: Temperature in K.
:type temp: float or None
:arg pres: Pressure in Pa.
:type pres: float or None
:arg dvap: Water vapour density in kg/m3. If unknown, pass None
(default) and it will be calculated.
:type dvap: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg temp0: Initial guess for the temperature in K. If None
(default) then `_approx_p` is used.
:type temp0: float or None
:arg pres0: Initial guess for the pressure in Pa. If None (default)
then `_approx_t` is used.
:type pres0: float or None
:arg dvap0: Initial guess for the water vapour density in kg/m3. If
None (default) then `_approx_t` or `_approx_p` is used.
:type dvap0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Enthalpy in J/kg.
:raises ValueError: If neither of temp or pres is provided.
:raises RuntimeWarning: If the relative disequilibrium is more than
chktol, if chkvals is True and all values are given.
:Examples:
>>> enthalpyvap(temp=270.)
2495132.21977
>>> enthalpyvap(pres=100.)
2463525.19629
"""
temp, pres, dvap = eq_tp(temp=temp,pres=pres,dvap=dvap,chkvals=chkvals,
chktol=chktol,temp0=temp0,pres0=pres0,dvap0=dvap0,chkbnd=chkbnd,
mathargs=mathargs)
hv = flu2.enthalpy(temp,dvap)
return hv | dadc59bf28272de3a298b89cb13901825fd58c95 | 16,848 |
async def get_eng_hw(module: tuple[str, ...], task: str) -> Message:
"""
    Standard request for English (homework)
"""
return await _get_eng_content('zadanie-{}-m-{}-z'.format(*module), task) | 15e5425173c643074dde08c6753ffcd333414565 | 16,849 |
def _choose_split_axis(v: Variable) -> Axis:
"""
For too-large texture `v`, choose one axis which is the best one to reduce texture size by splitting `v` in that axis.
Args:
v: Variable, whose size is too large (= this variable has :code:`SplitTarget` attribute)
Returns:
axis
"""
ops = list(v.input_to)
if v.output_from is not None:
ops += [v.output_from]
splittable_axes = list(v.order.axes)
for op in ops:
_op_splittable_axes = _listup_splittable_axis(v, op) + [attr.axis for attr in op.get_attribute(Tensorwise)]
for a in list(splittable_axes):
if a not in _op_splittable_axes:
splittable_axes.remove(a)
if len(splittable_axes) == 0:
raise ValueError("No axis is splittable")
# Calculate the size of a side of texture which will be changed when each axis is split
#
# ex) OrderNC, N=512, C=2048, texture(width=2048, height=512)
# => If axis `N` is split, then height will be changed => N: 512 (=height)
# If axis `C` is split, then width will be changed => C: 2048 (=width)
#
# ex) OrderNCHW, N=1, C=512, H=13, W=13, texture(width=2048, height=43)
# => TexW == W*H*(partial of C) texture width consists of axis W, H and C.
# TexH == (partial of C)*N texture height consists of axis C and N.
# => N cannot be split => N: -1
# C is related both width and height. In this case, use large one. => C: 2048
# H is included in width => H: 2048
# W is also included in width => W: 2048
axis_corresponding_texture_size = AxisKeyDict()
element_per_pixel = ChannelMode.elements_per_pixel(v)
tex_h, tex_w = TextureShape.get(v)
tex_w = (tex_w + element_per_pixel - 1) // element_per_pixel
for a in v.order.axes:
if v.shape_dict[a] == 1:
# This axis cannot be split
axis_corresponding_texture_size[a] = -1
elif v.stride_dict[a] >= tex_w * element_per_pixel:
axis_corresponding_texture_size[a] = tex_h
elif v.stride_dict[a] * v.shape_dict[a] >= tex_w * element_per_pixel:
axis_corresponding_texture_size[a] = max(tex_h, tex_w)
else:
axis_corresponding_texture_size[a] = tex_w
splittable_axes.sort(key=lambda a: axis_corresponding_texture_size[a], reverse=True)
target_axis = splittable_axes[0]
console.debug(f"===========================================================================")
console.debug(f"{v}")
console.debug(f" original order: {v.order}")
console.debug(f" original shape: {v.shape}")
console.debug(f" texture shape: {TextureShape.get(v)}")
console.debug(f"")
console.debug(f" splittable axis: {splittable_axes}")
console.debug(f" split axis: {target_axis}")
console.debug(f"")
console.debug(f" related operators:")
for related_op in ops:
console.debug(f"---------------------------------------------------------------------------")
traverse.dump_op(related_op)
console.debug(f"")
if axis_corresponding_texture_size[target_axis] <= 0:
raise NotImplementedError(f"Variable is too large to handle in WebGL backend: {v}")
return target_axis | b48acb753dc357464103e963a2c1d5470051cf11 | 16,850 |
import json
import numpy as np
def get_image_blobs(pb):
""" Get an image from the sensor connected to the MicroPython board,
find blobs and return the image, a list of blobs, and the time it
took to find the blobs (in [ms])
"""
raw = json.loads(run_on_board(pb, script_get_image, no_print=True))
img = np.flip(np.transpose(np.reshape(raw, (8, 8))))
time_str = run_on_board(pb, script_get_blob_list, no_print=True)
t_ms = float(time_str.split("= ")[1].split("m")[0])
blobs_str = run_on_board(pb, script_print_blob_list, no_print=True)
blobs_str = blobs_str.replace("nan", "0")
blobs = json.loads(blobs_str.replace('(', '[').replace(')', ']'))
return img, blobs, t_ms | 5d563aeb490c5c1d509e442a3f7210bcfd9d6779 | 16,851 |
def classification_report(y_true, y_pred, digits=2, suffix=False):
"""Build a text report showing the main classification metrics.
Args:
y_true : 2d array. Ground truth (correct) target values.
y_pred : 2d array. Estimated targets as returned by a classifier.
digits : int. Number of digits for formatting output floating point values.
Returns:
report : string. Text summary of the precision, recall, F1 score for each class.
Examples:
>>> from seqeval.metrics import classification_report
>>> y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> print(classification_report(y_true, y_pred))
precision recall f1-score support
<BLANKLINE>
MISC 0.00 0.00 0.00 1
PER 1.00 1.00 1.00 1
<BLANKLINE>
avg / total 0.50 0.50 0.50 2
<BLANKLINE>
"""
true_entities = set(get_entities(y_true, suffix))
pred_entities = set(get_entities(y_pred, suffix))
name_width = 0
d1 = defaultdict(set)
d2 = defaultdict(set)
for e in true_entities:
d1[e[0]].add((e[1], e[2]))
name_width = max(name_width, len(e[0]))
for e in pred_entities:
d2[e[0]].add((e[1], e[2]))
last_line_heading = 'avg / total'
width = max(name_width, len(last_line_heading), digits)
headers = ["precision", "recall", "f1-score", "support"]
head_fmt = u'{:>{width}s} ' + u' {:>9}' * len(headers)
report = head_fmt.format(u'', *headers, width=width)
report += u'\n\n'
row_fmt = u'{:>{width}s} ' + u' {:>9.{digits}f}' * 3 + u' {:>9}\n'
ps, rs, f1s, s = [], [], [], []
for type_name, true_entities in d1.items():
pred_entities = d2[type_name]
nb_correct = len(true_entities & pred_entities)
nb_pred = len(pred_entities)
nb_true = len(true_entities)
p = 100 * nb_correct / nb_pred if nb_pred > 0 else 0
r = 100 * nb_correct / nb_true if nb_true > 0 else 0
f1 = 2 * p * r / (p + r) if p + r > 0 else 0
report += row_fmt.format(*[type_name, p, r, f1, nb_true], width=width, digits=digits)
ps.append(p)
rs.append(r)
f1s.append(f1)
s.append(nb_true)
report += u'\n'
# compute averages
report += row_fmt.format(last_line_heading,
np.average(ps, weights=s),
np.average(rs, weights=s),
np.average(f1s, weights=s),
np.sum(s),
width=width, digits=digits)
return report | 6158c82879b2894c96479bb96f986e348ef02b00 | 16,852 |
def tidy_conifer(ddf: DataFrame) -> DataFrame:
"""Tidy up the raw conifer output."""
result = ddf.drop(columns=["marker", "identifier", "read_lengths", "kraken"])
result[["name", "taxonomy_id"]] = result["taxa"].str.extract(
r"^(?P<name>[\w ]+) \(taxid (?P<taxonomy_id>\d+)\)$", expand=True
)
return result.drop(columns=["taxa"]).categorize(
columns=["name", "taxonomy_id"], index=False
) | 88e55855d5f9ca8859a0e058a593aadd44774387 | 16,853 |
import os
def load(name=None):
"""
Loads or initialises a convolutional neural network.
"""
if name is not None:
path = os.path.join(AmfConfig.get_appdir(), 'trained_networks', name)
else:
path = AmfConfig.get('model')
if path is not None and os.path.isfile(path):
print(f'* Trained model: {path}')
model = keras.models.load_model(path)
if model.name == CNN1_NAME:
AmfConfig.set('level', 1)
print('* Classes: colonised (M+), non-colonised (M−), background (Other).')
else: # elif model.name == CNN2_NAME
AmfConfig.set('level', 2)
print('* Classes: arbuscules (A), vesicles (V), '
'hyphopodia (H), intraradical hyphae (IH).')
return model
else:
if AmfConfig.get('run_mode') == 'train':
print('* Initializes a new network.')
if AmfConfig.get('level') == 1:
return create_cnn1()
else:
return create_cnn2()
else: # missing pre-trained model in prediction mode.
AmfLog.error('A pre-trained model is required in prediction mode',
exit_code=AmfLog.ERR_NO_PRETRAINED_MODEL) | cdd052a8a41fcb662ebd26aee51cdc05439be419 | 16,854 |
import collections
def get_duplicates(lst):
"""Return a list of the duplicate items in the input list."""
return [item for item, count in collections.Counter(lst).items() if count > 1] | 8f10226c904f95efbee447b4da5dc5764b18f6d2 | 16,855 |
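# Hedged usage sketch for get_duplicates() above (assumes it is in scope):
# each value that occurs more than once is reported exactly once.
print(get_duplicates(["a", "b", "a", "c", "b", "a"]))  # ['a', 'b']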
def relu(x, alpha=0):
"""
Rectified Linear Unit.
If alpha is between 0 and 1, the function performs leaky relu.
alpha values are commonly between 0.1 and 0.3 for leaky relu.
Parameters
----------
x : numpy array
Values to be activated.
alpha : float, optional
The scale factor for the linear unit.
Typical values are between 0.1 and 0.3.
        The default is 0.
Returns
-------
z : numpy array
The activated values.
"""
z = x.copy()
z[x < 0] = z[x < 0]*alpha
return z | f18b331ef66d14a29e1ad5f14b610af583ea7b3a | 16,856 |
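# Hedged usage sketch for relu() above (assumes the function is in scope):
# plain ReLU clamps negatives to 0, leaky ReLU scales them by alpha instead.
import numpy as np
x = np.array([-2.0, -0.5, 0.0, 1.5])
print(relu(x))             # negatives clamped to 0 (may print as -0.)
print(relu(x, alpha=0.3))  # leaky: negatives scaled by 0.3 -> [-0.6, -0.15, 0., 1.5]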
def build_unique_dict(controls):
"""Build the disambiguated list of controls
Separated out to a different function so that we can get
the control identifiers for printing.
"""
name_control_map = UniqueDict()
# collect all the possible names for all controls
# and build a list of them
for ctrl in controls:
ctrl_names = get_control_names(ctrl, controls)
# for each of the names
for name in ctrl_names:
name_control_map[name] = ctrl
return name_control_map | 931b90a34e151550c399b314d368a54e3c816796 | 16,857 |
def serialize_thrift_object(thrift_obj, proto_factory=Consts.PROTO_FACTORY):
"""Serialize thrift data to binary blob
:param thrift_obj: the thrift object
:param proto_factory: protocol factory, set default as Compact Protocol
:return: string the serialized thrift payload
"""
return Serializer.serialize(proto_factory(), thrift_obj) | f6845b7539da82dc0555e11b0013db034d297e70 | 16,858 |
from typing import Optional
def signal(
fn: Optional[WorkflowSignalFunc] = None,
*,
name: Optional[str] = None,
dynamic: Optional[bool] = False,
):
"""Decorator for a workflow signal method.
This is set on any async or non-async method that you wish to be called upon
receiving a signal. If a function overrides one with this decorator, it too
must be decorated.
Signal methods can only have positional parameters. Best practice for
non-dynamic signal methods is to only take a single object/dataclass
argument that can accept more fields later if needed. Return values from
signal methods are ignored.
Args:
fn: The function to decorate.
name: Signal name. Defaults to method ``__name__``. Cannot be present
when ``dynamic`` is present.
dynamic: If true, this handles all signals not otherwise handled. The
parameters of the method must be self, a string name, and a
``*args`` positional varargs. Cannot be present when ``name`` is
present.
"""
def with_name(name: Optional[str], fn: WorkflowSignalFunc) -> WorkflowSignalFunc:
if not name:
_assert_dynamic_signature(fn)
# TODO(cretz): Validate type attributes?
setattr(fn, "__temporal_signal_definition", _SignalDefinition(name=name, fn=fn))
return fn
if name is not None or dynamic:
if name is not None and dynamic:
raise RuntimeError("Cannot provide name and dynamic boolean")
return partial(with_name, name)
if fn is None:
raise RuntimeError("Cannot create signal without function or name or dynamic")
return with_name(fn.__name__, fn) | 8e6f3581590a7a429b4e5e838241328deb817e96 | 16,859 |
from matplotlib.colors import LinearSegmentedColormap
def cmap_RdBu(values, vmin = None, vmax = None):
"""Generates a blue/red colorscale with white value centered around the value 0
Parameters
----------
values : PandasSeries, numpy array, list or tuple
List of values to be used for creating the color map
    vmin : float, optional
        Minimum value in the color map; if None then min(values) is used
    vmax : float, optional
        Maximum value in the color map; if None then max(values) is used
    Returns
    -------
    matplotlib.colors.LinearSegmentedColormap
        Diverging blue/white/red colormap with its white point at the value 0.
"""
    if vmin is not None:
scoremin = vmin
else:
scoremin = min(values)
    if vmax is not None:
scoremax = vmax
else:
scoremax = max(values)
cmap2 = LinearSegmentedColormap.from_list('mycmap', [(0, 'blue'),
(-scoremin/(scoremax-scoremin), 'white'),
(1, 'red')])
return(cmap2) | b6bb207a8a1cccf13b87212125f74287b2a3cc9a | 16,860 |
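# Hedged usage sketch for cmap_RdBu() above (assumes matplotlib and numpy are
# available): for values spanning negative and positive, the white point of
# the returned colormap lands at the value 0.
import numpy as np
import matplotlib.pyplot as plt
scores = np.linspace(-3.0, 6.0, 50)
cmap = cmap_RdBu(scores)
plt.scatter(np.arange(scores.size), scores, c=scores, cmap=cmap)
plt.colorbar()
plt.show()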
import numpy as np
def _add_noise(audio, snr):
"""
    Add Gaussian noise to the signal at the given SNR.
    :param audio(np.array): input signal
    :param snr(float): signal-to-noise ratio in dB
:return: audio with added noise
"""
audio_mean = np.mean(audio**2)
audio_mean_db = 10 * np.log10(audio_mean)
    noise_mean_db = audio_mean_db - snr  # noise power (dB) = signal power (dB) - SNR (dB)
noise_mean = 10 ** (noise_mean_db/10)
return audio + np.random.normal(0, np.sqrt(noise_mean), len(audio)) | 4f77e7a2893dc0bdcaf5e170c5e17371127b80d5 | 16,861 |
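# Hedged usage sketch for _add_noise() above (assumes numpy is imported):
# add noise to a unit sine at a 20 dB signal-to-noise target and inspect the
# power of the signal and of the added noise.
import numpy as np
t = np.linspace(0, 1, 16000, endpoint=False)
clean = np.sin(2 * np.pi * 440 * t)
noisy = _add_noise(clean, snr=20.0)
print(10 * np.log10(np.mean(clean ** 2)))           # signal power in dB
print(10 * np.log10(np.mean((noisy - clean) ** 2))) # added-noise power in dB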
from tvm.relay.testing import mlp
def mlp_net():
"""The MLP test from Relay.
"""
return mlp.get_net(1) | 4e48dfb04bab1434bd581b7fc6cd5e2257c88022 | 16,862 |
def build_ind_val_dsets(dimensions, is_spectral=True, verbose=False, base_name=None):
"""
Creates VirtualDatasets for the position or spectroscopic indices and values of the data.
Remember that the contents of the dataset can be changed if need be after the creation of the datasets.
For example if one of the spectroscopic dimensions (e.g. - Bias) was sinusoidal and not linear, The specific
dimension in the Spectroscopic_Values dataset can be manually overwritten.
Parameters
----------
dimensions : Dimension or array-like of Dimension objects
Sequence of Dimension objects that provides all necessary instructions for constructing the indices and values
datasets
is_spectral : bool, optional. default = True
Spectroscopic (True) or Position (False)
verbose : Boolean, optional
Whether or not to print statements for debugging purposes
base_name : str / unicode, optional
Prefix for the datasets. Default: 'Position_' when is_spectral is False, 'Spectroscopic_' otherwise
Returns
-------
ds_inds : VirtualDataset
Reduced Spectroscopic indices dataset
ds_vals : VirtualDataset
        Reduced Spectroscopic values dataset
Notes
-----
`steps`, `initial_values`, `labels`, and 'units' must be the same length as
`dimensions` when they are specified.
Dimensions should be in the order from fastest varying to slowest.
"""
warn('build_ind_val_dsets is available only for legacy purposes and will be REMOVED in a future release.\n'
'Please consider using write_ind_val_dsets in hdf_utils instead', DeprecationWarning)
if isinstance(dimensions, Dimension):
dimensions = [dimensions]
if not isinstance(dimensions, (list, np.ndarray, tuple)):
raise TypeError('dimensions should be array-like ')
if not np.all([isinstance(x, Dimension) for x in dimensions]):
raise TypeError('dimensions should be a sequence of Dimension objects')
if base_name is not None:
if not isinstance(base_name, (str, unicode)):
raise TypeError('base_name should be a string')
if not base_name.endswith('_'):
base_name += '_'
else:
base_name = 'Position_'
if is_spectral:
base_name = 'Spectroscopic_'
unit_values = [x.values for x in dimensions]
indices, values = build_ind_val_matrices(unit_values, is_spectral=is_spectral)
if verbose:
print('Indices:')
print(indices)
print('Values:')
print(values)
# Create the slices that will define the labels
region_slices = get_aux_dset_slicing([x.name for x in dimensions], is_spectroscopic=is_spectral)
# Create the VirtualDataset for both Indices and Values
ds_indices = VirtualDataset(base_name + 'Indices', indices, dtype=INDICES_DTYPE)
ds_values = VirtualDataset(base_name + 'Values', VALUES_DTYPE(values), dtype=VALUES_DTYPE)
for dset in [ds_indices, ds_values]:
dset.attrs['labels'] = region_slices
dset.attrs['units'] = [x.units for x in dimensions]
return ds_indices, ds_values | 8876a9e61fd51a1e517adc7fb210172dc28e1b1e | 16,863 |
def groupByX(grp_fn, messages):
"""
Returns a dictionary keyed by the requested group.
"""
m_grp = {}
for msg in getIterable(messages):
# Ignore messages that we don't have all the timing for.
if msg.isComplete() or not ignore_incomplete:
m_type = grp_fn(msg)
if m_type in m_grp:
m_grp[m_type].append(msg)
else:
m_grp[m_type] = [msg]
return m_grp | 9ebf63cfe81e8c8b6f19d90c725415cafdbcd636 | 16,864 |
import math
def regular_poly_circ_rad_to_side_length(n_sides, rad):
"""Find side length that gives regular polygon with `n_sides` sides an
equivalent area to a circle with radius `rad`."""
p_n = math.pi / n_sides
return 2 * rad * math.sqrt(p_n * math.tan(p_n)) | 939ff5de399d7f0a31750aa03562791ee83ee744 | 16,865 |
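# Hedged numeric check for the helper above: a regular n-gon with side s has
# area n * s**2 / (4 * tan(pi / n)); with the returned side length this should
# match the area of a circle of radius `rad`.
import math
rad = 2.0
for n_sides in (3, 6, 12):
    s = regular_poly_circ_rad_to_side_length(n_sides, rad)
    poly_area = n_sides * s ** 2 / (4 * math.tan(math.pi / n_sides))
    print(n_sides, round(poly_area, 6), round(math.pi * rad ** 2, 6))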
def dbl_colour(days):
"""
Return a colour corresponding to the number of days to double
:param days: int
:return: str
"""
if days >= 28:
return "orange"
elif 0 < days < 28:
return "red"
elif days < -28:
return "green"
else:
return "yellow" | 46af7d57487f17b937ad5b7332879878cbf84220 | 16,866 |
def create_model(data_format):
"""Model to recognize digits in the MNIST data set.
Network structure is equivalent to:
https://github.com/tensorflow/tensorflow/blob/r1.5/tensorflow/examples/tutorials/mnist/mnist_deep.py
and
https://github.com/tensorflow/models/blob/master/tutorials/image/mnist/convolutional.py
But uses the tf.keras API.
Args:
data_format: Either 'channels_first' or 'channels_last'. 'channels_first' is
typically faster on GPUs while 'channels_last' is typically faster on
CPUs. See
https://www.tensorflow.org/performance/performance_guide#data_formats
Returns:
A tf.keras.Model. """
# pylint: disable=no-member
if data_format == 'channels_first':
input_shape = [1, 28, 28]
else:
assert data_format == 'channels_last'
input_shape = [28, 28, 1]
return Sequential(
[
Reshape(target_shape=input_shape, input_shape=(28 * 28,)),
Conv2D(32, 5, padding='same', data_format=data_format, activation=tf.nn.relu,
kernel_initializer='random_uniform'),
MaxPool2D((2, 2), (2, 2), padding='same', data_format=data_format),
Conv2D(64, 5, padding='same', data_format=data_format, activation=tf.nn.relu,
kernel_initializer='random_uniform'),
MaxPool2D((2, 2), (2, 2), padding='same', data_format=data_format),
Flatten(),
Dense(1024, activation=tf.nn.relu, kernel_initializer='random_uniform'),
Dropout(0.4),
Dense(10, kernel_initializer='random_uniform')
]) | d6fe45e5cfef5246a220600b67e24cddceeebd3a | 16,867 |
def run_noncentered_hmc(model_config,
num_samples=2000,
burnin=1000,
num_leapfrog_steps=4,
num_adaptation_steps=500,
num_optimization_steps=2000):
"""Given a (centred) model, this function transforms it to a fully non-centred
one, and runs HMC on the reparametrised model.
"""
tf.reset_default_graph()
return run_parametrised_hmc(
model_config=model_config,
interceptor=ed_transforms.ncp,
num_samples=num_samples,
burnin=burnin,
num_leapfrog_steps=num_leapfrog_steps,
num_adaptation_steps=num_adaptation_steps,
num_optimization_steps=num_optimization_steps) | 95065fb8c8ee778f0d300b46f285f8f3bb026aed | 16,868 |
import collections
def get_project_apps(in_app_list):
""" Application definitions for app name.
Args:
in_app_list: (list) - names of applications
Returns:
tuple (list, dictionary) - list of dictionaries with apps definitions
dictionary of warnings
"""
apps = []
warnings = collections.defaultdict(list)
if not in_app_list:
return apps, warnings
missing_app_msg = "Missing definition of application"
application_manager = ApplicationManager()
for app_name in in_app_list:
if application_manager.applications.get(app_name):
apps.append({"name": app_name})
else:
warnings[missing_app_msg].append(app_name)
return apps, warnings | 4e9be8ffddf44aba740414a8ee020376eda3a761 | 16,869 |
def read(G):
""" Wrap a NetworkX graph class by an ILPGraph class
The wrapper class is used store the graph and the related variables of an optimisation problem
in a single entity.
:param G: a `NetworkX graph <https://networkx.org/documentation/stable/reference/introduction.html#graphs>`__
:return: an :py:class:`~graphilp.imports.ilpgraph.ILPGraph`
"""
result = ILPGraph()
result.set_nx_graph(G)
return result | cb5db29d210d944047dbdf806ecfaaa274d517e8 | 16,870 |
def slog_det(obs, **kwargs):
"""Computes the determinant of a matrix of Obs via np.linalg.slogdet."""
def _mat(x):
dim = int(np.sqrt(len(x)))
if np.sqrt(len(x)) != dim:
raise Exception('Input has to have dim**2 entries')
mat = []
for i in range(dim):
row = []
for j in range(dim):
row.append(x[j + dim * i])
mat.append(row)
(sign, logdet) = anp.linalg.slogdet(np.array(mat))
return sign * anp.exp(logdet)
if isinstance(obs, np.ndarray):
return derived_observable(_mat, (1 * (obs.ravel())).tolist(), **kwargs)
elif isinstance(obs, list):
return derived_observable(_mat, obs, **kwargs)
else:
raise TypeError('Unproper type of input.') | 20b4016653d83303ac671a5d2641d4c344393b0a | 16,871 |
def make_optimiser_form(optimiser):
"""Make a child form for the optimisation settings.
:param optimiser: the Optimiser instance
:returns: a subclass of FlaskForm; NB not an instance!
"""
# This sets up the initial form with the optimiser's parameters
OptimiserForm = make_component_form(optimiser)
# Now add options for specifying objectives
OptimiserForm.obj_min_A = BooleanField('Minimise A', default=True)
OptimiserForm.obj_min_sigma_varA = BooleanField('Minimise variance in A')
OptimiserForm.obj_min_B = BooleanField('Minimise B')
OptimiserForm.obj_max_C = BooleanField('Maximise C')
# Options saying which variables to optimise
OptimiserForm.var_bool_param = BooleanField(
'Optimise the choice of a binary option',
default=True)
OptimiserForm.var_int_param = BooleanField('Optimise the range of an integer',
default=True)
return OptimiserForm | 745c4a9c4268d31687215f4acec709a5eacfcbf0 | 16,872 |
def prepare_for_evaluate(test_images, test_label):
"""
It will preprocess and return the images and labels for tesing.
    :param test_images: original images for testing
    :param test_label: original labels for testing
    :return: preprocessed images
    :return: preprocessed labels
"""
    test_d = np.stack([preprocessing_for_testing(test_images[i])
                       for i in range(test_images.shape[0])], axis=0)
    test_new_image, test_new_label = test_d, test_label
    # Shuffle 20 times, re-shuffling the already shuffled arrays on each pass
    for time in range(20):
        test_new_image, test_new_label = shuffle(test_new_image, test_new_label,
                                                 random_state=randint(0, test_images.shape[0]))
return test_new_image, test_new_label | abe60fc558c6cc2c951a4efee758d2746608d8d1 | 16,873 |
def ab_group_to_dict(group):
"""Convert ABGroup to Python dict. Return None if group is empty."""
d = {'name': '', 'emails': [], 'is_group': True, 'is_company': False}
d['name'] = group.valueForProperty_(AB.kABGroupNameProperty)
for person in group.members():
identifier = group.distributionIdentifierForProperty_person_(
AB.kABEmailProperty, person)
if identifier:
emails = person.valueForProperty_(AB.kABEmailProperty)
email = emails.valueAtIndex_(
emails.indexForIdentifier_(identifier))
# log.debug('{} is in group {}'.format(email, d['name']))
d['emails'].append(email)
if not len(d['emails']):
return None
return d | 49f86b7ee4c4b4ce7f9f8c3db1811439e5fa5926 | 16,874 |
async def async_setup(hass, config):
"""Set up the PEVC modbus component."""
hass.data[DOMAIN] = {}
return True | cde898e2904f8e9cfcf60d260ee2476326877dd9 | 16,875 |
from typing import Any
def deserialize_value(val: str) -> Any:
"""Deserialize a json encoded string in to its original value"""
return _unpack_value(
seven.json.loads(check.str_param(val, "val")),
whitelist_map=_WHITELIST_MAP,
descent_path="",
) | d01ce83488ea743aae298b15d1fe5f4faac6adbc | 16,876 |
import binascii
import os
def gen_signature(priv_path, pub_path, sign_path, passphrase=None):
"""
creates a signature for the given public-key with
the given private key and writes it to sign_path
"""
with salt.utils.files.fopen(pub_path) as fp_:
mpub_64 = fp_.read()
mpub_sig = sign_message(priv_path, mpub_64, passphrase)
mpub_sig_64 = binascii.b2a_base64(mpub_sig)
if os.path.isfile(sign_path):
return False
log.trace(
"Calculating signature for %s with %s",
os.path.basename(pub_path),
os.path.basename(priv_path),
)
if os.path.isfile(sign_path):
log.trace(
"Signature file %s already exists, please remove it first and " "try again",
sign_path,
)
else:
with salt.utils.files.fopen(sign_path, "wb+") as sig_f:
sig_f.write(salt.utils.stringutils.to_bytes(mpub_sig_64))
log.trace("Wrote signature to %s", sign_path)
return True | b3bc99fc0faf38cc83e7d441417147d9fc127b66 | 16,877 |
import six
import collections.abc
def stringify(value):
"""
    PHPCS uses comma-separated strings in many places;
    because of how it handles options we have to do bad things
    with string concatenation.
"""
if isinstance(value, six.string_types):
return value
    if isinstance(value, collections.abc.Iterable):
return ','.join(value)
return str(value) | 1ca24ff986f3cd02c845ad0e11b8b1cfd3c7f779 | 16,878 |
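# Hedged usage sketch for stringify() above: strings pass through untouched,
# iterables are joined with commas, anything else is str()-ed.
print(stringify("src,tests"))       # 'src,tests'
print(stringify(["src", "tests"]))  # 'src,tests'
print(stringify(3))                 # '3'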
def read_requirements_file(path):
""" reads requirements.txt file """
with open(path) as f:
requires = []
for line in f.readlines():
if not line:
continue
requires.append(line.strip())
return requires | ab224bd3adac7adef76a2974a9244042f9aedf84 | 16,879 |
def vsa_get_all(context):
"""
Get all Virtual Storage Array records.
"""
session = get_session()
return session.query(models.VirtualStorageArray).\
options(joinedload('vsa_instance_type')).\
filter_by(deleted=can_read_deleted(context)).\
all() | 3568997b060fbeab115ec79c2c0cba77f78c6cba | 16,880 |
import os
import glob
import logging
def find_files_match_names_across_dirs(list_path_pattern, drop_none=True):
""" walk over dir with images and segmentation and pair those with the same
     name and if the folder with centers exists also add a center to each pair
.. note:: returns just paths
:param list(str) list_path_pattern: list of paths with image name patterns
:param bool drop_none: drop if there are some none - missing values in rows
:return: DF<path_1, path_2, ...>
>>> def _mp(d, n):
... return os.path.join(update_path('data_images'),
... 'drosophila_ovary_slice', d, n)
>>> df = find_files_match_names_across_dirs([_mp('image', '*.jpg'),
... _mp('segm', '*.png'),
... _mp('center_levels', '*.csv')])
>>> len(df) > 0
True
>>> df.columns.tolist()
['path_1', 'path_2', 'path_3']
>>> df = find_files_match_names_across_dirs([_mp('image', '*.png'),
... _mp('segm', '*.jpg'),
... _mp('center_levels', '*.csv')])
>>> len(df)
0
"""
list_path_pattern = [pp for pp in list_path_pattern if pp is not None]
assert len(list_path_pattern) > 1, 'at least 2 paths required'
for p in list_path_pattern:
assert os.path.exists(os.path.dirname(p)), \
'missing "%s"' % os.path.dirname(p)
def _get_name(path, pattern='*'):
name = os.path.splitext(os.path.basename(path))[0]
for s in pattern.split('*'):
name = name.replace(s, '')
return name
def _get_paths_names(path_pattern):
paths_ = glob.glob(path_pattern)
if not paths_:
return [None], [None]
names_ = [_get_name(p, os.path.basename(path_pattern)) for p in paths_]
return paths_, names_
logging.info('find match files...')
paths_0, names_0 = _get_paths_names(list_path_pattern[0])
list_paths = [paths_0]
for path_pattern_n in list_path_pattern[1:]:
paths_n = [None] * len(paths_0)
name_pattern = os.path.basename(path_pattern_n)
list_files = glob.glob(path_pattern_n)
logging.debug('found %i files in %s', len(list_files), path_pattern_n)
for path_n in list_files:
name_n = _get_name(path_n, name_pattern)
if name_n in names_0:
idx = names_0.index(name_n)
paths_n[idx] = path_n
list_paths.append(paths_n)
col_names = ['path_%i' % (i + 1) for i in range(len(list_paths))]
df_paths = pd.DataFrame(list(zip(*list_paths)), columns=col_names)
# filter None
if drop_none:
df_paths.dropna(inplace=True)
return df_paths | 3ee8b79ccce1c7b873037bb59067acbb893df047 | 16,881 |
import threading
def thread_it(obj, timeout = 10):
""" General function to handle threading for the physical components of the system. """
    # Run the object's 'run' method in a separate thread
    thread = threading.Thread(target=obj.run)
    thread.start()
    # Wait until the object signals that it is ready (or the timeout expires)
    obj.ready.wait(timeout=timeout)
    # Clean up
    thread.join()
    obj.ready.clear()
return None | 02ed60a560ffa65f0364aa7414b1fda0d3e62ac5 | 16,882 |
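# Hedged usage sketch for thread_it() above: a minimal worker with the
# interface the helper expects -- a run() method plus a `ready` threading.Event
# that run() sets when finished. This Worker class is an illustrative
# assumption, not part of the original code base.
import threading
class Worker:
    def __init__(self):
        self.ready = threading.Event()
    def run(self):
        # ... the physical-component work would happen here ...
        self.ready.set()
thread_it(Worker(), timeout=5)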
def _subsize_sub_pixel_align_cy_ims(pixel_aligned_cy_ims, subsize, n_samples):
"""
The inner loop of _sub_pixel_align_cy_ims() that executes on a "subsize"
region of the larger image.
Is subsize is None then it uses the entire image.
"""
n_max_failures = n_samples * 2
sub_pixel_offsets = np.zeros((n_samples, pixel_aligned_cy_ims.shape[0], 2))
pixel_aligned_cy0_im = pixel_aligned_cy_ims[0]
im_mea = pixel_aligned_cy_ims.shape[-1]
assert pixel_aligned_cy_ims.shape[-2] == im_mea
def _subregion(im, pos):
if subsize is None:
return im
else:
return imops.crop(im, off=pos, dim=WH(subsize, subsize), center=False)
sample_i = 0
n_failures = 0
while sample_i < n_samples and n_failures < n_max_failures:
try:
if subsize is None:
pos = XY(0, 0)
else:
pos = XY(
np.random.randint(0, im_mea - subsize - 16),
np.random.randint(0, im_mea - subsize - 16),
)
subregion_pixel_aligned_cy0_im = _subregion(pixel_aligned_cy0_im, pos)
for cy_i, pixel_aligned_cy_im in enumerate(pixel_aligned_cy_ims):
if cy_i == 0:
continue
# Use a small region to improve speed
subregion_pixel_aligned_cy_im = _subregion(pixel_aligned_cy_im, pos)
try:
_dy, _dx = _subpixel_align_one_im(
subregion_pixel_aligned_cy0_im, subregion_pixel_aligned_cy_im,
)
sub_pixel_offsets[sample_i, cy_i, :] = (_dy, _dx)
except Exception:
# This is a general exception handler because there
# are a number of ways that the _subpixel_align_one_im
                    # can fail including linear algebra, etc. All
# of which end up with a skip and a retry.
n_failures += 1
raise AlignmentError
sample_i += 1
except AlignmentError:
# Try again with a new pos
if n_failures >= n_max_failures:
raise AlignmentError
return np.mean(sub_pixel_offsets, axis=0) | f96a2bc9b4c55976fd4c49da3f59afa991c53ff1 | 16,883 |
def obj_setclass(this, klass):
"""
set Class for `this`!!
"""
return this.setclass(klass) | 4447df2f3055f21c9066a254290cdd037e812b64 | 16,884 |
def format(number, separator=' ', format=None, add_check_digit=False):
"""Reformat the number to the standard presentation format. The separator
used can be provided. If the format is specified (either 'hex' or 'dec')
the number is reformatted in that format, otherwise the current
representation is kept. If add_check_digit is True a check digit will be
added if it is not present yet."""
# first parse the number
number, cd = _parse(number)
# format conversions if needed
if format == 'dec' and len(number) == 14:
# convert to decimal
number = '%010d%08d' % (int(number[0:8], 16), int(number[8:14], 16))
if cd:
cd = calc_check_digit(number)
elif format == 'hex' and len(number) == 18:
# convert to hex
number = '%08X%06X' % (int(number[0:10]), int(number[10:18]))
if cd:
cd = calc_check_digit(number)
# see if we need to add a check digit
if add_check_digit and not cd:
cd = calc_check_digit(number)
# split number according to format
if len(number) == 14:
number = [number[i * 2:i * 2 + 2]
for i in range(7)] + [cd]
else:
number = (number[:5], number[5:10], number[10:14], number[14:], cd)
return separator.join(x for x in number if x) | 6890ed398eb7c173540c0392b9ed8ef66f8d170b | 16,885 |
def parse_equal_statement(line):
"""Parse super-sequence statements"""
seq_names = line.split()[1:]
return seq_names | ee0de00a990ac10c365af16dccf491b7ea8ed785 | 16,886 |
def B5(n):
"""Factor Variables B5."""
return np.maximum(0, c4(n) - 3 * np.sqrt(1 - c4(n) ** 2)) | bc2fbd91e337310fe5d4326d55440ce2055da650 | 16,887 |
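# Hedged check for B5() above. The c4() constant it relies on is not defined
# in this snippet; the standard control-chart formula
# c4(n) = sqrt(2/(n-1)) * Gamma(n/2) / Gamma((n-1)/2) is assumed below.
import numpy as np
from scipy.special import gamma
def c4(n):
    return np.sqrt(2.0 / (n - 1)) * gamma(n / 2) / gamma((n - 1) / 2)
for n in (5, 10, 25):
    print(n, round(float(B5(n)), 4))  # B5(5) -> 0.0, B5(10) -> ~0.276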
def y_yhat_plots(y, yh, title="y and y_score", y_thresh=0.5):
"""Output plots showing how y and y_hat are related:
the "confusion dots" plot is analogous to the confusion table,
and the standard ROC plot with its AOC value.
The y=1 threshold can be changed with the y_thresh parameter.
"""
# The predicted y value with threshold = y_thresh
y_pred = 1.0 * (yh > y_thresh)
# Show table of actual and predicted counts
crosstab = pd.crosstab(y, y_pred, rownames=[
'Actual'], colnames=[' Predicted'])
print("\nConfusion matrix (y_thresh={:.3f}):\n\n".format(y_thresh),
crosstab)
# Calculate the various metrics and rates
tn = crosstab[0][0]
fp = crosstab[1][0]
fn = crosstab[0][1]
tp = crosstab[1][1]
##print(" tn =",tn)
##print(" fp =",fp)
##print(" fn =",fn)
##print(" tp =",tp)
this_fpr = fp / (fp + tn)
this_fnr = fn / (fn + tp)
this_recall = tp / (tp + fn)
this_precision = tp / (tp + fp)
this_accur = (tp + tn) / (tp + fn + fp + tn)
this_posfrac = (tp + fn) / (tp + fn + fp + tn)
print("\nResults:\n")
print(" False Pos = ", 100.0 * this_fpr, "%")
print(" False Neg = ", 100.0 * this_fnr, "%")
print(" Recall = ", 100.0 * this_recall, "%")
print(" Precision = ", 100.0 * this_precision, "%")
print("\n Accuracy = ", 100.0 * this_accur, "%")
print(" Pos. fract. = ", 100.0 * this_posfrac, "%")
# Put them in a dataframe
ysframe = pd.DataFrame([y, yh, y_pred], index=[
'y', 'y-hat', 'y-pred']).transpose()
# If the yh is discrete (0 and 1s only) then blur it a bit
# for a better visual dots plot
if min(abs(yh - 0.5)) > 0.49:
ysframe["y-hat"] = (0.51 * ysframe["y-hat"]
+ 0.49 * np.random.rand(len(yh)))
# Make a "confusion dots" plot
# Add a blurred y column
ysframe['y (blurred)'] = y + 0.1 * np.random.randn(len(y))
# Plot the real y (blurred) vs the predicted probability
# Note the flipped ylim values.
ysframe.plot.scatter('y-hat', 'y (blurred)', figsize=(12, 5),
s=2, xlim=(0.0, 1.0), ylim=(1.8, -0.8))
# show the "correct" locations on the plot
plt.plot([0.0, y_thresh], [0.0, 0.0], '-',
color='green', linewidth=5)
plt.plot([y_thresh, y_thresh], [0.0, 1.0], '-',
color='gray', linewidth=2)
plt.plot([y_thresh, 1.0], [1.0, 1.0], '-',
color='green', linewidth=5)
plt.title("Confusion-dots Plot: " + title, fontsize=16)
# some labels
ythr2 = y_thresh/2.0
plt.text(ythr2 - 0.03, 1.52, "FN", fontsize=16, color='red')
plt.text(ythr2 + 0.5 - 0.03, 1.52, "TP", fontsize=16, color='green')
plt.text(ythr2 - 0.03, -0.50, "TN", fontsize=16, color='green')
plt.text(ythr2 + 0.5 - 0.03, -0.50, "FP", fontsize=16, color='red')
plt.show()
# Make the ROC curve
# Set the y-hat as the index and sort on it
ysframe = ysframe.set_index('y-hat').sort_index()
# Put y-hat back as a column (but the sorting remains)
ysframe = ysframe.reset_index()
# Initialize the counts for threshold = 0
p_thresh = 0
FN = 0
TN = 0
TP = sum(ysframe['y'])
FP = len(ysframe) - TP
# Assemble the fpr and recall values
recall = []
fpr = []
# Go through each sample in y-hat order,
# advancing the threshold and adjusting the counts
for iprob in range(len(ysframe['y-hat'])):
p_thresh = ysframe.iloc[iprob]['y-hat']
if ysframe.iloc[iprob]['y'] == 0:
FP -= 1
TN += 1
else:
TP -= 1
FN += 1
# Recall and FPR:
recall.append(TP / (TP + FN))
fpr.append(FP / (FP + TN))
# Put recall and fpr in the dataframe
ysframe['Recall'] = recall
ysframe['FPR'] = fpr
# - - - ROC - - - could be separate routine
zoom_in = False
# Calculate the area under the ROC
roc_area = 0.0
for ifpr in range(1, len(fpr)):
# add on the bit of area (note sign change, going from high fpr to low)
roc_area += 0.5 * (recall[ifpr] + recall[ifpr - 1]
) * (fpr[ifpr - 1] - fpr[ifpr])
plt.figure(figsize=(8, 8))
plt.title("ROC: " + title, size=16)
plt.plot(fpr, recall, '-b')
# Set the scales
if zoom_in:
plt.xlim(0.0, 0.10)
plt.ylim(0.0, 0.50)
else:
# full range:
plt.xlim(0.0, 1.0)
plt.ylim(0.0, 1.0)
# The reference line
plt.plot([0., 1.], [0., 1.], '--', color='orange')
# The point at the y_hat = y_tresh threshold
if True:
plt.plot([this_fpr], [this_recall], 'o', c='blue', markersize=15)
plt.xlabel('False Postive Rate', size=16)
plt.ylabel('Recall', size=16)
plt.annotate('y_hat = {:.2f}'.format(y_thresh),
xy=(this_fpr + 0.015,
this_recall), size=14, color='blue')
plt.annotate(' Pos.Fraction = ' +
' {:.0f}%'.format(100 * this_posfrac),
xy=(this_fpr + 0.02, this_recall - 0.03),
size=14, color='blue')
# Show the ROC area (shows on zoomed-out plot)
plt.annotate('ROC Area = ' + str(roc_area)
[:5], xy=(0.4, 0.1), size=16, color='blue')
# Show the plot
plt.show()
return ysframe | 82a8154bd618cc1451a44b2b42fca0407e9979cb | 16,888 |
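# Hedged usage sketch for y_yhat_plots() above with synthetic labels and
# scores (the function assumes numpy, pandas and matplotlib are imported as
# np, pd and plt).
import numpy as np
rng = np.random.RandomState(0)
y_true = rng.binomial(1, 0.3, size=500)
y_score = np.clip(0.3 * y_true + 0.6 * rng.rand(500), 0.0, 1.0)  # noisy scores
frame = y_yhat_plots(y_true, y_score, title="synthetic demo", y_thresh=0.5)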
def _derive_scores(model, txt_file, base_words):
"""
Takes a model, a text file, and a list of base words.
Returns a dict of {base_word: score}, where score is an integer between 0
and 100 which represents the average similarity of the text to the given
word.
"""
with open(txt_file, 'r') as f:
text = f.read()
words = sample_words(text)
# This is a list of dicts of the form {base_word: score}.
raw_scores = [_single_word_score(model, base_words, word) for word in words]
summed_scores = {}
for base_word in base_words:
summed_scores[base_word] = sum([item[base_word] for item in raw_scores])
summed_scores[base_word] = round(
100 * summed_scores[base_word] / len(words)
)
return summed_scores | 9c377b5dec742f5ba174f780252fd78d162a8713 | 16,889 |
import os
def verifyRRD(fix_rrd=False):
"""
Go through all known monitoring rrds and verify that they
match existing schema (could be different if an upgrade happened)
If fix_rrd is true, then also attempt to add any missing attributes.
"""
global rrd_problems_found
global monitorAggregatorConfig
mon_dir = monitorAggregatorConfig.monitor_dir
status_dict = {}
completed_stats_dict = {}
completed_waste_dict = {}
counts_dict = {}
# initialize the RRD dictionaries to match the current schema for verification
for tp in list(status_attributes.keys()):
if tp in list(type_strings.keys()):
tp_str = type_strings[tp]
attributes_tp = status_attributes[tp]
for a in attributes_tp:
status_dict[f"{tp_str}{a}"] = None
for jobrange in glideFactoryMonitoring.getAllJobRanges():
completed_stats_dict[f"JobsNr_{jobrange}"] = None
for timerange in glideFactoryMonitoring.getAllTimeRanges():
completed_stats_dict[f"Lasted_{timerange}"] = None
completed_stats_dict[f"JobsLasted_{timerange}"] = None
for jobtype in glideFactoryMonitoring.getAllJobTypes():
for timerange in glideFactoryMonitoring.getAllMillRanges():
completed_waste_dict[f"{jobtype}_{timerange}"] = None
for jobtype in ("Entered", "Exited", "Status"):
for jobstatus in ("Wait", "Idle", "Running", "Held"):
counts_dict[f"{jobtype}{jobstatus}"] = None
for jobstatus in ("Completed", "Removed"):
counts_dict["{}{}".format("Entered", jobstatus)] = None
# FROM: lib2to3.fixes.fix_ws_comma
# completed_waste_dict["%s_%s"%(jobtype, timerange)]=None
#
# for jobtype in ('Entered', 'Exited', 'Status'):
# for jobstatus in ('Wait', 'Idle', 'Running', 'Held'):
# counts_dict["%s%s"%(jobtype, jobstatus)]=None
# for jobstatus in ('Completed', 'Removed'):
# counts_dict["%s%s"%('Entered', jobstatus)]=None
#
# verifyHelper(os.path.join(total_dir,
# "Status_Attributes.rrd"), status_dict, fix_rrd)
# verifyHelper(os.path.join(total_dir,
# "Log_Completed.rrd"),
# glideFactoryMonitoring.getLogCompletedDefaults(), fix_rrd)
# verifyHelper(os.path.join(total_dir,
# "Log_Completed_Stats.rrd"), completed_stats_dict, fix_rrd)
# verifyHelper(os.path.join(total_dir,
# "Log_Completed_WasteTime.rrd"), completed_waste_dict, fix_rrd)
# verifyHelper(os.path.join(total_dir,
# "Log_Counts.rrd"), counts_dict, fix_rrd)
# for filename in os.listdir(dir):
# if filename[:6]=="entry_":
# entrydir=os.path.join(dir, filename)
# for subfilename in os.listdir(entrydir):
# if subfilename[:9]=="frontend_":
# current_dir=os.path.join(entrydir, subfilename)
# verifyHelper(os.path.join(current_dir,
# "Status_Attributes.rrd"), status_dict, fix_rrd)
# verifyHelper(os.path.join(current_dir,
# "Log_Completed.rrd"),
# glideFactoryMonitoring.getLogCompletedDefaults(), fix_rrd)
# verifyHelper(os.path.join(current_dir,
# "Log_Completed_Stats.rrd"), completed_stats_dict, fix_rrd)
# verifyHelper(os.path.join(current_dir,
# "Log_Completed_WasteTime.rrd"),
# completed_waste_dict, fix_rrd)
# verifyHelper(os.path.join(current_dir,
# "Log_Counts.rrd"), counts_dict, fix_rrd)
# return not rrd_problems_found
completed_dict = glideFactoryMonitoring.getLogCompletedDefaults()
rrdict = {
"Status_Attributes.rrd": status_dict,
"Log_Completed.rrd": completed_dict,
"Log_Completed_Stats.rrd": completed_stats_dict,
"Log_Completed_WasteTime.rrd": completed_waste_dict,
"Log_Counts.rrd": counts_dict,
}
for dir_name, sdir_name, f_list in os.walk(mon_dir):
for file_name in f_list:
if file_name in list(rrdict.keys()):
verifyHelper(os.path.join(dir_name, file_name), rrdict[file_name], fix_rrd)
return not rrd_problems_found | 485842bcf262059125b9d9d6af91bb7c97b82704 | 16,890 |
def features_ids_argument_parser() -> ArgumentParser:
"""
Creates a parser suitable to parse the argument describing features ids in different subparsers
"""
parser = ArgumentParser(add_help=False, parents=[collection_option_parser()])
parser.add_argument(FEATURES_IDS_ARGNAME, nargs='+',
help='features identifiers or features UUIDs')
return parser | df24ebaff182c88c7ad6cf38e2e4a5784d54a48b | 16,891 |
import cv2
import numpy as np
def isolate_blue_blocks(image, area_min=10, side_ratio=0.5):
    """Return a sequence of masks on the original area showing significant blocks of blue."""
    # blue() is an external helper expected to return a boolean "is blue" mask
    contours, _ = cv2.findContours(
        blue(image).astype(np.uint8) * 255, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE
    )
    rects = []
    for c in contours:
        x, y, w, h = cv2.boundingRect(c)
        if min(w, h) / max(w, h) > side_ratio and cv2.contourArea(c) > area_min:
            rects.append((x, y, w, h))
    # Build one boolean mask per retained bounding rectangle
    masks = []
    for x, y, w, h in rects:
        mask = np.zeros(image.shape[:2], dtype=bool)
        mask[y:y + h, x:x + w] = True
        masks.append(mask)
    return masks | 7cd6e895750f208e18c60a48d2bc0e8d1710d6c0 | 16,892
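# Hedged driver sketch for isolate_blue_blocks() above. The blue() helper is
# not defined in this snippet; the HSV threshold below is an assumed stand-in
# returning a boolean "is blue" mask, not the original implementation.
import cv2
import numpy as np
def blue(image):
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    return cv2.inRange(hsv, (100, 100, 50), (130, 255, 255)) > 0
frame = np.zeros((120, 160, 3), dtype=np.uint8)
frame[20:60, 30:90] = (255, 0, 0)        # a solid blue rectangle (BGR)
for mask in isolate_blue_blocks(frame):
    print(mask.shape, int(mask.sum()))   # one mask covering the 40x60 block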
from typing import Union
from io import StringIO
from pathlib import Path
from typing import Optional
from typing import Dict
from typing import Callable
from typing import List
from typing import Tuple
def read_gtf(
filepath_or_buffer: Union[str, StringIO, Path],
expand_attribute_column: bool = True,
infer_biotype_column: bool = False,
column_converters: Optional[Dict[str, Callable[..., str]]] = None,
usecols: Optional[List[str]] = None,
    features: Optional[Tuple[str, ...]] = None,
chunksize: int = 1024 * 1024,
) -> pd.DataFrame:
"""
Parse a GTF into a dictionary mapping column names to sequences of values.
Parameters
----------
filepath_or_buffer : str or buffer object
Path to GTF file (may be gzip compressed) or buffer object
such as StringIO
expand_attribute_column : bool
        Replace strings of semicolon-separated key-value pairs in the
'attribute' column with one column per distinct key, with a list of
values for each row (using None for rows where key didn't occur).
infer_biotype_column : bool
Due to the annoying ambiguity of the second GTF column across multiple
Ensembl releases, figure out if an older GTF's source column is actually
the gene_biotype or transcript_biotype.
column_converters : dict, optional
Dictionary mapping column names to conversion functions. Will replace
empty strings with None and otherwise passes them to given conversion
function.
usecols : list of str or None
Restrict which columns are loaded to the give set. If None, then
load all columns.
features : set of str or None
Drop rows which aren't one of the features in the supplied set
    chunksize : int
        Number of rows to read and parse per chunk.
"""
if isinstance(filepath_or_buffer, str):
filepath_or_buffer = Path(filepath_or_buffer)
if isinstance(filepath_or_buffer, Path) and not filepath_or_buffer.exists():
logger.exception(f"GTF file does not exist: {filepath_or_buffer}")
raise FileNotFoundError
if expand_attribute_column:
result_df = parse_gtf_and_expand_attributes(
filepath_or_buffer, chunksize=chunksize, restrict_attribute_columns=usecols
)
else:
result_df = parse_gtf(
filepath_or_buffer, chunksize=chunksize, features=features
)
if column_converters:
for column_name in column_converters:
result_df[column_name] = result_df[column_name].astype(
column_converters[column_name], errors="ignore"
)
# Hackishly infer whether the values in the 'source' column of this GTF
# are actually representing a biotype by checking for the most common
# gene_biotype and transcript_biotype value 'protein_coding'
if infer_biotype_column:
unique_source_values = result_df["source"].unique()
if "protein_coding" in unique_source_values:
column_names = result_df.columns.unique()
# Disambiguate between the two biotypes by checking if
# gene_biotype is already present in another column. If it is,
# the 2nd column is the transcript_biotype (otherwise, it's the
# gene_biotype)
if "gene_biotype" not in column_names:
logger.info("Using column 'source' to replace missing 'gene_biotype'")
result_df["gene_biotype"] = result_df["source"]
if "transcript_biotype" not in column_names:
logger.info(
"Using column 'source' to replace missing 'transcript_biotype'"
)
result_df["transcript_biotype"] = result_df["source"]
if usecols is not None:
column_names = result_df.columns.unique()
valid_columns = [c for c in usecols if c in column_names]
result_df = result_df[valid_columns]
return result_df | c7478d88ce5d6d6e823bf28c965f366b9b8d1522 | 16,893 |
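# Hedged usage sketch for read_gtf() above. The path is hypothetical; any
# Ensembl-style GTF (optionally gzip-compressed) should work, and the column
# names below assume the usual GTF core/attribute fields.
df = read_gtf(
    "annotations/Homo_sapiens.GRCh38.104.gtf.gz",  # hypothetical path
    usecols=["seqname", "feature", "gene_id", "gene_name"],
)
print(df["feature"].value_counts().head())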
from whoosh.filedb.filestore import FileStorage
from whoosh.index import version
def version_in(dirname, indexname = None):
"""Returns a tuple of (release_version, format_version), where
release_version is the release version number of the Whoosh code that
created the index -- e.g. (0, 1, 24) -- and format_version is the
version number of the on-disk format used for the index -- e.g. -102.
The second number (format version) may be useful for figuring out if you
need to recreate an index because the format has changed. However, you
can just try to open the index and see if you get an IndexVersionError
exception.
Note that the release and format version are available as attributes
on the Index object in Index.release and Index.version.
:param dirname: the file path of a directory containing an index.
:param indexname: the name of the index. If None, the default index name is used.
:returns: ((major_ver, minor_ver, build_ver), format_ver)
"""
storage = FileStorage(dirname)
return version(storage, indexname=indexname) | 25b9f2416fa5d0213b24785aedbd5afd43edfba6 | 16,894 |
def trimAlphaNum(value):
"""
Trims alpha numeric characters from start and ending of a given value
>>> trimAlphaNum(u'AND 1>(2+3)-- foobar')
u' 1>(2+3)-- '
"""
while value and value[-1].isalnum():
value = value[:-1]
while value and value[0].isalnum():
value = value[1:]
return value | e9d44ea5dbe0948b9db0c71a5ffcdd5c80e95746 | 16,895 |
def hrm_job_title_represent(id, row=None):
""" FK representation """
if row:
return row.name
elif not id:
return current.messages.NONE
db = current.db
table = db.hrm_job_title
r = db(table.id == id).select(table.name,
limitby = (0, 1)).first()
try:
return r.name
except:
return current.messages.UNKNOWN_OPT | ca3d2bfb4056b28b712f4cbf37c8c91d840c0161 | 16,896 |
def is_empty_array_expr(ir: irast.Base) -> bool:
"""Return True if the given *ir* expression is an empty array expression.
"""
return (
isinstance(ir, irast.Array)
and not ir.elements
) | dcf3775e7544ad64e9a533238a9549ed21dc3393 | 16,897 |
def get_raw_entity_names_from_annotations(annotations):
"""
Args:
        annotations: entity-linking annotations of the utterance
Returns:
Wikidata entities we received from annotations
"""
raw_el_output = annotations.get("entity_linking", [{}])
entities = []
try:
if raw_el_output:
if isinstance(raw_el_output[0], dict):
entities = raw_el_output[0].get("entity_ids", [])
if isinstance(raw_el_output[0], list):
entities = raw_el_output[0][0]
except Exception as e:
error_message = f"Wrong entity linking output format {raw_el_output} : {e}"
sentry_sdk.capture_exception(e)
logger.exception(error_message)
return entities | 482be69ef5fec52b70ade4839b48ae2f4155033b | 16,898 |
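# Hedged usage sketch for get_raw_entity_names_from_annotations() above,
# showing the two entity-linking output shapes the function accepts.
dict_style = {"entity_linking": [{"entity_ids": ["Q90", "Q142"]}]}
list_style = {"entity_linking": [[["Q90", "Q142"]]]}
print(get_raw_entity_names_from_annotations(dict_style))  # ['Q90', 'Q142']
print(get_raw_entity_names_from_annotations(list_style))  # ['Q90', 'Q142']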
def nextPara(file, line):
"""Go forward one paragraph from the specified line and return the line
number of the first line of that paragraph.
Paragraphs are delimited by blank lines. It is assumed that the
current line is standalone (which is bogus).
- file is an array of strings
- line is the starting point (zero-based)"""
maxLine = len(file) - 1
# Skip over current paragraph
while (line != maxLine and not isempty(file[line])):
line = line + 1
# Skip over white space
while (line != maxLine and isempty(file[line])):
line = line + 1
return line | a104042225bc9404a5b5fe8f5410a3021e45f64d | 16,899 |
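# Hedged usage sketch for nextPara() above. The isempty() helper it calls is
# not part of this snippet; treating blank or whitespace-only lines as empty
# is an assumption made for illustration.
def isempty(s):
    return s.strip() == ""
lines = ["First paragraph, line one.",
         "First paragraph, line two.",
         "",
         "Second paragraph starts here.",
         "More of the second paragraph."]
print(nextPara(lines, 0))  # -> 3, the first line of the next paragraph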