content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M)
---|---|---|
def partition(arr, left, right):
"""[summary]
The point of a pivot value is to select a value,
find out where it belongs in the array while moving everything lower than that value
to the left, and everything higher to the right.
Args:
arr ([array]): [Unorderd array]
left ([int]): [Left index of the array]
right ([int]): [Right index of the array]
Returns:
[int]: [the value of the lowest element]
"""
pivot = arr[right]
low = left - 1
for current in range(left, right):
if arr[current] <= pivot:
low += 1
swap(arr, current, low)
swap(arr, right, low + 1)
return low + 1 | 30f1448861a7a9fa2f119e31482f1f715c9e1ce0 | 11,600 |
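A minimal quicksort sketch built on this partition step; the `swap` helper is assumed here since it is not part of the snippet:

def swap(arr, i, j):
    # exchange two elements in place
    arr[i], arr[j] = arr[j], arr[i]

def quicksort(arr, left, right):
    # sort arr[left:right + 1] in place by recursing on both sides of the pivot index
    if left < right:
        pivot_index = partition(arr, left, right)
        quicksort(arr, left, pivot_index - 1)
        quicksort(arr, pivot_index + 1, right)

data = [5, 2, 9, 1]
quicksort(data, 0, len(data) - 1)
print(data)  # [1, 2, 5, 9]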
def graphatbottleneck(g,m,shallfp=True):
"""handles the bottleneck transformations for a pure graph ae, return g, compressed, new input, shallfp=True=>convert vector in matrix (with gfromparam), can use redense to add a couple dense layers around the bottleneck (defined by m.redense*)"""
comp=ggoparam(gs=g.s.gs,param=g.s.param)([g.X])
if m.shallredense:
for e in m.redenseladder:
comp=Dense(e,activation=m.redenseactivation,kernel_initializer=m.redenseinit)(comp)
inn2=Input(m.redenseladder[-1])
use=inn2
for i in range(len(m.redenseladder)-1,-1,-1):
use=Dense(m.redenseladder[i],activation=m.redenseactivation,kernel_initializer=m.redenseinit)(use)
use=Dense(g.s.gs*g.s.param,activation=m.redenseactivation,kernel_initializer=m.redenseinit)(use)
else:
inn2=Input(g.s.gs*g.s.param)
use=inn2
if shallfp:
taef1=gfromparam(gs=g.s.gs,param=g.s.param)([use])
else:
taef1=inn2
g.X=taef1
g.A=None
return g,comp,inn2 | b46967b40fce669c3e74d52e31f814bbf96ce8c0 | 11,601 |
import pandas as pd
def categorical_onehot_binarizer(feature, feature_scale=None, prefix='columns', dtype='int8'):
"""Transform between iterable of iterables and a multilabel format, sample is simple categories.
Args:
feature: pd.Series, sample feature.
feature_scale: list, feature categories list.
prefix: String to append DataFrame column names.
dtype: default np.uint8. Data type for new columns. Only a single dtype is allowed.
Returns:
Dataframe for onehot binarizer.
"""
assert not any(feature.isnull()), "`feature` must not contain NaN"
scale = feature.drop_duplicates().tolist()
if feature_scale is not None:
t = pd.get_dummies(feature.replace({i:'temp_str' for i in set.difference(set(scale), set(feature_scale))}), prefix=prefix, dtype=dtype)
if prefix+'_temp_str' in t.columns:
t = t.drop([prefix+'_temp_str'], axis=1)
for i in set.difference(set(feature_scale), set(scale)):
if prefix+'_'+str(i) not in t.columns:
t[prefix+'_'+str(i)] = 0
scale = feature_scale
t = t[[prefix+'_'+str(i) for i in feature_scale]]
else:
t = pd.get_dummies(feature, prefix=prefix, dtype=dtype)
t = t[[prefix+'_'+str(i) for i in scale]]
return t, scale | eb3a2b38d323c72bb298b64ebbd6567d143471fc | 11,602 |
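A minimal usage sketch, assuming pandas is imported as pd and the function is in scope:

import pandas as pd

s = pd.Series(['cat', 'dog', 'cat'])
t, scale = categorical_onehot_binarizer(s, prefix='animal')
# t has columns ['animal_cat', 'animal_dog'] with 0/1 values; scale == ['cat', 'dog']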
import subprocess
import shlex
def mtp_file_list2():
"""
Returns the output of 'mtp-files' as a Python list.
Uses subprocess.
"""
cmd_str = "sudo mtp-files"
try: result = subprocess.check_output(shlex.split(cmd_str))
except subprocess.CalledProcessError as e:
log.error("Could not execute: %s" % str(e))
return False, None
the_files = parse_files(result)
return True, the_files | 530ecea3ee8cd3c30e03c5554ea04f7c602d75fa | 11,603 |
def add_selfloops(adj_matrix: sp.csr_matrix, fill_weight=1.0):
"""add selfloops for adjacency matrix.
>>> add_selfloops(adj, fill_weight=1.0)  # return an adjacency matrix with selfloops
>>> add_selfloops(adj, fill_weight=[1.0, 2.0])  # return a list of adjacency matrices with selfloops
Parameters
----------
adj_matrix: Scipy matrix or Numpy array or a list of them
Single or a list of Scipy sparse matrices or Numpy arrays.
fill_weight: float scalar, optional.
weight of self loops for the adjacency matrix.
Returns
-------
Single or a list of Scipy sparse matrix or Numpy matrices.
See also
----------
graphgallery.functional.AddSelfloops
"""
def _add_selfloops(adj, w):
adj = eliminate_selfloops(adj)
if w:
return adj + w * sp.eye(adj.shape[0], dtype=adj.dtype, format='csr')
else:
return adj
if gg.is_listlike(fill_weight):
return tuple(_add_selfloops(adj_matrix, w) for w in fill_weight)
else:
return _add_selfloops(adj_matrix, fill_weight) | 867bdf380995b6ff48aac9741facd09066ad03bd | 11,604 |
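Since the full function depends on `gg` and `eliminate_selfloops`, here is a standalone sketch of only the core self-loop addition it performs:

import numpy as np
import scipy.sparse as sp

adj = sp.csr_matrix(np.array([[0., 1.], [1., 0.]]))
with_loops = adj + 1.0 * sp.eye(adj.shape[0], dtype=adj.dtype, format='csr')
print(with_loops.toarray())  # [[1. 1.], [1. 1.]]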
def handle(event, _ctxt):
""" Handle the Lambda Invocation """
response = {
'message': '',
'event': event
}
ssm = boto3.client('ssm')
vpc_ids = ssm.get_parameter(Name=f'{PARAM_BASE}/vpc_ids')['Parameter']['Value']
vpc_ids = vpc_ids.split(',')
args = {
'vpc_ids': vpc_ids
}
try:
sg_name = ssm.get_parameter(Name=f'{PARAM_BASE}/secgrp_name')['Parameter']['Value']
args['managed_sg_name'] = sg_name
except botocore.exceptions.ClientError as ex:
if ex.response['Error']['Code'] == 'ParameterNotFound':
pass
else:
print(ex)
return response
run(**args)
return response | 8cf0dc52b641bd28b002caef1d97c7e3a60be647 | 11,605 |
def _get_indice_map(chisqr_set):
"""Find element with lowest chisqr at each voxel """
#make chisqr array of dims [x,y,z,0,rcvr,chisqr]
chisqr_arr = np.stack(chisqr_set,axis=5)
indice_arr = np.argmin(chisqr_arr,axis=5)
return indice_arr | 9ac00310628d3f45f72542dbfff5345845053acd | 11,606 |
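A toy illustration of the stack-and-argmin pattern, using two hypothetical chi-square maps over a 2x2 volume:

import numpy as np

chisqr_set = [np.full((2, 2, 1, 1, 1), 3.0), np.full((2, 2, 1, 1, 1), 1.0)]
chisqr_arr = np.stack(chisqr_set, axis=5)   # shape (2, 2, 1, 1, 1, 2)
indice_arr = np.argmin(chisqr_arr, axis=5)  # all entries are 1: the second map has the lower chisqr everywhere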
import sys
def _train(params, fpath, hyperopt=False):
"""
:param params: hyperparameters. Its structure is consistent with how search space is defined. See below.
:param fpath: Path or URL for the training data used with the model.
:param hyperopt: Use hyperopt for hyperparameter search during training.
:return: dict with fields 'loss' (scalar loss) and 'status' (success/failure status of run)
"""
max_depth, max_features, n_estimators = params
max_depth, max_features, n_estimators = (int(max_depth), float(max_features), int(n_estimators))
# Log all of our training parameters for this run.
pyver = sys.version_info
mlparams = {
'cudf_version': str(cudf.__version__),
'cuml_version': str(cuml.__version__),
'max_depth': str(max_depth),
'max_features': str(max_features),
'n_estimators': str(n_estimators),
'python_version': f"{pyver[0]}.{pyver[1]}.{pyver[2]}.{pyver[3]}",
}
mlflow.log_params(mlparams)
X_train, X_test, y_train, y_test = load_data(fpath)
mod = RandomForestClassifier(
max_depth=max_depth, max_features=max_features, n_estimators=n_estimators
)
mod.fit(X_train, y_train)
preds = mod.predict(X_test)
acc = accuracy_score(y_test, preds)
mlflow.log_metric("accuracy", acc)
mlflow.sklearn.log_model(mod, "saved_models")
if not hyperopt:
return mod
return {"loss": acc, "status": STATUS_OK} | 2ed5d8c0a7f688f0babb187f3aee71c83f22b6f9 | 11,607 |
def noct_synthesis(spectrum, freqs, fmin, fmax, n=3, G=10, fr=1000):
"""Adapt input spectrum to nth-octave band spectrum
Convert the input spectrum to third-octave band spectrum
between "fc_min" and "fc_max".
Parameters
----------
spectrum : numpy.ndarray
amplitude rms of the one-sided spectrum of the signal, size (nperseg, nseg).
freqs : numpy.ndarray
Array of input frequencies, size (nperseg) or (nperseg, nseg).
fmin : float
Min frequency band [Hz].
fmax : float
Max frequency band [Hz].
n : int
Number of bands per octave.
G : int
System for specifying the exact geometric mean frequencies.
Can be base 2 or base 10.
fr : int
Reference frequency. Shall be set to 1 kHz for audible frequency
range, to 1 Hz for infrasonic range (f < 20 Hz) and to 1 MHz for
ultrasonic range (f > 31.5 kHz).
Outputs
-------
spec : numpy.ndarray
Third octave band spectrum of signal sig [dB re.2e-5 Pa], size (nbands, nseg).
fpref : numpy.ndarray
Corresponding preferred third octave band center frequencies, size (nbands).
"""
# Get filters center frequencies
fc_vec, fpref = _center_freq(fmin=fmin, fmax=fmax, n=n, G=G, fr=fr)
nband = len(fpref)
if len(spectrum.shape) > 1:
nseg = spectrum.shape[1]
spec = np.zeros((nband, nseg))
if len(freqs.shape) == 1:
freqs = np.tile(freqs, (nseg, 1)).T
else:
nseg = 1
spec = np.zeros((nband))
# Frequency resolution
# df = freqs[1:] - freqs[:-1]
# df = np.concatenate((df, [df[-1]]))
# Get upper and lower frequencies
fu = fc_vec * 2**(1/(2*n))
fl = fc_vec / 2**(1/(2*n))
for s in range(nseg):
for i in range(nband):
if len(spectrum.shape) > 1:
# index of the frequencies within the band
idx = np.where((freqs[:, s] >= fl[i]) & (freqs[:, s] < fu[i]))
spec[i, s] = np.sqrt(
np.sum(np.power(np.abs(spectrum[idx,s]), 2)))
else:
# index of the frequencies within the band
idx = np.where((freqs >= fl[i]) & (freqs < fu[i]))
spec[i] = np.sqrt(np.sum(np.abs(spectrum[idx])**2))
return spec, fpref | 89c6be2be262b153bd63ecf498cf92cf93de9e31 | 11,608 |
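A quick check of the band-edge formula used above (fu = fc * 2**(1/(2n)), fl = fc / 2**(1/(2n))) for third-octave bands:

import numpy as np

fc = np.array([500.0, 1000.0, 2000.0])   # example centre frequencies in Hz
n = 3                                    # third-octave bands
fu = fc * 2 ** (1 / (2 * n))             # upper edges: ~561.2, ~1122.5, ~2244.9 Hz
fl = fc / 2 ** (1 / (2 * n))             # lower edges: ~445.4, ~890.9, ~1781.8 Hz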
def get_model_prediction(model_input, stub, model_name='amazon_review', signature_name='serving_default'):
""" no error handling at all, just poc"""
request = predict_pb2.PredictRequest()
request.model_spec.name = model_name
request.model_spec.signature_name = signature_name
request.inputs['input_input'].CopyFrom(tf.make_tensor_proto(model_input))
response = stub.Predict.future(request, 5.0) # 5 seconds
return response.result().outputs["output"].float_val | 6de7e35305e0d9fe9fe0b03e3b0ab0c82937778c | 11,609 |
def _make_feature_stats_proto(
common_stats, feature_name,
q_combiner,
num_values_histogram_buckets,
is_categorical, has_weights
):
"""Convert the partial common stats into a FeatureNameStatistics proto.
Args:
common_stats: The partial common stats associated with a feature.
feature_name: The name of the feature.
q_combiner: The quantiles combiner used to construct the quantiles
histogram for the number of values in the feature.
num_values_histogram_buckets: Number of buckets in the quantiles
histogram for the number of values per feature.
is_categorical: A boolean indicating whether the feature is categorical.
has_weights: A boolean indicating whether a weight feature is specified.
Returns:
A statistics_pb2.FeatureNameStatistics proto.
"""
common_stats_proto = statistics_pb2.CommonStatistics()
common_stats_proto.num_non_missing = common_stats.num_non_missing
common_stats_proto.num_missing = common_stats.num_missing
common_stats_proto.tot_num_values = common_stats.total_num_values
if common_stats.num_non_missing > 0:
common_stats_proto.min_num_values = common_stats.min_num_values
common_stats_proto.max_num_values = common_stats.max_num_values
common_stats_proto.avg_num_values = (
common_stats.total_num_values / common_stats.num_non_missing)
# Add num_values_histogram to the common stats proto.
num_values_quantiles = q_combiner.extract_output(
common_stats.num_values_summary)
histogram = quantiles_util.generate_quantiles_histogram(
num_values_quantiles, common_stats.min_num_values,
common_stats.max_num_values, common_stats.num_non_missing,
num_values_histogram_buckets)
common_stats_proto.num_values_histogram.CopyFrom(histogram)
# Add weighted common stats to the proto.
if has_weights:
weighted_common_stats_proto = statistics_pb2.WeightedCommonStatistics(
num_non_missing=common_stats.weighted_num_non_missing,
num_missing=common_stats.weighted_num_missing,
tot_num_values=common_stats.weighted_total_num_values)
if common_stats.weighted_num_non_missing > 0:
weighted_common_stats_proto.avg_num_values = (
common_stats.weighted_total_num_values /
common_stats.weighted_num_non_missing)
common_stats_proto.weighted_common_stats.CopyFrom(
weighted_common_stats_proto)
# Create a new FeatureNameStatistics proto.
result = statistics_pb2.FeatureNameStatistics()
result.name = feature_name
# Set the feature type.
# If we have a categorical feature, we preserve the type to be the original
# INT type. Currently we don't set the type if we cannot infer it, which
# happens when all the values are missing. We need to add an UNKNOWN type
# to the stats proto to handle this case.
if is_categorical:
result.type = statistics_pb2.FeatureNameStatistics.INT
elif common_stats.type is None:
# If a feature is completely missing, we assume the type to be STRING.
result.type = statistics_pb2.FeatureNameStatistics.STRING
else:
result.type = common_stats.type
# Copy the common stats into appropriate numeric/string stats.
# If the type is not set, we currently wrap the common stats
# within numeric stats.
if (result.type == statistics_pb2.FeatureNameStatistics.STRING or
is_categorical):
# Add the common stats into string stats.
string_stats_proto = statistics_pb2.StringStatistics()
string_stats_proto.common_stats.CopyFrom(common_stats_proto)
result.string_stats.CopyFrom(string_stats_proto)
else:
# Add the common stats into numeric stats.
numeric_stats_proto = statistics_pb2.NumericStatistics()
numeric_stats_proto.common_stats.CopyFrom(common_stats_proto)
result.num_stats.CopyFrom(numeric_stats_proto)
return result | 16b55556d76f5d5cb01f2dc3142b42a86f85bcb8 | 11,610 |
from typing import Optional
import requests
def serial_chunked_download(
d_obj: Download,
end_action: Optional[Action] = None,
session: Optional[requests.Session] = None,
*,
progress_data: Optional[DownloadProgressSave] = None,
start: int = 0,
end: int = 0,
chunk_id: Optional[int] = 0,
) -> bool:
"""Downloads a file using a single connection getting a chunk at a time
"""
splits = None
if start == 0 and end == 0:
if progress_data is None:
# new download
d_obj.init_size()
d_obj.init_file([Chunk(0, d_obj.size - 1, -1)])
nb_split: int = 0
# TODO: ugly here
if d_obj.split_size != -1:
nb_split = int(d_obj.size / d_obj.split_size) + 1
else:
nb_split = d_obj.nb_split
splits = utils.split(d_obj.size - 1, nb_split)
else:
d_obj.init_file()
# TODO: ugly here
if d_obj.split_size != -1:
nb_split = int(d_obj.size / d_obj.split_size) + 1
else:
nb_split = d_obj.nb_split
splits = utils.split(d_obj.size - 1, nb_split,
progress_data.chunks[0].last)
else:
# coming from serial_parralel_chunked
if d_obj.split_size != -1:
nb_split = int(d_obj.size / d_obj.split_size) + 1
else:
nb_split = d_obj.nb_split
splits = utils.split(end, nb_split, start)
for split in splits:
get_chunk(d_obj.url, split, d_obj, chunk_id, session)
if d_obj.has_error or d_obj.is_stopped():
return False
if not d_obj.is_paused():
if end_action is not None:
end_action()
if end == 0 and start == 0:
d_obj.finish()
return True | 3307407f25e2d68700697953122ef08acd92f069 | 11,611 |
def get_device():
"""
Returns the id of the current device.
"""
c_dev = c_int_t(0)
safe_call(backend.get().af_get_device(c_pointer(c_dev)))
return c_dev.value | 4be37aa83bf822aac794680d9f30fe24edb38231 | 11,612 |
def plotLatentsSweep(yhat,nmodels=1):
"""plotLatentsSweep(yhat):
plots model latents generated from sweepCircleLatents()
---e.g.,---
yhat, x = sweepCircleLatents(vae)
plotLatentsSweep(yhat)
alternatively,
plotLatentsSweep(sweepCircleLatents(vae))
"""
# Initialization
if type(yhat) is tuple:
yhat = yhat[0]
# Start a-plottin'
fig, ax = plt.subplots(nmodels,4,figsize=(9, 15), dpi= 80, facecolor='w', edgecolor='k', sharey='row',sharex='col')
for latentdim in range(4):
if nmodels > 1:
for imodel in range(nmodels):
plt.sca(ax[imodel,latentdim])
plt.plot(yhat[imodel][latentdim*16+np.arange(0,16),:].detach().numpy())
# ax[imodel,latentdim].set_aspect(1./ax[imodel,latentdim].get_data_ratio())
ax[imodel,latentdim].spines['top'].set_visible(False)
ax[imodel,latentdim].spines['right'].set_visible(False)
if latentdim>0:
ax[imodel,latentdim].spines['left'].set_visible(False)
# ax[imodel,latentdim].set_yticklabels([])
ax[imodel,latentdim].tick_params(axis='y', length=0)
# if imodel<nmodels-1 or latentdim>0:
ax[imodel,latentdim].spines['bottom'].set_visible(False)
ax[imodel,latentdim].set_xticklabels([])
ax[imodel,latentdim].tick_params(axis='x', length=0)
else:
imodel=0
plt.sca(ax[latentdim])
plt.plot(yhat[latentdim*16+np.arange(0,16),:].detach().numpy())
ax[latentdim].set_aspect(1./ax[latentdim].get_data_ratio())
ax[latentdim].spines['top'].set_visible(False)
ax[latentdim].spines['right'].set_visible(False)
if latentdim>0:
ax[latentdim].spines['left'].set_visible(False)
ax[latentdim].tick_params(axis='y', length=0)
# if imodel<nmodels-1 or latentdim>0:
ax[latentdim].spines['bottom'].set_visible(False)
ax[latentdim].set_xticklabels([])
ax[latentdim].tick_params(axis='x', length=0)
return fig, ax | f43ffd9b45981254a550c8b187da649522522dd0 | 11,613 |
def calc_lipophilicity(seq, method="mean"):
""" Calculates the average hydrophobicity of a sequence according to the Hessa biological scale.
Hessa T, Kim H, Bihlmaier K, Lundin C, Boekel J, Andersson H, Nilsson I, White SH, von Heijne G. Nature. 2005 Jan 27;433(7024):377-81
The Hessa scale has been calculated empirically, using the glycosylation assay of TMD insertion.
Negative values indicate hydrophobic amino acids with favourable membrane insertion.
Other hydrophobicity scales are in the settings folder. They can be generated as follows.
hydrophob_scale_path = r"D:\korbinian\korbinian\settings\hydrophobicity_scales.xlsx"
df_hs = pd.read_excel(hydrophob_scale_path, skiprows=2)
df_hs.set_index("1aa", inplace=True)
dict_hs = df_hs.Hessa.to_dict()
hessa_scale = np.array([value for (key, value) in sorted(dict_hs.items())])
['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K',
'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V',
'W', 'Y']
Parameters:
-----------
seq : string
Sequence to be analysed. Gaps (-) and unknown amino acids (x) should be ignored.
method : string
Method to be used to average the hydrophobicity values over the whole sequence.
The hydrophobicity score is positive for polar/charged aa, negative for hydrophobic aa.
"sum" will return the sum of the hydrophobicity scores over the sequence
"mean" will return the mean of the hydrophobicity scores over the sequence
Returns:
--------
mean hydrophobicity value for the sequence entered
Usage:
------
from korbinian.utils import calc_lipophilicity
# for a single sequence
s = "SAESVGEVYIKSTETGQYLAG"
calc_lipophilicity(s)
# for a series of sequences
TMD_ser = df2.TM01_SW_match_seq.dropna()
hydro = TMD_ser.apply(lambda x : calc_lipophilicity(x))
Notes:
------
%timeit results:
for a 20aa seq: 136 µs per loop
for a pandas series with 852 tmds: 118 ms per loop
"""
# hydrophobicity scale
hessa_scale = np.array([0.11, -0.13, 3.49, 2.68, -0.32, 0.74, 2.06, -0.6, 2.71,
-0.55, -0.1, 2.05, 2.23, 2.36, 2.58, 0.84, 0.52, -0.31,
0.3, 0.68])
# convert to biopython analysis object
analysed_seq = ProteinAnalysis(seq)
# biopython count_amino_acids returns a dictionary.
aa_counts_dict = analysed_seq.count_amino_acids()
# get the number of AA residues used to calculated the hydrophobicity
# this is not simply the sequence length, as the sequence could include gaps or non-natural AA
aa_counts_excluding_gaps = np.array(list(aa_counts_dict.values()))
number_of_residues = aa_counts_excluding_gaps.sum()
# if there are no residues, don't attempt to calculate a mean. Return np.nan.
if number_of_residues == 0:
return np.nan
# convert dictionary to array, sorted by aa
aa_counts_arr = np.array([value for (key, value) in sorted(aa_counts_dict.items())])
multiplied = aa_counts_arr * hessa_scale
sum_of_multiplied = multiplied.sum()
if method == "mean":
return sum_of_multiplied / number_of_residues
if method == "sum":
return sum_of_multiplied | a8858a62b3c76d466b510507b1ce9f158b5c8c9c | 11,614 |
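A quick hand check of the averaging, using the Hessa values hard-coded above (A = 0.11, C = -0.13):

import numpy as np

vals = np.array([0.11, 0.11, -0.13])  # residues of the hypothetical tripeptide "AAC"
vals.sum()        # ~0.09, what the "sum" method returns
vals.sum() / 3    # ~0.03, what the "mean" method returns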
def get_enrollments(username, include_inactive=False):
"""Retrieves all the courses a user is enrolled in.
Takes a user and retrieves all relevant enrollments. Includes information regarding how the user is enrolled
in the course.
Args:
username: The username of the user we want to retrieve course enrollment information for.
include_inactive (bool): Determines whether inactive enrollments will be included
Returns:
A list of enrollment information for the given user.
Examples:
>>> get_enrollments("Bob")
[
{
"created": "2014-10-20T20:18:00Z",
"mode": "honor",
"is_active": True,
"user": "Bob",
"course_details": {
"course_id": "edX/DemoX/2014T2",
"course_name": "edX Demonstration Course",
"enrollment_end": "2014-12-20T20:18:00Z",
"enrollment_start": "2014-10-15T20:18:00Z",
"course_start": "2015-02-03T00:00:00Z",
"course_end": "2015-05-06T00:00:00Z",
"course_modes": [
{
"slug": "honor",
"name": "Honor Code Certificate",
"min_price": 0,
"suggested_prices": "",
"currency": "usd",
"expiration_datetime": null,
"description": null,
"sku": null,
"bulk_sku": null
}
],
"invite_only": False
}
},
{
"created": "2014-10-25T20:18:00Z",
"mode": "verified",
"is_active": True,
"user": "Bob",
"course_details": {
"course_id": "edX/edX-Insider/2014T2",
"course_name": "edX Insider Course",
"enrollment_end": "2014-12-20T20:18:00Z",
"enrollment_start": "2014-10-15T20:18:00Z",
"course_start": "2015-02-03T00:00:00Z",
"course_end": "2015-05-06T00:00:00Z",
"course_modes": [
{
"slug": "honor",
"name": "Honor Code Certificate",
"min_price": 0,
"suggested_prices": "",
"currency": "usd",
"expiration_datetime": null,
"description": null,
"sku": null,
"bulk_sku": null
}
],
"invite_only": True
}
}
]
"""
return _data_api().get_course_enrollments(username, include_inactive) | 0cbc9a60929fd06f8f5ca90d6c2458867ae474e7 | 11,615 |
async def connections_accept_request(request: web.BaseRequest):
"""
Request handler for accepting a stored connection request.
Args:
request: aiohttp request object
Returns:
The resulting connection record details
"""
context = request.app["request_context"]
outbound_handler = request.app["outbound_message_router"]
connection_id = request.match_info["id"]
try:
connection = await ConnectionRecord.retrieve_by_id(context, connection_id)
except StorageNotFoundError:
raise web.HTTPNotFound()
connection_mgr = ConnectionManager(context)
my_endpoint = request.query.get("my_endpoint") or None
request = await connection_mgr.create_response(connection, my_endpoint)
await outbound_handler(request, connection_id=connection.connection_id)
return web.json_response(connection.serialize()) | 78a469d306c3306f8b9a0ba1a3364f7b30a36f85 | 11,616 |
import numpy as np
def nearest(x, base=1.):
"""
Round the inputs to the nearest multiple of base. Beware, due to the nature of
floating point arithmetic, this may not work as you expect.
INPUTS
x : input value or array
OPTIONS
base : number to which x should be rounded
"""
return np.round(x/base)*base | ca1ddcd75c20ea82c18368b548c36ef5207ab77f | 11,617 |
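A couple of usage examples (assuming numpy is imported as np):

nearest(2.63, base=0.25)                 # -> 2.75
nearest(np.array([1.2, 7.7]), base=5.)   # -> array([ 0., 10.])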
def sort_nesting(list1, list2):
"""Takes a list of start points and end points and sorts the second list according to nesting"""
temp_list = []
while list2 != temp_list:
temp_list = list2[:] # Make a copy of list2 instead of reference
for i in range(1, len(list1)):
if list2[i] > list2[i-1] and list1[i] < list2[i-1]:
list2[i-1], list2[i] = list2[i], list2[i-1]
return list2 | 11693e54eeba2016d21c0c23450008e823bdf1c1 | 11,618 |
import numpy as np
def confusion_matrix(Y_hat, Y, norm=None):
"""
Calculate confusion matrix.
Parameters
----------
Y_hat : array-like
List of data labels.
Y : array-like
List of target truth labels.
norm : {'label', 'target', 'all', None}, default=None
Normalization on resulting matrix. Must be one of:
- 'label' : normalize on labels (columns).
- 'target' : normalize on targets (rows).
- 'all' : normalize on the entire matrix.
- None : No normalization.
Returns
-------
matrix : ndarray, shape=(target_classes, label_classes)
Confusion matrix with target classes as rows and
label classes as columns. Classes are in sorted order.
"""
target_classes = sorted(set(Y))
label_classes = sorted(set(Y_hat))
target_dict = {target_classes[k]: k for k in range(len(target_classes))}
label_dict = {label_classes[k]: k for k in range(len(label_classes))}
matrix = np.zeros((len(target_classes), len(label_classes)))
for label, target in zip(Y_hat, Y):
matrix[target_dict[target],label_dict[label]] += 1
if norm == 'label':
matrix /= np.max(matrix, axis=0).reshape((1,matrix.shape[1]))
elif norm == 'target':
matrix /= np.max(matrix, axis=1).reshape((matrix.shape[0],1))
elif norm == 'all':
matrix /= np.max(matrix)
elif norm is not None:
raise ValueError("Norm must be one of {'label', 'target', 'all', None}")
return matrix.astype(int) if norm is None else matrix | b1ed79b71cef8cdcaa2cfe06435a3b2a56c659dd | 11,619 |
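A small worked example (rows are targets, columns are labels):

Y_hat = [0, 1, 1, 0]  # predicted labels
Y = [0, 1, 0, 0]      # true targets
confusion_matrix(Y_hat, Y)
# array([[2, 1],
#        [0, 1]])  # two 0s predicted correctly, one 0 mislabelled as 1, one 1 correct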
def reroot(original_node: Tree, new_node: Tree):
"""
:param original_node: the node in the original tree
:param new_node: the new node to give children
new_node should have as children the relations of original_node, except for new_node's parent
"""
new_node.children = [
Tree(relation.label)
for relation in original_node.relations
if relation.label != new_node.parent_label
]
for relation in new_node.children:
reroot(
original_node.find(relation.label),
relation,
)
return new_node | f37141fc7645dfbab401eb2be3255d917372ef11 | 11,620 |
import uuid
import time
def update_users():
"""Sync LDAP users with local users in the DB."""
log_uuid = str(uuid.uuid4())
start_time = time.time()
patron_cls = current_app_ils.patron_cls
patron_indexer = PatronBaseIndexer()
invenio_users_updated_count = 0
invenio_users_added_count = 0
# get all CERN users from LDAP
ldap_client = LdapClient()
ldap_users = ldap_client.get_primary_accounts()
_log_info(
log_uuid,
"users_fetched_from_ldap",
dict(users_fetched=len(ldap_users)),
)
if not ldap_users:
return 0, 0, 0
# create a map by employeeID for fast lookup
ldap_users_emails = set()
ldap_users_map = {}
for ldap_user in ldap_users:
if "mail" not in ldap_user:
_log_info(
log_uuid,
"missing_email",
dict(employee_id=ldap_user_get(ldap_user, "employeeID")),
is_error=True,
)
continue
email = ldap_user_get_email(ldap_user)
if email not in ldap_users_emails:
ldap_person_id = ldap_user_get(ldap_user, "employeeID")
ldap_users_map[ldap_person_id] = ldap_user
ldap_users_emails.add(email)
_log_info(
log_uuid,
"users_cached",
)
remote_accounts = RemoteAccount.query.all()
_log_info(
log_uuid,
"users_fetched_from_invenio",
dict(users_fetched=len(remote_accounts)),
)
# get all Invenio remote accounts and prepare a list with needed info
invenio_users = []
for remote_account in remote_accounts:
invenio_users.append(
dict(
remote_account_id=remote_account.id,
remote_account_person_id=remote_account.extra_data[
"person_id"
],
remote_account_department=remote_account.extra_data.get(
"department"
),
user_id=remote_account.user_id,
)
)
_log_info(
log_uuid,
"invenio_users_prepared",
)
# STEP 1
# iterate on all Invenio users first, to update outdated info from LDAP
# or delete users if not found in LDAP.
#
# Note: cannot iterate on the db query here, because when a user is
# deleted, db session will expire, causing a DetachedInstanceError when
# fetching the user on the next iteration
for invenio_user in invenio_users:
# use `dict.pop` to remove from `ldap_users_map` the users found
# in Invenio, so the remaining will be the ones to be added later on
ldap_user = ldap_users_map.pop(
invenio_user["remote_account_person_id"], None
)
if ldap_user:
# the imported LDAP user is already in the Invenio db
ldap_user_display_name = ldap_user_get(ldap_user, "displayName")
user_id = invenio_user["user_id"]
user_profile = UserProfile.query.filter_by(
user_id=user_id
).one()
invenio_full_name = user_profile.full_name
ldap_user_department = ldap_user_get(ldap_user, "department")
invenio_user_department = invenio_user["remote_account_department"]
user = User.query.filter_by(id=user_id).one()
ldap_user_email = ldap_user_get_email(ldap_user)
invenio_user_email = user.email
has_changed = (
ldap_user_display_name != invenio_full_name
or ldap_user_department != invenio_user_department
or ldap_user_email != invenio_user_email
)
if has_changed:
_update_invenio_user(
invenio_remote_account_id=invenio_user[
"remote_account_id"
],
invenio_user_profile=user_profile,
invenio_user=user,
ldap_user=ldap_user,
)
_log_info(
log_uuid,
"department_updated",
dict(
user_id=invenio_user["user_id"],
previous_department=invenio_user_department,
new_department=ldap_user_department,
),
)
# re-index modified patron
patron_indexer.index(patron_cls(invenio_user["user_id"]))
invenio_users_updated_count += 1
db.session.commit()
_log_info(
log_uuid,
"invenio_users_updated_and_deleted",
)
# STEP 2
# Import any new LDAP user not in Invenio yet, the remaining
new_ldap_users = ldap_users_map.values()
if new_ldap_users:
importer = LdapUserImporter()
for ldap_user in new_ldap_users:
user_id = importer.import_user(ldap_user)
email = ldap_user_get_email(ldap_user)
employee_id = ldap_user_get(ldap_user, "employeeID")
_log_info(
log_uuid,
"user_added",
dict(email=email, employee_id=employee_id),
)
# index newly added patron
patron_indexer.index(patron_cls(user_id))
invenio_users_added_count += 1
db.session.commit()
_log_info(
log_uuid,
"invenio_users_created",
)
total_time = time.time() - start_time
_log_info(log_uuid, "task_completed", dict(time=total_time))
return (
len(ldap_users),
invenio_users_updated_count,
invenio_users_added_count,
) | 8aef4e258629dd6e36b0a8b7b722031316df1154 | 11,621 |
def opt(dfs, col='new', a=1, b=3, rlprior=None, clprior=None):
"""Returns maximum likelihood estimates of the model parameters `r` and `c`.
The optimised parameters `r` and `c` refer to the failure count of the
model's negative binomial likelihood function and the variance factor
introduced by each predictive prior, respectively.
Args:
dfs: a data frame or list/tuple of data frames containing counts.
col: the column containing daily new infection counts.
a, b: parameters of the initial predictive beta prime prior.
rlprior, clprior: log density functions to be used as priors on `r` and
`c` (uniform by default).
"""
def f(r):
return _optc(dfs, r, col, a, b, rlprior, clprior, copy=False)[1]
if not isinstance(dfs, list) and not isinstance(dfs, tuple):
dfs = [dfs]
dfs = [df.copy() for df in dfs] # create copies once, before optimising.
# We double r until we pass a local minimum, and then optimize the two
# regions that might contain that minimum separately.
p, r = 1, 2
while f(p) > f(r):
p, r = r, 2*r
r1, l1 = _cvxsearch(f, p//2, p)
r2, l2 = _cvxsearch(f, p, r)
if l1 <= l2:
return r1, _optc(dfs, r1, col, a, b, rlprior, clprior, copy=False)[0]
else:
return r2, _optc(dfs, r2, col, a, b, rlprior, clprior, copy=False)[0] | d40e63892676f18734d3b4789656606e898f69d9 | 11,622 |
import json
from os.path import join
def unpackage_datasets(dirname, dataobject_format=False):
"""
This function unpackages all sub packages, (i.e. train, valid, test)
You should use this function if you want everything
args:
dirname: directory path that has the train, valid, test folders in it
dataobject_format: used for dataobject format
"""
with open(join(dirname, 'room-data.json')) as f:
lm = json.load(f)['Landmarks']
res = {s: unpackage_dataset(join(dirname, s), dataobject_format) for s in ['train', 'valid', 'test']}
res['landmarks'] = lm
return res | d1748b3729b4177315553eab5075d14ea2edf3a7 | 11,623 |
from typing import Sequence
from typing import Tuple
import cmd
from typing import OrderedDict
def get_command_view(
is_running: bool = False,
stop_requested: bool = False,
commands_by_id: Sequence[Tuple[str, cmd.Command]] = (),
) -> CommandView:
"""Get a command view test subject."""
state = CommandState(
is_running=is_running,
stop_requested=stop_requested,
commands_by_id=OrderedDict(commands_by_id),
)
return CommandView(state=state) | 3eaf1b8845d87c7eb6fef086bd2af3b2dd65409a | 11,624 |
import os
from Bio.SeqIO.QualityIO import FastqGeneralIterator
def _split_by_size(in_fastq, split_size, out_dir):
"""Split FASTQ files by a specified number of records.
"""
existing = _find_current_split(in_fastq, out_dir)
if len(existing) > 0:
return existing
def new_handle(num):
base, ext = os.path.splitext(os.path.basename(in_fastq))
fname = os.path.join(out_dir, "{base}_{num}{ext}".format(
base=base, num=num, ext=ext))
return fname, open(fname, "w")
cur_index = 0
cur_count = 0
out_fname, out_handle = new_handle(cur_index)
out_files = [out_fname]
with open(in_fastq) as in_handle:
for name, seq, qual in FastqGeneralIterator(in_handle):
if cur_count < split_size:
cur_count += 1
else:
cur_count = 0
cur_index += 1
out_handle.close()
out_fname, out_handle = new_handle(cur_index)
out_files.append(out_fname)
out_handle.write("@%s\n%s\n+\n%s\n" % (name, seq, qual))
out_handle.close()
return out_files | 00fa4f99204b57da50f7ae12bc31a96100703048 | 11,625 |
def get_node_ip_addresses(ipkind):
"""
Gets a dictionary of required IP addresses for all nodes
Args:
ipkind: ExternalIP or InternalIP or Hostname
Returns:
dict: Internal or External IP addresses keyed off of node name
"""
ocp = OCP(kind=constants.NODE)
masternodes = ocp.get(selector=constants.MASTER_LABEL).get("items")
workernodes = ocp.get(selector=constants.WORKER_LABEL).get("items")
nodes = masternodes + workernodes
return {
node["metadata"]["name"]: each["address"]
for node in nodes
for each in node["status"]["addresses"]
if each["type"] == ipkind
} | 622217c12b763c6dbf5c520d90811bdfe374e876 | 11,626 |
def fill_cache(msg="Fetching cache"):
"""Fill the cache with the packages."""
import os # pylint: disable=import-outside-toplevel
import requests # pylint: disable=import-outside-toplevel
from rich.progress import Progress # pylint: disable=import-outside-toplevel
all_packages_url = f"{base_url}/simple/"
cache_path = os.path.join(os.path.dirname(__file__), "cache")
if not os.path.exists(cache_path):
os.makedirs(cache_path)
cache_file = os.path.join(os.path.dirname(__file__), "cache", "packages.txt")
with Progress(transient=True) as progress:
response = requests.get(all_packages_url, stream=True)
response_data = ""
content_length = response.headers.get("content-length")
if content_length is not None:
total_length = int(content_length)
task = progress.add_task(msg, total=total_length)
downloaded = 0
for data in response.iter_content(chunk_size=32768):
downloaded += len(data)
response_data += data.decode("utf-8")
progress.advance(task, 32768)
else:
response_data = response.content.decode("utf-8")
import re # pylint: disable=import-outside-toplevel
packages = re.findall(r"<a[^>]*>([^<]+)<\/a>", response_data)
with open(cache_file, "w", encoding="utf-8") as cache_file:
cache_file.write("\n".join(packages))
return packages | 113a55f73c1d8f3dd430b4e497aeae8045c9b255 | 11,627 |
def house_filter(size, low, high):
"""
Function that returns the "gold standard" filter.
This window is designed to produce low sidelobes
for Fourier filters.
In essence it resembles a sigmoid function that
smoothly goes between zero and one, from short
to long time.
"""
filt = np.zeros(size)
def eval_filter(rf, c1, c2, c3, c4):
r1 = 1. - rf**2.
r2 = r1**2.
r3 = r2 * r1
filt = c1 + c2*r1 + c3*r2 + c4*r3
return filt
coefficients = {
"c1": 0.074,
"c2": 0.302,
"c3": 0.233,
"c4": 0.390
}
denom = (high - low + 1.0) / 2.
if denom < 0.:
raise ZeroDivisionError
for i in range(int(low), int(high)):
rf = (i + 1) / denom
if rf > 1.5:
filt[i] = 1.
else:
temp = eval_filter(rf, **coefficients)
if temp < 0.:
filt[i] = 1.
else:
filt[i] = 1. - temp
filt[int(high):] = 1.
return filt | ef7f3fe3bb4410ce81fcd061e24d6f34a36f3a04 | 11,628 |
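A usage sketch; the resulting window rises smoothly from roughly zero at the low cutoff to one at the high cutoff:

filt = house_filter(64, low=0, high=32)
# filt[0] is ~0, the values rise smoothly toward 1 across the band, and filt[i] == 1 for all i >= 32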
def PToData(inGFA, data, err):
"""
Copy host array to data
Copies data from GPUFArray locked host array to data
* inGFA = input Python GPUFArray
* data = FArray containing data array
* err = Obit error/message stack
"""
################################################################
return Obit.GPUFArrayToData (inGFA.me, data.me, err.me) | 0f943a8340c2587c75f7ba6783f160d5d3bede76 | 11,629 |
import os
def download_vendor_image(image):
""" Downloads specified vendor binary image
Args:
image (str): Path of image filename to begin downloading
Returns:
"""
# TODO Prevent sending hidden files
return send_from_directory(os.path.join(_AEON_TOPDIR, 'vendor_images'), image) | 15926f4217539e40c6c3656792e4deb2094cca82 | 11,630 |
def maker(sql_connection, echo=False):
"""
Get an sessionmaker object from a sql_connection.
"""
engine = get_engine(sql_connection, echo=echo)
m = orm.sessionmaker(bind=engine, autocommit=True, expire_on_commit=False)
return m | 1296fa49058c8a583cf442355534d22b00bdaeea | 11,631 |
def test_auth(request):
"""Tests authentication worked successfuly."""
return Response({"message": "You successfuly authenticated!"}) | 59e065687333a4dd612e514e0f8ea459062c7cb3 | 11,632 |
def streak_condition_block() -> Block:
"""
Create block with 'streak' condition, when rotation probability is low and
target orientation repeats continuously in 1-8 trials.
:return: 'Streak' condition block.
"""
return Block(configuration.STREAK_CONDITION_NAME,
streak_rotations_generator) | 769b0f7b9ce8549f4bea75da066814b3e1f8a103 | 11,633 |
def resolve_appinstance(request,
appinstanceid,
permission='base.change_resourcebase',
msg=_PERMISSION_MSG_GENERIC,
**kwargs):
"""
Resolve the app instance by the provided primary key
and check the optional permission.
"""
return resolve_object(
request,
AppInstance, {'pk': appinstanceid},
permission=permission,
permission_msg=msg,
**kwargs) | bb17c2a842c4f2fced1bce46bd1d05293a7b0edf | 11,634 |
def statementTVM(pReact):
"""Use this funciton to produce the TVM statemet"""
T,V,mass = pReact.T,pReact.volume,pReact.mass
statement="\n{}: T: {:0.2f} K, V: {:0.2f} m^3, mass: {:0.2f} kg".format(pReact.name,T,V,mass)
return statement | cda356678d914f90d14905bdcadf2079c9ebfbea | 11,635 |
def fill_NaNs_with_nearest_neighbour(data, lons, lats):
"""At each depth level and time, fill in NaN values with nearest lateral
neighbour. If the entire depth level is NaN, fill with values from level
above. The last two dimensions of data are the lateral dimensions.
lons.shape and lats.shape = (data.shape[-2], data.shape[-1])
:arg data: the data to be filled
:type data: 4D numpy array
:arg lons: longitude points
:type lons: 2D numpy array
:arg lats: latitude points
:type lats: 2D numpy array
:returns: a 4D numpy array
"""
filled = data.copy()
for t in range(data.shape[0]):
for k in range(data.shape[1]):
subdata = data[t, k, :, :]
mask = np.isnan(subdata)
points = np.array([lons[~mask], lats[~mask]]).T
valid_data = subdata[~mask]
try:
filled[t, k, mask] = interpolate.griddata(
points, valid_data, (lons[mask], lats[mask]),
method='nearest'
)
except ValueError:
# if the whole depth level is NaN,
# set it equal to the level above
filled[t, k, :, :] = filled[t, k - 1, :, :]
return filled | cacde1f5a7e52535f08cd1154f504fb24293182e | 11,636 |
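A standalone sketch of the nearest-neighbour fill applied at each time step and depth level, on a hypothetical 3x3 field with one NaN:

import numpy as np
from scipy import interpolate

lons, lats = np.meshgrid(np.arange(3.0), np.arange(3.0))
field = np.arange(9.0).reshape(3, 3)
field[1, 1] = np.nan
mask = np.isnan(field)
points = np.array([lons[~mask], lats[~mask]]).T
field[mask] = interpolate.griddata(
    points, field[~mask], (lons[mask], lats[mask]), method='nearest'
)
# field[1, 1] now holds the value of one of its immediate neighbours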
def transform_type_postorder(type_signature, transform_fn):
"""Walks type tree of `type_signature` postorder, calling `transform_fn`.
Args:
type_signature: Instance of `computation_types.Type` to transform
recursively.
transform_fn: Transformation function to apply to each node in the type tree
of `type_signature`. Must be instance of Python function type.
Returns:
A possibly transformed version of `type_signature`, with each node in its
tree the result of applying `transform_fn` to the corresponding node in
`type_signature`.
Raises:
TypeError: If the types don't match the specification above.
"""
# TODO(b/134525440): Investigate unifying the recursive methods in type_utils,
# rather than proliferating them.
# TODO(b/134595038): Revisit the change here to add a mutated flag.
py_typecheck.check_type(type_signature, computation_types.Type)
py_typecheck.check_callable(transform_fn)
if isinstance(type_signature, computation_types.FederatedType):
transformed_member, member_mutated = transform_type_postorder(
type_signature.member, transform_fn)
if member_mutated:
type_signature = computation_types.FederatedType(transformed_member,
type_signature.placement,
type_signature.all_equal)
fed_type_signature, type_signature_mutated = transform_fn(type_signature)
return fed_type_signature, type_signature_mutated or member_mutated
elif isinstance(type_signature, computation_types.SequenceType):
transformed_element, element_mutated = transform_type_postorder(
type_signature.element, transform_fn)
if element_mutated:
type_signature = computation_types.SequenceType(transformed_element)
seq_type_signature, type_signature_mutated = transform_fn(type_signature)
return seq_type_signature, type_signature_mutated or element_mutated
elif isinstance(type_signature, computation_types.FunctionType):
transformed_param, param_mutated = transform_type_postorder(
type_signature.parameter, transform_fn)
transformed_result, result_mutated = transform_type_postorder(
type_signature.result, transform_fn)
if param_mutated or result_mutated:
type_signature = computation_types.FunctionType(transformed_param,
transformed_result)
fn_type_signature, fn_mutated = transform_fn(type_signature)
return fn_type_signature, fn_mutated or param_mutated or result_mutated
elif isinstance(type_signature, computation_types.NamedTupleType):
elems = []
elems_mutated = False
for element in anonymous_tuple.iter_elements(type_signature):
transformed_element, element_mutated = transform_type_postorder(
element[1], transform_fn)
elems_mutated = elems_mutated or element_mutated
elems.append((element[0], transformed_element))
if elems_mutated:
if isinstance(type_signature,
computation_types.NamedTupleTypeWithPyContainerType):
type_signature = computation_types.NamedTupleTypeWithPyContainerType(
elems,
computation_types.NamedTupleTypeWithPyContainerType
.get_container_type(type_signature))
else:
type_signature = computation_types.NamedTupleType(elems)
tuple_type_signature, tuple_mutated = transform_fn(type_signature)
return tuple_type_signature, elems_mutated or tuple_mutated
elif isinstance(type_signature,
(computation_types.AbstractType, computation_types.TensorType,
computation_types.PlacementType)):
return transform_fn(type_signature) | 9a6b493e2dd5f7edf1ab5a53d24141b1f269441d | 11,637 |
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions.version import compare_versions
import json
def check_stack_feature(stack_feature, stack_version):
"""
Given a stack_feature and a specific stack_version, it validates that the feature is supported by the stack_version.
:param stack_feature: Feature name to check if it is supported by the stack. For example: "rolling_upgrade"
:param stack_version: Version of the stack
:return: Will return True if successful, otherwise, False.
"""
stack_features_config = default("/configurations/cluster-env/stack_features", None)
data = _DEFAULT_STACK_FEATURES
if not stack_version:
return False
if stack_features_config:
data = json.loads(stack_features_config)
for feature in data["stack_features"]:
if feature["name"] == stack_feature:
if "min_version" in feature:
min_version = feature["min_version"]
if compare_versions(stack_version, min_version, format = True) < 0:
return False
if "max_version" in feature:
max_version = feature["max_version"]
if compare_versions(stack_version, max_version, format = True) >= 0:
return False
return True
return False | e7417738f285d94d666ac8b72d1b8c5079469f02 | 11,638 |
def get_random_action_weights():
"""Get random weights for each action.
e.g. [0.23, 0.57, 0.19, 0.92]"""
return np.random.random((1, NUM_ACTIONS)) | da929c6a64c87ddf9af22ab17636d0db011c8a45 | 11,639 |
import time
import datetime
from datetime import timezone
def rpg_radar2nc(data, path, larda_git_path, **kwargs):
"""
This routine generates a daily NetCDF4 file for the RPG 94 GHz FMCW radar 'LIMRAD94'.
Args:
data (dict): dictionary of larda containers
path (string): path where the NetCDF file is stored
"""
dt_start = h.ts_to_dt(data['Ze']['ts'][0])
h.make_dir(path)
site_name = kwargs['site'] if 'site' in kwargs else 'no-site'
cn_version = kwargs['version'] if 'version' in kwargs else 'pyhon'
ds_name = f'{path}/{h.ts_to_dt(data["Ze"]["ts"][0]):%Y%m%d}-{site_name}-limrad94.nc'
ncvers = '4'
repo = git.Repo(larda_git_path)
sha = repo.head.object.hexsha
with netCDF4.Dataset(ds_name, 'w', format=f'NETCDF{ncvers}') as ds:
ds.Convention = 'CF-1.0'
ds.location = data['Ze']['paraminfo']['location']
ds.system = data['Ze']['paraminfo']['system']
ds.version = f'Variable names and dimensions prepared for Cloudnet {kwargs["version"]} version'
ds.title = 'LIMRAD94 (SLDR) Doppler Cloud Radar, calibrated Input for Cloudnet'
ds.institution = 'Leipzig Institute for Meteorology (LIM), Leipzig, Germany'
ds.source = '94 GHz Cloud Radar LIMRAD94\nRadar type: Frequency Modulated Continuous Wave,\nTransmitter power 1.5 W typical (solid state ' \
'amplifier)\nAntenna Type: Bi-static Cassegrain with 500 mm aperture\nBeam width: 0.48deg FWHM'
ds.reference = 'W Band Cloud Radar LIMRAD94\nDocumentation and User Manual provided by manufacturer RPG Radiometer Physics GmbH\n' \
'Information about system also available at https://www.radiometer-physics.de/'
ds.calibrations = f'remove Precip. ghost: {kwargs["ghost_echo_1"]}\n, remove curtain ghost: {kwargs["ghost_echo_2"]}\n' \
f'despeckle: {kwargs["despeckle"]}\n, number of standard deviations above noise: {kwargs["NF"]}\n'
ds.git_description = f'pyLARDA commit ID {sha}'
ds.description = 'Concatenated data files of LIMRAD 94GHz - FMCW Radar, used as input for Cloudnet processing, ' \
'filters applied: ghost-echo, despeckle, use only main peak'
ds.history = 'Created ' + time.ctime(time.time())
ds._FillValue = data['Ze']['paraminfo']['fill_value']
ds.day = dt_start.day
ds.month = dt_start.month
ds.year = dt_start.year
# ds.commit_id = subprocess.check_output(["git", "describe", "--always"]) .rstrip()
ds.history = 'Created ' + time.ctime(time.time()) + '\nfilters applied: ghost-echo, despeckle, main peak only'
Ze_str = 'Zh' if cn_version == 'python' else 'Ze'
vel_str = 'v' if cn_version == 'python' else 'vm'
width_str = 'width' if cn_version == 'python' else 'sigma'
dim_tupel = ('time', 'range') if cn_version == 'python' else ('range', 'time')
n_chirps = len(data['no_av'])
ds.createDimension('chirp', n_chirps)
ds.createDimension('time', data['Ze']['ts'].size)
ds.createDimension('range', data['Ze']['rg'].size)
if cn_version == 'matlab':
for ivar in ['Ze', 'VEL', 'sw', 'ldr', 'kurt', 'skew']:
data[ivar]['var'] = data[ivar]['var'].T
# coordinates
nc_add_variable(
ds,
val=94.0,
dimension=(),
var_name='frequency',
type=np.float32,
long_name='Radar frequency',
units='GHz'
)
nc_add_variable(
ds,
val=256,
dimension=(),
var_name='Numfft',
type=np.float32,
long_name='Number of points in FFT',
units=''
)
nc_add_variable(
ds,
val=np.mean(data['MaxVel']['var']),
dimension=(),
var_name='NyquistVelocity',
type=np.float32,
long_name='Mean (over all chirps) Unambiguous Doppler velocity (+/-)',
units='m s-1'
)
nc_add_variable(
ds,
val=data['Ze']['paraminfo']['altitude'],
dimension=(),
var_name='altitude',
type=np.float32,
long_name='Height of instrument above mean sea level',
units='m'
)
nc_add_variable(
ds,
val=data['Ze']['paraminfo']['coordinates'][0],
dimension=(),
var_name='latitude',
type=np.float32,
long_name='latitude',
units='degrees_north'
)
nc_add_variable(
ds,
val=data['Ze']['paraminfo']['coordinates'][1],
dimension=(),
var_name='longitude',
type=np.float32,
long_name='longitude',
units='degrees_east'
)
if 'version' in kwargs and cn_version == 'python':
nc_add_variable(
ds,
val=data['no_av'],
dimension=('chirp',),
var_name='NumSpectraAveraged',
type=np.float32,
long_name='Number of spectral averages',
units=''
)
# time and range variable
# convert to time since midnight
if cn_version == 'python':
ts = np.subtract(data['Ze']['ts'], datetime.datetime(dt_start.year, dt_start.month, dt_start.day, 0, 0, 0, tzinfo=timezone.utc).timestamp()) / 3600
ts_str = 'Decimal hours from midnight UTC to the middle of each day'
ts_unit = f'hours since {dt_start:%Y-%m-%d} 00:00:00 +00:00 (UTC)'
rg = data['Ze']['rg'] / 1000.0
elif cn_version == 'matlab':
ts = np.subtract(data['Ze']['ts'], datetime.datetime(2001, 1, 1, 0, 0, 0, tzinfo=timezone.utc).timestamp())
ts_str = 'Seconds since 1st January 2001 00:00 UTC'
ts_unit = 'sec'
rg = data['Ze']['rg']
else:
raise ValueError('Wrong version selected! version to "matlab" or "python"!')
nc_add_variable(ds, val=ts, dimension=('time',), var_name='time', type=np.float64, long_name=ts_str, units=ts_unit)
nc_add_variable(ds, val=rg, dimension=('range',), var_name='range', type=np.float32,
long_name='Range from antenna to the centre of each range gate', units='km')
nc_add_variable(ds, val=data['Azm']['var'], dimension=('time',), var_name='azimuth', type=np.float32,
long_name='Azimuth angle from north', units='degree')
nc_add_variable(ds, val=data['Elv']['var'], dimension=('time',), var_name='elevation', type=np.float32,
long_name='elevation angle. 90 degree is vertical direction.', units='degree')
# chirp dependent variables
nc_add_variable(ds, val=data['MaxVel']['var'][0], dimension=('chirp',),
var_name='DoppMax', type=np.float32, long_name='Unambiguous Doppler velocity (+/-)', units='m s-1')
# index plus (1 to n) for Matlab indexing
nc_add_variable(ds, val=data['rg_offsets'], dimension=('chirp',),
var_name='range_offsets', type=np.int32,
long_name='chirp sequences start index array in altitude layer array', units='-')
# 1D variables
nc_add_variable(ds, val=data['bt']['var'], dimension=('time',),
var_name='bt', type=np.float32, long_name='Direct detection brightness temperature', units='K')
nc_add_variable(ds, val=data['LWP']['var'], dimension=('time',),
var_name='lwp', type=np.float32, long_name='Liquid water path', units='g m-2')
nc_add_variable(ds, val=data['rr']['var'], dimension=('time',),
var_name='rain', type=np.float32, long_name='Rain rate from weather station', units='mm h-1')
nc_add_variable(ds, val=data['SurfRelHum']['var'], dimension=('time',),
var_name='SurfRelHum', type=np.float32, long_name='Relative humidity from weather station', units='%')
# 2D variables
nc_add_variable(ds, val=data['Ze']['var'], dimension=dim_tupel, var_name=Ze_str, type=np.float32,
long_name='Radar reflectivity factor', units='mm6 m-3', plot_range=data['Ze']['var_lims'], plot_scale='linear',
comment='Calibrated reflectivity. Calibration convention: in the absence of attenuation, '
'a cloud at 273 K containing one million 100-micron droplets per cubic metre will '
'have a reflectivity of 0 dBZ at all frequencies.')
nc_add_variable(ds, val=data['VEL']['var'], dimension=dim_tupel, plot_range=data['VEL']['var_lims'], plot_scale='linear',
var_name=vel_str, type=np.float32, long_name='Doppler velocity', units='m s-1', unit_html='m s<sup>-1</sup>',
comment='This parameter is the radial component of the velocity, with positive velocities are away from the radar.',
folding_velocity=data['MaxVel']['var'].max())
nc_add_variable(ds, val=data['sw']['var'], dimension=dim_tupel, plot_range=data['sw']['var_lims'], plot_scale='logarithmic',
var_name=width_str, type=np.float32, long_name='Spectral width', units='m s-1', unit_html='m s<sup>-1</sup>',
comment='This parameter is the standard deviation of the reflectivity-weighted velocities in the radar pulse volume.')
nc_add_variable(ds, val=data['ldr']['var'], dimension=dim_tupel, plot_range=[-30.0, 0.0],
var_name='ldr', type=np.float32, long_name='Linear depolarisation ratio', units='dB',
comment='This parameter is the ratio of cross-polar to co-polar reflectivity.')
nc_add_variable(ds, val=data['kurt']['var'], dimension=dim_tupel, plot_range=data['kurt']['var_lims'],
var_name='kurt', type=np.float32, long_name='Kurtosis', units='linear')
nc_add_variable(ds, val=data['skew']['var'], dimension=dim_tupel, plot_range=data['skew']['var_lims'],
var_name='Skew', type=np.float32, long_name='Skewness', units='linear')
print('save calibrated to :: ', ds_name)
return 0 | d59a46c2872b6f82e81b54cfca953ebc0bc18a90 | 11,640 |
def load_ssl_user_from_request(request):
"""
Loads SSL user from current request.
SSL_CLIENT_VERIFY and SSL_CLIENT_S_DN needs to be set in
request.environ. This is set by frontend httpd mod_ssl module.
"""
ssl_client_verify = request.environ.get('SSL_CLIENT_VERIFY')
if ssl_client_verify != 'SUCCESS':
raise Unauthorized('Cannot verify client: %s' % ssl_client_verify)
username = request.environ.get('SSL_CLIENT_S_DN')
if not username:
raise Unauthorized('Unable to get user information (DN) from client certificate')
user = User.find_user_by_name(username)
if not user:
user = User.create_user(username=username)
g.groups = []
g.user = user
return user | ff716c139f57f00345d622a849b714c67468d7bb | 11,641 |
def get_all_users():
"""Gets all users"""
response = user_info.get_all_users()
return jsonify({'Users' : response}), 200 | f178c509afdae44831c1ede0cdfee39ca7ea6cec | 11,642 |
def open_mailbox_maildir(directory, create=False):
""" There is a mailbox here.
"""
return lazyMaildir(directory, create=create) | c19e5bf97da7adfe9a37e515f1b46047ced75108 | 11,643 |
def TANH(*args) -> Function:
"""
Returns the hyperbolic tangent of any real number.
Learn more: https://support.google.com/docs/answer/3093755
"""
return Function("TANH", args) | 1bb3dbe8147f366415cf78c39f5cf6df3b80ffca | 11,644 |
def value_frequencies_chart_from_blocking_rules(
blocking_rules: list, df: DataFrame, spark: SparkSession, top_n=20, bottom_n=10
):
"""Produce value frequency charts for the provided blocking rules
Args:
blocking_rules (list): A list of blocking rules as specified in a Splink
settings dictionary
df (DataFrame): Dataframe to profile
spark (SparkSession): SparkSession object
top_n (int, optional): Number of values with the highest frequencies to display. Defaults to 20.
bottom_n (int, optional): Number of values with the lowest frequencies to display. Defaults to 10.
Returns:
Chart: If Altair is installed, return a chart. If not, then it returns the
vega lite chart spec as a dictionary
"""
col_combinations = blocking_rules_to_column_combinations(blocking_rules)
return column_combination_value_frequencies_chart(
col_combinations, df, spark, top_n, bottom_n
) | e9160c9cd14ced1b904fad67c72c10a027179f7f | 11,645 |
def getOfflineStockDataManifest():
"""Returns manifest for the available offline data.
If manifest is not found, creates an empty one.
Returns:
A dict with the manifest. For example:
{'STOCK_1':
{'first_available_date': datetime(2016, 1, 1),
'last_available_date': datetime(2017, 2, 28)},
'STOCK_2':
{'first_available_date': datetime(2014, 2, 4),
'last_available_date': datetime(2016, 6, 15)}}
"""
if exists(offlineStockDataManifestPath):
with open(offlineStockDataManifestPath) as manifest_file:
return JSON.openJson(manifest_file)
else:
manifest = {}
updateOfflineStockDataManifest(manifest)
return manifest | 40482daa96bf18a843f91bb4af00f37917e51340 | 11,646 |
def align_buf(buf: bytes, sample_width: int):
"""In case of buffer size not aligned to sample_width pad it with 0s"""
remainder = len(buf) % sample_width
if remainder != 0:
buf += b'\0' * (sample_width - remainder)
return buf | 9d4996a8338fe532701ee82843d055d6d747f591 | 11,647 |
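Two quick examples of the padding behaviour:

align_buf(b'\x01\x02\x03', 2)       # -> b'\x01\x02\x03\x00' (padded to a multiple of 2 bytes)
align_buf(b'\x01\x02\x03\x04', 2)   # -> unchanged, already aligned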
import json
def update_alert():
""" Make Rest API call to security graph to update an alert """
if flask.request.method == 'POST':
flask.session.pop('UpdateAlertData', None)
result = flask.request.form
flask.session['VIEW_DATA'].clear()
alert_data = {_: result[_] for _ in result} # Iterate over html form POST from Graph.html
if alert_data.get('AlertId'): # Id form was not empty
alert_data['AlertId'] = alert_data.get('AlertId').strip(' ')
else:
flask.session['VIEW_DATA']['UpdateAlertError'] = "Please enter valid alert Id"
return flask.redirect(flask.url_for('homepage'))
alert_id = alert_data['AlertId']
old_alert = get_alert_by_id(alert_id) # store old alert before updating it
if not old_alert: # alert not found
flask.session['VIEW_DATA']['UpdateAlertError'] = "No alert matching this ID " + alert_id + " was found"
return flask.redirect(flask.url_for('homepage'))
else:
flask.session['VIEW_DATA']['OldAlert'] = old_alert
properties_to_update = {}
properties_to_update["assignedTo"] = flask.session['email']
if alert_data.get("SelectStatusToUpdate") != "Unknown":
properties_to_update["status"] = alert_data.get("SelectStatusToUpdate")
if alert_data.get("SelectFeedbackToUpdate") != "Unknown":
properties_to_update["feedback"] = alert_data.get("SelectFeedbackToUpdate")
if alert_data.get("Comments") != "":
comments = old_alert.get("comments")
new_comment = alert_data.get("Comments")
comments.append(new_comment)
properties_to_update["comments"] = comments
# include the required vendor information in the body of the PATCH
properties_to_update["vendorInformation"] = old_alert.get("vendorInformation")
# update the alert
update_security_alert(alert_id, properties_to_update)
# make another call to graph to get the updated alert
updated_alert = get_alert_by_id(alert_id)
# store the alert to be rendered in the table in Graph.html
flask.session['VIEW_DATA']['UpdateAlertResults'] = updated_alert
flask.session['VIEW_DATA']['UpdateQueryDetails'] = "REST query PATCH: '" \
+ config.SECURITYAPI_URL \
+ "alerts/" \
+ alert_id \
+ "'"
flask.session['VIEW_DATA']['UpdateQueryBody'] = "Request Body: " \
+ json.dumps(properties_to_update,
sort_keys=True,
indent=4,
separators=(',', ': '))
flask.session['UpdateAlertData'] = alert_data
return flask.redirect(flask.url_for('homepage')) | 52248bda1271fbd39ab056c5384f6318e30cc712 | 11,648 |
def simulate_bet(odds, stake):
"""
Simulate the bet taking place assuming the odds accurately represent the probability of the event
:param odds: numeric: the odds given for the event
:param stake: numeric: the amount of money being staked
:return: decimal: the returns from the bet
"""
probability = odds_to_prob(odds)
if np.random.rand() <= probability:
return stake * (1 + odds)
else:
return 0 | 0a56bc4b9a3071cc777786a1a7cf8b1410e3f941 | 11,649 |
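A rough sanity check; this assumes `odds_to_prob` (not shown here) maps fractional odds b to the implied probability 1/(1+b), in which case the expected return equals the stake:

import numpy as np

np.random.seed(0)
returns = [simulate_bet(odds=3, stake=10) for _ in range(100_000)]
np.mean(returns)  # close to 10 under the 1/(1+odds) assumption: 0.25 * 10 * (1 + 3) == 10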
from collections import OrderedDict
from torch import nn
from torchvision.models import squeezenet1_1
def lrcn(num_classes, lrcn_time_steps, lstm_hidden_size=200, lstm_num_layers=2):
"""
Args:
num_classes (int):
Returns:
torch.nn.modules.module.Module
"""
class TimeDistributed(nn.Module):
def __init__(self, layer, time_steps):
super(TimeDistributed, self).__init__()
# self.layers = nn.ModuleList([layer for _ in range(time_steps)])
self.layers = nn.ModuleList([nn.Linear(10, 10) for _ in range(time_steps)])
def forward(self, x):
batch_size, time_steps, *_ = x.size()
# outputs = list()
for i, layer in enumerate(self.layers):
x = layer(x)
# output_t = layer(x[:, i])
# if i == 0:
# output = output_t.unsqueeze(1)
# else:
# output = torch.cat((output, output_t.unsqueeze(1)), 1)
# outputs.append(output_t)
# output = torch.stack(outputs, dim=1)
# return output
return x
class BiLSTMHidden2Dense(nn.Module):
def __init__(self):
super(BiLSTMHidden2Dense, self).__init__()
def forward(self, x):
lstm_output, (hn, cn) = x
lstm_last_hidden_state = hn[-2:].transpose(0, 1).contiguous().view(hn.size(1), -1)
return lstm_last_hidden_state
cnn_model = squeezenet1_1(pretrained=False, progress=True)
model = nn.Sequential(OrderedDict([
('timedistributed_cnn', TimeDistributed(nn.Conv2d(3, 60, (1, 1)), time_steps=lrcn_time_steps)),
# ('timedistributed_cnn', TimeDistributed(cnn_model, time_steps=lrcn_time_steps)),
# ('bidirectional_stacked_lstm', nn.LSTM(input_size=1000, hidden_size=lstm_hidden_size, num_layers=lstm_num_layers,
# batch_first=True, dropout=0.2, bidirectional=True)),
# ('hidden2dense', BiLSTMHidden2Dense()),
# ('dense', nn.Linear(in_features=2*lstm_hidden_size, out_features=lstm_hidden_size)),
# ('norm', nn.BatchNorm1d(num_features=lstm_hidden_size)),
# ('relu', nn.ReLU()),
# ('dropout', nn.Dropout(p=0.25)),
# ('last', nn.Linear(in_features=lstm_hidden_size, out_features=num_classes))
]))
return model | a17f58906f4d5b514e56f5cba22ac60bdf739b9c | 11,650 |
import re
def tokenize(content):
    # Minimal stand-in for the project's own tokenizer: lowercase words of two or more characters.
    return set(re.findall(r"[a-z']{2,}", content.lower()))
def index_document(connection, doc_id, content):
    """Build an inverted (reverse) index for a document."""
words = tokenize(content)
pipe = connection.pipeline(True)
for word in words:
pipe.sadd('idx:' + word, doc_id)
return len(pipe.execute()) | 89572980c0bfadef9e1557b7e7831fc7aebe6716 | 11,651 |
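# Usage sketch for index_document, assuming a Redis server is reachable with the
# `redis` package's default connection settings.
import redis
r = redis.Redis()
print(index_document(r, 'doc:1', 'the quick brown fox'))  # number of words indexed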
import numpy as np
from ase.io import read
def get_incar_magmoms(incarpath,poscarpath):
"""
Read in the magnetic moments in the INCAR
Args:
incarpath (string): path to INCAR
poscarpath (string): path to POSCAR
Returns:
mof_mag_list (list of floats): magnetic moments
"""
mof_mag_list = []
init_mof = read(poscarpath)
with open(incarpath,'r') as incarfile:
for line in incarfile:
line = line.strip()
if 'MAGMOM' in line:
mag_line = line.split('= ')[1:][0].split(' ')
for val in mag_line:
mag = float(val.split('*')[1])
num = int(val.split('*')[0])
mof_mag_list.extend([mag]*num)
if not bool(mof_mag_list):
mof_mag_list = np.zeros(len(init_mof))
    if len(mof_mag_list) != len(init_mof):
raise ValueError('Error reading INCAR magnetic moments')
return mof_mag_list | 6b75f415e7128213bab63d251a3fb6feb7576656 | 11,652 |
from utils.deprecations import deprecate_old_command_line_tools
import optparse
import sys
def main(config_path=None):
""" The main entry point for the unix version of dogstatsd. """
# Deprecation notice
deprecate_old_command_line_tools()
COMMANDS_START_DOGSTATSD = [
'start',
'stop',
'restart',
'status'
]
parser = optparse.OptionParser("%prog [start|stop|restart|status]")
parser.add_option('-u', '--use-local-forwarder', action='store_true',
dest="use_forwarder", default=False)
opts, args = parser.parse_args()
if not args or args[0] in COMMANDS_START_DOGSTATSD:
reporter, server, cnf = init(config_path, use_watchdog=True, use_forwarder=opts.use_forwarder, args=args)
daemon = Dogstatsd(PidFile(PID_NAME, PID_DIR).get_path(), server, reporter,
cnf.get('autorestart', False))
# If no args were passed in, run the server in the foreground.
if not args:
daemon.start(foreground=True)
return 0
        # Otherwise, we process the daemon command.
else:
command = args[0]
if command == 'start':
daemon.start()
elif command == 'stop':
daemon.stop()
elif command == 'restart':
daemon.restart()
elif command == 'status':
daemon.status()
elif command == 'info':
return Dogstatsd.info()
else:
sys.stderr.write("Unknown command: %s\n\n" % command)
parser.print_help()
return 1
return 0 | 8c8a44f216c87aff87f20b504346e2713307bb4f | 11,653 |
import plistlib
def remove_report_from_plist(plist_file_obj, skip_handler):
"""
Parse the original plist content provided by the analyzer
and return a new plist content where reports were removed
if they should be skipped. If the remove failed for some reason None
will be returned.
WARN !!!!
If the 'files' array in the plist is modified all of the
diagnostic section (control, event ...) nodes should be
re indexed to use the proper file array indexes!!!
"""
report_data = None
try:
report_data = parse_plist(plist_file_obj)
if not report_data:
return
except Exception as ex:
LOG.error("Plist parsing error")
LOG.error(ex)
return
file_ids_to_remove = []
try:
for i, f in enumerate(report_data['files']):
if skip_handler.should_skip(f):
file_ids_to_remove.append(i)
kept_diagnostics, kept_files = get_kept_report_data(report_data,
file_ids_to_remove)
report_data['diagnostics'] = kept_diagnostics
report_data['files'] = kept_files if kept_diagnostics else []
return plistlib.dumps(report_data)
except KeyError:
LOG.error("Failed to modify plist content, "
"keeping the original version")
return | fb14ccf1b0a1ad6b5e3b3e536e21386dbbcac84e | 11,654 |
def isTask(item): # pragma: no cover
"""Is the given item an OmniFocus task?"""
return item.isKindOfClass_(taskClass) | b0e2c813b29315e7b84cd9f2a4d211552dab9baf | 11,655 |
import os
def add2grouppvalue_real1():
"""
add2grouppvalue_real1
description:
Uses the raw data from real_data_1.csv to compute p-values for 2 group comparisons on a bunch of pairs of
groups and using all 3 stats tests
Test fails if there are any errors or if the shape of any of the following stats entries are incorrect:
dset.stats['{stats_test}_..._raw'].shape = (773,)
returns:
(bool) -- test pass (True) or fail (False)
"""
dset = Dataset(os.path.join(os.path.dirname(__file__), 'real_data_1.csv'))
dset.assign_groups({
'Par': [0, 1, 2, 3],
'Dap2': [4, 5, 6, 7],
'Dal2': [8, 9, 10, 11],
'Van4': [12, 13, 14, 15],
'Van8': [16, 17, 18, 19]
})
# pairs of groups to compute log2fc on
pairs = [
['Par', 'Dap2'],
['Par', 'Dal2'],
['Par', 'Van4'],
['Par', 'Van8']
]
for pair in pairs:
for stats_test in ['students', 'welchs', 'mann-whitney']:
#print('testing {} with {}'.format(pair, stats_test))
add_2group_pvalue(dset, pair, stats_test)
stest_abbrev = {'students': 'studentsP', 'welchs': 'welchsP', 'mann-whitney': 'mannwhitP'}[stats_test]
if dset.stats['{}_{}_raw'.format(stest_abbrev, '-'.join(pair))].shape != (773,):
m = 'add2grouppvalue_real1: "{}_..._raw" should have shape (773,), has shape: {}'
                raise RuntimeError(m.format(stest_abbrev, dset.stats['{}_{}_raw'.format(stest_abbrev, '-'.join(pair))].shape))
# diagnostic printing stuff
"""
print(dset)
for s, w, m in zip(dset.stats["studentsP_Par-Dap2_raw"] <= 0.05,
dset.stats["welchsP_Par-Dap2_raw"] <= 0.05,
dset.stats["mannwhitP_Par-Dap2_raw"] <= 0.05):
if s and w and m:
print(True)
elif not s and not w and not m:
print(False)
else:
print(s, w, m)
"""
return True | 291b2d4e7c72ff43e0b0fb8f67eea8819e384121 | 11,656 |
from typing import Tuple
import torch
from torch import Tensor
def cox_cc_loss(g_case: Tensor, g_control: Tensor, shrink : float = 0.,
clamp: Tuple[float, float] = (-3e+38, 80.)) -> Tensor:
"""Torch loss function for the Cox case-control models.
For only one control, see `cox_cc_loss_single_ctrl` instead.
Arguments:
g_case {torch.Tensor} -- Result of net(input_case)
g_control {torch.Tensor} -- Results of [net(input_ctrl1), net(input_ctrl2), ...]
Keyword Arguments:
shrink {float} -- Shrinkage that encourage the net got give g_case and g_control
closer to zero (a regularizer in a sense). (default: {0.})
clamp {tuple} -- See code (default: {(-3e+38, 80.)})
Returns:
[type] -- [description]
"""
control_sum = 0.
shrink_control = 0.
if g_case.shape != g_control[0].shape:
raise ValueError(f"Need `g_case` and `g_control[0]` to have same shape. Got {g_case.shape}"+
f" and {g_control[0].shape}")
for ctr in g_control:
shrink_control += ctr.abs().mean()
ctr = ctr - g_case
ctr = torch.clamp(ctr, *clamp) # Kills grads for very bad cases (should instead cap grads!!!).
control_sum += torch.exp(ctr)
loss = torch.log(1. + control_sum)
shrink_zero = shrink * (g_case.abs().mean() + shrink_control) / len(g_control)
return torch.mean(loss) + shrink_zero.abs() | 1f528ff25984e0bb09bc49edf59d793a44281ddb | 11,657 |
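# Usage sketch for cox_cc_loss with random tensors standing in for network
# outputs: g_case for the case sample, g_control for three sampled controls.
torch.manual_seed(0)
g_case = torch.randn(8, 1)
g_control = [torch.randn(8, 1) for _ in range(3)]
loss = cox_cc_loss(g_case, g_control, shrink=0.01)
assert loss.dim() == 0  # a scalar loss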
def vector_field(mesh, v):
"""
Returns a np.array with values specified by `v`, where `v` should
be a iterable of length 3, or a function that returns an iterable of
length 3 when getting the coordinates of a cell of `mesh`.
"""
return field(mesh, v, dim=3) | 2d82fa86bc76367e2668b37815c068097d88c6fa | 11,658 |
from django.http import JsonResponse
def is_fav_recipe(request):
"""
Handles the requests from /ajax/is_fav_recipe/
Checks if a :model:`matega.recipe` is a saved recipe for a :model:'matega.user'
**Data**
Boolean if :model:`matega.recipe` is a saved recipe for :model:'matega.user'
"""
user_id = int(request.GET.get('user_id', None))
recipe_id = int(request.GET.get('recipe_id', None))
is_fav = False
user = User.objects.get(pk=user_id)
for rec in user.saved_recipes.values_list():
if rec[0] == recipe_id:
is_fav = True
data = {
'is_fav': is_fav
}
return JsonResponse(data) | f5ee3b21409f7a9ffe4ee427e19317f03b8db9c3 | 11,659 |
def people_interp():
    """
    <enumeratedValueSet variable="People"> <value value="500"/> </enumeratedValueSet>
    Integer between 1 and 500
    """
    # Returns a %-style template; the caller substitutes the People count later.
    return '<enumeratedValueSet variable="People"> <value value="%s"/> </enumeratedValueSet>' | 2aba1330a774e022c280d2e50e3fb63631989a88 | 11,660
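# Usage sketch: fill the %-style template returned by people_interp with a count.
print(people_interp() % 250)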
import subprocess
import re
import os
def check_lint(path, lint_name="md"):
"""lint命令及检测信息提取,同时删除中间文档xxx_lint.md或者xxx_lint.py"""
error_infos = []
lint_ext = "_lint.md" if lint_name == "md" else "_lint.py"
check_command = "mdl -s mdrules.rb" if lint_name == "md" else "pylint -j 4"
if lint_name == "md":
convert_to_markdown(path)
if lint_name == "py":
convert_to_py(path)
check_path = path.replace(".ipynb", lint_ext)
if lint_ext == "_lint.md":
math_f_info = check_mathematical_formula(check_path)
error_infos.extend(math_f_info)
cmd = "{} {}".format(check_command, check_path)
res = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8",)
info = res.stdout.read()
if info is not None:
info_list = info.split("\n")[:-1]
for i in info_list:
try:
location_, error_info = re.findall(":([0-9]+):(.*)", i)[0]
file_name = check_path.replace(lint_ext, ".ipynb")
error_infos.append((file_name, location_, error_info.strip()))
except IndexError:
pass
finally:
pass
os.remove(check_path)
return error_infos | 554e8a8742822b334c65e70fb8187d4fd38b72da | 11,661 |
def through_omas_s3(ods, method=['function', 'class_method'][1]):
"""
Test save and load S3
:param ods: ods
:return: ods
"""
filename = 'test.pkl'
if method == 'function':
save_omas_s3(ods, filename, user='omas_test')
ods1 = load_omas_s3(filename, user='omas_test')
else:
ods.save('s3', filename=filename, user='omas_test')
ods1 = ODS().load('s3', filename=filename, user='omas_test')
return ods1 | f89bb1c31a9bcbae869d07313ab10a7df658fd1c | 11,662 |
def read_packages(filename):
"""Return a python list of tuples (repository, branch), given a file
containing one package (and branch) per line.
Comments are excluded
"""
lines = load_order_file(filename)
packages = []
for line in lines:
if "," in line: # user specified a branch
path, branch = [k.strip() for k in line.split(",", 1)]
packages.append((path, branch))
else:
packages.append((line, "master"))
return packages | e73573003bd0388ed850fd2e996643a91199d30a | 11,663 |
import heapq
def find_min(x0, capacities):
"""
(int list, int list) --> (int list, int)
Find the schedule that minimizes the passenger wait time with the given capacity distribution
Uses a mixture of Local beam search and Genetic Algorithm
Returns the min result
"""
scores_and_schedules = []
# Generate 199 neighbouring schedules using the input schedule x0
init_neighbours = find_neighbours(199, 10, x0)
min_score = all_trains(x0, capacities, passengers)
min_sched = x0
heapq.heappush(scores_and_schedules,(min_score, x0))
# Add them all to the list, as well as the input schedule
for i in init_neighbours:
score = all_trains(i, capacities, passengers)
heapq.heappush(scores_and_schedules,(score, i))
if score < min_score:
min_score, min_sched = score, i
local_min_counter = 0
# Perform the genetic algorithm for optimization
while local_min_counter < 500:
scores_and_schedules = best_n(scores_and_schedules, capacities, 5)
if scores_and_schedules[0][0] < min_score:
min_score, min_sched = scores_and_schedules[0]
local_min_counter = 0
else:
local_min_counter += 1
return min_sched, min_score | 016cdd310f4b59e61349edd94b3b7cc387c3c7c1 | 11,664 |
import semver
def versioning(version: str) -> str:
"""
version to specification
Author: Huan <[email protected]> (https://github.com/huan)
X.Y.Z -> X.Y.devZ
"""
sem_ver = semver.parse(version)
major = sem_ver['major']
minor = sem_ver['minor']
patch = str(sem_ver['patch'])
if minor % 2:
patch = 'dev' + patch
fin_ver = '%d.%d.%s' % (
major,
minor,
patch,
)
return fin_ver | bfef27712b8595f52314f300743012270a42e64f | 11,665 |
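# Usage sketch for versioning, assuming the semver 2.x API (`semver.parse`) used above.
assert versioning('0.19.27') == '0.19.dev27'  # odd minor -> dev release
assert versioning('0.20.3') == '0.20.3'       # even minor -> unchanged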
def freshdesk_sync_contacts(contacts=None, companies=None, agents=None):
"""Iterate through all DepartmentUser objects, and ensure that each user's
information is synced correctly to a Freshdesk contact.
May optionally be passed in dicts of contacts & companies.
"""
try:
if not contacts:
LOGGER.info('Querying Freshdesk for current contacts')
contacts = get_freshdesk_objects(obj_type='contacts', progress=False, params={'page': 1})
contacts = {c['email'].lower(): c for c in contacts if c['email']}
if not companies:
LOGGER.info('Querying Freshdesk for current companies')
companies = get_freshdesk_objects(obj_type='companies', progress=False, params={'page': 1})
companies = {c['name']: c for c in companies}
# FIXME: ignore Agents for the time being.
#if not agents:
# LOGGER.info('Querying Freshdesk for current agents')
# agents = get_freshdesk_objects(obj_type='agents', progress=False, params={'page': 1})
# agents = {a['contact']['email'].lower(): a['contact'] for a in agents if a['contact']['email']}
except Exception as e:
LOGGER.exception(e)
return False
# Filter DepartmentUsers: valid email (contains @), not -admin, DN contains 'OU=Users', active
d_users = DepartmentUser.objects.filter(email__contains='@', ad_dn__contains='OU=Users', active=True).exclude(email__contains='-admin')
LOGGER.info('Syncing details for {} DepartmentUsers to Freshdesk'.format(d_users.count()))
for user in d_users:
if user.email.lower() in contacts:
# The DepartmentUser exists in Freshdesk; verify and update details.
fd = contacts[user.email.lower()]
data = {}
user_sync = False
changes = []
if user.name != fd['name']:
user_sync = True
data['name'] = user.name
changes.append('name')
if user.telephone != fd['phone']:
user_sync = True
data['phone'] = user.telephone
changes.append('phone')
if user.title != fd['job_title']:
user_sync = True
data['job_title'] = user.title
changes.append('job_title')
if user_sync: # Sync user details to their Freshdesk contact.
r = update_freshdesk_object('contacts', data, fd['id'])
if r.status_code == 403: # Forbidden
# A 403 response probably means that we hit the API throttle limit.
# Abort the synchronisation.
LOGGER.error('HTTP403 received from Freshdesk API, aborting')
return False
LOGGER.info('{} was updated in Freshdesk (status {}), changed: {}'.format(
user.email.lower(), r.status_code, ', '.join(changes)))
else:
data = {'name': user.name, 'email': user.email.lower(),
'phone': user.telephone, 'job_title': user.title}
department = user.org_data.get('units', []) if user.org_data else []
department = department[0].get('name', '') if len(department) > 0 else None
if department and department in companies:
data['company_id'] = companies[department]['id']
r = update_freshdesk_object('contacts', data)
if not r.status_code == 200: # Error, unable to process request.
LOGGER.warn('{} not created in Freshdesk (status {})'.format(user.email.lower(), r.status_code))
else:
LOGGER.info('{} created in Freshdesk (status {})'.format(user.email.lower(), r.status_code))
return True | efa90bb449843472e0e65819b10845595398a4cc | 11,666 |
def device_create_from_symmetric_key(transportType, deviceId, hostname, symmetricKey): # noqa: E501
"""Create a device client from a symmetric key
# noqa: E501
:param transportType: Transport to use
:type transportType: str
:param deviceId:
:type deviceId: str
:param hostname: name of the host to connect to
:type hostname: str
:param symmetricKey: key to use for connection
:type symmetricKey: str
:rtype: ConnectResponse
"""
return "do some magic!" | 15ac85df5a41f88044cf449f0f9d99bfcd72d570 | 11,667 |
import pandas as pd
def create_with_index(data, columns):
"""
Create a new indexed pd.DataFrame
"""
to_df = {columns[0]: [x for x in range(1, len(data) + 1)], columns[1]: data}
data_frame = pd.DataFrame(to_df)
data_frame.set_index("Index", inplace=True)
return data_frame | f9bb854af5d77f4355d64c8c56a9fdda7bd2cf93 | 11,668 |
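# Usage sketch for create_with_index: the first column becomes a 1-based index.
df = create_with_index(['a', 'b', 'c'], ['Index', 'value'])
assert list(df.index) == [1, 2, 3]
assert list(df['value']) == ['a', 'b', 'c']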
def random_aes_key(blocksize=16):
"""Set 2 - Challenge 11"""
return afb(np.random.bytes(blocksize)) | f5bfad117886e51bbb810274c62e44e89ec2c79a | 11,669 |
import json
import torch
from torch import nn
def create_and_load(directory: str,
name: str,
new_name: str = None) -> nn.Module:
"""Instantiate an unkown function (uf) required
by the high-order functions with a trained neural network
Args:
directory: directory to the saved weights of an NN
name: name of the unknown function
new_name: the new name of the unknown function
"""
if new_name is None:
new_name = name
with open('{}/{}.json'.format(directory, name)) as json_data:
params_dict = json.load(json_data)
params_dict['name'] = new_name
if params_dict['output_activation'] == 'None':
params_dict['output_activation'] = None
elif params_dict['output_activation'] == 'sigmoid':
params_dict['output_activation'] = torch.sigmoid
elif params_dict['output_activation'] == 'softmax':
params_dict['output_activation'] = nn.Softmax(dim=1)
else:
raise NotImplementedError()
new_fn, _ = get_nn_from_params_dict(params_dict)
new_fn.load('{}/{}.pth'.format(directory, name))
new_fn.eval()
return new_fn | 1ebf471fb624918b52953748a4f275b22aeaba1a | 11,670 |
def select_region_climatedata(gcm_name, rcp, main_glac_rgi):
"""
Get the regional temperature and precipitation for a given dataset.
Extracts all nearest neighbor temperature and precipitation data for a given set of glaciers. The mean temperature
and precipitation of the group of glaciers is returned. If two glaciers have the same temp/prec data, that data
is only used once in the mean calculations. Additionally, one would not expect for different GCMs to be similar
because they all have different resolutions, so this mean calculations will have different numbers of pixels.
Parameters
----------
gcm_name : str
GCM name
rcp : str
rcp scenario (ex. rcp26)
main_glac_rgi : pd.DataFrame
glacier dataset used to select the nearest neighbor climate data
"""
# Date tables
print('select_region_climatedata fxn dates supplied manually')
dates_table_ref = modelsetup.datesmodelrun(startyear=2000, endyear=2100, spinupyears=0,
option_wateryear=1)
dates_table = modelsetup.datesmodelrun(startyear=2000, endyear=2100, spinupyears=0,
option_wateryear=1)
# Load gcm lat/lons
gcm = class_climate.GCM(name=gcm_name, rcp_scenario=rcp)
# Select lat/lon from GCM
ds_elev = xr.open_dataset(gcm.fx_fp + gcm.elev_fn)
gcm_lat_values_all = ds_elev.lat.values
gcm_lon_values_all = ds_elev.lon.values
ds_elev.close()
# Lat/lon dictionary to convert
gcm_lat_dict = dict(zip(range(gcm_lat_values_all.shape[0]), list(gcm_lat_values_all)))
gcm_lon_dict = dict(zip(range(gcm_lon_values_all.shape[0]), list(gcm_lon_values_all)))
# Find nearest neighbors for glaciers that have pixles
latlon_nearidx = pd.DataFrame(np.zeros((main_glac_rgi.shape[0],2)), columns=['CenLat','CenLon'])
latlon_nearidx.iloc[:,0] = (np.abs(main_glac_rgi.CenLat.values[:,np.newaxis] - gcm_lat_values_all).argmin(axis=1))
latlon_nearidx.iloc[:,1] = (np.abs(main_glac_rgi.CenLon.values[:,np.newaxis] - gcm_lon_values_all).argmin(axis=1))
latlon_nearidx = latlon_nearidx.drop_duplicates().sort_values(['CenLat', 'CenLon'])
latlon_nearidx.reset_index(drop=True, inplace=True)
latlon_reg = latlon_nearidx.copy()
latlon_reg.CenLat.replace(gcm_lat_dict, inplace=True)
latlon_reg.CenLon.replace(gcm_lon_dict, inplace=True)
# ===== LOAD CLIMATE DATA =====
# Reference climate data
ref_gcm = class_climate.GCM(name=input.ref_gcm_name)
# Air temperature [degC], Precipitation [m], Elevation [masl], Lapse rate [K m-1]
ref_temp, ref_dates = ref_gcm.importGCMvarnearestneighbor_xarray(ref_gcm.temp_fn, ref_gcm.temp_vn, latlon_reg,
dates_table_ref)
ref_prec, ref_dates = ref_gcm.importGCMvarnearestneighbor_xarray(ref_gcm.prec_fn, ref_gcm.prec_vn, latlon_reg,
dates_table_ref)
# ref_elev = ref_gcm.importGCMfxnearestneighbor_xarray(ref_gcm.elev_fn, ref_gcm.elev_vn, latlon_reg)
# GCM climate data
gcm_temp_all, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(gcm.temp_fn, gcm.temp_vn, latlon_reg, dates_table)
gcm_prec_all, gcm_dates = gcm.importGCMvarnearestneighbor_xarray(gcm.prec_fn, gcm.prec_vn, latlon_reg, dates_table)
# gcm_elev = gcm.importGCMfxnearestneighbor_xarray(gcm.elev_fn, gcm.elev_vn, latlon_reg)
# GCM subset to agree with reference time period to calculate bias corrections
gcm_subset_idx_start = np.where(dates_table.date.values == dates_table_ref.date.values[0])[0][0]
gcm_subset_idx_end = np.where(dates_table.date.values == dates_table_ref.date.values[-1])[0][0]
gcm_temp = gcm_temp_all[:,gcm_subset_idx_start:gcm_subset_idx_end+1]
gcm_prec = gcm_prec_all[:,gcm_subset_idx_start:gcm_subset_idx_end+1]
## ===== BIAS ADJUSTMENTS =====
# OPTION 2: Adjust temp and prec according to Huss and Hock (2015) accounts for means and interannual variability
if input.option_bias_adjustment == 2:
# TEMPERATURE BIAS CORRECTIONS
# Mean monthly temperature
ref_temp_monthly_avg = (ref_temp.reshape(-1,12).transpose()
.reshape(-1,int(ref_temp.shape[1]/12)).mean(1).reshape(12,-1).transpose())
gcm_temp_monthly_avg = (gcm_temp.reshape(-1,12).transpose()
.reshape(-1,int(gcm_temp.shape[1]/12)).mean(1).reshape(12,-1).transpose())
# Monthly bias adjustment
gcm_temp_monthly_adj = ref_temp_monthly_avg - gcm_temp_monthly_avg
# Monthly temperature bias adjusted according to monthly average
t_mt = gcm_temp_all + np.tile(gcm_temp_monthly_adj, int(gcm_temp_all.shape[1]/12))
# Mean monthly temperature bias adjusted according to monthly average
t_m25avg = np.tile(gcm_temp_monthly_avg + gcm_temp_monthly_adj, int(gcm_temp_all.shape[1]/12))
# Calculate monthly standard deviation of temperature
ref_temp_monthly_std = (ref_temp.reshape(-1,12).transpose()
.reshape(-1,int(ref_temp.shape[1]/12)).std(1).reshape(12,-1).transpose())
gcm_temp_monthly_std = (gcm_temp.reshape(-1,12).transpose()
.reshape(-1,int(gcm_temp.shape[1]/12)).std(1).reshape(12,-1).transpose())
variability_monthly_std = ref_temp_monthly_std / gcm_temp_monthly_std
# Bias adjusted temperature accounting for monthly mean and variability
gcm_temp_bias_adj = t_m25avg + (t_mt - t_m25avg) * np.tile(variability_monthly_std, int(gcm_temp_all.shape[1]/12))
# PRECIPITATION BIAS CORRECTIONS
# Calculate monthly mean precipitation
ref_prec_monthly_avg = (ref_prec.reshape(-1,12).transpose()
.reshape(-1,int(ref_temp.shape[1]/12)).mean(1).reshape(12,-1).transpose())
gcm_prec_monthly_avg = (gcm_prec.reshape(-1,12).transpose()
.reshape(-1,int(gcm_temp.shape[1]/12)).mean(1).reshape(12,-1).transpose())
bias_adj_prec = ref_prec_monthly_avg / gcm_prec_monthly_avg
# Bias adjusted precipitation accounting for differences in monthly mean
gcm_prec_bias_adj = gcm_prec_all * np.tile(bias_adj_prec, int(gcm_temp_all.shape[1]/12))
# Regional means
reg_mean_temp_biasadj = gcm_temp_bias_adj.mean(axis=0)
reg_mean_prec_biasadj = gcm_prec_bias_adj.mean(axis=0)
return reg_mean_temp_biasadj, reg_mean_prec_biasadj | 8e2c4bf8a942b4a21d5549e9af87bacb75f92f26 | 11,671 |
import numpy as np
def get_patch_boundaries(mask_slice, eps=2):
"""
Computes coordinates of SINGLE patch on the slice. Behaves incorrectly in the case of multiple tumors on the slice.
:mask_slice: 2D ndarray, contains mask with <0, 1, 2> values of pixels
:eps: int, number of additional pixels we extract around the actual mask coordinates
    :return: `x_min`, `x_max`, `y_min`, `y_max`
"""
# check if we work with mask_slice that contains at least one non-zero pixel
if np.sum(mask_slice[:, :]) <= 0:
raise ValueError("Slice does not contains any tumors.")
    # smallest row index that has a nonzero pixel in its layer
x_min = None
for x in range(mask_slice.shape[0]):
if np.sum(mask_slice[x, :]) > 0:
# get first from the left index of nonzero 1D slice and break
x_min = x
break
x_max = None
for x in range(mask_slice.shape[0] - 1, -1, -1):
if np.sum(mask_slice[x, :]) > 0:
# get the first from the right index of nonzero 1D slice and break
x_max = x
break
y_min = None
for y in range(mask_slice.shape[1]):
if np.sum(mask_slice[:, y]) > 0:
# get the first from the bottom index of nonzero 1D slice and break
y_min = y
break
y_max = None
for y in range(mask_slice.shape[1] - 1, -1, -1):
if np.sum(mask_slice[:, y]) > 0:
# get the first from the top index of nonzero 1D slice and break
y_max = y
break
# apply `eps` parameter to the actual `min` and `max` values
x_min = max(x_min - eps, 0)
x_max = min(x_max + eps, mask_slice.shape[0] - 1)
y_min = max(y_min - eps, 0)
y_max = min(y_max + eps, mask_slice.shape[1] - 1)
return x_min, x_max, y_min, y_max | 0af985273b3e509bf9ee2580a64c8ddd6392d5a7 | 11,672 |
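# Usage sketch for get_patch_boundaries on a small synthetic mask: the tumor
# occupies rows 3-4 and columns 2-5, and eps=1 grows the patch by one pixel.
mask = np.zeros((8, 8), dtype=int)
mask[3:5, 2:6] = 1
assert get_patch_boundaries(mask, eps=1) == (2, 5, 1, 6)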
from typing import Union
from typing import List
def get_sqrt_ggn_extension(
subsampling: Union[None, List[int]], mc_samples: int
) -> Union[SqrtGGNExact, SqrtGGNMC]:
"""Instantiate ``SqrtGGN{Exact, MC} extension.
Args:
subsampling: Indices of active samples.
mc_samples: Number of MC-samples to approximate the loss Hessian. ``0``
uses the exact loss Hessian.
Returns:
Instantiated SqrtGGN extension.
"""
return (
SqrtGGNExact(subsampling=subsampling)
if mc_samples == 0
else SqrtGGNMC(subsampling=subsampling, mc_samples=mc_samples)
) | 47f074a387d0a6182d93061cbaaf4fc397be26c0 | 11,673 |
import cv2
def gray_to_rgb(image):
"""convert cv2 image from GRAYSCALE to RGB
:param image: the image to be converted
:type image: cv2 image
:return: converted image
:rtype: cv2 image
"""
return cv2.cvtColor(image, cv2.COLOR_GRAY2RGB) | feede538a2822d6d7f36bb6bbe40c845ca55808d | 11,674 |
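# Usage sketch for gray_to_rgb: a single-channel image becomes three-channel RGB.
import numpy as np
gray = np.zeros((4, 4), dtype=np.uint8)
assert gray_to_rgb(gray).shape == (4, 4, 3)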
def win_to_cygwin(winpath):
"""run `cygpath winpath` to get cygwin path"""
x = detail.command.run(['cygpath', winpath])
assert(len(x) == 1)
return x[0] | 1f941628fae51cfca7621c62c454e80d984f7019 | 11,675 |
def nucleotide_composition_to_letter(composition):
"""
Converts dictionary of {nucleotide letter: proportion} pairs
to IUPAC degenerate DNA letter.
Usage:
c = {'A': 1}
print(nucleotide_composition_to_letter(c)) --> 'A'
c = dict(zip('ACGT', [1, 1, 1, 1]))
print(nucleotide_composition_to_letter(c)) --> 'N'
c = dict(zip('ACGT', [1, 1, 2, 1]))
print(nucleotide_composition_to_letter(c)) --> 'n'
"""
nonzero_nucleotides = ''.join(sorted([n
for n, v in composition.items()
if v > 0]))
nonzero_proportions = [composition[n] for n in nonzero_nucleotides]
equimolar = min(nonzero_proportions) == max(nonzero_proportions)
letter = DEGENERATE_NUCLEOTIDE_CODE_REVERSED.get(nonzero_nucleotides,
DEFAULT_NUCLEOTIDE_LABEL)
if equimolar:
return letter
return letter.lower() | 2ad080d3a04cfc754f46d490a272362cadecbfd2 | 11,676 |
def forcast(doc):
"""
:param: doc object
:returns: tuple with grade level, age level
"""
word_tokens = doc.word_tokens
monosyllables = 0
for i in word_tokens:
if i.isalpha() == False and len(i) < 2:
word_tokens.remove(i)
for i in word_tokens[10:159]:
if syllable_count(i) < 2:
monosyllables += 1
gl = 20 - (monosyllables/10)
ra = 25 - (monosyllables/10)
return (gl, ra, monosyllables) | e71debbdaf057c61eaa620419b0357e603868989 | 11,677 |
import geocoder
def convert_coordinates_to_country(deg_x: float, deg_y: float) -> str:
""" returns country name """
return geocoder.osm([deg_x, deg_y], method="reverse").country | 5dca9d54bfa154a33a94550f983a3e9457cf2d52 | 11,678 |
def fixture_items(test_list):
"""Returns an instance of ItemCollection for testing"""
return test_list.get_items(query=QUERY) | 6351ffdb9ce8a65d7a08d55c6cfa9db8ef4aa978 | 11,679 |
def numbers_lists_entry_widget(
list_param: list,
name: str,
expect_amount: int = -1,
expect_int: bool = False,
help=None,
) -> list:
"""
create a list text input field and checks if expected amount and type matches if set.
:param list_param: a list variable handled by this wiget
:param name: the name/unique key of the text input for streamlit
:param expect_amount: set >0 to activate
:return: the list maybe modified by users input
"""
# train_lists_str = clean_list_str(str(self.trainer_params_json["gen"]["train"]["lists"]))
lists_str = clean_list_str(str(list_param))
logger.debug(f"cleaned str: {lists_str}")
lists_field, lists_state = st.columns([10, 1])
lists_field = lists_field.text_input(name, value=lists_str, help=help)
if lists_field:
lists_field = clean_list_str(lists_field)
lists_list = [str(x).replace(" ", "") for x in lists_field.split(",")]
ok = True if lists_list else False
if expect_amount > 0 and len(lists_list) != expect_amount:
ok = False
if expect_int:
for idx in range(len(lists_list)):
try:
lists_list[idx] = int(lists_list[idx])
except:
ok = False
else:
for idx in range(len(lists_list)):
try:
lists_list[idx] = float(lists_list[idx])
except:
ok = False
if ok:
lists_state.latex(state_ok)
return lists_list
else:
lists_state.latex(state_failed)
return [] | c997089835eba0328100a638210b33aea0ca0507 | 11,680 |
import praw
def get_daily_discussion_post(subreddit_instance: praw.models.Subreddit):
"""Try to get the daily discussions post for a subreddit.
Args:
subreddit_instance
Returns:
The submission object for the discussion post, or None if it couldn't be found.
Works by searching the stickied posts of the subreddit for a post with 'daily discussion' in the title.
"""
print('Searching stickied posts for daily discussion posts..')
for sticky_num in [1, 2]:
discussion_post = subreddit_instance.sticky(number=sticky_num)
if 'daily discussion' in discussion_post.title.lower():
print(f'Got daily discussion post, title {discussion_post.title}')
return discussion_post
print("Couldn't find daily discussion post!")
return None | 841612a8b2d2fa7a8a74f081e360a69884b20925 | 11,681 |
import numpy as np
from sklearn.model_selection import train_test_split
def metric_by_training_size(X, y, classifier_list, training_set, metric, as_percentage=True):
    """
    This is a refactoring of code that repeats a metric for fitted models across training-set percentage sizes.
i.e.: Find accuracy rating for multiple training-test splits for svm, random forests, and naive bayes and return an
np.ndarray
:param X:
:param y:
:param classifier_list:
:param training_set:
:param metric:
:param as_percentage:
:return: np.ndarray
"""
metric_array = np.zeros((len(training_set), len(classifier_list)))
for row_num, training_size in enumerate(training_set):
X_train_iter, X_test_iter, y_train_iter, y_test_iter = train_test_split(X, y,
test_size=1 - training_size,
random_state=0)
metric_list = []
for classifier in classifier_list:
y_pred = classifier.fit(X_train_iter, y_train_iter).predict(X_test_iter)
metric_list.append(metric(y_test_iter, y_pred))
metric_array[row_num] = metric_list
metric_array = metric_array.transpose()
return 100 * metric_array if as_percentage else metric_array | 3d918989b28db47479da3479b1804b7a502b9ce0 | 11,682 |
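# Usage sketch for metric_by_training_size on the iris data with two classifiers
# and two training-set fractions; rows are classifiers, columns training sizes.
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
X_iris, y_iris = load_iris(return_X_y=True)
acc = metric_by_training_size(X_iris, y_iris,
                              [GaussianNB(), DecisionTreeClassifier(random_state=0)],
                              [0.5, 0.8], accuracy_score)
assert acc.shape == (2, 2)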
from typing import Optional
def apply_back_defence(
board: Board, opponent: Optional[Player] = None
) -> Optional[Action]:
"""
Move to intercept.
"""
player = board.controlled_player
ball = board.ball
if not opponent:
opponent = ball.player
if opponent.vector.x < 0:
# trying to predict opponent's next move
new_vector = __get_opponent_vector(board, opponent)
opponent = deepcopy(opponent)
opponent.vector = new_vector
intercept_interval = speed_interval(opponent.position, opponent.vector, opponent=player)
if intercept_interval:
target = opponent.future_position(turns=intercept_interval.lower() + 3)
else:
target = opponent.future_position(turns=5)
vector = Vector.from_point(target - player.position)
should_slide = __should_slide(board, player, ball, intercept_interval)
action = Action.Slide if should_slide else None
logger.debug(
"Slide action: Move to intercept, "
f"opponent = {opponent}, "
f"action = {action}, "
f"intercept_interval = {intercept_interval}, "
f"intercept_vector = {vector}."
)
return board.set_action(action, vector, sprint=True, dribble=False) | 20af2f28a5d2c55c19c821fff218843d0ff69164 | 11,683 |
def remove_namespace(tag, ns):
"""Remove namespace from xml tag."""
for n in ns.values():
tag = tag.replace('{' + n + '}', '')
return tag | d4837a3d906baf8e439806ccfea76284e8fd9b87 | 11,684 |
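# Usage sketch for remove_namespace with a hypothetical namespace mapping.
ns = {'ex': 'http://example.com/ns'}
assert remove_namespace('{http://example.com/ns}item', ns) == 'item'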
import numpy as np
def false_prediction_pairs(y_pred, y_true):
"""
Prints pairs of predicted and true classes that differ.
Returns
-------
false_pairs
The pairs of classes that differ.
counts
Number of occurences of the pairs.
"""
cond = y_pred != y_true
false_preds = np.stack([y_true[cond], y_pred[cond]], axis=-1)
false_pairs, counts = np.unique(false_preds, axis=0, return_counts=True)
return false_pairs, counts | 460acb967b95d9e4c03e557e6bd1ede2dd7d0902 | 11,685 |
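# Usage sketch for false_prediction_pairs: one sample of class 1 predicted as 2.
y_true = np.array([0, 1, 1, 2])
y_pred = np.array([0, 2, 1, 2])
pairs, counts = false_prediction_pairs(y_pred, y_true)
assert pairs.tolist() == [[1, 2]] and counts.tolist() == [1]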
import cv2
import numpy as np
def draw_boxes_and_labels_to_image_multi_classes(image, classes, coords, scores=None, classes_name=None, classes_colors=None, font_color=[0, 0, 255]):
"""
Draw bboxes and class labels on image. Return or save the image with bboxes
Parameters
-----------
image : numpy.array
The RGB image [height, width, channel].
classes : list of int
A list of class ID (int).
coords : list of int
A list of list for coordinates.
- Should be [x, y, x2, y2]
scores : list of float
A list of score (float). (Optional)
classes_name : list of str
For converting ID to string on image.
classes_colors : list of color
A list of color [ [r,g,b], ...].
font_color : front color
Front color
Returns
-------
numpy.array
The output image.
"""
image = image.copy()
imh, imw = image.shape[0:2]
    thick = int((imh + imw) // 500)  # line thickness
for i, _v in enumerate(coords):
x, y, x2, y2 = np.asarray(coords[i], np.int32)
bbox_color = [0, 255, 0] if classes_colors is None else classes_colors[classes[i]]
cv2.rectangle(image, (x, y), (x2, y2), bbox_color, thick)
if classes is not None:
text = []
for c in classes[i]:
class_text = classes_name[c] if classes_name is not None else str(c)
# score_text = " %.2f" % (scores[i]) if scores is not None else ''
t = class_text #+ score_text
text.append(t)
text = '\n'.join(text)
score_text = " %.2f" % (scores[i]) if scores is not None else ''
text += score_text
font_scale = 1.0e-3 * imh
# text_size, _ = cv2.getTextSize(text, 0, font_scale, int(thick / 2) + 1)
# cv2.rectangle(image, (x, y), (x+text_size[0], y-text_size[1]), bbox_color, -1)
# cv2.putText(image, text, (x, y), 0, font_scale, font_color, int(thick / 3) + 1)
image = im_tool.put_text(image, text, (x, y), font_scale*32, font_color, bbox_color)
return image | 5a11dd98019e5096c83137c43457a312598e2be8 | 11,686 |
import os
from flask import current_app, render_template, request
def route_open_file_dialog(fqname):
"""Return html of file structure for that parameter"""
# these arguments are only set when called with the `navigate_to` function on an already open
# file dialog
current_folder = request.args.get('current_folder')
folder = request.args.get('folder')
config = current_app.cea_config
section, parameter_name = fqname.split(':')
parameter = config.sections[section].parameters[parameter_name]
if not current_folder:
# first time calling, use current value of parameter for current folder
current_folder = os.path.dirname(parameter.get())
folder = None
else:
current_folder = os.path.abspath(os.path.join(current_folder, folder))
if not os.path.exists(current_folder):
# use home directory if it doesn't exist
current_folder = os.path.expanduser('~')
folders = []
files = []
for entry in os.listdir(current_folder):
if os.path.isdir(os.path.join(current_folder, entry)):
folders.append(entry)
else:
ext = os.path.splitext(entry)[1]
if parameter._extensions and ext and ext[1:] in parameter._extensions:
files.append(entry)
elif not parameter._extensions:
# any file can be added
files.append(entry)
breadcrumbs = os.path.normpath(current_folder).split(os.path.sep)
return render_template('file_listing.html', current_folder=current_folder,
folders=folders, files=files, title=parameter.help, fqname=fqname,
parameter_name=parameter.name, breadcrumbs=breadcrumbs) | 22c3f5f1b43fd473e2e15a5da8c2c5d570fee372 | 11,687 |
from flask import request
def simulate():
"""
Runs a simulation given a context, a simulator, a trace, and a depth
Method PUT
"""
context = request.get_json()['context']
simulator = request.get_json()['simulator']
trace = request.get_json()['trace']
depth = request.get_json()['depth']
if context is None or simulator is None or trace is None or depth is None:
return {'result': 'error'}, 400
ctx = contexts[context]['context']
sim = contexts[context]['simulators'][simulator]
tra = contexts[context]['traces'][trace]
dep = int(depth)
assert ctx is not None
assert sim is not None
assert tra is not None
sim.simulate(tra, dep)
return {'result': 'ok'}, 200 | d905eb9fa34454588d4d1ab95794a6b9c41074ac | 11,688 |
import chex
from chex import Array
def soft_expected_backup_rl(
next_q: Array,
next_pol: Array,
next_log_pol: Array,
rew: Array,
done: Array,
discount: float,
er_coef: float,
) -> Array:
"""Do soft expected bellman-backup :math:`r + \gamma P \langle \pi, q - \tau * \log{\pi}\rangle`.
Args:
next_q (Array): ? x dA q-values.
next_pol (Array): ? x dA policy.
next_log_pol (Array): ? x dA log-policy.
rew (Array): ? x 1 rewards.
done (Array): ? x 1 done flags.
discount (float): Discount factor.
er_coef (float): Entropy coefficient.
Returns:
q (Array): ? x 1 q-values.
"""
chex.assert_rank([next_q, next_pol], 2)
next_v = next_pol * (next_q - er_coef * next_log_pol)
next_v = next_v.sum(axis=-1, keepdims=True) # ? x 1
q = rew + discount * next_v * (~done)
return q | fb1fed9946e05e4ad464f54c559be5e32c1f2e8e | 11,689 |
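# Usage sketch for soft_expected_backup_rl with a batch of 2 states and 3
# actions; `done` is boolean so that `~done` acts as logical negation.
import jax.numpy as jnp
next_pol = jnp.array([[0.5, 0.3, 0.2], [0.2, 0.2, 0.6]])
q = soft_expected_backup_rl(
    next_q=jnp.ones((2, 3)),
    next_pol=next_pol,
    next_log_pol=jnp.log(next_pol),
    rew=jnp.zeros((2, 1)),
    done=jnp.array([[False], [True]]),
    discount=0.99,
    er_coef=0.1,
)
assert q.shape == (2, 1)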
import os
import numpy as np
from collections import OrderedDict
def readcal(calfile):
"""
This reads all of the information from a master calibration index and returns
it in a dictionary where each calibration type has a structured arrays that
can be accessed by the calibration name (e.g. 'dark').
"""
if os.path.exists(calfile) == False:
raise ValueError(calfile+' NOT FOUND')
lines = dln.readlines(calfile)
lines = np.char.array(lines)
# Get rid of comment and blank lines
gd,ngd = dln.where(( lines.find('#') != 0) & (lines=='')==False )
if ngd==0:
raise ValueError('No good calibration lines')
lines = lines[gd]
# Initialize calibration dictionary
caldict = OrderedDict()
dtdict = OrderedDict()
# -- Darks --
# mjd1, mjd2, name, frames
# dark 55600 56860 12910009 12910009-12910037
# dark 56861 99999 15640003 15640003-15640021
dtdict['dark'] = np.dtype([('mjd1',int),('mjd2',int),('name',np.str,50),('frames',np.str,100)])
# -- Flats --
# mjd1, mjd2, name, frames, nrep, dithered
# flat 99999 55761 01380106 1380106-1380134 1 1
# flat 99999 99999 02410013 2410013-2410022 1 0
dtdict['flat'] = np.dtype([('mjd1',int),('mjd2',int),('name',np.str,50),('frames',np.str,100),
('nrep',int),('dithered',int)])
# -- Sparse --
# mjd1, mjd2, name, frames, darkframes, dmax, maxread
# sparse 55600 55761 01590015 1590015-1590024 0 21 30,30,20
# sparse 55797 99999 02410059 2410059-2410068 2410058,2410069 21 30,30,20
dtdict['sparse'] = np.dtype([('mjd1',int),('mjd2',int),('name',np.str,50),('frames',np.str,100),
('darkframes',np.str,100),('dmax',int),('maxread',np.str,100)])
# -- Fiber --
# mjd1, mjd2, name
# fiber 55600 55761 01970078
# fiber 55797 56860 02410024
dtdict['fiber'] = np.dtype([('mjd1',int),('mjd2',int),('name',np.str,50)])
# -- Badfiber --
# mjd1, mjd2, frames
# badfiber 55600 57008 0
# badfiber 57009 57177 195
dtdict['badfiber'] = np.dtype([('mjd1',int),('mjd2',int),('frames',np.str,100)])
# -- Fixfiber --
# mjd1, mjd2, name
# fixfiber 56764 56773 1
# fixfiber 58038 58046 2
dtdict['fixfiber'] = np.dtype([('mjd1',int),('mjd2',int),('name',np.str,50)])
# -- Wave --
# mjd1, mjd2, name, frames, psfid
# wave 55699 55699 01370096 1370096,1370099 1370098
# wave 55700 55700 01380079 1380079 1380081
dtdict['wave'] = np.dtype([('mjd1',int),('mjd2',int),('name',np.str,50),('frames',np.str,100),
('psfid',int)])
# -- Multiwave --
# mjd1, mjd2, name, frames
# multiwave 55800 56130 2380000 02390007,02390008,02500007
# multiwave 56130 56512 5680000 05870007,05870008,05870018,05870019
dtdict['multiwave'] = np.dtype([('mjd1',int),('mjd2',int),('name',np.str,50),('frames',np.str,500)])
# -- LSF --
# mjd1, mjd2, name, frames, psfid
# lsf 55800 56130 03430016 03430016 03430020
# lsf 56130 56512 07510018 07510018 07510022
dtdict['lsf'] = np.dtype([('mjd1',int),('mjd2',int),('name',np.str,50),('frames',np.str,100),
('psfid',int)])
# -- Det --
# mjd1, mjd2, name, linid
# det 99999 99999 55640 0
# det 55600 56860 11870003 11870003
dtdict['det'] = np.dtype([('mjd1',int),('mjd2',int),('name',np.str,50),('linid',int)])
# -- BPM --
# mjd1, mjd2, name, darkid, flatid
# bpm 99999 99999 05560001 5560001 4750009
# bpm 55600 56860 12910009 12910009 4750009
dtdict['bpm'] = np.dtype([('mjd1',int),('mjd2',int),('name',np.str,50),('darkid',int),
('flatid',int)])
# -- Littrow --
# mjd1, mjd2, name, psfid
# littrow 55600 56860 06670109 6670109
# littrow 56861 99999 13400052 13400052
dtdict['littrow'] = np.dtype([('mjd1',int),('mjd2',int),('name',np.str,50),('psfid',int)])
# -- Persist --
# mjd1, mjd2, name, darkid, flatid, thresh
# persist 55600 56860 04680019 4680019 4680018 0.03
# persist 56861 99999 13400061 13400061 13400060 0.03
dtdict['persist'] = np.dtype([('mjd1',int),('mjd2',int),('name',np.str,50),('darkid',int),
('flatid',int),('thresh',float)])
# -- Persistmodel --
# mjd1, mjd2, name
# persistmodel 55600 56860 57184
# persistmodel 56861 99999 0
dtdict['persistmodel'] = np.dtype([('mjd1',int),('mjd2',int),('name',np.str,50)])
# -- Response --
# mjd1, mjd2, name, fluxid, psfid, temp
# response 55600 99999 0 0 0 0
dtdict['response'] = np.dtype([('mjd1',int),('mjd2',int),('name',np.str,50),('fluxid',int),
('psfid',int),('temp',float)])
# Readnoise
# frame1, frame2
# rn 1380094 1380095
# rn 1380102 1380103
#dtdict['rn'] = np.dtype([('frame1',int),('frame2',int)])
# Gain
# frame1, frame2
#dtdict['gain'] = np.dtype([('frame1',int),('frame2',int)])
# READNOISE and GAIN lines are NOT used
# Load the data
for caltype in dtdict.keys():
cat = loadcaltype(lines,caltype,dtdict[caltype])
caldict[caltype.strip()] = cat
return caldict | ec2dc4531f9504695c3f4f65952510ccaa913944 | 11,690 |
import os
import sys
import subprocess
from pwd import getpwuid
def global_attributes_dict():
# type: () -> Dict[str, str]
"""Set global attributes required by conventions.
Currently CF-1.6 and ACDD-1.3.
Returns
-------
global_atts: dict
Still needs title, summary, source, creator_institution,
product_version, references, cdm_data_type, institution,
geospatial_vertical_{min,max,positive,units}, ...
References
----------
CF Conventions document:
http://cfconventions.org
ACDD document:
http://wiki.esipfed.org/index.php/Category:Attribute_Conventions_Dataset_Discovery
NCEI Templates:
https://www.nodc.noaa.gov/data/formats/netcdf/v2.0/
"""
username = getpwuid(os.getuid())[0]
global_atts = dict(
Conventions="CF-1.6 ACDD-1.3",
standard_name_vocabulary="CF Standard Name Table v32",
history=(
"{now:{date_fmt:s}}: Created by {progname:s} "
"with command line: {cmd_line:s}"
).format(
now=RUN_DATE,
date_fmt=UDUNITS_DATE,
progname=sys.argv[0],
cmd_line=COMMAND_LINE,
),
source=("Created by {progname:s} " "with command line: {cmd_line:s}").format(
progname=sys.argv[0],
cmd_line=COMMAND_LINE,
),
date_created="{now:{date_fmt:s}}".format(now=RUN_DATE, date_fmt=ACDD_DATE),
date_modified="{now:{date_fmt:s}}".format(now=RUN_DATE, date_fmt=ACDD_DATE),
date_metadata_modified="{now:{date_fmt:s}}".format(
now=RUN_DATE, date_fmt=ACDD_DATE
),
creator_name=username,
creator_email="{username:s}@{host:s}".format(
username=username,
host=MAIN_HOST,
),
creator_institution=MAIN_HOST,
)
try:
global_atts["conda_packages"] = subprocess.check_output(
# Full urls including package, version, build, and MD5
["conda", "list", "--explicit", "--md5"],
universal_newlines=True,
)
except OSError:
pass
try:
global_atts["pip_packages"] = subprocess.check_output(
[sys.executable, "-m", "pip", "freeze"],
universal_newlines=True,
)
except OSError:
pass
return global_atts | 3dfcadeb7e17d969836966b1974bba092e5c00b8 | 11,691 |
import pandas as pd
def generate_bias(series: pd.Series, effect_size: float = 1, power: float = 1) -> pd.Series:
"""
Calculate bias for sensitive attribute
Parameters
----------
series : pd.Series
sensitive attribute for which the bias is calculated.
effect_size : float, optional
Size of the bias for 1 std from the mean. The default is 1.
power : float, optional
power=1: linear bias, power=2: quadratic bias, etc. The default is 1.
Returns
-------
pd.Series
        The bias values: zero-mean, scaled by `effect_size`.
"""
bias = series.sub(series.mean()).pow(power)
bias = (bias - bias.mean())/bias.std() # Make the bias neutral
return bias * effect_size | a23a201dfeac8ed25cb923080f9c968d1a8a6583 | 11,692 |
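# Usage sketch for generate_bias: the bias is zero-mean and scaled to the
# requested effect size (linear in the attribute for power=1).
s = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0])
bias = generate_bias(s, effect_size=2.0, power=1)
assert abs(bias.mean()) < 1e-6 and abs(bias.std() - 2.0) < 1e-6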
def compress_coils(kspace,
num_output_coils=None,
tol=None,
coil_axis=-1,
matrix=None,
method='svd',
**kwargs):
"""Coil compression gateway.
This function estimates a coil compression matrix and uses it to compress
`kspace`. Alternatively, this method can use a precomputed coil compression
matrix to perform the compression. In this case, use
`tfmr.coil_compression_matrix` to calculate the compression matrix, then pass
it to this function using the `matrix` argument. Use this two-step process
if you intend to reuse a coil compression matrix or need to calibrate the
compression using different data.
This function supports the following coil compression methods:
* **SVD**: Based on direct singular-value decomposition (SVD) of *k*-space
data [1]_. This coil compression method supports Cartesian and
non-Cartesian data. This method is resilient to noise, but does not
achieve optimal compression if there are fully-sampled dimensions.
.. * **Geometric**: Performs local compression along fully-sampled dimensions
.. to improve compression. This method only supports Cartesian data. This
.. method can suffer from low SNR in sections of k-space.
.. * **ESPIRiT**: Performs local compression along fully-sampled dimensions
.. and is robust to noise. This method only supports Cartesian data.
Args:
kspace: A `Tensor`. The multi-coil *k*-space data. Must have type
`complex64` or `complex128`. Must have shape `[..., Cin]`, where `...` are
the encoding dimensions and `Cin` is the number of coils. Alternatively,
the position of the coil axis may be different as long as the `coil_axis`
argument is set accordingly. If `method` is `"svd"`, `kspace` can be
Cartesian or non-Cartesian. If `method` is `"geometric"` or `"espirit"`,
`kspace` must be Cartesian.
num_output_coils: An `int`. The number of desired virtual output coils. If
`None`, the number of output coils is automatically determined based on
`tol`. If `tol` is also None, all virtual coils are returned.
tol: A `float` between 0.0 and 1.0. Virtual coils whose singular value is
less than `tol` times the first singular value are discarded. `tol` is
ignored if `num_output_coils` is also specified.
coil_axis: An `int`. Defaults to -1.
matrix: An optional `Tensor`. The coil compression matrix. If provided,
`matrix` is used to calculate the compressed output. Must have the same
type as `kspace`. Must have shape `[Cin, Cout]`, where `Cin` is the number
of input coils and `Cout` is the number of output coils. If `matrix` is
provided, arguments `num_output_coils` and `tol` are ignored.
method: A `string`. The coil compression algorithm. Must be `"svd"`.
**kwargs: Additional method-specific keyword arguments. See Notes for more
details.
Notes:
This function also accepts the following method-specific keyword arguments:
* For `method="svd"`, no additional keyword arguments are accepted.
Returns:
A `Tensor` containing the compressed *k*-space data. Has shape
`[..., Cout]`, where `Cout` is determined based on `num_output_coils` or
`tol` and `...` are the unmodified encoding dimensions.
References:
.. [1] Huang, F., Vijayakumar, S., Li, Y., Hertel, S. and Duensing, G.R.
(2008). A software channel compression technique for faster reconstruction
with many channels. Magn Reson Imaging, 26(1): 133-141.
.. [2] Zhang, T., Pauly, J.M., Vasanawala, S.S. and Lustig, M. (2013), Coil
compression for accelerated imaging with Cartesian sampling. Magn
Reson Med, 69: 571-582. https://doi.org/10.1002/mrm.24267
.. [3] Bahri, D., Uecker, M., & Lustig, M. (2013). ESPIRIT-based coil
compression for cartesian sampling. In Proceedings of the 21st
Annual Meeting of ISMRM, Salt Lake City, Utah, USA (Vol. 47).
"""
# pylint: disable=missing-raises-doc
kspace = tf.convert_to_tensor(kspace)
tf.debugging.assert_rank_at_least(kspace, 2, message=(
f"Argument `kspace` must have rank of at least 2, but got shape: "
f"{kspace.shape}"))
coil_axis = check_util.validate_type(coil_axis, int, name='coil_axis')
method = check_util.validate_enum(
method, {'svd', 'geometric', 'espirit'}, name='method')
# Move coil axis to innermost dimension if not already there.
if coil_axis != -1:
rank = kspace.shape.rank
canonical_coil_axis = coil_axis + rank if coil_axis < 0 else coil_axis
perm = (
[ax for ax in range(rank) if not ax == canonical_coil_axis] +
[canonical_coil_axis])
kspace = tf.transpose(kspace, perm)
# Calculate the compression matrix, unless one was already provided.
if matrix is None:
matrix = coil_compression_matrix(kspace,
num_output_coils=num_output_coils,
tol=tol,
method=method,
**kwargs)
# Apply the compression.
compressed_kspace = _apply_coil_compression(kspace, matrix)
# If necessary, move coil axis back to its original location.
if coil_axis != -1:
inv_perm = tf.math.invert_permutation(perm)
compressed_kspace = tf.transpose(compressed_kspace, inv_perm)
return compressed_kspace | abb3a4ecd8c98a27fa0f8d4bc5a02567056fef2a | 11,693 |
def get_thellier_gui_meas_mapping(input_df, output=2):
"""
Get the appropriate mapping for translating measurements in Thellier GUI.
This requires special handling for treat_step_num/measurement/measurement_number.
Parameters
----------
input_df : pandas DataFrame
MagIC records
output : int
output to this MagIC data model (2 or 3)
Output
--------
mapping : dict (used in convert_meas_df_thellier_gui)
"""
if int(output) == 2:
thellier_gui_meas3_2_meas2_map = meas_magic3_2_magic2_map.copy()
if 'treat_step_num' in input_df.columns:
thellier_gui_meas3_2_meas2_map.update(
{'treat_step_num': 'measurement_number'})
thellier_gui_meas3_2_meas2_map.pop('measurement')
return thellier_gui_meas3_2_meas2_map
# 2 --> 3
else:
thellier_gui_meas2_2_meas3_map = meas_magic2_2_magic3_map.copy()
if 'measurement' in input_df.columns:
thellier_gui_meas2_2_meas3_map.pop('measurement_number')
try:
res = int(input_df.iloc[0]['measurement_number'])
if res < 100:
thellier_gui_meas2_2_meas3_map['measurement_number'] = 'treat_step_num'
except ValueError as ex:
pass
return thellier_gui_meas2_2_meas3_map | ba32104db56cfdb450015a0a43f0717263d5ea44 | 11,694 |
import time
import uuid as unique_id
def new_unsigned_vaccination_credential(
passenger_first_name: str,
passenger_last_name: str,
passenger_id_number: str,
passenger_date_of_birth: str,
vaccination_disease: str,
vaccination_vaccine: str,
vaccination_product: str,
vaccination_auth_holder: str,
vaccination_dose_number: str,
vaccination_total_doses: str,
vaccination_batch: str,
vaccination_date: str,
vaccination_next_date: str,
vaccination_center: str,
vaccination_professional: str,
vaccination_country: str,
issuer_did: str
):
"""Create a Claims object for a Verifiable Credentia in JWT format.
The returned object has just the plain claims object, and has to be
signed later.
"""
# Generate a random UUID, not related to anything in the credential
# This is important for privacy reasons to avoid possibility of
# correlation if the UUID is used for Revocation Lists in a blockchain
uid = unique_id.uuid4().hex
# Current time and expiration
now = int(time.time())
exp = now + 365*24*60*60 # The token will expire in 365 days
# Generate a template Verifiable Credential
credential = {
"iss": issuer_did,
"sub": passenger_id_number,
"iat": now,
"exp": exp,
"uuid": uid,
"vc": {
"@context": [
"https://www.w3.org/2018/credentials/v1",
"https://alastria.github.io/identity/credentials/v1",
"https://safeisland.org/.well-known/w3c-covid-test/v1"
],
"type": [
"VerifiableCredential",
"AlastriaVerifiableCredential",
"SafeIslandVaccinationCredential"
],
"credentialSchema": {
"id": "vaccinationCredential",
"type": "JsonSchemaValidator2018"
},
"credentialSubject": {
"vaccinationCredential": {
"patient": {
"name": passenger_last_name.upper() + "/" + passenger_first_name.upper(),
"idnumber": passenger_id_number,
"dob": passenger_date_of_birth
},
"vaccination": {
"disease": vaccination_disease,
"vaccine": vaccination_vaccine,
"product": vaccination_product,
"auth_holder": vaccination_auth_holder,
"dose_number": vaccination_dose_number,
"total_doses": vaccination_total_doses,
"batch": vaccination_batch,
"date": vaccination_date,
"next_date": vaccination_next_date,
"center": vaccination_center,
"professional": vaccination_professional,
"country": vaccination_country,
},
"comments": "These are some comments"
},
"issuedAt": ["redt.alastria"],
"levelOfAssurance": 2
}
}
}
return credential | e3a072e1d16a3520a5bad92e692e5f4de72e8b1d | 11,695 |
def calc_pk_integrated_intensities(p,x,pktype,num_pks):
"""
Calculates the area under the curve (integrated intensities) for fit peaks
Required Arguments:
p -- (m x u + v) peak parameters for number of peaks, m is the number of
parameters per peak ("gaussian" and "lorentzian" - 3, "pvoigt" - 4, "split_pvoigt"
- 5), v is the number of parameters for chosen bgtype
x -- (n) ndarray of coordinate positions
f -- (n) ndarray of intensity measurements at coordinate positions x
pktype -- string, type of analytic function that will be used to fit the data,
current options are "gaussian","lorentzian","pvoigt" (psuedo voigt), and
"split_pvoigt" (split psuedo voigt)
num_pks -- integer 'u' indicating the number of pks, must match length of p
Outputs:
ints -- (m) integrated intensities for m fit peaks
"""
ints=np.zeros(num_pks)
if pktype == 'gaussian' or pktype == 'lorentzian':
p_fit=np.reshape(p[:3*num_pks],[num_pks,3])
elif pktype == 'pvoigt':
p_fit=np.reshape(p[:4*num_pks],[num_pks,4])
elif pktype == 'split_pvoigt':
p_fit=np.reshape(p[:6*num_pks],[num_pks,6])
for ii in np.arange(num_pks):
if pktype == 'gaussian':
ints[ii]=integrate.simps(pkfuncs._gaussian1d_no_bg(p_fit[ii],x),x)
elif pktype == 'lorentzian':
ints[ii]=integrate.simps(pkfuncs._lorentzian1d_no_bg(p_fit[ii],x),x)
elif pktype == 'pvoigt':
ints[ii]=integrate.simps(pkfuncs._pvoigt1d_no_bg(p_fit[ii],x),x)
elif pktype == 'split_pvoigt':
ints[ii]=integrate.simps(pkfuncs._split_pvoigt1d_no_bg(p_fit[ii],x),x)
return ints | d3ab50d6e6e5d2187917e06a8258a46ac5d4db18 | 11,696 |
def read_fid_ntraces(filename, shape=None, torder='flat', as_2d=False,
read_blockhead=False):
"""
    Read an Agilent/Varian binary (fid) file possibly having multiple
traces per block.
Parameters
----------
filename : str
Filename of Agilent/Varian binary file (fid) to read.
shape : tuple of ints, optional
Shape of the binary data. If not provided data is returned as a 2D
array. Required if more than one trace per block (non-standard).
torder : {'f', 'n', 'o'}
Trace order. See :py:func:`read` for details.
as_2d : bool, optional
True to return the data as a 2D array, ignoring the shape and torder
parameters.
read_blockhead : bool, optional
True to read the Agilent/Varian blockheaders(s) into the returned
dictionary. False ignores them.
Returns
-------
dic : dict
Dictionary of Agilent/Varian binary file parameters.
data : array_like
Low memory object which can access NMR data on demand.
See Also
--------
read_fid : Read a Agilent/Varian binary file with one trace per block.
read_fid_lowmem : Read a Agilent/Varian binary file with one trace per
block using minimal amounts of memory.
"""
# open the file
f = open(filename, 'rb')
# read the fileheader
dic = fileheader2dic(get_fileheader(f))
# data parameters
dt = find_dtype(dic)
nblocks = dic["nblocks"]
pts = dic["np"]
nbheaders = dic["nbheaders"]
ntraces = dic["ntraces"]
# read the data
if read_blockhead:
bdic, data = get_nblocks_ntraces(f, nblocks, ntraces, pts,
nbheaders, dt, read_blockhead)
dic["blockheader"] = bdic
else:
data = get_nblocks_ntraces(f, nblocks, ntraces, pts, nbheaders, dt,
read_blockhead)
f.close()
# uninterleave the real and imaginary data
data = uninterleave_data(data)
# if 2D array requested, return unshaped
if as_2d:
return dic, data
# check for 1D
if data.shape[0] == 1:
return dic, np.squeeze(data)
# try to reshape
if shape is None:
warn("unknown shape, returning unshaped data")
return dic, data
# reorder 3D/4D data
if len(shape) >= 3:
return dic, reorder_data(data, shape, torder)
try:
data = data.reshape(shape)
except ValueError:
warn(str(data.shape) + "cannot be shaped into" + str(shape))
return dic, data
return dic, data | d82f341326d089dad9def8a95b4233cf4dde607d | 11,697 |
import typing
async def async_get_erc20_decimals(
token: spec.ERC20Reference,
block: typing.Optional[spec.BlockNumberReference] = None,
**rpc_kwargs: typing.Any
) -> int:
"""get decimals of an erc20"""
return await erc20_generic.async_erc20_eth_call(
function_name='decimals', token=token, block=block, **rpc_kwargs
) | 665c9e697caffd9470c4f71769c8d215ce7d14a0 | 11,698 |
from fastapi.responses import JSONResponse
def get_game_log(game_id: int):
"""
Method used to get list of important events of macau game with given game id.
:param game_id: integer value of existing game
:return: list with string with all important events in game
"""
if game_id >= len(games_container):
return JSONResponse(content={'status': 'No game', 'output': None}, status_code=404)
outputs = games_container[game_id]['outputs']['game']
return {"status": "OK", "output": outputs} | 837b43b24f747fabb819fe5eeb3e284694fd02a3 | 11,699 |