content | sha1 | id
---|---|---
def orders(request):
"""显示用户的所有订单"""
username = request.user.get_username()
    # Fetch all records whose username matches the current user, then sort them by time in reverse order
all_orders = Order.objects.filter(username=username)
times = all_orders.values('time')
    # Collect the distinct order times: for a given user, seats booked at the same time belong to the same order
distinct_times = set()
for distinct_time in times:
distinct_times.add(distinct_time['time'])
order_infos = []
distinct_times = [t for t in distinct_times]
# print(distinct_times)
for distinct_time in sorted(distinct_times, reverse=True):
seats = all_orders.filter(time=distinct_time)
order_info = {}
session = Session.objects.get(session_id=seats.values('session_id', 'price')[0]['session_id'])
house = session.house_id
movie = session.movie_id
cinema = house.cinema_id
order_info['order_id'] = distinct_time
order_info['cinema_name'] = cinema.name
order_info['cinema_id'] = cinema.cinema_id
order_info['movie_name'] = movie.name
order_info['movie_id'] = movie.movie_id
order_info['house_name'] = house.house_name
order_info['start_time'] = session.start_time
order_info['status'] = seats[0].status
order_info['price'] = len(seats) * seats.values('session_id', 'price')[0]['price']
order_info['seats'] = []
for seat in seats.values():
order_info['seats'].append(str(seat['seat_row']) + '排' + str(seat['seat_column']) + '座')
order_infos.append(order_info)
context = {'orders': order_infos}
return render(request, 'orders.html', context)
|
4aa980465fb63e6e28d1020567cb0f6b4b607143
| 33,982 |
def convert_filename(filename):
"""Fix a filename"""
# Try and replace illegal characters
filename = replace_characters(filename)
# Remove remaining illegal characters
filename = remove_characters(filename)
return filename
|
6a55ed29842ef08f19ec64db24ebcbe62f3849d9
| 33,983 |
from typing import Optional
from pathlib import Path
def create(
path_or_url: str,
output_dir: Optional[Path] = None,
) -> Path:
"""
Generate a new project from a composition file, local template or remote template.
Args:
path_or_url: The path or url to the composition file or template
output_dir: Where to generate the project
Returns:
The path to the generated project.
"""
output_dir = output_dir or Path(".")
if is_composition_file(path_or_url):
composition = read_composition(path_or_url, output_dir)
else:
tmpl = LayerConfig(template=path_or_url)
composition = ProjectComposition(layers=[tmpl], destination=output_dir)
process_composition(composition)
return output_dir
|
37dcc2ebb5c918afaf851f02a5202399947e2c63
| 33,984 |
def get_camera_index(glTF, name):
"""
Return the camera index in the glTF array.
"""
if glTF.get('cameras') is None:
return -1
index = 0
for camera in glTF['cameras']:
if camera['name'] == name:
return index
index += 1
return -1
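# Illustrative usage sketch (added, not from the original source): a glTF document is
# a plain dict, so the lookup can be exercised directly.
_gltf = {'cameras': [{'name': 'main'}, {'name': 'top'}]}
assert get_camera_index(_gltf, 'top') == 1
assert get_camera_index(_gltf, 'side') == -1
assert get_camera_index({}, 'main') == -1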
|
0ffee8f036f5223f419fc3e5f95184c15f1b75d4
| 33,985 |
def local_se(df, kernel, deg, width):
""" This function is used to calculate the local standard errors, based on
estimation results of a local polynomial regression.
"""
if deg != 1:
print(
"WARNING: function local_se is currently hard-coded for ",
"polynomials of degree 1 and will deliver wrong results for ",
"anything else!",
)
x = df[df.columns[0]].array
cap_x = pd.DataFrame(data={"constant": np.ones(len(x)), "x": x})
beta_hat_covariances = {}
y_hat_se = {}
for count, element in enumerate(x):
# get weights from the local regression
weights = kernel(np.abs(x - element) / width)
# only consider values with a weight > 0
inds = np.where(np.abs(weights) > 1e-10)[0]
# CAUTION: This area is hard-coded for a polynomial of degree 1
rel_data = df.iloc[inds]
beta_cov = np.cov(m=rel_data[["beta_hat_0", "beta_hat_1"]], rowvar=0)
beta_hat_covariances.update({str(element): beta_cov})
se = np.dot(cap_x.loc[count, :].array, beta_hat_covariances[str(element)])
se = np.dot(se, np.transpose(cap_x.loc[count, :].array))
y_hat_se.update({str(element): np.sqrt(se)})
return y_hat_se
|
7b8fbc2d8edb2f572d0a5b06766c0e57d26b2a6a
| 33,986 |
import pickle
def data(get_api_data):
"""Get Weather data.
For testing I used pickle to
"""
GET_API_DATA = get_api_data
if GET_API_DATA:
weather_data = WeatherData()
with open('today_weather_data.tmp', 'wb+')as f:
pickle.dump(weather_data, f)
else:
with open('today_weather_data.tmp', 'rb') as f:
weather_data = pickle.load(f)
return weather_data
|
0cf4e45a3129df323ed2089a6c27b8bb0a0e2831
| 33,987 |
def json_format(subtitle, data):
"""
Format json to string
:param subtitle: description to text
:type subtitle: string
:param data: content to format
:type data: dictionary
"""
msg = subtitle+':\n'
for name in data: msg += name+': '+data[name]+'\n'
return msg.strip()
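# Illustrative usage sketch (added, not from the original source); note the dictionary
# values must already be strings because they are concatenated with '+'.
print(json_format('Weather', {'city': 'Oslo', 'sky': 'clear'}))
# Weather:
# city: Oslo
# sky: clear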
|
bb3392d7ad57a482b4175838858d316ecc5f56e1
| 33,988 |
def _nSentencesInWordMap(wtm):
"""Return the number of valid word sequence in wtm"""
result = [0] * len(wtm) + [1]
for i_ in xrange(len(wtm)):
i = len(wtm) - i_ - 1
for j in wtm[i].iterkeys():
result[i] += result[j]
return result[0]
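# Added note (an assumption about the expected input): wtm looks like a word lattice,
# a list where wtm[i] maps each position j at which a word starting at i can end to
# some payload; the function then counts the distinct segmentations of the whole
# sequence (Python 2 idioms: xrange/iterkeys). For example:
#   wtm = [{1: 'a', 2: 'ab'}, {2: 'b'}, {3: 'c'}]
#   _nSentencesInWordMap(wtm)  # -> 2, i.e. 'a|b|c' and 'ab|c'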
|
9a771693ec572ad99ee7007efbf9d18e393b2c6c
| 33,989 |
def hflip(img):
# type: (Tensor) -> Tensor
"""Horizontally flip the given the Image Tensor.
Args:
img (Tensor): Image Tensor to be flipped in the form [C, H, W].
Returns:
Tensor: Horizontally flipped image Tensor.
"""
if not _is_tensor_a_torch_image(img):
raise TypeError('tensor is not a torch image.')
return img.flip(-1)
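# Illustrative usage sketch (added): flipping a tiny [C=1, H=2, W=2] tensor reverses
# the last (width) dimension; _is_tensor_a_torch_image is assumed to accept it.
#   img = torch.tensor([[[1., 2.], [3., 4.]]])
#   hflip(img)  # -> tensor([[[2., 1.], [4., 3.]]])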
|
e551cfe143bc823ff046bd57b524a262b3ecdb8b
| 33,991 |
import numpy
def gfalternate_gotnogaps(data,label,mode="verbose"):
"""
Returns true if the data series has no gaps, false if there are gaps
"""
return_code = True
if numpy.ma.count_masked(data)==0:
if mode=="verbose":
msg = " No gaps in "+label
logger.info(msg)
return_code = True
else:
return_code = False
return return_code
|
bf31bf4da5896ebe37e76d26054d7b7cac8e4a4b
| 33,992 |
def expm_tS_v2(t, S):
"""
Compute expm(t*S) using AlgoPy eigendecomposition and an identity.
t: truncated univariate Taylor polynomial
S: symmetric numpy matrix
"""
# Compute the eigendecomposition using a Taylor-aware eigh.
L, Q = eigh(t * S)
return dot(Q * exp(L), Q.T)
|
3d5512e3b9fc00b765538c6f1bdb3ea0d460a896
| 33,993 |
def command():
"""
Return the tested command.
"""
return module.Command()
|
bd79175d169372283fca9e4555ce762f80f60f18
| 33,994 |
def computeSWC(D, cl_cores, ncl, shortpath):
"""
:param D: 2D dataset of n*d, i.e. n the number of cores (initial clusters) with d dimension
:param cl_cores: cluster label of each cores
:param ncl: the number of clusters
:param shortpath: the shortest path length matrix of core-based graph
:return: swc: result the Silhouette width criterion (when using the Silhouette index)
sl: the Silhouette value of each point (used for computing the LCCV index)
"""
# -------------------------------------------------------------------------
# Reference: R.P.Silhouettes: A graphical aid to the interpretation and
# validation of cluster analysis[J].Journal of Computational & Applied
# Mathematics, 1987, 20(20): 53-65.
# -------------------------------------------------------------------------
n, d = D.shape
ncl = int(ncl)
#cdata = cell(1, ncl)
cdata = -np.ones((ncl, n*d+1)) # keep data points in each cluster
cindex = np.zeros((ncl, n))
numc = ncl
for i in range(ncl):
nump = 0
for j in range(n):
if cl_cores[j] == (i+1):
for k in range(d):
cdata[i, nump*d+k] = D[j, k]
cindex[i, nump] = j
nump += 1
cdata[i, n*d] = nump
numo = 0
# Do not compute the swc for outliers
if np.min(cl_cores) <= 0:
for i in range(n):
if cl_cores[i] <= 0:
numo += 1
swc = 0
s1 = np.zeros((n, 1))
for i in range(numc):
aa = 0
bb = 0
ss = 0
np1 = int(cdata[i, n*d])
if np1 > 1:
for j in range(np1):
# compute aa
suma = 0
for k in range(np1):
if j != k:
suma += shortpath[int(cindex[i, j]), int(cindex[i, k])]
aa = suma / (np1 - 1)
# compute bb
dd = np.ones((numc, 1)) * float('inf')
for k in range(numc):
if k != i:
np2 = int(cdata[k, n*d])
#print(np2)
sumd = 0
for l in range(np2):
sumd += shortpath[int(cindex[i, j]), int(cindex[k, l])]
if np2 != 0: dd[k] = sumd / np2
bb = np.min(dd, axis = 0)
#print('bb:', bb)
# compute ss
if (np.max([aa, bb]) != 0):
if bb != float('inf'):
ss = (bb - aa) / np.max([aa, bb])
else:
ss = 0
else:
ss = 0
#print('np1=%d,numc=%d' % (np1, numc))
#print('a(j)=%f,b(j)=%f,max(a,b)=%f,s(j)=%f' % (aa, bb, np.max([aa, bb]), ss))
s1[int(cindex[i, j])] = ss
swc = swc + ss
#print ('swc=%f' % swc)
if (n - numo) != 0:
swc = swc / (n - numo)
else:
swc = 0
return swc, s1
|
fca3dd6166867cd5d1f41c4dc0e85750f0339086
| 33,995 |
import warnings
def stable_cumsum(arr, axis=None, rtol=1e-05, atol=1e-08):
"""Use high precision for cumsum and check that final value matches sum
Parameters
----------
arr : array-like
To be cumulatively summed as flat
axis : int, optional
Axis along which the cumulative sum is computed.
The default (None) is to compute the cumsum over the flattened array.
rtol : float
Relative tolerance, see ``np.allclose``
atol : float
Absolute tolerance, see ``np.allclose``
"""
# sum is as unstable as cumsum for numpy < 1.9
if np_version < (1, 9):
return np.cumsum(arr, axis=axis, dtype=np.float64)
out = np.cumsum(arr, axis=axis, dtype=np.float64)
expected = np.sum(arr, axis=axis, dtype=np.float64)
if not np.all(np.isclose(out.take(-1, axis=axis), expected, rtol=rtol,
atol=atol, equal_nan=True)):
warnings.warn('cumsum was found to be unstable: '
'its last element does not correspond to sum',
RuntimeWarning)
return out
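# Illustrative usage sketch (added; assumes numpy is imported as np and that the
# np_version helper referenced above is available):
#   stable_cumsum(np.array([1e-3] * 1000))[-1]          # ~1.0, no warning expected
#   stable_cumsum(np.arange(6).reshape(2, 3), axis=1)   # row-wise cumulative sums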
|
0a6956461b1870c92a4a1b3556fb0889320fcac5
| 33,996 |
def read_data_min(server, port, user, pwd, instrument):
"""从mongo中读取分钟数据(含实时)"""
#取数据
client = py_at.MongoDBClient.DBConn(server, port)
client.connect(user, pwd)
db = client.get_database("future_min")
coll = client.get_collection("future_min", instrument)
docs = coll.find()
coll_real = client.get_collection("future_real", instrument)
docs_real = coll_real.find()
ddoc = []
for doc in docs:
ddoc.append(doc)
for doc in docs_real:
ddoc.append(doc)
return ddoc
|
7dfd30a63268318a8339c3c91fc4a477891b908e
| 33,997 |
from textwrap import dedent
def _demo(seed=None, out_fc=False, SR=None, corner=[0, 0], angle=0):
"""Generate the grid using the specified or default parameters
"""
corner = corner # [300000.0, 5000000.0]
dx, dy = [1, 1]
cols, rows = [3, 3]
if seed is None:
# seed = rectangle(dx=1, dy=1, cols=3, rows=3)
seed = hex_pointy(dx=10, dy=10, cols=3, rows=3)
# seed = hex_flat(dx=10, dy=10, cols=3, rows=3)
seed_t = 'rectangle'
if SR is None:
SR = 3857 # -- WGS84 Web Mercator (Auxiliary Sphere)
pnts = repeat(seed=seed, corner=corner, cols=3, rows=3, angle=0)
args = ["", SR, seed_t, corner, [dx, dy], [cols, rows], seed[0]]
print(dedent(msg).format(*args))
return pnts
|
194582b64bc279ab51c54107811e052019bc862e
| 33,998 |
def measure_bb_tpdm(spatial_dim, variance_bound, program, quantum_resource,
transform=jordan_wigner, label_map=None):
"""
Measure the beta-beta block of the 2-RDM
:param spatial_dim: size of spatial basis function
:param variance_bound: variance bound for measurement. Right now this is
the bound on the variance if you summed up all the
individual terms. 1.0E-6 is a good place to start.
:param program: a pyquil Program
:param quantum_resource: a quantum abstract machine connection object
:param transform: fermion-to-qubit transform
:param label_map: qubit label re-mapping if different physical qubits are
desired
:return: the beta-beta block of the 2-RDM
"""
# first get the pauli terms corresponding to the alpha-alpha block
pauli_terms_in_bb = pauli_terms_for_tpdm_bb(spatial_dim,
transform=jordan_wigner)
if label_map is not None:
pauli_terms_in_bb = pauli_term_relabel(sum(pauli_terms_in_bb),
label_map)
rev_label_map = dict(zip(label_map.values(), label_map.keys()))
result_dictionary = _measure_list_of_pauli_terms(pauli_terms_in_bb,
variance_bound,
program,
quantum_resource)
if label_map is not None:
result_dictionary = pauli_dict_relabel(result_dictionary, rev_label_map)
d2bb = pauli_to_tpdm_bb(spatial_dim, result_dictionary, transform=transform)
return d2bb
|
0b5ec56b6f0b6bb20d30e3d4bbe76b723a0dc631
| 33,999 |
def singlepass(fn):
"""
Will take the mean of the iterations if needed.
Args:
fn (Callable): Heuristic function.
Returns:
fn : Array -> Array
"""
@_wraps(fn)
def wrapper(self, probabilities):
if probabilities.ndim >= 3:
# Expected shape : [n_sample, n_classes, ..., n_iterations]
probabilities = probabilities.mean(-1)
return fn(self, probabilities)
return wrapper
|
81052f3daf498e119cd953144da6ae76ef5bdf7d
| 34,000 |
def remove_dihedral(mol, a, b, c, d):
"""
utils.remove_dihedral
Remove a specific dihedral in RDkit Mol object
Args:
mol: RDkit Mol object
        a, b, c, d: Atom indices that define the dihedral to remove (int)
Returns:
boolean
"""
if not hasattr(mol, 'dihedrals'):
return False
for i, dihedral in enumerate(mol.dihedrals):
if ((dihedral.a == a and dihedral.b == b and dihedral.c == c and dihedral.d == d) or
(dihedral.d == a and dihedral.c == b and dihedral.b == c and dihedral.a == d)):
del mol.dihedrals[i]
break
return True
|
7e26e995fec97c5c6d2304e11d06fec03b990942
| 34,001 |
import inspect
def load_mo(elem):
"""
This loads the managed object into the current name space
Args:
elem (str): element
Returns:
MangedObject
"""
mo_class_id = elem.tag
mo_class = load_class(mo_class_id)
mo_class_params = inspect.getargspec(mo_class.__init__)[0][2:]
mo_class_param_dict = {}
for param in mo_class_params:
mo_class_param_dict[param] = elem.attrib[
mo_class.PROPERTY_MAP[param]]
mo_obj = mo_class(parent_mo_or_dn="", **mo_class_param_dict)
return mo_obj
|
07738ce084cc0c6a51a6445eaa361c5c38daf312
| 34,002 |
import collections
def partialdot(mpa1, mpa2, start_at, axes=(-1, 0)):
"""Partial dot product of two MPAs of inequal length.
The shorter MPA will start on site ``start_at``. Local dot products
will be carried out on all sites of the shorter MPA. Other sites
will remain unmodified.
mpa1 and mpa2 can also have equal length if ``start_at == 0``. In
this case, we do the same as :func:`dot()`.
:param mpa1, mpa2: Factors as MPArrays, length must be inequal.
:param start_at: The shorter MPA will start on this site.
:param axes: See ``axes`` argument to :func:`dot()`.
:returns: MPA with length of the longer MPA.
"""
# adapt the axes from physical to true legs
if isinstance(axes[0], collections.Sequence):
axes = tuple(tuple(ax + 1 if ax >= 0 else ax - 1 for ax in axes2)
for axes2 in axes)
else:
axes = tuple(ax + 1 if ax >= 0 else ax - 1 for ax in axes)
# Make the MPAs equal length (in fact, the shorter one will be
# infinite length, but that's fine because we use zip()).
shorter = mpa1 if len(mpa1) < len(mpa2) else mpa2
shorter = it.chain(it.repeat(None, times=start_at), shorter.lt,
it.repeat(None))
if len(mpa1) < len(mpa2):
mpa1_lt = shorter
mpa2_lt = mpa2.lt
else:
mpa1_lt = mpa1.lt
mpa2_lt = shorter
ltens_new = (
l if r is None else (r if l is None else _local_dot(l, r, axes))
for l, r in zip(mpa1_lt, mpa2_lt)
)
return MPArray(ltens_new)
|
2051e1e9e5023ee5e9724e62d0a123937ece7fb3
| 34,003 |
import torch
def thresh_ious(gt_dists, pred_dists, thresh):
"""
Computes the contact intersection over union for a given threshold
"""
gt_contacts = gt_dists <= thresh
pred_contacts = pred_dists <= thresh
inter = (gt_contacts * pred_contacts).sum(1).float()
    union = (gt_contacts | pred_contacts).sum(1).float()
iou = torch.zeros_like(union)
iou[union != 0] = inter[union != 0] / union[union != 0]
return iou
|
9bd6244325acae0d3ebb5ffca46e0453a71000d1
| 34,004 |
def encode_sentences(sentences, tokenizer, language, mode):
"""对句子进行编码
@param sentences: 要编码的句子列表
@param tokenizer: 使用的字典
@param language: 语言
@param mode: 模式
@return: 编码好的句子列表,最大句子长度
"""
if language == 'zh':
return _encode_sentences_keras(sentences, tokenizer)
elif language == 'en':
if mode == 'BPE':
return _encode_sentences_bpe(sentences, tokenizer)
elif mode == 'WORD':
return _encode_sentences_keras(sentences, tokenizer)
else:
raise ValueError("语言(%s)暂不支持模式(%s)" % (language, mode))
|
8bb0d2143b73c02f1fac600ce322012da0bbdb05
| 34,005 |
def calc_scores(line_executions=LINE_EXECUTIONS, prior=PRIOR):
"""Return 'fault' score for each line, given prior and observations."""
return {
line: float(
execution_counts.negative_cases + prior.negative_cases
) / (
execution_counts.positive_cases + prior.positive_cases
+ execution_counts.negative_cases + prior.negative_cases
)
for line, execution_counts in line_executions.items()
}
|
985fb4ccc193135e5aa8b5637fc8878c9314e74e
| 34,006 |
def symmetric_variable_summaries(var, metrics=None):
"""
Set summaries for a symmetric variable, e.g., the output of tanh.
Args:
var: tensorflow variable.
metrics: set a list of metrics for evaluation. Support: mean, stddev, max, min, histogram.
Default: mean, stddev.
Returns:
summaries: a list of summaries.
"""
summaries = []
if metrics is None:
metrics = ["mean-stddev"]
with tf.name_scope("symmatric_value_summary"):
with tf.name_scope("positive_value_summary"):
# first, get positive or zero values
where_pos = tf.where(tf.greater_equal(var, tf.constant(0, dtype=tf.float32)))
pos_elements = tf.gather_nd(var, where_pos)
summaries.extend(variable_summaries(pos_elements, metrics=metrics))
with tf.name_scope("negative_value_summary"):
# then, get negative values
where_neg = tf.where(tf.less(var, tf.constant(0, dtype=tf.float32)))
neg_elements = tf.gather_nd(var, where_neg)
summaries.extend(variable_summaries(neg_elements, metrics=metrics))
return summaries
|
8ba02b426c73e939aa2797bdc32dc988b07191b0
| 34,007 |
def normalize_parameters(raw_parameters, minvals, maxvals):
"""takes in a list of parameters and does simple min/max normalization according to min/max values
INPUTS
raw_parameters: length n, containing parameters for a star
minvals: length n, minimum parameter values
maxvals: length n, max parameter values
OUTPUTS
normed_parameters: length n, normalized parameters
"""
normed_parameters = (raw_parameters - minvals) / (maxvals-minvals)
return normed_parameters
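# Illustrative usage sketch (added): with numpy arrays the min/max scaling is elementwise.
import numpy as np
raw = np.array([5000.0, 4.2])          # hypothetical stellar parameters
minvals = np.array([4000.0, 0.0])
maxvals = np.array([7000.0, 5.0])
normalize_parameters(raw, minvals, maxvals)   # -> array([0.3333..., 0.84])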
|
53249bcecc8c3fd88beae7f377c6d5490693fba9
| 34,008 |
from typing import Any
def read_users(
db: Session = Depends(deps.get_db),
skip: int = 0,
limit: int = 100,
current_user: models.User = Depends(deps.get_current_active_superuser)
) -> Any:
"""
Retrieve users.
"""
users = crud.user.get_multi(db, skip=skip, limit=limit)
return users
|
0f4f6eed8fc181165b261cb2f325e2dd38d14cf8
| 34,009 |
from numpy import cos, sin, array
def euler(a, b, c):
"""
Calculate a three dimensional rotation matrix from the euler angles.
@param a: alpha, angle between the x-axis and the line of nodes
@param b: beta, angle between the z axis of the different coordinate systems
@param c: gamma, angle between the line of nodes and the X-axis
"""
ca, cb, cc = cos(a), cos(b), cos(c)
sa, sb, sc = sin(a), sin(b), sin(c)
return array([[ cc * cb * ca - sc * sa, cc * cb * sa + sc * ca, -cc * sb],
[-sc * cb * ca - cc * sa, -sc * cb * sa + cc * ca, sc * sb],
[ sb * ca, sb * sa, cb ]])
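# Illustrative check (added): with all three Euler angles equal to zero the rotation
# matrix reduces to the identity.
import numpy as np
assert np.allclose(euler(0.0, 0.0, 0.0), np.eye(3))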
|
406786a62b798b8e7dbbf57bd33f7243b5671e06
| 34,010 |
from pathlib import Path
def guardar_imagen_cv2(img_path, filtro, filtro_name="", output_dir=""):
"""
img_path: directorio de la imagen
filtro: funcion para aplicar filtro
file_path: directorio donde guardaremos las imagen
"""
try:
imagen_preprocesada = aplicar_preproceso(img_path, filtro)
except:
print("Error, image path incorrecto")
img_name = Path(img_path).name #cambia el sufijo
output_filename = img_name.replace('.JPG','')+filtro_name+".JPG"
output_dir = Path(output_dir)
output_path = output_dir/output_filename
cv2.imwrite(str(output_path), imagen_preprocesada)
return str(output_path)
|
ede6ede0080df8b0ab1d99f348c014710f37c254
| 34,011 |
def create_begin_files(params):
"""Create initial files for HADDOCK3 run."""
run_dir = params['run_dir']
data_dir = run_dir / 'data'
begin_dir = run_dir / 'begin'
run_dir.mkdir()
begin_dir.mkdir()
data_dir.mkdir()
copy_files_to_dir(params['molecules'], data_dir)
copy_molecules_to_begin_folder(params['molecules'], begin_dir)
return begin_dir, data_dir
|
8c80a2bc084cb7573948055d2e04c1fb54eb93da
| 34,012 |
import torch
from typing import Optional
def dump_sender_receiver_with_noise(agent_1: torch.nn.Module,
agent_2: torch.nn.Module,
dataset: 'torch.utils.data.DataLoader',
noise_prob: float,
max_len:int,
gs:bool,variable_length: bool,
device: Optional[torch.device] = None):
"""
A tool to dump the interaction between Sender and Receiver
:param game: A Game instance
:param dataset: Dataset of inputs to be used when analyzing the communication
:param gs: whether Gumbel-Softmax relaxation was used during training
:param variable_length: whether variable-length communication is used
:param device: device (e.g. 'cuda') to be used
:return:
"""
device = device if device is not None else common_opts.device
sender_inputs, messages, receiver_inputs, receiver_outputs = [], [], [], []
labels = []
with torch.no_grad():
for batch in dataset:
# by agreement, each batch is (sender_input, labels) plus optional (receiver_input)
sender_input = move_to(batch[0], device)
receiver_input = None if len(batch) == 2 else move_to(batch[2], device)
message = agent_1.send(sender_input)
# Under GS, the only output is a message; under Reinforce, two additional tensors are returned.
# We don't need them.
if not gs: message = message[0]
for i in range(message.size(0)):
for j in range(message.size(1)):
if message[i,j]!=0:
if np.random.rand()<noise_prob:
message[i,j]=np.random.randint(1,max_len)
output = agent_2.receive_2(message, receiver_input, None)
if not gs: output = output[0]
if batch[1] is not None:
labels.extend(batch[1])
if isinstance(sender_input, list) or isinstance(sender_input, tuple):
sender_inputs.extend(zip(*sender_input))
else:
sender_inputs.extend(sender_input)
if receiver_input is not None:
receiver_inputs.extend(receiver_input)
if gs: message = message.argmax(dim=-1) # actual symbols instead of one-hot encoded
if not variable_length:
messages.extend(message)
receiver_outputs.extend(output)
else:
# A trickier part is to handle EOS in the messages. It also might happen that not every message has EOS.
# We cut messages at EOS if it is present or return the entire message otherwise. Note, EOS id is always
# set to 0.
for i in range(message.size(0)):
eos_positions = (message[i, :] == 0).nonzero()
message_end = eos_positions[0].item() if eos_positions.size(0) > 0 else -1
assert message_end == -1 or message[i, message_end] == 0
if message_end < 0:
messages.append(message[i, :])
else:
messages.append(message[i, :message_end + 1])
                    if gs:
                        receiver_outputs.append(output[i, message_end, ...])
                    else:
                        receiver_outputs.append(output[i, ...])
return sender_inputs, messages, receiver_inputs, receiver_outputs
|
0fde105094d2fdf1b15247b80533bd6f2714d9dd
| 34,013 |
def split_list(xs, at):
"""Split a list into sublists by a specific item value
    E.g. split_list(['a', 'b', '.', 'c', '.', 'd'], '.') == [['a', 'b'], ['c'], ['d']]
    Parameters
    ----------
    xs
A list
at
The item value to split by
Returns
-------
List[List]
A list of sublists split by `at`
"""
if at in xs:
# Stores the intermediate sublists for output
output = []
# Stores items in between split values
buffer_ = []
for x in xs:
if x == at:
output.append(buffer_)
buffer_ = []
else:
buffer_.append(x)
output.append(buffer_)
return exclude_blank(output)
return [xs]
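# Illustrative usage (added; relies on the exclude_blank helper defined elsewhere
# in the original module):
#   split_list(['a', 'b', '.', 'c', '.', 'd'], '.')  # -> [['a', 'b'], ['c'], ['d']]
#   split_list(['a', 'b'], '.')                      # -> [['a', 'b']]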
|
5fd0d3f0c9417481918c36203fc84db8be33b104
| 34,014 |
def do(the_state_machine, pre_context_sm, BeginOfLinePreContextF, BeginOfStreamPreContextF):
"""Sets up a pre-condition to the given state machine. This process
is entirely different from any concatenation or parallelization
of state machines. Here, the state machine representing the pre-
condition is **not** webbed into the original state machine!
Instead, the following happens:
-- the pre-condition state machine is inverted, because
it is to be walked through backwards.
-- the inverted state machine is marked with the state machine id
of the_state_machine.
-- the original state machine will refer to the inverse
state machine of the pre-condition.
-- the initial state origins and the origins of the acceptance
states are marked as 'pre-conditioned' indicating the id
of the inverted state machine of the pre-condition.
"""
#___________________________________________________________________________________________
# (*) do some consistency checking
# -- state machines with no states are senseless here.
assert not the_state_machine.is_Empty()
assert pre_context_sm is None or not pre_context_sm.is_Empty()
# -- trivial pre-conditions should be added last, for simplicity
#___________________________________________________________________________________________
if pre_context_sm is None:
# NOT: 'and ...' !
if BeginOfLinePreContextF:
# Set acceptance condition: 'begin of line'.
for state in the_state_machine.get_acceptance_state_list():
state.set_acceptance_condition_id(E_AcceptanceCondition.BEGIN_OF_LINE)
if BeginOfStreamPreContextF:
# Set acceptance condition: 'begin of stream'.
for state in the_state_machine.get_acceptance_state_list():
state.set_acceptance_condition_id(E_AcceptanceCondition.BEGIN_OF_STREAM)
return None
if BeginOfLinePreContextF:
new_pre_context_sm = DFA_Newline()
sequentialize.do([new_pre_context_sm, pre_context_sm],
MountToFirstStateMachineF=True)
pre_context_sm = beautifier.do(new_pre_context_sm)
# (*) Once an acceptance state is reached no further analysis is necessary.
pre_context_sm.delete_loops_to_init_state()
if Setup.fallback_mandatory_f \
and pre_context_sm.longest_path_to_first_acceptance() is None:
error.log("Pre-context contains patterns of arbitrary length to first acceptance backwards.")
# (*) let the state machine refer to it
# [Is this necessary? Is it not enough that the acceptance origins point to it? <fschaef>]
pre_context_sm_id = pre_context_sm.get_id()
# (*) Associate acceptance with pre-context id.
for state in the_state_machine.get_acceptance_state_list():
state.set_acceptance_condition_id(pre_context_sm_id)
return pre_context_sm
|
bd6bdfa6826b74b036c2b7514e78a34b0b68404e
| 34,015 |
def Sexp(x,G,N):
"""
Stochastic exponentiation function
Note: G should be such that G<<N
Parameters
----------
x: Stochastic bit-stream
G: Positive integer
N: Integer specifying the number of states
Returns
-------
A stochastic bit-stream approximating exp(-2Gx)
"""
no_samples = x.size
z = np.empty(no_samples,dtype=np.int8)
# Initialise FSM Parameters
S_min = 0
S_max = N-1
S_bound = N-G
# Initialise the state S
S = N/2
# Run the FSM
for sample in range(no_samples):
# State transition
if(x[sample] == 0):
S = S-1
else:
S = S+1
# Saturate the counter
if(S < S_min):
S = S_min
elif(S > S_max):
S = S_max
# Output logic
if(S >= S_bound):
z[sample] = 0
else:
z[sample] = 1
return z
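# Illustrative usage sketch (added; assumes numpy is imported as np): drive the FSM
# with a Bernoulli(p) bit-stream and compare the output rate against exp(-2*G*p).
p, G, N = 0.3, 2, 32
x_stream = (np.random.rand(100000) < p).astype(np.int8)
z_stream = Sexp(x_stream, G, N)
print(z_stream.mean(), np.exp(-2 * G * p))   # the two values should be roughly comparable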
|
7815c2a2310593db651bad3e7922fc289d13597a
| 34,016 |
def truncate(s: str, length: int = DEFAULT_CURTAIL) -> str:
"""
Truncate a string and add an ellipsis (three dots) to the end if it was too long
:param s: string to possibly truncate
:param length: length to truncate the string to
"""
if len(s) > length:
s = s[: length - 1] + '…'
return s
|
90eb3cb1a26f525f8bc6076421a85d2f86af365c
| 34,017 |
def amp_kd_calc(amph_sites_ff, element):
"""
    amp_kd_calc calculates the partition coefficient for a specified trace element
that is in equilibrium with a given amphibole composition according to
Humphreys et al., 2019.
supported elements = ['Rb','Sr','Pb','Zr','Nb','La','Ce','Nd','Sm',
'Eu','Gd','Dy','Ho','Yb','Lu','Y']
Parameters
----------
amph_sites_ff : pandas DataFrame
Amphibole site allocations that incorporate ferric ferrous iron.
This should be the output from the get_amp_sites_ferric_ferrous function
element : string
The element you want to calculate the partition coefficient for
Raises
------
Exception
If you do not choose a supported element from Humphreys et al., 2019
an error will be thrown prompting you to choose a supported element
Returns
-------
aem_kd : array-like
partition coefficient between amphibole and its equilibrium melt
aem_kd_se : scalar
the one sigma uncertainty on your partition coefficient taken from
table 2 in Humphreys et al., 2019
"""
# Building table 2 from Humphreys et al 2019
elements = [
"Rb",
"Sr",
"Pb",
"Zr",
"Nb",
"La",
"Ce",
"Nd",
"Sm",
"Eu",
"Gd",
"Dy",
"Ho",
"Yb",
"Lu",
"Y",
]
constants = np.array(
[
9.1868,
3.41585,
-4.2533,
-25.6167,
-22.27,
-20.0493,
-21.1078,
-20.3082,
-11.3625,
-35.6604,
-19.0583,
-16.0687,
-20.4148,
-15.8659,
-19.3462,
-36.2514,
]
)
si = np.array(
[
-1.3898,
-0.75281,
0,
2.6183,
2.3241,
2.0732,
2.4749,
2.5162,
1.6002,
4.1452,
2.4417,
2.3858,
2.3654,
2.281,
2.1142,
3.6078,
]
)
al = np.array([0, 0, 2.715, 2.6867, 0, 0, 0, 0, 0, 2.6886, 0, 0, 0, 0, 0, 3.78])
ti = np.array(
[
-3.6797,
0,
1.69,
4.838,
3.7633,
2.5498,
2.4717,
2.5863,
0,
6.4057,
1.9786,
1.8255,
2.484,
1.5905,
2.8478,
7.513,
]
)
fe3 = np.array(
[
-1.5769,
0,
0.7065,
2.6591,
2.9786,
1.5317,
1.5722,
1.9459,
1.2898,
3.8508,
1.8765,
1.9741,
3.2601,
2.1534,
2.7011,
4.8366,
]
)
fe2 = np.array(
[
-0.6938,
0.36529,
0,
0.6536,
1.44,
1.117,
0.952,
0.9566,
1.2376,
0.7255,
0.9943,
0.6922,
1.2922,
0.7867,
1.0402,
0.814,
]
)
ca = np.array(
[
0,
0,
0,
2.5248,
1.8719,
2.2771,
1.5311,
1.2763,
0,
3.0679,
1.3577,
0,
3.1762,
0,
2.9625,
4.60,
]
)
naa = np.array(
[0, 0, -1.0433, 0, 0, -1.4576, 0, 0, 0, 0, 0, 0, -4.9224, 0, -3.2356, 0]
)
se = np.array(
[
0.29,
0.19,
0.23,
0.49,
0.45,
0.34,
0.32,
0.36,
0.43,
0.37,
0.4,
0.33,
0.4,
0.43,
0.39,
0.32,
]
)
columns = [
"element",
"constant",
"Si",
"Al_vi",
"Ti",
"Fe3",
"Fe2",
"Ca",
"Na_A",
"se",
]
aem_params = pd.DataFrame(
dict(
constant=constants,
Si=si,
Al_vi=al,
Ti=ti,
Fe3=fe3,
Fe2=fe2,
Ca=ca,
Na_a=naa,
SE=se,
),
index=elements,
)
if element in elements:
aem_kd = np.exp(
aem_params.loc[element].constant
+ (aem_params.loc[element].Si * amph_sites_ff["Si_T"])
+ (aem_params.loc[element].Al_vi * amph_sites_ff["Al_D"])
+ (aem_params.loc[element].Ti * amph_sites_ff["Ti_D"])
+ (aem_params.loc[element].Fe3 * amph_sites_ff["Fe3_D"])
+ (
aem_params.loc[element].Fe2
* (amph_sites_ff["Fe2_C"] + amph_sites_ff["Fe2_B"])
)
+ (aem_params.loc[element].Ca * amph_sites_ff["Ca_B"])
+ (aem_params.loc[element].Na_a * amph_sites_ff["Na_A"])
)
aem_kd_se = aem_params.loc[element].SE
else:
raise Exception(
"The element you have selected is not supported by this function. Please choose another one"
)
return aem_kd, aem_kd_se
|
8c1f4f4b82ba24801770fffc1b8a127183a63428
| 34,018 |
def _compute_errors_and_split(p, bezier, u):
"""Compute the maximum and rms error between a set of points and a bezier curve"""
dists = np.linalg.norm(bezier.xy(u)-p,axis=1)
i = np.argmax(dists)
rms = np.sqrt(np.mean(dists**2))
if i==0:
return 0.0, rms, len(p)//2
elif i==len(p)-1:
return 0.0, rms, len(p)//2
else:
return rms, dists[i], i
|
d97c06a0924e6a0d26a57d4fd0bd4e7322e74206
| 34,019 |
def rebin_flux(target_wavelength, source_wavelength, source_flux):
"""Rebin a flux onto a new wavelength grid."""
targetwl = target_wavelength
originalwl = source_wavelength
originaldata = source_flux[1:-1]
# The following is copy-pasted from the original fluxcal.py
originalbinlimits = ( originalwl[ :-1 ] + originalwl[ 1: ] ) / 2.
okaytouse = np.isfinite( originaldata )
originalweight = np.where(okaytouse, 1., 0.)
originaldata = np.where(okaytouse, originaldata, 0.)
originalflux = originaldata * np.diff( originalbinlimits )
originalweight *= np.diff( originalbinlimits )
nowlsteps = len( targetwl )
rebinneddata = np.zeros( nowlsteps )
rebinnedweight = np.zeros( nowlsteps )
binlimits = np.array( [ np.nan ] * (nowlsteps+1) )
binlimits[ 0 ] = targetwl[ 0 ]
binlimits[ 1:-1 ] = ( targetwl[ 1: ] + targetwl[ :-1 ] ) / 2.
binlimits[ -1 ] = targetwl[ -1 ]
binwidths = np.diff( binlimits )
origbinindex = np.interp( binlimits, originalbinlimits,
np.arange( originalbinlimits.shape[0] ),
left=np.nan, right=np.nan )
fraccounted = np.zeros( originaldata.shape[0] )
# use fraccounted to check what fraction of each orig pixel is counted,
# and in this way check that flux is conserved.
maximumindex = np.max( np.where( np.isfinite( origbinindex ) ) )
for i, origindex in enumerate( origbinindex ):
if np.isfinite( origindex ) :
# deal with the lowest orig bin, which straddles the new lower limit
lowlimit = int( origindex )
lowfrac = 1. - ( origindex % 1 )
indices = np.array( [ lowlimit] )
weights = np.array( [ lowfrac ] )
# deal with the orig bins that fall entirely within the new bin
if np.isfinite( origbinindex[i+1] ):
intermediate = np.arange( int( origindex )+1, \
int(origbinindex[i+1]) )
else :
# XXX This is wrong: maximumindex is in the wrong scale
#intermediate = np.arange( int( origindex )+1, \
# maximumindex )
# This may also be wrong, but at least it doesn't crash
intermediate = np.arange(0)
indices = np.hstack( ( indices, intermediate ) )
weights = np.hstack( ( weights, np.ones( intermediate.shape ) ) )
# deal with the highest orig bin, which straddles the new upper limit
if np.isfinite( origbinindex[i+1] ):
upplimit = int( origbinindex[i+1] )
uppfrac = origbinindex[ i+1 ] % 1
indices = np.hstack( ( indices, np.array( [ upplimit ] ) ) )
weights = np.hstack( ( weights, np.array( [ uppfrac ] ) ) )
fraccounted[ indices ] += weights
rebinneddata[ i ] = np.sum( weights * originalflux[indices] )
rebinnedweight[i ]= np.sum( weights * originalweight[indices] )
# now go back from total flux in each bin to flux per unit wavelength
rebinneddata = rebinneddata / rebinnedweight
return rebinneddata
|
361484666824c7b8f23997812822cadaf57bcf47
| 34,020 |
def rate_match(residue, bead, match):
"""
A helper function which rates how well ``match`` describes the isomorphism
between ``residue`` and ``bead`` based on the number of matching atomnames.
Parameters
----------
residue : networkx.Graph
A graph. Required node attributes:
:atomname: The name of an atom.
bead : networkx.Graph
A subgraph of ``residue`` where the isomorphism is described by ``match``.
Required node attributes:
:atomname: The name of an atom.
Returns
-------
int
The number of entries in match where the atomname in ``residue`` matches
the atomname in ``bead``.
"""
return sum(residue.nodes[rdx].get('atomname') == bead.nodes[bdx].get('atomname')
for rdx, bdx in match.items())
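# Illustrative sketch (added): two single-node graphs and an identity match score 1
# when the atom names agree.
import networkx as nx
res = nx.Graph()
res.add_node(0, atomname='CA')
bead = nx.Graph()
bead.add_node(0, atomname='CA')
assert rate_match(res, bead, {0: 0}) == 1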
|
474c2a4fb8be84935088dd1984445c4d0dccb980
| 34,021 |
def isNotEmptyString(input:str):
"""
    This function checks that a string is not empty.
    :param input:str: the given string
"""
if isNotNone(input):
tmp = input.lstrip(' ')
return isStr(input) and (len(tmp) > 0)
else:
return False
|
53c9fecea08947b500047962f609236ebc9bbb7f
| 34,022 |
def show_queue(event_id):
"""
    Shows the list of people waiting to reserve the event (the queue). Works
    only for the owner of the event.
    :type event_id: int
    :param event_id: id of the event
"""
try:
with conn:
with conn.cursor() as cur:
cur.execute("""select * from pokaz_kolejke_wydarzenia(%s, %s)""", (session['uid'], event_id,))
queue = [{
'reservation_id': entry[0],
'email': entry[1],
} for entry in cur.fetchall()]
cur.execute("""select tytul from wydarzenia where id=%s""", (event_id,))
evt = {
'event_id': event_id,
'title': cur.fetchone()[0]
}
except psycopg2.InternalError as ex:
flash(ex.diag.message_primary, 'danger')
return redirect(url_for('my_events'))
return render_template('show_queue.html', queue=queue, event=evt)
|
d617a4d71a925d6f6f000fb481e7afbf9ee4ff66
| 34,023 |
def _load_contact_template(user_email, email_details):
"""
Loads the email contents and returns the template for contact us
"""
##Email Template Init###
email_template = EmailMessage(
subject=email_details['subject'],
from_email="Questr <[email protected]>",
to=["Questr <[email protected]>"],
headers={'Reply-To': "Questr <[email protected]>"}
)
###List Email Template###
email_template.template_name = email_details['template_name']
###List Email Tags to be used###
email_template.global_merge_vars = email_details['global_merge_vars']
return email_template
|
4534c0ae8ad01c3abff20e8478589c0b7863118f
| 34,024 |
def create_index(files, include_metadata=False, multi_run=False):
"""creates the index from list of files, by crawling over each file"""
ecpps = None
ecpps = run_func_on_files(ElChemPathParser, files, multi_run=multi_run)
ecds = None
if include_metadata:
ecds = run_func_on_files(
ElChemData,
files,
multi_run=multi_run,
metadata_only=include_metadata,
)
index = pd.DataFrame()
if not ecpps:
return index, ecpps, ecds
try:
index = merge_and_make_index_from_collections(ecpps, ecds)
except Exception as exc:
logger.error(f"create index error, index will be empty : {exc}")
index = add_extra_folder_columns_to_index(files, index)
return index, ecpps, ecds
|
ccba3c031d2b763be19f4148162fc5cb39210625
| 34,025 |
def save_partnership_contact_form(request):
""" Saves the requests coming from the Partnership Contact form on the
home page """
if request.method == 'POST':
form = PartnershipRequestForm(request.POST)
if form.is_valid():
form.save()
send_partnership_request_email(request.POST)
messages.success(request, ("Thank you very much for your interest!"
" We will be in touch shortly."))
else:
print(form.errors)
messages.error(
request, ("Sorry, there was an error submitting your request. "
"Please try again or send an email to "
f"{settings.SUPPORT_EMAIL}."))
return redirect(reverse('home'))
else:
return redirect(reverse('home'))
|
f22958204b3edd587e4d6a3ea6c9f6dee12f1705
| 34,026 |
def check_and_format_value(v):
"""
Formats all Pythonic boolean value into XML ones
"""
booleans = {'True': 'true', 'False': 'false'}
clean = strip(v)
if clean in booleans:
return booleans.get(clean)
return clean
|
26beca017621efe425a99d5f7af0645953b83870
| 34,027 |
def name_exists(name, id=-1):
"""Check if category name exists in db.
Arguments: name as String, optional id as Integer
Public categories are not allowed to appear more than once. Private
categories can be duplicates.
"""
try:
c = db_session.query(Category).filter(Category.name==name,\
Category.public==True, Category.id!=id).one()
except:
c = None
if c:
return True
return False
|
36d2d8597431b62425d1270c9b82c5940eb6cbf0
| 34,028 |
def perimeter_nd(img_np, largest_only=False):
"""Get perimeter of image subtracting eroded image from given image.
Args:
img_np (:obj:`np.ndarray`): Numpy array of arbitrary dimensions.
largest_only (bool): True to retain only the largest connected
component, typically the outer border; defaults to False.
Returns:
:obj:`np.ndarray`: The perimeter as a boolean array where True
represents the border that would have been eroded.
"""
interior = morphology.binary_erosion(img_np)
img_border = np.logical_xor(img_np, interior)
if largest_only:
# retain only the largest perimeter based on pixel count
labeled = measure.label(img_border)
labels, counts = np.unique(labeled[labeled != 0], return_counts=True)
labels = labels[np.argsort(counts)]
img_border[labeled != labels[-1]] = False
#print("perimeter:\n{}".format(img_border))
return img_border
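# Illustrative sketch (added; assumes `morphology` and `measure` come from skimage
# and numpy is imported as np): the perimeter of a filled block is its outer ring.
import numpy as np
img = np.zeros((7, 7), dtype=bool)
img[1:6, 1:6] = True
border = perimeter_nd(img)
print(border.sum())   # 16 pixels: the outer ring of the 5x5 block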
|
8957efb818c3d17485c88a0864c06f14b091c169
| 34,029 |
def get_storing_type():
"""Return the storing type of the data file.
One of the following integers:
0 --> ST_ALWAYS_FAST
1 --> ST_ALWAYS_SLOW
2 --> ST_FAST_ON_TRIGGER
3 --> ST_FAST_ON_TRIGGER_SLOW_OTH
A STORING_TYPE dict is provided by this module.
Wraps:
int DWGetStoringType();
(not documented)
"""
return _get_storing_type()
|
aa10f947e55c0731e9fb6446a9c102198954b09a
| 34,030 |
from typing import Optional
import requests
def transactions(api_url : str, network_id : NetworkIdentifier, operator : Optional[Operator] = "and",
max_block : Optional[int] = None, offset : Optional[int] = None, limit : Optional[int] = None,
transaction_id : Optional[TransactionIdentifier] = None, account_id : Optional[AccountIdentifier] = None,
coin_id : Optional[CoinIdentifier] = None, currency : Optional[Currency] = None,
status : Optional[str] = None, type_ : Optional[str] = None, address : Optional[str] = None,
                 success : Optional[bool] = None, session : Optional[requests.Session] = None) -> SearchTransactionsResponse:
"""
Search for transactions that match given conditions.
Parameters
----------
api_url: str
network_id: NetworkIdenfier
operator: Operator, optional
This is either "and" or "or", and determines how multiple conditions
should be applied. Defaults to "and"
max_block: int, optional
The newest block to consider when searching. If none is provided,
the current block is assumed.
offset: int, optional
Offset into the query result to start returning transactions.
If any query parameters are changed, this offset will also change.
limit: int, optional
The maximum number of transactions to return in a call.
transaction_id : TransactionIdentifier, optional
account_id : AccountIdentifier, optional
Any included metadata in this will be considered unique when searching
coin_id: CoinIdentifier, optional
currency: Currency, optional
status: str, optional
The network-specific operation type.
type_: str, optional
the network-specific operation type.
address: str, optional
The AccountIdentifier.address, this will get all transactions related to
an account address regardless of the subaccount.
success: bool, optional
A synthetic condition populated by parsing network-specific operation statuses
(using the mapping provided in /network/options).
session: requests.Session, optional
Returns
-------
SearchTransactionsResponse
transactions: list[BlockTransaction]
total_count: int
next_offset: int, optional
Used when paginated results, if this is not populated
there are no more results.
"""
req = SearchTransactionsRequest(network_identifier=network_id, operator=operator, max_block=max_block,
offset=offset, limit=limit, transaction_identifier=transaction_id,
account_identifier=account_id, coin_identifier=coin_id, currency=currency,
status=status, type=type_, address=address, success=success)
return search_for_transactions(api_url, req, session)
|
76b4a64f24dc54a7e6f1a37761b82ec2222e4fbb
| 34,031 |
def _normalize_3dinputs(x, y, z):
"""Allow for a variety of different input types, but convert them all to componentwise
numpy arrays that the functions in this module assume."""
# Handle different forms in input arguments
x = np.atleast_2d(x) # Assure a numpy array for componentwise or array versions
componentwise = False
if y is not None and z is not None:
# Componentwise inputs, separate arguments for X,Y,Z
componentwise = True
y = np.atleast_2d(y)
z = np.atleast_2d(z)
elif x.ndim == 2 and x.shape[1] == 3: # Array of 3-element vectors
y = x[:, 1]
z = x[:, 2]
x = x[:, 0]
else:
raise ValueError() # Must be right type if np.array(x) worked above
return x, y, z, componentwise
|
f6162959cee8c4e5efa6fff5f9d81a63bacb396b
| 34,032 |
import phono3py._phono3py as phono3c
def get_grid_point_from_address(address, D_diag):
"""Return GR grid-point indices of grid addresses.
Parameters
----------
address : array_like
Grid address.
shape=(3, ) or (n, 3), dtype='int_'
D_diag : array_like
This corresponds to mesh numbers. More precisely, this gives
diagonal elements of diagonal matrix of Smith normal form of
grid generating matrix. See the detail in the docstring of BZGrid.
shape=(3,), dtype='int_'
Returns
-------
int
GR-grid point index.
or
ndarray
GR-grid point indices.
shape=(n, ), dtype='int_'
"""
adrs_array = np.array(address, dtype="int_", order="C")
mesh_array = np.array(D_diag, dtype="int_")
if adrs_array.ndim == 1:
return phono3c.grid_index_from_address(adrs_array, mesh_array)
gps = np.zeros(adrs_array.shape[0], dtype="int_")
for i, adrs in enumerate(adrs_array):
gps[i] = phono3c.grid_index_from_address(adrs, mesh_array)
return gps
|
86a6bb7385dfeb6553e1784ca656c7fbfebd30e4
| 34,033 |
def _generate_interpreted_layer(diagnostic_layer):
"""Generate interpreted layer from diagnostic test band
Parameters
----------
diagnostic_layer: numpy.ndarray
Diagnostic test band
Returns
-------
interpreted_layer : numpy.ndarray
Interpreted layer
"""
logger.info('step 2 - get interpreted DSWX band')
shape = diagnostic_layer.shape
interpreted_layer = np.zeros(shape, dtype = np.uint8)
for i in range(shape[0]):
for j in range(shape[1]):
for key, value in interpreted_dswx_band_dict.items():
if diagnostic_layer[i, j] == key:
interpreted_layer[i, j] = value
break
else:
interpreted_layer[i, j] = 255
return interpreted_layer
|
9810ec9306d967993881d5b94701239dbd862727
| 34,034 |
def get_evaluations(filename):
"""Return a list of all evaluations within an ENDF file.
Parameters
----------
filename : str
Path to ENDF-6 formatted file
Returns
-------
list
A list of :class:`openmc.data.endf.Evaluation` instances.
"""
evaluations = []
with open(str(filename), 'r') as fh:
while True:
pos = fh.tell()
line = fh.readline()
if line[66:70] == ' -1':
break
fh.seek(pos)
evaluations.append(Evaluation(fh))
return evaluations
|
469153dd3385f8f9ca3008c410d7b039e7167868
| 34,037 |
def svm_ova():
"""Multiclass (One-vs-All) Linear Support Vector Machine."""
return CClassifierMulticlassOVA(CClassifierSVM)
|
064338ae0824225ad9fcd1dafc842bd2fee5180a
| 34,038 |
import re
def parse(filename):
""" Dict of Data """
to_return = {}
f = open(filename,'r')
for line in f.readlines():
m = re.match(parse_regex, line)
result = m.groups()
if result[0] == '+':
to_return[result[1]] = True
else:
to_return[result[1]] = False
return to_return
|
dbf4f25583913e1c99d70f99084a0aad2b6f618e
| 34,039 |
def get_Theta_wtr_d(region, Theta_ex_prd_Ave_d):
"""日平均給水温度 (℃) (12)
Args:
region(int): 省エネルギー地域区分
Theta_ex_prd_Ave_d(ndarray): 期間平均外気温度 (℃)
Returns:
ndarray: 日平均給水温度 (℃)
"""
# 日平均給水温度を求める際の会期係数
a_wtr, b_wtr = get_table_7()[region - 1]
# 日平均給水温度 (12)
Theta_wtr_d = np.clip(a_wtr * Theta_ex_prd_Ave_d + b_wtr, 0.5, None)
return Theta_wtr_d
|
d038da7a01f6acc7ac4204e8ef8c0ac99a74027a
| 34,040 |
def get_FAAM_flights_df():
"""
Retrieve DataFrame of FAAM BAe146 flights
"""
# Flights to use...
DataRoot = get_local_folder('DataRoot')
folder = '/{}/FAAM/core_faam_NetCDFs/'.format(DataRoot)
filename = 'FAAM_BAe146_Biomass_burning_and_ACISIS_flights_tabulated.csv'
df = pd.read_csv(folder+filename)
# Only consider the dates after Jan 2018
DateVar = 'Date'
df[DateVar] = pd.to_datetime(df[DateVar])
return df
|
0a60f9cdc61479da29de9ee93102af1abc9354b2
| 34,041 |
def get_facebook_customisations(token, aiid):
"""load customisations for the page"""
return fetch_api(
'/ai/{aiid}/facebook/custom',
token=token,
aiid=aiid,
timeout=config.API_FACEBOOK_TIMEOUT
)
|
067e3f30143238053ba796fe3f0bac4fd4f7a751
| 34,042 |
def plot_profile_base(
x, y, err, label_x, label_y, annotate, categorical, scale_x, scale_y
):
"""Make a scatter plot of eval metric `y` vs training config param `x`"""
f, ax = plt.subplots()
if scale_y is not None:
ax.set_yscale(scale_y)
if scale_x is not None:
ax.set_xscale(scale_x)
ax.scatter(x, y, color = 'C0', label = 'model')
if err is not None:
ax.vlines(x, y - err, y + err, color = 'C0')
if (label_x is not None) and (label_y is not None):
ax.set_title('%s vs %s' % (label_y, label_x))
ax.set_xlabel(label_x)
ax.set_ylabel(label_y)
if annotate:
# pylint: disable=consider-using-enumerate
for i in range(len(x)):
ax.annotate(
"%.1e" % y[i],
[x[i], y[i]],
horizontalalignment = 'center',
verticalalignment = 'bottom',
rotation = 45,
fontsize = 'smaller'
)
if categorical:
x = [ "\n".join(wrap(l, 70)) for l in x ]
ax.set_xticklabels(x)
f.autofmt_xdate(rotation = 45)
ax.legend()
return f, ax
|
6c23989b4eaa380d5955279e1e1ba8b2eb3b85be
| 34,044 |
def train_one_step(
train_batch_i,
bvae_model,
genmo_optimizer,
infnet_optimizer,
prior_optimizer,
grad_variable_dict,
grad_sq_variable_dict):
"""Train Discrete VAE for 1 step."""
metrics = {}
input_batch = process_batch_input(train_batch_i)
(genmo_grads, prior_grads, infnet_grads, genmo_loss) = estimate_gradients(
input_batch, bvae_model, FLAGS.grad_type,
stick_breaking=FLAGS.stick_breaking,
tree_structure=FLAGS.tree_structure,
importance_weight=FLAGS.importance_weight,
logits_sorting_order=FLAGS.logits_order)
genmo_vars = bvae_model.decoder_vars
genmo_optimizer.apply_gradients(list(zip(genmo_grads, genmo_vars)))
prior_vars = bvae_model.prior_vars
prior_optimizer.apply_gradients(list(zip(prior_grads, prior_vars)))
infnet_vars = bvae_model.encoder_vars
infnet_optimizer.apply_gradients(list(zip(infnet_grads, infnet_vars)))
batch_size_sq = tf.cast(FLAGS.batch_size * FLAGS.batch_size, tf.float32)
encoder_grad_var = bvae_model.compute_grad_variance(
grad_variable_dict[FLAGS.grad_type],
grad_sq_variable_dict[FLAGS.grad_type],
infnet_grads) / batch_size_sq
variance_dict = {}
if (FLAGS.grad_type == 'reinforce_loo') and FLAGS.estimate_grad_basket:
for method in FLAGS.estimate_grad_basket:
if method == FLAGS.grad_type:
continue # Already computed
if ('disarm' in method) and ('tree' not in method):
main_method, logits_order = method.split('-')
logits_order = None if logits_order == 'null' else logits_order
(_, _, infnet_grads, _) = estimate_gradients(
input_batch, bvae_model, main_method,
stick_breaking=True,
tree_structure=False,
logits_sorting_order=logits_order)
variance_dict[method] = bvae_model.compute_grad_variance(
grad_variable_dict[method],
grad_sq_variable_dict[method],
infnet_grads) / batch_size_sq
return (encoder_grad_var, variance_dict, genmo_loss, metrics)
|
b1a4dff6dd43f1569908718bef50eacf77fe84a8
| 34,045 |
def make_stepped_schedule(steps):
"""
Helper to generate a schedule function to perform step-wise changes of a given optimizer hyper-parameter.
:param steps: List of tuples (start_epoch, value);
start_epochs should be increasing, starting at 0. E.g.
momentum_schedule = make_stepped_schedule([(0, 0.5), (5, 0.9)])
"""
def get_value_for_epoch(epoch):
return next(x[1][1] for x in reversed(list(enumerate(steps))) if x[1][0] <= epoch) # search backwards for first step whose epoch <= current epoch, return associated hyper-parameter value
return get_value_for_epoch
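# Illustrative usage (added), matching the example given in the docstring:
momentum_schedule = make_stepped_schedule([(0, 0.5), (5, 0.9)])
print(momentum_schedule(0), momentum_schedule(4), momentum_schedule(7))   # 0.5 0.5 0.9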
|
33fcd823bba6bcbd5fbf9edfbad780d79a1875f5
| 34,046 |
def sepfirnd(input,filters,axes,output=None,mode='reflect',cval=0.0,origin=0):
"""Apply multiple 1d filters to input using scipy.ndimage.filters.convolve1d
input : array_like
filters : sequence of array_like
Sequence of filters to apply along axes. If length 1, will apply the same filter to all axes.
axes : sequence of int
output : array, optional
mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
cval : scalar, optional
origin : int, optional
"""
if output is None:
output = np.empty_like(input)
tmp = output
if np.isscalar(filters[0]):
filters = [np.asarray(filters)]
if np.isscalar(axes):
axes = [axes]
if len(axes) > 1:
tmp = np.empty_like(output)
if len(filters) == 1:
filters = [filters[0]]*len(axes)
if len(axes) & 1 == 1: #pre-swap so that last write goes to output
output,tmp = tmp,output
for filt,ax in zip(filters,axes):
output,tmp = tmp,output #swap buffers
convolve1d(input,filt,ax,output,mode,cval,origin)
input = output
return output
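# Illustrative usage sketch (added; assumes numpy as np and scipy.ndimage's convolve1d
# are imported as the function body requires): a 3-tap box filter applied along both
# axes gives a separable 2-d smoothing pass.
import numpy as np
img = np.arange(25, dtype=float).reshape(5, 5)
box = np.ones(3) / 3.0
smoothed = sepfirnd(img, [box], (0, 1))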
|
3b2cc1687343121c3b93805efdadb467c0dd9e34
| 34,047 |
import json
def daily_treasury(request):
"""
Get daily treasury. Data is from https://fiscaldata.treasury.gov/datasets/daily-treasury-statement/operating-cash-balance
"""
pd.options.display.float_format = '{:.2f}'.format
daily_treasury_stats = pd.read_sql_query("SELECT * FROM daily_treasury", conn)
daily_treasury_stats['Moving Avg'] = daily_treasury_stats['close_today_bal'].rolling(window=7).mean()
daily_treasury_stats.rename(columns={"record_date": "Date", "close_today_bal": "Close Balance",
"open_today_bal": "Open Balance", "amount_change": "Amount Change",
"percent_change": "Percent Change"},
inplace=True)
with open(r"database/economic_date.json", "r+") as r:
data = json.load(r)
return render(request, 'economy/daily_treasury.html',
{"daily_treasury_stats": daily_treasury_stats[::-1].to_html(index=False),
"next_date": data})
|
6b5f14edf68858683727b3c8a640674c4499e4e3
| 34,048 |
def list_representations():
"""list available server-side objects"""
try:
response = get_bus().list_representations()
return JsonResponse({'type': 'list', 'list': response})
except Exception as exception:
message, status = handle_exception(exception)
return JsonResponse({'message': message}, status=status)
|
01241939634c3eff088c1d3c6b2caac8847d1f9f
| 34,049 |
def format_endpoint_argument_doc(argument):
"""Return documentation about the argument that an endpoint accepts."""
doc = argument.doc_dict()
# Trim the strings a bit
doc['description'] = clean_description(py_doc_trim(doc['description']))
details = doc.get('detailed_description', None)
if details is not None:
doc['detailed_description'] = clean_description(py_doc_trim(details))
return doc
|
7acb21e3abafb188e3850ee9c78803e55d33fc8d
| 34,050 |
import scipy
def from_edgelist(filename, directed=False, num_vertices=None, **kwargs) -> Graph:
"""
Construct an ``sgtl.Graph`` object from an edgelist file.
The edgelist file represents a graph by specifying each edge in the graph on a separate line in the file.
Each line looks like::
id1 id2 <weight>
where id1 and id2 are the vertex indices of the given edge, and an optional weight is given as a floating point
number. If any edge has a specified weight, then you must specify the weight for every edge. That is, every line
in the edgelist must contain the same number of elements (either 2 or 3). The file may also contain comment lines.
By default, the elements on a given line are assumed to be separated by spaces, and comments should begin with a
'#' character. These defaults can be changed by passing additional key-word arguments which will be passed to
pandas.read_csv.
:param filename: The edgelist filename.
:param directed: Whether the graph is directerd.
:param num_vertices: The number of vertices in the graph, if this is known in advance. Specifying this value will
speed up the method.
:param kwargs: Key-word arguments to pass to the `pandas.read_csv` method which will be used to read in the file.
This can be used to set a delimiter other than the space character, or to change the comment
character.
:return: the constructed `sgtl.Graph` object
:raises TypeError: If the edgelist file cannot be parsed due to incorrect types.
:Example:
An example edgelist file is as follows::
# This is a comment line
0 1 0.5
1 2 1
2 0 0.5
The above file gives a weighted triangle graph.
"""
# Set the default values of the parameters
if 'sep' not in kwargs:
kwargs['sep'] = r'\s+'
if 'comment' not in kwargs:
kwargs['comment'] = '#'
if 'header' not in kwargs:
kwargs['header'] = None
# Read in the edgelist file, and create the adjacency matrix of the graph as we go
if num_vertices is None:
num_vertices = 1
adj_mat = scipy.sparse.lil_matrix((num_vertices, num_vertices))
maximum_node_index = num_vertices - 1
edgelist_data = pd.read_csv(filename, **kwargs)
for edge_row in edgelist_data.iterrows():
# Get the vertex indices from the edgelist file
vertex1 = int(edge_row[1][0])
vertex2 = int(edge_row[1][1])
# Check whether the weight of the edge is specified
if len(edge_row[1]) > 2:
weight = edge_row[1][2]
else:
weight = 1.0
if not isinstance(weight, float):
raise TypeError("Edge weights must be given as floating point numbers.")
# Update the size of the adjacency matrix if we have encountered a larger vertex index than previously.
if vertex1 > maximum_node_index or vertex2 > maximum_node_index:
old_maximum_node_index = maximum_node_index
maximum_node_index = max(vertex1, vertex2)
# Update the shape of the lil matrix
adj_mat._shape = (maximum_node_index + 1, maximum_node_index + 1) # pylint: disable=protected-access
# Add rows to the data elements of the lil matrix
for i in range(old_maximum_node_index + 1, maximum_node_index + 1):
adj_mat.rows = np.append(adj_mat.rows, 1)
adj_mat.rows[i] = []
adj_mat.data = np.append(adj_mat.data, 1)
adj_mat.data[i] = []
# Update the adjacency matrix with this edge.
adj_mat[vertex1, vertex2] = weight
if not directed:
adj_mat[vertex2, vertex1] = weight
# Construct the graph object and return it
return Graph(adj_mat)
|
e425169eadb405458ffe3e72629409cb67e563db
| 34,051 |
def shn_shelter_rheader(r, tabs=[]):
""" Resource Headers """
if r.representation == "html":
rheader_tabs = shn_rheader_tabs(r, tabs)
record = r.record
rheader = DIV(TABLE(
TR(
TH(Tstr("Name") + ": "), record.name
),
),
rheader_tabs)
return rheader
else:
return None
|
d802f68d89e04ec420e366ef651df278025d9822
| 34,052 |
from operator import xor
def usig1(bitarray: UBitArray32) -> UBitArray32:
"""
(uppercase sigma 1)
Computes the XOR of three sets of bits which result from rotating the input
set rightwards by 6, then 11, and then 25.
Args:
bitarray: (UBitArray32) The set of bits to operate on.
Returns:
(UBitArray32) The XOR of the three sets that result from
rotating the input set..
"""
a = bitarray.rotr(6)
b = bitarray.rotr(11)
c = bitarray.rotr(25)
return xor(a,b,c)
|
62aa93fa77dde53aecfe5bfd335ec919eedbbdb2
| 34,054 |
def fullRadar(dicSettings, mcTable):
"""
Calculates the radar variables over the entire range
Parameters
----------
dicSettings: a dictionary with all settings output from loadSettings()
mcTable: McSnow data output from getMcSnowTable()
Returns
-------
specXR: xarray dataset with the spectra(range, vel) and KDP(range)
"""
specXR = xr.Dataset()
#specXR_turb = xr.Dataset()
counts = np.ones_like(dicSettings['heightRange'])*np.nan
vol = dicSettings['gridBaseArea'] * dicSettings['heightRes']
for i, heightEdge0 in enumerate(dicSettings['heightRange']):
heightEdge1 = heightEdge0 + dicSettings['heightRes']
print('Range: from {0} to {1}'.format(heightEdge0, heightEdge1))
mcTableTmp = mcTable[(mcTable['sHeight']>heightEdge0) &
(mcTable['sHeight']<=heightEdge1)].copy()
#mcTableTmp = mcTableTmp[(mcTableTmp['sPhi']<=4)]
if (dicSettings['scatSet']['mode'] == 'full') or (dicSettings['scatSet']['mode'] == 'table') or (dicSettings['scatSet']['mode'] == 'wisdom') :
mcTableTmp = mcTableTmp[(mcTableTmp['sPhi']>=0.01)]
mcTableTmp = calcParticleZe(dicSettings['wl'], dicSettings['elv'],
mcTableTmp, ndgs=dicSettings['ndgsVal'],
scatSet=dicSettings['scatSet'])
tmpSpecXR = getMultFrecSpec(dicSettings['wl'], mcTableTmp, dicSettings['velBins'],
dicSettings['velCenterBin'], heightEdge1,dicSettings['convolute'],dicSettings['nave'],dicSettings['noise_pow'],
dicSettings['eps_diss'], dicSettings['uwind'], dicSettings['time_int'], dicSettings['theta']/2./180.*np.pi, scatSet=dicSettings['scatSet'] )
#volume normalization
tmpSpecXR = tmpSpecXR/vol
specXR = xr.merge([specXR, tmpSpecXR])
if (dicSettings['scatSet']['mode'] == 'full') or (dicSettings['scatSet']['mode'] == 'table') or (dicSettings['scatSet']['mode'] == 'wisdom') :
#calculating the integrated kdp
tmpKdpXR = getIntKdp(dicSettings['wl'], mcTableTmp, heightEdge1)
#volume normalization
tmpKdpXR = tmpKdpXR/vol
specXR = xr.merge([specXR, tmpKdpXR])
counts[i] = len(mcTableTmp.vel.values)
return specXR
|
c7de880a159b80c60ec030a926f9d6278103c8cb
| 34,055 |
def connect_db(uri, default_db_name=None, factory=pymongo.MongoClient):
"""
Use pymongo to parse a uri (possibly including database name) into
a connected database object.
This serves as a convenience function for the common use case where one
wishes to get the Database object and is less concerned about the
intermediate MongoClient object that pymongo creates (though the
connection is always available as db.client).
>>> db = connect_db(
... 'mongodb://mongodb.localhost/mydb?readPreference=secondary')
>>> db.name
'mydb'
>>> db.client.read_preference
Secondary(...)
If no database is indicated in the uri, fall back to default.
>>> db = connect_db('mongodb://mgo/', 'defaultdb')
>>> db.name
'defaultdb'
The default should only apply if no db was present in the URI.
>>> db = connect_db('mongodb://mgo/mydb', 'defaultdb')
>>> db.name
'mydb'
"""
uri_p = pymongo.uri_parser.parse_uri(uri)
client = factory(uri)
return client.get_database(uri_p['database'] or default_db_name)
|
2ccb233aec3f9df9da1e4a9b82c7c4e5dbbfd0d9
| 34,056 |
from pathlib import Path
import re
def parse_path(path: Path):
"""Obtain filename, task, framework and engine from saved path.
"""
if re.match(r'^.*?[!/]*/[A-Za-z]+-[A-Za-z]+/[A-Za-z_]+/\d+$', str(path.with_suffix(''))):
filename = path.name
architecture = path.parent.parent.parent.stem
task = Task[path.parent.name]
info = path.parent.parent.name.split('-')
framework = Framework[info[0]]
engine = Engine[info[1]]
version = int(Path(filename).stem)
return {
'architecture': architecture,
'task': task,
'framework': framework,
'engine': engine,
'version': version,
'filename': filename,
'base_dir': path.parent
}
else:
raise ValueError('Incorrect model path pattern')
|
310d7bccb6f54147cb2214da45f339243be99620
| 34,058 |
import requests
def check_newer_version(command: str):
"""
Query for the latest release of the chaostoolkit to compare it
with the current's version. If the former is higher then issue a warning
inviting the user to upgrade its environment.
"""
try:
command = command.strip()
r = requests.get(LATEST_RELEASE_URL, timeout=(2, 30),
params={"current": __version__, "command": command})
if r.status_code == 200:
payload = r.json()
latest_version = payload["version"]
if payload.get("up_to_date") is False:
options = '--pre -U' if 'rc' in latest_version else '-U'
logger.warning(
"\nThere is a new version ({v}) of the chaostoolkit "
"available.\n"
"You may upgrade by typing:\n\n"
"$ pip install {opt} chaostoolkit\n\n"
"Please review changes at {u}\n".format(
u=CHANGELOG_URL, v=latest_version, opt=options))
return latest_version
except Exception:
pass
|
0998818229f2cb0de5abb6bf9c9de0ba765fdbe9
| 34,059 |
def prettyprint(s, toUpper=False):
"""Given a string, replaces underscores with spaces and uppercases the
first letter of each word ONLY if the string is composed of lowercased
letters. If the param, toUpper is given then s.upper is returned.
Examples: "data_quality" -> "Data Quality"
"copy_number_123" -> "Copy Number 123"
"My_own_title" -> "My own title"
"Hla" -> "Hla"
"""
    if toUpper:
        s = s.upper()
    elif s.islower():
        # only title-case strings that are entirely lowercase (see docstring)
        s = s.title()
    s = s.replace("_", " ")
return s
|
18a57e74a2e3df66db4ede337663f9d8993a986b
| 34,060 |
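A quick sanity check of the documented behaviour (simple asserts mirroring the docstring examples, given the islower guard in the branch above):

assert prettyprint("data_quality") == "Data Quality"
assert prettyprint("copy_number_123") == "Copy Number 123"
assert prettyprint("My_own_title") == "My own title"
assert prettyprint("Hla") == "Hla"
assert prettyprint("data_quality", toUpper=True) == "DATA QUALITY"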
def doFilter(rule, dstPath):
"""
    Filter files under dstPath according to the given rule.
"""
global useStart
global useEnd
global useConatins
global searchInChild
    # file name suffix (may include the dot)
global fileNameSuffix
temp = rule.get("searchInChild", False)
if True == temp:
searchInChild = True
print "searchInChild is " + str(searchInChild)
fileNameSuffix = rule.get("fileNameSuffix", "")
matchPathList = doFilterInPath(dstPath)
print "match %d files" % len(matchPathList)
print "match list are "
for a in matchPathList:
print a
return matchPathList
|
9de196537753ffabd288f1aa64d0fe6b63a4225e
| 34,061 |
import json
def generate(schema_id: str) -> tuple:
"""Generates sample data from a schema
:param str schema_id: Schema id
:return: A http response
:rtype: tuple
"""
search = [{'schema': schema_id}]
if ObjectId.is_valid(schema_id):
search.append({'_id': ObjectId(schema_id)})
schema = app.data.driver.db['schema'].find_one({"$or": search})
if not schema:
return json.dumps({"_status": "ERR", "_error": ERROR_404}), 404, {'Content-Type': 'application/json'}
return generate_and_format(schema)
|
1caaa15753d0e33cb62d0d3c0f7cdf68945bb938
| 34,062 |
def recombine_edges(output_edges):
"""
Recombine a list of edges based on their rules.
Recombines identical Xe isotopes. Remove isotopes.
:param output_edges:
:return:
"""
mol = Chem.MolFromSmiles(".".join(output_edges))
# Dictionary of atom's to bond together and delete if they come in pairs
iso_dict = {}
for atom in mol.GetAtoms():
if atom.GetAtomicNum()==54:
# Get the isotope
iso = atom.GetIsotope()
if iso in iso_dict:
iso_dict[iso].append(get_info(atom))
else:
iso_dict[iso] = [get_info(atom)]
mw = Chem.RWMol(mol)
# Add bonds first
del_indices = []
for isotope in iso_dict:
if len(iso_dict[isotope])>1:
mw.AddBond(iso_dict[isotope][0][1],
iso_dict[isotope][1][1],
Chem.BondType.SINGLE)
del_indices.append(iso_dict[isotope][0][0])
del_indices.append(iso_dict[isotope][1][0])
# Now delete atoms
del_count = 0
for atom_index in sorted(del_indices):
mw.RemoveAtom(atom_index-del_count)
del_count+=1
Chem.SanitizeMol(mw)
return Chem.MolToSmiles(mw,isomericSmiles=True)
|
b3ffbb9d3cf15c0f411fb83a8f1013a1988ae7ec
| 34,063 |
def to_num_groups(word_groups):
"""
    This function returns a list of 24 numbers from a list of 24 words
representing the payload
"""
with open('wordlist.txt', 'rb') as (file):
words = file.readlines()
words = [word.decode('utf8').strip() for word in words]
num_groups = [words.index(word) for word in word_groups]
return num_groups
|
4286c584bb2f32d9c5ed0f3bcf2d0ee225b84ce4
| 34,065 |
import math
def _rsqrt(step_number, tail_start, body_value):
"""Computes a tail using a scaled reciprocal square root of step number.
Args:
step_number: Absolute step number from the start of training.
tail_start: Step number at which the tail of the curve starts.
body_value: Value relative to which the tail should be computed.
Returns:
A learning rate value that falls as the reciprocal square root of the step
number, scaled so that it joins smoothly with the body of a BodyAndTail
instance.
"""
return body_value * (math.sqrt(tail_start) / math.sqrt(step_number))
|
99138c88ae8d0fc0d49a5ac55e389cd5a40d5f90
| 34,066 |
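A small worked check of the tail formula (the arithmetic only, copied from the function above): the tail equals body_value exactly at tail_start and then falls as the reciprocal square root of the step number.

import math

body_value, tail_start = 0.01, 10_000
tail = lambda step: body_value * (math.sqrt(tail_start) / math.sqrt(step))

assert abs(tail(10_000) - 0.0100) < 1e-12  # joins the body smoothly at tail_start
assert abs(tail(40_000) - 0.0050) < 1e-12  # 4x the steps -> half the rate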
def set_formula_in_row(ws, num, row, mr_col=1):
"""
This "loops" through single cells in a column and applies mixing ratio formulas. The format of the formulas is quite
    convoluted for legacy reasons. Existing procedures made adhering to this format easier, but the gist is below.
Samples come in sets of 10, from a GC Run. They were kept in two columns in a spreadsheet, where the below is
repeated in sets of 5 rows (per GC Run). One GC Run looks like:
col1 | col2
-------------
samp1 | samp2
std1 | samp4
samp5 _______ # all beyond this line are quantified with standard 2
______| samp6
samp7 | std2
samp9 | samp10
Samples 1-5 use the first standard (sample 3) to quantify themselves, and samples 6-10 use the second standard
(sample 8) to quantify themselves. These rules are applied in the integration code itself, but the mixing ratios
and relevant statistics need to be calculated within the spreadsheet so the person integrating has access to them as
they integrate manually.
The sheet is loaded by add_formulas_and_format_sheet(), then the two columns are passed row-by-row to this function
to add the formulae before saving.
:param ws: object, open worksheet with openpyxl as the engine
    :param num: int, absolute row number (excluding the header)
:param row: object, the row object generated by iterating over ws
:param mr_col: int, in [1,2]
:return: ws, the modified worksheet is passed back
"""
    assert mr_col in [1, 2], "Invalid mixing ratio column. It must be either 1 or 2"
    std_relnum = 1 if mr_col == 1 else 3
    # if it's the first mixing ratio column, the standard will be in the second row (0-indexed: 1)
    # if it's the second mixing ratio column, the standard will be in the fourth row (0-indexed: 3)
    standard_div_line = 2 if mr_col == 1 else 1
    # samples 1-5 (excluding the standard) are quantified using the first standard (sample 3)
    # samples 6-10 (excluding the standard) are quantified using the second standard (sample 8)
# so, in column 1, every sample up to (0-indexed) 2 should be quantified with standard 1, and
# everything after is quantified with standard 2. In column 2, that number changes to 1
relnum = num % 5
# num is 0-indexed, relnum is the position in this group of 5 rows (one run is 5r x 2c for 10 total runs)
    if relnum == std_relnum: return ws  # skip the standard for this column
for cell in row:
if cell.value is None: # assume cells with some value have been modified and should not be changed
rownum = cell.row # retrieve the real row number
            pa_cell = f'C{rownum}' if mr_col == 1 else f'D{rownum}'
            # the peak area for a mixing ratio cell will always be C for column 1 and D for column 2, always same row
            if relnum <= standard_div_line:  # this should be quantified by standard 1, in this column
std_pa_cell = f'C{rownum - relnum + 1}'
else: # it should be quantified by standard 2, in the next column
std_pa_cell = f'D{rownum - relnum + 3}'
cell.value = f'={pa_cell}/{std_pa_cell} * 2067.16'
            if relnum == 0 and mr_col == 1:  # the first line of every 5-row batch needs additional statistics added
# this does not need to be done twice, which is why it's done only for MR col 1
run_range = f'E{rownum}:F{rownum+4}' # all mixing cells in the run
std_range = f'C{rownum+1}, D{rownum+3}' # the two standards
run_median_cell = ws[f'G{rownum}']
run_rsd_cell = ws[f'H{rownum}']
std_med_cell = ws[f'I{rownum}']
std_rsd_cell = ws[f'J{rownum}']
run_rsd_cell.number_format = '0.00%'
std_rsd_cell.number_format = '0.00%'
run_median_cell.value = f'=MEDIAN({run_range})' # set formulas
run_rsd_cell.value = f'=STDEV({run_range})/{run_median_cell.coordinate}'
std_med_cell.value = f'=MEDIAN({std_range})'
std_rsd_cell.value = f'=STDEV({std_range})/{std_med_cell.coordinate}'
return ws
|
2d4d438670e0760ce0158d5930390824634cce52
| 34,067 |
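A hedged usage sketch for the helper above (the workbook path and the column letters E/F fed to `iter_rows` are assumptions inferred from the formulas, not taken from the original module):

from openpyxl import load_workbook

wb = load_workbook("integration_sheet.xlsx")  # hypothetical file
ws = wb.active

# Feed mixing-ratio column 1 (col E) and column 2 (col F) to the helper row by row.
for num, row in enumerate(ws.iter_rows(min_row=2, min_col=5, max_col=5)):
    ws = set_formula_in_row(ws, num, row, mr_col=1)
for num, row in enumerate(ws.iter_rows(min_row=2, min_col=6, max_col=6)):
    ws = set_formula_in_row(ws, num, row, mr_col=2)

wb.save("integration_sheet.xlsx")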
from typing import Union
import pathlib
from typing import List
from typing import Tuple
import json
def prepare_ablation_from_path(
path: Union[str, pathlib.Path],
directory: Union[str, pathlib.Path],
save_artifacts: bool,
) -> List[Tuple[pathlib.Path, pathlib.Path]]:
"""Prepare a set of ablation study directories.
:param path: Path to configuration file defining the ablation studies.
:param directory: The directory in which the experimental artifacts (including the ablation configurations)
will be saved.
:param save_artifacts: Defines, whether the output directories for the trained models sampled during HPO should be
created.
:return: pairs of output directories and HPO config paths inside those directories
"""
directory = normalize_path(directory, *iter_unique_ids())
with open(path) as file:
config = json.load(file)
return prepare_ablation_from_config(config=config, directory=directory, save_artifacts=save_artifacts)
|
39e043ed3a9cb82a85610c4b6df0d2d718895fb1
| 34,068 |
def currentTime(*args, **kwargs):
"""
Modifications:
- if no args are provided, the command returns the current time
"""
if not args and not kwargs:
return cmds.currentTime(q=1)
else:
return cmds.currentTime(*args, **kwargs)
|
89b6d0c8abbea29b4a1f1f9f0f6458735313b400
| 34,070 |
def get_shapes(gdf: gpd.GeoDataFrame, colname: str) -> list:
"""
Extract and associate and format geometry and data in GeoDataFrame.
"""
assert "geometry" in gdf.columns, f"Expected `geometry` in {gdf.columns=}"
assert colname in gdf.columns, f"Expected {colname!r} in {gdf.columns=}."
return [(shape, value) for (shape, value) in zip(gdf.geometry, gdf[colname])]
|
bf65d6a2caeee7d82b2b84f2b07722445beefb58
| 34,071 |
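A hedged usage sketch: the (geometry, value) pairs returned above are in the shape accepted by rasterio's rasterize (the file name and column name below are made up for illustration):

import geopandas as gpd
from rasterio import features

gdf = gpd.read_file("zones.geojson")     # hypothetical input layer
shapes = get_shapes(gdf, "zone_id")      # list of (geometry, value) pairs
raster = features.rasterize(shapes, out_shape=(512, 512))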
def _decodeUserData(byteIter, userDataLen, dataCoding, udhPresent):
""" Decodes PDU user data (UDHI (if present) and message text) """
result = {}
if udhPresent:
# User Data Header is present
result['udh'] = []
udhLen = next(byteIter)
ieLenRead = 0
# Parse and store UDH fields
while ieLenRead < udhLen:
ie = InformationElement.decode(byteIter)
ieLenRead += len(ie)
result['udh'].append(ie)
del ieLenRead
if dataCoding == 0x00: # GSM-7
# Since we are using 7-bit data, "fill bits" may have been added to make the UDH end on a septet boundary
shift = ((udhLen + 1) * 8) % 7 # "fill bits" needed to make the UDH end on a septet boundary
# Simulate another "shift" in the unpackSeptets algorithm in order to ignore the fill bits
prevOctet = next(byteIter)
shift += 1
if dataCoding == 0x00: # GSM-7
if udhPresent:
userDataSeptets = unpackSeptets(byteIter, userDataLen, prevOctet, shift)
else:
userDataSeptets = unpackSeptets(byteIter, userDataLen)
result['text'] = decodeGsm7(userDataSeptets)
elif dataCoding == 0x02: # UCS2
result['text'] = decodeUcs2(byteIter, userDataLen)
else: # 8-bit (data)
userData = []
for b in byteIter:
userData.append(unichr(b))
result['text'] = ''.join(userData)
return result
|
91a259e2482ee654cc3fc1388138695ed975d778
| 34,072 |
from NexposeAPI import VulnData
from skaldship.exploits import connect_exploits
def import_all_vulndata(overwrite=False, nexpose_server={}):
"""
Uses the NexposeAPI and imports each and every vulnerability to Kvasir. Can take a looooong time.
Args:
overwrite: Whether or not to overwrite an existing t_vulndata record
Returns:
msg: A string message of status.
"""
db = current.globalenv['db']
vuln_class = VulnData()
vuln_class.host = nexpose_server.get('host', 'localhost')
vuln_class.port = nexpose_server.get('port', '3780')
if vuln_class.login(user_id=nexpose_server.get('user'), password=nexpose_server.get('pw')):
log(" [*] Populating list of Nexpose vulnerability ID summaries")
try:
vuln_class.populate_summary()
except Exception, e:
log(" [!] Error populating summaries: %s" % str(e), logging.ERROR)
return False
try:
vulnxml = etree.parse(StringIO(vuln_class.vulnxml))
except Exception, e:
log(" [!] Error parsing summary XML: %s" % str(e), logging.ERROR)
return False
vulns = vulnxml.findall('VulnerabilitySummary')
log(" [*] %s vulnerabilities to parse" % len(vulns))
if vuln_class.vulnerabilities > 0:
existing_vulnids = []
[existing_vulnids.extend([x['f_vulnid']]) for x in
db(db.t_vulndata.f_source == "Nexpose").select(db.t_vulndata.f_vulnid).as_list()]
log(" [*] Found %d vulnerabilities in the database already." % (len(existing_vulnids)))
stats = {'added': 0, 'updated': 0, 'skipped': 0, 'errors': 0}
for vuln in vulns:
if vuln.attrib['id'] in existing_vulnids and not overwrite:
# skip over existing entries if we're not overwriting
stats['skipped'] += 1
continue
try:
vulndetails = vuln_class.detail(vuln.attrib['id'])
except Exception, e:
log(" [!] Error retrieving details for %s: %s" % (vuln.attrib['id'], str(e)), logging.ERROR)
stats['errors'] += 1
if stats['errors'] == 50:
log(" [!] Too many errors, aborting!", logging.ERROR)
return False
else:
continue
if vulndetails is not None:
(vulnfields, references) = vuln_parse(vulndetails.find('Vulnerability'), fromapi=True)
else:
log(" [!] Unable to find %s in Nexpose" % vuln.attrib['id'], logging.WARN)
continue
# add the vulnerability to t_vulndata
vulnid = db.t_vulndata.update_or_insert(**vulnfields)
if not vulnid:
vulnid = db(db.t_vulndata.f_vulnid == vulnfields['f_vulnid']).select().first().id
stats['updated'] += 1
log(" [-] Updated %s" % vulnfields['f_vulnid'])
else:
stats['added'] += 1
log(" [-] Added %s" % vulnfields['f_vulnid'])
db.commit()
# add the references
if vulnid is not None and references:
for reference in references:
# check to see if reference exists first
query = (db.t_vuln_refs.f_source == reference[0]) & (db.t_vuln_refs.f_text == reference[1])
ref_id = db.t_vuln_refs.update_or_insert(query, f_source=reference[0], f_text=reference[1])
if not ref_id:
ref_id = db(query).select().first().id
# make many-to-many relationship with t_vuln_data
db.t_vuln_references.update_or_insert(f_vuln_ref_id=ref_id, f_vulndata_id=vulnid)
db.commit()
connect_exploits()
msg = "%s added, %s updated, %s skipped" % (stats['added'], stats['updated'], stats['skipped'])
log(" [*] %s" % msg)
else:
msg = "No vulndata populated from Nexpose"
log(" [!] Error: %s" % msg, logging.ERROR)
else:
msg = "Unable to communicate with Nexpose"
log(" [!] Error: %s" % msg, logging.ERROR)
return msg
|
d87bfc3e1a12de4e4c65bf0961be4b74787c5393
| 34,073 |
import json
def add_category(category_name):
"""
REST-like endpoint to add a category
:returns: Customized output from GIPHY
:rtype: json
"""
user = (
models.database.session.query(models.users.User)
.filter(
models.users.User.token == flask.request.cookies["X-Auth-Token"]
)
.one()
)
existing_category = (
models.database.session.query(models.categories.Category)
.filter(models.categories.Category.name == category_name)
.filter(models.categories.Category.user == user)
.all()
)
if existing_category:
# Already exists, move on
return json.dumps({})
new_category = models.categories.Category(name=category_name, user=user)
models.database.session.add(new_category)
models.database.session.commit()
return json.dumps(new_category.to_dict())
|
e5ff14ef56f628df982c524c27a7a7b61719be2c
| 34,074 |
from typing import Optional
from datetime import datetime
from typing import List
from typing import Tuple
import re
async def send_embed(
channel: Messageable,
embed_text: str,
title: str = "",
url: Optional[str] = None,
colour: Colour = Colour(settings.embed_color_normal),
footer_text: Optional[str] = None,
footer_icon: Optional[str] = None,
timestamp: Optional[datetime] = datetime.now(),
fields: Optional[List[Tuple[str, str, bool]]] = None,
embed_fallback: bool = False,
token_guard: bool = False,
path_guard: bool = False
) -> Message:
"""
Sends an embeded message to a given context or channel.
:param channel: Context or channel of message
:param embed_text: Text content of embed
:param title: Title of embed
:param url: URL of embed
:param colour: Colour of embed
:param footer_text: Footer text of embed
:param footer_icon: Footer icon URL of embed
:param timestamp: Timestamp of embed
:param fields: List of fields represented by a tuple of their title,
text, and inline mode
:param embed_fallback: Whether embed will be sent as a regular
message if the bot doesn't have the send embeds permission
:param token_guard: Censor Discord bot tokens
:param path_guard: Censor full project directory
:return: Discord Message object
"""
if token_guard:
title = re.sub(TOKEN_REGEX, "[REDACTED TOKEN]", title)
embed_text = re.sub(TOKEN_REGEX, "[REDACTED TOKEN]", embed_text)
if path_guard:
title = title.replace(PARENT_DIRECTORY, ".")
embed_text = embed_text.replace(PARENT_DIRECTORY, ".")
embed = Embed(
title=title,
url=url if url is not None else Embed.Empty,
colour=colour,
description=embed_text,
timestamp=timestamp if timestamp is not None else Embed.Empty
)
if footer_text is not None or footer_icon is not None:
embed = embed.set_footer(
text=footer_text if footer_text is not None else embed.Empty,
icon_url=footer_icon if footer_icon is not None else embed.Empty
)
try:
if fields is not None:
for name, text, inline in fields:
embed = embed.add_field(name=name, value=text, inline=inline)
except ValueError:
logger.warning("Failed to add fields to embed: {}", str(fields))
try:
# noinspection PyTypeChecker
message = await channel.send(None, embed=embed)
return message
except Forbidden:
logger.trace(
"Failed to send embed to channel ID {}; "
"falling back on plain message: {}",
str(channel),
embed_fallback
)
if embed_fallback:
field_text = ""
if fields is not None and len(fields) > 0:
field_text = "\n\n".join(
f"**{title}**\n{text}" for title, text, inline in fields
)
try:
message = await send_message(
channel,
f"**{title}**\n\n{embed_text}\n\n"
f"{field_text}\n\n{footer_text}"
)
return message
except Forbidden:
logger.trace(
"Failed to send message to channel ID {}",
str(channel)
)
|
f102bb7a5737519d5d6f9cbda48feb1a0337ffba
| 34,076 |
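A hedged call-site sketch for send_embed (the surrounding command and channel objects are assumed, not part of the original module):

async def report_build(channel) -> None:
    # `channel` is any discord.py Messageable, e.g. ctx.channel inside a command.
    await send_embed(
        channel,
        "Nightly build completed.",
        title="CI status",
        fields=[("Passed", "128", True), ("Failed", "0", True)],
        footer_text="build #4512",
        embed_fallback=True,
    )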
from ucscsdk.mometa.comm.CommSyslogMonitor import CommSyslogMonitorConsts
def syslog_local_monitor_disable(handle):
"""
This method disables logs on local monitor.
Args:
handle (UcscHandle)
Returns:
CommSyslogMonitor: Managed Object
Raises:
UcscOperationError: If CommSyslogMonitor is not present
Example:
mo = syslog_local_monitor_disable(handle)
"""
dn = ucsc_base_dn + "/syslog/monitor"
mo = handle.query_dn(dn)
if not mo:
raise UcscOperationError("syslog_local_monitor_disable",
"syslog monitor does not exist.")
mo.admin_state = CommSyslogMonitorConsts.ADMIN_STATE_DISABLED
handle.set_mo(mo)
handle.commit()
return mo
|
40d48a4932c7e79221aba95ba5f0a4d8b5ba446c
| 34,077 |
def git_push_obj(verbose, remote, obj=None):
"""
Push given object to git
    :param obj: object which will be pushed to git
    :type obj: any object that implements the interface for pushing to git
    :return boolean: True if the push succeeded, otherwise None
"""
try:
LOGGER.info("%s: pushing to remote - %s", verbose, remote.name)
if not obj:
remote.push()
else:
remote.push(obj)
LOGGER.info("%s: successfully pushed", verbose)
except GitCommandError as err:
LOGGER.warning("%s: was not pushed because %s", verbose,
err.stderr.split(':')[-1])
return None
return True
|
4139e93ff714baf70e50ff43329b6d3618508ede
| 34,078 |
from datetime import datetime
import random
def partial_generator(api_list, seed = datetime.now().microsecond):
"""
    Randomly denies access to certain API functions
:param api_list: The list of functions in the api
:param seed: An int, allows for seeding the tests with a certain seed to create predictable results
:return: Returns an api where roughly 1/2 the functions are denied access
"""
random_gen = random.Random()
random_gen.seed(seed)
return {func: (lambda x: False) for
func in api_list if
random_gen.random() > 0.5}
|
c32f8e072d35a79028cd9dcbe1edb2b28edf867c
| 34,079 |
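A brief usage sketch with a fixed seed so the denied subset is reproducible (the API names are hypothetical):

api_list = ["read", "write", "delete", "list", "admin"]
denied = partial_generator(api_list, seed=1234)

# Roughly half of the functions map to a deny-all callable.
for func, check in denied.items():
    assert check(None) is False
print(sorted(denied))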
def get_key_from_event(*, event: dict) -> str:
"""Return S3 Object's Key from Lambda event
Args:
event (dict): Lambda event object
"""
record = event['Records'][0]
obj_key = record['s3']['object'].get('key')
if obj_key:
return obj_key
else:
raise ObjectKeyError('KeyError: "key" not found')
|
274eef78eb646fd10d3f5970051de24a0f273e1e
| 34,080 |
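A hedged example of the event shape this helper expects (a minimal S3 put-notification payload; the bucket and key names are made up):

event = {
    "Records": [
        {"s3": {"bucket": {"name": "my-bucket"},
                "object": {"key": "uploads/report.csv"}}}
    ]
}
assert get_key_from_event(event=event) == "uploads/report.csv"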
def import_buffer_to_ast(buf, module_name):
""" Import content from buf and return a Python AST."""
return hy_compile(import_buffer_to_hst(buf), module_name)
|
b9bf33808e6951f3c6e49223f77c8cd8b1db07b6
| 34,082 |
def svn_wc_delete(*args):
"""
svn_wc_delete(char const * path, svn_wc_adm_access_t * adm_access, svn_cancel_func_t cancel_func,
svn_wc_notify_func_t notify_func, apr_pool_t pool) -> svn_error_t
"""
return _wc.svn_wc_delete(*args)
|
fb93c2989f65001a5e0ef012801a7c66582c815b
| 34,083 |
import functools
import warnings
def deprecated(func):
"""
A decorator used to mark functions that are deprecated with a warning.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
# https://stackoverflow.com/questions/2536307/decorators-in-the-python-standard-lib-deprecated-specifically
# may not want to turn filter on and off
warnings.simplefilter('always', DeprecationWarning) # turn off filter
primary_message = "Call to deprecated function {}.".format(func.__name__)
warnings.warn(primary_message, category=DeprecationWarning, stacklevel=2)
warnings.simplefilter('default', DeprecationWarning) # reset filter
return func(*args, **kwargs)
return wrapper
|
b79556e9ef0c7b8fb877788ab53b198807f782ee
| 34,084 |
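A short usage sketch: decorating a function makes every call emit a DeprecationWarning while still returning its result.

import warnings

@deprecated
def old_sum(a, b):
    return a + b

with warnings.catch_warnings(record=True) as caught:
    assert old_sum(2, 3) == 5
assert any(issubclass(w.category, DeprecationWarning) for w in caught)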
def queryAnnotations(
paths, # type: List[String]
startTime=None, # type: Optional[Date]
endTime=None, # type: Optional[Date]
types=None, # type: Optional[List[String]]
):
# type: (...) -> List[Annotation]
"""Queries user stored annotations from the Tag history system for a
set of paths, for a given time range.
Args:
paths: A list of Tag paths to query. The paths are equivalent to
what would be used ofr a Tag history query, and should
specify the source provider as well. For example,
"[HistoryProvider/Gateway:Provider]Path/To/Tag".
startTime: The start of the time range. If not defined, defaults
to 12 hours ago. Optional.
endTime: The end of time range. If not defined, defaults to
"now". Optional.
types: A list of string "types" to filter on. Types are defined
by the annotations and various subsystems, and may vary with
different providers. Possible annotation types are listed on
the system.Tag.storeAnnotations page. Optional.
Returns:
A list of Annotation objects that match the query criteria.
"""
print(paths, startTime, endTime, types)
return [Annotation()]
|
e68b2a94ae54f7cfba854cf295dfbaf9e148e391
| 34,085 |
def has_other_useful_output(content_text):
"""Returns whether |content_text| has other useful output.
Namely, console errors/warnings & alerts/confirms/prompts.
"""
prefixes = ('CONSOLE ERROR:', 'CONSOLE WARNING:', 'ALERT:', 'CONFIRM:',
'PROMPT:')
def is_useful(line):
return any(line.startswith(prefix) for prefix in prefixes)
lines = content_text.strip().splitlines()
return any(is_useful(line) for line in lines)
|
c1abfdaf681816314134ae33b5fd0fc48757dcc5
| 34,086 |
import re
def scrape(pagelim, endpoint, term=''):
"""
Retrieves all pages of the specified URL format up to the page limit.
Most of this code is spent on the loop structure to make sure we can
automatically get all the pages needed without storing empty data.
The latter half of this function is dedicated to making sure all the data
we need is typecasted correctly.
pagelim
Maximum number of pages to fetch
endpoint
API endpoint type, e.g. 'timelines/channels', 'timelines/users'
term
optional term to add onto url, e.g. 'comedy', '934940633704046592'
"""
comp = pd.DataFrame()
success = True
page = 0
url = 'https://vine.co/api/{0}/{1}'.format(endpoint, term)
vines = ''
while success:
if page:
url = url.split('?')[0] + '?page=' + str(page)
else:
print('Attempting to scrape: ' + url)
try:
vines = rq.get(url).json()
except Exception as e:
print(e)
if vines['success']:
if len(vines['data']['records']) > 0:
#the meat of the json object we're looking for, vine entries
df = pd.DataFrame.from_dict(vines['data']['records'])
print('Scrape successful! Downloaded {0} entries'.format(len(df.index)))
#if this is the first page, start comp as a copy of the page
if page == 0:
comp = df.copy()
#else add current page to the comp
else:
comp = pd.concat([df, comp], ignore_index=True)
#a pagelim of -1 means grab all the pages available/no limit
if page < pagelim or pagelim == -1:
page += 1
else:
print('Finished scraping at: ' + url)
success = False
else:
print('Finished scraping at: ' + url)
success = False
else:
print('API request failed, {0}/{1} not valid'.format(endpoint, term))
success = False
if page:
#expands the loops column's objects into count and velocity columns
loops = comp['loops'].apply(lambda x: pd.Series(x))
unstacked = loops.unstack().unstack().T[['count', 'velocity']]
#takes the columns we need
subset = comp[['videoUrl', 'permalinkUrl', 'username', 'created']].astype(basestring).copy()
#adds the new columns to the previous page(s) composite
subset['count'] = unstacked['count'].astype(int)
subset['velocity'] = unstacked['velocity'].astype(float)
subset['description'] = comp['description'].astype(basestring).map(enc_str)
#when I get the results for a tag search, it always seems to include
#results that don't have the tag in the description, this filters those
#results out of the data
tag = re.search(r'(?<=tags/)(\w+)(?=\?*?)', url)
if tag:
tag_text = tag.group()
print('checking for: ' + tag_text)
#filters results based on the truth result of a regex
subset = subset[subset.description.fillna(value=' ').str.contains(tag_text)]
#extracts the vineid from the right side of the permalink
get_id = lambda x: x.rsplit('/', 1)[-1]
subset['id'] = [get_id(perma) for perma in subset['permalinkUrl']]
sort = sort_clean(subset).dropna()
return sort
else:
return pd.DataFrame()
|
16c0d026e7f320549d77b1a6655aa8db35f6521e
| 34,087 |
def TransformContainerAnalysisData(image_name, occurrence_filter=None):
"""Transforms the occurrence data from Container Analysis API."""
occurrences = FetchOccurrencesForResource(image_name, occurrence_filter)
analysis_obj = container_analysis_data_util.ContainerAnalysisData(image_name)
for occurrence in occurrences:
analysis_obj.add_record(occurrence)
return analysis_obj
|
99edfe655884a2932b4e84d6ce370df7dde7412e
| 34,088 |
import random
def cluster_randomly_weigthed(document, prob_coref=0.03):
"""
Randomly creates a coreferring link between a mention, m2, and any preceding
mention, m1.
Probability of selecting a closer antecedent is higher, according to a
geometric distribution
Probability of selecting an antecedent is prob_coref, else selects
NO_ANTECEDENT for that given mention.
@prob_coref defaults to 0.03 which is the approximate percentage of positive
coreferring links.
"""
links = np.ndarray(shape=(len(document.mentions),), dtype=int)
links.fill(Link.NO_ANTECEDENT)
for idx in range(1, len(document.mentions)):
if random.random() < prob_coref:
random_antecedent = idx - geom.rvs(0.3)
links[idx] = random_antecedent if random_antecedent >= 0 else Link.NO_ANTECEDENT
return coreference_links_to_entity_clusters(links)
|
d7223c9594ba5689e69f47048cf53f10b04bb85a
| 34,091 |
def getOffset(self,name):
""" get the offset """
return [self.xoffset,self.yoffset]
|
b7b8d9c5da6bb38751e579583a8a5bd118cb1f67
| 34,092 |
def to_nbsphinx(s):
"""Use the sphinx naming style for anchors of headings"""
s = s.replace(" ", "-").lower()
return "".join(filter(lambda c : c not in "()", s))
|
87b266c84f9b32c1d7357c5ed23ba4058ba33673
| 34,093 |
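A small check of the anchor naming (mirrors the sphinx heading-anchor style the docstring refers to):

assert to_nbsphinx("Getting Started (Linux)") == "getting-started-linux"
assert to_nbsphinx("Plotting API") == "plotting-api"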