content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M)
---|---|---|
import pandas as pd
def virus_tsne_list(tsne_df, virus_df):
    """
    Merge t-SNE coordinates with per-barcode viral UMI counts and return a dict
    with "tSNE_1", "tSNE_2" and "virus_UMI" lists.
    """
tsne_df.rename(columns={"Unnamed: 0": "barcode"}, inplace=True)
df = pd.merge(tsne_df, virus_df, on="barcode", how="left")
df["UMI"] = df["UMI"].fillna(0)
tSNE_1 = list(df.tSNE_1)
tSNE_2 = list(df.tSNE_2)
virus_UMI = list(df.UMI)
res = {"tSNE_1": tSNE_1, "tSNE_2": tSNE_2, "virus_UMI": virus_UMI}
return res | b8265f3f3d6b602d045a890434322727d8e1adc5 | 12,801 |
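A minimal usage sketch for the function above (assuming `pandas` is available as the module-level `pd` the body relies on, a t-SNE table with an unnamed index column, and a virus table with "barcode" and "UMI" columns):

import pandas as pd

tsne_df = pd.DataFrame({"Unnamed: 0": ["AAAC", "TTTG"],
                        "tSNE_1": [1.2, -0.7],
                        "tSNE_2": [0.3, 2.1]})
virus_df = pd.DataFrame({"barcode": ["AAAC"], "UMI": [5]})

res = virus_tsne_list(tsne_df, virus_df)
# {'tSNE_1': [1.2, -0.7], 'tSNE_2': [0.3, 2.1], 'virus_UMI': [5.0, 0.0]}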
def sometimes(aug):
"""
Return a shortcut for iaa.Sometimes
:param aug: augmentation method
:type aug: iaa.meta.Augmenter
:return: wrapped augmentation method
:rtype: iaa.meta.Augmenter
"""
return iaa.Sometimes(0.5, aug) | 95f7ece0b1da30c5a4e3be4d1a21e089e11d9036 | 12,802 |
def rev_to_b10(letters):
"""Convert an alphabet number to its decimal representation"""
return sum(
(ord(letter) - A_UPPERCASE + 1) * ALPHABET_SIZE**i
for i, letter in enumerate(reversed(letters.upper()))
) | b4850e97754f0404894673a51c1cce930e437f6c | 12,803 |
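A worked example for the conversion above, assuming the module-level constants are A_UPPERCASE = ord('A') and ALPHABET_SIZE = 26 (so 'A' maps to 1, spreadsheet-column style):

A_UPPERCASE = ord('A')
ALPHABET_SIZE = 26

assert rev_to_b10("A") == 1
assert rev_to_b10("Z") == 26
assert rev_to_b10("AA") == 27   # 1 * 26**1 + 1 * 26**0
assert rev_to_b10("ab") == 28   # case-insensitive: 1 * 26 + 2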
def test_from_rsid(rsids, start_rsid):
"""Continue collecting publications for rsids in list, beginning with start_rsid
Args:
rsids (list): list of rsids to collect publications on
start_rsid (str): rsid identifier to resume collecting publications on
Returns:
runtime_rsids (list): [start_rsid, onward...]
start_rsid (str): starting rsid
        start_idx (int): index of start_rsid within rsids
rsids (list): [original list of ALL rsids]
"""
start_idx = rsids.index(start_rsid) # start_rsid index
print(f"STARTING POINT SET TO: | INDEX: {start_idx} / {len(rsids)} | RSID: {rsids[start_idx]}")
runtime_rsids = rsids[start_idx:] # runtime rsids
return runtime_rsids, start_rsid, start_idx, rsids | bf2be86f28645addc08737e64f08695cd6b3a6d3 | 12,805 |
def _average_scada(times, values, nvalues):
"""
    Function which down-samples SCADA values.
    :param times: Unix times of the data points.
    :param values: Corresponding sensor values.
    :param nvalues: Number of samples we average over.
    :return: Down-sampled time values and the averaged sensor values.
"""
if len(times) % nvalues:
nsamples = (len(times) // nvalues) - 1
else:
nsamples = (len(times) // nvalues)
res = np.zeros(nsamples, dtype=np.float32)
new_times = np.zeros(nsamples, dtype=np.int64)
for ind in range(nsamples):
res[ind] = np.mean(values[ind * nvalues:(ind + 1) * nvalues])
new_times[ind] = np.mean(times[ind * nvalues:(ind + 1) * nvalues])
return new_times, res | 8743e5065741299befe37b230a22512c65001a09 | 12,806 |
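A quick usage sketch with synthetic data (assuming numpy is imported as np at module level, as the function body requires):

import numpy as np

times = np.arange(0, 60, 10)                   # six unix-style timestamps
values = np.array([1., 2., 3., 4., 5., 6.])

new_times, averaged = _average_scada(times, values, nvalues=3)
# new_times -> [10, 40], averaged -> [2., 5.]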
def main():
"""
Test harness
"""
def game_factory():
"""
Creates the game we need
"""
return Maze(Layout.from_string(Layout.MEDIUM_STR))
bot_factory = PlannedBot
trainer = BotTrainer(game_factory, bot_factory, 16, 2, goal_score=13)
start_time = time()
generations, result = trainer.breed_best_bot()
end_time = time()
msg = 'After {} generations, the bot {} the game'.format(
generations, 'won' if result.finished else 'lost')
    print(msg)
    print('Elapsed time:', int(end_time - start_time + 0.5), 'seconds')
    print('Bot score:', result.score)
    print('Bot plan:', result.player.moves) | 0528a4a4c51a4b9491314555d2ccd5c5b9baf328 | 12,807 |
def behavior_of(classname):
"""
Finds and loads the behavior class for C++ (decoded) classname or returns
None if there isn't one.
Behaviors do not have a required base class, and they may be used with
Awkward Array's ``ak.behavior``.
The search strategy for finding behavior classes is:
1. Translate the ROOT class name from C++ to Python with
:py:func:`~uproot4.model.classname_encode`. For example,
``"ROOT::RThing"`` becomes ``"Model_ROOT_3a3a_RThing"``.
2. Look for a submodule of ``uproot4.behaviors`` without
the ``"Model_"`` prefix. For example, ``"ROOT_3a3a_RThing"``.
3. Look for a class in that submodule with the fully encoded
name. For example, ``"Model_ROOT_3a3a_RThing"``.
See :py:mod:`uproot4.behaviors` for details.
"""
name = classname_encode(classname)
assert name.startswith("Model_")
name = name[6:]
if name not in globals():
if name in behavior_of._module_names:
exec(
compile(
"import uproot4.behaviors.{0}".format(name), "<dynamic>", "exec"
),
globals(),
)
module = eval("uproot4.behaviors.{0}".format(name))
behavior_cls = getattr(module, name, None)
if behavior_cls is not None:
globals()[name] = behavior_cls
return globals().get(name) | ce588881e283f53755c7e468de298e6bc360cecc | 12,808 |
def adjust(data):
"""Calculate mean of list of values and subtract the mean of every element
in the list, making a new list.
Returns tuple of mean, list of adjusted values
"""
mu = mean(data)
    return mu, [x - mu for x in data] | c0ddf7140dee90903452c16afb2625ded34c4d73 | 12,810 |
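A short usage example for the helper above (assuming the module-level `mean` it relies on is, e.g., statistics.mean):

from statistics import mean   # stand-in for the module-level mean() the function expects

mu, adjusted = adjust([2.0, 4.0, 6.0])
# mu == 4.0, adjusted == [-2.0, 0.0, 2.0]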
def clear():
"""
Clears the world, and then returns the cleared representation
"""
myWorld.clear()
return jsonify(myWorld.world()) | 4d999388696986ad9a0a954f3791f0a4795ef69a | 12,811 |
from typing import Optional
from typing import TextIO
def _create_terminal_writer_factory(output: Optional[TextIO]):
"""
A factory method for creating a `create_terminal_writer` function.
:param output: The receiver of all original pytest output.
"""
def _create_terminal_writer(config: Config, _file: Optional[TextIO] = None) -> TerminalWriter:
file = output if output is not None else get_sink_io()
return create_terminal_writer(config, file)
return _create_terminal_writer | 9c06bd4b10eb5b1dc0e3e4f4f9bdb20074cacf6e | 12,812 |
import typing
def filter(
f: typing.Callable,
stage: Stage = pypeln_utils.UNDEFINED,
workers: int = 1,
maxsize: int = 0,
timeout: float = 0,
on_start: typing.Callable = None,
on_done: typing.Callable = None,
) -> Stage:
"""
    Creates a stage that filters the data given a predicate function `f`, exactly like Python's built-in `filter` function.
```python
import pypeln as pl
import time
from random import random
def slow_gt3(x):
time.sleep(random()) # <= some slow computation
return x > 3
data = range(10) # [0, 1, 2, ..., 9]
stage = pl.sync.filter(slow_gt3, data, workers=3, maxsize=4)
data = list(stage) # [3, 4, 5, ..., 9]
```
Arguments:
f: A function with signature `f(x, **kwargs) -> bool`, where `kwargs` is the return of `on_start` if present.
stage: A stage or iterable.
workers: This parameter is not used and only kept for API compatibility with the other modules.
maxsize: This parameter is not used and only kept for API compatibility with the other modules.
        timeout: Seconds before stopping the worker if its current task is not yet completed. Defaults to `0`, which means it is unbounded.
        on_start: A function with signature `on_start(worker_info?) -> kwargs`, where `kwargs` can be a `dict` of keyword arguments that will be passed to `f` and `on_done`. If you define a `worker_info` argument an object with information about the worker will be passed. This function is executed once per worker at the beginning.
on_done: A function with signature `on_done(stage_status?, **kwargs)`, where `kwargs` is the return of `on_start` if present. If you define a `stage_status` argument an object with information about the stage will be passed. This function is executed once per worker when the worker finishes.
!!! warning
        To implement `timeout` we use `stopit.async_raise` which has some limitations for stopping threads.
Returns:
        If the `stage` parameter is given then this function returns a new stage, else it returns a `Partial`.
"""
if pypeln_utils.is_undefined(stage):
return pypeln_utils.Partial(
lambda stage: filter(
f,
stage=stage,
workers=workers,
maxsize=maxsize,
timeout=timeout,
on_start=on_start,
on_done=on_done,
)
)
stage = to_stage(stage)
return Filter(
f=f, on_start=on_start, on_done=on_done, timeout=timeout, dependencies=[stage],
) | 741a1d4f941a293b41c98872c59c8bf7e451bba5 | 12,813 |
import numpy
def function_factory(model, loss, dataset):
"""A factory to create a function required by tfp.optimizer.lbfgs_minimize.
Args:
model [in]: an instance of `tf.keras.Model` or its subclasses.
loss [in]: a function with signature loss_value = loss(pred_y, true_y).
        dataset [in]: an iterable yielding (points, Omega_Omegabar, mass, restriction) training batches.
Returns:
A function that has a signature of:
loss_value, gradients = f(model_parameters).
"""
# obtain the shapes of all trainable parameters in the model
shapes = tf.shape_n(model.trainable_variables)
n_tensors = len(shapes)
# we'll use tf.dynamic_stitch and tf.dynamic_partition later, so we need to
# prepare required information first
count = 0
idx = [] # stitch indices
part = [] # partition indices
for i, shape in enumerate(shapes):
n = numpy.product(shape)
idx.append(tf.reshape(tf.range(count, count+n, dtype=tf.int32), shape))
part.extend([i]*n)
count += n
part = tf.constant(part)
@tf.function
@tf.autograph.experimental.do_not_convert
def assign_new_model_parameters(params_1d):
"""A function updating the model's parameters with a 1D tf.Tensor.
Args:
params_1d [in]: a 1D tf.Tensor representing the model's trainable parameters.
"""
params = tf.dynamic_partition(params_1d, part, n_tensors)
for i, (shape, param) in enumerate(zip(shapes, params)):
model.trainable_variables[i].assign(tf.reshape(param, shape))
#tf.print(model.trainable_variables[i])
@tf.function
def volume_form(x, Omega_Omegabar, mass, restriction):
kahler_metric = complex_math.complex_hessian(tf.math.real(model(x)), x)
volume_form = tf.math.real(tf.linalg.det(tf.matmul(restriction, tf.matmul(kahler_metric, restriction, adjoint_b=True))))
weights = mass / tf.reduce_sum(mass)
factor = tf.reduce_sum(weights * volume_form / Omega_Omegabar)
#factor = tf.constant(35.1774, dtype=tf.complex64)
return volume_form / factor
# now create a function that will be returned by this factory
def f(params_1d):
"""A function that can be used by tfp.optimizer.lbfgs_minimize.
This function is created by function_factory.
Args:
params_1d [in]: a 1D tf.Tensor.
Returns:
A scalar loss and the gradients w.r.t. the `params_1d`.
"""
# use GradientTape so that we can calculate the gradient of loss w.r.t. parameters
for step, (points, Omega_Omegabar, mass, restriction) in enumerate(dataset):
with tf.GradientTape() as tape:
# update the parameters in the model
assign_new_model_parameters(params_1d)
# calculate the loss
det_omega = volume_form(points, Omega_Omegabar, mass, restriction)
loss_value = loss(Omega_Omegabar, det_omega, mass)
# calculate gradients and convert to 1D tf.Tensor
grads = tape.gradient(loss_value, model.trainable_variables)
grads = tf.dynamic_stitch(idx, grads)
# reweight the loss and grads
mass_sum = tf.reduce_sum(mass)
try:
total_loss += loss_value * mass_sum
total_grads += grads * mass_sum
total_mass += mass_sum
except NameError:
total_loss = loss_value * mass_sum
total_grads = grads * mass_sum
total_mass = mass_sum
total_loss = total_loss / total_mass
total_grads = total_grads / total_mass
# print out iteration & loss
f.iter.assign_add(1)
tf.print("Iter:", f.iter, "loss:", total_loss)
# store loss value so we can retrieve later
tf.py_function(f.history.append, inp=[total_loss], Tout=[])
return total_loss, total_grads
# store these information as members so we can use them outside the scope
f.iter = tf.Variable(0)
f.idx = idx
f.part = part
f.shapes = shapes
f.assign_new_model_parameters = assign_new_model_parameters
f.history = []
return f | 2e50b3e085d2a88d76de31ba0fccb49f4f38dd1e | 12,814 |
import copy
import time
def nn_CPRAND(tensor,rank,n_samples,n_samples_err,factors=None,exact_err=False,it_max=100,err_it_max=20,tol=1e-7,list_factors=False,time_rec=False):
"""
Add argument n_samples_err
    CPRAND for CP decomposition in the non-negative case, with err_rand;
    also returns the exact error.
    Parameters
    ----------
    tensor : tensor
    rank : int
    n_samples : int
        sample size
    n_samples_err : int
        sample size used for error estimation.
    factors : list of matrices, optional
        initial non-negative factor matrices. The default is None.
    exact_err : boolean, optional
        whether to use err or err_rand_fast for the termination criterion. The default is False.
        (not used in this version)
    it_max : int, optional
        maximal number of iterations. The default is 100.
    err_it_max : int, optional
        maximal number of iterations without improvement of the termination criterion. The default is 20.
    tol : float, optional
        error tolerance. The default is 1e-7.
    list_factors : boolean, optional
        If True, also return the factor matrices of each iteration. The default is False.
    time_rec : boolean, optional
        If True, also return the computation time of each iteration. The default is False.
Returns
-------
the CP decomposition, number of iteration and exact / estimated termination criterion.
list_fac and list_time are optional.
"""
N=tl.ndim(tensor) # order of tensor
norm_tensor=tl.norm(tensor) # norm of tensor
if list_factors==True : list_fac=[]
if time_rec == True : list_time=[]
if (factors==None): factors=svd_init_fac(tensor,rank)
if list_factors==True : list_fac.append(copy.deepcopy(factors))
weights=None
it=0
err_it=0
########################################
######### error initialization #########
########################################
temp,ind_err=err_rand(tensor,weights,factors,n_samples_err)
error=[temp/norm_tensor]
min_err=error[len(error)-1]
rng = tl.check_random_state(None)
while (min_err>tol and it<it_max and err_it<err_it_max):
if time_rec == True : tic=time.time()
for n in range(N):
Zs,indices=sample_khatri_rao(factors,n_samples,skip_matrix=n,random_state=rng)
indices_list = [i.tolist() for i in indices]
indices_list.insert(n, slice(None, None, None))
indices_list = tuple(indices_list)
if (n==0) :sampled_unfolding = tensor[indices_list]
else : sampled_unfolding =tl.transpose(tensor[indices_list])
V=tl.dot(tl.transpose(Zs),Zs)
W=tl.dot(sampled_unfolding,Zs)
# update
fac, _, _, _ = hals_nnls(tl.transpose(W), V,tl.transpose(factors[n]))
factors[n]=tl.transpose(fac)
if list_factors==True : list_fac.append(copy.deepcopy(factors))
it=it+1
################################
######### error update #########
################################
        error.append(err_rand(tensor,weights,factors,n_samples_err,ind_err)[0]/norm_tensor) # same indices used as for the randomized least squares calculation
if (error[len(error)-1]<min_err) : min_err=error[len(error)-1] # err update
else : err_it=err_it+1
if time_rec == True :
toc=time.time()
list_time.append(toc-tic)
if time_rec == True and list_factors==True: return(weights,factors,it,error,list_fac,list_time)
if list_factors==True : return(weights,factors,it,error,list_fac)
if time_rec==True : return(weights,factors,it,error,list_time)
return(weights,factors,it,error) | 8cd3402407a54579ef279efd8b3459c34933c9cb | 12,815 |
def get_base_url(host_name, customer_id):
"""
:arg host_name: the host name of the IDNow gateway server
:arg customer_id: your customer id
:returns the base url of the IDNow API and the selected customer
"""
return 'https://{0}/api/v1/{1}'.format(host_name, customer_id) | 5a24a87f597cf01c61ab6a01202b2e01e3b00bf8 | 12,816 |
def sample_category(user, **params):
"""Create and return a sample category"""
defaults = {
'name': 'Sample category',
'persian_title': 'persian',
'parent_category': None
}
defaults.update(params)
return Category.objects.create(user=user, **defaults) | ec013f1b699c4ae76acb0c78819da875b2453846 | 12,817 |
from typing import Dict
def lindbladian_average_infid_set(
propagators: dict, instructions: Dict[str, Instruction], index, dims, n_eval
):
"""
Mean average fidelity over all gates in propagators.
Parameters
----------
propagators : dict
Contains unitary representations of the gates, identified by a key.
    instructions : Dict[str, Instruction]
        The gate instructions used to obtain the ideal gates, keyed like propagators.
    index : int
        Index of the qubit(s) in the Hilbert space to be evaluated
    dims : list
        List of dimensions of qubits
    n_eval : int
        Not used by this function.
Returns
-------
tf.float64
Mean average fidelity
"""
infids = []
for gate, propagator in propagators.items():
perfect_gate = instructions[gate].get_ideal_gate(dims)
infid = lindbladian_average_infid(perfect_gate, propagator, index, dims)
infids.append(infid)
return tf.reduce_mean(infids) | 71fcc97afc80bae0e53aea2fafd30b8279f76d08 | 12,818 |
def edit(request, course_id):
"""
Teacher form for editing a course
"""
course = get_object_or_404(Course, id=course_id)
courseForm = CourseForm(request.POST or None, instance=course)
if request.method == 'POST': # Form was submitted
if courseForm.is_valid():
courseForm.save()
messages.add_message(request, messages.SUCCESS,
f'The course {course.code} - {course.title} was altered!')
return redirect('course:index')
return render(request, 'course/edit.html', {'form': courseForm}) | d4f39a26598108d9d5f03ad18fa6de26d88d849d | 12,819 |
def _build_ontology_embedded_list():
""" Helper function intended to be used to create the embedded list for ontology.
All types should implement a function like this going forward.
"""
synonym_terms_embed = DependencyEmbedder.embed_defaults_for_type(base_path='synonym_terms',
t='ontology_term')
definition_terms_embed = DependencyEmbedder.embed_defaults_for_type(base_path='definition_terms',
t='ontology_term')
return synonym_terms_embed + definition_terms_embed | 2245b82313e26ba741200e24d323f6aa6b9741e0 | 12,820 |
def interp1d(x,y,xi,axis=None,extrap=True):
"""
Args:
x (uniformly sampled vector/array): sampled x values
y (array): sampled y values
xi (array): x values to interpolate onto
axis (int): axis along which to interpolate.
extrap (bool): if True, use linear extrapolation based on the extreme values.
If false, nearest neighbour is used for extrapolation instead.
"""
x=np.asarray(x)
if axis is None:
axis=get_axis(x)
return mathx.interp1d_lin_reg(zero(x,axis),delta(x,axis),y,xi,axis,extrap) | 081c4f5156cc653804cbd770edaf01ecdb426a51 | 12,821 |
import threading
def _back_operate(
servicer, callback, work_pool, transmission_pool, utility_pool,
termination_action, ticket, default_timeout, maximum_timeout):
"""Constructs objects necessary for back-side operation management.
Also begins back-side operation by feeding the first received ticket into the
constructed _interfaces.ReceptionManager.
Args:
servicer: An interfaces.Servicer for servicing operations.
callback: A callable that accepts packets.BackToFrontPackets and delivers
them to the other side of the operation. Execution of this callable may
take any arbitrary length of time.
work_pool: A thread pool in which to execute customer code.
transmission_pool: A thread pool to use for transmitting to the other side
of the operation.
utility_pool: A thread pool for utility tasks.
termination_action: A no-arg behavior to be called upon operation
completion.
ticket: The first packets.FrontToBackPacket received for the operation.
    default_timeout: A length of time in seconds to be used as the default
      time allotted for a single operation.
    maximum_timeout: A length of time in seconds to be used as the maximum
      time allotted for a single operation.
Returns:
The _interfaces.ReceptionManager to be used for the operation.
"""
lock = threading.Lock()
with lock:
termination_manager = _termination.back_termination_manager(
work_pool, utility_pool, termination_action, ticket.subscription)
transmission_manager = _transmission.back_transmission_manager(
lock, transmission_pool, callback, ticket.operation_id,
termination_manager, ticket.subscription)
operation_context = _context.OperationContext(
lock, ticket.operation_id, packets.Kind.SERVICER_FAILURE,
termination_manager, transmission_manager)
emission_manager = _emission.back_emission_manager(
lock, termination_manager, transmission_manager)
ingestion_manager = _ingestion.back_ingestion_manager(
lock, work_pool, servicer, termination_manager,
transmission_manager, operation_context, emission_manager)
expiration_manager = _expiration.back_expiration_manager(
lock, termination_manager, transmission_manager, ingestion_manager,
ticket.timeout, default_timeout, maximum_timeout)
reception_manager = _reception.back_reception_manager(
lock, termination_manager, transmission_manager, ingestion_manager,
expiration_manager)
termination_manager.set_expiration_manager(expiration_manager)
transmission_manager.set_ingestion_and_expiration_managers(
ingestion_manager, expiration_manager)
operation_context.set_ingestion_and_expiration_managers(
ingestion_manager, expiration_manager)
emission_manager.set_ingestion_manager_and_expiration_manager(
ingestion_manager, expiration_manager)
ingestion_manager.set_expiration_manager(expiration_manager)
reception_manager.receive_packet(ticket)
return reception_manager | 46999af151338d0d8b15704e913801d9f2c80696 | 12,822 |
from typing import Tuple
import datasets
def load_train_val_data(
data_dir: str, batch_size: int,
training_fraction: float) -> Tuple[DataLoader, DataLoader]:
"""
Returns two DataLoader objects that wrap training and validation data.
Training and validation data are extracted from the full original training
data, split according to training_fraction.
"""
full_train_data = datasets.FashionMNIST(data_dir,
train=True,
download=False,
transform=ToTensor())
full_train_len = len(full_train_data)
train_len = int(full_train_len * training_fraction)
val_len = full_train_len - train_len
(train_data, val_data) = random_split(dataset=full_train_data,
lengths=[train_len, val_len])
train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
val_loader = DataLoader(val_data, batch_size=batch_size, shuffle=True)
return (train_loader, val_loader) | 9cc0c67532e5d77fa8653d43c4f537137905767c | 12,823 |
def match(input_string, start_node):
"""匹配字符串
input_string :: 需要配备的字符串
start_node :: NFA起始节点
return :: True | False
"""
# 初始化运行状态的状态集合: 起始节点+空转移能到达的节点
current_state_set = [start_node]
next_state_set = closure(current_state_set)
# 循环读入字符生成状态集合
for i, ch in enumerate(input_string):
# 读入一个字符后的状态集合+空转移能到达的节点
current_state_set = move(next_state_set, ch)
next_state_set = closure(current_state_set)
# 状态集合为空,返回False
if next_state_set is None:
return False
# 读入最后一个字符且存在接受状态的返回True
if has_accepted_state(next_state_set) and i == len(input_string) - 1:
return True
return False | fdc7c971cfeb3d0b13716ca1017c6557889d3f52 | 12,824 |
def _H_to_h(H):
"""Converts CIECAM02/CAM02-UCS hue composition (H) to raw hue angle (h)."""
x0 = H % 400 * 360 / 400
h, _, _ = fmin_l_bfgs_b(lambda x: abs(h_to_H(x) - H), x0, approx_grad=True)
return h % 360 | a9ccf1ec14b467b8a5e05b5e71141a4113cf0c07 | 12,825 |
def filter_df_merge(cpu_df, filter_column=None):
"""
process cpu data frame, merge by 'model_name', 'batch_size'
Args:
        cpu_df (pd.DataFrame): data frame to process and merge.
        filter_column (str): column whose values are grouped on before merging.
"""
if not filter_column:
raise Exception(
"please assign filter_column for filter_df_merge function")
df_lists = []
filter_column_lists = []
for k, v in cpu_df.groupby(filter_column, dropna=True):
filter_column_lists.append(k)
df_lists.append(v)
final_output_df = df_lists[-1]
# merge same model
for i in range(len(df_lists) - 1):
left_suffix = cpu_df[filter_column].unique()[0]
right_suffix = df_lists[i][filter_column].unique()[0]
print(left_suffix, right_suffix)
if not pd.isnull(right_suffix):
final_output_df = pd.merge(
final_output_df,
df_lists[i],
how='left',
left_on=['model_name', 'batch_size'],
right_on=['model_name', 'batch_size'],
suffixes=('', '_{0}_{1}'.format(filter_column, right_suffix)))
# rename default df columns
origin_column_names = list(cpu_df.columns.values)
origin_column_names.remove(filter_column)
suffix = final_output_df[filter_column].unique()[0]
for name in origin_column_names:
final_output_df.rename(
columns={name: "{0}_{1}_{2}".format(name, filter_column, suffix)},
inplace=True)
final_output_df.rename(
columns={
filter_column: "{0}_{1}_{2}".format(filter_column, filter_column,
suffix)
},
inplace=True)
final_output_df.sort_values(
by=[
"model_name_{0}_{1}".format(filter_column, suffix),
"batch_size_{0}_{1}".format(filter_column, suffix)
],
inplace=True)
return final_output_df | bc0e147ada18cbbb3e8450f8764d80be3ca32315 | 12,826 |
def MRP2Euler121(q):
"""
MRP2Euler121(Q)
E = MRP2Euler121(Q) translates the MRP
vector Q into the (1-2-1) euler angle vector E.
"""
return EP2Euler121(MRP2EP(q)) | 9cd8da8d38ad668b928ed896004611e85571be0d | 12,827 |
def nlayer(depth=64):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = NLayer_D(depth=depth)
return model | b8e57716e6b9576de9524cf730309885a79d0bfa | 12,828 |
import typing
def deserialize(
value: ElementTree.Element,
cipher: PSCryptoProvider,
**kwargs: typing.Any,
) -> typing.Optional[typing.Union[bool, PSObject]]:
"""Deserialize CLIXML to a Python object.
Deserializes a CLIXML XML Element from .NET to a Python object.
Args:
value: The CLIXML XML Element to deserialize to a Python object.
cipher: The Runspace Pool cipher to use for SecureStrings.
kwargs: Optional parameters to sent to the FromPSObjectForRemoting
method on classes that use that.
Returns:
        Optional[Union[bool, PSObject]]: The Python object deserialized from the CLIXML element.
"""
return _Serializer(cipher, **kwargs).deserialize(value) | 96b53a08c5c8273f29e108e020c40ab806c0949c | 12,829 |
import requests
def is_url_ok(url: str) -> bool:
"""Check if the given URL is down."""
try:
r = requests.get(url)
return r.status_code == 200
except Exception:
return False | 97e0ba4b609282ef0dc166f0f0407e4aacdf30b2 | 12,830 |
def calculate_pair_energy_np(coordinates, i_particle, box_length, cutoff):
"""
    Calculate the interaction energy of a particle with its environment (all other particles in the system).
    Parameters
    ----------------
    coordinates : list
        the coordinates of all particles in the system
    i_particle : int
        particle index for which to calculate the energy
    cutoff : float
        simulation cutoff; interactions beyond this distance are not calculated
    box_length : float
        length of the simulation box. Assumes a cubic box.
    Returns
    ---------------
    float
        pairwise interaction energy of the ith particle with all other particles in the system
"""
particle_distances = calculate_distance_np(coordinates[i_particle], coordinates[i_particle+1:], box_length)
particle_distances_filtered = particle_distances[particle_distances < cutoff]
return calculate_LJ_np(particle_distances_filtered).sum() | c626d1398312e42fd72d70c9b23d397fce5070fd | 12,831 |
def lwhere(mappings, **cond):
"""Selects mappings containing all pairs in cond."""
return list(where(mappings, **cond)) | ade55be28f75ae082833948306c43e4070525f7e | 12,832 |
import re
def get_number(message, limit=4):
"""
convert Chinese to pinyin and extract useful numbers
attention:
        1. only works for integers
        2. the message should be preprocessed before applying this method
    input:
        message: the message you want to extract numbers from.
        limit: minimum length of a digit sequence to be extracted
"""
words = pinyin.get_pinyin(message).split('-')
numbers = []
tmp = ''
count = 0
for w in words:
if re.search(r'\W', w, re.A):
for s in list(w):
if s in special_char.keys():
count += 1
tmp += special_char[s]
else:
if count >= limit:
numbers.append(tmp)
count = 0
tmp = ''
elif w in pinyin2number.keys():
count += 1
tmp += pinyin2number[w]
else:
if count >= limit:
numbers.append(tmp)
count = 0
tmp = ''
if count >= limit:
numbers.append(tmp)
return numbers | ae1cc6886a4a2931baa61fcb201ffa67f70aecf6 | 12,833 |
import sqlite3
def create_connection(db_file):
"""
Creates a database connection to the SQLite database
specified by the db_file
:param db_file: database file
:return: Connection object or None
"""
conn = None
try:
conn = sqlite3.connect(db_file)
    except sqlite3.Error as e:
print(e)
return conn | 37571690b5e970fc4344ee1d5d449b16cfc15896 | 12,835 |
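Typical usage of the helper above, here against an in-memory database:

conn = create_connection(":memory:")
if conn is not None:
    conn.execute("CREATE TABLE demo (id INTEGER PRIMARY KEY, name TEXT)")
    conn.close()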
def convex_hull(poly):
"""
ratio of the convex hull area to the area of the shape itself
Altman's A_3 measure, from Neimi et al 1991.
"""
chull = to_shapely_geom(poly).convex_hull
return poly.area / chull.area | 0ed9b4803b87b4138cb5490b153376aae6e71e99 | 12,836 |
from typing import Dict
from typing import List
from typing import Tuple
import torch
def create_scifact_annotations(
claims, corpus, tokenizer, class_to_id: Dict[str, int], neutral_class: str
) -> List[SciFactAnnotation]:
"""Create a SciFactAnnotation for each claim - evidence/cited document pair."""
def get_abstract_and_encoding(
doc_id,
) -> Tuple[List[List[str]], List[torch.IntTensor]]:
doc = [d for d in corpus if d["doc_id"] == int(doc_id)]
assert len(doc) == 1
abstract = doc[0]["abstract"]
encoding = [
torch.IntTensor(tokenizer.encode(sentence, add_special_tokens=False))
for sentence in abstract
]
return abstract, encoding
annotations = []
for c in claims:
# Convert Interventions, Comparator, and Outcomes tokens to encodings
intervention = torch.IntTensor(tokenizer.convert_tokens_to_ids(c["i_tokens"]))
comparator = torch.IntTensor(tokenizer.convert_tokens_to_ids(c["c_tokens"]))
outcome = torch.IntTensor(tokenizer.convert_tokens_to_ids(c["o_tokens"]))
evidence = c["evidence"]
# Handle claims with no evidence (label is NOT_ENOUGH_INFO)
if not evidence:
cited_doc_id = c["cited_doc_ids"][0]
abstract, encoded_abstract = get_abstract_and_encoding(cited_doc_id)
rationale_id = class_to_id[neutral_class]
s_ann = SciFactAnnotation(
claim_id=int(c["id"]),
doc_id=int(cited_doc_id),
sentences=abstract,
encoded_sentences=encoded_abstract,
rationale_sentences=[],
i=intervention,
c=comparator,
o=outcome,
rationale_class=neutral_class,
rationale_id=rationale_id,
)
annotations.append(s_ann)
# Create a SciFact Annotation for each evidence document
else:
for doc_id, doc_rationales in evidence.items():
abstract, encoded_abstract = get_abstract_and_encoding(doc_id)
rationale_class = doc_rationales[0]["label"]
rationale_id = class_to_id[rationale_class]
# extract all rationale sentence indices from the document
rationale_sentences = []
for rationale in doc_rationales:
rationale_sentences.extend(rationale["sentences"])
s_ann = SciFactAnnotation(
claim_id=int(c["id"]),
doc_id=int(doc_id),
sentences=abstract,
encoded_sentences=encoded_abstract,
rationale_sentences=rationale_sentences,
i=intervention,
c=comparator,
o=outcome,
rationale_class=rationale_class,
rationale_id=rationale_id,
)
annotations.append(s_ann)
return annotations | 0a38b572bac113d6aa0a47e7628a5cc9fec85f16 | 12,837 |
import math
def sort_by_value(front, values):
"""
This function sorts the front list according to the values
:param front: List of indexes of elements in the value
:param values: List of values. Can be longer than the front list
:return:
"""
copied_values = values.copy() # Copy so we can modify it
sorted_list = []
while len(sorted_list) != len(front):
min_value = copied_values.index(min(copied_values))
if min_value in front:
sorted_list.append(min_value)
copied_values[min_value] = math.inf
return sorted_list | 2d259ebbc0117f9aa043d78394b6423e596f176e | 12,839 |
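A worked example: the front indices come back ordered by their associated values (ascending).

front = [2, 0]
values = [5.0, 1.0, 3.0]
print(sort_by_value(front, values))  # [2, 0] because values[2] = 3.0 < values[0] = 5.0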
def get_cell_area(self, indices=[]):
"""Return the area of the cells on the outer surface.
Parameters
----------
self : MeshVTK
a MeshVTK object
indices : list
list of the points to extract (optional)
Returns
-------
areas: ndarray
Area of the cells
"""
surf = self.get_surf(indices)
return surf.compute_cell_sizes(area=True)["Area"] | 518416acfae67f1b6e1280d5fd903d311d57f4d8 | 12,841 |
def _to_dataarray(origins, sources, values):
""" Converts grid_search inputs to DataArray
"""
origin_dims = ('origin_idx',)
origin_coords = [np.arange(len(origins))]
origin_shape = (len(origins),)
source_dims = sources.dims
source_coords = sources.coords
source_shape = sources.shape
return MTUQDataArray(**{
'data': np.reshape(values, source_shape + origin_shape),
'coords': source_coords + origin_coords,
'dims': source_dims + origin_dims,
}) | d7ac153f4e872e55ab55ddb76dfcf994e4523443 | 12,842 |
from typing import Optional
from typing import List
from pathlib import Path
import inspect
import pprint
import tempfile
import warnings
import json
def package(metadata: Metadata, requirements: Optional[List[str]] = None, path: Optional[str] = None):
"""Packages the chatbot into a single archive for deployment.
Performs some preliminary checks on the metadata.
Creates a _package.zip file in the directory containing the file that contains the bot class
unless a path is provided.
:param metadata:
:param requirements:
:param path:
:return:
"""
bot_file = Path(inspect.getfile(metadata.input_class))
print("Running verification checks on metadata.")
metadata.verify(bot_file)
metadata_dict = {
'name': metadata.name,
'imageUrl': metadata.image_url,
'color': metadata.color,
'developerUid': metadata.developer_uid,
'description': metadata.description,
'inputFile': bot_file.stem,
'inputClass': metadata.input_class.__name__,
'memory': metadata.memory,
}
print("Prepared metadata:")
pprint.pprint(metadata_dict)
print("Preparing temporary directory...")
with tempfile.TemporaryDirectory() as temp_dir:
# Copy files in bot directory
def ignore(src, names):
ignore_list = []
for name in names:
# e.g .git folder is not wanted
if name.startswith('.') or name.startswith('_package.zip'):
warnings.warn(
f"Ignoring files which start with '.': {name}.",
RuntimeWarning
)
ignore_list.append(name)
if name == "main.py":
raise RuntimeError("Bot root directory cannot contain a main.py file.")
return ignore_list
copytree(bot_file.parent, temp_dir, ignore=ignore)
# Write metadata.json
with (Path(temp_dir) / "metadata.json").open("w") as f:
json.dump(metadata_dict, f)
# Write requirements.txt
if requirements:
write_valid_requirements_file(Path(temp_dir) / "requirements.txt", requirements)
# Create zip
if path is None:
path = bot_file.parent / "_package.zip"
else:
path = Path(path)
with path.open("wb") as f:
zipfile_from_folder(temp_dir, f)
print(f"Created zip package at {path}.") | 0fb974eef4c36bc5fa0e5366eb1bf4634585025a | 12,843 |
def warp_grid(grid: tf.Tensor, theta: tf.Tensor) -> tf.Tensor:
"""
Perform transformation on the grid.
- grid_padded[i,j,k,:] = [i j k 1]
- grid_warped[b,i,j,k,p] = sum_over_q (grid_padded[i,j,k,q] * theta[b,q,p])
:param grid: shape = (dim1, dim2, dim3, 3), grid[i,j,k,:] = [i j k]
:param theta: parameters of transformation, shape = (batch, 4, 3)
:return: shape = (batch, dim1, dim2, dim3, 3)
"""
grid_size = grid.get_shape().as_list()
# grid_padded[i,j,k,:] = [i j k 1], shape = (dim1, dim2, dim3, 4)
grid_padded = tf.concat([grid, tf.ones(grid_size[:3] + [1])], axis=3)
# grid_warped[b,i,j,k,p] = sum_over_q (grid_padded[i,j,k,q] * theta[b,q,p])
# shape = (batch, dim1, dim2, dim3, 3)
grid_warped = tf.einsum("ijkq,bqp->bijkp", grid_padded, theta)
return grid_warped | 570c3acb6c57aff18b27deaa2ab5401e0fac23b6 | 12,844 |
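A small sanity check (assuming TensorFlow is imported as tf at module level, as the function body requires): with theta set to the identity affine transform, the warped grid reproduces the input grid for every batch element.

import tensorflow as tf

grid = tf.random.uniform((2, 3, 4, 3))                 # (dim1, dim2, dim3, 3)
identity_theta = tf.constant([[1., 0., 0.],
                              [0., 1., 0.],
                              [0., 0., 1.],
                              [0., 0., 0.]])[None]     # shape (batch=1, 4, 3)

warped = warp_grid(grid, identity_theta)               # shape (1, 2, 3, 4, 3)
tf.debugging.assert_near(warped[0], grid)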
async def makenotifyrole(guild):
"""Make the notify role in the given guild.
:type guild: discord.Guild
:rtype: None | discord.Role
:param guild: Guild instance to create the role in.
:return: The created role, possibly None if the creation failed.
"""
userrole = None
try:
# The bot should have the ping any role perm, so the role doesn't need to be mentionable
userrole = await guild.create_role(reason="Role created for notification", name=notifyrolename)
except discord.Forbidden: # May not have permission
pass # This should leave userrole as none
return userrole | 1da1eea0a1d510abdf21bc532f6c1d4ab6d41140 | 12,845 |
def mape(forecast: Forecast, target: Target) -> np.ndarray:
"""
Calculate MAPE.
This method accepts one or many timeseries.
For multiple timeseries pass matrix (N, M) where N is number of timeseries and M is number of time steps.
:param forecast: Predicted values.
:param target: Target values.
    :return: Same shape array with MAPE calculated for each time step of each timeseries.
"""
return 100 * np.abs(forecast - target) / target | 47d68499aa351b70d466940d7f3722566cf67568 | 12,846 |
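A quick numeric example (assuming numpy is imported as np and treating Forecast/Target as plain numpy arrays):

import numpy as np

forecast = np.array([[110.0, 90.0]])
target = np.array([[100.0, 100.0]])
print(mape(forecast, target))  # [[10. 10.]]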
def reverse_weighted_graph(graph):
"""
Function for reverting direction of the graph (weights still the same)
Args:
graph: graph representation as Example: {1: {2: 1, 3: 5}, 2: {3: 2}, 4: {1: 2}}
Returns:
reversed graph
Examples:
>>> reverse_weighted_graph({1: {2: 1, 3: 5}, 2: {3: 2}, 4: {1: 2}})
defaultdict(<class 'dict'>, {2: {1: 1}, 3: {1: 5, 2: 2}, 1: {4: 2}})
"""
rev_graph = defaultdict(dict)
for node, neighborhood in graph.items():
for adj, weight in neighborhood.items():
rev_graph[adj].update(({node: weight}))
return rev_graph | 100e05bf3b5e937133321673531103c7abd94bdb | 12,847 |
def clean_bin():
"""permanently deletes entries - crud delete"""
mongo.db.bin.remove()
mongo.db.bin.insert({'_id': ObjectId()})
return redirect(url_for('get_bin', data_requested="teams")) | bb1cb957112826710572bb5930dd1683d4295997 | 12,848 |
def correct_by_threshold(img, threshold):
"""
correct the fMRI RSA results by threshold
Parameters
----------
img : array
A 3-D array of the fMRI RSA results.
The shape of img should be [nx, ny, nz]. nx, ny, nz represent the shape of the fMRI-img.
threshold : int
The number of voxels used in correction.
        If threshold=n, only the similarity clusters consisting of at least n voxels will be kept for visualization.
Returns
-------
img : array
A 3-D array of the fMRI RSA results after correction.
The shape of img should be [nx, ny, nz]. nx, ny, nz represent the shape of the fMRI-img.
"""
if len(np.shape(img)) != 3:
return "Invalid input"
sx = np.shape(img)[0]
sy = np.shape(img)[1]
sz = np.shape(img)[2]
nsmall = 1
while nsmall*nsmall*nsmall < threshold:
nsmall = nsmall + 1
nlarge = nsmall + 2
for i in range(sx-nlarge+1):
for j in range(sy-nlarge+1):
for k in range(sz-nlarge+1):
listlarge = list(np.reshape(img[i:i+nlarge, j:j+nlarge, k:k+nlarge], [nlarge*nlarge*nlarge]))
if listlarge.count(0) < nlarge*nlarge*nlarge:
index1 = 0
for l in range(nlarge):
for m in range(nlarge):
if img[i + l, j + m, k] == 0:
index1 = index1 + 1
if img[i + l, j + m, k + nlarge - 1] == 0:
index1 = index1 + 1
for l in range(nlarge-1):
for m in range(nlarge-2):
if img[i + l, j, k + m] == 0:
index1 = index1 + 1
if img[i, j + l + 1, k + m] == 0:
index1 = index1 + 1
if img[i + nlarge - 1, j + l, k + m] == 0:
index1 = index1 + 1
if img[i + l + 1, j + nlarge - 1, k + m] == 0:
index1 = index1 + 1
nex = nlarge * nlarge * nlarge - nsmall * nsmall * nsmall
if index1 == nex:
unit = img[i+1:i+1+nsmall, j+1:j+1+nsmall, k+1:k+1+nsmall]
unit = np.reshape(unit, [nsmall*nsmall*nsmall])
list_internal = list(unit)
index2 = nsmall*nsmall*nsmall-list_internal.count(0)
if index2 < threshold:
img[i+1:i+1+nsmall, j]
for l in range(nsmall):
for m in range(nsmall):
for p in range(nsmall):
img[i+1:i+1+nsmall, j+1:j+1+nsmall, k+1:k+1+nsmall] = np.zeros([nsmall, nsmall, nsmall])
print("finished correction")
return img | 67750aba6d03d82d9e41d2d53a82550e5a68a3e2 | 12,849 |
def config_date(dut, date):
"""
    :param dut: device handle on which to configure the date
    :param date: date string accepted by ``date --set``
    :return: True
"""
st.log("config date")
command = "date --set='{}'".format(date)
st.config(dut, command)
return True | 055db1a0ddb4d640d154aae4dec29e3845d7dfb8 | 12,850 |
def read_dicom():
"""Read in DICOM series"""
dicomPath = join(expanduser('~'), 'Documents', 'SlicerDICOMDatabase',
'TCIALocal', '0', 'images', '')
reader = sitk.ImageSeriesReader()
seriesIDread = reader.GetGDCMSeriesIDs(dicomPath)[1]
dicomFilenames = reader.GetGDCMSeriesFileNames(dicomPath, seriesIDread)
reader.SetFileNames(dicomFilenames)
return reader.Execute() | 64c4aae3c1cc0e31d6db46e741a3ecc52be580cc | 12,851 |
def L_model_backward(AL, Y, caches):
"""
    Perform backward propagation for an L-layer neural network model.
    Arguments:
    AL -- output values of the model
    Y -- true labels
    caches -- every cache from linear_activation_forward() with ReLU and Sigmoid activations
    Returns:
    grads -- dictionary containing all gradients
grads["dA" + str(l)] = ...
grads["dW" + str(l)] = ...
grads["db" + str(l)] = ...
"""
grads = {}
L = len(caches) # the number of layers
m = AL.shape[1]
Y = Y.reshape(AL.shape) # after this line, Y is the same shape as AL
    # Initialize the backward propagation
dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))
    # Gradients of the L-th (output) layer. Inputs: "AL, Y, caches". Outputs: "grads["dAL"], grads["dWL"], grads["dbL"]
current_cache = caches[L - 1]
grads["dA" + str(L)], grads["dW" + str(L)], grads["db" + str(L)] = \
linear_activation_backward(dAL, current_cache, activation="sigmoid")
for l in reversed(range(L - 1)):
        # l-th layer: (RELU -> LINEAR) gradients
current_cache = caches[l]
dA_prev_temp, dW_temp, db_temp = \
linear_activation_backward(grads["dA" + str(l + 2)], current_cache, activation="relu")
grads["dA" + str(l + 1)] = dA_prev_temp
grads["dW" + str(l + 1)] = dW_temp
grads["db" + str(l + 1)] = db_temp
return grads | ef296179d51e8c4b8be474414f65f812b6f8ffb0 | 12,855 |
def Cnot(idx0: int = 0, idx1: int = 1) -> Operator:
"""Controlled Not between idx0 and idx1, controlled by |1>."""
return ControlledU(idx0, idx1, PauliX()) | a087aa4d7fb22343523a8b6114a7b50eea971e21 | 12,857 |
def init_sql_references(conn):
"""
Utility function to get references from SQL.
The returned objects conveniently identify users based on kb_name or user hashkey
"""
# get kb_names to kb_id
kb_ref = pds.read_sql("""SELECT id, kb_name, directory_id FROM dbo.kb_raw""", conn)
get_kb_dir_id = kb_ref.loc[:,['kb_name', 'directory_id']].set_index('kb_name').to_dict()['directory_id']
get_kb_raw_id = kb_ref.loc[:,['kb_name', 'id']].set_index('kb_name').to_dict()['id']
# get kb permissions
permissions = pds.read_sql("SELECT hashkey, kb_name, user_id FROM dbo.users \
LEFT JOIN dbo.kb_directory ON dbo.users.id = dbo.kb_directory.user_id \
LEFT JOIN kb_raw ON dbo.kb_directory.id = dbo.kb_raw.directory_id \
", conn)
permissions = pd.DataFrame(np.array(permissions), columns = ['hashkey', 'kb_name', 'user_id']).set_index('hashkey')
return get_kb_dir_id, get_kb_raw_id, permissions | 3f9874632d50cd8a483d75573cc1d63561f253d2 | 12,858 |
def inoptimal_truncation_square_root(A, B, C, k, check_stability=False):
"""Use scipy to perform balanced truncation
Use scipy to perform balanced truncation on a linear state-space system.
    This method is the natural application of scipy but is suboptimal
    performance-wise compared to `truncation_square_root_trans_matrix`.
See also
-----
truncation_square_root_trans_matrix
"""
if check_stability and not isStable(A):
raise ValueError("This doesn't seem to be a stable system!")
AH = A.transpose().conj()
P = linalg.solve_lyapunov(A, -np.dot(B, B.transpose().conj()))
Q = linalg.solve_lyapunov(AH, -np.dot(C.transpose().conj(), C))
U = linalg.cholesky(P).transpose().conj()
L = linalg.cholesky(Q)
W, Sigma, V = linalg.svd(np.dot(U.transpose().conj(), L),
full_matrices=False,
overwrite_a=True, check_finite=False)
W1 = W[:, :k]
Sigma1 = Sigma[:k]
V1 = V[:, :k]
Sigma1_pow_neg_half = np.diag(Sigma1**-.5)
T1 = np.dot(Sigma1_pow_neg_half,
np.dot(V1.transpose().conj(), L.transpose().conj()))
Ti1 = np.dot(np.dot(U, W1),
Sigma1_pow_neg_half)
return k, np.dot(T1, np.dot(A, Ti1)), np.dot(T1, B), np.dot(C, Ti1), \
Sigma, Ti1, T1 | 3c4fa1ac73f22f5e07d49314e1cf3d3b022349e8 | 12,859 |
def _tessellate_bed(chrom: str, chromStart: int, chromEnd: int, window_size: int) -> pd.DataFrame:
"""Return tessellated pandas dataframe splitting given window.
Parameters
-----------------------
chrom: str,
Chromosome containing given window.
chromStart: int,
Position where the window starts.
chromEnd: int,
Position where the window ends.
window_size: int
Target window size.
Returns
-----------------------
Returns a pandas DataFrame in bed-like format containing the tessellated windows.
"""
return pd.DataFrame([
{
"chrom": chrom,
"chromStart": chromStart + window_size*i,
"chromEnd": chromStart + window_size*(i+1),
}
for i in range((chromEnd - chromStart)//window_size)
]) | 706b031069dd334bc6f364e077398ced56b152a8 | 12,860 |
def compute_locksroot(locks: PendingLocksState) -> Locksroot:
"""Compute the hash representing all pending locks
The hash is submitted in TokenNetwork.settleChannel() call.
"""
return Locksroot(keccak(b"".join(locks.locks))) | 05c4996a9cc837939c662ef419e36421cb00033d | 12,862 |
from typing import Union
from typing import List
def flatten(text: Union[str, List[str]], separator: str = None) -> str:
"""
Flattens the text item to a string. If the input is a string, that
same string is returned. Otherwise, the text is joined together with
the separator.
Parameters
----------
text : Union[str, List[str]]
The text to flatten
separator : str, default=None
The separator to join the list with. If `None`, the separator will be " "
Returns
-------
str
The flattened text
"""
separator = separator or " "
if isinstance(text, list):
return separator.join(text)
return text | 3980e0d0d14ac5764c4c5844ab3a943d1971d0ad | 12,863 |
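Usage examples for the helper above:

assert flatten("already a string") == "already a string"
assert flatten(["join", "these", "words"]) == "join these words"
assert flatten(["a", "b", "c"], separator=", ") == "a, b, c"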
def convert_byte32_arr_to_hex_arr(byte32_arr):
"""
This function takes in an array of byte32 strings and
returns an array of hex strings.
Parameters:
byte32_arr Strings to convert from a byte32 array to a hex array
"""
hex_ids = []
for byte32_str in byte32_arr:
hex_ids = hex_ids + [byte32_str.hex()]
return hex_ids | 9185c1e98b6eb10a42714e1fc53ebaed88997a82 | 12,864 |
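A short usage example with raw bytes values (which expose the .hex() method the function relies on):

byte32_arr = [bytes.fromhex("ab" * 32), bytes.fromhex("cd" * 32)]
hex_ids = convert_byte32_arr_to_hex_arr(byte32_arr)
# ['abab...ab', 'cdcd...cd'] -- each entry is a 64-character hex string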
from typing import Union
from typing import Tuple
def backtest_loop(
start_time: Union[pd.Timestamp, str],
end_time: Union[pd.Timestamp, str],
trade_strategy: BaseStrategy,
trade_executor: BaseExecutor,
) -> Tuple[PortfolioMetrics, Indicator]:
"""backtest function for the interaction of the outermost strategy and executor in the nested decision execution
please refer to the docs of `collect_data_loop`
Returns
-------
portfolio_metrics: PortfolioMetrics
it records the trading portfolio_metrics information
indicator: Indicator
it computes the trading indicator
"""
return_value = {}
for _decision in collect_data_loop(start_time, end_time, trade_strategy, trade_executor, return_value):
pass
return return_value.get("portfolio_metrics"), return_value.get("indicator") | 74620671f0e37b7439d15d76e0e3e92b8984a608 | 12,866 |
import functools
def failOnNonTransient(func):
"""Only allow function execution when immutable is transient."""
@functools.wraps(func)
def wrapper(inst, *args, **kwargs):
# make the call fail if the object is not transient
if inst.__im_state__ != interfaces.IM_STATE_TRANSIENT:
raise AttributeError('Cannot update locked immutable object.')
return func(inst, *args, **kwargs)
return wrapper | 46b94385084a6b7dae9149cfe8864b94df3ed5ea | 12,867 |
def text_has_emoji(text):
"""判断文本中是否包含emoji"""
for character in text:
if character in emoji.UNICODE_EMOJI:
return True
return False | 8fd0cfb2aed42a6b149f29ffea5d65bc901c5353 | 12,868 |
def rod_faces(n1, n2, xform, dim1, dim2): # validated
"""
defines points in a circle with triangle based end caps
"""
# 4,8,12,16,... becomes 5,9,13,17,...
thetas = np.radians(np.linspace(0., 360., 17))
ntheta = len(thetas)
nfaces = 0
all_faces = []
points_list = []
x = np.zeros(ntheta)
for nid, dim in [(n1, dim1), (n2, dim2)]:
radius, = dim
y = radius * np.cos(thetas)
z = radius * np.sin(thetas)
xyz = np.vstack([x, y, z]).T
assert xyz.shape == (ntheta, 3), xyz.shape
pointsi = np.dot(xyz, xform) + nid
points_list.append(pointsi)
# the tri_cap is made from points that aren't defined yet
# (the n1/n2 end points)
tris = tri_cap(ntheta)
# we need to use the tolist because we're going to
# combine quads and tris (the elements have different
# lengths)
all_faces += (nfaces + tris).tolist()
nfaces += tris.shape[0]
# the main cylinder uses the points defined independent
# of the points n1/n2
faces = elements_from_quad(2, ntheta)
all_faces += faces.tolist()
# used by the tri_caps
points_list.append(n1)
points_list.append(n2)
points = np.vstack(points_list)
return all_faces, points, points.shape[0] | 306fdde57121f497d6ef263c2caea187bfc7af10 | 12,869 |
def xfork():
""" xfork() is similar to fork but doesn't throw an OSError exception.
Returns -1 on error, otherwise it returns the same value as fork() does.
"""
try:
ret = fork()
except OSError:
ret = -1
return ret | 1bc0c16a2d71e4e1607d45af485a7c2999fbe631 | 12,870 |
import re
def cigar_segment_bounds(cigar, start):
"""
Determine the start and end positions on a chromosome of a non-no-matching part of an
RNA-seq read based on a read's cigar string.
cigar string meaning: http://bioinformatics.cvr.ac.uk/blog/tag/cigar-string/
Example:
'50M25N50M' with start = 100 -> [100, 149, 175, 224]. Note that start and end integers
are inclusive, i.e. all positions at or between 100 and 149 and at or between 175 and 224
are covered by reads.
:param cigar: str a read's cigar string, e.g. "49M165N51M"
:param start: int a read's start position on a chromosome
:return: list of integers representing cigar match start, end points, in order of matching subsequences
"""
# if CIGAR string is a single full match (i.e. "<positive integer>M")
# extract length of the match, return match segment.
full_match = re.match(r'(\d+)M$', cigar)
if full_match is not None:
extension = int(cigar[:(full_match.span()[-1] - 1)]) - 1
return [start, start + extension]
# break up cigar string into list of 2-tuples (letter indicative of match/no match, run length integer).
cigar_split = [(v, int(k)) for k, v in re.findall(r'(\d+)([A-Z]?)', cigar)]
# initialize parse params.
# Allow for "hard clipping" where aligned read can start with non-matching region (https://bit.ly/2K6TJ5Y)
augment = False
any_match = False
# output storage.
match_idx_list = list()
for idx in range(len(cigar_split)):
segment = cigar_split[idx]
        if segment[0] == 'M':
            any_match = True
            extension = segment[1] - 1  # end of a match run is inclusive.
            augment = True
            match_idx_list += [start, start + extension]  # append a match run to output.
            start += extension  # advance start to the (inclusive) end of the match run.
else:
if augment:
extension = segment[1] + 1
augment = False
else:
extension = segment[1]
start += extension
# if no matching regions found, throw error.
if not any_match:
raise ValueError('CIGAR string {0} has no matching region.'.format(cigar))
return match_idx_list | c870dfb9b11e2fd1df9fb347528252f114b8d70f | 12,871 |
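A quick check against the docstring example above, plus the single-full-match shortcut:

assert cigar_segment_bounds("50M25N50M", start=100) == [100, 149, 175, 224]
assert cigar_segment_bounds("100M", start=0) == [0, 99]   # handled by the full-match branch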
def augument(data_dir, img_path, steering_angle, range_x=100, range_y=10):
"""
    Generate an augmented image and adjust the steering angle.
(The steering angle is associated with the image)
"""
image, steering_angle = choose_image(data_dir, img_path, steering_angle)
image, steering_angle = random_flip(image, steering_angle)
image, steering_angle = random_translate(image, steering_angle, range_x, range_y)
image = random_shadow(image)
image = random_brightness(image)
return image, steering_angle | 1eafb5ea4ed024e6bab4008155c8364e8a480b8f | 12,872 |
def ldns_buffer_limit(*args):
"""LDNS buffer."""
return _ldns.ldns_buffer_limit(*args) | d7a4c3c50ffd6db98d78a6a092c256bd1e0e3c11 | 12,873 |
def _call_godot(environment, source, arguments, target):
"""Runs the Godot executable with the specified command line arguments
@param environment Environment in which the Godot executable will be run
@param source Input files that will be involved
@param arguments Arguments that will be passed to the Godot executable
@param target Output files that should result from the call"""
if 'GODOT_EXECUTABLE' in environment:
        godot_executable = environment['GODOT_EXECUTABLE']
else:
if 'GODOT_VERSION' in environment:
godot_version = environment['GODOT_VERSION']
else:
godot_version = _default_godot_version
godot_executable = _find_godot_executable(godot_version)
#environment['GODOT_EXECUTABLE'] = godot_executable
#if source is None:
# source = godot_executable
return environment.Command(
target, source, '"' + godot_executable + '" ' + arguments
) | 4320a6af9d2d1f8e8a06494df201c9c4a6f2416b | 12,876 |
def random_seeded(func):
""" Decorator that uses the `random_seed` parameter from functions to seed the RNG. """
@wraps(func)
def wrapper(*args, random_seed: int = None, **kwargs):
_RNG.seed(random_seed)
return func(*args, **kwargs)
return wrapper | 1bf572625092680fb996b34469a9a990627acd59 | 12,877 |
def getCRS(station_name=None, crs=None, autoCreate=True):
"""
Method to get CRS code for the give station name. This method may not
scale nicely for a production environment. Use a proper DB instead.
@param station_name: Some characters for the station name.
@param crs: CRS code if known
@param autoCreate: Boolean to indicate if the sqlite DB should be created if not exist.
"""
# Create the SQLite DB of CRS if not found already. This can be turned off
# by passing autoCreate = False.
if not os.path.exists(CRS_SQLITE_DB) and autoCreate:
print "Attempting to create CRS DB for first run ..."
recreateDB()
fetchFromUrl()
conn = sqlite3.connect(CRS_SQLITE_DB)
c = conn.cursor()
if station_name:
c.execute('SELECT * from crstab where station_name like "%%%s%%"' %station_name.lower())
elif crs:
c.execute('SELECT * from crstab where crs = "%s"' %crs.lower())
else:
return None
ret = c.fetchall()
c.close()
conn.close()
return ret | e44cda3f0299cc5cc57c2574debe011809e716e6 | 12,878 |
def _initialize_object_from_dict(object_dict, parent=None):
"""Initialize a python object from dict."""
provider = object_dict['provider']
args = object_dict.get('args') or []
kwargs = object_dict.get('kwargs') or {}
obj = _get_object_by_referance(provider)
if parent is not None:
kwargs.update({'parent': parent})
return obj(*args, **kwargs) | a6fb19c0db1e839514d19df50e223bf98a2241f8 | 12,879 |
def from_hdf(in_path, index=None, keypoints=True, descriptors=True):
"""
For a given node, load the keypoints and descriptors from a hdf5 file. The
keypoints and descriptors kwargs support returning only keypoints or descriptors.
The index kwarg supports returning a subset of the data.
Parameters
----------
in_path : str
handle to the file
index : iterable
an h5py accepted indexer to pull only a subset of the keypoints
off disk. Default is None to pull all keypoints.
keypoints : bool
if True (default) return the keypoints
descriptors : bool
if True (default) return the descriptors
Returns
-------
keypoints : DataFrame
A pandas dataframe of keypoints.
descriptors : ndarray
A numpy array of descriptors
"""
if isinstance(in_path, str):
hdf = io_hdf.HDFDataset(in_path, mode='r')
else:
hdf = in_path
outd = '/descriptors'
outk = '/keypoints'
if index is not None:
index=np.asarray(index)
# The indices into HDF have to be sorted lists. When indices get passed in
# they are frequently ordered, so this pulls the data using the sorted
# index and then reorders the data.
i = np.argsort(index)
ii = np.argsort(i)
# Is is important to use sorted() so that an in-place sort is NOT used.
if descriptors:
desc = hdf[outd][index[i].tolist()]
desc = desc[ii]
if keypoints:
raw_kps = hdf[outk][index[i].tolist()]
raw_kps = raw_kps[ii]
else:
# Unlike numpy hdf does not handle NoneType as a proxy for `:`
if descriptors:
desc = hdf[outd][:]
if keypoints:
raw_kps = hdf[outk][:]
if keypoints:
index = raw_kps['index']
clean_kps = utils.remove_field_name(raw_kps, 'index')
columns = clean_kps.dtype.names
allkps = pd.DataFrame(data=clean_kps, columns=columns, index=index)
if isinstance(in_path, str):
hdf = None
if keypoints and descriptors:
return allkps, desc
elif keypoints:
return allkps
else:
return desc | 2ec00092e04dcd41c7a263781b8a5f7e8d888e5f | 12,880 |
def main(cfg):
"""Solve the CVRP problem."""
# Instantiate the data problem.
data = create_data_model(cfg)
print(data)
if len(data['distance_matrix'])==0:
result = {
"solution":False,
"error-message":"unable to calculate distance matrix"
}
return result
# Create the routing index manager.
manager = pywrapcp.RoutingIndexManager(len(data['distance_matrix']),
data['num_vehicles'], data['depot'])
# Create Routing Model.
routing = pywrapcp.RoutingModel(manager)
# Create and register a transit callback.
def distance_callback(from_index, to_index):
"""Returns the distance between the two nodes."""
# Convert from routing variable Index to distance matrix NodeIndex.
from_node = manager.IndexToNode(from_index)
to_node = manager.IndexToNode(to_index)
return data['distance_matrix'][from_node][to_node]
transit_callback_index = routing.RegisterTransitCallback(distance_callback)
# Define cost of each arc.
routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
def demand_callback(from_index):
"""Returns the demand of the node."""
# Convert from routing variable Index to demands NodeIndex.
from_node = manager.IndexToNode(from_index)
return data['demands'][from_node]
# Add Distance constraint.
dimension_name = 'Distance'
routing.AddDimension(
transit_callback_index,
0, # no slack
7200, # vehicle maximum travel distance
True, # start cumul to zero
dimension_name)
demand_callback_index = routing.RegisterUnaryTransitCallback(demand_callback)
routing.AddDimensionWithVehicleCapacity(
demand_callback_index,
0, # null capacity slack
data['vehicle_capacities'], # vehicle maximum capacities
True, # start cumul to zero
'Capacity')
# Setting first solution heuristic.
search_parameters = pywrapcp.DefaultRoutingSearchParameters()
search_parameters.first_solution_strategy = (
routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)
# Solve the problem.
solution = routing.SolveWithParameters(search_parameters)
# Return solution dictionary
if solution:
return get_solution(data, manager, routing, solution)
else:
result = {
"solution":False
}
return result | a33c1df5462e9af2eb508b7e2803dfd371609656 | 12,881 |
def get_corners(p, fov):
"""Get corners relative to DSS coordinates. xy coords anti-clockwise"""
c = np.array([[0, 0], fov[::-1]]) # lower left, upper right xy
# corners = np.c_[c[0], c[:, 1], c[1], c[::-1, 0]].T # / clockwise yx
corners = np.c_[c[0], c[::-1, 0], c[1], c[:, 1]].T # / clockwise xy
corners = trans.rigid(corners, p)
return corners | e66e4dfd8eb26dc2caacd2e59c64de5d85bc7d10 | 12,882 |
from typing import Dict
from typing import Any
from typing import Tuple
from typing import List
from typing import Optional
def mixnet_m(
    num_classes: int = 1000,
    multiplier: float = 1.0,
    divisor: int = 8,
    min_depth: Optional[int] = None,
    dataset: str = "IMAGENET",
) -> Dict[str, Any]:
"""Build MixNet-M."""
if dataset == "IMAGENET":
medium: Tuple[List[Any], ...] = (
[24, 24, 1, 1, 1, None, False],
[24, 32, 3, 2, 6, None, False],
[32, 32, 1, 1, 3, None, False],
[32, 40, 4, 2, 6, 0.5, True],
[40, 40, 2, 1, 6, 0.5, True],
[40, 40, 2, 1, 6, 0.5, True],
[40, 40, 2, 1, 6, 0.5, True],
[40, 80, 3, 2, 6, 0.25, True],
[80, 80, 4, 1, 6, 0.25, True],
[80, 80, 4, 1, 6, 0.25, True],
[80, 80, 4, 1, 6, 0.25, True],
[80, 120, 1, 1, 6, 0.5, True],
[120, 120, 4, 1, 3, 0.5, True],
[120, 120, 4, 1, 3, 0.5, True],
[120, 120, 4, 1, 3, 0.5, True],
[120, 200, 4, 2, 6, 0.5, True],
[200, 200, 4, 1, 6, 0.5, True],
[200, 200, 4, 1, 6, 0.5, True],
[200, 200, 4, 1, 6, 0.5, True],
)
stem = round_filters(24, multiplier)
stem_stride = 2
last_out_channels = round_filters(200, multiplier)
head = round_filters(1536, multiplier=1.0)
elif dataset == "CIFAR100":
medium = (
[24, 24, 1, 1, 1, None, False],
[24, 32, 3, 1, 6, None, False],
[32, 32, 1, 1, 3, None, False],
[32, 40, 4, 2, 6, 0.5, True],
[40, 40, 2, 1, 6, 0.5, True],
[40, 40, 2, 1, 6, 0.5, True],
[40, 40, 2, 1, 6, 0.5, True],
[40, 80, 3, 2, 6, 0.25, True],
[80, 80, 4, 1, 6, 0.25, True],
[80, 80, 4, 1, 6, 0.25, True],
[80, 80, 4, 1, 6, 0.25, True],
[80, 120, 1, 1, 6, 0.5, True],
[120, 120, 4, 1, 3, 0.5, True],
[120, 120, 4, 1, 3, 0.5, True],
[120, 120, 4, 1, 3, 0.5, True],
[120, 200, 4, 2, 6, 0.5, True],
[200, 200, 4, 1, 6, 0.5, True],
[200, 200, 4, 1, 6, 0.5, True],
[200, 200, 4, 1, 6, 0.5, True],
)
stem = round_filters(24, multiplier)
stem_stride = 1
last_out_channels = round_filters(200, multiplier)
head = round_filters(1536, multiplier=1.0)
else:
raise NotImplementedError
for line in medium:
line[0] = round_filters(line[0], multiplier)
line[1] = round_filters(line[1], multiplier)
return dict(
stem=stem,
stem_stride=stem_stride,
head=head,
last_out_channels=last_out_channels,
block_args=medium,
dropout=0.25,
num_classes=num_classes,
) | 839852df3bc535613093c752addc6aed64e61e5b | 12,883 |
import functools
import asyncio
def no_block(func):
"""Turns a blocking function into a non-blocking coroutine function."""
@functools.wraps(func)
async def no_blocking_handler(*args, **kwargs):
partial = functools.partial(func, *args, **kwargs)
return await asyncio.get_event_loop().run_in_executor(None, partial)
return no_blocking_handler | 5681fe7275a89c522384b28f9473fded8bba846b | 12,884 |
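# Example (sketch): wrapping a blocking call so it can be awaited without
# stalling the event loop; `slow_add` is a made-up function for illustration.
import asyncio
import time

@no_block
def slow_add(a, b):
    time.sleep(0.1)  # simulate blocking work
    return a + b

async def demo():
    return await slow_add(1, 2)  # runs in the default executor

# asyncio.run(demo())  # -> 3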
def wgan_g_loss(scores_fake):
"""
Input:
- scores_fake: Tensor of shape (N,) containing scores for fake samples
Output:
- loss: Tensor of shape (,) giving WGAN generator loss
"""
return -scores_fake.mean() | 089561b47059a4bf07bf878012ce650cd6e34b4f | 12,885 |
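# Example (sketch): the generator loss is just the negated mean critic score on
# fake samples; the scores below are random for illustration.
import torch

scores_fake = torch.randn(8)     # critic scores for 8 generated samples
loss = wgan_g_loss(scores_fake)  # equals -scores_fake.mean()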
import time
import numpy as np
def centroid_avg(stats):
"""
Read centroid X and Y 10x and return mean of centroids.
stats : stats method of ophyd camera object to use, e.g. cam_8.stats4
Examples
--------
centroid_avg(cam_8.stats4)
centroidY = centroid_avg(cam_8.stats4)[1]
"""
centroidXArr = np.zeros(10)
centroidYArr = np.zeros(10)
for i in range(0, 10):
centroidXArr[i] = stats.centroid.x.get()
centroidYArr[i] = stats.centroid.y.get()
# print('Centroid X = {:.6g} px'.format(centroidXArr[i]), ', Centroid Y = {:.6g} px'.format(centroidYArr[i]))
time.sleep(0.2)
CentroidX = centroidXArr.mean()
CentroidY = centroidYArr.mean()
print('Mean centroid X = {:.6g} px'.format(CentroidX))
print('Mean centroid Y = {:.6g} px'.format(CentroidY))
return CentroidX, CentroidY | 5fb1715ab77858084f25400bd8c2508689b57cc1 | 12,886 |
def get_address_host_port(addr, strict=False):
"""
Get a (host, port) tuple out of the given address.
For definition of strict check parse_address
ValueError is raised if the address scheme doesn't allow extracting
the requested information.
>>> get_address_host_port('tcp://1.2.3.4:80')
('1.2.3.4', 80)
"""
scheme, loc = parse_address(addr, strict=strict)
backend = registry.get_backend(scheme)
try:
return backend.get_address_host_port(loc)
except NotImplementedError:
raise ValueError(
"don't know how to extract host and port for address %r" % (addr,)
) | a0ec20c347becc6f403b9ee121d127fee41c6b0d | 12,887 |
def get_ua_list():
"""
获取ua列表
"""
with open('zhihu_spider/misc/ua_list.txt', 'r') as f:
return [x.replace('\n', '') for x in f.readlines()] | 6ebcf5d85650ad6644ccdf48aafed0160bd52ec0 | 12,888 |
import time
def measure_time(func):
"""add time measure decorator to the functions"""
def func_wrapper(*args, **kwargs):
start_time = time.time()
a = func(*args, **kwargs)
end_time = time.time()
#print("time in seconds: " + str(end_time-start_time))
return end_time - start_time
return func_wrapper | e9fb4c1b7260cfe686204b50cbe46f27f25c467a | 12,889 |
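# Example (sketch): the decorated call returns the elapsed wall-clock seconds,
# not the wrapped function's result.
@measure_time
def busy_work(n):
    return sum(range(n))

# elapsed = busy_work(1_000_000)  # e.g. 0.02 (seconds, machine dependent)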
import numpy as np
def generate_dummy_targets(bounds, label, n_points, field_keys=[], seed=1):
"""
Generate dummy points with randomly generated positions. Points
are generated on node 0 and distributed to other nodes if running
in parallel.
Parameters
----------
bounds : tuple of float
Bounding box to generate targets within, of format
(xmin, ymin, xmax, ymax).
label : str
Label to assign generated targets.
n_points : int
Number of points to generate
field_keys : list of str, optional
List of keys to add to `fields` property.
seed : int, optional
Random number generator seed.
Returns
-------
Targets
A collection of randomly generated targets.
"""
if mpiops.chunk_index == 0:
rnd = np.random.RandomState(seed)
def _generate_points(lower, upper, limit):
new_points = []
while len(new_points) < limit:
new_point = rnd.uniform(lower, upper)
new_points.append(new_point)
return new_points
new_lons = _generate_points(bounds[0], bounds[2], n_points)
new_lats = _generate_points(bounds[1], bounds[3], n_points)
lonlats = np.column_stack([sorted(new_lons), sorted(new_lats)])
labels = np.full(lonlats.shape[0], label)
if field_keys:
fields = {k: np.zeros(n_points) for k in field_keys}
else:
fields = {}
_logger.info("Generated %s dummy targets", len(lonlats))
# Split for distribution
lonlats = np.array_split(lonlats, mpiops.chunks)
labels = np.array_split(labels, mpiops.chunks)
split_fields = {k: np.array_split(v, mpiops.chunks) for k, v in fields.items()}
fields = [{k: v[i] for k, v in split_fields.items()} for i in range(mpiops.chunks)]
else:
lonlats, labels, fields = None, None, None
lonlats = mpiops.comm.scatter(lonlats, root=0)
labels = mpiops.comm.scatter(labels, root=0)
fields = mpiops.comm.scatter(fields, root=0)
return Targets(lonlats, labels, fields) | 6986161499aa62c3e0a9bea4367886dc51736c74 | 12,890 |
from typing import List
def load_numbers_sorted(txt: str) -> List[int]:
"""ファイルから番号を読み込みソートしてリストを返す
Args:
txt (str): ファイルのパス
Returns:
List[int]: 番号のリスト
"""
numbers = []
with open(txt) as f:
        numbers = sorted(int(line) for line in f)
return numbers | 6f10badd417a2ceefefa9f28a5c40583ea077d43 | 12,891 |
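# Example (sketch): the input file holds one number per line; the temporary
# file here is only for illustration.
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
    tmp.write("3\n1\n2\n")

# load_numbers_sorted(tmp.name)  # -> [1, 2, 3]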
def translate_pt(p, offset):
"""Translates point p=(x,y) by offset=(x,y)"""
return (p[0] + offset[0], p[1] + offset[1]) | 9fdc578d461219e9e5d1b557b9fde3d7a0946815 | 12,893 |
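# Example (sketch): shifting a point by an offset.
moved = translate_pt((1, 2), (3, -1))  # -> (4, 1)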
def truncate(sequence):
""" Do nothing. Just a placeholder. """
string = str(sequence)
return string.split()[0] | 2e8eeffb08d6d3d5d6ad5e6a83e596ec61a2eea2 | 12,895 |
def unbind(port: int) -> dict:
"""Request browser port unbinding.
Parameters
----------
port: int
Port number to unbind.
"""
return {"method": "Tethering.unbind", "params": {"port": port}} | c980eaa28e29dd44139035f0c8882d2960322328 | 12,896 |
import numpy as np
def xy_to_ellipse(x,Vx,y,Vy):
"""
Takes the Cartesian variables.
This function returns the particle's position relative to an ellipse and parameters of the ellipse.
Returns a,e,theta,theta_E
"""
# radius using x and y
r = np.sqrt(x ** 2 + y ** 2)
# speed of the particle
V = np.sqrt(Vx ** 2 + Vy ** 2)
# angular momentum per mass
h = x * Vy - y * Vx
# energy per mass
u = (V ** 2) / 2. - 4. * (np.pi ** 2) / r
# semi-major axis
a = -2. * ((np.pi) ** 2) / u
# eccentricity of the elliptical orbit, added absolute value
e = np.sqrt(np.abs(1 - ((h / (2. * np.pi)) ** 2 )/ a))
# theta
theta = np.arctan2(y,x)
# theta_E, compute e*cos(theta - thetaE) first
buff = a * (1. - e ** 2) / r - 1.
# divide buff/e and output 0 if it is a circular orbit
buff_cos = np.divide(buff, e, out=np.zeros_like(buff), where=(e > np.power(10.,-5.)))
#to make sure that arccos takes values less than 1 and greater than -1
buff_cos[buff_cos < -1.] = -1.
buff_cos[buff_cos > 1.] = 1.
delta = np.arccos(buff_cos)
# change the sign if the radial velocity is negative
delta *= np.power(-1.,(x * Vx + y * Vy) < 0.)
thetaE = theta - delta
# set thetaE to 0 if it is a circular orbit
thetaE *= (e > np.power(10.,-5.))
# fix to add 2pi or subtract 2pi if thetaE isn't between -pi and pi
thetaE -= (thetaE > np.pi) * 2 * np.pi
thetaE += (thetaE < -np.pi) * 2 * np.pi
return a,e,theta,thetaE | 2606a81899431349adc419b04d87063f2e75936a | 12,898 |
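# Example (sketch): a circular 1 AU orbit in units of AU and years (so GM = 4*pi**2);
# inputs are arrays because the function indexes into its intermediate results.
import numpy as np

x, y = np.array([1.0]), np.array([0.0])
Vx, Vy = np.array([0.0]), np.array([2.0 * np.pi])
a, e, theta, thetaE = xy_to_ellipse(x, Vx, y, Vy)
# a -> [1.], e -> [0.], theta -> [0.], thetaE -> [0.]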
from typing import List
from typing import Dict
from typing import OrderedDict
def leak_dictionary_by_ignore_sha(
policy_breaks: List[PolicyBreak],
) -> Dict[str, List[PolicyBreak]]:
"""
leak_dictionary_by_ignore_sha sorts matches and incidents by
first appearance in file.
sort incidents by first appearance on file,
file wide matches have no index
so give it -1 so they get bumped to the top
:return: Dictionary with line number as index and a list of
matches that start on said line.
"""
policy_breaks.sort(
key=lambda x: min( # type: ignore
(match.index_start if match.index_start else -1 for match in x.matches)
)
)
sha_dict: Dict[str, List[PolicyBreak]] = OrderedDict()
for policy_break in policy_breaks:
policy_break.matches.sort(key=lambda x: x.index_start if x.index_start else -1)
ignore_sha = get_ignore_sha(policy_break)
sha_dict.setdefault(ignore_sha, []).append(policy_break)
return sha_dict | d94bc10b8f2d94eee639bd94e75ad5835d9b6f1a | 12,899 |
async def get_token(tkn: Token = Depends(from_authotization_header_nondyn)):
"""
Returns informations about the token currently being used. Requires a
clearance level of 0 or more.
"""
assert_has_clearance(tkn.owner, "sni.read_own_token")
return GetTokenOut.from_record(tkn) | 19ea12ad43a4a61f940e9dce4ca3c4a5d6fbbdf2 | 12,900 |
import pandas as pd
def import_as_event_history(path):
"""
Import file as event history json format.
Parameters
----------
path : str
Absolute path to file.
Returns
-------
events : list
List of historic events.
"""
# initialise output list
events = []
# import through pandas dataframe
df = pd.read_csv(path)
    # verify column existence
    if 'temperature' not in df.columns or 'unix_time' not in df.columns:
print_error('Imported file should have columns \'temperature\' and \'unix_time\'.')
# extract UTC timestamps
tx = pd.to_datetime(df['unix_time'], unit='s')
# iterate events
for i in range(len(df)):
# convert unixtime to DT format
timestamp = dt_timestamp_format(tx[i])
# create event json format
json = api_json_format(timestamp, df['temperature'].iloc[i])
# append output
events.append(json)
return events | 1c4362263d177bf2d2a5561d3ed2048ff23faeb2 | 12,901 |
import numpy as np
import pandas as pd
def reduce_dataset(d: pd.DataFrame, reduction_pars: dict):
    """
    Reduces the data contained in a pandas DataFrame.
    :param d: pandas DataFrame. Each column contains lists of numbers
    :param reduction_pars: dict containing 'type' and 'values'. 'type' describes the type of reduction performed on the
        lists in d.
    :return: pandas DataFrame with the reduced data, indexed like d
    """
p = pd.DataFrame(index=d.index)
for k in d:
if reduction_pars['type'] == 'bins':
p[k] = list(reduce_matrix(np.vstack(d[k].values), reduction_pars['values']))
if reduction_pars['type'] == 'aggregated_selection':
if np.all(reduction_pars['values'] == np.arange(len(d[k][0]))):
p[k] = d[k]
else:
p[k] = list(aggregated_reduction(np.vstack(d[k].values), reduction_pars['values']))
if reduction_pars['type'] == 'min':
p[k] = np.min(np.vstack(d[k].values), axis=1)
if reduction_pars['type'] == 'max':
p[k] = np.max(np.vstack(d[k].values), axis=1)
if reduction_pars['type'] == 'mean':
p[k] = np.mean(np.vstack(d[k].values), axis=1)
return p | 080bb5486787fab25bbc9347e83ed79d4525abe8 | 12,902 |
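# Example (sketch): the 'mean' reduction collapses each stored list to one value;
# the 'bins' and 'aggregated_selection' branches need the external reduce_matrix /
# aggregated_reduction helpers and are not shown.
import pandas as pd

d = pd.DataFrame({'signal': [[1, 2, 3], [4, 5, 6]]})
reduced = reduce_dataset(d, {'type': 'mean'})
# reduced['signal'] -> [2.0, 5.0]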
def update_office(office_id):
"""Given that i am an admin i should be able to edit a specific political office
When i visit to .../api/v2/offices endpoint using PATCH method"""
if is_admin() is not True:
return is_admin()
if not request.get_json():
return make_response(jsonify({'status': 401, 'message': 'empty body'}, 401))
office_data = request.get_json()
check_missingfields = validate.missing_value_validator(['name', 'type'], office_data)
if check_missingfields is not True:
return check_missingfields
check_emptyfield = validate.empty_string_validator(['name', 'type'], office_data)
if check_emptyfield is not True:
return check_emptyfield
check_if_text_only = validate.text_arrayvalidator(['name', 'type'], office_data)
if check_if_text_only is not True:
return check_if_text_only
office_name = office_data['name']
office_type = office_data['type']
res = office.edit_office(office_id, office_name, office_type)
return res | 897ee73b508caf1e3d463f68d55c030259efb6e5 | 12,903 |
def phraser_on_header(row, phraser):
"""Applies phraser on cleaned header.
To be used with methods such as: `apply(func, axis=1)` or
`apply_by_multiprocessing(func, axis=1, **kwargs)`.
Parameters
----------
row : row of pd.Dataframe
phraser : Phraser instance,
Returns
-------
pd.Series
Examples
--------
>>> import pandas as pd
>>> data = pd.read_pickle('./tutorial/data/emails_anonymized.pickle')
>>> from melusine.nlp_tools.phraser import phraser_on_header
>>> from melusine.nlp_tools.phraser import Phraser
>>> # data contains a 'clean_header' column
>>> phraser = Phraser(columns='clean_header').load(filepath)
>>> data.apply(phraser_on_header, axis=1) # apply to all samples
"""
clean_header = phraser_on_text(row["clean_header"], phraser)
return clean_header | 30b9f11607ce1769b15a1c4fda4a4bc3b0aea94b | 12,905 |
import numpy as np
def hard_nms(box_scores, iou_threshold, top_k=-1, candidate_size=200):
"""
Args:
box_scores (N, 5): boxes in corner-form and probabilities.
iou_threshold: intersection over union threshold.
top_k: keep top_k results. If k <= 0, keep all the results.
candidate_size: only consider the candidates with the highest scores.
Returns:
picked: a list of indexes of the kept boxes
"""
scores = box_scores[:, -1]
boxes = box_scores[:, :-1]
picked = []
# _, indexes = scores.sort(descending=True)
indexes = np.argsort(scores)
# indexes = indexes[:candidate_size]
indexes = indexes[-candidate_size:]
while len(indexes) > 0:
# current = indexes[0]
current = indexes[-1]
picked.append(current)
if 0 < top_k == len(picked) or len(indexes) == 1:
break
current_box = boxes[current, :]
# indexes = indexes[1:]
indexes = indexes[:-1]
rest_boxes = boxes[indexes, :]
iou = iou_of(
rest_boxes,
np.expand_dims(current_box, axis=0),
)
indexes = indexes[iou <= iou_threshold]
return box_scores[picked, :] | 44a6dbcd0db425196bd91f22907be395d270b3d8 | 12,907 |
def sma(data, span=100):
"""Computes and returns the simple moving average.
Note: the moving average is computed on all columns.
:Input:
:data: pandas.DataFrame with stock prices in columns
    :span: int (default: 100), number of days/values over which
the average is computed
:Output:
:sma: pandas.DataFrame of simple moving average
"""
return data.rolling(window=span, center=False).mean() | 8f8abf7f851424c20f6cee2ad4a01b934b7b0182 | 12,908 |
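# Example (sketch): a 3-value simple moving average; the first two rows are NaN
# because the window is not yet full.
import pandas as pd

prices = pd.DataFrame({'stock': [10, 11, 12, 13, 14]})
sma(prices, span=3)
# stock: NaN, NaN, 11.0, 12.0, 13.0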
def parse_csd(dependencies):
"""Parse C-State Dependency"""
return _CSD_factory(len(csd_data))(csd_data) | 54ab24def420fd8350e1130b98be6b4651464fb8 | 12,909 |
def field_path_get_type(root: HdlType, field_path: TypePath):
"""
Get a data type of element using field path
"""
t = root
for p in field_path:
if isinstance(p, int):
t = t.element_t
else:
assert isinstance(p, str), p
t = t.field_by_name[p].dtype
return t | d6c5f0c750149505e6da78f7b3e3ed602b8f30b0 | 12,910 |
def reverse(rule):
"""
Given a rule X, generate its black/white reversal.
"""
#
# https://www.conwaylife.com/wiki/Black/white_reversal
#
# "The black/white reversal of a pattern is the result of
# toggling the state of each cell in the universe: bringing
# dead cells to life, and killing live cells. The black/white
# reversal of a pattern is sometimes called an anti-pattern;
# for instance, the black/white reversal of a glider (in an
# appropriate rule) is referred to as an anti-glider. The
# black/white reversal of a rule is a transformation of a
# rule in such a way that the black/white reversal of any
# pattern (in the previous sense) will behave the same way
# under the new rule as the unreversed pattern did under the
# original rule."
#
# Note that some rules are their own reversals:
#
# https://www.conwaylife.com/wiki/OCA:Day_%26_Night
#
# See also:
#
# http://golly.sourceforge.net/Help/Algorithms/QuickLife.html#b0emulation
#
# a set of the allowed numbers of neighbours
neighbours = set("012345678")
# split rule at "/"
[born, survive] = rule.split("/")
# drop "B" and "S" and make sets
born = set(born[1:])
survive = set(survive[1:])
# invert neighbour counts using set difference
# - example: B0123478 --> B56, S01234678 --> S5
born_inverse = neighbours - born
survive_inverse = neighbours - survive
# use S(8-x) for the B counts and B(8-x) for the S counts
# - example: B56 --> S23, S5 --> B3
born_complement = map(complement, survive_inverse)
survive_complement = map(complement, born_inverse)
# sort and join
born_final = "B" + "".join(sorted(born_complement))
survive_final = "S" + "".join(sorted(survive_complement))
# new rule
reverse_rule = born_final + "/" + survive_final
return reverse_rule | 0451b2a49257540b8a069f4cdb96d6bff4337cb7 | 12,911 |
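# Example (sketch): black/white reversal of Conway's Life, assuming the external
# `complement` helper maps a neighbour-count digit d to str(8 - int(d)).
# reverse("B3/S23")  # -> "B0123478/S01234678"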
import torch
def hsic(k_x: torch.Tensor, k_y: torch.Tensor, centered: bool = False, unbiased: bool = True) -> torch.Tensor:
"""Compute Hilbert-Schmidt Independence Criteron (HSIC)
:param k_x: n by n values of kernel applied to all pairs of x data
:param k_y: n by n values of kernel on y data
:param centered: whether or not at least one kernel is already centered
:param unbiased: if True, use unbiased HSIC estimator of Song et al (2007), else use original estimator of Gretton et al (2005)
:return: scalar score in [0*, inf) measuring dependence of x and y
* note that if unbiased=True, it is possible to get small values below 0.
"""
if k_x.size() != k_y.size():
raise ValueError("RDMs must have the same size!")
n = k_x.size()[0]
if not centered:
h = torch.eye(n, device=k_y.device, dtype=k_y.dtype) - 1/n
k_y = h @ k_y @ h
if unbiased:
# Remove the diagonal
k_x = k_x * (1 - torch.eye(n, device=k_x.device, dtype=k_x.dtype))
k_y = k_y * (1 - torch.eye(n, device=k_y.device, dtype=k_y.dtype))
# Equation (4) from Song et al (2007)
return ((k_x *k_y).sum() - 2*(k_x.sum(dim=0)*k_y.sum(dim=0)).sum()/(n-2) + k_x.sum()*k_y.sum()/((n-1)*(n-2))) / (n*(n-3))
else:
# The original estimator from Gretton et al (2005)
return torch.sum(k_x * k_y) / (n - 1)**2 | 7c91aa5991b90f396abbf835111a456208cbc50a | 12,912 |
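# Example (sketch): HSIC from linear kernels; y is a linear function of x, so the
# score comes out well above what independent data would give.
import torch

x = torch.randn(100, 5)
y = x @ torch.randn(5, 3)        # y depends on x
k_x, k_y = x @ x.t(), y @ y.t()  # (100, 100) linear kernel matrices
score = hsic(k_x, k_y)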
def task_group_task_ui_to_app(ui_dict):
"""Converts TaskGroupTask ui dict to App entity."""
return workflow_entity_factory.TaskGroupTaskFactory().create_empty(
obj_id=ui_dict.get("obj_id"),
title=ui_dict["title"],
assignees=emails_to_app_people(ui_dict.get("assignees")),
start_date=str_to_date(ui_dict["start_date"]),
due_date=str_to_date(ui_dict["due_date"])
) | 64ad5bc96b56c2feb41417890c6f04c0f17e4691 | 12,913 |
def int_converter(value):
"""check for *int* value."""
int(value)
return str(value) | ba1b780c7886fccf1203225de249ef129561fd36 | 12,914 |
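# Example (sketch): accepts anything int() accepts and echoes it back as a string.
int_converter("42")   # -> "42"
# int_converter("abc")  # raises ValueError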
def wraps(fun, namestr="{fun}", docstr="{doc}", **kwargs):
"""Decorator for a function wrapping another.
Used when wrapping a function to ensure its name and docstring get copied
over.
Args:
fun: function to be wrapped
namestr: Name string to use for wrapped function.
docstr: Docstring to use for wrapped function.
**kwargs: additional string format values.
Return:
Wrapped function.
"""
def _wraps(f):
try:
f.__name__ = namestr.format(fun=get_name(fun), **kwargs)
f.__doc__ = docstr.format(fun=get_name(fun), doc=get_doc(fun), **kwargs)
finally:
return f
return _wraps | af05b43ee3ac2cc8595d35148b0156cd441dce3a | 12,915 |
from re import L
def test_plot_distributed_loads_fixed_left():
"""Test the plotting function for distributed loads and fixed support on the left.
Additionally, test plotting of continuity points.
"""
a = beam(L)
a.add_support(0, "fixed")
a.add_distributed_load(0, L / 2, "-q * x")
a.add_distributed_load(L / 2, L, "q * (L - x)")
a.solve()
fig, ax = a.plot(subs={"q": 1000})
return fig | 2c7c2b37e19e69a66a751bf59c3150f0b7aa3d3f | 12,916 |
import requests
import json
def post_report(coverage):
"""Post coverage report to coveralls.io."""
response = requests.post(URL, files={'json_file': json.dumps(coverage)})
try:
result = response.json()
except ValueError:
result = {'error': 'Failure to submit data. '
'Response [%(status)s]: %(text)s' % {
'status': response.status_code,
'text': response.text}}
print(result)
if 'error' in result:
return result['error']
return 0 | a33affb2791d3dbb7528ce9d4aae6a89f46d03f2 | 12,917 |
import tokenize
def parse_dialogs_per_response(lines,candid_dic,profile_size=None):
"""Parse dialogs provided in the personalized dialog tasks format.
For each dialog, every line is parsed, and the data for the dialog is made by appending
profile, user and bot responses so far, user utterance, bot answer index within candidates dictionary.
If profile is updated during the conversation due to a recognition error,
context_profile is overwritten with the new profile.
"""
data = []
context = []
context_profile = []
u = None
r = None
for line in lines:
line=line.strip()
if line:
nid, line = line.split(' ', 1)
nid = int(nid)
if nid == 1 and '\t' not in line:
# Process profile attributes
# format: isCusKnown , cusID , cusName
# format with order info: isCusKnown , cusID , cusName , prefSize , prefDrink , prefExtra (extra can be empty)
# isCusKnown is True or False
# cusID is the ID of the customer: if customer is not known, ID is 0, else starts from 1
# if isCusKnown = False then the profile will only be: False , 0
# after the customer is registered it will be False , cusID , chosenSize , chosenDrink , chosenExtra
                # cusName is the name of the customer: if the customer is not known, it is an empty string, else it is the customer's name and surname
if profile_size:
attribs = line.split(' , ')
if len(attribs) < profile_size:
# extend the attributes to the profile size so batch stacking won't be a problem
attribs.extend(['|']*(profile_size-len(attribs))) # append | for empty profile attributes, because it doesn't appear in word_index
else:
attribs = line.split(' ')
for attrib in attribs:
r=tokenize(attrib)
if r[0] != "|": # if it is a profile attribute
# Add temporal encoding, and utterance/response encoding
r.append('$r')
r.append('#'+str(nid))
context_profile.append(r)
else:
# Process conversation turns
if '\t' in line:
# Process turn containing bot response
u, r = line.split('\t')
a = candid_dic[r]
u = tokenize(u)
r = tokenize(r)
data.append((context_profile[:],context[:],u[:],a))
u.append('$u')
u.append('#'+str(nid))
r.append('$r')
r.append('#'+str(nid))
context.append(u)
context.append(r)
elif "True" in line or "False" in line:
# Process updated profile attributes (format: isCusKnown cusID cusName) - same as customer profile attributes.
# These are the true values. If the initial profile attributes are correct, there wouldn't be any updated profile attributes
# Else, it would appear after the name was given by the customer
context_profile = []
if profile_size:
attribs = line.split(' , ')
if len(attribs) < profile_size:
attribs.extend(['|']*(profile_size-len(attribs)))
else:
attribs = line.split(' ')
for attrib in attribs:
r=tokenize(attrib)
# Add temporal encoding, and utterance/response encoding
if r[0] != "|": # if it is a profile attribute
# Add temporal encoding, and utterance/response encoding
r.append('$r')
r.append('#'+str(nid))
context_profile.append(r)
else:
# Process turn without bot response
r=tokenize(line)
r.append('$r')
r.append('#'+str(nid))
context.append(r)
else:
# Clear profile and context when it is a new dialog
context=[]
context_profile=[]
return data | b919a9d970e93da9de6221f29573261f83158e49 | 12,918 |