id (stringlengths 1–7) | text (stringlengths 6–1.03M) | dataset_id (stringclasses, 1 value)
---|---|---|
1663941
|
<reponame>jfthuong/pydpf-core
"""
default_value
===============
Autogenerated DPF operator classes.
"""
from warnings import warn
from ansys.dpf.core.dpf_operator import Operator
from ansys.dpf.core.inputs import Input, _Inputs
from ansys.dpf.core.outputs import Output, _Outputs
from ansys.dpf.core.operators.specification import PinSpecification, Specification
class default_value(Operator):
"""default return value from input pin 1 to output pin 0 if there is
nothing on input pin 0.
Parameters
----------
forced_value : optional
default_value :
Examples
--------
>>> from ansys.dpf import core as dpf
>>> # Instantiate operator
>>> op = dpf.operators.utility.default_value()
>>> # Make input connections
>>> my_forced_value = dpf.Field()  # any supported DPF type (Field used here for illustration)
>>> op.inputs.forced_value.connect(my_forced_value)
>>> my_default_value = dpf.Field()  # any supported DPF type (Field used here for illustration)
>>> op.inputs.default_value.connect(my_default_value)
>>> # Instantiate operator and connect inputs in one line
>>> op = dpf.operators.utility.default_value(
... forced_value=my_forced_value,
... default_value=my_default_value,
... )
>>> # Get output data
>>> result_output = op.outputs.output()
"""
def __init__(self, forced_value=None, default_value=None, config=None, server=None):
super().__init__(name="default_value", config=config, server=server)
self._inputs = InputsDefaultValue(self)
self._outputs = OutputsDefaultValue(self)
if forced_value is not None:
self.inputs.forced_value.connect(forced_value)
if default_value is not None:
self.inputs.default_value.connect(default_value)
@staticmethod
def _spec():
description = """default return value from input pin 1 to output pin 0 if there is
nothing on input pin 0."""
spec = Specification(
description=description,
map_input_pin_spec={
0: PinSpecification(
name="forced_value",
type_names=["any"],
optional=True,
document="""""",
),
1: PinSpecification(
name="default_value",
type_names=["any"],
optional=False,
document="""""",
),
},
map_output_pin_spec={
0: PinSpecification(
name="output",
optional=False,
document="""""",
),
},
)
return spec
@staticmethod
def default_config(server=None):
"""Returns the default config of the operator.
This config can then be changed to the user needs and be used to
instantiate the operator. The Configuration allows to customize
how the operation will be processed by the operator.
Parameters
----------
server : server.DPFServer, optional
Server with channel connected to the remote or local instance. When
``None``, attempts to use the global server.
"""
return Operator.default_config(name="default_value", server=server)
@property
def inputs(self):
"""Enables to connect inputs to the operator
Returns
--------
inputs : InputsDefaultValue
"""
return super().inputs
@property
def outputs(self):
"""Enables to get outputs of the operator by evaluationg it
Returns
--------
outputs : OutputsDefaultValue
"""
return super().outputs
class InputsDefaultValue(_Inputs):
"""Intermediate class used to connect user inputs to
default_value operator.
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.utility.default_value()
>>> my_forced_value = dpf.Field()  # any supported DPF type (Field used here for illustration)
>>> op.inputs.forced_value.connect(my_forced_value)
>>> my_default_value = dpf.Field()  # any supported DPF type (Field used here for illustration)
>>> op.inputs.default_value.connect(my_default_value)
"""
def __init__(self, op: Operator):
super().__init__(default_value._spec().inputs, op)
self._forced_value = Input(default_value._spec().input_pin(0), 0, op, -1)
self._inputs.append(self._forced_value)
self._default_value = Input(default_value._spec().input_pin(1), 1, op, -1)
self._inputs.append(self._default_value)
@property
def forced_value(self):
"""Allows to connect forced_value input to the operator.
Parameters
----------
my_forced_value :
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.utility.default_value()
>>> op.inputs.forced_value.connect(my_forced_value)
>>> # or
>>> op.inputs.forced_value(my_forced_value)
"""
return self._forced_value
@property
def default_value(self):
"""Allows to connect default_value input to the operator.
Parameters
----------
my_default_value :
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.utility.default_value()
>>> op.inputs.default_value.connect(my_default_value)
>>> # or
>>> op.inputs.default_value(my_default_value)
"""
return self._default_value
class OutputsDefaultValue(_Outputs):
"""Intermediate class used to get outputs from
default_value operator.
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.utility.default_value()
>>> # Connect inputs : op.inputs. ...
>>> result_output = op.outputs.output()
"""
def __init__(self, op: Operator):
super().__init__(default_value._spec().outputs, op)
self._output = Output(default_value._spec().output_pin(0), 0, op)
self._outputs.append(self._output)
@property
def output(self):
"""Allows to get output output of the operator
Returns
----------
my_output :
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.utility.default_value()
>>> # Connect inputs : op.inputs. ...
>>> result_output = op.outputs.output()
""" # noqa: E501
return self._output
|
StarcoderdataPython
|
1766011
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-29 22:12
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('transcript', '0013_informationflow'),
]
operations = [
migrations.AlterField(
model_name='informationflow',
name='extract',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='info_flow', to='transcript.Extract'),
),
]
|
StarcoderdataPython
|
1757470
|
<filename>main_app/migrations/0007_auto_20200118_2040.py<gh_stars>100-1000
# Generated by Django 2.2.5 on 2020-01-18 15:10
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('main_app', '0006_remove_consultation_messages'),
]
operations = [
migrations.RemoveField(
model_name='doctor',
name='img',
),
migrations.RemoveField(
model_name='patient',
name='img',
),
]
|
StarcoderdataPython
|
32764
|
""" Converts some lyx files to the latex format.
Note: everything in the file is thrown away until a section or the word "stopskip" is found.
This way, all the preamble added by lyx is removed.
"""
from waflib import Logs
from waflib import TaskGen,Task
from waflib import Utils
from waflib.Configure import conf
def postprocess_lyx(src, tgt):
Logs.debug("post-processing %s into %s" % (src,tgt))
f_src = open(src,'r')
f_tgt = open(tgt,'w')
toks = ['\\documentclass','\\usepackage','\\begin{document}','\\end{document}','\\geometry','\\PassOptionsToPackage']
keep = False
for l in f_src:
this_keep = ("stopskip" in l) or ("\\section" in l) or ("\\chapter" in l)
if this_keep:
print "start to keep"
keep = keep or this_keep
local_skip = False
for tok in toks:
local_skip = local_skip or l.startswith(tok)
local_keep = False if local_skip else keep
if local_keep:
f_tgt.write(l)
f_src.close()
f_tgt.close()
return 0
def process_lyx(task):
input0 = task.inputs[0]
src = input0.abspath()
input1 = input0.change_ext("_tmp.lyx")
output0 = task.outputs[0]
tgt = output0.abspath()
print "processing lyx file %s" % src
t = task.exec_command("cp %s %s" % (input0.abspath(), input1.abspath()))
if t != 0:
return t
t = task.exec_command("%s --export pdflatex %s" % (task.env.LYX, input1.abspath()))
if t != 0:
return t
t = postprocess_lyx(input1.change_ext(".tex").abspath(),output0.abspath())
return t
class PostprocessLyx(Task.Task):
def run(self):
#Logs.debug("in post process")
return postprocess_lyx(self.inputs[0].abspath(),self.outputs[0].abspath())
@conf
def lyx2tex(bld, lyx_file):
lyx_files = Utils.to_list(lyx_file)
for a in lyx_files:
b = a.change_ext("_tmp.lyx")
c = a.change_ext("_tmp.tex")
d = a.change_ext(".tex")
bld(rule="cp ${SRC} ${TGT}",source=a,target=b)
tsk0 = bld(rule="${LYX} --export pdflatex ${SRC}",source=b,target=c)
tsk = tsk0.create_task("PostprocessLyx")
tsk.set_inputs(c)
tsk.set_outputs(d)
def configure(conf):
conf.find_program('lyx',var='LYX')
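# Illustrative wscript usage of the tool above (a sketch; paths are hypothetical and
# assume configure() has found the LYX binary):
#
#     def build(bld):
#         bld.lyx2tex(bld.path.ant_glob('chapters/*.lyx'))
#
# Each .lyx file is copied to *_tmp.lyx, exported to LaTeX via LYX, then post-processed
# so only content from the first section/"stopskip" marker onward is kept.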
|
StarcoderdataPython
|
1768687
|
<reponame>Integrative-Transcriptomics/VIPurPCA
from vipurpca import load_data
from vipurpca import PCA
if __name__ == '__main__':
Y, cov_Y, y = load_data.load_studentgrades_dataset()
print(y)
pca = PCA(Y, cov_Y, 2, compute_jacobian=True)
pca.pca_grad()
pca.compute_cov_eigenvectors()
pca.transform_data()
pca.animate(n_frames=10, labels=y, outfile="test.html")
print(pca.eigenvalues)
|
StarcoderdataPython
|
1653901
|
<reponame>JunhaLee/HRNet-Human-Pose-Estimation
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by <NAME> (<EMAIL>)
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import math
from config import cfg
from core.inference import get_max_preds
def calc_dists(preds, target, normalize):
preds = preds.astype(np.float32)
target = target.astype(np.float32)
#print('preds:' , preds)
#print('targets: ',target)
dists = np.zeros((preds.shape[1], preds.shape[0]))
dists_ = np.zeros((preds.shape[1], preds.shape[0]))
for n in range(preds.shape[0]): #128 - batch
for c in range(preds.shape[1]): # 17 - kpt class
if target[n, c, 0] > 1 and target[n, c, 1] > 1:
normed_preds = preds[n, c, :]
normed_targets = target[n, c, :]
#print('normed_preds: ', normed_preds)
#print('normed_targets', normed_targets)
dists[c, n] = np.linalg.norm(normed_preds - normed_targets)
else:
dists[c, n] = -1
#print('dists shape:' , dists.shape) --> (17, 128)
#exit(0)
#print(dists)
return dists
# normalize head size
# select the value (PCKh??)
# make "dists" value to ratios depending on head size
def dist_acc(dists, thr): # dist=(128)
''' Return percentage below threshold while ignoring values with a -1 '''
num_dist_cal = 0
correct_pred = 0
for i in range(len(dists)):
if dists[i] != -1:
num_dist_cal += 1
if(dists[i] < thr[i]):
correct_pred += 1
if num_dist_cal > 0 :
return correct_pred / num_dist_cal
else:
return -1
def accuracy(output, target, head_size,width, height, hm_type='gaussian', thr=0.5):
image_size = np.array(cfg.MODEL.IMAGE_SIZE)
heatmap_size = np.array(cfg.MODEL.HEATMAP_SIZE)
'''
Calculate accuracy according to PCK, [PCK-->PCK-h]
but uses ground truth heatmap rather than x,y locations
First value to be returned is average accuracy across 'idxs',
followed by individual accuracies
'''
#print('output.shape: ', output.shape) #--> (64,48)
idx = list(range(output.shape[1]))
norm = 1.0
#print('first_pred: ', output)
if hm_type == 'gaussian':
pred, _ = get_max_preds(output) # 128, 17, 2
#print('pred :',type(pred))
#exit()
target, _ = get_max_preds(target) # 128, 17, 2
#### EXAMPLE
#pred = [10, 10]
#head_size = [10]
#target = [15, 10]
#print('target :', target.shape)
#exit(0)
h = output.shape[2]
w = output.shape[3]
norm = np.ones((pred.shape[0], 2)) * np.array([h, w]) / 10 # 6.4, 4.8
head_h = head_size[0] / (height / h)
head_w = head_size[1] / (width / w)
threshold = np.zeros(len(head_size[0]))
for i in range(len(head_size[0])):
threshold[i] = (math.sqrt(math.pow(head_h[i],2) + math.pow(head_w[i],2)) * 0.6) * thr
#print('threshold: ', threshold)
dists = calc_dists(pred, target, norm) # (17, 128)
acc = np.zeros((len(idx) + 1)) #acc 18
avg_acc = 0
cnt = 0
for i in range(len(idx)): # by class
#acc[i + 1] = dist_acc(dists[idx[i]])
acc[i + 1] = dist_acc(dists[idx[i]],threshold)
if acc[i + 1] >= 0:
avg_acc = avg_acc + acc[i + 1]
cnt += 1
avg_acc = avg_acc / cnt if cnt != 0 else 0
if cnt != 0:
acc[0] = avg_acc
#print('acc: ' , acc)
return acc, avg_acc, cnt, pred
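# Worked example of the threshold computation above (illustrative numbers only):
# if a head box maps to head_h = 40 and head_w = 30 heatmap pixels, then
# threshold = sqrt(40**2 + 30**2) * 0.6 * thr = 50 * 0.6 * 0.5 = 15.0,
# so a prediction counts as correct in dist_acc() when its distance is below 15.0.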
|
StarcoderdataPython
|
3271536
|
<filename>GenRep/main_autoencoder.py
## Adapted for biggan based on latent-composite code
from __future__ import print_function
import argparse
import os
import random
import itertools
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torchvision.utils as vutils
from torch.nn.functional import cosine_similarity
from tensorboardX import SummaryWriter
import oyaml as yaml
# from utils import zdataset, customnet, pbar, util, masking
# from utils import customnet, pbar, util, masking
from utils import pbar, util, masking
import customenet_biggan as customnet
# import zdataset_biggan
from networks import biggan_networks
import numpy as np
import json
import sys
sys.path.append('resources/PerceptualSimilarity') # TODO: just use lpips import
import models
import pdb;
def train(opt):
print("Random Seed: ", opt.seed)
random.seed(opt.seed)
torch.manual_seed(opt.seed)
cudnn.benchmark = True
if torch.cuda.is_available() and not opt.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
# tensorboard
writer = SummaryWriter(logdir='training/runs/%s' % os.path.basename(opt.outf))
device = torch.device("cuda:0" if opt.cuda else "cpu")
batch_size = int(opt.batchSize)
# load the generator
netG = biggan_networks.load_biggan(opt.netG).to(device).eval() #for biggan, it's model_name, e.g. 'biggan-deep-256'
util.set_requires_grad(False, netG)
# print(netG)
# # find output shape
## Ali: to find output shape, we use biggan_networks.truncated_noise_sample_() instead of zdataset_biggan.z_sample_for_model()
# z = zdataset_biggan.z_sample_for_model(netG, size=1).to(device)
# # Prepare an input for netG
truncation = 1.0
zbs = 1
z = biggan_networks.truncated_noise_sample_(truncation=truncation, batch_size=zbs).to(device)
cls_vector = biggan_networks.one_hot_from_int_(77, batch_size=zbs).to(device)
out_shape = netG(z, cls_vector, truncation).shape
in_shape = z.shape
nz = in_shape[1]
# print(out_shape)
# determine encoder input dim
assert(not (opt.masked and opt.vae_like)), "specify 1 of masked or vae_like"
has_masked_input = opt.masked or opt.vae_like
input_dim = 4 if has_masked_input else 3
modify_input = customnet.modify_layers # adds the to_z layer
# load the encoder
depth = int(opt.netE_type.split('-')[-1])
nz = nz * 2 if opt.vae_like else nz
netE = customnet.CustomResNet(size=depth, halfsize=out_shape[-1]<=150,
num_classes=nz,
modify_sequence=modify_input,
channels_in=input_dim)
netE.to(device)
# print(netE)
# import pdb;
# pdb.set_trace()
last_layer_z = torch.nn.Linear(2048, 128).to(device)
last_layer_y = torch.nn.Linear(2048, opt.num_imagenet_classes).to(device)
# losses + optimizers
mse_loss = nn.MSELoss()
l1_loss = nn.L1Loss()
perceptual_loss = models.PerceptualLoss(model='net-lin', net='vgg',
use_gpu=opt.cuda)
# optimizerE = optim.Adam(netE.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
start_ep = 0
## also loss_y and optim for z and y:
ce_loss = nn.CrossEntropyLoss()
# optimizer_z = optim.Adam(last_layer_z.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
# optimizer_y = optim.Adam(last_layer_y.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
optimizerE = optim.Adam(list(netE.parameters()) + list(last_layer_z.parameters()) + list(last_layer_y.parameters()),
lr=opt.lr, betas=(opt.beta1, 0.999))
# z datasets
min_bs = min(16, batch_size)
train_loader = training_loader(truncation, batch_size, opt.seed)
test_zs = biggan_networks.truncated_noise_sample_(truncation=truncation,
batch_size=min_bs,
seed=opt.seed).to(device)
class_name_list = ['robin', 'standard_poodle', 'African_hunting_dog', 'gibbon', 'ambulance', 'boathouse', 'cinema', 'Dutch_oven',
'lampshade', 'laptop', 'mixing_bowl', 'pedestal', 'rotisserie', 'slide_rule', 'tripod', 'chocolate_sauce']
test_class_vectors = biggan_networks.one_hot_from_names_(class_name_list[0:min_bs], batch_size=min_bs).to(device)
# with open('./imagenet100_class_index.json', 'rb') as fid:
# imagenet100_dict = json.load(fid)
test_idx = [15, 267, 275, 368, 407, 449, 498, 544, 619, 620, 659, 708, 766, 798, 872, 960]
test_idx = test_idx[0:min_bs]
# load data from checkpoint
# come back
assert(not (opt.netE and opt.finetune)), "specify 1 of netE or finetune"
if opt.finetune:
checkpoint = torch.load(opt.finetune)
sd = checkpoint['state_dict']
# skip weights with dim mismatch, e.g. if you finetune from
# an RGB encoder
if sd['conv1.weight'].shape[1] != input_dim:
# skip first conv if needed
print("skipping initial conv")
sd = {k: v for k, v in sd.items() if k != 'conv1.weight'}
if sd['fc.bias'].shape[0] != nz:
# skip fc if needed
print("skipping fc layers")
sd = {k: v for k, v in sd.items() if 'fc' not in k}
netE.load_state_dict(sd, strict=False)
if opt.netE:
checkpoint = torch.load(opt.netE)
netE.load_state_dict(checkpoint['state_dict'])
last_layer_z.load_state_dict(checkpoint['state_dict_last_z'])
last_layer_y.load_state_dict(checkpoint['state_dict_last_y'])
optimizerE.load_state_dict(checkpoint['optimizer'])
start_ep = checkpoint['epoch'] + 1
epoch_batches = 1600 // batch_size
for epoch, epoch_loader in enumerate(pbar(
epoch_grouper(train_loader, epoch_batches),
total=(opt.niter-start_ep)), start_ep):
# stopping condition
if epoch > opt.niter:
break
# run a train epoch of epoch_batches batches
for step, (z_batch,) in enumerate(pbar(
epoch_loader, total=epoch_batches), 1):
z_batch = z_batch.to(device)
netE.zero_grad()
last_layer_z.zero_grad()
last_layer_y.zero_grad()
# fake_im = netG(z_batch).detach()
idx = np.random.choice(opt.num_imagenet_classes, z_batch.shape[0]).tolist()
class_vector = biggan_networks.one_hot_from_int_(idx, batch_size=z_batch.shape[0]).to(device)
fake_im = netG(z_batch, class_vector, truncation).detach()
if has_masked_input:
## come back
hints_fake, mask_fake = masking.mask_upsample(fake_im)
encoded = netE(torch.cat([hints_fake, mask_fake], dim=1)).view(z_batch.shape)
if opt.masked:
regenerated = netG(encoded, class_vector, truncation)
elif opt.vae_like:
sample = torch.randn_like(encoded[:, nz//2:, :, :])
encoded_mean = encoded[:, nz//2:, :, :]
encoded_sigma = torch.exp(encoded[:, :nz//2, :, :])
reparam = encoded_mean + encoded_sigma * sample
regenerated = netG(reparam, class_vector, truncation)
encoded = encoded_mean # just use mean in z loss
else:
# standard RGB encoding
encoded = netE(fake_im)
z_pred = last_layer_z(encoded)
y_pred = last_layer_y(encoded)
regenerated = netG(z_pred, class_vector, truncation)
# compute loss
loss_y = ce_loss(y_pred, torch.tensor(idx, dtype=torch.int64).to(device))
loss_z = cor_square_error_loss(z_pred, z_batch)
loss_mse = mse_loss(regenerated, fake_im)
loss_perceptual = perceptual_loss.forward(
regenerated, fake_im).mean()
loss = (opt.lambda_z * loss_y + opt.lambda_z * loss_z + opt.lambda_mse * loss_mse
+ opt.lambda_lpips * loss_perceptual)
# optimize
loss.backward()
optimizerE.step()
# optimizer_z.step()
# optimizer_y.step()
# send losses to tensorboard
if step % 20 == 0:
total_batches = epoch * epoch_batches + step
writer.add_scalar('loss/train_y', loss_y, total_batches)
writer.add_scalar('loss/train_z', loss_z, total_batches)
writer.add_scalar('loss/train_mse', loss_mse, total_batches)
writer.add_scalar('loss/train_lpips', loss_perceptual,
total_batches)
writer.add_scalar('loss/train_total', loss, total_batches)
# import pdb;
# pdb.set_trace()
# run the fixed test zs for visualization
netE.eval()
last_layer_z.eval()
last_layer_y.eval()
with torch.no_grad():
fake_im = netG(test_zs, test_class_vectors, truncation)
if has_masked_input:
## come back
hints_fake, mask_fake = masking.mask_upsample(fake_im)
encoded = netE(torch.cat([hints_fake, mask_fake], dim=1)).view(test_zs.shape)
if opt.masked:
regenerated = netG(encoded, test_class_vectors, truncation)
elif opt.vae_like:
sample = torch.randn_like(encoded[:, nz//2:, :, :])
encoded_mean = encoded[:, nz//2:, :, :]
encoded_sigma = torch.exp(encoded[:, :nz//2, :, :])
reparam = encoded_mean + encoded_sigma * sample
regenerated = netG(reparam, test_class_vectors, truncation)
encoded = encoded_mean # just use mean in z loss
else:
encoded = netE(fake_im)
pred_z = last_layer_z(encoded)
pred_y = last_layer_y(encoded)
regenerated = netG(pred_z, test_class_vectors, truncation)
# compute loss
loss_y = ce_loss(pred_y, torch.tensor(test_idx, dtype=torch.int64).to(device))
loss_z = cor_square_error_loss(pred_z, test_zs)
loss_mse = mse_loss(regenerated, fake_im)
loss_perceptual = perceptual_loss.forward(
regenerated, fake_im).mean()
loss = (opt.lambda_z * loss_y + opt.lambda_z * loss_z + opt.lambda_mse * loss_mse
+ opt.lambda_lpips * loss_perceptual)
loss = (opt.lambda_z * loss_y + opt.lambda_z * loss_z)
# send to tensorboard
writer.add_scalar('loss/test_y', loss_y, epoch)
writer.add_scalar('loss/test_z', loss_z, epoch)
writer.add_scalar('loss/test_mse', loss_mse, epoch)
writer.add_scalar('loss/test_lpips', loss_perceptual,
epoch)
writer.add_scalar('loss/test_total', loss, epoch)
if has_masked_input:
grid = vutils.make_grid(
torch.cat((fake_im, hints_fake, regenerated)), nrow=8,
normalize=True, scale_each=(-1, 1))
else:
grid = vutils.make_grid(
torch.cat((fake_im, regenerated)), nrow=8,
normalize=True, scale_each=(-1, 1))
writer.add_image('Image', grid, epoch)
netE.train()
# do checkpointing
if epoch % 1000 == 0 or epoch == opt.niter:
sd = {
'state_dict': netE.state_dict(),
'state_dict_last_z': last_layer_z.state_dict(),
'state_dict_last_y': last_layer_y.state_dict(),
'optimizer': optimizerE.state_dict(),
'epoch': epoch
}
torch.save(sd, '%s/netE_epoch_%d.pth' % (opt.outf, epoch))
def cor_square_error_loss(x, y, eps=1e-8):
# Analogous to MSE, but in terms of Pearson's correlation
return (1.0 - cosine_similarity(x, y, eps=eps)).mean()
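# Quick sanity check for the loss above (assumes 2-D inputs of shape [batch, dim]):
# identical rows give a loss of 0, opposite rows give the maximum of 2.
#     x = torch.tensor([[1.0, 0.0]]); cor_square_error_loss(x, x)    # -> 0.0
#     cor_square_error_loss(x, -x)                                   # -> 2.0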
def training_loader(truncation, batch_size, global_seed=0):
'''
Returns an infinite generator that runs through randomized z
batches, forever.
'''
g_epoch = 1
while True:
z_data = biggan_networks.truncated_noise_dataset(truncation=truncation,
batch_size=10000,
seed=g_epoch + global_seed)
dataloader = torch.utils.data.DataLoader(
z_data,
shuffle=False,
batch_size=batch_size,
num_workers=10,
pin_memory=True)
for batch in dataloader:
yield batch
g_epoch += 1
def epoch_grouper(loader, epoch_size, num_epochs=None):
'''
To use with the infinite training loader: groups the training data
batches into epochs of the given size.
'''
it = iter(loader)
epoch = 0
while True:
chunk_it = itertools.islice(it, epoch_size)
try:
first_el = next(chunk_it)
except StopIteration:
return
yield itertools.chain((first_el,), chunk_it)
epoch += 1
if num_epochs is not None and epoch >= num_epochs:
return
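# Sketch of how the two generators above compose in train() (sizes are illustrative):
#     loader = training_loader(truncation=1.0, batch_size=8)            # infinite z batches
#     for epoch, epoch_loader in enumerate(epoch_grouper(loader, 200)):
#         for (z_batch,) in epoch_loader:                               # 200 batches per epoch
#             pass                                                      # one training step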
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--num_imagenet_classes', type=int, default=1000,
help='e.g., 100 or 1000')
parser.add_argument('--netE_type', type=str, default='resnet-50',
help='type of encoder architecture')
parser.add_argument('--batchSize', type=int, default=8, help='input batch size')
parser.add_argument('--niter', type=int, default=2000, help='number of epochs to train for')
parser.add_argument('--lr', type=float, default=0.0002, help='learning rate, default=0.0002')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
parser.add_argument('--cuda', action='store_true', help='enables cuda')
parser.add_argument('--netG', default='', help="generator to load")
parser.add_argument('--netE', default='', help="path to netE (to continue training)")
parser.add_argument('--outf', default='./resnet50_zy_pix', help='folder to output model checkpoints')
parser.add_argument('--seed', default=0, type=int, help='manual seed')
parser.add_argument('--lambda_z', default=1.0, type=float, help='loss weighting')
parser.add_argument('--lambda_mse', default=1.0, type=float, help='loss weighting')
parser.add_argument('--lambda_lpips', default=1.0, type=float, help='loss weighting')
parser.add_argument('--finetune', type=str, default='',
help="finetune from these weights")
parser.add_argument('--masked', action='store_true', help="train with masking")
parser.add_argument('--vae_like', action='store_true',
help='train with masking, predict mean and sigma')
opt = parser.parse_args()
opt.outf = '{}_{}'.format(opt.outf, opt.num_imagenet_classes)
print(opt)
assert opt.netE_type == 'resnet-50'
opt.outf = opt.outf.format(**vars(opt))
os.makedirs(opt.outf, exist_ok=True)
# save options
with open(os.path.join(opt.outf, 'optE.yml'), 'w') as f:
yaml.dump(vars(opt), f, default_flow_style=False)
train(opt)
|
StarcoderdataPython
|
3213743
|
with open("p022_names.txt", 'rt', encoding='utf8') as f:
my_file = f.read()
name_lst = [item.strip(r'"') for item in my_file.split(',')]
name_lst = sorted(name_lst)
# print(name_lst[:10])
alp_score_dict = dict(zip("ABCDEFGHIJKLMNOPQRSTUVWXYZ", range(1,len("ABCDEFGHIJKLMNOPQRSTUVWXYZ")+1)))
# print(alp_score_dict)
tot_score = 0
for idx, item in enumerate(name_lst):
tot_score += (idx+1)*sum(map(lambda x:alp_score_dict[x], item))
print(tot_score)
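# Sanity check from the problem statement (assuming the standard p022_names.txt):
# "COLIN" scores 3 + 15 + 12 + 9 + 14 = 53 and sorts to position 938, so it
# contributes 938 * 53 = 49714 to tot_score.
# assert sum(alp_score_dict[c] for c in 'COLIN') == 53 and name_lst[937] == 'COLIN'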
|
StarcoderdataPython
|
3265549
|
<reponame>timtim17/myuw<filename>myuw/dao/__init__.py<gh_stars>0
# Copyright 2022 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
import logging
import os
from django.conf import settings
from uw_sws import DAO as SWS_DAO
from userservice.user import (
UserService, get_user, get_original_user)
from myuw.util.settings import get_disable_actions_when_override
logger = logging.getLogger(__name__)
def get_netid_of_current_user(request=None):
"""
return the over-ride user if impersonated
"""
if request:
return get_user(request)
return UserService().get_user()
def get_netid_of_original_user(request=None):
"""
return the actual authenticated user
"""
if request:
return get_original_user(request)
return UserService().get_original_user()
def get_userids(request=None):
"""
Return a dict of {orig_netid: netid,
acting_netid: netid,
is_override: True/False}
"""
user = None
orig_userid = None
try:
user = get_netid_of_current_user(request)
orig_userid = get_netid_of_original_user(request)
except Exception:
pass
return {'acting_netid': user,
'orig_netid': orig_userid,
'is_override': (user is not None and
orig_userid is not None and
user != orig_userid)}
def is_action_disabled():
"""
return True if overriding and
MYUW_DISABLE_ACTIONS_WHEN_OVERRIDE is True
"""
overrider = UserService().get_override_user()
disable_actions_when_override = get_disable_actions_when_override()
return disable_actions_when_override and overrider is not None
def is_using_file_dao():
return SWS_DAO.get_implementation().is_mock()
def _get_file_path(settings_key, filename):
file_path = getattr(settings, settings_key, None)
if file_path:
return os.path.join(file_path, filename)
current_dir = os.path.dirname(os.path.realpath(__file__))
file_path = os.path.abspath(os.path.join(current_dir,
"..", "data",
filename))
return file_path
def log_err(logger, msg_str, stacktrace, request):
logger.error(
{**get_userids(request=request),
**{'at': msg_str,
'err': stacktrace.format_exc(chain=False).splitlines()}})
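# Example of the dict shape returned by get_userids() during an override session
# (netids below are hypothetical):
#     {'acting_netid': 'javerage', 'orig_netid': 'support1', 'is_override': True}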
|
StarcoderdataPython
|
3351578
|
# Copyright 2018 Huawei Technologies Co.,LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import mock
import netaddr
from oslo_db import exception as db_exc
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from oslo_context import context
from cyborg import db
from cyborg.common import exception
from cyborg import objects
from cyborg.objects import base
from cyborg import tests as test
from cyborg.tests.unit import fake_physical_function
from cyborg.tests.unit import fake_virtual_function
from cyborg.tests.unit import fake_accelerator
from cyborg.tests.unit.objects import test_objects
from cyborg.tests.unit.db.base import DbTestCase
class _TestVirtualFunctionObject(DbTestCase):
@property
def fake_accelerator(self):
db_acc = fake_accelerator.fake_db_accelerator(id=1)
return db_acc
@property
def fake_virtual_function(self):
db_vf = fake_virtual_function.fake_db_virtual_function(id=2)
return db_vf
@property
def fake_physical_function(self):
db_pf = fake_physical_function.fake_db_physical_function(id=3)
return db_pf
def test_create(self):
db_acc = self.fake_accelerator
db_vf = self.fake_virtual_function
acc = objects.Accelerator(context=self.context,
**db_acc)
acc.create(self.context)
acc_get = objects.Accelerator.get(self.context, acc.uuid)
vf = objects.VirtualFunction(context=self.context,
**db_vf)
vf.accelerator_id = acc_get.id
vf.create(self.context)
self.assertEqual(db_vf['uuid'], vf.uuid)
def test_get(self):
db_vf = self.fake_virtual_function
db_acc = self.fake_accelerator
acc = objects.Accelerator(context=self.context,
**db_acc)
acc.create(self.context)
acc_get = objects.Accelerator.get(self.context, acc.uuid)
vf = objects.VirtualFunction(context=self.context,
**db_vf)
vf.accelerator_id = acc_get.id
vf.create(self.context)
vf_get = objects.VirtualFunction.get(self.context, vf.uuid)
self.assertEqual(vf_get.uuid, vf.uuid)
def test_get_by_filter(self):
db_acc = self.fake_accelerator
db_pf = self.fake_physical_function
db_vf = self.fake_virtual_function
acc = objects.Accelerator(context=self.context,
**db_acc)
acc.create(self.context)
acc_get = objects.Accelerator.get(self.context, acc.uuid)
pf = objects.PhysicalFunction(context=self.context,
**db_pf)
pf.accelerator_id = acc_get.id
pf.create(self.context)
pf_get = objects.PhysicalFunction.get(self.context, pf.uuid)
vf = objects.VirtualFunction(context=self.context,
**db_vf)
vf.accelerator_id = pf_get.accelerator_id
vf.create(self.context)
vf_get = objects.VirtualFunction.get(self.context, vf.uuid)
pf_get.add_vf(vf_get)
pf_get.save(self.context)
query = {"vendor": pf_get['vendor']}
vf_get_list = objects.VirtualFunction.get_by_filter(self.context,
query)
self.assertEqual(len(vf_get_list), 1)
self.assertEqual(vf_get_list[0].uuid, vf.uuid)
self.assertEqual(objects.VirtualFunction, type(vf_get_list[0]))
self.assertEqual(1, 1)
def test_get_by_filter2(self):
db_acc = self.fake_accelerator
db_pf = self.fake_physical_function
db_vf = self.fake_virtual_function
db_pf2 = self.fake_physical_function
db_vf2 = self.fake_virtual_function
acc = objects.Accelerator(context=self.context,
**db_acc)
acc.create(self.context)
acc_get = objects.Accelerator.get(self.context, acc.uuid)
pf = objects.PhysicalFunction(context=self.context,
**db_pf)
pf.accelerator_id = acc_get.id
pf.create(self.context)
pf_get = objects.PhysicalFunction.get(self.context, pf.uuid)
pf2 = objects.PhysicalFunction(context=self.context,
**db_pf2)
pf2.accelerator_id = acc_get.id
pf2.create(self.context)
pf_get2 = objects.PhysicalFunction.get(self.context, pf2.uuid)
query = {"uuid": pf2.uuid}
pf_get_list = objects.PhysicalFunction.get_by_filter(self.context,
query)
self.assertEqual(1, 1)
def test_save(self):
db_vf = self.fake_virtual_function
db_acc = self.fake_accelerator
acc = objects.Accelerator(context=self.context,
**db_acc)
acc.create(self.context)
acc_get = objects.Accelerator.get(self.context, acc.uuid)
vf = objects.VirtualFunction(context=self.context,
**db_vf)
vf.accelerator_id = acc_get.id
vf.create(self.context)
vf_get = objects.VirtualFunction.get(self.context, vf.uuid)
vf_get.host = 'test_save'
vf_get.save(self.context)
vf_get_2 = objects.VirtualFunction.get(self.context, vf.uuid)
self.assertEqual(vf_get_2.host, 'test_save')
def test_destroy(self):
db_vf = self.fake_virtual_function
db_acc = self.fake_accelerator
acc = objects.Accelerator(context=self.context,
**db_acc)
acc.create(self.context)
acc_get = objects.Accelerator.get(self.context, acc.uuid)
vf = objects.VirtualFunction(context=self.context,
**db_vf)
vf.accelerator_id = acc_get.id
vf.create(self.context)
vf_get = objects.VirtualFunction.get(self.context, vf.uuid)
self.assertEqual(db_vf['uuid'], vf_get.uuid)
vf_get.destroy(self.context)
self.assertRaises(exception.DeployableNotFound,
objects.VirtualFunction.get, self.context,
vf_get['uuid'])
class TestVirtualFunctionObject(test_objects._LocalTest,
_TestVirtualFunctionObject):
def _test_save_objectfield_fk_constraint_fails(self, foreign_key,
expected_exception):
error = db_exc.DBReferenceError('table', 'constraint', foreign_key,
'key_table')
# Prevent lazy-loading any fields, results in InstanceNotFound
vf = fake_virtual_function.virtual_function_obj(self.context)
fields_with_save_methods = [field for field in vf.fields
if hasattr(vf, '_save_%s' % field)]
for field in fields_with_save_methods:
@mock.patch.object(vf, '_save_%s' % field)
@mock.patch.object(vf, 'obj_attr_is_set')
def _test(mock_is_set, mock_save_field):
mock_is_set.return_value = True
mock_save_field.side_effect = error
vf.obj_reset_changes(fields=[field])
vf._changed_fields.add(field)
self.assertRaises(expected_exception, vf.save)
vf.obj_reset_changes(fields=[field])
_test()
|
StarcoderdataPython
|
3293861
|
DYN_TYPE = 76
BANNER_TYPE = 11
PLAINTEXT_TYPE = 255
|
StarcoderdataPython
|
164253
|
import numpy as np
class RunningScore(object):
def __init__(self, n_classes):
self.n_classes = n_classes
self.confusion_matrix = np.zeros((n_classes, n_classes))
@staticmethod
def _fast_hist(label_true, label_pred, n_class):
mask = (label_true >= 0) & (label_true < n_class)
hist = np.bincount(n_class * label_true[mask].astype(int) + label_pred[mask],
minlength=n_class**2).reshape(n_class, n_class)
return hist
def update(self, label_trues, label_preds):
for lt, lp in zip(label_trues, label_preds):
self.confusion_matrix += self._fast_hist(lt.flatten(), lp.flatten(), self.n_classes)
def get_scores(self):
"""Returns accuracy score evaluation result.
- overall accuracy
- mean accuracy
- mean IU
- fwavacc
"""
hist = self.confusion_matrix
tp = np.diag(hist)
sum_a1 = hist.sum(axis=1)
acc = tp.sum() / (hist.sum() + np.finfo(np.float32).eps)
acc_cls = tp / (sum_a1 + np.finfo(np.float32).eps)
acc_cls = np.nanmean(acc_cls)
iu = tp / (sum_a1 + hist.sum(axis=0) - tp + np.finfo(np.float32).eps)
mean_iu = np.nanmean(iu)
freq = sum_a1 / (hist.sum() + np.finfo(np.float32).eps)
fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
cls_iu = dict(zip(range(self.n_classes), iu))
return {'Overall_Acc': acc,
'Mean_Acc': acc_cls,
'FreqW_Acc': fwavacc,
'Mean_IoU': mean_iu}, cls_iu
def reset(self):
self.confusion_matrix = np.zeros((self.n_classes, self.n_classes))
if __name__ == "__main__":
n_class = 2
score = RunningScore(n_class)
label_true = np.array([1, 0, 0, 1, 1, 0, 1, 0, 1, 0])
label_pred = np.array([1, 1, 0, 1, 0, 0, 1, 1, 0, 0])
score.update(label_true, label_pred)
print(score.confusion_matrix)
|
StarcoderdataPython
|
1792798
|
# pylint: disable=redefined-outer-name, unused-argument
from __future__ import print_function
import functools
import os
import random
from uuid import uuid4
import pytest
import slash
def test_normal_sorting(test_dir, names):
assert get_file_names(load(test_dir)) == names
def test_custom_ordering(test_dir, names, indices):
@slash.hooks.tests_loaded.register # pylint: disable=no-member
def tests_loaded(tests): # pylint: disable=unused-variable
for index, (test, new_index) in enumerate(zip(tests, indices)):
assert test.__slash__.file_path.endswith(names[index]), 'Tests are loaded in incorrect order'
test.__slash__.set_sort_key(new_index)
assert get_file_names(load(test_dir)) == _sorted_by_indices(names, indices)
################################################################################
## Utils and fixtures
def get_file_names(tests):
returned = []
for t in tests:
file_path = t.__slash__.file_path
assert os.path.isabs(file_path)
returned.append(os.path.join(
os.path.basename(os.path.dirname(file_path)),
os.path.basename(file_path)))
return returned
def load(source):
with slash.Session():
return slash.loader.Loader().get_runnables([source])
@pytest.fixture
def test_dir(tmpdir, names, indices):
returned = tmpdir.join(str(uuid4()))
indices = range(len(names))[::-1]
for index, name in zip(indices, names):
with returned.join(name).open('w', ensure=True) as f:
_print = functools.partial(print, file=f)
_print('import slash')
_print('@slash.tag("index", {})'.format(index))
_print('def test_something():')
_print(' pass')
return str(returned)
@pytest.fixture
def names():
return [os.path.normpath(p) for p in ['a/test_a_b.py', 'a/test_b_a.py', 'b/test_a_a.py', 'b/test_a_b.py']]
def _sorted_by_indices(items, indices):
returned = [None for _ in items]
for index, item in zip(indices, items):
returned[index] = item
return returned
def _randomized(l):
indices = list(range(len(l)))
random.shuffle(indices)
return [l[index] for index in indices]
@pytest.fixture(params=[reversed, _randomized])
def indices(request, names):
return [names.index(name) for name in request.param(names)]
|
StarcoderdataPython
|
3238899
|
<filename>services.py
from anthill.framework.utils.urls import reverse, build_absolute_uri
from anthill.platform.services import PlainService
class Service(PlainService):
"""Anthill default service."""
async def set_messenger_url(self):
path = reverse('messenger')
host_url = self.app.registry_entry['external']
url = build_absolute_uri(host_url, path)
self.settings.update(messenger_url=url)
|
StarcoderdataPython
|
160157
|
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
Property data types.
Ability to import, etc. from text files is
part of the methods in the type.
Import property database from textfile(s):
* See :meth:`PropertyData.from_csv`, for the expected format for data.
* See :meth:`PropertyMetadata()` for the expected format for metadata.
"""
# stdlib
import csv
import json
import logging
# third-party
try:
import pandas as pd
import numpy as np
except ImportError:
np, pd = None, None
# local
from .util import get_file
from . import tabular
__author__ = '<NAME>'
_log = logging.getLogger(__name__)
class AddedCSVColumnError(KeyError):
"""Error for :meth:PropertyData.add_csv()
"""
def __init__(self, names, how_bad, column_type=''):
ctype = column_type + ' ' if column_type else ''
if len(names) == 1:
msg = 'Added CSV data {} {}column "{}"'.format(
how_bad, ctype, list(names)[0]
)
else:
msg = 'Added CSV data {} {}columns: {}'.format(
how_bad, ctype, ', '.join(list(names))
)
KeyError.__init__(self, msg)
class Fields(tabular.Fields):
"""Constants for fields.
"""
# Values for "type" field
C_STATE, C_PROP = 'state', 'property'
class PropertyTable(tabular.Table):
"""Property data and metadata together (at last!)
"""
def __init__(self, data=None, **kwargs):
"""Constructor.
"""
if isinstance(data, PropertyData):
pdata = data
elif isinstance(data, list):
pdata = PropertyData(data)
else:
raise TypeError('list or PropertyData object required')
super(PropertyTable, self).__init__(data=pdata, **kwargs)
@classmethod
def load(cls, file_or_path, validate=True):
"""Create PropertyTable from JSON input.
Args:
file_or_path (file or str): Filename or file object
from which to read the JSON-formatted data.
validate (bool): If true, apply validation to input JSON data.
Example input::
{
"meta": [
{"datatype": "MEA",
"info": "J. Chem. Eng. Data, 2009, Vol 54, pg. 306-310",
"notes": "r is MEA weight fraction in aqueous soln.",
"authors": "<NAME>., <NAME>., <NAME>.",
"title": "Density and Viscosity of ..."}
],
"data": [
{"name": "Viscosity Value",
"units": "mPa-s",
"values": [2.6, 6.2],
"error_type": "absolute",
"errors": [0.06, 0.004],
"type": "property"},
{"name": "r",
"units": "",
"values": [0.2, 1000],
"type": "state"}
]
}
"""
fp = get_file(file_or_path)
d = json.load(fp)
PropertyTable._validate_json(d)
metalist = d[Fields.META]
meta = [PropertyMetadata(m) for m in metalist]
data = PropertyData(d[Fields.DATA])
tbl = PropertyTable(data=data)
for m in meta:
tbl.add_metadata(m)
return tbl
class PropertyData(tabular.TabularData):
"""Class representing property data that knows how to
construct itself from a CSV file.
You can build objects from multiple CSV files as well.
See the property database section of the API docs for
details, or read the code in :meth:`add_csv` and the
tests in :mod:`idaes_dmf.propdb.tests.test_mergecsv`.
"""
embedded_units = r'(.*)\((.*)\)'
def __init__(self, data):
"""Construct new object from input list.
Example input::
[{
"name": "Density Data",
"units": "g/cm^3",
"values": [1.0053, 1.0188, .., ],
"errors": [.00005, .., .00005],
"error_type": "absolute",
"type": "property"
}, ...etc...]
Args:
data (list): Input data columns
Returns:
(PropertyData) New instance.
"""
super(PropertyData, self).__init__(data, error_column=True)
self._nstates = len(self.states)
@property
def states(self):
return [c for c in self.columns if self._is_state(c)]
@property
def properties(self):
return [c for c in self.columns if self._is_prop(c)]
@staticmethod
def _is_state(c):
return c[Fields.COLTYPE] == Fields.C_STATE
@staticmethod
def _is_prop(c):
return c[Fields.COLTYPE] == Fields.C_PROP
def names(self, states=True, properties=True):
"""Get column names.
Args:
states (bool): If False, exclude "state" data, e.g. the
ambient temperature, and only
include measured property values.
properties (bool): If False, excluse property data
Returns:
list[str]: List of column names.
"""
result = []
if states:
result.extend([v[Fields.DATA_NAME] for v in self.states])
if properties:
result.extend([v[Fields.DATA_NAME] for v in self.properties])
return result
def is_state_column(self, index):
"""Whether given column is state.
Args:
index (int): Index of column
Returns:
(bool) State or property and the column number.
Raises:
IndexError: No column at that index.
"""
col = self.columns[index]
return self._is_state(col)
def is_property_column(self, index):
"""Whether given column is a property. See :meth:`is_state_column`."""
return not self.is_state_column(index)
def as_arr(self, states=True):
"""Export property data as arrays.
Args:
states (bool): If False, exclude "state" data, e.g. the
ambient temperature, and only
include measured property values.
Returns:
(values[M,N], errors[M,N]) Two arrays of floats,
each with M columns having N values.
Raises:
ValueError if the columns are not all the same length
"""
n, values, errors = None, [], []
# extract state columns
if states:
for v in self.states:
vals = v[Fields.DATA_VALUES]
if n is None:
n = len(vals)
elif len(vals) != n:
raise ValueError(
'State values "{}" length {} != {}'.format(
v[Fields.DATA_NAME], len(vals), n
)
)
values.append(vals)
errors.append([0] * len(vals))
# extract property columns
for v in self.properties:
vals = v[Fields.DATA_VALUES]
if n is None:
n = len(vals)
elif len(vals) != n:
raise ValueError(
'Property values "{}" length {} != {}'.format(
v[Fields.DATA_NAME], len(vals), n
)
)
values.append(v[Fields.DATA_VALUES])
errors.append(v[Fields.DATA_ERRORS])
return values, errors
def values_dataframe(self, states=True):
"""Get values as a dataframe.
Args:
states (bool): see :meth:`names()`.
Returns:
(pd.DataFrame) Pandas dataframe for values.
Raises:
ImportError: If `pandas` or `numpy` were never
successfully imported.
"""
return self._get_prop_dataframe(Fields.DATA_VALUES, states)
def errors_dataframe(self, states=False):
"""Get errors as a dataframe.
Args:
states (bool): If False, exclude state data.
This is the default, because states do not
normally have associated error information.
Returns:
pd.DataFrame: Pandas dataframe for values.
Raises:
ImportError: If `pandas` or `numpy` were never
successfully imported.
"""
return self._get_prop_dataframe(Fields.DATA_ERRORS, states)
def _get_prop_dataframe(self, field, states):
self._check_pandas_import()
a1, names = [], []
if states:
a1 = [v[field] for v in self.states]
names = [v[Fields.DATA_NAME] for v in self.states]
a1.extend([v[field] for v in self.properties])
names.extend([v[Fields.DATA_NAME] for v in self.properties])
a2 = np.array(a1).transpose()
return pd.DataFrame(a2, columns=names)
@staticmethod
def from_csv(file_or_path, nstates=0):
"""Import the CSV data.
Expected format of the files is a header plus data rows.
Header: Index-column, Column-name(1), Error-column(1), \
Column-name(2), Error-column(2), ..
Data: <index>, <val>, <errval>, <val>, <errval>, ..
Column-name is in the format "Name (units)"
Error-column is in the format "<type> Error", where "<type>" is
the error type.
Args:
file_or_path (file-like or str): Input file
nstates (int): Number of state columns, appearing
first before property columns.
Returns:
PropertyData: New properties instance
"""
input_file = get_file(file_or_path)
csv_file = csv.reader(input_file)
row = next(csv_file)
names, data = PropertyData._prop_parse_csv_headers(nstates, row)
for row in csv_file:
# print('@@ parse csv row: {}'.format(row))
PropertyData._parse_csv_row(data, row, error_column=True)
obj = PropertyData(data)
return obj
def add_csv(self, file_or_path, strict=False):
"""Add to existing object from a new CSV file.
Depending on the value of the `strict` argument (see
below), the new file may or may not have the same
properties as the object -- but it always needs to have
the same number of state columns, and in the same order.
.. note:: Data that is "missing" because of property columns in
one CSV and not the other will be filled with `float(nan)` values.
Args:
file_or_path (file or str): Input file. This should be in exactly
the same format as expected by :meth:from_csv().
strict (bool): If true, require that the columns in the input
CSV match columns in this object. Otherwise, only require
that *state* columns in input CSV match columns in this
object. New property columns are added, and matches
to existing property columns will append the data.
Raises:
AddedCSVColumnError: If the new CSV column headers are not the
same as the ones in this object.
Returns:
(int) Number of added rows
"""
nstates = self._nstates
input_file = get_file(file_or_path)
csv_file = csv.reader(input_file)
# Parse the header
row = next(csv_file)
hdr_names, hdr_data = PropertyData._prop_parse_csv_headers(nstates, row)
# print('@@ add_csv, column names = {}, data columns = {}'
# .format(hdr_names, self.names()))
# Check that set of keys in new data is the same
cur_keys = set(self.names())
new_keys = set(hdr_names)
# This is used to re-order input data
rowmap = None
if strict:
if cur_keys > new_keys:
missing = cur_keys - new_keys
raise AddedCSVColumnError(missing, 'is missing')
elif new_keys > cur_keys:
extra = new_keys - cur_keys
raise AddedCSVColumnError(extra, 'has extra')
elif new_keys != cur_keys:
extra = new_keys - cur_keys
missing = cur_keys - new_keys
namelist = (
'(' + ','.join(extra) + ')',
'instead of',
'(' + ','.join(missing) + ')',
)
raise AddedCSVColumnError(namelist, 'has different')
else:
# check that all states are in common
hdr_states = filter(self._is_state, hdr_data)
new_states = [s[Fields.DATA_NAME] for s in hdr_states]
new_states = set(new_states)
cur_states = set(self.names(properties=False))
if new_states != cur_states:
extra = new_states - cur_states
missing = cur_states - new_states
if extra and missing:
namelist = (
'(' + ','.join(extra) + ')',
'instead of',
'(' + ','.join(missing) + ')',
)
raise AddedCSVColumnError(
namelist, 'has different', column_type='state'
)
elif extra:
raise AddedCSVColumnError(extra, 'has extra', column_type='state')
elif missing:
raise AddedCSVColumnError(
missing, 'is missing', column_type='state'
)
else:
raise RuntimeError('unexpected branch')
# check that at least one property is in common
new_prop = new_keys - new_states
if not new_prop:
return 0 # no data
cur_prop = set(self.names(states=False))
# Add columns for all properties only found on the input,
# and initialize values to a list of NaN's as long as the
# current table, so data in all fields will be the same length.
# Initialize rowmap with mapping for state columns
rowmap = [-1] * len(hdr_names)
idx = 0
for i, s in enumerate(hdr_data):
if s[Fields.COLTYPE] == Fields.C_PROP:
continue
rowmap[i] = idx
idx += 1
nan_list = [float('nan')] * self.num_rows
idx = 0
for i, value in enumerate(hdr_data):
if value[Fields.COLTYPE] == Fields.C_STATE:
continue
name = value[Fields.DATA_NAME]
if name not in cur_prop:
value[Fields.DATA_NAME] = name
value[Fields.DATA_VALUES] = nan_list[:]
value[Fields.DATA_ERRORS] = nan_list[:]
value[Fields.COLTYPE] = Fields.C_PROP
self._data.append(value)
rowmap[i] = len(self.properties) - 1
else:
rowmap[i] = idx + self._nstates
idx += 1
# print("@@ rowmap = {}".format(rowmap))
# Parse the new data
num_added = 0
new_rowlen = 1 + 2 * len(self.names())
for row in csv_file:
if rowmap:
# Re-order according to the rowmap.
# By initializing with NaN, any columns not in the
# input, but in the current data, will be replaced with NaN
# values.
row2 = [float('nan')] * new_rowlen
# print('@@ row={} row2-init={}'.format(row, row2))
for i, j in enumerate(rowmap):
row2[j * 2 + 1] = row[i * 2 + 1] # value
row2[j * 2 + 2] = row[i * 2 + 2] # error
row = row2
self._parse_csv_row(self._data, row, error_column=True)
num_added += 1
self._nrows += 1
return num_added
@classmethod
def _prop_parse_csv_headers(cls, nstates, headers):
"""Parse a row of CSV headers which are pairs
of columns like "<name> [(units)], <error-type> Error".
Returns:
(names, data). Names is a list of all the column names.
Data is a dict with two keys, "properties" and "states".
Each value will be a list of property/state objects.
"""
names, data = cls._parse_csv_headers(headers, error_column=True)
for i in range(0, nstates):
data[i][Fields.COLTYPE] = Fields.C_STATE
for i in range(nstates, len(data)):
data[i][Fields.COLTYPE] = Fields.C_PROP
return names, data
class PropertyMetadata(tabular.Metadata):
"""Class to import property metadata.
"""
pass
class PropertyColumn(tabular.Column):
"""Data column for a property.
"""
type_name = 'Property'
def __init__(self, name, data):
tabular.Column.__init__(self, name, data)
self.errors = data[Fields.DATA_ERRORS]
self.error_type = data[Fields.DATA_ERRTYPE]
def data(self):
return {
Fields.DATA_UNITS: self.units,
Fields.DATA_VALUES: self.values,
Fields.DATA_ERRORS: self.errors,
Fields.DATA_ERRTYPE: self.error_type,
}
class StateColumn(tabular.Column):
"""Data column for a state.
"""
type_name = 'State'
def __init__(self, name, data):
tabular.Column.__init__(self, name, data)
self.errors = [0.0] * len(self)
self.error_type = 'none'
def data(self):
return {Fields.DATA_UNITS: self.units, Fields.DATA_VALUES: self.values}
def convert_csv(meta_csv, datatype, data_csv, nstates, output):
meta = PropertyMetadata.from_csv(meta_csv)
meta.datatype = datatype
data = PropertyData.from_csv(data_csv, nstates)
obj = PropertyTable(data=data, metadata=meta)
ofile = get_file(output, mode='w')
obj.dump(ofile)
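# Illustrative call (file names and datatype are placeholders; nstates marks how many
# leading data columns are state columns such as temperature or concentration):
#     convert_csv('mea_meta.csv', 'MEA', 'mea_data.csv', nstates=1, output='mea.json')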
|
StarcoderdataPython
|
1600565
|
<filename>aws_dataclasses/cf_event.py
from collections import namedtuple
from typing import Dict, List, Optional
from dataclasses import InitVar, field, dataclass
from aws_dataclasses.base import GenericDataClass, EventClass
KVPair = namedtuple("KVPair", ['key', 'value'])
def _parse_headers(headers) -> Dict[str, List[KVPair]]:
out = {}
for hdr_name, header_list in headers.items():
out[hdr_name] = [KVPair(header.get("key"), header.get("value")) for header in header_list]
return out
@dataclass
class CloudFrontConfig(GenericDataClass):
distribution_id: str = field(init=False)
request_id: str = field(init=False)
distributionId: InitVar[str] = field(repr=False, default=None)
requestId: InitVar[str] = field(repr=False, default=None)
def __post_init__(self, distributionId: str, requestId: str):
self.request_id = requestId
self.distribution_id = distributionId
@dataclass
class CloudFrontfRequest(GenericDataClass):
querystring: str
uri: str
method: str
headers: Dict[str, List[KVPair]]
origin: str = field(default=None)
client_ip: str = field(init=False)
clientIp: InitVar[str] = field(repr=False, default=None)
def __post_init__(self, clientIp: str):
self.client_ip = clientIp
self.headers = _parse_headers(self.headers)
@dataclass
class CloudFrontResponse(GenericDataClass):
status: str
status_description: str = field(init=False)
headers: Dict[str, List[KVPair]]
statusDescription: InitVar[str] = field(repr=False, default=None)
def __post_init__(self, statusDescription: str):
self.status_description = statusDescription
self.headers = _parse_headers(self.headers)
@dataclass
class CloudFrontRecord(GenericDataClass):
config: CloudFrontConfig
request: Optional[CloudFrontfRequest] = field(default=None)
response: Optional[CloudFrontResponse] = field(default=None)
def __post_init__(self):
self.config = CloudFrontConfig.from_json(self.config)
self.request = CloudFrontfRequest.from_json(self.request) if self.request is not None else self.request
self.response = CloudFrontResponse.from_json(self.response) if self.response is not None else self.response
@dataclass
class CloudFrontEvent(EventClass):
records: List[CloudFrontRecord] = field(init=False)
first_record: CloudFrontRecord = field(init=False)
Records: InitVar[List[Dict]] = field(repr=False, default=[])
def __post_init__(self, Records: List[Dict]):
self.records = [CloudFrontRecord.from_json(record["cf"]) for record in Records]
self.first_record = self.records[0]
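# Sketch of intended usage (assumes the from_json helper on the base classes accepts
# a plain dict, as the __post_init__ hooks above already rely on):
#     event = CloudFrontEvent.from_json(lambda_edge_payload)
#     event.first_record.request.uri        # request URI of the first record
#     event.first_record.request.headers    # headers parsed into lists of KVPair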
|
StarcoderdataPython
|
1739106
|
from tensorflow import keras
import tensorflow.keras.layers as layers
LETTERS = list('abcdefghijklmnopqrstuvwxyz')
bn_axis = 3 # channels last
# based on https://github.com/keras-team/keras-applications/blob/master/keras_applications/resnet50.py
def identity_block(input_tensor, kernel_size, filters, stage, block):
"""The identity block is the block that has no conv layer at shortcut.
# Arguments
input_tensor: input tensor
kernel_size: default 3, the kernel size of
middle conv layer at main path
filters: list of integers, the filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
# Returns
Output tensor for the block.
"""
filters1, filters2, filters3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = layers.Conv2D(filters1, (1, 1),
kernel_initializer='he_normal',
name=conv_name_base + '2a')(input_tensor)
x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
x = layers.Activation('relu')(x)
x = layers.Conv2D(filters2,
kernel_size,
padding='same',
kernel_initializer='he_normal',
name=conv_name_base + '2b')(x)
x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
x = layers.Activation('relu')(x)
x = layers.Conv2D(filters3, (1, 1),
kernel_initializer='he_normal',
name=conv_name_base + '2c')(x)
x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)
x = layers.add([x, input_tensor])
x = layers.Activation('relu')(x)
return x
def conv_block(input_tensor,
kernel_size,
filters,
stage,
block,
strides=(2, 2)):
"""A block that has a conv layer at shortcut.
# Arguments
input_tensor: input tensor
kernel_size: default 3, the kernel size of
middle conv layer at main path
filters: list of integers, the filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
strides: Strides for the first conv layer in the block.
# Returns
Output tensor for the block.
Note that from stage 3,
the first conv layer at main path is with strides=(2, 2)
And the shortcut should have strides=(2, 2) as well
"""
filters1, filters2, filters3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = layers.Conv2D(filters1, (1, 1),
strides=strides,
kernel_initializer='he_normal',
name=conv_name_base + '2a')(input_tensor)
x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
x = layers.Activation('relu')(x)
x = layers.Conv2D(filters2,
kernel_size,
padding='same',
kernel_initializer='he_normal',
name=conv_name_base + '2b')(x)
x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
x = layers.Activation('relu')(x)
x = layers.Conv2D(filters3, (1, 1),
kernel_initializer='he_normal',
name=conv_name_base + '2c')(x)
x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)
shortcut = layers.Conv2D(filters3, (1, 1),
strides=strides,
kernel_initializer='he_normal',
name=conv_name_base + '1')(input_tensor)
shortcut = layers.BatchNormalization(axis=bn_axis,
name=bn_name_base + '1')(shortcut)
x = layers.add([x, shortcut])
x = layers.Activation('relu')(x)
return x
def create_resnet_model(img_input, params):
ksize = params.get('ksize', 5)
nfil = params.get('nfil', 10)
nlayers = params.get('nlayers', 3)
dprob = params.get('dprob', 0.05 if params['batch_norm'] else 0.25)
x = keras.layers.ZeroPadding2D(padding=(3, 3), name='conv1_pad')(img_input)
x = keras.layers.Conv2D(nfil, (ksize, ksize),
strides=(2, 2),
padding='valid',
kernel_initializer='he_normal',
name='conv1')(x)
bn_axis = 3 # channels_last
x = keras.layers.BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
x = keras.layers.Activation('relu')(x)
x = keras.layers.ZeroPadding2D(padding=(1, 1), name='pool1_pad')(x)
x = keras.layers.MaxPooling2D((3, 3), strides=(2, 2))(x)
# ResNet-50 has 4 of these stages
for layer in range(nlayers):
nfilters = nfil * (layer + 1)
filters = [nfilters, nfilters, nfilters * 4]
x = conv_block(x,
3,
filters,
stage=layer + 2,
block='a',
strides=(1, 1))
for block in range(get_num_blocks(layer, nlayers)):
x = identity_block(x,
3,
filters,
stage=layer + 2,
block=LETTERS[block + 1])
cnn = keras.layers.GlobalAveragePooling2D(name='avg_pool')(x)
cnn = keras.layers.Dropout(dprob)(cnn)
cnn = keras.layers.Dense(10, activation='relu')(cnn)
return cnn
def get_num_blocks(layer, nlayers):
    # ResNet-50 uses 2-3-5-2 identity blocks across its four stages
    # if we have 5 layers, we'll end up with the following calculation
    nblocks = min(
        min(layer, nlayers // 2),  # 0, 1, 2, 2, 2
        min(nlayers - layer - 1, nlayers // 2)  # 2, 2, 2, 1, 0
    )  # 0, 1, 2, 1, 0
    return (nblocks + 2)  # 2-3-4-3-2
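# --- Usage sketch (not part of the original file) ---
# Minimal example of wiring create_resnet_model() into a Keras model. The input
# shape and the params values below are illustrative assumptions; note that
# 'batch_norm' must be present because params['batch_norm'] is read above.
if __name__ == '__main__':
    img_input = keras.Input(shape=(64, 64, 3))
    params = {'ksize': 5, 'nfil': 10, 'nlayers': 3, 'batch_norm': True}
    logits = create_resnet_model(img_input, params)
    model = keras.Model(inputs=img_input, outputs=logits)
    model.summary()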
|
StarcoderdataPython
|
1691065
|
<reponame>skylarjhdownes/yutu
import random
import discord
from discord.ext import commands
class Interact:
pass
def interact_fwrk(name, text, help, aliases=[], images=None, disallow_none=False):
@commands.command(name=name, aliases=aliases, help=help)
async def cmd(self, ctx: commands.Context, user: discord.Member = None):
if disallow_none:
if user is None:
raise commands.errors.MissingRequiredArgument(commands.DummyArg('user'))
else:
first = ctx.author
second = user
else:
if user is None:
first = ctx.me
second = ctx.author
else:
first = ctx.author
second = user
post = discord.Embed()
if isinstance(text, list):
post.description = random.choice(text).format(first, second)
else:
post.description = text.format(first, second)
if images is not None:
post.set_image(url=random.choice(images))
post.set_thumbnail(url=first.avatar_url)
await ctx.send(embed=post)
setattr(Interact, name, cmd)
interact_fwrk(name='cute',
text='**{0}** thinks that **{1}** is cute!',
help="Tell someone they are cute!",
images=['https://i.imgur.com/MuVAkV2.gif'])
interact_fwrk(name='hug',
text='**{0}** gives **{1}** a warm hug',
help="Give someone a hug",
aliases=['hugs'],
images=['https://i.imgur.com/RDdGYgK.gif'])
interact_fwrk(name='nibble',
text='**{0}** nibbles on **{1}**',
help='Nibble on someone',
aliases=['nibbles'],
disallow_none=True)
interact_fwrk(name='pat',
text='**{0}** pats **{1}** gently',
help='Pat someone',
aliases=['pats'])
interact_fwrk(name='cuddle',
text='**{0}** cuddles around **{1}**',
help='Give someone a cuddle',
aliases=['cuddles'])
interact_fwrk(name='noogie',
text='**{0}** gives **{1}** a noogie',
help='Give someone a noogie',
images=['https://media.giphy.com/media/MdmRuAvv1sQpi/giphy.gif'],
disallow_none=True)
interact_fwrk(name='siton',
text='**{0}** sits down on-top of **{1}**',
help='Sit on someone',
aliases=['sit-on'],
disallow_none=True)
interact_fwrk(name='kiss',
text='**{0}** kisses **{1}**',
help='Kiss someone',
aliases=['smooch'],
disallow_none=True)
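# Usage sketch (assumed wiring, not part of the original cog file): with the
# discord.py rewrite-style API used above, the plain Interact class would be
# registered on a bot roughly like this; the token string is a placeholder.
#
#     bot = commands.Bot(command_prefix='!')
#     bot.add_cog(Interact())
#     bot.run('YOUR_TOKEN')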
|
StarcoderdataPython
|
88139
|
<filename>server/server.py
import os
from flask import Flask, request, redirect, url_for, jsonify, make_response
from werkzeug.utils import secure_filename
from flask_cors import CORS, cross_origin
import json
import sys
import imageservice
from imageservice import myImage
UPLOAD_FOLDER = os.path.abspath("images")
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
cors = CORS(app, resources={r"/*": {"origins": "*"}})
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
# check if the post request has the file part
if 'file' not in request.files:
print('No file part')
return redirect(request.url)
file = request.files['file']
imageservice.imagelink = file.filename
if file.filename == '':
print('No selected file')
return redirect(request.url)
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
return redirect(url_for('upload_file',filename=filename))
# Import the model script
import model
# Process the detection
preprocessed_image = model.prepare_image(imageservice.myImage())
predictions = model.mobile.predict(preprocessed_image)
# Store the results
results = model.imagenet_utils.decode_predictions(predictions)
# Create an object for each result
first_result = [x[0] for x in results]
second_result = [x[1] for x in results]
third_result = [x[2] for x in results]
fourth_result = [x[3] for x in results]
fifth_result = [x[4] for x in results]
analysis_res = {
"first_prob" : {str(first_result[0][1]) : str(int(first_result[0][2] * 100))+'%'},
"second_prob" : {str(second_result[0][1]) : str(int(second_result[0][2] * 100))+'%'},
"third_prob" : {str(third_result[0][1]) : str(int(third_result[0][2] * 100))+'%'},
"fourth_prob" : {str(fourth_result[0][1]) : str(int(fourth_result[0][2] * 100))+'%'},
"fifth_prob" : {str(fifth_result[0][1]) : str(int(fifth_result[0][2] * 100))+'%'}
}
# Send results to client
os.remove(os.path.abspath("images")+'/'+imageservice.myImage())
return json.dumps(analysis_res)
if __name__ == '__main__':
app.run()
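# --- Client-side usage sketch (not part of the original server) ---
# With the server running locally, an image can be posted for classification
# roughly like this; the filename and port are illustrative assumptions.
#
#     import requests
#     with open('cat.jpg', 'rb') as img:
#         resp = requests.post('http://127.0.0.1:5000/', files={'file': img})
#     print(resp.json())  # the five top predictions with their probabilities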
|
StarcoderdataPython
|
118196
|
<reponame>campbell-ja/MetashapePythonScripts
# This script created by <NAME> - 03/2021
""" Set up Working Environment """
# import Metashape library module
import Metashape
# create a reference to the current project via Document Class
doc = Metashape.app.document
""" Prompt User to Select Images """
# create array/list of images via user input gui
images = Metashape.app.getOpenFileNames()
""" Create New Chunk and Rename It """
# create new chunk to avoid changing any currently existing chunks
newChunk = doc.addChunk()
# set new chunk as active chunk
Metashape.app.document.chunk = newChunk
# set a new reference for the new chunk
activeChunk = newChunk
"""Rename the New Active Chunk"""
# get full list of chunks
chunkList = doc.chunks
# rename the new chunk, but only if it exists
# also add the integer position in chunks list
# this ensures each time the script runs each chunk has a unique name.
if activeChunk in chunkList:
activeChunk.label = 'pyChunk_' + str(len(chunkList)-1)
""" Add User Selected Photos to Active Chunk"""
# add images to chunk from array/list created earlier
activeChunk.addPhotos(images)
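""" Possible Next Steps (sketch, not part of the original script) """
# A typical workflow would continue with photo alignment, for example:
# activeChunk.matchPhotos()
# activeChunk.alignCameras()
# The exact keyword arguments depend on the installed Metashape API version.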
|
StarcoderdataPython
|
3301366
|
import cv2
number_imgs = []
|
StarcoderdataPython
|
172966
|
<filename>scripts/runner.settings.py
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# noinspection PyUnreachableCode
if False:
# noinspection PyUnresolvedReferences
from _stubs import *
import copy
entries = [{
'title': "OPOPENING",
'type': "COMMAND",
'package': "runner.runner",
'executeMethod': "toggleOPOpenBehaviour"
}, {
'title': "TOXFOLDER",
'type': "COMMAND",
'package': "runner.runner",
'executeMethod': "setTOXFolder"
}, {
'title': "PRESETSFOLDER",
'type': "COMMAND",
'package': "runner.runner",
'executeMethod': "setPresetsFolder"
}, {
'title': "SEARCHROOT",
'type': "COMMAND",
'package': "runner.runner",
'executeMethod': "setSearchRoot"
}, {
'title': "PLUGINSFOLDER",
'type': "COMMAND",
'package': "runner.runner",
'executeMethod': "setPluginsFolder"
}, {
'title': "DISPLAYHELP",
'type': "COMMAND",
'package': "runner.runner",
'executeMethod': "toggleDisplayHelp"
}, {
'title': "EXPOSERUNNER",
'type': "COMMAND",
'package': "runner.runner",
'executeMethod': "toggleExposeRunner"
}]
def getEntries():
rows = copy.deepcopy(entries)
settings = iop.settings
for row in rows:
if row['title'] == 'TOXFOLDER':
path = settings['toxfolder', 1].val
if len(path) > 42:
path = '...' + path[-39:]
row['title'] = 'TOXs Folder [' + path + ']'
row['help'] = 'Tell Runner a location on the computer containing TOXs to index.'
elif row['title'] == 'OPOPENING':
row['title'] = 'Open OPs in Place [' + ('Yes' if int(settings['openopinplace', 1].val) else 'No') + ']'
row['help'] = 'Tell if Runner should open operators in the same pane or in a new one.'
elif row['title'] == 'PRESETSFOLDER':
path = settings['presetsfolder', 1].val
if len(path) > 42:
path = '...' + path[-39:]
row['title'] = 'Presets Folder [' + path + ']'
row['help'] = 'Tell Runner where the presets are stored on the computer.'
elif row['title'] == 'SEARCHROOT':
path = settings['searchroot', 1].val
if len(path) > 42:
path = '...' + path[-39:]
row['title'] = 'Search root [' + path + ']'
row['help'] = 'Tell Runner from which node to start indexing. Pressing [Enter] will update the search root to the current location.'
elif row['title'] == 'PLUGINSFOLDER':
path = settings['pluginsfolder', 1].val
if len(path) > 42:
path = '...' + path[-39:]
row['title'] = 'Plugins folder [' + path + ']'
row['help'] = 'Tell Runner where to look on the computer for plugins.'
elif row['title'] == 'DISPLAYHELP':
row['title'] = 'Display help captions [' + ('Yes' if int(settings['displayhelp', 1].val) == 1 else 'No') + ']'
row['help'] = 'Specify if the help captions above the results should be shown.'
elif row['title'] == 'EXPOSERUNNER':
row['title'] = 'Hide Runner operator' if parent.runner.expose else 'Expose Runner operator'
row['help'] = 'Hide or expose the Runner operator. This has no impact on the Runner behavior. ' \
'Hiding the runner helps prevent involuntarily interacting with its node while ' \
'working on something else.'
return rows
def openSettings():
parent.runner.OpenSublist(getEntries())
return False
def setTOXFolder():
location = ui.chooseFolder(title='Select Folder containing TOXs', start=None)
if location is not None:
iop.settings['toxfolder', 1] = location
ui.status = 'Runner TOXs folder set to ' + location
parent.runner.OpenSublist(getEntries())
return False
def toggleOPOpenBehaviour():
val = int(parent.runner.op('settings')['openopinplace', 1].val)
iop.settings['openopinplace', 1] = 0 if val else 1
parent.runner.OpenSublist(getEntries())
return False
def setPresetsFolder():
location = ui.chooseFolder(title='Select Folder Storing Presets', start=None)
if location is not None:
iop.settings['presetsfolder', 1] = location
ui.status = 'Runner presets folder set to ' + location
parent.runner.OpenSublist(getEntries())
return False
def setSearchRoot():
node = ui.panes.current.owner if ui.panes.current is not None else None
if node is None:
        ui.status = "Runner: Could not update search root"
else:
iop.settings['searchroot', 1] = node.path
ui.status = "Runner: Search root changed to " + node.path
parent.runner.OpenSublist(getEntries())
return False
def refreshOPsIndex():
parent.runner.GetDB().UpdateProjectOPs(op('/'))
def setPluginsFolder():
location = ui.chooseFolder(title='Select Folder Storing Runner Plugins', start=None)
if location is not None:
iop.settings['pluginsfolder', 1] = location
ui.status = 'Runner plugins folder set to ' + location
parent.runner.OpenSublist(getEntries())
return False
def toggleDisplayHelp():
iop.settings['displayhelp', 1] = 1 if int(iop.settings['displayhelp', 1].val) == 0 else 0
parent.runner.OpenSublist(getEntries())
return False
def toggleExposeRunner():
parent.runner.expose = not parent.runner.expose
parent.runner.OpenSublist(getEntries())
return False
|
StarcoderdataPython
|
4818568
|
"""
The unit test module for AutoScheduler dialect.
"""
# pylint:disable=missing-docstring, redefined-outer-name, invalid-name
# pylint:disable=unused-argument, unused-import, wrong-import-position, ungrouped-imports
import argparse
import os
import re
import tempfile
import mock
import pytest
from moto import mock_dynamodb2
from lorien.util import is_dialect_enabled
if not is_dialect_enabled("auto_scheduler"):
pytest.skip("AutoScheduler dialect is not available", allow_module_level=True)
import tvm
from tvm import auto_scheduler, relay
from tvm.auto_scheduler.measure import MeasureInput, MeasureResult
from tvm.auto_scheduler.measure_record import dump_record_to_string, load_record_from_string
from tvm.auto_scheduler.search_task import SearchTask
from lorien.database.table import create_table
from lorien.dialect.tvm_dial.auto_scheduler_dial.extract import extract_from_models
from lorien.dialect.tvm_dial.auto_scheduler_dial.workload import AutoSchedulerWorkload
from lorien.dialect.tvm_dial.auto_scheduler_dial.job import (
AutoSchedulerJob,
AutoSchedulerJobConfigs,
AutoSchedulerTuneResult,
RecordToMetadata,
)
from lorien.dialect.tvm_dial.auto_scheduler_dial.result import AutoSchedulerRecords
from lorien.dialect.tvm_dial.job import TuneMetadata
from lorien.tune.result import TuneErrorCode
def gen_conv2d_task(ishape, wshape):
dtype = "float32"
data = relay.var("data", shape=(ishape), dtype=dtype)
weight1 = relay.var("weight1", shape=(wshape), dtype=dtype)
conv2d = relay.nn.conv2d(data, weight1, kernel_size=(3, 3), padding=(1, 1))
out = relay.nn.relu(conv2d)
func = relay.Function([data, weight1], out)
mod = tvm.IRModule.from_expr(func)
mod = relay.transform.InferType()(mod)
tasks, _ = auto_scheduler.extract_tasks(mod, None, "llvm")
return tasks[0]
def test_workload(mocker):
task = gen_conv2d_task((1, 3, 224, 224), (32, 3, 3, 3))
workload = AutoSchedulerWorkload.from_task(task)
assert str(workload.to_task().compute_dag) == str(task.compute_dag)
assert str(workload)
task2 = gen_conv2d_task((1, 32, 112, 112), (32, 32, 3, 3))
workload2 = AutoSchedulerWorkload.from_task(task2)
assert workload != workload2
assert (workload < workload2) == (workload.workload_key < workload2.workload_key)
job = workload.to_job()
assert isinstance(job, AutoSchedulerJob)
mocker.patch(
"lorien.dialect.tvm_dial.auto_scheduler_dial.workload.pickle.loads",
side_effect=RuntimeError,
)
with pytest.raises(RuntimeError):
workload.to_task()
def test_job_n_config():
task = gen_conv2d_task((1, 3, 224, 224), (32, 3, 3, 3))
workload = AutoSchedulerWorkload.from_task(task)
job = workload.to_job()
configs = argparse.Namespace(
ntrial=4,
test=1,
repeat=1,
min=400,
db="{ region_name: us-west-2 }",
commit_table_name="random-table",
commit_nbest=1,
commit_workload=False,
commit_log_to=None,
)
job_configs = job.create_job_configs(configs)
job_configs.commit_options["table-arn"] = "random-arn"
assert isinstance(job_configs, AutoSchedulerJobConfigs)
assert job_configs.tune_options
assert job_configs.measure_options
assert job_configs.check_tvm_build_config()
# Localize with RPC runner
rpc_config = argparse.Namespace(device="test-device", runner_port=188875)
job_configs.localize("llvm", configs=rpc_config)
assert "runner" in job_configs.measure_options
del job_configs.measure_options["runner"]
# Failed to localize
mock_check_tvm_build_config = mock.MagicMock()
mock_check_tvm_build_config.return_value = False
job_configs.check_tvm_build_config = mock_check_tvm_build_config
with pytest.raises(RuntimeError):
job_configs.localize("llvm")
# Localize with local runner
job_configs = job.create_job_configs(configs)
job_configs.commit_options["table-arn"] = "random-arn"
job_configs.tvm_build_config = {}
job_configs.localize("llvm")
assert "measure_ctx" in job_configs.measure_options
del job_configs.measure_options["measure_ctx"]
# Test callback separately since coverage cannot reach to TVM PackedFunc
metadata = TuneMetadata()
recorder = RecordToMetadata(metadata)
res = mock.MagicMock()
res.error_no = 2
recorder.callback(None, [None], [res])
assert metadata.trial_count == 1
assert metadata.failed_count == 1
inp = mock.MagicMock()
inp.task = mock.MagicMock()
inp.task.compute_dag = mock.MagicMock()
inp.task.compute_dag.flop_ct = 1e9
res = mock.MagicMock()
res.error_no = 0
value = mock.MagicMock()
value.value = 1
res.costs = [value]
recorder.callback(None, [inp], [res])
assert metadata.trial_count == 2
assert metadata.failed_count == 1
assert metadata.max_thrpt == 1
@pytest.fixture
def tune_config_fixture():
with mock_dynamodb2():
with tempfile.TemporaryDirectory(prefix="lorien-test-auto_scheduler-commit-") as temp_dir:
table_name = "lorien-test"
arn = create_table(table_name, region_name="us-west-2")
task = gen_conv2d_task((1, 3, 224, 224), (32, 3, 3, 3))
workload = AutoSchedulerWorkload.from_task(task)
job = workload.to_job()
configs = argparse.Namespace(
ntrial=2,
test=1,
repeat=1,
min=100,
db="{ region_name: us-west-2 }",
commit_table_name=table_name,
commit_nbest=1,
commit_workload=False,
commit_log_to=None,
)
job_configs = job.create_job_configs(configs)
job_configs.tune_options["tune_dir"] = temp_dir
job_configs.commit_options["table-arn"] = arn
job_configs.localize("llvm")
yield table_name, task, job, job_configs
del job_configs.measure_options["measure_ctx"]
def test_tune_n_commit_n_query(tune_config_fixture):
table_name, task, job, job_configs = tune_config_fixture
with mock.patch.object(AutoSchedulerWorkload, "to_task", side_effect=RuntimeError):
job.tune(job_configs.tune_options, job_configs.measure_options, job_configs.commit_options)
assert job.result.error_code == TuneErrorCode.FAIL_TO_CREATE_TASK
with mock.patch.object(SearchTask, "tune", return_value=None):
job.tune(job_configs.tune_options, job_configs.measure_options, job_configs.commit_options)
assert job.result.error_code == TuneErrorCode.NO_VALID_RESULT
# Do not commit
job.tune(job_configs.tune_options, job_configs.measure_options, None)
assert "tune_logs" in job.result.metadata
# Success committed
job.tune(job_configs.tune_options, job_configs.measure_options, job_configs.commit_options)
assert "tune_logs" not in job.result.metadata
workload_key = AutoSchedulerWorkload.from_task(task).get_workload_key()
records = AutoSchedulerRecords(task.target, workload_key)
records.query(table_name, region_name="us-west-2")
assert len(records) == 1
records = AutoSchedulerRecords(task.target, workload_key)
records.query(table_name, use_alter_key=True, region_name="us-west-2")
assert len(records) == 1
# Do not provide workload key to query all records with the same target
records = AutoSchedulerRecords("llvm", workload_key=None)
records.query(table_name, region_name="us-west-2")
assert len(records) == 1
def mock_commit(self, commit_options, workload, silent=False):
self.error_code = TuneErrorCode.STORAGE_ERROR
with mock.patch.object(
AutoSchedulerTuneResult, "commit", side_effect=mock_commit, autospec=True
):
job.tune(job_configs.tune_options, job_configs.measure_options, job_configs.commit_options)
assert "tune_logs" in job.result.metadata
    # Test log file filtering
log_file = job.result.log_file
with tempfile.NamedTemporaryFile(mode="w", prefix="lorien-test-auto-sch-") as temp_file:
with open(log_file, "r") as filep:
inp, _ = load_record_from_string(filep.readline())
# Commented out record
temp_file.write("\n")
res = MeasureResult([0.1], 0, "", 0.2, 1)
temp_file.write("#{}".format(dump_record_to_string(inp, res)))
# Record with error
res = MeasureResult([0.2], 2, "", 0.3, 2)
temp_file.write(dump_record_to_string(inp, res))
# Record for different workload
res = MeasureResult([0.3], 0, "", 0.5, 4)
record_str = dump_record_to_string(inp, res)
workload_key = re.search(r"\[\"(.+)\", .+\]", str(task.workload_key)).group(1)
record_str = record_str.replace(workload_key, "aaa")
temp_file.write(record_str)
temp_file.flush()
records = AutoSchedulerTuneResult.create_records_by_workloads(
temp_file.name, 1, job.workload
)
assert len(records) == 1
assert records[0].target_key == "llvm -keys=cpu -link-params=0"
assert records[0].alter_key == "<KEY>"
assert records[0].workload_key.find("aaa") == -1
def test_extract_from_model():
configs = argparse.Namespace(
gcv=["alexnet", "alexnet: { data: [1, 3, 224, 224]}"],
target=["llvm -libs=cblas"],
include_simple_tasks=False,
tf=[],
tflite=[],
onnx=[],
keras=[],
torch=[],
mxnet=[],
)
workloads = extract_from_models(configs)
assert len(workloads) == 8
# Test failed to create task.
with mock.patch.object(AutoSchedulerWorkload, "from_task", side_effect=RuntimeError):
workloads = extract_from_models(configs)
assert len(workloads) == 0
# Test failure.
configs = argparse.Namespace(
gcv=["alexnet_wrong_name"],
target=["llvm"],
include_simple_tasks=False,
tf=[],
tflite=[],
onnx=[],
keras=[],
torch=[],
mxnet=[],
)
workloads = extract_from_models(configs)
assert len(workloads) == 0
|
StarcoderdataPython
|
157349
|
###############################################################################
# The MIT License (MIT)
#
# Copyright (c) 2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
###############################################################################
import random
import copy
import pytest
import numpy
from learning import preprocess
from learning.data import datasets
from learning.testing import helpers
def test_shuffle_dataset_does_shuffle():
dataset = (numpy.array([['s1i1', 's1i2'], ['s2i1', 's2i2'],
['s3i1', 's3i2']]),
numpy.array([['s1t1'], ['s2t1'], ['s3t1']]))
# Assert that not all shuffled sets match
def _eq_dataset(dataset_a, dataset_b):
return (dataset_a[0] == dataset_b[0]).all() and (
dataset_a[1] == dataset_b[1]).all()
shuffled_dataset = preprocess.shuffle(dataset)
for i in range(20):
if not _eq_dataset(shuffled_dataset, preprocess.shuffle(dataset)):
return # They don't all match
    assert False, 'All shuffled sets match'
def test_shuffle_dataset_correct_patterns():
dataset = (numpy.array([['s1i1', 's1i2'], ['s2i1', 's2i2'],
['s3i1', 's3i2']]),
numpy.array([['s1t1'], ['s2t1'], ['s3t1']]))
shuffled_dataset = preprocess.shuffle(dataset)
    # Build a mapping for checking the shuffled dataset
target_mapping = {
tuple(inp_vec): tar_vec
for inp_vec, tar_vec in zip(*dataset)
}
for inp_vec, tar_vec in zip(*shuffled_dataset):
assert (target_mapping[tuple(inp_vec)] == tar_vec).all()
target_mapping.pop(tuple(inp_vec))
    assert target_mapping == {}  # Each original pattern is in the shuffled dataset
def test_make_onehot_1d():
assert (preprocess.make_onehot([1, 2, 1]) == numpy.array([[1, 0], [0, 1],
[1, 0]])).all()
assert (preprocess.make_onehot([1, 2, 3, 1]) == numpy.array(
[[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0]])).all()
def test_make_onehot_2d():
labels = [[1, 2, 3], [2, 3, 4], [1, 2, 3]]
assert (preprocess.make_onehot(labels) == numpy.array([[1, 0], [0, 1],
[1, 0]])).all()
def test_make_labels():
onehot = numpy.array([[1, 0], [0, 1], [1, 0]])
assert (preprocess.make_labels(onehot) == numpy.array([[0], [1],
[0]])).all()
#################
# Normalization
#################
def test_rescale():
assert (preprocess.rescale(
numpy.array([[-100, 2], [100, 0], [0, 1]])) == numpy.array(
[[-1.0, 1.0], [1.0, -1.0], [0.0, 0.0]])).all()
def test_normalize():
random_matrix = numpy.random.rand(
random.randint(2, 10), random.randint(1, 10))
    print('Generated Matrix:')
    print(random_matrix)
normalized_matrix = preprocess.normalize(random_matrix)
assert random_matrix.shape == normalized_matrix.shape
# Original matrix should be unchanged
assert not numpy.array_equal(random_matrix, normalized_matrix)
# Normalized matrix should have mean of 0 standard deviation of 1
# for each dimension
means = numpy.mean(normalized_matrix, 0)
for mean in means:
        print(mean)
assert helpers.approx_equal(mean, 0, tol=1e-10)
sds = numpy.std(normalized_matrix, 0)
for sd in sds:
        print(sd)
assert helpers.approx_equal(sd, 1, tol=1e-10)
# TODO: deterministic test
#inputs = [[0.75, 0.25],
# [0.5, 0.5],
# [0.25, 0.75]]
#expected = [
#assert preprocess.normalize(inputs) == numpy.matrix(expected)
def test_normalize_one_row():
matrix = [[0, 1, 2]]
with pytest.raises(ValueError):
preprocess.normalize(matrix)
with pytest.raises(ValueError):
preprocess.normalize(numpy.array(matrix))
######################
# Depuration functions
######################
def test_list_minus_i():
list_ = [0, 1, 2]
assert preprocess._list_minus_i(list_, 0) == [1, 2]
assert preprocess._list_minus_i(list_, 1) == [0, 2]
assert preprocess._list_minus_i(list_, 2) == [0, 1]
def test_count_classes():
_, target_matrix = datasets.get_xor()
class_counts = preprocess._count_classes(target_matrix)
assert len(class_counts) == 2
assert class_counts[(0, 1)] == 2
assert class_counts[(1, 0)] == 2
target_matrix = [['foo'], ['bar'], ['bar']]
class_counts = preprocess._count_classes(target_matrix)
assert len(class_counts) == 2
assert class_counts[('foo', )] == 1
assert class_counts[('bar', )] == 2
def test_clean_dataset_depuration():
dataset = [[
[0.0],
[0.0],
[0.0],
[0.01],
[0.5],
[0.5],
[0.99],
[1.0],
[1.0],
[1.0],
], [
(0, ),
(0, ),
(0, ),
(1, ),
(0.5, ),
(0.5, ),
(0, ),
(1, ),
(1, ),
(1, ),
]]
cleaned_dataset, changed_points, removed_points = preprocess.clean_dataset_depuration(
*dataset, k=3, k_prime=2)
assert (numpy.array(cleaned_dataset) == numpy.array([[
[0.0],
[0.0],
[0.0],
[0.01],
[0.99],
[1.0],
[1.0],
[1.0],
], [
(0, ),
(0, ),
(0, ),
(0, ),
(1, ),
(1, ),
(1, ),
(1, ),
]])).all()
assert changed_points == [3, 6]
assert removed_points == [4, 5]
######################
# PCA
######################
def test_pca_using_expected_num_dimensions():
data = [[-1, -1], [1, 1]]
expected = numpy.matrix([[-1], [1]])
assert numpy.array_equal(preprocess.pca(data, 1), expected)
def test_pca_using_num_dimensions_func():
data = [[-1, -1], [1, 1]]
expected = numpy.matrix([[-1], [1]])
def selection_func(eigen_values):
return [i for i, v in enumerate(eigen_values) if v > 1]
assert numpy.array_equal(
preprocess.pca(data, select_dimensions_func=selection_func), expected)
def test_pca_no_expected_or_func():
with pytest.raises(ValueError):
preprocess.pca([], None, None)
def test_pca_both_expected_and_func():
with pytest.raises(ValueError):
preprocess.pca([], 1, lambda x: [0])
###########################
# Default cleaning function
###########################
def test_clean_dataset_no_pca():
# Should just apply depuration
patterns = [
([0.0], (0, )),
([0.0], (0, )),
([0.0], (0, )),
([0.0], (1, )),
([1.0], (0, )),
([1.0], (1, )),
([1.0], (1, )),
([1.0], (1, )),
]
# Normalize input
# And depuration should correct the 4th and 5th targets
expected = [
(numpy.array([-1.0]), (0, )),
(numpy.array([-1.0]), (0, )),
(numpy.array([-1.0]), (0, )),
(numpy.array([-1.0]), (0, )),
(numpy.array([1.0]), (1, )),
(numpy.array([1.0]), (1, )),
(numpy.array([1.0]), (1, )),
(numpy.array([1.0]), (1, )),
]
assert ((numpy.array(preprocess.clean_dataset(*zip(*patterns))) ==
numpy.array(zip(*expected)))).all()
def test_clean_dataset_with_pca():
patterns = [
([0.0, 0.0], (0, )),
([0.0, 0.0], (0, )),
([0.0, 0.0], (0, )),
([0.0, 0.0], (1, )),
([1.0, 1.0], (0, )),
([1.0, 1.0], (1, )),
([1.0, 1.0], (1, )),
([1.0, 1.0], (1, )),
]
# PCA should reduce to one dimension (since it is currently just on a
# diagonal)
# PCA will also normalize input
# And depuration should correct the 4th and 5th targets
expected = [
(numpy.array([-1.0]), (0, )),
(numpy.array([-1.0]), (0, )),
(numpy.array([-1.0]), (0, )),
(numpy.array([-1.0]), (0, )),
(numpy.array([1.0]), (1, )),
(numpy.array([1.0]), (1, )),
(numpy.array([1.0]), (1, )),
(numpy.array([1.0]), (1, )),
]
assert ((numpy.array(preprocess.clean_dataset(*zip(*patterns))) ==
numpy.array(zip(*expected)))).all()
|
StarcoderdataPython
|
3287292
|
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for communicating with the VM for training classifiers."""
import hashlib
import hmac
import json
from constants import constants
from core.controllers import base
from core.domain import acl_decorators
from core.domain import classifier_services
from core.domain import config_domain
from core.domain import email_manager
import feconf
# NOTE TO DEVELOPERS: This function should be kept in sync with its counterpart
# in Oppia-ml.
def generate_signature(secret, message):
"""Generates digital signature for given data.
Args:
secret: str. The secret used to communicate with Oppia-ml.
message: dict. The message payload data.
Returns:
str. The signature of the payload data.
"""
message_json = json.dumps(message, sort_keys=True)
return hmac.new(
secret, msg=message_json, digestmod=hashlib.sha256).hexdigest()
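# Illustrative example (not part of the original module): the VM is expected to
# compute the same signature over the same payload, e.g.
#
#     message = {'job_id': 'job1',
#                'classifier_data_with_floats_stringified': {}}
#     signature = generate_signature('shared-secret-key', message)
#
# where 'shared-secret-key' stands in for the key configured in
# VMID_SHARED_SECRET_KEY_MAPPING; the signature is then checked by
# verify_signature() below.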
def validate_job_result_message_dict(message):
"""Validates the data-type of the message payload data.
Args:
message: dict. The message payload data.
Returns:
bool. Whether the payload dict is valid.
"""
job_id = message.get('job_id')
classifier_data_with_floats_stringified = message.get(
'classifier_data_with_floats_stringified')
if not isinstance(job_id, basestring):
return False
if not isinstance(classifier_data_with_floats_stringified, dict):
return False
return True
def verify_signature(message, vm_id, received_signature):
"""Function that checks if the signature received from the VM is valid.
Args:
message: dict. The message payload data.
vm_id: str. The ID of the VM instance.
received_signature: str. The signature received from the VM.
Returns:
bool. Whether the incoming request is valid.
"""
secret = None
for val in config_domain.VMID_SHARED_SECRET_KEY_MAPPING.value:
if val['vm_id'] == vm_id:
secret = str(val['shared_secret_key'])
break
if secret is None:
return False
generated_signature = generate_signature(secret, message)
if generated_signature != received_signature:
return False
return True
class TrainedClassifierHandler(base.BaseHandler):
"""This handler stores the result of the training job in datastore and
updates the status of the job.
"""
REQUIRE_PAYLOAD_CSRF_CHECK = False
@acl_decorators.open_access
def post(self):
"""Handles POST requests."""
signature = self.payload.get('signature')
message = self.payload.get('message')
vm_id = self.payload.get('vm_id')
if vm_id == feconf.DEFAULT_VM_ID and not constants.DEV_MODE:
raise self.UnauthorizedUserException
if not validate_job_result_message_dict(message):
raise self.InvalidInputException
if not verify_signature(message, vm_id, signature):
raise self.UnauthorizedUserException
job_id = message['job_id']
# The classifier data received in the payload has all floating point
# values stored as strings. This is because floating point numbers
# are represented differently on GAE(Oppia) and GCE(Oppia-ml).
# Therefore, converting all floating point numbers to string keeps
# signature consistent on both Oppia and Oppia-ml.
# For more info visit: https://stackoverflow.com/q/40173295
classifier_data = (
classifier_services.convert_strings_to_float_numbers_in_classifier_data( #pylint: disable=line-too-long
message['classifier_data_with_floats_stringified']))
classifier_training_job = (
classifier_services.get_classifier_training_job_by_id(job_id))
if classifier_training_job.status == (
feconf.TRAINING_JOB_STATUS_FAILED):
# Send email to admin and admin-specified email recipients.
# Other email recipients are specified on admin config page.
email_manager.send_job_failure_email(job_id)
raise self.InternalErrorException(
'The current status of the job cannot transition to COMPLETE.')
try:
classifier_services.store_classifier_data(job_id, classifier_data)
except Exception as e:
raise self.InternalErrorException(e)
# Update status of the training job to 'COMPLETE'.
classifier_services.mark_training_job_complete(job_id)
return self.render_json({})
class NextJobHandler(base.BaseHandler):
"""This handler fetches next job to be processed according to the time
and sends back job_id, algorithm_id and training data to the VM.
"""
REQUIRE_PAYLOAD_CSRF_CHECK = False
@acl_decorators.open_access
def post(self):
"""Handles POST requests."""
signature = self.payload.get('signature')
vm_id = self.payload.get('vm_id')
message = self.payload.get('message')
if vm_id == feconf.DEFAULT_VM_ID and not constants.DEV_MODE:
raise self.UnauthorizedUserException
if not verify_signature(message, vm_id, signature):
raise self.UnauthorizedUserException
response = {}
next_job = classifier_services.fetch_next_job()
if next_job is not None:
classifier_services.mark_training_job_pending(next_job.job_id)
response['job_id'] = next_job.job_id
response['algorithm_id'] = next_job.algorithm_id
response['training_data'] = next_job.training_data
return self.render_json(response)
|
StarcoderdataPython
|
3218075
|
<filename>tests/test_build_html5.py
"""
test_build_html5
~~~~~~~~~~~~~~~~
Test the HTML5 writer and check output against XPath.
    This code is condensed to reduce test running time.
Complete test code is here:
https://github.com/sphinx-doc/sphinx/pull/2805/files
:copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import xml.etree.cElementTree as ElementTree
from hashlib import md5
import pytest
from html5lib import getTreeBuilder, HTMLParser
from test_build_html import flat_dict, tail_check, check_xpath
from sphinx.util.docutils import is_html5_writer_available
TREE_BUILDER = getTreeBuilder('etree', implementation=ElementTree)
HTML_PARSER = HTMLParser(TREE_BUILDER, namespaceHTMLElements=False)
etree_cache = {}
@pytest.mark.skipif(not is_html5_writer_available(), reason='HTML5 writer is not available')
@pytest.fixture(scope='module')
def cached_etree_parse():
def parse(fname):
if fname in etree_cache:
return etree_cache[fname]
with (fname).open('rb') as fp:
etree = HTML_PARSER.parse(fp)
etree_cache.clear()
etree_cache[fname] = etree
return etree
yield parse
etree_cache.clear()
@pytest.mark.skipif(not is_html5_writer_available(), reason='HTML5 writer is not available')
@pytest.mark.parametrize("fname,expect", flat_dict({
'images.html': [
(".//img[@src='_images/img.png']", ''),
(".//img[@src='_images/img1.png']", ''),
(".//img[@src='_images/simg.png']", ''),
(".//img[@src='_images/svgimg.svg']", ''),
(".//a[@href='_sources/images.txt']", ''),
],
'subdir/images.html': [
(".//img[@src='../_images/img1.png']", ''),
(".//img[@src='../_images/rimg.png']", ''),
],
'subdir/includes.html': [
(".//a[@class='reference download internal']", ''),
(".//img[@src='../_images/img.png']", ''),
(".//p", 'This is an include file.'),
(".//pre/span", 'line 1'),
(".//pre/span", 'line 2'),
],
'includes.html': [
(".//pre", '<NAME>'),
(".//a[@class='reference download internal']", ''),
(".//pre/span", '"quotes"'),
(".//pre/span", "'included'"),
(".//pre/span[@class='s2']", 'üöä'),
(".//div[@class='inc-pyobj1 highlight-text notranslate']//pre",
r'^class Foo:\n pass\n\s*$'),
(".//div[@class='inc-pyobj2 highlight-text notranslate']//pre",
r'^ def baz\(\):\n pass\n\s*$'),
(".//div[@class='inc-lines highlight-text notranslate']//pre",
r'^class Foo:\n pass\nclass Bar:\n$'),
(".//div[@class='inc-startend highlight-text notranslate']//pre",
'^foo = "Including Unicode characters: üöä"\\n$'),
(".//div[@class='inc-preappend highlight-text notranslate']//pre",
r'(?m)^START CODE$'),
(".//div[@class='inc-pyobj-dedent highlight-python notranslate']//span",
r'def'),
(".//div[@class='inc-tab3 highlight-text notranslate']//pre",
r'-| |-'),
(".//div[@class='inc-tab8 highlight-python notranslate']//pre/span",
r'-| |-'),
],
'autodoc.html': [
(".//dt[@id='autodoc_target.Class']", ''),
(".//dt[@id='autodoc_target.function']/em", r'\*\*kwds'),
(".//dd/p", r'Return spam\.'),
],
'extapi.html': [
(".//strong", 'from class: Bar'),
],
'markup.html': [
(".//title", 'set by title directive'),
(".//p/em", 'Section author: <NAME>'),
(".//p/em", 'Module author: <NAME>'),
# created by the meta directive
(".//meta[@name='author'][@content='Me']", ''),
(".//meta[@name='keywords'][@content='docs, sphinx']", ''),
# a label created by ``.. _label:``
(".//div[@id='label']", ''),
# code with standard code blocks
(".//pre", '^some code$'),
# an option list
(".//span[@class='option']", '--help'),
# admonitions
(".//p[@class='admonition-title']", 'My Admonition'),
(".//div[@class='admonition note']/p", 'Note text.'),
(".//div[@class='admonition warning']/p", 'Warning text.'),
# inline markup
(".//li/p/strong", r'^command\\n$'),
(".//li/p/strong", r'^program\\n$'),
(".//li/p/em", r'^dfn\\n$'),
(".//li/p/kbd", r'^kbd\\n$'),
(".//li/p/span", 'File \N{TRIANGULAR BULLET} Close'),
(".//li/p/code/span[@class='pre']", '^a/$'),
(".//li/p/code/em/span[@class='pre']", '^varpart$'),
(".//li/p/code/em/span[@class='pre']", '^i$'),
(".//a[@href='https://www.python.org/dev/peps/pep-0008']"
"[@class='pep reference external']/strong", 'PEP 8'),
(".//a[@href='https://www.python.org/dev/peps/pep-0008']"
"[@class='pep reference external']/strong",
'Python Enhancement Proposal #8'),
(".//a[@href='https://tools.ietf.org/html/rfc1.html']"
"[@class='rfc reference external']/strong", 'RFC 1'),
(".//a[@href='https://tools.ietf.org/html/rfc1.html']"
"[@class='rfc reference external']/strong", 'Request for Comments #1'),
(".//a[@href='objects.html#envvar-HOME']"
"[@class='reference internal']/code/span[@class='pre']", 'HOME'),
(".//a[@href='#with']"
"[@class='reference internal']/code/span[@class='pre']", '^with$'),
(".//a[@href='#grammar-token-try-stmt']"
"[@class='reference internal']/code/span", '^statement$'),
(".//a[@href='#some-label'][@class='reference internal']/span", '^here$'),
(".//a[@href='#some-label'][@class='reference internal']/span", '^there$'),
(".//a[@href='subdir/includes.html']"
"[@class='reference internal']/span", 'Including in subdir'),
(".//a[@href='objects.html#cmdoption-python-c']"
"[@class='reference internal']/code/span[@class='pre']", '-c'),
# abbreviations
(".//abbr[@title='abbreviation']", '^abbr$'),
# version stuff
(".//div[@class='versionadded']/p/span", 'New in version 0.6: '),
(".//div[@class='versionadded']/p/span",
tail_check('First paragraph of versionadded')),
(".//div[@class='versionchanged']/p/span",
tail_check('First paragraph of versionchanged')),
(".//div[@class='versionchanged']/p",
'Second paragraph of versionchanged'),
# footnote reference
(".//a[@class='footnote-reference brackets']", r'1'),
# created by reference lookup
(".//a[@href='index.html#ref1']", ''),
# ``seealso`` directive
(".//div/p[@class='admonition-title']", 'See also'),
# a ``hlist`` directive
(".//table[@class='hlist']/tbody/tr/td/ul/li/p", '^This$'),
# a ``centered`` directive
(".//p[@class='centered']/strong", 'LICENSE'),
# a glossary
(".//dl/dt[@id='term-boson']", 'boson'),
# a production list
(".//pre/strong", 'try_stmt'),
(".//pre/a[@href='#grammar-token-try1-stmt']/code/span", 'try1_stmt'),
# tests for ``only`` directive
(".//p", 'A global substitution.'),
(".//p", 'In HTML.'),
(".//p", 'In both.'),
(".//p", 'Always present'),
# tests for ``any`` role
(".//a[@href='#with']/span", 'headings'),
(".//a[@href='objects.html#func_without_body']/code/span", 'objects'),
# tests for numeric labels
(".//a[@href='#id1'][@class='reference internal']/span", 'Testing various markup'),
],
'objects.html': [
(".//dt[@id='mod.Cls.meth1']", ''),
(".//dt[@id='errmod.Error']", ''),
(".//dt/code", r'long\(parameter,\s* list\)'),
(".//dt/code", 'another one'),
(".//a[@href='#mod.Cls'][@class='reference internal']", ''),
(".//dl[@class='userdesc']", ''),
(".//dt[@id='userdesc-myobj']", ''),
(".//a[@href='#userdesc-myobj'][@class='reference internal']", ''),
# docfields
(".//a[@class='reference internal'][@href='#TimeInt']/em", 'TimeInt'),
(".//a[@class='reference internal'][@href='#Time']", 'Time'),
(".//a[@class='reference internal'][@href='#errmod.Error']/strong", 'Error'),
# C references
(".//span[@class='pre']", 'CFunction()'),
(".//a[@href='#c.Sphinx_DoSomething']", ''),
(".//a[@href='#c.SphinxStruct.member']", ''),
(".//a[@href='#c.SPHINX_USE_PYTHON']", ''),
(".//a[@href='#c.SphinxType']", ''),
(".//a[@href='#c.sphinx_global']", ''),
# test global TOC created by toctree()
(".//ul[@class='current']/li[@class='toctree-l1 current']/a[@href='#']",
'Testing object descriptions'),
(".//li[@class='toctree-l1']/a[@href='markup.html']",
'Testing various markup'),
# test unknown field names
(".//dt[@class='field-odd']", 'Field_name'),
(".//dt[@class='field-even']", 'Field_name all lower'),
(".//dt[@class='field-odd']", 'FIELD_NAME'),
(".//dt[@class='field-even']", 'FIELD_NAME ALL CAPS'),
(".//dt[@class='field-odd']", 'Field_Name'),
(".//dt[@class='field-even']", 'Field_Name All Word Caps'),
(".//dt[@class='field-odd']", 'Field_name'),
(".//dt[@class='field-even']", 'Field_name First word cap'),
(".//dt[@class='field-odd']", 'FIELd_name'),
(".//dt[@class='field-even']", 'FIELd_name PARTial caps'),
# custom sidebar
(".//h4", 'Custom sidebar'),
# docfields
(".//dd[@class='field-odd']/p/strong", '^moo$'),
(".//dd[@class='field-odd']/p/strong", tail_check(r'\(Moo\) .* Moo')),
(".//dd[@class='field-odd']/ul/li/p/strong", '^hour$'),
(".//dd[@class='field-odd']/ul/li/p/em", '^DuplicateType$'),
(".//dd[@class='field-odd']/ul/li/p/em", tail_check(r'.* Some parameter')),
# others
(".//a[@class='reference internal'][@href='#cmdoption-perl-arg-p']/code/span",
'perl'),
(".//a[@class='reference internal'][@href='#cmdoption-perl-arg-p']/code/span",
'\\+p'),
(".//a[@class='reference internal'][@href='#cmdoption-perl-objc']/code/span",
'--ObjC\\+\\+'),
(".//a[@class='reference internal'][@href='#cmdoption-perl-plugin-option']/code/span",
'--plugin.option'),
(".//a[@class='reference internal'][@href='#cmdoption-perl-arg-create-auth-token']"
"/code/span",
'create-auth-token'),
(".//a[@class='reference internal'][@href='#cmdoption-perl-arg-arg']/code/span",
'arg'),
(".//a[@class='reference internal'][@href='#cmdoption-hg-arg-commit']/code/span",
'hg'),
(".//a[@class='reference internal'][@href='#cmdoption-hg-arg-commit']/code/span",
'commit'),
(".//a[@class='reference internal'][@href='#cmdoption-git-commit-p']/code/span",
'git'),
(".//a[@class='reference internal'][@href='#cmdoption-git-commit-p']/code/span",
'commit'),
(".//a[@class='reference internal'][@href='#cmdoption-git-commit-p']/code/span",
'-p'),
],
'index.html': [
(".//meta[@name='hc'][@content='hcval']", ''),
(".//meta[@name='hc_co'][@content='hcval_co']", ''),
(".//dt[@class='label']/span[@class='brackets']", r'Ref1'),
(".//dt[@class='label']", ''),
(".//li[@class='toctree-l1']/a", 'Testing various markup'),
(".//li[@class='toctree-l2']/a", 'Inline markup'),
(".//title", 'Sphinx <Tests>'),
(".//div[@class='footer']", '<NAME> & Team'),
(".//a[@href='http://python.org/']"
"[@class='reference external']", ''),
(".//li/p/a[@href='genindex.html']/span", 'Index'),
(".//li/p/a[@href='py-modindex.html']/span", 'Module Index'),
(".//li/p/a[@href='search.html']/span", 'Search Page'),
# custom sidebar only for contents
(".//h4", 'Contents sidebar'),
# custom JavaScript
(".//script[@src='file://moo.js']", ''),
# URL in contents
(".//a[@class='reference external'][@href='http://sphinx-doc.org/']",
'http://sphinx-doc.org/'),
(".//a[@class='reference external'][@href='http://sphinx-doc.org/latest/']",
'Latest reference'),
# Indirect hyperlink targets across files
(".//a[@href='markup.html#some-label'][@class='reference internal']/span",
'^indirect hyperref$'),
],
'bom.html': [
(".//title", " File with UTF-8 BOM"),
],
'extensions.html': [
(".//a[@href='http://python.org/dev/']", "http://python.org/dev/"),
(".//a[@href='http://bugs.python.org/issue1000']", "issue 1000"),
(".//a[@href='http://bugs.python.org/issue1042']", "explicit caption"),
],
'genindex.html': [
# index entries
(".//a/strong", "Main"),
(".//a/strong", "[1]"),
(".//a/strong", "Other"),
(".//a", "entry"),
(".//li/a", "double"),
],
'footnote.html': [
(".//a[@class='footnote-reference brackets'][@href='#id9'][@id='id1']", r"1"),
(".//a[@class='footnote-reference brackets'][@href='#id10'][@id='id2']", r"2"),
(".//a[@class='footnote-reference brackets'][@href='#foo'][@id='id3']", r"3"),
(".//a[@class='reference internal'][@href='#bar'][@id='id4']", r"\[bar\]"),
(".//a[@class='reference internal'][@href='#baz-qux'][@id='id5']", r"\[baz_qux\]"),
(".//a[@class='footnote-reference brackets'][@href='#id11'][@id='id6']", r"4"),
(".//a[@class='footnote-reference brackets'][@href='#id12'][@id='id7']", r"5"),
(".//a[@class='fn-backref'][@href='#id1']", r"1"),
(".//a[@class='fn-backref'][@href='#id2']", r"2"),
(".//a[@class='fn-backref'][@href='#id3']", r"3"),
(".//a[@class='fn-backref'][@href='#id4']", r"bar"),
(".//a[@class='fn-backref'][@href='#id5']", r"baz_qux"),
(".//a[@class='fn-backref'][@href='#id6']", r"4"),
(".//a[@class='fn-backref'][@href='#id7']", r"5"),
(".//a[@class='fn-backref'][@href='#id8']", r"6"),
],
'otherext.html': [
(".//h1", "Generated section"),
(".//a[@href='_sources/otherext.foo.txt']", ''),
]
}))
@pytest.mark.sphinx('html', tags=['testtag'], confoverrides={
'html_context.hckey_co': 'hcval_co',
'html_experimental_html5_writer': True})
@pytest.mark.test_params(shared_result='test_build_html5_output')
def test_html5_output(app, cached_etree_parse, fname, expect):
app.build()
print(app.outdir / fname)
check_xpath(cached_etree_parse(app.outdir / fname), fname, *expect)
@pytest.mark.sphinx('html', tags=['testtag'], confoverrides={
'html_context.hckey_co': 'hcval_co',
'html_experimental_html5_writer': True})
@pytest.mark.test_params(shared_result='test_build_html_output')
def test_html_download(app):
app.build()
# subdir/includes.html
result = (app.outdir / 'subdir' / 'includes.html').text()
pattern = ('<a class="reference download internal" download="" '
'href="../(_downloads/.*/img.png)">')
matched = re.search(pattern, result)
assert matched
assert (app.outdir / matched.group(1)).exists()
filename = matched.group(1)
# includes.html
result = (app.outdir / 'includes.html').text()
pattern = ('<a class="reference download internal" download="" '
'href="(_downloads/.*/img.png)">')
matched = re.search(pattern, result)
assert matched
assert (app.outdir / matched.group(1)).exists()
assert matched.group(1) == filename
@pytest.mark.sphinx('html', testroot='roles-download',
confoverrides={'html_experimental_html5_writer': True})
def test_html_download_role(app, status, warning):
app.build()
digest = md5((app.srcdir / 'dummy.dat').encode()).hexdigest()
assert (app.outdir / '_downloads' / digest / 'dummy.dat').exists()
content = (app.outdir / 'index.html').text()
assert (('<li><p><a class="reference download internal" download="" '
'href="_downloads/%s/dummy.dat">'
'<code class="xref download docutils literal notranslate">'
'<span class="pre">dummy.dat</span></code></a></p></li>' % digest)
in content)
assert ('<li><p><code class="xref download docutils literal notranslate">'
'<span class="pre">not_found.dat</span></code></p></li>' in content)
assert ('<li><p><a class="reference download external" download="" '
'href="http://www.sphinx-doc.org/en/master/_static/sphinxheader.png">'
'<code class="xref download docutils literal notranslate">'
'<span class="pre">Sphinx</span> <span class="pre">logo</span>'
'</code></a></p></li>' in content)
|
StarcoderdataPython
|
1666869
|
<filename>IoT_Web/iotweb/views/token_view.py
from django.shortcuts import redirect
from django.http import HttpResponse
from django.template import loader
from http.server import HTTPStatus
from .User import User
import iotweb.views.urls_and_messages as UM
import requests
import json
def tokens(request, shdw_id):
"""
GET request: renders the token page
POST request: revokes a specific token
"""
user = User.get_instance()
if not request.POST:
template = loader.get_template('../templates/shadow_tokens.html')
url = UM.DB_URL+'getShadowTokens/{}/'.format(shdw_id)
headers = {'Authorization': 'Token {}'.format(user.user_token)}
req = requests.get(url=url, headers=headers)
if req.status_code == 200:
tkn_list = json.loads(req.text)['tokens']
context = {'tokens': [], 'email': user.user_email}
if tkn_list:
for tkn in tkn_list:
json_object = json.loads(tkn)
if json_object["revoked"]:
json_object['status'] = "REVOKED"
else:
json_object['status'] = "VALID"
context['tokens'].append(json_object)
context['shadow'] = shdw_id
return HttpResponse(template.render(context, request))
else:
template = loader.get_template('../templates/error_page.html')
context = {'code_error': req.status_code,
'message': req.text,
'error_name': HTTPStatus(req.status_code).phrase,
'back': '/profile/'
}
if req.status_code == 401:
context['message'] = context['message'] + UM.REFRESH_TOKEN
context['back'] = '/login/'
return HttpResponse(template.render(context, request))
else: # it's a post (to revoke a token)
url = UM.DB_URL + 'revokeToken/'
token = request.POST['token']
headers = {'Authorization': 'Token {}'.format(token)}
req = requests.get(url=url, headers=headers) # HERE THE TOKEN IS REVOKED
if req.status_code == 200:
return redirect('/viewDevices/{}/'.format(shdw_id))
else:
template = loader.get_template('../templates/error_page.html')
context = {'code_error': req.status_code,
'message': req.text,
'error_name': HTTPStatus(req.status_code).phrase,
'back': '/login/'
}
if req.status_code == 401:
context['message'] = context['message'] + UM.REFRESH_TOKEN
context['back'] = '/login/'
return HttpResponse(template.render(context, request))
def new_token(request, shdw_id):
    """Generates a new token and refreshes the page."""
user = User.get_instance()
url = UM.DB_URL+'generateToken/'
headers = {'Authorization': 'Token {}'.format(user.user_token)}
data = {'shadow_id': shdw_id, 'type': 'DEVICE'}
req = requests.post(url=url, data=data, headers=headers) # HERE THE TOKEN IS CREATED
if req.status_code == 200:
tkn_id = json.loads(req.text)['token']
url_update_shadow = UM.DB_URL+'updateShadow/{}/'.format(shdw_id)
data_update = {'token': tkn_id}
req_update = requests.post(url=url_update_shadow, data=data_update, headers=headers) # HERE WE UPDATE THE SHADOW
if req_update.status_code == 200:
return redirect('/viewTokens/{}/'.format(shdw_id))
else:
template = loader.get_template('../templates/error_page.html')
context = {'code_error': req_update.status_code,
'message': req_update.text,
'error_name': HTTPStatus(req_update.status_code).phrase,
'back': '/viewTokens/{}/'.format(shdw_id)
}
if req_update.status_code == 401:
context['message'] = context['message'] + UM.REFRESH_TOKEN
context['back'] = '/login/'
return HttpResponse(template.render(context, request))
else:
template = loader.get_template('../templates/error_page.html')
context = {'code_error': req.status_code,
'message': req.text,
'error_name': HTTPStatus(req.status_code).phrase,
'back': '/viewDevices/{}/'.format(shdw_id)
}
if req.status_code == 401:
context['message'] = context['message'] + UM.REFRESH_TOKEN
context['back'] = '/login/'
return HttpResponse(template.render(context, request))
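# Routing sketch (assumed, not part of this file): based on the redirects above,
# these views would be wired up in the project's urls.py roughly as
#
#     path('viewTokens/<str:shdw_id>/', token_view.tokens),
#     path('newToken/<str:shdw_id>/', token_view.new_token),
#
# Only '/viewTokens/<shdw_id>/' is implied by the code itself; the URL for
# new_token is an assumption.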
|
StarcoderdataPython
|
3370606
|
<filename>UT330/UT330.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Provides a cross-platform Python interface for the UNI-T 330A/B/C temperature,
humidity, and pressure data loggers.
This code controls a UNI-T 330 A/B/C device via a cross-platform Python script.
The device accepts commands and provides responses. I’ve decoded all the
commands and responses and put them into this script. There are a few bytes in
both the commands and responses that I couldn’t figure out. If you know what
they are, please add them.
My device is a UNI-T 330B which only has temperature and humidity. The commands
for this device have placeholders for pressure, which I’ve added to my code,
but of course, I can’t check the pressure readings are correct.
Created on Wed Mar 2 18:10:21 2016
@author: <NAME>
"""
# =============================================================================
# Imports
# =============================================================================
import datetime
import serial.tools.list_ports
import time
# =============================================================================
# Module info
# =============================================================================
__author__ = "<NAME>"
__copyright__ = "2016 <NAME>"
__credits__ = "<NAME>"
__license__ = "MIT"
# =============================================================================
# Function decorators
# =============================================================================
def buffer_safety(func):
"""There can be timing errors where a read takes place when the buffer
is either partially written or not written at all. These errors can be
removed by a short pause of 10ms. This function decorator makes sure
there's at least 10ms between calls."""
def buffer_protection(self, argument=None):
# If we're less than 10ms since the last call, wait 10ms
if datetime.datetime.now() - self._last_op_time \
< datetime.timedelta(0, 0, 10000):
time.sleep(0.01)
# Read functions have no arguments, write functions have one
if argument is None:
data = func(self)
else:
data = func(self, argument)
# We don't know how long the operation took, so use the current time
# as the last op time
self._last_op_time = datetime.datetime.now()
return data
return buffer_protection
# =============================================================================
# Functions
# =============================================================================
table = (
0x0000, 0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241,
0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440,
0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40,
0x0A00, 0xCAC1, 0xCB81, 0x0B40, 0xC901, 0x09C0, 0x0880, 0xC841,
0xD801, 0x18C0, 0x1980, 0xD941, 0x1B00, 0xDBC1, 0xDA81, 0x1A40,
0x1E00, 0xDEC1, 0xDF81, 0x1F40, 0xDD01, 0x1DC0, 0x1C80, 0xDC41,
0x1400, 0xD4C1, 0xD581, 0x1540, 0xD701, 0x17C0, 0x1680, 0xD641,
0xD201, 0x12C0, 0x1380, 0xD341, 0x1100, 0xD1C1, 0xD081, 0x1040,
0xF001, 0x30C0, 0x3180, 0xF141, 0x3300, 0xF3C1, 0xF281, 0x3240,
0x3600, 0xF6C1, 0xF781, 0x3740, 0xF501, 0x35C0, 0x3480, 0xF441,
0x3C00, 0xFCC1, 0xFD81, 0x3D40, 0xFF01, 0x3FC0, 0x3E80, 0xFE41,
0xFA01, 0x3AC0, 0x3B80, 0xFB41, 0x3900, 0xF9C1, 0xF881, 0x3840,
0x2800, 0xE8C1, 0xE981, 0x2940, 0xEB01, 0x2BC0, 0x2A80, 0xEA41,
0xEE01, 0x2EC0, 0x2F80, 0xEF41, 0x2D00, 0xEDC1, 0xEC81, 0x2C40,
0xE401, 0x24C0, 0x2580, 0xE541, 0x2700, 0xE7C1, 0xE681, 0x2640,
0x2200, 0xE2C1, 0xE381, 0x2340, 0xE101, 0x21C0, 0x2080, 0xE041,
0xA001, 0x60C0, 0x6180, 0xA141, 0x6300, 0xA3C1, 0xA281, 0x6240,
0x6600, 0xA6C1, 0xA781, 0x6740, 0xA501, 0x65C0, 0x6480, 0xA441,
0x6C00, 0xACC1, 0xAD81, 0x6D40, 0xAF01, 0x6FC0, 0x6E80, 0xAE41,
0xAA01, 0x6AC0, 0x6B80, 0xAB41, 0x6900, 0xA9C1, 0xA881, 0x6840,
0x7800, 0xB8C1, 0xB981, 0x7940, 0xBB01, 0x7BC0, 0x7A80, 0xBA41,
0xBE01, 0x7EC0, 0x7F80, 0xBF41, 0x7D00, 0xBDC1, 0xBC81, 0x7C40,
0xB401, 0x74C0, 0x7580, 0xB541, 0x7700, 0xB7C1, 0xB681, 0x7640,
0x7200, 0xB2C1, 0xB381, 0x7340, 0xB101, 0x71C0, 0x7080, 0xB041,
0x5000, 0x90C1, 0x9181, 0x5140, 0x9301, 0x53C0, 0x5280, 0x9241,
0x9601, 0x56C0, 0x5780, 0x9741, 0x5500, 0x95C1, 0x9481, 0x5440,
0x9C01, 0x5CC0, 0x5D80, 0x9D41, 0x5F00, 0x9FC1, 0x9E81, 0x5E40,
0x5A00, 0x9AC1, 0x9B81, 0x5B40, 0x9901, 0x59C0, 0x5880, 0x9841,
0x8801, 0x48C0, 0x4980, 0x8941, 0x4B00, 0x8BC1, 0x8A81, 0x4A40,
0x4E00, 0x8EC1, 0x8F81, 0x4F40, 0x8D01, 0x4DC0, 0x4C80, 0x8C41,
0x4400, 0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641,
0x8201, 0x42C0, 0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040)
def modbusCRC(data):
"""Returns the Modbus CRC as two bytes. Be careful of the order."""
    # If the contents of the data list are not all integers, this function
    # will have problems. A future improvement is to check that all elements
    # are ints.
crc = 0xFFFF
for number in data:
crc = (crc >> 8) ^ table[(crc ^ number) & 0xFF]
MSB = crc >> 8 # Most Significant Byte
LSB = crc & 255 # Least Significant Byte
return MSB, LSB
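# Example (sketch, not part of the original code): the UT330 command frames
# built in the class below append the CRC low byte first, then the high byte,
# which is why the methods assign "buffer[n + 1], buffer[n] = modbusCRC(...)".
# For instance:
#     msb, lsb = modbusCRC([0xab, 0xcd, 0x03, 0x18])
#     frame = [0xab, 0xcd, 0x03, 0x18, lsb, msb]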
# =============================================================================
# class UT330
# =============================================================================
class UT330(object):
"""Provides an object-based interface to the UT330.
Here are the commands I know about
# 0x10 - set configuration info
# 0x11 - read configuration info
# 0x12 - synch time
# 0x16 - set offsets
# 0x17 - read offsets
# 0x18 - delete data
# 0x19 - read data
# 0x20 - factory reset
# 0x51 - get device name
"""
def __init__(self):
# The time of the last function call. Set to min initially because
# there hasn't been a last call when the software starts.
self._last_op_time = datetime.datetime.min
# The PySerial object
self._ut330 = None
# Input and output buffer object
self._buffer = None
# Index to the position of the current element being processed
self._index = 0
# Time to wait before timing out
self._read_timeout = 5
self._write_timeout = 5
def __del__(self):
self.disconnect()
def connect(self):
"""Connects to the device or raises an error"""
# Get the port the device is connected to
# ---------------------------------------
port = None
# Get all the serial ports
port_list = serial.tools.list_ports.comports()
# Now find which port has our device
for trial in port_list:
# I'm not sure this is specific enough for general use. It may
# give a false report if another device using the same controller
# is connected. However, I can't find a more specific check.
if trial.vid == 4292 and trial.pid == 60000:
port = trial
if port is None:
raise IOError('Error! The UT330 device was not detected on any '
'USB port.')
# Attempt a connection to the port
# --------------------------------
self._ut330 = serial.Serial(port=port.device,
baudrate=115200,
timeout=self._read_timeout,
write_timeout=self._write_timeout)
# Check that the serial port is open
# ----------------------------------
if not self._ut330.isOpen():
raise IOError('Error! The UT330 is not open on the serial port.')
def __enter__(self):
"""Function to make this class work with Python's with statement"""
self.connect()
return self
def __exit__(self, type_ex, value_ex, traceback_ex):
"""Function to make this class work with Python's with statement"""
self.disconnect()
def _read_buffer(self, byte_count):
"""Reads the contents of the buffer and returns it as an integer list.
"""
        # If page_size is set much larger than this value, we tend to get
        # problems with partially filled buffers
page_size = 32768
self._buffer = []
        # Read in the data in as large chunks as possible to speed up reading.
        # Read in the largest possible chunks first.
for i in range(byte_count / page_size):
self._buffer += self._ut330.read(page_size)
# Now read in the smallest chunk.
self._buffer += self._ut330.read(byte_count % page_size)
        # Convert the data to an integer list
self._buffer = [ord(i) for i in self._buffer]
def _write_buffer(self):
"""Writes the command string to the buffer"""
bytes_written = self._ut330.write(bytearray(self._buffer))
if bytes_written != len(self._buffer):
raise ValueError('Error! _write_buffer: not all command bytes '
'written')
def _get_datetime(self):
"""Returns the date and time as a timestamp"""
timestamp = datetime.datetime(2000 + self._buffer[self._index],
self._buffer[self._index + 1],
self._buffer[self._index + 2],
self._buffer[self._index + 3],
self._buffer[self._index + 4],
self._buffer[self._index + 5])
return timestamp
def _get_temperature(self):
"""Returns the temperature from the device buffer data - including
negative temperatures"""
# Look to see if the temperature's negative - using two's complement
# to represent negative numbers
if self._buffer[self._index + 1] >= 128:
temperature = -float(256*(self._buffer[self._index + 1] ^ 0xff) +
(self._buffer[self._index] ^ 0xff) + 1)/10
# Temperature is positive
else:
temperature = float(256*self._buffer[self._index + 1] +
self._buffer[self._index])/10
return temperature
def _get_name(self):
"""Retrieves the device name from the buffer data"""
temp = self._buffer[self._index: self._index + 10]
return ''.join(chr(entry) for entry in temp).strip()
def disconnect(self):
"""Disconnect the device"""
if self._ut330 is not None:
self._ut330.close()
@buffer_safety
def read_data(self):
"""Downloads the device buffer data (temperature, humidity, pressure),
and decodes it"""
# We split this function into a header and data part to speed up
# reading. Reading the header tells us how much data there is in the
# data part
# The read data command
self._buffer = [0xab, 0xcd, 0x03, 0x19, 0x70, 0xc5]
# Write the command
self._write_buffer()
# Read the header
# ---------------
# Now get the header data from the buffer
self._read_buffer(8)
# Check that some data has actually been returned
if 0 == len(self._buffer):
print "Warning! Empty buffer returned by device "
return []
# Get the length of data in the buffer
length = (self._buffer[4] + 256*self._buffer[5] +
256*256*self._buffer[6] + 256*256*256*self._buffer[7])
# Check that there's actually some data on the device - 22 is the
# minimum buffer length if there's actually data
if length < 22:
# Need to read the CRC code and so clear the buffer before
# returning - gives an error later if this isn't done.
self._read_buffer(2)
print "Warning! No temperature/humidity/pressure data on the " \
"device"
return []
# Now get the data
# ----------------
self._read_buffer(length)
self._index = 0 # This is the offset of the first data item
# The output data structure
data = []
# Loop over every set of readings
while self._index < length - 2:
timestamp = self._get_datetime()
self._index += 6
temperature = self._get_temperature()
self._index += 2
humidity = float(self._buffer[self._index] +
256*self._buffer[self._index + 1])/10
            # The high byte is assumed to be at index + 3, mirroring the
            # humidity layout above (the original reused index + 2 twice,
            # which cannot be right; I can't verify pressure on a UT330B)
            pressure = float(self._buffer[self._index + 2] +
                             256*self._buffer[self._index + 3])/10
self._index += 4
data.append({'Timestamp': timestamp,
'Temperature (C)': temperature,
'Relative humidity (%)': humidity,
'Pressure (Pa)': pressure})
return data
@buffer_safety
def delete_data(self):
"""Deletes the temperature, humidity, and pressure data from the
device"""
# The delete command
self._buffer = [0xab, 0xcd, 0x03, 0x18, 0xb1, 0x05]
self._buffer[5], self._buffer[4] = modbusCRC(self._buffer[0:4])
# Write the command
self._write_buffer()
# Now get the response data from the buffer
self._read_buffer(7)
# Check the return code shows the data was correctly deleted
if [171, 205, 4, 24, 0, 116, 181] != self._buffer:
raise IOError("Error! Delete data returned error code.")
@buffer_safety
def read_config(self):
"""Read the configuration data from the device, saves it to disk"""
# Send the read info command to the device
self._buffer = [0xab, 0xcd, 0x03, 0x11, 0x71, 0x03]
# Write the command
self._write_buffer()
# Now get the data from the buffer. We know the returned length will
# be 46.
self._read_buffer(46)
# Now, interpret the data in the buffer
config = {}
# Get the device name
self._index = 4
config['device name'] = self._get_name()
# I don't know what bytes 15 to 19 are
config['sampling interval'] = (256*256*self._buffer[22] +
256*self._buffer[21] +
self._buffer[20])
config['readings count'] = 256*self._buffer[24] + self._buffer[23]
config['readings limit'] = 256*self._buffer[26] + self._buffer[25]
config['battery power'] = self._buffer[27]
config['overwrite records'] = bool(self._buffer[28])
config['delay start'] = bool(self._buffer[29])
config['delay timing'] = (256*256*self._buffer[32] +
256*self._buffer[31] +
self._buffer[30])
# I don't know what byte 33 is
# It's possible the high temp alarm could be negative
if self._buffer[34] < 128:
config['high temperature alarm'] = self._buffer[34]
else:
config['high temperature alarm'] = -256 + self._buffer[34]
# It's possible the low temperature alarm could be positive
if self._buffer[35] >= 128:
config['low temperature alarm'] = -256 + self._buffer[35]
else:
config['low temperature alarm'] = self._buffer[35]
config['high humidity alarm'] = self._buffer[36]
config['low humidity alarm'] = self._buffer[37]
self._index = 38
config['timestamp'] = self._get_datetime()
return config
@buffer_safety
def write_config(self, config):
"""Sets the configuration information on the device"""
# The command to send, note we'll be overriding some bytes
self._buffer = [0xab, 0xcd, 0x1a, 0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
# Check config parameters
# -----------------------
if len(config['device name']) > 10:
raise ValueError('Error! device name {0} is {1} characters when '
'it can be a maximum of 10'.
format(config['device name'],
len(config['device name'])))
if len(config['device name']) == 0:
raise ValueError('Error! device name is length zero, it needs '
'to be more than zero characters')
if config['sampling interval'] < 0 or \
config['sampling interval'] > 86400:
raise ValueError('Error! sampling interval is {0} but it must be '
'between 0 and 86400'.
format(config['sampling interval']))
if config['delay timing'] < 0 or config['delay timing'] > 604800:
raise ValueError('Error! delay timing is {0} but it must be '
'between 0 and 604800'.
format(config['delay timing']))
# Prepare the data for writing
# ----------------------------
# Add the device name - pad to 10 characters with spaces
for idx, val in enumerate(config['device name'][0:10].rjust(10, ' ')):
self._buffer[4 + idx] = ord(val)
# Add the sampling interval
self._buffer[14] = config['sampling interval'] & 0xff
self._buffer[15] = (config['sampling interval'] & 0x00ff00) >> 8
self._buffer[16] = (config['sampling interval'] & 0xff0000) >> 16
self._buffer[17] = int(config['overwrite records'])
self._buffer[18] = int(config['delay start'])
# Delay timing
self._buffer[19] = config['delay timing'] & 0xff
self._buffer[20] = (config['delay timing'] & 0x00ff00) >> 8
self._buffer[21] = (config['delay timing'] & 0xff0000) >> 16
self._buffer[22] = 0 # I don't know what this byte is
if config['high temperature alarm'] >= 0:
self._buffer[23] = config['high temperature alarm']
else:
self._buffer[23] = 256 + config['high temperature alarm']
if config['low temperature alarm'] < 0:
self._buffer[24] = 256 + config['low temperature alarm']
else:
self._buffer[24] = config['low temperature alarm']
self._buffer[25] = config['high humidity alarm']
self._buffer[26] = config['low humidity alarm']
# Add the CRC bytes
self._buffer[28], self._buffer[27] = modbusCRC(self._buffer[0:27])
# Write the buffer
self._write_buffer()
# Now get the response data from the buffer
self._read_buffer(7)
# Check the return code shows the data was correctly written
if [171, 205, 4, 16, 0, 115, 117] != self._buffer:
raise IOError("Error! Config writing returned error code.")
@buffer_safety
def write_date_time(self, timestamp):
"""Syncs the time to the timestamp"""
# The command to send, note we'll be overriding some bytes
self._buffer = [0xab, 0xcd, 0x09, 0x12, 0, 0, 0, 0, 0, 0, 0, 0]
self._buffer[4] = timestamp.year - 2000
self._buffer[5] = timestamp.month
self._buffer[6] = timestamp.day
self._buffer[7] = timestamp.hour
self._buffer[8] = timestamp.minute
self._buffer[9] = timestamp.second
# Add the CRC bytes
self._buffer[11], self._buffer[10] = modbusCRC(self._buffer[0:10])
self._write_buffer()
# Now get the response data from the buffer
self._read_buffer(7)
# Check the return code shows the data was correctly written
if [171, 205, 4, 18, 0, 114, 21] != self._buffer:
raise IOError("Error! Writing datetime returned error code.")
@buffer_safety
def read_offsets(self):
"""Reads the temperature, humidity, pressure offset"""
self._buffer = [0xab, 0xcd, 0x03, 0x17, 0xF1, 0x01]
self._write_buffer()
# Now get the response data from the buffer. The returned buffer length
# is known to be 18.
self._read_buffer(18)
# Decode the data
offsets = {}
self._index = 4
offsets['temperature'] = self._get_temperature()
if self._buffer[6] < 128:
offsets['temperature offset'] = float(self._buffer[6]) / 10
else:
offsets['temperature offset'] = float(self._buffer[6] - 256) / 10
offsets['humidity'] = float(256*self._buffer[8] + self._buffer[7]) / 10
if self._buffer[9] < 128:
offsets['humidity offset'] = float(self._buffer[9]) / 10
else:
offsets['humidity offset'] = float(self._buffer[9] - 256) / 10
offsets['pressure'] = float(256*self._buffer[11] + self._buffer[10])/10
if self._buffer[12] < 128:
offsets['pressure offset'] = float(self._buffer[12]) / 10
else:
offsets['pressure offset'] = float(self._buffer[12] - 256) / 10
# I don't know what bytes 13, 14, and 15 are
return offsets
@buffer_safety
def write_offsets(self, offsets):
"""Set the device offsets for temperature, humidity, pressure"""
# Check for errors in parameters
if offsets['temperature offset'] > 6.1 or \
offsets['temperature offset'] < -6:
raise ValueError('Error! The temperature offset is {0} when it '
'must be between -6 and 6.1 C'.
format(offsets['temperature offset']))
if offsets['humidity offset'] > 6.1 or offsets['humidity offset'] < -6:
raise ValueError('Error! The humidity offset is {0} when it must '
'be between -6% and 6.1%'.
format(offsets['humidity offset']))
if offsets['pressure offset'] > 6.1 or offsets['pressure offset'] < -6:
raise ValueError('Error! The pressure offset is {0} when it must '
'be between -6hpa and 6.1hpa'.
format(offsets['pressure offset']))
# The command to send, note we'll be overriding some bytes
self._buffer = [0xab, 0xcd, 0x06, 0x16, 0, 0, 0, 0, 0]
if offsets['temperature offset'] < 0:
self._buffer[4] = 256 - int(offsets['temperature offset']*10)
else:
self._buffer[4] = int(offsets['temperature offset']*10)
if offsets['humidity offset'] < 0:
self._buffer[5] = 256 - int(offsets['humidity offset']*10)
else:
self._buffer[5] = int(offsets['humidity offset']*10)
if offsets['pressure offset'] < 0:
self._buffer[6] = 256 - int(offsets['pressure offset']*10)
else:
self._buffer[6] = int(offsets['pressure offset']*10)
# Add the CRC bytes
self._buffer[8], self._buffer[7] = modbusCRC(self._buffer[0:7])
self._write_buffer()
# Now get the response data from the buffer
self._read_buffer(7)
# Check the return code shows the data was correctly written
if [171, 205, 4, 22, 0, 112, 213] != self._buffer:
raise IOError("Error! Offset writing returned error code.")
@buffer_safety
def restore_factory(self):
"""This command is given as a factory reset in the Windows software"""
self._buffer = [0xab, 0xcd, 0x03, 0x20, 0xb0, 0xd7]
self._write_buffer()
# Now get the data from the buffer
self._read_buffer(7)
# Check the return code shows the data was correctly written
if [171, 205, 4, 32, 0, 103, 117] != self._buffer:
raise IOError("Error! Restore factory returned an error code.")
@buffer_safety
def read_device_name(self):
"""Returns the device name"""
self._buffer = [0xab, 0xcd, 0x03, 0x51, 0x70, 0xF3]
self._write_buffer()
# Now get the response data from the buffer, we know the length is
# fixed to 16 bytes
self._read_buffer(16)
self._index = 4
return self._get_name()
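# -----------------------------------------------------------------------------
# Usage sketch (not part of the original script). It assumes a UT330 device is
# attached over USB and that the command decoding above is correct; the output
# keys are the ones built in read_data().
# -----------------------------------------------------------------------------
if __name__ == '__main__':
    with UT330() as device:
        print(device.read_device_name())
        print(device.read_config())
        for reading in device.read_data():
            print(reading)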
|
StarcoderdataPython
|
3307365
|
import time
import requests
import os
import datetime as dt
# import telegram
BOT_TOKEN = os.environ.get('BOT_TOKEN')
# checking status every 12 hours
SLEEP_INTERVAL = 43200
FINANCE_URL = "http://resources.finance.ua/ua/public/currency-cash.json"
def send_telegram(dollar):
# chat = "-383060434"
# chat_test = "-277675492"
chat_viki = "232590195"
# bot = telegram.Bot(token=token)
# print(bot.get_me())
requests.get(f"https://api.telegram.org/bot{BOT_TOKEN}/sendMessage?chat_id={chat_viki}&text=Hallo!💲%20Dollar={dollar}.")
# Initialise the weekly flag once, outside the loop, so it actually suppresses
# duplicate sends on the same day
sent_weekly = 0
while True:
    myResponse = requests.get(FINANCE_URL)
    if myResponse.status_code != 200:
        print('Not Found')
        time.sleep(SLEEP_INTERVAL)
        continue
    content = myResponse.json()
date = content["date"]
for bank in content["organizations"]:
if bank["id"] == "7oiylpmiow8iy1smgg3":
usd = float(bank["currencies"]["USD"]["ask"])
#every monday and thursday
if sent_weekly == 0 and (dt.date.today().isoweekday() == 1 or dt.date.today().isoweekday() == 4):
print("Sending weekly...")
send_telegram(usd)
sent_weekly = 1
else:
sent_weekly = 0
#if is interesting
if 26.40 < usd < 26.96:
print("Sending message...")
send_telegram(usd)
print("Sleeping for {} seconds".format(SLEEP_INTERVAL))
time.sleep(SLEEP_INTERVAL)
|
StarcoderdataPython
|
3351900
|
<filename>src/autogluon_contrib_nlp/data/tokenizers/huggingface.py
__all__ = ['HuggingFaceTokenizer', 'HuggingFaceBPETokenizer', 'HuggingFaceWordPieceTokenizer',
'HuggingFaceByteBPETokenizer']
import os
import json
from pkg_resources import parse_version
from typing import Optional, Union, List, Tuple
from collections import OrderedDict
from uuid import uuid4
from .base import *
from ..vocab import Vocab, load_vocab
from ...utils.lazy_imports import try_import_huggingface_tokenizers
# Disable the TOKENIZERS_PARALLEL as suggested by the huggingface.
os.environ['TOKENIZERS_PARALLELISM'] = 'false'
def is_new_version_model_file(model_file_path: str) -> bool:
"""Check whether the model file belongs to the new version of HuggingFace Tokenizers,
i.e., >= 0.8
Parameters
----------
model_file_path
Path to the model file
Returns
-------
is_new_version
Whether the model file is generated by the new version of huggingface tokenizer.
"""
with open(model_file_path, 'r', encoding='utf-8') as f:
try:
_ = json.load(f)
return True
except Exception:
return False
def hf_encode(model, sentences, output_type: type = str):
"""
Parameters
----------
model
Model object in HuggingFace tokenizer
sentences
Input sentences
output_type
Output type
Returns
-------
    ret
        The encoded tokens (when ``output_type`` is ``str``) or token ids
        (when ``output_type`` is ``int``). A list of input sentences yields a
        list of such lists.
    """
is_multi_sentences = isinstance(sentences, list)
if not is_multi_sentences:
sentences = [sentences]
encode_sentences = model.encode_batch(sentences, add_special_tokens=False)
if output_type is str:
ret = [encode_sentence.tokens for encode_sentence in encode_sentences]
elif output_type is int:
ret = [encode_sentence.ids for encode_sentence in encode_sentences]
else:
raise TokenTypeNotSupportedError(output_type)
if is_multi_sentences:
return ret
else:
return ret[0]
def hf_encode_with_offsets(model, sentences, output_type: type = str):
    """Same as `hf_encode`, but also returns the character offsets of each
    token within the corresponding input sentence."""
is_multi_sentences = isinstance(sentences, list)
if not is_multi_sentences:
sentences = [sentences]
encode_sentences = model.encode_batch(sentences, add_special_tokens=False)
if output_type is str:
ret = [encode_sentence.tokens for encode_sentence in encode_sentences]
offsets = [encode_sentence.offsets for encode_sentence in encode_sentences]
elif output_type is int:
ret = [encode_sentence.ids for encode_sentence in encode_sentences]
offsets = [encode_sentence.offsets for encode_sentence in encode_sentences]
else:
raise TokenTypeNotSupportedError(output_type)
if is_multi_sentences:
return ret, offsets
else:
return ret[0], offsets[0]
def hf_decode(model, tokens):
    """Decodes tokens (str) or token ids (int) back into sentence(s) using the
    given HuggingFace tokenizer model."""
is_multiple_sentences = is_tokens_from_multiple_sentences(tokens)
if not is_multiple_sentences:
tokens = [tokens]
token_type = get_token_type(tokens)
if token_type is str:
id_tokens = [[model.token_to_id(token) for token in sentence] for sentence in
tokens]
ret = model.decode_batch(id_tokens)
elif token_type is int:
ret = model.decode_batch(tokens)
else:
raise TokenTypeNotSupportedError(token_type)
if is_multiple_sentences:
return ret
else:
return ret[0]
@TOKENIZER_REGISTRY.register('hf_tokenizer')
class HuggingFaceTokenizer(BaseTokenizerWithVocab):
def __init__(self, model_path: Optional[str] = None,
vocab: Optional[str] = None):
tokenizers = try_import_huggingface_tokenizers()
assert parse_version(tokenizers.__version__) >= parse_version('0.8'), \
'Only support tokenizers>=0.8. You can upgrade tokenizers via ' \
'`python3 -m pip install --upgrade tokenizers`.'
self._model_path = model_path
self._model = tokenizers.Tokenizer.from_file(model_path)
hf_vocab = self._model.get_vocab()
with open(model_path, 'r', encoding='utf-8') as f:
model_info = json.load(f)
self._model_info = model_info
added_tokens = model_info['added_tokens']
if vocab is not None:
self._vocab = load_vocab(vocab)
else:
sorted_hf_vocab_kv = sorted(list(hf_vocab.items()), key=lambda x: x[1])
for i, ele in enumerate(sorted_hf_vocab_kv):
assert ele[1] == i
all_tokens = [ele[0] for ele in sorted_hf_vocab_kv]
special_tokens = [token['content'] for token in added_tokens]
special_token_keys = []
no_valid_name_key_cnt = 0
for special_token in special_tokens:
if special_token.startswith('<') and special_token.endswith('>') \
and len(special_token) > 2:
key = special_token[1:-1] + '_token'
else:
key = 'special{}_token'.format(no_valid_name_key_cnt)
no_valid_name_key_cnt += 1
assert key not in special_token_keys
special_token_keys.append(key)
self._vocab = Vocab(all_tokens,
**{key: token
for key, token in zip(special_token_keys, special_tokens)})
# Verify the special tokens
for added_token in added_tokens:
assert self._vocab[added_token['content']] == added_token['id']
assert added_token['content'] in self._vocab.special_tokens
# Verify all tokens exist
for token, idx in hf_vocab.items():
assert self._vocab[token] == idx
if self._model_info['decoder']['type'] == 'BPEDecoder':
self._last_subtoken_id_set =\
frozenset([i for i, ele in enumerate(self._vocab.all_tokens)
if ele.endswith('</w>')])
elif self._model_info['decoder']['type'] == 'WordPiece':
self._first_subtoken_id_set =\
frozenset([i for i, ele in enumerate(self._vocab.all_tokens)
if not ele.startswith('##')])
def encode(self, sentences: SentencesType,
output_type: type = str) -> Union[TokensType, TokenIDsType]:
return hf_encode(self._model, sentences, output_type)
def decode(self, tokens: Union[TokensType, TokenIDsType]) -> SentencesType:
return hf_decode(self._model, tokens)
def encode_with_offsets(self, sentences: SentencesType,
output_type: type = str) -> Tuple[Union[TokensType, TokenIDsType],
TokenOffsetsType]:
return hf_encode_with_offsets(self._model, sentences, output_type)
@property
def model_type(self):
return self._model_info['decoder']['type']
@property
def model_info(self):
"""Get the model info."""
return self._model_info
def is_last_subword(self, tokens):
"""Whether the sub-token is the last sub-token in a split token list.
Only supports the case when the tokenizer is a HuggingFaceBPETokenizer
Parameters
----------
tokens
A single token or a list of tokens
Returns
-------
ret
The results
"""
assert self.model_type == 'BPEDecoder',\
'Only supports BPE model. The model_type={}'.format(self.model_type)
if isinstance(tokens, str):
return tokens.endswith('</w>')
elif isinstance(tokens, int):
return tokens in self._last_subtoken_id_set
elif isinstance(tokens, list):
if len(tokens) == 0:
return []
            if isinstance(tokens[0], str):
                return [ele.endswith('</w>') for ele in tokens]
            elif isinstance(tokens[0], int):
                return [ele in self._last_subtoken_id_set for ele in tokens]
else:
raise NotImplementedError
else:
raise NotImplementedError
def is_first_subword(self, tokens):
"""Whether the sub-token is the first sub-token in a token list.
Only supports the case when the tokenizer is a HuggingFaceWordPieceTokenizer
Parameters
----------
tokens
A single token or a list of tokens
Returns
-------
ret
The results
"""
assert self.model_type == 'WordPiece', \
'Only supports WordPiece model. The model_type={}'.format(self.model_type)
if isinstance(tokens, str):
return not tokens.startswith('##')
elif isinstance(tokens, int):
return tokens in self._first_subtoken_id_set
elif isinstance(tokens, list):
if len(tokens) == 0:
return []
if isinstance(tokens[0], str):
return [not ele.startswith('##') for ele in tokens]
elif isinstance(tokens[0], int):
return [ele in self._first_subtoken_id_set for ele in tokens]
else:
raise NotImplementedError
else:
raise NotImplementedError
@property
def vocab(self) -> Optional[Vocab]:
return self._vocab
def set_vocab(self, vocab):
raise NotImplementedError('Cannot set vocabulary for the HuggingFaceTokenizer.')
def __repr__(self):
ret = '{}(\n' \
' type = {}\n' \
' model_path = {}\n' \
' normalizer = {}\n' \
' vocab = {}\n' \
')'.format(self.__class__.__name__,
self._model_info['decoder']['type'],
self._model_path,
self._model_info['normalizer'],
self._vocab)
return ret
class LegacyHuggingFaceTokenizer(BaseTokenizerWithVocab):
def __init__(self):
self._vocab = None
self._model = None
def encode(self, sentences: SentencesType,
output_type: type = str) -> Union[TokensType, TokenIDsType]:
return hf_encode(self._model, sentences, output_type)
def decode(self, tokens: Union[TokensType, TokenIDsType]) -> SentencesType:
return hf_decode(self._model, tokens)
def encode_with_offsets(self, sentences: SentencesType,
output_type: type = str) -> Tuple[Union[TokensType, TokenIDsType],
TokenOffsetsType]:
return hf_encode_with_offsets(self._model, sentences, output_type)
@property
def vocab(self) -> Optional[Vocab]:
return self._vocab
def set_vocab(self, vocab):
raise NotImplementedError('Cannot set vocabulary for the HuggingFaceTokenizer.')
@TOKENIZER_REGISTRY.register('hf_bpe')
class HuggingFaceBPETokenizer(LegacyHuggingFaceTokenizer):
def __init__(self, merges_file: Optional[str] = None,
vocab_file: Optional[str] = None,
unk_token: Optional[str] = Vocab.UNK_TOKEN,
suffix: Optional[str] = '</w>',
dropout: Optional[float] = None,
lowercase: bool = False):
"""
Parameters
----------
merges_file
The merges file saved by HuggingFace
vocab_file
Vocabulary file in GluonNLP
unk_token
The unknown token
suffix
The suffix for sub-tokens. For example, "Sunnyvale" will be "Sunny vale</w>"
dropout
Ratio of the BPE-Dropout
lowercase
Whether to lowercase the input before tokenizer
"""
super().__init__()
self._merges_file = merges_file
self._vocab_file = vocab_file
self._unk_token = unk_token
self._suffix = suffix
self._dropout = dropout
self._lowercase = lowercase
self.__rebuild_tokenizer()
self._last_subword_id_set = frozenset([self._vocab[ele]
for ele in self._vocab.all_tokens
if ele.endswith(self._suffix)])
def is_last_subword(self, tokens: Union[str, int, List[str], List[int]]) \
-> Union[bool, List[bool]]:
"""Whether the token is the last subword token. This can be used for whole-word masking.
Parameters
----------
tokens
The input tokens
Returns
-------
ret
Whether the token is the last subword token in the list of subwords.
"""
if isinstance(tokens, str):
return tokens.endswith(self._suffix)
elif isinstance(tokens, int):
return tokens in self._last_subword_id_set
elif isinstance(tokens, list):
if len(tokens) == 0:
return []
if isinstance(tokens[0], str):
return [ele.endswith(self._suffix) for ele in tokens]
elif isinstance(tokens[0], int):
return [ele in self._last_subword_id_set for ele in tokens]
else:
raise NotImplementedError
else:
raise NotImplementedError
def set_bpe_dropout(self, bpe_dropout: float):
"""Set the BPE Dropout of the tokenizer
Parameters
----------
bpe_dropout
The BPE Dropout ratio
"""
self._dropout = bpe_dropout
self.__rebuild_tokenizer()
    def set_lowercase(self, lowercase: bool):
"""Set the lowercase flag in the tokenizer
Parameters
----------
lowercase
Whether to lowercase the input
"""
self._lowercase = lowercase
self.__rebuild_tokenizer()
@property
def lowercase(self):
return self._lowercase
def __rebuild_tokenizer(self):
tokenizers = try_import_huggingface_tokenizers()
# Load the merge file from Huggingface tokenizers < 0.8
try:
# using Vocab obj file
self._vocab = load_vocab(self._vocab_file)
all_tokens = self._vocab.all_tokens
hf_vocab = OrderedDict()
for i in range(len(all_tokens)):
hf_vocab[all_tokens[i]] = i
temp_hf_vocab_file = str(uuid4()) + '.hf_vocab'
with open(temp_hf_vocab_file, 'w', encoding='utf-8') as ftv:
json.dump(hf_vocab, ftv, ensure_ascii=False)
except TypeError:
# using hf_bpe vocab file
with open(self._vocab_file, 'r', encoding='utf-8') as fv:
hf_vocab = json.load(fv)
hf_vocab = sorted(list(hf_vocab.items()), key=lambda x: x[1])
all_tokens = [x[0] for x in hf_vocab]
# default special tokens corresponding to the default
# special_tokens setting in CharBPETokenizer.train
# and the default special_tokens=[unk]
self._vocab = Vocab(all_tokens, unk_token=self._unk_token)
temp_hf_vocab_file = None
except Exception as exp:
raise exp
assert self._unk_token == self._vocab.unk_token
self._model = tokenizers.CharBPETokenizer(
vocab=temp_hf_vocab_file if temp_hf_vocab_file else self._vocab_file,
merges=self._merges_file,
unk_token=self._unk_token, suffix=self._suffix, dropout=self._dropout,
lowercase=self._lowercase)
if temp_hf_vocab_file:
os.remove(temp_hf_vocab_file)
@property
def vocab(self):
return self._vocab
def set_vocab(self, vocab):
raise NotImplementedError('Cannot set vocabulary for HuggingFaceBPETokenizer.')
def __repr__(self):
ret = '{}(\n' \
' merges_file = {}\n' \
' vocab_file = {}\n' \
' unk_token = {}, suffix = {}\n' \
' dropout = {}, lowercase = {}\n' \
' vocab = {}\n' \
')'.format(self.__class__.__name__,
os.path.realpath(self._merges_file),
os.path.realpath(self._vocab_file),
self._unk_token, self._suffix,
self._dropout, self._lowercase,
self._vocab)
return ret
def __getstate__(self):
state = self.__dict__.copy()
state['_model'] = None
return state
def __setstate__(self, state):
self.__dict__ = state
self.__rebuild_tokenizer()
@TOKENIZER_REGISTRY.register('hf_bytebpe')
class HuggingFaceByteBPETokenizer(LegacyHuggingFaceTokenizer):
def __init__(self, merges_file: Optional[str] = None, vocab_file: Optional[str] = None,
add_prefix_space: bool = False, lowercase: bool = False,
dropout: Optional[float] = None,
unicode_normalizer: Optional[str] = None,
continuing_subword_prefix: Optional[str] = None,
end_of_word_suffix: Optional[str] = None, trim_offsets: bool = False):
super().__init__()
self._merges_file = merges_file
self._vocab_file = vocab_file
self._add_prefix_space = add_prefix_space
self._lowercase = lowercase
self._dropout = dropout
self._unicode_normalizer = unicode_normalizer
self._continuing_subword_prefix = continuing_subword_prefix
self._end_of_word_suffix = end_of_word_suffix
self._trim_offsets = trim_offsets
self.__rebuild_tokenizer()
def set_bpe_dropout(self, bpe_dropout: float):
self._dropout = bpe_dropout
self.__rebuild_tokenizer()
    def set_lowercase(self, lowercase: bool):
self._lowercase = lowercase
self.__rebuild_tokenizer()
@property
def lowercase(self):
return self._lowercase
def __rebuild_tokenizer(self):
tokenizers = try_import_huggingface_tokenizers()
# build vocab and temp_hf_vocab_file
try:
# using Vocab obj file
self._vocab = load_vocab(self._vocab_file)
all_tokens = self._vocab.all_tokens
hf_vocab = OrderedDict()
for i in range(len(all_tokens)):
hf_vocab[all_tokens[i]] = i
temp_hf_vocab_file = str(uuid4()) + '.hf_vocab'
with open(temp_hf_vocab_file, 'w', encoding='utf-8') as ftv:
json.dump(hf_vocab, ftv, ensure_ascii=False)
except TypeError:
# using hf_bytebpe vocab file
with open(self._vocab_file, 'r', encoding='utf-8') as fv:
hf_vocab = json.load(fv)
hf_vocab = sorted(list(hf_vocab.items()), key=lambda x: x[1])
all_tokens = [x[0] for x in hf_vocab]
# default special tokens corresponding to the default
# special_tokens setting in ByteBPETokenizer.train
# and the default special_tokens=[]
self._vocab = Vocab(all_tokens)
temp_hf_vocab_file = None
self._model = tokenizers.ByteLevelBPETokenizer(
vocab=temp_hf_vocab_file if temp_hf_vocab_file else self._vocab_file,
merges=self._merges_file,
add_prefix_space=self._add_prefix_space, lowercase=self._lowercase,
dropout=self._dropout, unicode_normalizer=self._unicode_normalizer,
continuing_subword_prefix=self._continuing_subword_prefix,
end_of_word_suffix=self._end_of_word_suffix,
trim_offsets=self._trim_offsets)
if temp_hf_vocab_file:
os.remove(temp_hf_vocab_file)
def __repr__(self):
ret = '{}(\n' \
' merges_file = {}\n' \
' vocab_file = {}\n' \
' add_prefix_space = {}, lowercase = {}, dropout = {}\n' \
' unicode_normalizer = {}, continuing_subword_prefix = {}\n' \
' end_of_word_suffix = {}\n' \
' trim_offsets = {}\n' \
' vocab = {}\n' \
')'.format(self.__class__.__name__,
os.path.realpath(self._merges_file),
os.path.realpath(self._vocab_file),
self._add_prefix_space, self._lowercase, self._dropout,
self._unicode_normalizer, self._continuing_subword_prefix,
self._end_of_word_suffix,
self._trim_offsets,
self._vocab)
return ret
def __getstate__(self):
state = self.__dict__.copy()
state['_model'] = None
return state
def __setstate__(self, state):
self.__dict__ = state
self.__rebuild_tokenizer()
@TOKENIZER_REGISTRY.register('hf_wordpiece')
class HuggingFaceWordPieceTokenizer(LegacyHuggingFaceTokenizer):
def __init__(self, vocab_file: Optional[str] = None,
unk_token: str = Vocab.UNK_TOKEN,
sep_token: str = Vocab.SEP_TOKEN,
cls_token: str = Vocab.CLS_TOKEN,
pad_token: str = Vocab.PAD_TOKEN,
mask_token: str = Vocab.MASK_TOKEN,
clean_text: bool = True, handle_chinese_chars: bool = True,
strip_accents: bool = None, lowercase: bool = False,
wordpieces_prefix: str = "##"):
super().__init__()
self._vocab_file = vocab_file
self._unk_token = unk_token
self._sep_token = sep_token
self._cls_token = cls_token
self._pad_token = pad_token
self._mask_token = mask_token
self._clean_text = clean_text
self._handle_chinese_chars = handle_chinese_chars
self._strip_accents = strip_accents
self._lowercase = lowercase
self._wordpieces_prefix = wordpieces_prefix
self.__rebuild_tokenizer()
self._first_subword_id_set = frozenset([self._vocab[ele]
for ele in self._vocab.all_tokens
if not ele.startswith(self._wordpieces_prefix)])
def is_first_subword(self, tokens: Union[str, int, List[str], List[int]]) \
-> Union[bool, List[bool]]:
"""Whether the token is the first subword token in a sequence of subword tokens.
This can be used for implementing whole-word masking.
We won't care about the special tokens
Parameters
----------
tokens
Returns
-------
ret
"""
if isinstance(tokens, str):
return not tokens.startswith(self._wordpieces_prefix)
elif isinstance(tokens, int):
return tokens in self._first_subword_id_set
elif isinstance(tokens, list):
if len(tokens) == 0:
return []
if isinstance(tokens[0], str):
return [not ele.startswith(self._wordpieces_prefix)
for ele in tokens]
elif isinstance(tokens[0], int):
return [ele in self._first_subword_id_set for ele in tokens]
else:
raise NotImplementedError
else:
raise NotImplementedError
def set_lowercase(self, lowercase: bool):
self._lowercase = lowercase
self.__rebuild_tokenizer()
@property
def lowercase(self):
return self._lowercase
def __rebuild_tokenizer(self):
tokenizers = try_import_huggingface_tokenizers()
# build vocab and temp_hf_vocab_file
try:
# using Vocab obj file
self._vocab = load_vocab(self._vocab_file)
all_tokens = self._vocab.all_tokens
except json.JSONDecodeError:
# using hf_wordpiece vocab file
all_tokens = []
with open(self._vocab_file, 'r', encoding='utf-8') as fv:
for line in fv:
all_tokens.append(line.strip())
            # default special tokens corresponding to the default
# special_tokens setting in BertWordPieceTokenizer.train
# and the default special_tokens=[pad, unk, cls, sep, mask]
default_special_tokens = {'pad_token': self._pad_token,
'cls_token': self._cls_token,
'sep_token': self._sep_token,
'mask_token': self._mask_token}
self._vocab = Vocab(all_tokens, unk_token=self._unk_token, **default_special_tokens)
all_tokens = self._vocab.all_tokens
hf_wordpiece_vocab = {ele: i for i, ele in enumerate(all_tokens)}
self._vocab.mask_token_id = self._vocab.mask_id
assert [self._unk_token, self._sep_token, self._cls_token, self._pad_token,
self._mask_token] == \
[self._vocab.unk_token, self._vocab.sep_token, self._vocab.cls_token,
self._vocab.pad_token, self._vocab.mask_token]
self._model = tokenizers.BertWordPieceTokenizer(
vocab=hf_wordpiece_vocab,
unk_token=self._unk_token,
sep_token=self._sep_token,
cls_token=self._cls_token,
pad_token=self._pad_token,
mask_token=self._mask_token,
clean_text=self._clean_text,
handle_chinese_chars=self._handle_chinese_chars,
strip_accents=self._strip_accents, lowercase=self._lowercase,
wordpieces_prefix=self._wordpieces_prefix)
def __repr__(self):
ret = '{}(\n' \
' vocab_file = {}\n' \
' unk_token = {}, sep_token = {}, cls_token = {}\n' \
' pad_token = {}, mask_token = {}\n' \
' clean_text = {}, handle_chinese_chars = {}\n' \
' strip_accents = {}, lowercase = {}\n' \
' wordpieces_prefix = {}\n' \
' vocab = {}\n' \
')'.format(self.__class__.__name__,
os.path.realpath(self._vocab_file),
self._unk_token, self._sep_token, self._cls_token,
self._pad_token, self._mask_token,
self._clean_text, self._handle_chinese_chars,
self._strip_accents, self._lowercase,
self._wordpieces_prefix,
self._vocab)
return ret
def __getstate__(self):
state = self.__dict__.copy()
state['_model'] = None
return state
def __setstate__(self, state):
self.__dict__ = state
self.__rebuild_tokenizer()
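# Usage sketch (illustrative only, not part of the original module). It assumes
# the `tokenizers` package (>= 0.8) is installed and that a WordPiece
# vocabulary file named `vocab.txt` exists; the file name and input text are
# hypothetical.
#     tokenizer = HuggingFaceWordPieceTokenizer(vocab_file='vocab.txt',
#                                               lowercase=True)
#     tokens = tokenizer.encode('Hello world', output_type=str)
#     ids = tokenizer.encode('Hello world', output_type=int)
#     text = tokenizer.decode(ids)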
|
StarcoderdataPython
|
152028
|
<reponame>Gabriel-p/pyABC
from typing import Union
import numpy as np
import pandas as pd
import scipy.stats as stats
from ..parameters import Parameter
from .base import DiscreteTransition
class DiscreteRandomWalkTransition(DiscreteTransition):
"""
This transition is based on a discrete random walk. This may be useful
for discrete ordinal parameter distributions that can be described as
lying on the grid of integers.
.. note::
This transition does not adapt to the problem structure and thus has
potentially slow convergence.
Further, the transition does not satisfy proposal >> prior, so that
it is indeed not valid as an importance sampling distribution. This
can be overcome by selecting the number of steps as a random variable.
Parameters
----------
    n_steps: int, optional (default = 1)
        Number of random walk steps to take.
    p_l, p_r, p_c: float, optional (default = 1/3 each)
        Per-dimension probabilities of stepping left (-1), right (+1), or
        staying in place (0) in each step of the walk.
"""
def __init__(
self,
n_steps: int = 1,
p_l: float = 1.0 / 3,
p_r: float = 1.0 / 3,
p_c: float = 1.0 / 3,
):
self.n_steps = n_steps
self.p_l = p_l
self.p_r = p_r
self.p_c = p_c
def fit(self, X: pd.DataFrame, w: np.ndarray):
pass
def rvs_single(self) -> Parameter:
# take a step
dim = len(self.X.columns)
step = perform_random_walk(
dim, self.n_steps, self.p_l, self.p_r, self.p_c
)
# select a start point
start_point = self.X.sample(weights=self.w).iloc[0]
# create randomized point
perturbed_point = start_point + step
return Parameter(perturbed_point)
def pdf(
self, x: Union[Parameter, pd.Series, pd.DataFrame]
) -> Union[float, np.ndarray]:
"""
Evaluate the probability mass function (PMF) at `x`.
"""
# convert to numpy array in correct order
if isinstance(x, (Parameter, pd.Series)):
x = np.array([x[key] for key in self.X.columns])
else:
x = x[self.X.columns].to_numpy()
if not np.all(np.isclose(x, x.astype(int))):
raise ValueError(
f"Transition can only handle integer values, not fulfilled "
f"by x={x}."
)
if len(x.shape) == 1:
return self._pdf_single(x)
else:
return np.array([self._pdf_single(xi) for xi in x])
def _pdf_single(self, x: np.ndarray):
p = 0.0
for start, weight in zip(self.X.values, self.w):
# probability if started from start
p_start = calculate_single_random_walk_probability(
start, x, self.n_steps, self.p_l, self.p_r, self.p_c
)
# add p_start times the weight associated to p_start
p += p_start * weight
return p
def perform_random_walk(dim, n_steps, p_l, p_r, p_c):
"""
Perform a random walk in [-1, 0, 1] in each dimension, for `n_steps`
steps.
"""
state = np.zeros(dim)
for _ in range(n_steps):
# sample a step
step = np.random.choice(a=[-1, 0, 1], p=[p_l, p_c, p_r], size=dim)
state += step
return state
def calculate_single_random_walk_probability(
start,
end,
n_steps,
p_l: float = 1.0 / 3,
p_r: float = 1.0 / 3,
p_c: float = 1.0 / 3,
):
"""
Calculate the probability of getting from state `start` to state `end`
in `n_steps` steps, where the probabilities for a left, right, and
no step are `p_l`, `p_r`, `p_c`, respectively.
"""
step = end - start
p = 1.0
for step_j in step:
p_j = 0.0
for n_r in range(max(int(step_j), 0), n_steps + 1):
n_l = n_r - step_j
n_c = n_steps - n_r - n_l
p_j += stats.multinomial.pmf(
x=[n_l, n_r, n_c], n=n_steps, p=[p_l, p_r, p_c]
)
p *= p_j
return p
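# Worked example (sketch, not part of the original module): with the default
# step probabilities of 1/3 each, a one-dimensional move of +1 in a single
# step is exactly one "right" step, so its probability should come out as
# p_r = 1/3:
#     p = calculate_single_random_walk_probability(
#         np.array([0]), np.array([1]), n_steps=1)
#     # p is approximately 0.333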
def calculate_single_random_walk_probability_no_stay(start, end, n_steps):
"""
Calculate the probability of getting from state `start` to state `end`
in `n_steps` steps. Simplified formula assuming the probability to remain
in a given state is zero in each iteration, i.e. that in every step
there is a move to the left or right.
Note that the iteration of this transition is not surjective on the grid
in dimension dim >= 2.
"""
step = end - start
p = 1.0
for step_j in step:
if (step_j + n_steps) % 2 != 0:
# impossible to get there
return 0.0
n_r = int(0.5 * (n_steps + step_j))
p_j = stats.binom.pmf(n=n_steps, p=0.5, k=n_r)
p *= p_j
return p
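# Similar check (sketch, not part of the original module) for the no-stay
# variant: with n_steps=1 a move of +1 is one of the two equally likely
# left/right steps, so the probability should be 0.5:
#     p = calculate_single_random_walk_probability_no_stay(
#         np.array([0]), np.array([1]), n_steps=1)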
|
StarcoderdataPython
|
162206
|
<reponame>lordmallam/aether<gh_stars>0
# Copyright (C) 2018 by eHealth Africa : http://www.eHealthAfrica.org
#
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db import IntegrityError
from . import CustomTestCase
from ..models import Project, XForm, MediaFile
class ModelsTests(CustomTestCase):
def test__xform__create__raises_errors(self):
# missing required fields
self.assertRaises(
IntegrityError,
XForm.objects.create,
)
# missing xml_data
self.assertRaises(
IntegrityError,
XForm.objects.create,
project=self.helper_create_project(),
)
# missing project id
self.assertRaises(
IntegrityError,
XForm.objects.create,
xml_data=self.samples['xform']['xml-ok'],
)
# xml_data with missing properties
self.assertRaises(
IntegrityError,
XForm.objects.create,
project=self.helper_create_project(),
xml_data='''
<h:html
xmlns="http://www.w3.org/2002/xforms"
xmlns:h="http://www.w3.org/1999/xhtml">
<h:head>
<model>
<instance>
</instance>
</model>
</h:head>
<h:body>
</h:body>
</h:html>
''',
)
# corrupted xml_data
self.assertRaises(
IntegrityError,
XForm.objects.create,
project=self.helper_create_project(),
xml_data=self.samples['xform']['xml-err'],
)
def test__xform__save(self):
instance = XForm.objects.create(
project=Project.objects.create(),
xml_data=self.samples['xform']['xml-ok'],
)
self.assertEqual(instance.form_id, 'xform-id-test')
self.assertEqual(instance.title, 'xForm - Test')
self.assertEqual(instance.version, 'v1')
self.assertEqual(instance.download_url,
'/forms/{}/form.xml?version=v1'.format(instance.pk))
self.assertEqual(instance.manifest_url, '', 'without media files no manifest url')
self.assertEqual(str(instance), 'xForm - Test - xform-id-test')
self.assertEqual(instance.md5sum, '5e97c4e929f64d7701804043e3b544ba')
self.assertEqual(instance.hash, 'md5:5e97c4e929f64d7701804043e3b544ba')
def test__project__surveyors(self):
instance = Project.objects.create()
self.assertEqual(instance.surveyors.count(), 0, 'no granted surveyors')
self.helper_create_superuser()
self.assertTrue(instance.is_surveyor(self.admin),
'superusers are always granted surveyors')
self.helper_create_user()
self.assertTrue(instance.is_surveyor(self.user),
'if not granted surveyors all users are surveyors')
surveyor = self.helper_create_surveyor()
instance.surveyors.add(surveyor)
instance.save()
self.assertEqual(instance.surveyors.count(), 1, 'one granted surveyor')
self.assertTrue(instance.is_surveyor(surveyor))
self.assertTrue(instance.is_surveyor(self.admin),
'superusers are always granted surveyors')
self.assertFalse(instance.is_surveyor(self.user),
'if granted surveyors not all users are surveyors')
def test__xform__surveyors(self):
instance = XForm.objects.create(
project=Project.objects.create(),
xml_data=self.samples['xform']['xml-ok'],
)
self.assertEqual(instance.surveyors.count(), 0, 'no granted surveyors')
self.helper_create_superuser()
self.assertTrue(instance.is_surveyor(self.admin),
'superusers are always granted surveyors')
self.helper_create_user()
self.assertTrue(instance.is_surveyor(self.user),
'if not granted surveyors all users are surveyors')
surveyor = self.helper_create_surveyor(username='surveyor')
instance.surveyors.add(surveyor)
instance.save()
self.assertEqual(instance.surveyors.count(), 1, 'one custom granted surveyor')
self.assertTrue(instance.is_surveyor(surveyor))
self.assertTrue(instance.is_surveyor(self.admin),
'superusers are always granted surveyors')
self.assertFalse(instance.is_surveyor(self.user),
'if granted surveyors not all users are surveyors')
surveyor2 = self.helper_create_surveyor(username='surveyor2')
instance.project.surveyors.add(surveyor2)
instance.project.save()
self.assertEqual(instance.surveyors.count(), 1, 'one custom granted surveyor')
self.assertTrue(instance.is_surveyor(surveyor))
self.assertTrue(instance.is_surveyor(surveyor2),
'project surveyors are also xform surveyors')
instance.surveyors.clear()
instance.save()
self.assertEqual(instance.surveyors.count(), 0, 'no custom granted surveyor')
self.assertFalse(instance.is_surveyor(surveyor))
self.assertTrue(instance.is_surveyor(surveyor2),
'project surveyors are always xform surveyors')
def test__xform__media(self):
xform = XForm.objects.create(
project=Project.objects.create(),
xml_data=self.samples['xform']['xml-ok'],
)
media = MediaFile.objects.create(
xform=xform,
media_file=SimpleUploadedFile('sample.txt', b'abc'),
)
self.assertEqual(media.name, 'sample.txt', 'takes file name')
self.assertEqual(media.md5sum, '900150983cd24fb0d6963f7d28e17f72')
self.assertEqual(str(media), 'sample.txt')
media.media_file = SimpleUploadedFile('sample2.txt', b'abcd')
media.save()
        self.assertEqual(media.name, 'sample.txt', 'does not replace the name')
self.assertEqual(media.md5sum, 'e2fc714c4727ee9395f324cd2e7f331f')
self.assertEqual(media.hash, 'md5:e2fc714c4727ee9395f324cd2e7f331f')
self.assertEqual(media.media_file_url, f'http://{settings.HOSTNAME}{media.media_file.url}')
# with media files there is manifest_url
self.assertEqual(xform.manifest_url,
'/forms/{}/manifest.xml?version={}'.format(xform.id, xform.version))
def test__xform__version_control(self):
xform = XForm.objects.create(
project=Project.objects.create(),
xml_data=self.samples['xform']['xml-ok'],
)
last_version = xform.version
last_avro_schema = xform.avro_schema
last_kernel_id = xform.kernel_id
self.assertEqual(last_version, 'v1')
xform.xml_data = self.samples['xform']['xml-ok']
xform.save()
self.assertEqual(last_version, xform.version, 'nothing changed')
self.assertEqual(last_avro_schema, xform.avro_schema, 'nothing changed')
self.assertEqual(last_kernel_id, xform.kernel_id, 'nothing changed')
last_version = xform.version
last_avro_schema = xform.avro_schema
last_kernel_id = xform.kernel_id
xform.xml_data = self.samples['xform']['xml-ok-noversion']
xform.save()
self.assertNotEqual(last_version, xform.version, 'changed xml data')
self.assertNotEqual(last_avro_schema, xform.avro_schema, 'changed AVRO schema')
self.assertNotEqual(last_kernel_id, xform.kernel_id, 'changed Kernel ID')
|
StarcoderdataPython
|
1632102
|
<gh_stars>10-100
name = input()
print("Nice to meet you " + name + ".")
|
StarcoderdataPython
|
1714003
|
import pygame
import random
from pygame import QUIT, KEYDOWN, KEYUP, K_UP, K_DOWN, K_LEFT, K_RIGHT, K_SPACE
pygame.init()
tela = pygame.display.set_mode((800, 600), 0, 32)
imagem = pygame.image.load("images/gato.png").convert_alpha()
angulo = 50
while True:
    # Calculate the game rules
gato_rot = pygame.transform.rotate(imagem, angulo)
angulo += 1
r1 = gato_rot.get_rect()
r1.x = 400 - (r1.w / 2) # 400 - r1.centerx
r1.y = 300 - (r1.h / 2) # 300 - r1.centery
    # Draw the screen
tela.fill((0, 0, 0))
    # Paint the image
tela.blit(gato_rot, [r1.x, r1.y])
pygame.draw.rect(tela, (255, 255, 0), r1, 3)
pygame.display.update()
    # Capture events
for e in pygame.event.get():
if e.type == QUIT:
exit()
|
StarcoderdataPython
|
3322725
|
<filename>multiagent/football/gate.py
import numpy as np
from multiagent.core import Entity
class Gate(Entity):
def __init__(self):
super(Gate, self).__init__()
self.width = 1
self.height = .1
w_half = self.width / 2
h_half = self.height / 2
self.v = [[-w_half, -h_half], [-w_half, h_half], [w_half, h_half], [w_half, -h_half]]
self.collide = False
self.color = np.array([0.75, 0.75, 0.25])
self.state.p_vel = np.array([0., 0.])
|
StarcoderdataPython
|
1787740
|
<reponame>oicr-gsi/dashi
from collections import defaultdict
import dash_core_components as core
import dash_html_components as html
from dash.dependencies import Input, Output, State
from ..dash_id import init_ids
from ..utility.plot_builder import *
from ..utility.table_builder import table_tabs_single_lane, cutoff_table_data_ius
from ..utility import df_manipulation as util
from ..utility import sidebar_utils
from ..utility import log_utils
from gsiqcetl.column import RnaSeqQc2Column as RnaColumn
import pinery
import logging
logger = logging.getLogger(__name__)
""" Set up elements needed for page """
page_name = "single-lane-rna"
title = "Single-Lane RNA-seq"
ids = init_ids([
# Buttons
"jira-issue-with-runs-button",
"general-jira-issue-button",
"update-button-top",
"update-button-bottom",
"approve-run-button",
'miso-request-body',
'miso-button',
# Alerts
"alerts-unknown-run",
# Sidebar controls
"all-runs",
"run-id-list",
"all-instruments",
"instruments-list",
"all-projects",
"projects-list",
"all-references",
"references-list",
"all-kits",
"kits-list",
"all-library-designs",
"library-designs-list",
"first-sort",
"second-sort",
"colour-by",
"shape-by",
"search-sample",
"search-sample-ext",
"show-data-labels",
"show-all-data-labels",
"clusters-per-sample-cutoff",
"percent-mapped-to-coding-cutoff",
"rrna-contamination-cutoff",
"insert-mean-cutoff",
"date-range",
# Graphs
"graphs",
"failed-samples",
"data-table",
"failed-count",
"data-count",
])
RNA_COL = RnaColumn
PINERY_COL = pinery.column.SampleProvenanceColumn
INSTRUMENT_COLS = pinery.column.InstrumentWithModelColumn
RUN_COLS = pinery.column.RunsColumn
special_cols = {
"Total Reads (Passed Filter)": "Total Reads PassedFilter",
"Percent Uniq Reads": "Percent Unique Reads",
"rRNA Percent Contamination": "rRNA Percent Contamination",
# Column comes from `df_with_fastqc_data` call
"Total Clusters (Passed Filter)": "Total Clusters",
}
# Specify which columns to display in the DataTable
first_col_set = [
PINERY_COL.SampleName, PINERY_COL.StudyTitle,
special_cols["Total Reads (Passed Filter)"],
special_cols["Total Clusters (Passed Filter)"],
special_cols["Percent Uniq Reads"],
special_cols["rRNA Percent Contamination"]
]
later_col_set = [
PINERY_COL.PrepKit, PINERY_COL.TissuePreparation,
PINERY_COL.LibrarySourceTemplateType, PINERY_COL.RIN, PINERY_COL.DV200,
PINERY_COL.ExternalName, PINERY_COL.GroupID, PINERY_COL.TissueOrigin,
PINERY_COL.TissueType, PINERY_COL.Institute, INSTRUMENT_COLS.ModelName
]
rnaseqqc_table_columns = [*first_col_set, *RNA_COL.values(), *later_col_set]
initial = get_initial_single_lane_values()
# Set additional initial values for dropdown menus
initial["second_sort"] = special_cols["Total Clusters (Passed Filter)"]
# Set initial values for graph cutoff lines
# Sourced from https://docs.google.com/document/d/1L056bikfIJDeX6Qzo6fwBb9j7A5NgC6o/edit
cutoff_rrna_label = sidebar_utils.rrna_contamination_cutoff_label
initial["cutoff_rrna"] = 35
cutoff_insert_mean_label = "Insert Size Mean + Intron"
initial["cutoff_insert_mean"] = 150
cutoff_clusters_per_sample_label = sidebar_utils.clusters_per_sample_cutoff_label
# The cutoff is 10 000 clusters, but the plotted stat is divided by 10^6
initial["cutoff_clusters_per_sample"] = 0.01
def get_rna_data():
"""
Join together all the dataframes needed for graphing:
* RNA-SeqQC (where most of the graphed QC data comes from)
* Pinery (sample information)
* Instruments (to allow filtering by instrument model)
* Runs (needed to join Pinery to Instruments)
"""
rna_df = util.get_rnaseqqc2()
rna_df = util.df_with_fastqc_data(
rna_df, [RNA_COL.Run, RNA_COL.Lane, RNA_COL.Barcodes]
)
# Calculate percent uniq reads column
rna_df[special_cols["Percent Uniq Reads"]] = round(
rna_df[RNA_COL.UniqueReads] / (rna_df[RNA_COL.NonPrimaryReads] + rna_df[RNA_COL.UniqueReads]) * 100, 1)
rna_df[special_cols["Total Reads (Passed Filter)"]] = round(
rna_df[RNA_COL.TotalReads] / 1e6, 3)
rna_df[special_cols["Total Clusters (Passed Filter)"]] = round(
rna_df[special_cols["Total Clusters (Passed Filter)"]] / 1e6, 3)
rna_df[special_cols["rRNA Percent Contamination"]] = round(
rna_df[RNA_COL.RRnaContaminationProperlyPaired] / rna_df[RNA_COL.RRnaContaminationInTotal] * 100, 3
)
# Pull in sample metadata from Pinery
pinery_samples = util.get_pinery_samples()
# Filter the Pinery samples for only RNA samples.
pinery_samples = util.filter_by_library_design(pinery_samples,
util.rna_lib_designs)
# Join RNAseqQc and Pinery data
rna_df = util.df_with_pinery_samples_ius(rna_df, pinery_samples,
util.rnaseqqc2_ius_columns)
# Join RNAseqQc and instrument model
rna_df = util.df_with_run_info(rna_df, PINERY_COL.SequencerRunName)
return rna_df, util.cache.versions(["rnaseqqc2"])
# Make the RNA dataframe
(RNA_DF, DATAVERSION) = get_rna_data()
# Build lists of attributes for sorting, shaping, and filtering on
ALL_PROJECTS = util.unique_set(RNA_DF, PINERY_COL.StudyTitle)
ALL_KITS = util.unique_set(RNA_DF, PINERY_COL.PrepKit)
ILLUMINA_INSTRUMENT_MODELS = list(util.get_illumina_instruments(RNA_DF))
ALL_TISSUE_MATERIALS = util.unique_set(RNA_DF, PINERY_COL.TissuePreparation)
ALL_TISSUE_ORIGIN = util.unique_set(RNA_DF, PINERY_COL.TissueOrigin)
ALL_LIBRARY_DESIGNS = util.unique_set(RNA_DF, PINERY_COL.LibrarySourceTemplateType)
ALL_RUNS = util.unique_set(RNA_DF, PINERY_COL.SequencerRunName, True) # reverse the list
ALL_SAMPLE_TYPES = util.unique_set(RNA_DF, util.sample_type_col)
ALL_REFERENCES = util.unique_set(RNA_DF, RNA_COL.Reference)
# N.B. The keys in this object must match the argument names for
# the `update_pressed` function in the views.
collapsing_functions = {
"projects": lambda selected: log_utils.collapse_if_all_selected(selected, ALL_PROJECTS, "all_projects"),
"runs": lambda selected: log_utils.collapse_if_all_selected(selected, ALL_RUNS, "all_runs"),
"kits": lambda selected: log_utils.collapse_if_all_selected(selected, ALL_KITS, "all_kits"),
"instruments": lambda selected: log_utils.collapse_if_all_selected(selected, ILLUMINA_INSTRUMENT_MODELS, "all_instruments"),
"library_designs": lambda selected: log_utils.collapse_if_all_selected(selected, ALL_LIBRARY_DESIGNS, "all_library_designs"),
"references": lambda selected: log_utils.collapse_if_all_selected(selected, ALL_REFERENCES, "all_references"),
}
shape_colour = ColourShapeSingleLane(
ALL_PROJECTS, ALL_RUNS, ALL_KITS, ALL_TISSUE_MATERIALS, ALL_TISSUE_ORIGIN,
ALL_LIBRARY_DESIGNS, ALL_REFERENCES
)
# Add shape, colour, and size cols to RNA dataframe
RNA_DF = add_graphable_cols(RNA_DF, initial, shape_colour.items_for_df())
SORT_BY = sidebar_utils.default_first_sort + [
{"label": "Total Clusters",
"value": special_cols["Total Clusters (Passed Filter)"]},
{"label": "Unique Reads",
"value": special_cols["Percent Uniq Reads"]},
{"label": "5Prime to 3Prime Bias",
"value": RNA_COL.MetricsMedian5PrimeTo3PrimeBias},
{"label": "Correct Read Strand",
"value": RNA_COL.MetricsPercentCorrectStrandReads},
{"label": "Coding",
"value": RNA_COL.MetricsPercentCodingBases},
{"label": "rRNA Percentage Contamination",
"value": special_cols["rRNA Percent Contamination"]},
{"label": "DV200",
"value": PINERY_COL.DV200},
{"label": "RIN",
"value": PINERY_COL.RIN},
{"label": "Sample Name",
"value": PINERY_COL.SampleName},
{"label": "Run Start Date",
"value": RUN_COLS.StartDate},
{"label": "Run End Date",
"value": RUN_COLS.CompletionDate},
]
def generate_total_clusters(df, graph_params):
return SingleLaneSubplot(
"Total Clusters (Passed Filter)",
df,
lambda d: d[special_cols["Total Clusters (Passed Filter)"]],
"# PF Clusters X 10^6",
graph_params["colour_by"],
graph_params["shape_by"],
graph_params["shownames_val"],
cutoff_lines=[(cutoff_clusters_per_sample_label, graph_params["cutoff_clusters_per_sample"])]
)
def generate_insert_mean(df, graph_params):
return SingleLaneSubplot(
"Mean Insert Size + Intron",
df,
lambda d: d[RNA_COL.InsertMean],
"Mean Insert Size + Intron",
graph_params["colour_by"],
graph_params["shape_by"],
graph_params["shownames_val"],
cutoff_lines=[(cutoff_insert_mean_label, graph_params["cutoff_insert_mean"])]
)
def generate_unique_reads(df, graph_params):
return SingleLaneSubplot(
"Unique Reads (%)",
df,
lambda d: d[special_cols["Percent Uniq Reads"]],
"%",
graph_params["colour_by"],
graph_params["shape_by"],
graph_params["shownames_val"]
)
def generate_five_to_three(df, graph_params):
return SingleLaneSubplot(
"5 to 3 Prime Bias",
df,
lambda d: d[RNA_COL.MetricsMedian5PrimeTo3PrimeBias],
"Log Ratio",
graph_params["colour_by"],
graph_params["shape_by"],
graph_params["shownames_val"],
log_y=True
)
def generate_correct_read_strand(df, graph_params):
return SingleLaneSubplot(
"🚧 Correct Strand Reads (%) -- NOT ENABLED YET 🚧",
df,
lambda d: d[RNA_COL.MetricsPercentCorrectStrandReads],
"%",
graph_params["colour_by"],
graph_params["shape_by"],
graph_params["shownames_val"]
)
def generate_coding(df, graph_params):
return SingleLaneSubplot(
"Coding (%)",
df,
lambda d: d[RNA_COL.MetricsPercentCodingBases],
"%",
graph_params["colour_by"],
graph_params["shape_by"],
graph_params["shownames_val"]
)
def generate_rrna_contam(df, graph_params):
return SingleLaneSubplot(
"rRNA Contamination (%)",
df,
lambda d: d[special_cols["rRNA Percent Contamination"]],
"%",
graph_params["colour_by"],
graph_params["shape_by"],
graph_params["shownames_val"],
cutoff_lines=[(cutoff_rrna_label, graph_params["cutoff_rrna"])]
)
def generate_dv200(df, graph_params):
return SingleLaneSubplot(
"DV200 (%)",
df,
lambda d: d[PINERY_COL.DV200],
"%",
graph_params["colour_by"],
graph_params["shape_by"],
graph_params["shownames_val"]
)
def generate_rin(df, graph_params):
return SingleLaneSubplot(
"RIN",
df,
lambda d: d[PINERY_COL.RIN],
"",
graph_params["colour_by"],
graph_params["shape_by"],
graph_params["shownames_val"]
)
def dataversion():
return DATAVERSION
GRAPHS = [
generate_total_clusters,
generate_insert_mean,
generate_unique_reads,
generate_five_to_three,
generate_correct_read_strand,
generate_coding,
generate_rrna_contam,
generate_dv200,
generate_rin,
]
# Layout elements
def layout(query_string):
query = sidebar_utils.parse_query(query_string)
    # initial runs: should be empty unless the query requests otherwise:
    # * if query.req_runs: use query.req_runs
# * if query.req_start/req_end: use all runs, so that the start/end filters
# will be applied
if "req_runs" in query and query["req_runs"]:
initial["runs"] = query["req_runs"]
elif "req_start" in query and query["req_start"]:
initial["runs"] = ALL_RUNS
query["req_runs"] = ALL_RUNS # fill in the runs dropdown
if "req_projects" in query and query["req_projects"]:
initial["projects"] = query["req_projects"]
df = reshape_single_lane_df(RNA_DF, initial["runs"], initial["instruments"],
initial["projects"], initial["references"], initial["kits"],
initial["library_designs"], initial["start_date"],
initial["end_date"], initial["first_sort"],
initial["second_sort"], initial["colour_by"],
initial["shape_by"], shape_colour.items_for_df(), [])
return core.Loading(fullscreen=True, type="dot", children=[
html.Div(className="body", children=[
html.Div(className="row jira-buttons", children=[
sidebar_utils.jira_button("Open an issue",
ids['general-jira-issue-button'],
{"display": "inline-block"},
sidebar_utils.construct_jira_link([], title)),
sidebar_utils.jira_button("Open an issue about these runs",
ids['jira-issue-with-runs-button'],
{"display": "none"}, ""),
sidebar_utils.unknown_run_alert(
ids['alerts-unknown-run'],
initial["runs"],
ALL_RUNS
),
]),
html.Div(className="row flex-container", children=[
html.Div(className="sidebar four columns", children=[
html.Button("Update", id=ids['update-button-top'], className="update-button"),
sidebar_utils.miso_qc_button(ids['miso-request-body'], ids['miso-button']),
sidebar_utils.approve_run_button(ids['approve-run-button']),
html.Br(),
html.Br(),
# Filters
sidebar_utils.select_runs(ids["all-runs"],
ids["run-id-list"], ALL_RUNS,
query["req_runs"]),
sidebar_utils.run_range_input(ids["date-range"],
query["req_start"],
query["req_end"]),
sidebar_utils.hr(),
sidebar_utils.select_projects(ids["all-projects"],
ids["projects-list"],
ALL_PROJECTS,
query["req_projects"]),
sidebar_utils.select_reference(ids["all-references"],
ids["references-list"],
ALL_REFERENCES),
sidebar_utils.select_kits(ids["all-kits"], ids["kits-list"],
ALL_KITS),
sidebar_utils.select_instruments(ids["all-instruments"],
ids["instruments-list"],
ILLUMINA_INSTRUMENT_MODELS),
sidebar_utils.select_library_designs(
ids["all-library-designs"], ids["library-designs-list"],
ALL_LIBRARY_DESIGNS),
sidebar_utils.hr(),
# Sort, colour, and shape
sidebar_utils.select_first_sort(
ids["first-sort"],
initial["first_sort"],
SORT_BY
),
sidebar_utils.select_second_sort(
ids["second-sort"],
initial["second_sort"],
SORT_BY,
),
sidebar_utils.select_colour_by(ids["colour-by"],
shape_colour.dropdown(),
initial["colour_by"]),
sidebar_utils.select_shape_by(ids["shape-by"],
shape_colour.dropdown(),
initial["shape_by"]),
sidebar_utils.highlight_samples_input(ids['search-sample'],
[]),
sidebar_utils.highlight_samples_by_ext_name_input_single_lane(ids['search-sample-ext'],
None),
sidebar_utils.show_data_labels_input_single_lane(ids["show-data-labels"],
initial["shownames_val"],
"ALL LABELS",
ids["show-all-data-labels"]),
sidebar_utils.hr(),
# Cutoffs
sidebar_utils.cutoff_input(cutoff_insert_mean_label,
ids["insert-mean-cutoff"], initial["cutoff_insert_mean"]),
sidebar_utils.cutoff_input(cutoff_rrna_label,
ids["rrna-contamination-cutoff"], initial["cutoff_rrna"]),
sidebar_utils.cutoff_input(cutoff_clusters_per_sample_label,
ids["clusters-per-sample-cutoff"], initial["cutoff_clusters_per_sample"]),
html.Br(),
html.Button("Update", id=ids['update-button-bottom'], className="update-button"),
]),
# Graphs + Tables tabs
html.Div(className="seven columns",
children=[
core.Tabs([
# Graphs tab
core.Tab(label="Graphs",
children=[
create_graph_element_with_subplots(ids["graphs"], df, initial, GRAPHS),
]),
# Tables tab
core.Tab(label="Tables",
children=[
table_tabs_single_lane(
ids["failed-samples"],
ids["data-table"],
ids["failed-count"],
ids["data-count"],
df,
rnaseqqc_table_columns,
[
(cutoff_insert_mean_label,
RNA_COL.InsertMean, initial["cutoff_insert_mean"],
(lambda row, col, cutoff: row[col] < cutoff)),
(cutoff_rrna_label,
special_cols["rRNA Percent Contamination"], initial["cutoff_rrna"],
(lambda row, col, cutoff: row[col] > cutoff)),
(cutoff_clusters_per_sample_label,
special_cols["Total Clusters (Passed Filter)"], initial["cutoff_clusters_per_sample"],
(lambda row, col, cutoff: row[col] < cutoff)),
]
)
])
]) # End Tabs
]) # End Div
]) # End Div
]) # End Div
]) # End Loading
def init_callbacks(dash_app):
@dash_app.callback(
[
Output(ids["approve-run-button"], "href"),
Output(ids["approve-run-button"], "style"),
Output(ids["graphs"], "figure"),
Output(ids["failed-samples"], "columns"),
Output(ids["failed-samples"], "data"),
Output(ids["data-table"], "data"),
Output(ids["failed-count"], "children"),
Output(ids["data-count"], "children"),
Output(ids["search-sample"], "options"),
Output(ids['search-sample-ext'], "options"),
Output(ids["jira-issue-with-runs-button"], "href"),
Output(ids["jira-issue-with-runs-button"], "style"),
Output(ids['miso-request-body'], 'value'),
Output(ids['miso-button'], 'style')
],
[
Input(ids["update-button-top"], "n_clicks"),
Input(ids["update-button-bottom"], "n_clicks")
],
[
State(ids['run-id-list'], 'value'),
State(ids['instruments-list'], 'value'),
State(ids['projects-list'], 'value'),
State(ids['references-list'], 'value'),
State(ids['kits-list'], 'value'),
State(ids['library-designs-list'], 'value'),
State(ids['first-sort'], 'value'),
State(ids['second-sort'], 'value'),
State(ids['colour-by'], 'value'),
State(ids['shape-by'], 'value'),
State(ids['search-sample'], 'value'),
State(ids['search-sample-ext'], 'value'),
State(ids['show-data-labels'], 'value'),
State(ids['insert-mean-cutoff'], 'value'),
State(ids['clusters-per-sample-cutoff'], 'value'),
State(ids['rrna-contamination-cutoff'], 'value'),
State(ids["date-range"], 'start_date'),
State(ids["date-range"], 'end_date'),
State('url', 'search'),
]
)
def update_pressed(click,
click2,
runs,
instruments,
projects,
references,
kits,
library_designs,
first_sort,
second_sort,
colour_by,
shape_by,
searchsample,
searchsampleext,
show_names,
insert_mean_cutoff,
clusters_per_sample_cutoff,
rrna_cutoff,
start_date,
end_date,
search_query):
log_utils.log_filters(locals(), collapsing_functions, logger)
if searchsample and searchsampleext:
searchsample += searchsampleext
elif not searchsample and searchsampleext:
searchsample = searchsampleext
df = reshape_single_lane_df(RNA_DF, runs, instruments, projects, references, kits, library_designs,
start_date, end_date, first_sort, second_sort, colour_by,
shape_by, shape_colour.items_for_df(), searchsample)
(approve_run_href, approve_run_style) = sidebar_utils.approve_run_url(runs)
graph_params = {
"colour_by": colour_by,
"shape_by": shape_by,
"shownames_val": show_names,
"cutoff_insert_mean": insert_mean_cutoff,
"cutoff_clusters_per_sample": clusters_per_sample_cutoff,
"cutoff_rrna": rrna_cutoff,
}
dd = defaultdict(list)
(failure_df, failure_columns) = cutoff_table_data_ius(df, [
(cutoff_insert_mean_label, RNA_COL.InsertMean, insert_mean_cutoff,
(lambda row, col, cutoff: row[col] < cutoff)),
(cutoff_clusters_per_sample_label, special_cols["Total Clusters (Passed Filter)"], clusters_per_sample_cutoff,
(lambda row, col, cutoff: row[col] < cutoff)),
(cutoff_rrna_label, special_cols["rRNA Percent Contamination"], rrna_cutoff,
(lambda row, col, cutoff: row[col] > cutoff)),
])
new_search_sample = util.unique_set(df, PINERY_COL.SampleName)
(jira_href, jira_style) = sidebar_utils.jira_display_button(runs, title)
(miso_request, miso_button_style) = util.build_miso_info(df, title,
[{
'title': 'Mean Insert Size',
'threshold_type': 'ge',
'threshold': insert_mean_cutoff,
'value': RNA_COL.InsertMean
}, {
'title': 'Clusters per Sample (* 10^6)',
'threshold_type': 'ge',
'threshold': clusters_per_sample_cutoff,
'value': special_cols["Total Clusters (Passed Filter)"],
}, {
'title': "rRNA Contamination",
'threshold_type': 'le',
'threshold': rrna_cutoff,
'value': special_cols["rRNA Percent Contamination"]
}]
)
return [
approve_run_href,
approve_run_style,
generate_subplot_from_func(df, graph_params, GRAPHS),
failure_columns,
failure_df.to_dict('records'),
df[rnaseqqc_table_columns].to_dict("records", into=dd),
"Rows: {0}".format(len(failure_df.index)),
"Rows: {0}".format(len(df.index)),
[{'label': x, 'value': x} for x in new_search_sample],
[{'label': d[PINERY_COL.ExternalName], 'value': d[PINERY_COL.SampleName]} for i, d in df[[PINERY_COL.ExternalName, PINERY_COL.SampleName]].iterrows()],
jira_href,
jira_style,
miso_request,
miso_button_style
]
@dash_app.callback(
Output(ids['run-id-list'], 'value'),
[Input(ids['all-runs'], 'n_clicks')]
)
def all_runs_requested(click):
sidebar_utils.update_only_if_clicked(click)
return [x for x in ALL_RUNS]
@dash_app.callback(
Output(ids['instruments-list'], 'value'),
[Input(ids['all-instruments'], 'n_clicks')]
)
def all_instruments_requested(click):
sidebar_utils.update_only_if_clicked(click)
return [x for x in ILLUMINA_INSTRUMENT_MODELS]
@dash_app.callback(
Output(ids['projects-list'], 'value'),
[Input(ids['all-projects'], 'n_clicks')]
)
def all_projects_requested(click):
sidebar_utils.update_only_if_clicked(click)
return [x for x in ALL_PROJECTS]
@dash_app.callback(
Output(ids['references-list'], 'value'),
[Input(ids['all-references'], 'n_clicks')]
)
def all_references_requested(click):
sidebar_utils.update_only_if_clicked(click)
return [x for x in ALL_REFERENCES]
@dash_app.callback(
Output(ids['kits-list'], 'value'),
[Input(ids['all-kits'], 'n_clicks')]
)
def all_kits_requested(click):
sidebar_utils.update_only_if_clicked(click)
return [x for x in ALL_KITS]
@dash_app.callback(
Output(ids['library-designs-list'], 'value'),
[Input(ids['all-library-designs'], 'n_clicks')]
)
def all_library_designs_requested(click):
sidebar_utils.update_only_if_clicked(click)
return [x for x in ALL_LIBRARY_DESIGNS]
@dash_app.callback(
Output(ids['show-data-labels'], 'value'),
[Input(ids['show-all-data-labels'], 'n_clicks')],
[State(ids['show-data-labels'], 'options')]
)
def all_data_labels_requested(click, avail_options):
sidebar_utils.update_only_if_clicked(click)
return [x['value'] for x in avail_options]
|
StarcoderdataPython
|
3204822
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 30 17:29:28 2016
@author: Michael
How does gaussian white noise behave when it is input to a running average filter,
i.e. a perfect low pass filter (except for discretization errors)?
I assume ergodicity, i.e. marginal probability densities can
be estimated by averaging over time. Pretty sure that assumption is justified.
"""
import numpy as np
import scipy.fftpack
import matplotlib.pyplot as pyplot
def gaussian(x, m, sigma):
return 1./np.sqrt(2 * np.pi)/sigma * np.exp(-np.square((x - m)/sigma)*0.5)
NG = 1024 * 10
gauss_noise_iid = np.random.normal(0., 1., NG)
gauss_fft = scipy.fftpack.fft(gauss_noise_iid)
sigmas = []
cutoffs = []
for cutoff in [1,2,3,4,5,6,7,8,9, 9.5, 9.9]:
NFILT = int(512 * cutoff)
RELATIVE_FREQ_LIMIT = 1.*(NG/2-1-NFILT) / (NG/2-1)
gauss_fft_limited = gauss_fft.copy()
    gauss_fft_limited[NG//2-1-NFILT:NG//2+NFILT] = 0.
gauss_limited = scipy.fftpack.ifft(gauss_fft_limited).real
cutoffs.append(RELATIVE_FREQ_LIMIT)
sigmas.append(np.std(gauss_limited))
# low pass frequency limit = 6. / 8.
# correlation length = 8. / 6.
# -> sigma_limit = sigma * sqrt(6. / 8.)
    print('cutoff %s, sigma: white %s, limited %s' % (RELATIVE_FREQ_LIMIT, np.std(gauss_noise_iid), np.std(gauss_limited)))
if 0:
pyplot.subplot(4, 1, 1)
pyplot.plot(gauss_noise_iid)
pyplot.plot(gauss_limited)
pyplot.gca().set_ylim((-4., 4.))
pyplot.subplot(4, 1, 2)
pyplot.plot(np.abs(gauss_fft))
pyplot.plot(np.abs(gauss_fft_limited))
pyplot.subplot(4, 1, 3)
y = np.linspace(-4., 4., 128)
h1 = gaussian(y, 0., 1.)
h2 = gaussian(y, 0., 0.44)
pyplot.hist(gauss_noise_iid, bins = 32, range = (-4., 4.), histtype='step', normed = True)
pyplot.hist(gauss_limited, bins = 32, range = (-4., 4.), histtype='step', normed = True)
pyplot.plot(y, h1)
pyplot.plot(y, h2)
pyplot.subplot(4, 1, 4)
pyplot.show()
T = np.reciprocal(np.asarray(cutoffs, dtype=float))
print(T)
pyplot.plot(T, sigmas)
pyplot.gca().set(xscale = 'log')
x = np.logspace(0, 2, 1024)
pyplot.plot(x, np.sqrt(1./x), color = 'red')
pyplot.show()
# The curves should agree very well.
# This means that the variance of the smoothed signal decreases as T0/T, where
# T0 is the correlation length of the input signal and T the width of the
# moving average filter.
# What happens when T < T0? Or in other words when the low pass filter
# corresponding to the moving average over T comprises all frequencies of the
# input. Then of course the signal is preserved. In particular, sigma
# is the same.
# How about super sampling a signal at distances smaller than its correlation
# length? Seems to preserve gaussian processes. Seems intuitive that it
# cannot change the (marginal) distribution of the input.
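# --- Illustrative sketch (added note, not part of the original experiment) ---
# A time-domain cross-check of the conclusion above, under the assumption of a plain
# moving average of width T applied to iid unit-variance gaussian noise (T0 = 1 sample):
# the standard deviation of the smoothed signal should fall off roughly as sqrt(1/T).
def moving_average_sigma_check(n_samples=1 << 16, widths=(2, 4, 8, 16, 32)):
    noise = np.random.normal(0., 1., n_samples)
    for T in widths:
        kernel = np.ones(T) / T                      # moving average filter of width T
        smoothed = np.convolve(noise, kernel, mode='valid')
        print('T=%2d  measured sigma=%.3f  expected sqrt(1/T)=%.3f'
              % (T, np.std(smoothed), np.sqrt(1. / T)))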
|
StarcoderdataPython
|
118923
|
import numpy as np
import termcolor
import cnc_structs
def string_array_to_char_array(m):
c = np.array([x.decode('ascii')[0] if len(x) > 0 else ' ' for x in m.flat]).reshape(m.shape)
return '\n'.join(map(''.join, c))
def staticmap_array(map: cnc_structs.CNCMapDataStruct) -> np.ndarray:
tile_names = np.zeros(map.MapCellHeight * map.MapCellWidth, dtype='S32')
for i in range(tile_names.size):
tile_names[i] = map.StaticCells[i].TemplateTypeName
return tile_names.reshape((map.MapCellHeight, map.MapCellWidth))
def tiberium_array(map: cnc_structs.CNCDynamicMapStruct, static_map):
array = np.zeros((static_map.MapCellHeight, static_map.MapCellWidth), dtype=bool)
for entry in map.Entries:
array[
entry.CellY - static_map.MapCellY, entry.CellX - static_map.MapCellX
] = entry.IsResource
return array
def f(dynamic_map, layers, occupiers, shroud_array, map_shape, House, AllyFlags):
# TODO Owner should be Color, not House
MapCellHeight, MapCellWidth = map_shape
fixed_pos_map_assets = np.zeros(MapCellHeight * MapCellWidth, dtype='S32')
fixed_pos_map_shapes = np.zeros(MapCellHeight * MapCellWidth, dtype='uint8')
actors = []
terrains = {}
for o in layers.Objects:
if o.Type == 5: # terrain
terrains[o.ID] = (o.AssetName, o.ShapeIndex)
# exclude ANIM and BULLET?
else:
if (ord(o.Owner) & AllyFlags) or o.Cloak != 2: # CLOAKED
# TODO obey shroud
                # buildings have multiple cells, shroud is a bit tricky there
actors.append(
{
'Asset': o.AssetName.decode('ascii'),
'Shape': o.ShapeIndex,
'Position': (o.PositionY, o.PositionX),
'Owner': ord(o.Owner),
'Strength': o.Strength,
'IsSelected': (o.IsSelectedMask & (1 << House)) != 0,
'ControlGroup': o.ControlGroup,
'IsRepairing': o.IsRepairing,
'IsPrimaryFactory': o.IsPrimaryFactory,
'Cloak': o.Cloak,
'Pips': list(o.Pips[: o.MaxPips])
+ [-1] * (cnc_structs.MAX_OBJECT_PIPS - o.MaxPips),
}
)
for entry in dynamic_map.Entries:
if entry.IsOverlay and entry.Type >= 1 and entry.Type <= 5: # walls
actors.append(
{
'Asset': entry.AssetName.decode('ascii'),
'Shape': entry.ShapeIndex,
'Position': (entry.PositionY, entry.PositionX),
'Owner': ord(entry.Owner) if ord(entry.Owner) == House else 255,
'Strength': 0,
'IsSelected': False,
'ControlGroup': -1,
'IsRepairing': False,
'IsPrimaryFactory': False,
'Cloak': 0,
'Pips': [-1] * cnc_structs.MAX_OBJECT_PIPS,
}
)
else:
fixed_pos_map_assets[entry.CellY * MapCellWidth + entry.CellX] = entry.AssetName
fixed_pos_map_shapes[entry.CellY * MapCellWidth + entry.CellX] = entry.ShapeIndex
for i, o in enumerate(occupiers.Entries):
if len(o.Objects) >= 1 and o.Objects[0].Type == 5: # terrain
assert len(o.Objects) == 1
fixed_pos_map_assets[i], fixed_pos_map_shapes[i] = terrains[o.Objects[0].ID]
return (
fixed_pos_map_assets.reshape(map_shape),
fixed_pos_map_shapes.reshape(map_shape),
actors,
)
def layers_array(objects: cnc_structs.CNCObjectListStruct, static_map):
array = np.zeros((static_map.MapCellHeight, static_map.MapCellWidth), dtype=int)
for thing in objects.Objects:
array[thing.CellY - static_map.MapCellY, thing.CellX - static_map.MapCellX] = thing.Type
return array
def layers_list(layers, static_map):
return [
{
'Owner': ord(o.Owner),
'Asset': o.AssetName.decode('ascii'),
'Type': o.Type,
'ID': o.ID,
'X': o.CellX - static_map.MapCellX,
'Y': o.CellY - static_map.MapCellY,
'OccupyList': o.OccupyList[: o.OccupyListLength],
}
for o in layers.Objects
if o.Type > 0
]
def units_and_buildings_dict(layers):
return {(o.Type, o.ID): o for o in layers.Objects if o.Type >= 1 and o.Type <= 4}
def layers_term(layers, dynamic_map, static_map, occupiers):
units_and_buildings = units_and_buildings_dict(layers)
tiberium = tiberium_array(dynamic_map, static_map)
text = ''
for i, (occupier, is_tiberium, static_cell) in enumerate(
zip(occupiers.Entries, tiberium.flat, static_map.StaticCells)
):
if i < static_map.MapCellWidth or i >= static_map.MapCellWidth * (
static_map.MapCellHeight - 1
):
continue
cell_text = ' '
color = 'white'
background = 'on_green' if is_tiberium else 'on_grey'
# print(static_cell.TemplateTypeName)
if i % static_map.MapCellWidth == 0:
cell_text = '|'
elif i % static_map.MapCellWidth == static_map.MapCellWidth - 1:
cell_text = '|\n'
elif static_cell.TemplateTypeName.startswith(
b'W'
) or static_cell.TemplateTypeName.startswith(
b'RV'
): # river or water
background = 'on_blue'
elif static_cell.TemplateTypeName.startswith(
b'S'
) and not static_cell.TemplateTypeName.startswith(
b'SH'
): # slope but not shore
background = 'on_white'
if len(occupier.Objects) > 0:
occupier = occupier.Objects[0]
if (occupier.Type, occupier.ID) in units_and_buildings:
occupier = units_and_buildings[(occupier.Type, occupier.ID)]
color = ['yellow', 'blue', 'red', 'white', 'magenta', 'cyan'][
ord(occupier.Owner) - 4
]
cell_text = occupier.AssetName.decode('ascii')[0]
if occupier.Type >= 1 and occupier.Type <= 3:
cell_text = cell_text.lower()
elif occupier.Type == 4:
cell_text = cell_text.upper()
text += termcolor.colored(cell_text, color, background)
return text.rstrip('\n')
def sidebar_term(sidebar: cnc_structs.CNCSidebarStruct):
return (
f'Tiberium: {(100 * sidebar.Tiberium) // sidebar.MaxTiberium if sidebar.MaxTiberium > 0 else 0 :3d}% '
f'Power: {(100 * sidebar.PowerDrained) // sidebar.PowerProduced if sidebar.PowerProduced > 0 else 0:3d}%'
f' Credits: {sidebar.Credits} | '
+ ', '.join(
sidebar.Entries[i].AssetName.decode('ascii') for i in range(sidebar.EntryCount[0])
),
'|',
', '.join(
sidebar.Entries[i].AssetName.decode('ascii')
for i in range(sidebar.EntryCount[0], sidebar.EntryCount[0] + sidebar.EntryCount[1])
),
)
def players_units(layers, house):
return [o for o in layers.Objects if ord(o.Owner) == house and o.IsSelectable]
def shroud_array(shrouds: cnc_structs.CNCShroudStruct, static_map_shape):
return np.array([entry.IsVisible for entry in shrouds.Entries], dtype=bool).reshape(
static_map_shape
)
def occupiers_list(occupiers_struct, static_map):
return [
{'X': i % static_map.MapCellWidth, 'Y': i // static_map.MapCellWidth, 'Objects': e.Objects}
for i, e in enumerate(occupiers_struct.Entries)
if e.Count > 0
]
def occupiers_array(occupiers_struct, static_map):
return np.array(
[
((-1 if len(e.Objects) == 0 else e.Objects[0].Type) << 32)
+ (-1 if len(e.Objects) == 0 else e.Objects[0].ID)
for e in occupiers_struct.Entries
]
).reshape((static_map.MapCellHeight, static_map.MapCellWidth))
|
StarcoderdataPython
|
1600041
|
"""
Given an integer array nums where every element appears three times except for one, which appears exactly once.
Find the single element and return it.
Example 1:
Input: nums = [2,2,3,2]
Output: 3
Example 2:
Input: nums = [0,1,0,1,0,1,99]
Output: 99
Constraints:
1 <= nums.length <= 3 * 104
-231 <= nums[i] <= 231 - 1
Each element in nums appears exactly three times except for one element which appears once.
Follow up: Your algorithm should have a linear runtime complexity. Could you implement it without using extra memory?
"""
from collections import Counter
from typing import List
class Solution:
"""
Counter approach.
Runtime: 48 ms, faster than 96.58% of Python3
Memory Usage: 16.1 MB, less than 45.11% of Python3
    Time complexity: O(n) for iteration over nums, O(n log n) for sorting
Space complexity: O(n) to hold values in counter
counter = {0: 3, 1: 3, 99: 1}
In [241]: timeit sorted(counter, key=lambda k: counter[k])[0]
476 ns ± 2.09 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each)
In [242]: timeit sorted(counter, key=counter.get)[0]
350 ns ± 5.59 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each)
"""
def singleNumber(self, nums: List[int]) -> int:
counter = dict()
for n in nums:
if n in counter:
counter[n] += 1
else:
counter[n] = 1
# sort dict keys by value; return first (which has smallest count)
return sorted(counter, key=lambda k: counter[k])[0] # alt.: key=counter.get
class Solution2:
"""
    Uses the Counter type from the collections library.
    The other optimisation is a plain linear scan to find the element that appears only once,
    which takes O(n) instead of the O(n log n) needed to sort the dict by values.
Runtime: 52 ms, faster than 90.26% of Python3
Memory Usage: 16.2 MB, less than 29.11% of Python3
"""
def singleNumber(self, nums: List[int]) -> int:
counter = Counter(nums)
for k in counter:
if counter[k] == 1:
return k
class Solution3:
"""
    Algorithm idea: imagine an 'ideal' list in which every distinct number appears 3 times and
    compute its sum (3 * the sum of the distinct values), then subtract the sum of the given input list.
    Since the single number is missing two of its three (3-1=2) occurrences, the difference is exactly
    twice that number, so the final step is to divide by two.
e.g.,
3×(a+b+c)−(a+a+a+b+b+b+c)=2c
Runtime: 52 ms, faster than 90.26% of Python3
Memory Usage: 16 MB, less than 45.11% of Python3
Time complexity : O(N) to iterate over the input array.
Space complexity : O(N) to keep the set of N/3 elements.
"""
def singleNumber(self, nums: List[int]) -> int:
return (sum(set(nums)) * 3 - sum(nums)) // 2
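# Worked example for the identity above (illustrative only):
# nums = [2, 2, 3, 2]  ->  set(nums) = {2, 3}
# 3 * (2 + 3) = 15, sum(nums) = 9, and (15 - 9) // 2 = 3, the element that appears once.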
class Solution4:
"""
O(1) space solution by using three bitwise operators
∼x that means bitwise NOT
x&y that means bitwise AND
x⊕y that means bitwise XOR
Runtime: 52 ms, faster than 90.26% of Python3
Memory Usage: 15.7 MB, less than 88.79% of Python3
Time complexity : O(N) to iterate over the input array.
Space complexity : O(1) since no additional data structures are allocated.
"""
def singleNumber(self, nums: List[int]) -> int:
seen_once = seen_twice = 0
for n in nums:
"""
first appearance:
add num to seen_once
don't add to seen_twice because of presence in seen_once
second appearance:
remove num from seen_once
add num to seen_twice
third appearance:
don't add to seen_once because of presence in see_twice
remove num from seen_twice
"""
seen_once = ~seen_twice & (seen_once ^ n)
seen_twice = ~seen_once & (seen_twice ^ n)
return seen_once
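# Illustrative hand trace of Solution4's masks for nums = [2, 2, 3, 2] (added for
# clarity; the bookkeeping is per bit, so intermediate values need not equal any input):
#   start:    seen_once=0, seen_twice=0
#   after 2:  seen_once=2, seen_twice=0
#   after 2:  seen_once=0, seen_twice=2
#   after 3:  seen_once=1, seen_twice=0
#   after 2:  seen_once=3, seen_twice=0   -> returns 3, the single element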
if __name__ == '__main__':
solutions = [Solution(), Solution2(), Solution3(), Solution4()]
tc = (
([2, 2, 3, 2], 3),
([0, 1, 0, 1, 0, 1, 99], 99),
)
for s in solutions:
for inp, exp in tc:
assert s.singleNumber(inp) == exp
|
StarcoderdataPython
|
3395225
|
<gh_stars>0
# -*- coding: utf-8 -*-
# Copyright (c) 2014 <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant la commande 'historique'."""
from datetime import datetime
from primaires.interpreteur.commande.commande import Commande
class CmdHistorique(Commande):
"""Commande 'historique'.
"""
def __init__(self):
"""Constructeur de la commande"""
Commande.__init__(self, "historique", "history")
self.nom_categorie = "parler"
self.aide_courte = "affiche les derniers messages reçus"
self.aide_longue = \
"Cette commande permet de voir les dix derniers messages reçus " \
"dans divers canaux (que ce soient les canaux HRP auxquels " \
"vous êtes connectés, les messages dits RP dans la salle grâce " \
"à la commande %dire% où ceux qui vous sont transmis " \
"directement par %parler%)."
def interpreter(self, personnage, dic_masques):
"""Interprétation de la commande"""
messages = importeur.communication.messages.get(personnage, [])
if len(messages) == 0:
personnage << "|err|Vous n'avez encore aucune conversation " \
"à rappeler.|ff|"
return
tableau = importeur.communication.extraire_historique(personnage)
personnage << tableau.afficher()
|
StarcoderdataPython
|
1732325
|
"""
1. Strings: immutable
2. Test the efficiency of string concatenation operations
"""
import time
def ba_str():
str1 = 'hello, world!'
    # Use the len() function to get the length of the string
print(len(str1)) # 13
    # Get a copy of the string with its first letter capitalized
print(str1.capitalize()) # Hello, world!
    # Get an uppercase copy of the string
print(str1.upper()) # HELLO, WORLD!
    # Find the position of a substring within the string
print(str1.find('or')) # 8
print(str1.find('???')) # -1
    # Similar to find, but raises an exception when the substring is not found
print(str1.index('or')) # 8
    # Check whether the string starts with the given prefix
print(str1.startswith('He')) # False
print(str1.startswith('hel')) # True
    # Check whether the string ends with the given suffix
print(str1.endswith('!')) # True
    # Center the string within the given width, padding both sides with the given character
print(str1.center(50, '*'))
    # Right-justify the string within the given width, padding the left side with the given character
print(str1.rjust(50, ' '))
str2 = 'abc123456'
    # Get the character at the given position (index operation)
print(str2[2]) # c
    # String slicing (from the given start index to the given end index)
print(str2[2:5]) # c12
print(str2[2:]) # c123456
print(str2[2::2]) # c246
print(str2[::2]) # ac246
print(str2[::-1]) # 654321cba
print(str2[-3:-1]) # 45
    # Check whether the string consists only of digits
print(str2.isdigit()) # False
    # Check whether the string consists only of letters
print(str2.isalpha()) # False
    # Check whether the string consists only of letters and digits
print(str2.isalnum()) # True
str3 = ' <EMAIL> '
print(str3)
    # Get a copy of the string with the given characters trimmed from both sides
print(str3.strip(' jma'))
    # join operation
l1 = ['a', 'b', 'c']
str4 = ' '.join(l1)
print(str4)
def test_str():
x = 10000
    # Method 1
start_time1 = time.time()
s1 = ''
for n in range(x):
s1 += str(n)
end_time1 = time.time()
print(end_time1 - start_time1)
    # Method 2
start_time2 = time.time()
list1 = []
for n in range(x):
list1.append(str(n))
s2 = ''.join(list1)
end_time2 = time.time()
print(end_time2 - start_time2)
    # Method 3
start_time3 = time.time()
s3 = ''.join(map(str, range(x)))
end_time3 = time.time()
print(end_time3 - start_time3)
if __name__ == "__main__":
ba_str()
test_str()
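# Expected outcome (illustrative note): because str is immutable, each += in method 1
# may build a brand-new string, so that loop can degrade to quadratic time, while
# methods 2 and 3 assemble the result once with ''.join and are markedly faster for large x.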
|
StarcoderdataPython
|
24285
|
import numpy
from kapteyn import maputils
from matplotlib.pyplot import show, figure
import csv # Read some positions from file in Comma Separated Values format
# Some initializations
blankcol = "#334455" # Represent undefined values by this color
epsilon = 0.0000000001
figsize = (9,7) # Figure size in inches
plotbox = (0.1,0.05,0.8,0.8)
fig = figure(figsize=figsize)
frame = fig.add_axes(plotbox)
Basefits = maputils.FITSimage("allsky_raw.fits") # Here is your downloaded FITS file in rectangular coordinates
Basefits.hdr['CTYPE1'] = 'GLON-CAR' # For transformations we need to give it a projection type
Basefits.hdr['CTYPE2'] = 'GLAT-CAR' # CAR is rectangular
# Use some header values to define reprojection parameters
cdelt1 = Basefits.hdr['CDELT1']
cdelt2 = Basefits.hdr['CDELT2']
naxis1 = Basefits.hdr['NAXIS1']
naxis2 = Basefits.hdr['NAXIS2']
# Header works only with a patched wcslib 4.3
# Note that changing CRVAL1 to 180 degrees shifts the plot 180 deg.
header = {'NAXIS' : 2, 'NAXIS1': naxis1, 'NAXIS2': naxis2,
'CTYPE1' : 'GLON-AIT',
'CRVAL1' : 0, 'CRPIX1' : naxis1//2, 'CUNIT1' : 'deg', 'CDELT1' : cdelt1,
'CTYPE2' : 'GLAT-AIT',
'CRVAL2' : 30.0, 'CRPIX2' : naxis2//2, 'CUNIT2' : 'deg', 'CDELT2' : cdelt2,
'LONPOLE' :60.0,
'PV1_1' : 0.0, 'PV1_2' : 90.0, # IMPORTANT. This is a setting from Cal.section 7.1, p 1103
}
Reprojfits = Basefits.reproject_to(header)
annim_rep = Reprojfits.Annotatedimage(frame)
annim_rep.set_colormap("heat.lut") # Set color map before creating Image object
annim_rep.set_blankcolor(blankcol) # Background are NaN's (blanks). Set color here
annim_rep.Image(vmin=30000, vmax=150000) # Just a selection of two clip levels
annim_rep.plot()
# Draw the graticule, but do not cover near -90 to prevent ambiguity
X = numpy.arange(0,390.0,15.0);
Y = numpy.arange(-75,90,15.0)
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2), wylim=(-90,90.0), wxlim=(0,360),
startx=X, starty=Y)
grat.setp_lineswcs0(0, color='w', lw=2)
grat.setp_lineswcs1(0, color='w', lw=2)
# Draw border with standard graticule, just to make the borders look smooth
header['CRVAL1'] = 0.0
header['CRVAL2'] = 0.0
del header['PV1_1']
del header['PV1_2']
header['LONPOLE'] = 0.0
header['LATPOLE'] = 0.0
border = annim.Graticule(header, axnum= (1,2), wylim=(-90,90.0), wxlim=(-180,180),
startx=(180-epsilon, -180+epsilon), skipy=True)
border.setp_lineswcs0(color='w', lw=2) # Show borders in arbitrary color (e.g. background color)
border.setp_lineswcs1(color='w', lw=2)
# Plot the 'inside' graticules
lon_constval = 0.0
lat_constval = 0.0
lon_fmt = 'Dms'; lat_fmt = 'Dms' # Only Degrees must be plotted
addangle0 = addangle1=0.0
deltapx0 = deltapx1 = 1.0
labkwargs0 = {'color':'r', 'va':'center', 'ha':'center'}
labkwargs1 = {'color':'r', 'va':'center', 'ha':'center'}
lon_world = list(range(0,360,30))
lat_world = [-60, -30, 30, 60]
ilabs1 = grat.Insidelabels(wcsaxis=0,
world=lon_world, constval=lat_constval,
deltapx=1.0, deltapy=1.0,
addangle=addangle0, fmt=lon_fmt, **labkwargs0)
ilabs2 = grat.Insidelabels(wcsaxis=1,
world=lat_world, constval=lon_constval,
deltapx=1.0, deltapy=1.0,
addangle=addangle1, fmt=lat_fmt, **labkwargs1)
# Read marker positions (in 0h0m0s 0d0m0s format) from file
reader = csv.reader(open("positions.txt"), delimiter=' ', skipinitialspace=True)
for line in reader:
if line:
hms, dms = line
postxt = "{eq fk4-no-e} "+hms+" {} "+dms # Define the sky system of the source
print(postxt)
annim.Marker(pos=postxt, marker='*', color='yellow', ms=20)
# Plot a title
titlepos = 1.02
title = r"""All sky map in Hammer Aitoff projection (AIT) oblique with:
$(\alpha_p,\delta_p) = (0^\circ,30^\circ)$, $\phi_p = 75^\circ$ also:
$(\phi_0,\theta_0) = (0^\circ,90^\circ)$."""
t = frame.set_title(title, color='g', fontsize=13, linespacing=1.5)
t.set_y(titlepos)
annim.plot()
annim.interact_toolbarinfo()
annim_rep.interact_imagecolors()
show()
|
StarcoderdataPython
|
1655545
|
from django.shortcuts import render, get_object_or_404, redirect
from django.http import HttpResponseRedirect
from django.contrib import messages
from .models import Comments
# Create your views here.
def delete_comment(request, id):
"""function baraye pak kardane comment"""
comment = get_object_or_404(Comments, id=id)
if request.user == comment.user or request.user.is_admin or request.user.is_staff:
comment.delete()
messages.success(request, "کامنت شما با موفقیت حذف شد")
return HttpResponseRedirect(request.META.get("HTTP_REFERER"))
# return redirect(comment.content_object.get_absolute_url())
return None
def approve_comment(request, id):
comment = get_object_or_404(Comments, id=id, approved=False)
if request.user.is_staff or request.user.is_admin:
comment.approved = True
comment.save()
return HttpResponseRedirect(request.META.get("HTTP_REFERER"))
|
StarcoderdataPython
|
3296396
|
from .face import FaceDetector
from .body import BodyDetector, Pose
|
StarcoderdataPython
|
114937
|
<filename>Content/Scripts/ObjectLoader.py
import os.path
import json,codecs
import unreal_engine as ue
from unreal_engine import FVector,FRotator
from unreal_engine.classes import Actor, Pawn, Character, ProjectileMovementComponent, PawnSensingComponent, StaticMesh
from unreal_engine.classes import StaticMeshComponent, StaticMeshActor, PointLightComponent
class ObjectLoader:
def begin_play(self):
ue.log("begin object loader")
self.pawn = self.uobject.get_owner()
#self.world = ue.get_editor_world()
self.datapath = str(self.pawn.get_property('datafilename'))
self.objects = []
ue.log("------------------")
def loadAndSpawnObjects(self):
ue.log("+++++++++++++++++++")
ue.log("loadAndSpawnObjects")
ue.log("checking for "+self.datapath)
if os.path.exists(self.datapath):
with codecs.open(self.datapath,"r","utf-8") as f:
data = json.loads(f.read())
ue.log(data)
for obj in data:
objclass = ue.find_class(obj["type"])
#ue.log(str(type(objclass))+str(objclass)+"="+obj["json"])
objinst = self.uobject.actor_spawn(objclass, FVector(0, 0, 0),FRotator(0, 0, 0))
jsonstr = obj["json"]
self.objects.append(objinst)
objinst.call_function("loadjson",jsonstr)
ue.log("------------------")
def clear(self):
self.objects.clear()
def add(self):
self.objects.append(self.pawn.get_property('whattoadd'))
#ue.log(len(self.objects))
def printall(self):
ue.log(len(self.objects))
def saveAllObjects(self):
with codecs.open(self.datapath,"w","utf-8") as f:
res = []
for obj in self.objects:
res.append({"type":obj.get_class().get_name(),"json":obj.savejson()[0]})
f.write(json.dumps(res))
def tick(self, delta_time):
pass
|
StarcoderdataPython
|
199957
|
#
# @lc app=leetcode id=977 lang=python3
#
# [977] Squares of a Sorted Array
#
# https://leetcode.com/problems/squares-of-a-sorted-array/description/
#
# algorithms
# Easy (72.86%)
# Total Accepted: 56.2K
# Total Submissions: 77.7K
# Testcase Example: '[-4,-1,0,3,10]'
#
# Given an array of integers A sorted in non-decreasing order, return an array
# of the squares of each number, also in sorted non-decreasing order.
#
#
#
#
# Example 1:
#
#
# Input: [-4,-1,0,3,10]
# Output: [0,1,9,16,100]
#
#
#
# Example 2:
#
#
# Input: [-7,-3,2,3,11]
# Output: [4,9,9,49,121]
#
#
#
#
# Note:
#
#
# 1 <= A.length <= 10000
# -10000 <= A[i] <= 10000
# A is sorted in non-decreasing order.
#
#
#
#
from typing import List


class Solution:
def sortedSquares(self, A: List[int]) -> List[int]:
# left,right=0,len(A)-1
# sol=[_ for _ in range(len(A))]
# for i in range(len(A)-1,-1,-1):
# if abs(A[left])>abs(A[right]):
# sol[i]=A[left]*A[left]
# left+=1
# else:
# sol[i]=A[right]*A[right]
# right-=1
# return sol
return sorted(x*x for x in A)
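# Note (added): the commented-out two-pointer version above fills the result from the
# largest square down and runs in O(n) without sorting, while the sorted() one-liner
# costs O(n log n).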
|
StarcoderdataPython
|
4805848
|
# -*- coding: utf-8 -*-
import sys
import os.path
import math
import nltk
from nltk.corpus import PlaintextCorpusReader
# sys.argv.append('./gold/pku_training_words.utf8')
# sys.argv.append('./training/pku_training.utf8')
# sys.argv.append('./testing/pku_test.utf8')
assert len(sys.argv) == 4
with open(sys.argv[1], 'rt', encoding='utf8') as f:
training_words = [w.strip() for w in f.readlines()]
training = PlaintextCorpusReader( *os.path.split(sys.argv[2]) )
training_words += list(training.words())
#training_words = list(training.words())
N = len(training_words)
V = len( set(training_words) )
fdist = nltk.FreqDist(training_words)
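# Add-one (Laplace) smoothing in log space: log P(w) = log((count(w) + 1) / (N + V));
# words never seen in training fall back to defprob = log(1 / (N + V)) below.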
fdist = dict([(w, math.log((c+1.0)/(N+V))) for w, c in fdist.items()])
defprob = math.log(1.0/(N+V))
with open(sys.argv[3], 'rt', encoding='utf8') as f:
test = f.readlines()
def get_DAG(sentence):
DAG = {}
T = len(sentence)
for x in range(T):
ys = []
for y in range(x+1, T+1):
if sentence[x:y] in fdist:
ys.append(y)
if not ys:
ys.append(x+1)
DAG[x] = ys
return DAG
def dfs(DAG, sentence):
segments = []
T = len(sentence)
def _dfs(words, x):
for y in DAG[x]:
if y < T:
new = words.copy()
new.append(sentence[x:y])
_dfs(new, y)
else:
new = words.copy()
new.append(sentence[x:y])
segments.append( new )
_dfs([], 0)
bestseg = max([(sum(fdist.get(w, defprob) for w in seg), seg) for seg in segments])
return bestseg[1]
def dp(DAG, sentence):
T = len(sentence)
prob = {T:(0.0,)}
for x in range(T-1, -1, -1):
prob[x] = max([(fdist.get(sentence[x:y], defprob) + prob[y][0], y) for y in DAG[x]])
x = 0
bestseg = []
while x < T:
y = prob[x][1]
bestseg.append( sentence[x:y] )
x = y
return bestseg
for sent in test:
DAG = get_DAG(sent)
#seg1 = dfs(DAG, sent)
seg2 = dp(DAG, sent)
#print(' '.join(seg1), sep='', end='')
print(' '.join(seg2), sep='', end='')
#break
|
StarcoderdataPython
|
1713629
|
from time import sleep
i = int(input('inicio: '))
f = int(input('Fim: '))
p = int(input('Passo: '))
for i in range(i,f+1,p):
sleep(1)
print(i)
|
StarcoderdataPython
|
3231260
|
# coding: utf-8
STATS_RESOURCE_MAPPING = {
"stats": {
"resource": "stat/v1/data",
"docs": "https://yandex.ru/dev/metrika/doc/api2/api_v1/intro-docpage/",
"params": [
"direct_client_logins=<string,_string,...>",
"ids=<int,int,...>",
"metrics=<string>",
"accuracy=<string>",
"callback=<string>",
"date1=<string>",
"date2=<string>",
"dimensions=<string>",
"filters=<string>",
"include_undefined=<boolean>",
"lang=<string>",
"limit=<integer>",
"offset=<integer>",
"preset=<string>",
"pretty=<boolean>",
"proposed_accuracy=<boolean>",
"sort=<string>",
"timezone=<string>",
]
},
}
LOGSAPI_RESOURCE_MAPPING = {
"allinfo": {
"resource": "management/v1/counter/{counterId}/logrequests",
"docs": "https://yandex.ru/dev/metrika/doc/api2/logs/queries/getlogrequests-docpage/",
"params": None,
"methods": ["GET"],
},
"info": {
"resource": "management/v1/counter/{counterId}/logrequest/{requestId}",
"docs": "https://yandex.ru/dev/metrika/doc/api2/logs/queries/getlogrequest-docpage/",
"params": None,
"methods": ["GET"],
},
"download": {
"resource": "management/v1/counter/{counterId}/logrequest/{requestId}/part/{partNumber}/download",
"docs": "https://yandex.ru/dev/metrika/doc/api2/logs/queries/download-docpage/",
"params": None,
"methods": ["GET"],
},
"clean": {
"resource": "management/v1/counter/{counterId}/logrequest/{requestId}/clean",
"docs": "https://yandex.ru/dev/metrika/doc/api2/logs/queries/clean-docpage/",
"params": None,
"methods": ["POST"],
},
"cancel": {
"resource": "management/v1/counter/{counterId}/logrequest/{requestId}/cancel",
"docs": "https://yandex.ru/dev/metrika/doc/api2/logs/queries/cancel-docpage/",
"params": None,
"methods": ["POST"],
},
"create": {
"resource": "management/v1/counter/{counterId}/logrequests",
"docs": "https://yandex.ru/dev/metrika/doc/api2/logs/queries/createlogrequest-docpage/",
"params": ["date1", "date2", "fields", "source"],
"methods": ["POST"],
},
"evaluate": {
"resource": "management/v1/counter/{counterId}/logrequests/evaluate",
"docs": "https://yandex.ru/dev/metrika/doc/api2/logs/queries/evaluate-docpage/",
"params": ["date1", "date2", "fields", "source"],
"methods": ["GET"],
},
}
MANAGEMENT_RESOURCE_MAPPING = {
"counters": {
"resource": "management/v1/counters",
"docs": "https://yandex.ru/dev/metrika/doc/api2/management/counters/counters-docpage/",
"params": """[callback=<string>]
& [favorite=<boolean>]
& [field=<string>]
& [label_id=<integer>]
& [offset=<int>]
& [per_page=<int>]
& [permission=<string>]
& [reverse=<boolean>]
& [search_string=<string>]
& [sort=<counters_sort>]
& [status=<counter_status>]
& [type=<counter_type>]""",
"methods": ["GET", "POST"]
},
"counter": {
"resource": "management/v1/counter/{counterId}",
"docs": "https://yandex.ru/dev/metrika/doc/api2/management/counters/counter-docpage/",
"params": """[callback=<string>] & [field=<string>]""",
"methods": ["GET", "DELETE", "PUT"],
},
"counter_undelete": {
"resource": "management/v1/counter/{counterId}/undelete",
"docs": "https://yandex.ru/dev/metrika/doc/api2/management/counters/undeletecounter-docpage/",
"params": """""",
"methods": ["POST"]
},
"goals": {
"resource": "management/v1/counter/{counterId}/goals",
"docs": "https://yandex.ru/dev/metrika/doc/api2/management/goals/goals-docpage/",
"params": """[callback=<string>] & [useDeleted=<boolean>]""",
"methods": ["GET", "POST"]
},
"goal": {
"resource": "management/v1/counter/{counterId}/goal/{goalId}",
"docs": "https://yandex.ru/dev/metrika/doc/api2/management/goals/goal-docpage/",
"params": """[callback=<string>]""",
"methods": ["GET", "DELETE", "PUT"]
},
"accounts": {
"resource": "management/v1/accounts",
"docs": "https://yandex.ru/dev/metrika/doc/api2/management/accounts/accounts-docpage/",
"params": """[callback=<string>] & [user_login=<string>]""",
"methods": ["GET", "DELETE", "PUT"]
},
"clients": {
"resource": "management/v1/clients",
"docs": "https://yandex.ru/dev/metrika/doc/api2/management/direct_clients/getclients-docpage/",
"params": """counters=<list>""",
"methods": ["GET", ]
},
"filters": {
"resource": "management/v1/counter/{counterId}/filters",
"docs": "https://yandex.ru/dev/metrika/doc/api2/management/filters/filters-docpage/",
"params": """[callback=<string>]""",
"methods": ["GET", "POST"]
},
"filter": {
"resource": "management/v1/counter/{counterId}/filter/{filterId}",
"docs": "https://yandex.ru/dev/metrika/doc/api2/management/filters/filter-docpage/",
"params": """[callback=<string>]""",
"methods": ["GET", "DELETE", "PUT"]
},
"operations": {
"resource": "management/v1/counter/{counterId}/operations",
"docs": "https://yandex.ru/dev/metrika/doc/api2/management/operations/operations-docpage/",
"params": """[callback=<string>]""",
"methods": ["GET", "POST"]
},
"operation": {
"resource": "management/v1/counter/{counterId}/operation/{operationId}",
"docs": "https://yandex.ru/dev/metrika/doc/api2/management/operations/operation-docpage/",
"params": """[callback=<string>]""",
"methods": ["GET", "DELETE", "PUT"]
},
"grants": {
"resource": "management/v1/counter/{counterId}/grants",
"docs": "https://yandex.ru/dev/metrika/doc/api2/management/grants/grants-docpage/",
"params": """[callback=<string>]""",
"methods": ["GET", "POST"]
},
"grant": {
"resource": "management/v1/counter/{counterId}/grant",
"docs": "https://yandex.ru/dev/metrika/doc/api2/management/grants/grant-docpage/",
"params": """user_login=<string>""",
"methods": ["GET", "PUT", "DELETE"]
},
"public_grant": {
"resource": "management/v1/counter/{counterId}/public_grant",
"docs": "https://yandex.ru/dev/metrika/doc/api2/management/public-grants/addgrant-docpage/",
"params": """""",
"methods": ["POST", "DELETE"]
},
"delegates": {
"resource": "management/v1/delegates",
"docs": "https://yandex.ru/dev/metrika/doc/api2/management/delegates/delegates-docpage/",
"params": """[callback=<string>]""",
"methods": ["GET", "POST"]
},
"delegate": {
"resource": "management/v1/delegate",
"docs": "https://yandex.ru/dev/metrika/doc/api2/management/delegates/deletedelegate-docpage/",
"params": """user_login=<string>""",
"methods": ["DELETE"]
},
"labels": {
"resource": "management/v1/labels",
"docs": "https://yandex.ru/dev/metrika/doc/api2/management/labels/getlabels-docpage/",
"params": None,
"methods": ["GET", "POST"]
},
"label": {
"resource": "management/v1/label/{labelId}",
"docs": "https://yandex.ru/dev/metrika/doc/api2/management/labels/getlabel-docpage/",
"params": None,
"methods": ["GET", "DELETE", "PUT"]
},
"set_counter_label": {
"resource": "management/v1/counter/{counterId}/label/{labelId}",
"docs": "https://yandex.ru/dev/metrika/doc/api2/management/links/setcounterlabel-docpage/",
"params": None,
"methods": ["POST", "DELETE"]
},
"segments": {
"resource": "management/v1/counter/{counterId}/apisegment/segments",
"docs": "https://yandex.ru/dev/metrika/doc/api2/management/segments/getsegmentsforcounter-docpage/",
"params": None,
"methods": ["GET", "POST"]
},
"segment": {
"resource": "management/v1/counter/{counterId}/apisegment/segment/{segmentId}",
"docs": "https://yandex.ru/dev/metrika/doc/api2/management/segments/getsegment-docpage/",
"params": None,
"methods": ["GET", "DELETE", "PUT"]
},
"user_params_uploadings": {
"resource": "management/v1/counter/{counterId}/user_params/uploadings",
"docs": "https://yandex.ru/dev/metrika/doc/api2/management/userparams/findall-docpage/",
"params": None,
"methods": ["GET"]
},
"user_params_uploading": {
"resource": "management/v1/counter/{counterId}/user_params/uploading/{uploadingId}",
"docs": "https://yandex.ru/dev/metrika/doc/api2/management/userparams/findbyid-docpage/",
"params": None,
"methods": ["GET", "PUT"]
},
"user_params_upload": {
"resource": "management/v1/counter/{counterId}/user_params/uploadings/upload",
"docs": "https://yandex.ru/dev/metrika/doc/api2/management/userparams/upload-docpage/",
"params": """action=<user_params_uploading_action>""",
"methods": ["POST"]
},
"user_params_uploading_confirm": {
"resource": "management/v1/counter/{counterId}/user_params/uploading/{uploadingId}/confirm",
"docs": "https://yandex.ru/dev/metrika/doc/api2/management/userparams/confirm-docpage/",
"params": None,
"methods": ["POST"]
},
"chart_annotations": {
"resource": "management/v1/counter/{counterId}/chart_annotations",
"docs": "https://yandex.ru/dev/metrika/doc/api2/management/chart_annotation/findall-docpage/",
"params": None,
"methods": ["GET", "POST"]
},
"chart_annotation": {
"resource": "management/v1/counter/{counterId}/chart_annotation/{id}",
"docs": "https://yandex.ru/dev/metrika/doc/api2/management/chart_annotation/get-docpage/",
"params": None,
"methods": ["GET", "DELETE", "PUT"]
},
"yclid_conversions_uploadings": {
"resource": "management/v1/counter/{counterId}/yclid_conversions/uploadings",
"docs": "https://yandex.ru/dev/metrika/doc/api2/management/yclid-conversion/findall-docpage/",
"params": """[limit=<integer>] & [offset=<integer>""",
"methods": ["GET"]
},
"yclid_conversions_uploading": {
"resource": "management/v1/counter/{counterId}/yclid_conversions/uploading/{id}",
"docs": "https://yandex.ru/dev/metrika/doc/api2/management/yclid-conversion/findbyid-docpage/",
"params": None,
"methods": ["GET"]
},
"yclid_conversions_upload": {
"resource": "management/v1/counter/{counterId}/yclid_conversions/upload",
"docs": "https://yandex.ru/dev/metrika/doc/api2/management/yclid-conversion/upload-docpage/",
"params": """[comment=<string>]""",
"methods": ["GET"]
},
"offline_conversions_uploadings": {
"resource": "management/v1/counter/{counterId}/offline_conversions/uploadings",
"docs": "https://yandex.ru/dev/metrika/doc/api2/management/offline_conversion/findall-docpage/",
"params": None,
"methods": ["GET"]
},
"offline_conversions_calls_uploadings": {
"resource": "management/v1/counter/{counterId}/offline_conversions/calls_uploadings",
"docs": "https://yandex.ru/dev/metrika/doc/api2/management/offline_conversion/findallcalluploadings-docpage/",
"params": None,
"methods": ["GET"]
},
"offline_conversions_uploading": {
"resource": "management/v1/counter/{counterId}/offline_conversions/uploading/{id}",
"docs": "https://yandex.ru/dev/metrika/doc/api2/management/offline_conversion/findbyid-docpage/",
"params": None,
"methods": ["GET"]
},
"offline_conversions_calls_uploading": {
"resource": "management/v1/counter/{counterId}/offline_conversions/calls_uploading/{id}",
"docs": "https://yandex.ru/dev/metrika/doc/api2/management/offline_conversion/findcalluploadingbyid-docpage/",
"params": None,
"methods": ["GET"]
},
"offline_conversions_upload": {
"resource": "management/v1/counter/{counterId}/offline_conversions/upload",
"docs": "https://yandex.ru/dev/metrika/doc/api2/management/offline_conversion/upload-docpage/",
"params": """client_id_type=<offline_conversion_uploading_client_id_type> & [comment=<string>]""",
"methods": ["POST"]
},
"offline_conversions_upload_calls": {
"resource": "management/v1/counter/{counterId}/offline_conversions/upload_calls",
"docs": "https://yandex.ru/dev/metrika/doc/api2/management/offline_conversion/uploadcalls-docpage/",
"params": """client_id_type=<offline_conversion_uploading_client_id_type>
& [comment=<string>]
& [new_goal_name=<string>]""",
"methods": ["POST"]
},
"offline_conversions_extended_threshold": {
"resource": "management/v1/counter/{counterId}/offline_conversions/extended_threshold",
"docs": "https://yandex.ru/dev/metrika/doc/api2/management/offline_conversion/enableextendedthreshold-docpage/",
"params": None,
"methods": ["POST", "DELETE"]
},
"offline_conversions_calls_extended_threshold": {
"resource": "management/v1/counter/{counterId}/offline_conversions/calls_extended_threshold",
"docs": "https://yandex.ru/dev/metrika/doc/api2/management/offline_conversion/enablecallsextendedthreshold-docpage/",
"params": None,
"methods": ["POST", "DELETE"]
},
}
|
StarcoderdataPython
|
3311200
|
<reponame>gabriel-samfira/nova<filename>nova/tests/unit/api/openstack/compute/test_microversions.py<gh_stars>0
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo.config import cfg
from oslo.serialization import jsonutils
from nova import test
from nova.tests.unit.api.openstack import fakes
CONF = cfg.CONF
class MicroversionsTest(test.NoDBTestCase):
@mock.patch("nova.api.openstack.APIRouterV21.api_extension_namespace",
return_value='nova.api.v3.test_extensions')
def test_microversions_no_header(self, mock_namespace):
app = fakes.wsgi_app_v21(init_only='test-microversions')
req = fakes.HTTPRequest.blank('/v2/fake/microversions')
res = req.get_response(app)
self.assertEqual(200, res.status_int)
resp_json = jsonutils.loads(res.body)
self.assertEqual('val', resp_json['param'])
@mock.patch("nova.api.openstack.APIRouterV21.api_extension_namespace",
return_value='nova.api.v3.test_extensions')
def test_microversions_with_header(self, mock_namespace):
app = fakes.wsgi_app_v21(init_only='test-microversions')
req = fakes.HTTPRequest.blank('/v2/fake/microversions')
req.headers = {'X-OpenStack-Compute-API-Version': '2.3'}
res = req.get_response(app)
self.assertEqual(200, res.status_int)
resp_json = jsonutils.loads(res.body)
self.assertEqual('val2', resp_json['param'])
@mock.patch("nova.api.openstack.APIRouterV21.api_extension_namespace",
return_value='nova.api.v3.test_extensions')
def test_microversions_with_header_exact_match(self, mock_namespace):
app = fakes.wsgi_app_v21(init_only='test-microversions')
req = fakes.HTTPRequest.blank('/v2/fake/microversions')
req.headers = {'X-OpenStack-Compute-API-Version': '2.2'}
res = req.get_response(app)
self.assertEqual(200, res.status_int)
resp_json = jsonutils.loads(res.body)
self.assertEqual('val2', resp_json['param'])
@mock.patch("nova.api.openstack.APIRouterV21.api_extension_namespace",
return_value='nova.api.v3.test_extensions')
def test_microversions2_no_2_1_version(self, mock_namespace):
app = fakes.wsgi_app_v21(init_only='test-microversions')
req = fakes.HTTPRequest.blank('/v2/fake/microversions2')
req.headers = {'X-OpenStack-Compute-API-Version': '2.3'}
res = req.get_response(app)
self.assertEqual(200, res.status_int)
resp_json = jsonutils.loads(res.body)
self.assertEqual('controller2_val1', resp_json['param'])
@mock.patch("nova.api.openstack.APIRouterV21.api_extension_namespace",
return_value='nova.api.v3.test_extensions')
def test_microversions2_later_version(self, mock_namespace):
app = fakes.wsgi_app_v21(init_only='test-microversions')
req = fakes.HTTPRequest.blank('/v2/fake/microversions2')
req.headers = {'X-OpenStack-Compute-API-Version': '3.0'}
res = req.get_response(app)
self.assertEqual(202, res.status_int)
resp_json = jsonutils.loads(res.body)
self.assertEqual('controller2_val2', resp_json['param'])
@mock.patch("nova.api.openstack.APIRouterV21.api_extension_namespace",
return_value='nova.api.v3.test_extensions')
def test_microversions2_version_too_high(self, mock_namespace):
app = fakes.wsgi_app_v21(init_only='test-microversions')
req = fakes.HTTPRequest.blank('/v2/fake/microversions2')
req.headers = {'X-OpenStack-Compute-API-Version': '3.2'}
res = req.get_response(app)
self.assertEqual(404, res.status_int)
@mock.patch("nova.api.openstack.APIRouterV21.api_extension_namespace",
return_value='nova.api.v3.test_extensions')
def test_microversions2_version_too_low(self, mock_namespace):
app = fakes.wsgi_app_v21(init_only='test-microversions')
req = fakes.HTTPRequest.blank('/v2/fake/microversions2')
req.headers = {'X-OpenStack-Compute-API-Version': '2.1'}
res = req.get_response(app)
self.assertEqual(404, res.status_int)
|
StarcoderdataPython
|
4839185
|
<reponame>aljer/ptf
"""
Remote platform
This platform uses physical ethernet interfaces.
"""
# Update this dictionary to suit your environment.
remote_port_map = {
(0, 0): "eth0",
(0, 1): "eth1",
(0, 2): "eth2",
(0, 3): "eth3",
(0, 4): "eth4",
(0, 5): "eth5",
(0, 6): "eth6",
(0, 7): "eth7",
(0, 8): "eth8",
(0, 9): "eth9",
(0, 10): "eth10",
(0, 11): "eth11",
(0, 12): "eth12",
(0, 13): "eth13",
(0, 14): "eth14",
(0, 15): "eth15",
(0, 16): "eth16",
(0, 17): "eth17",
(0, 18): "eth18",
(0, 19): "eth19",
(0, 20): "eth20",
(0, 21): "eth21",
(0, 22): "eth22",
(0, 23): "eth23",
(0, 24): "eth24",
(0, 25): "eth25",
(0, 26): "eth26",
(0, 27): "eth27",
(0, 28): "eth28",
(0, 29): "eth29",
(0, 30): "eth30",
(0, 31): "eth31",
}
def platform_config_update(config):
"""
Update configuration for the remote platform
@param config The configuration dictionary to use/update
"""
global remote_port_map
config["port_map"] = remote_port_map.copy()
config["caps_table_idx"] = 0
|
StarcoderdataPython
|
1633420
|
from .import_data import import_data
|
StarcoderdataPython
|
3253761
|
<reponame>zmxdream/Paddle
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from op_test import OpTest, convert_uint16_to_float, convert_float_to_uint16
import paddle
import paddle.fluid.core as core
from paddle.fluid.op import Operator
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
from paddle.fluid.tests.unittests.test_uniform_random_op import output_hist, output_hist_diag
class TestUniformRandomOpBF16(OpTest):
def setUp(self):
self.op_type = "uniform_random"
self.dtype = "uint16"
self.inputs = {}
self.init_attrs()
self.outputs = {"Out": np.zeros((1000, 784)).astype("uint16")}
def init_attrs(self):
self.attrs = {
"shape": [1000, 784],
"min": -5.0,
"max": 10.0,
"seed": 10,
'dtype': int(core.VarDesc.VarType.BF16)
}
self.output_hist = output_hist
def verify_output(self, outs):
if np.array(outs[0]).dtype == np.uint16:
result = convert_uint16_to_float(np.array(outs[0]))
else:
result = np.array(outs[0])
hist, prob = self.output_hist(result)
self.assertTrue(
np.allclose(
hist, prob, rtol=0, atol=0.01), "hist: " + str(hist))
def test_check_output(self):
outs = self.calc_output(core.CPUPlace())
outs = [np.array(out) for out in outs]
outs.sort(key=len)
self.verify_output(outs)
class TestUniformRandomOpBF16AttrTensorList(TestUniformRandomOpBF16):
def setUp(self):
self.op_type = "uniform_random"
self.new_shape = (1000, 784)
self.dtype = "uint16"
shape_tensor = []
for index, ele in enumerate(self.new_shape):
shape_tensor.append(("x" + str(index), np.ones(
(1)).astype("int64") * ele))
self.inputs = {'ShapeTensorList': shape_tensor}
self.init_attrs()
self.outputs = {"Out": np.zeros((1000, 784)).astype("uint16")}
def init_attrs(self):
self.attrs = {
"min": -5.0,
"max": 10.0,
"seed": 10,
'dtype': int(core.VarDesc.VarType.BF16)
}
self.output_hist = output_hist
class TestUniformRandomOpBF16AttrTensorInt32(
TestUniformRandomOpBF16AttrTensorList):
def setUp(self):
self.op_type = "uniform_random"
self.dtype = "uint16"
self.inputs = {"ShapeTensor": np.array([1000, 784]).astype("int32")}
self.init_attrs()
self.outputs = {"Out": np.zeros((1000, 784)).astype("uint16")}
class TestUniformRandomOpBF16WithDiagInit(TestUniformRandomOpBF16):
def init_attrs(self):
self.attrs = {
"shape": [1000, 784],
"min": -5.0,
"max": 10.0,
"seed": 10,
"diag_num": 784,
"diag_step": 784,
"diag_val": 1.0,
'dtype': int(core.VarDesc.VarType.BF16)
}
self.output_hist = output_hist_diag
class TestUniformRandomOpBF16SelectedRows(unittest.TestCase):
def test_check_output(self):
self.check_with_place(core.CPUPlace())
def check_with_place(self, place):
scope = core.Scope()
out = scope.var("X").get_selected_rows()
paddle.seed(10)
op = Operator(
"uniform_random",
Out="X",
shape=[1000, 784],
min=-5.0,
max=10.0,
seed=10,
dtype=int(core.VarDesc.VarType.BF16))
op.run(scope, place)
self.assertEqual(out.get_tensor().shape(), [1000, 784])
result = convert_uint16_to_float(np.array(out.get_tensor()))
hist, prob = output_hist(result)
self.assertTrue(
np.allclose(
hist, prob, rtol=0, atol=0.01), "hist: " + str(hist))
class TestUniformRandomOpBF16SelectedRowsWithDiagInit(
TestUniformRandomOpBF16SelectedRows):
def check_with_place(self, place):
scope = core.Scope()
out = scope.var("X").get_selected_rows()
paddle.seed(10)
op = Operator(
"uniform_random",
Out="X",
shape=[500, 784],
min=-5.0,
max=10.0,
seed=10,
diag_num=500,
diag_step=784,
diag_val=1.0,
dtype=int(core.VarDesc.VarType.BF16))
op.run(scope, place)
self.assertEqual(out.get_tensor().shape(), [500, 784])
result = convert_uint16_to_float(np.array(out.get_tensor()))
hist, prob = output_hist(result)
self.assertTrue(
np.allclose(
hist, prob, rtol=0, atol=0.01), "hist: " + str(hist))
class TestUniformRandomOpBF16AttrTensorAPI(unittest.TestCase):
def test_attr_tensor_API(self):
startup_program = fluid.Program()
train_program = fluid.Program()
with fluid.program_guard(train_program, startup_program):
dim_tensor = fluid.layers.fill_constant([1], "int64", 3)
ret = fluid.layers.nn.uniform_random(
[1, dim_tensor, 2], dtype=np.uint16)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(startup_program)
outs = exe.run(train_program, fetch_list=[ret])
class TestUniformRandomOpAPISeed(unittest.TestCase):
def test_attr_tensor_API(self):
_seed = 10
gen = paddle.seed(_seed)
gen._is_init_py = False
startup_program = fluid.Program()
train_program = fluid.Program()
with fluid.program_guard(train_program, startup_program):
_min = 5
_max = 10
ret = fluid.layers.nn.uniform_random(
[2, 3, 2], min=_min, max=_max, seed=_seed)
ret_2 = fluid.layers.nn.uniform_random(
[2, 3, 2], min=_min, max=_max, seed=_seed)
res = fluid.layers.equal(ret, ret_2)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(startup_program)
ret_value, cmp_value = exe.run(train_program, fetch_list=[ret, res])
self.assertTrue(np.array(cmp_value).all())
for i in ret_value.flatten():
self.assertGreaterEqual(i, _min)
self.assertLess(i, _max)
class TestUniformRandomOpBF16SelectedRowsShapeTensor(unittest.TestCase):
def test_check_output(self):
place = core.CPUPlace()
scope = core.Scope()
out = scope.var("X").get_selected_rows()
shape_tensor = scope.var("Shape").get_tensor()
shape_tensor.set(np.array([1000, 784]).astype("int64"), place)
paddle.seed(10)
op = Operator(
"uniform_random",
ShapeTensor="Shape",
Out="X",
min=-5.0,
max=10.0,
seed=10,
dtype=int(core.VarDesc.VarType.BF16))
op.run(scope, place)
self.assertEqual(out.get_tensor().shape(), [1000, 784])
result = convert_uint16_to_float(np.array(out.get_tensor()))
hist, prob = output_hist(result)
self.assertTrue(
np.allclose(
hist, prob, rtol=0, atol=0.01), "hist: " + str(hist))
class TestUniformRandomOpBF16SelectedRowsShapeTensorList(
TestUniformRandomOpBF16SelectedRowsShapeTensor):
def test_check_output(self):
place = core.CPUPlace()
scope = core.Scope()
out = scope.var("X").get_selected_rows()
shape_1 = scope.var("shape1").get_tensor()
shape_1.set(np.array([1000]).astype("int64"), place)
shape_2 = scope.var("shape2").get_tensor()
shape_2.set(np.array([784]).astype("int64"), place)
paddle.seed(10)
op = Operator(
"uniform_random",
ShapeTensorList=["shape1", "shape2"],
Out="X",
min=-5.0,
max=10.0,
seed=10,
dtype=int(core.VarDesc.VarType.BF16))
op.run(scope, place)
self.assertEqual(out.get_tensor().shape(), [1000, 784])
result = convert_uint16_to_float(np.array(out.get_tensor()))
hist, prob = output_hist(result)
self.assertTrue(
np.allclose(
hist, prob, rtol=0, atol=0.01), "hist: " + str(hist))
class TestUniformRandomBatchSizeLikeOpBF16API(unittest.TestCase):
def test_attr_tensorlist_int32_API(self):
startup_program = fluid.Program()
train_program = fluid.Program()
with fluid.program_guard(train_program, startup_program):
input = fluid.data(name="input", shape=[1, 3], dtype='uint16')
out_1 = fluid.layers.uniform_random_batch_size_like(
input, [2, 4], dtype=np.uint16) # out_1.shape=[1, 4]
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(startup_program)
outs = exe.run(train_program, fetch_list=[out_1])
if __name__ == "__main__":
from paddle import enable_static
enable_static()
unittest.main()
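# Added sketch (commented out; helper signatures assumed from their use above): the
# BF16 checks convert the raw uint16 bit patterns back to float32 before building the
# histogram, and bfloat16 keeps only ~8 mantissa bits, which is why atol=0.01 is used.
#
# x = np.random.uniform(-5.0, 10.0, (4, 4)).astype("float32")
# x_back = convert_uint16_to_float(convert_float_to_uint16(x))
# np.testing.assert_allclose(x, x_back, rtol=1e-2)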
|
StarcoderdataPython
|
1680092
|
<filename>tests/benchmark/test_benchmark_engine.py
# This file is part of the Reproducible Open Benchmarks for Data Analysis
# Platform (ROB).
#
# Copyright (C) 2019 NYU.
#
# ROB is free software; you can redistribute it and/or modify it under the
# terms of the MIT License; see LICENSE file for more details.
"""Test functionality of the benchmark engine."""
import os
from benchengine.benchmark.repo import BenchmarkRepository
from benchengine.benchmark.engine import BenchmarkEngine
from benchengine.db import DatabaseDriver
from benchtmpl.io.files.base import FileHandle
from benchtmpl.workflow.benchmark.loader import BenchmarkTemplateLoader
from benchtmpl.workflow.parameter.value import TemplateArgument
from benchtmpl.workflow.template.repo import TemplateRepository
import benchengine.benchmark.base as bm
import benchengine.config as config
import benchengine.error as err
DIR = os.path.dirname(os.path.realpath(__file__))
DATA_FILE = os.path.join(DIR, '../.files/templates/helloworld/data/names.txt')
TEMPLATE_DIR = os.path.join(DIR, '../.files/templates/helloworld')
class TestBenchmarkEngine(object):
"""Test running benchmarks using the simple synchronous benchmark engine."""
def test_run_benchmark(self, tmpdir):
"""Test running a benchmarks."""
# Initialize the BASEDIR environment variable
os.environ[config.ENV_BASEDIR] = os.path.abspath(str(tmpdir))
# Create a new database and open a connection
connect_string = 'sqlite:{}/auth.db'.format(str(tmpdir))
DatabaseDriver.init_db(connect_string=connect_string)
con = DatabaseDriver.connect(connect_string=connect_string)
# Create repository and engine instances
repository = BenchmarkRepository(
con=con,
template_store=TemplateRepository(
base_dir=config.get_template_dir(),
loader=BenchmarkTemplateLoader(),
filenames=['benchmark', 'template', 'workflow']
)
)
engine = BenchmarkEngine(con)
# Add with minimal information
benchmark = repository.add_benchmark(
name='My benchmark',
src_dir=TEMPLATE_DIR
)
template = benchmark.template
arguments = {
'names': TemplateArgument(
parameter=template.get_parameter('names'),
value=FileHandle(DATA_FILE)
),
'sleeptime': TemplateArgument(
parameter=template.get_parameter('sleeptime'),
value=1
),
'greeting': TemplateArgument(
parameter=template.get_parameter('greeting'),
value='Welcome'
)
}
run_id, state = engine.run(benchmark, arguments, 'USERID')
assert state.is_success()
sql = 'SELECT * FROM benchmark_run WHERE run_id = ?'
rs = con.execute(sql, (run_id, )).fetchone()
assert rs['benchmark_id'] == benchmark.identifier
assert rs['user_id'] == 'USERID'
assert rs['state'] == state.type_id
table_name = bm.PREFIX_RESULT_TABLE + benchmark.identifier
sql = 'SELECT * FROM {} WHERE run_id = ?'.format(table_name)
rs = con.execute(sql, (run_id, )).fetchone()
assert rs['max_line'] == 'Welcome Alice!'
|
StarcoderdataPython
|
3207926
|
class ValidationException(Exception):
pass
def guard_alphanumeric(string: str, message: str):
if not string.isalnum():
raise ValidationException(message)
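# Added usage sketch: the guard either returns silently or raises the exception
# with the supplied message.
if __name__ == "__main__":
    guard_alphanumeric("abc123", "value must be alphanumeric")  # passes
    try:
        guard_alphanumeric("abc-123", "value must be alphanumeric")
    except ValidationException as exc:
        print(exc)  # -> value must be alphanumeric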
|
StarcoderdataPython
|
3318899
|
'''Image OCR (text recognition) example'''
import re
from urllib.parse import urljoin
from renderer.utils import GeneralOcr, retry_get
url = 'http://www.snqindu.gov.cn/html/zwgk/xxgkml/xzzf/xzcf/202006/44820.html'
resp = retry_get(url)
string = resp.content.decode('utf-8')
imgs = re.findall(r'''src=['"](/uploadfile\S+(jpg|png))['"]''', string)
img, _ = imgs[0]
ocr = GeneralOcr()
ocr.basic_ocr(urljoin(url, img))
url = 'http://ztb.panan.gov.cn/fileserver/down?md5=B6B1765B3732F55D48C7449F6D4CEAA3&bucket=2'
ocr = GeneralOcr()
ocr.basic_ocr(url, certain=True)
|
StarcoderdataPython
|
1744667
|
<gh_stars>0
############################################
# The annotations for this dataset are stored in txt files, each paired with the
# image of the same name in the same folder.
# Extract the annotation information into a single format:
#   <absolute image path> <bbox 1> <class 1> <bbox 2> <class 2> ...
# The extracted information is written to the train, val and test files.
##############################################
import os
from os import getcwd
import glob
from convert_bbox_for_anno_extraction import convert_bbox
# The three files to be generated; the year can be changed, everything else is best left as is
sets=[('2007', 'train'), ('2007', 'val'), ('2007', 'test')]
# Follow the dataset split from the previous step, i.e. extract the annotations in
# the order given by the 4 txt files generated under Main (file names only, without
# extensions). As noted above, the images and annotations of this dataset are not in
# a single folder, which is awkward; for now we use a simple workaround and treat the
# first class as the validation set.
wd = getcwd()  # absolute path of the current working directory
# Walk the dataset to get the total number of samples in each sub-class
path = os.path.join(wd, 'VOCdevkit', 'VOC2007', 'JPEGImages')
files = os.listdir(path)
num = list()
num.append(0)  # num[1] holds the total for the first sub-class; num[0] = 0 just simplifies the code
class_datasets = 2
flag = 0
for fl in files:
path1 = os.path.join(path,fl,'*.jpg')
files_2 = glob.glob(path1)
num.append(len(files_2) + num[flag])
flag += 1
for year, image_set in sets:
# Open the split lists under Main (file names only, no extensions); since images and
# annotation files share the same base name there is no need to go through JPEGImages.
image_ids = open('VOCdevkit//VOC%s//ImageSets//Main//%s.txt'%(year, image_set)).read().strip().split()
list_file = open('%s_%s.txt'%(year, image_set), 'w')
count = 0  # counts how many images have been taken so far
for image_id in image_ids:
count += 1
# Again, because the annotations/images are split into sub-classes, we need the
# running totals per sub-class to know which sub-class folder the current image is in.
for k in range(class_datasets):
if count>num[k] and count<=num[k+1]:
list_file.write('%s\VOCdevkit\VOC%s\JPEGImages\%s\%s.jpg '%(wd, year, k+1,image_id))
# Go into sub-class k+1 to find the corresponding annotation file
annotation_txt_file = open('%s//VOCdevkit//VOC%s//JPEGImages//%s//%s.txt'%(wd, year, k+1,image_id), 'r')
# Read the txt file line by line; each line is returned as a string
annotation_txt_file1 = annotation_txt_file.readlines()
for i in range(len(annotation_txt_file1)):
# Split the line string on whitespace
annotation_txt_file2 = annotation_txt_file1[i].split()
# Strip any trailing newline. Python strings are immutable, so the list element has
# to be reassigned (split() has already removed the newline, so this is defensive).
annotation_txt_file2[-1] = annotation_txt_file2[-1].replace('\n', '')
# The bbox values may need to be converted
bbox = convert_bbox(annotation_txt_file2)
#print('annotation_txt_file2 ', annotation_txt_file2)
if i != len(annotation_txt_file1)-1:
list_file.write(str(bbox[0])+',')
list_file.write(str(bbox[1])+',')
list_file.write(str(bbox[2])+',')
list_file.write(str(bbox[3])+',')
list_file.write(annotation_txt_file2[0]+' ')
else:
list_file.write(str(bbox[0])+',')
list_file.write(str(bbox[1])+',')
list_file.write(str(bbox[2])+',')
list_file.write(str(bbox[3])+',')
list_file.write(annotation_txt_file2[0]+'\n')
break
else:
continue
list_file.close()
print('All absolute image paths and their annotations have been extracted. Keep going; the road ahead is long!')
|
StarcoderdataPython
|
4806933
|
<reponame>dolbyio-samples/dolbyio-rest-apis-client-python
"""
dolbyio_rest_apis.communications.authentication
~~~~~~~~~~~~~~~
This module contains the functions to work with the authentication API.
"""
from deprecated import deprecated
from dolbyio_rest_apis.core.helpers import add_if_not_none
from dolbyio_rest_apis.communications.internal.http_context import CommunicationsHttpContext
from dolbyio_rest_apis.communications.internal.urls import get_api_v1_url, get_session_url
from .models import AccessToken
async def _get_access_token(
url: str,
consumer_key: str,
consumer_secret: str,
expires_in: int=None,
) -> AccessToken:
data = {
'grant_type': 'client_credentials',
}
add_if_not_none(data, 'expires_in', expires_in)
async with CommunicationsHttpContext() as http_context:
json_response = await http_context.requests_post_basic_auth(
consumer_key=consumer_key,
consumer_secret=consumer_secret,
url=url,
data=data
)
return AccessToken(json_response)
async def get_api_access_token(
consumer_key: str,
consumer_secret: str,
expires_in: int=None,
) -> AccessToken:
r"""
To make any API call, you must acquire a JWT (JSON Web Token) format access token.
Make sure to use this API against https://api.voxeet.com/v1.
Note: Even though the OAuth terminology is used in the following APIs, they are not OAuth compliant.
See: https://docs.dolby.io/communications-apis/reference/get-bearer-token
Args:
consumer_key: Your Dolby.io Consumer Key.
consumer_secret: Your Dolby.io Consumer Secret.
expires_in: (Optional) Access token expiration time in seconds.
The maximum value is 2,592,000, indicating 30 days. If no value is specified, the default is 600,
indicating ten minutes.
Returns:
An :class:`AccessToken` object.
Raises:
HttpRequestError: If a client error occurred.
HTTPError: If one occurred.
"""
return await _get_access_token(f'{get_api_v1_url()}/auth/token', consumer_key, consumer_secret, expires_in)
async def get_client_access_token(
consumer_key: str,
consumer_secret: str,
expires_in: int=None,
) -> AccessToken:
r"""
This API returns an access token that your backend can request on behalf of a client to initialize
the Dolby.io SDK in a secure way. Make sure to use this API against https://session.voxeet.com.
Note: Even though the OAuth2 terminology is used in the following APIs, they are not OAuth2 compliant.
See: https://docs.dolby.io/communications-apis/reference/get-client-access-token
Args:
consumer_key: Your Dolby.io Consumer Key.
consumer_secret: Your Dolby.io Consumer Secret.
expires_in: (Optional) Access token expiration time in seconds.
The maximum value is 2,592,000, indicating 30 days. If no value is specified, the default is 600,
indicating ten minutes.
Returns:
An :class:`AccessToken` object.
Raises:
HttpRequestError: If a client error occurred.
HTTPError: If one occurred.
"""
return await _get_access_token(f'{get_session_url()}/oauth2/token', consumer_key, consumer_secret, expires_in)
@deprecated(reason='This API is no longer applicable for applications on the new Dolby.io Communications APIs platform.')
async def revoke_access_token(
consumer_key: str,
consumer_secret: str,
access_token: str,
) -> None:
r"""
Revokes the authentication token.
See: https://docs.dolby.io/communications-apis/reference/revoke-token
Args:
consumer_key: Your Dolby.io Consumer Key.
consumer_secret: Your Dolby.io Consumer Secret.
access_token: The access token to revoke.
Raises:
HttpRequestError: If a client error occurred.
HTTPError: If one occurred.
"""
data = {
'access_token': access_token,
}
async with CommunicationsHttpContext() as http_context:
await http_context.requests_post_basic_auth(
consumer_key=consumer_key,
consumer_secret=consumer_secret,
url=f'{get_session_url()}/oauth2/invalidate',
data=data
)
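# Added usage sketch (commented out; the key/secret strings are placeholders, not
# real credentials). Both helpers are coroutines, so they need an event loop.
#
# import asyncio
#
# async def _demo():
#     token = await get_api_access_token("CONSUMER_KEY", "CONSUMER_SECRET", expires_in=3600)
#     print(token)
#
# asyncio.run(_demo())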
|
StarcoderdataPython
|
1610337
|
<gh_stars>0
from importNormativeTypes import *
#####################################################################################################################################################################################################
# #
# Import Nfv Types from a given file #
# #
# activation : #
# python importNfvTypes.py [optional -s <scheme> | --scheme=<scheme>, default http] [-i <be host> | --ip=<be host>] [-p <be port> | --port=<be port> ] [-f <input file> | --ifile=<input file> ] #
# #
# shortest activation (be host = localhost, be port = 8080): #
# python importUsers.py [-f <input file> | --ifile=<input file> ] #
# #
#####################################################################################################################################################################################################
def importNfvTypes(scheme, be_host, be_port, admin_user, file_dir, update_version):
nfv_types = ["underlayVpn",
"overlayTunnel",
"genericNeutronNet",
"allottedResource",
"extImageFile",
"extLocalStorage",
"extZteCP",
"extZteVDU",
"extZteVL",
"NSD",
"VDU",
"vduCompute",
"Cp",
"vduVirtualStorage",
"vduVirtualBlockStorage",
"vduVirtualFileStorage",
"vduVirtualObjectStorage",
"vduVirtualStorage",
"vnfVirtualLink",
"vnfExtCp",
"vduCp",
"VNF",
"PonUni",
"OltNni",
"OntNni"]
response_codes = [200, 201]
if update_version == 'false':
response_codes = [200, 201, 409]
results = []
for nfv_type in nfv_types:
result = createNormativeType(scheme, be_host, be_port, admin_user, file_dir, nfv_type, update_version)
results.append(result)
if result[1] is None or result[1] not in response_codes:
print "Failed creating heat type " + nfv_type + ". " + str(result[1])
return results
def main(argv):
print 'Number of arguments:', len(sys.argv), 'arguments.'
be_host = 'localhost'
be_port = '8080'
admin_user = 'jh0003'
update_version = 'true'
scheme = 'http'
try:
opts, args = getopt.getopt(argv, "i:p:u:s:v:h", ["ip=", "port=", "user=", "scheme=", "updateversion="])
except getopt.GetoptError:
usage()
error_and_exit(2, 'Invalid input')
for opt, arg in opts:
# print opt, arg
if opt == '-h':
usage()
sys.exit(3)
elif opt in ("-i", "--ip"):
be_host = arg
elif opt in ("-p", "--port"):
be_port = arg
elif opt in ("-u", "--user"):
admin_user = arg
elif opt in ("-s", "--scheme"):
scheme = arg
elif opt in ("-v", "--updateversion"):
if arg.lower() == "false" or arg.lower() == "no":
update_version = 'false'
print 'scheme =', scheme, ',be host =', be_host, ', be port =', be_port, ', user =', admin_user
if be_host is None:
usage()
sys.exit(3)
results = importNfvTypes(scheme, be_host, be_port, admin_user, "../../../import/tosca/nfv-types/", update_version)
print "-----------------------------"
for result in results:
print "{0:20} | {1:6}".format(result[0], result[1])
print "-----------------------------"
response_codes = [200, 201]
if update_version == 'false':
response_codes = [200, 201, 409]
failed_normatives = filter(lambda x: x[1] is None or x[1] not in response_codes, results)
if len(list(failed_normatives)) > 0:
error_and_exit(1, None)
else:
error_and_exit(0, None)
if __name__ == "__main__":
main(sys.argv[1:])
|
StarcoderdataPython
|
55130
|
<filename>6 programs work/logarithm.py
import math
def main():
def logList(numList):
for i in range(len(numList)):
if numList[i] > 0:
numList[i] = math.log(numList[i])
else:
numList[i] = None
return numList
numList = [1, 3, 2.5, -1, 9, 0, 2.71]
print(logList(numList))
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
3357583
|
cfg = dict(
model_type='STDCNet813',
n_cats=19,
num_aux_heads=2,
lr_start=1e-2,
weight_decay=5e-4,
warmup_iters=1000,
max_iter=80000,
dataset='CityScapes',
im_root='./datasets/cityscapes',
train_im_anns='./datasets/cityscapes/train.txt',
val_im_anns='./datasets/cityscapes/val.txt',
scales=[0.75, 2.],
cropsize=[1024, 512],
eval_crop=[1024, 512],
eval_scales=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
ims_per_gpu=8,
eval_ims_per_gpu=2,
use_fp16=True,
use_sync_bn=False,
respth='./res',
)
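# Added illustration: the dict above is meant to be consumed by a training script that
# is not shown here; the helper below is hypothetical and only demonstrates field access.
def describe_cfg(c: dict) -> str:
    return ("%s on %s: %d classes, crop %s, %d iterations"
            % (c['model_type'], c['dataset'], c['n_cats'], c['cropsize'], c['max_iter']))

if __name__ == "__main__":
    print(describe_cfg(cfg))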
|
StarcoderdataPython
|
66415
|
<filename>examples/decoupledibpm/cylinder2dRe550_GPU/scripts/plot_drag_coefficient_compare_ibpm.py
"""Plot the history of the drag coefficient.
Compare with the numerical results using the IBPM of PetIBM.
Compare with the numerical results reported in Koumoutsakos & Leonard (1995).
_References:_
* <NAME>., & <NAME>. (1995).
High-resolution simulations of the flow around an impulsively started
cylinder using vortex methods.
Journal of Fluid Mechanics, 296, 1-38.
"""
from matplotlib import pyplot
import numpy
import pathlib
import petibmpy
# Set directories and parameters.
simudir = pathlib.Path(__file__).absolute().parents[1]
rootdir = simudir.parents[2]
datadir = rootdir / 'data'
show_figure = True # display the Matplotlib figure
save_figure = True # save the Matplotlib figure as PNG
# Load drag force from file and compute drag coefficient.
filepath = simudir / 'output' / 'forces-0.txt'
t, fx, _ = petibmpy.read_forces(filepath)
cd = 2 * fx
# Load drag force from file from IBPM run and compute drag coefficient.
otherdir = rootdir / 'examples' / 'ibpm' / 'cylinder2dRe550_GPU'
filepath = otherdir / 'output' / 'forces-0.txt'
t1, fx1, _ = petibmpy.read_forces(filepath)
cd1 = 2 * fx1
# Load drag coefficient from Koumoutsakos & Leonard (1995).
filename = 'koumoutsakos_leonard_1995_cylinder_dragCoefficientRe550.dat'
filepath = datadir / filename
t2, cd2 = petibmpy.read_forces(filepath)
t2 *= 0.5
# Plot the history of the drag coefficient.
pyplot.rc('font', family='serif', size=14)
fig, ax = pyplot.subplots(figsize=(6.0, 4.0))
ax.set_xlabel('Non-dimensional time')
ax.set_ylabel('Drag coefficient')
ax.plot(t, cd, label='Decoupled IBPM')
ax.plot(t1, cd1, label='IBPM')
ax.plot(t2, cd2, label='Koumoutsakos \n& Leonard (1995)',
marker='o', linewidth=0, color='black')
ax.axis((0.0, 3.0, 0.0, 2.0))
ax.legend(frameon=False)
fig.tight_layout()
if show_figure:
pyplot.show()
if save_figure:
figdir = simudir / 'figures'
figdir.mkdir(parents=True, exist_ok=True)
filepath = figdir / 'drag_coefficient_compare_ibpm.png'
fig.savefig(filepath, dpi=300, bbox_inches='tight')
|
StarcoderdataPython
|
105152
|
"""
Aim: Given an undirected graph and an integer M. The task is to determine if
the graph can be colored with at most M colors such that no two adjacent
vertices of the graph are colored with the same color.
Intuition: We consider all the different combinations of the colors for the
given graph using backtracking.
"""
def isSafe(graph, v, n, temp, color):
# Check whether it is safe to color node n with color temp, i.e. whether all adjacent nodes have a different color
for i in range(v):
if graph[n][i] == 1 and color[i] == temp:
return False
return True
def check(graph, m, v, n, color):
# Recursively try color assignments for node n onwards, backtracking on conflicts.
if n == v: # base case : if all the nodes are traversed return
return True
for i in range(1, m + 1):
if isSafe(graph, v, n, i, color): # checking if it is safe to color
color[n] = i
if check(graph, m, v, n + 1, color):
return True
color[n] = 0
return False
def graphcoloring(graph, M, V):
color = [0] * (V + 1) # assigning colors to different nodes
return check(graph, M, V, 0, color)
# ------------------------DRIVER CODE ------------------------
def main():
for _ in range(int(input())):
V = int(input())
M = int(input())
E = int(input())
list = [int(x) for x in input().strip().split()]
graph = [[0 for i in range(V)] for j in range(V)]
cnt = 0
for i in range(E):
graph[list[cnt] - 1][list[cnt + 1] - 1] = 1
graph[list[cnt + 1] - 1][list[cnt] - 1] = 1
cnt += 2
if graphcoloring(graph, M, V) == True:
print(1)
else:
print(0)
if __name__ == "__main__":
main()
"""
Sample Input:
2
4
3
5
1 2 2 3 3 4 4 1 1 3
3
2
3
1 2 2 3 1 3
Sample Output:
1
0
"""
|
StarcoderdataPython
|
191604
|
# Generated by Django 2.2.9 on 2020-02-07 11:36
from django.db import migrations, models
import django.db.models.deletion
import wagtail.core.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
('wagtailcore', '0041_group_collection_permissions_verbose_name_plural'),
]
operations = [
migrations.CreateModel(
name='ResetNetworkOpenCallsPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('content_heading', models.CharField(max_length=255, verbose_name='Heading')),
('content_text', models.TextField(blank=True, verbose_name='Text')),
('no_open_calls_heading', models.CharField(max_length=255, verbose_name='Heading')),
('no_open_calls_text', models.TextField(blank=True, verbose_name='Text')),
('no_open_calls_success', models.TextField(verbose_name='Success Message')),
],
options={
'verbose_name': 'Reset Network Open Calls Page',
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='ResetNetworkOpenCallPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('content_heading', models.CharField(max_length=255, verbose_name='Heading')),
('content_text', wagtail.core.fields.RichTextField(verbose_name='Text')),
('card_heading', models.CharField(max_length=100)),
('card_text', models.TextField(blank=True)),
('fund', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailcore.Page')),
],
options={
'verbose_name': 'Reset Network Open Call Page',
},
bases=('wagtailcore.page',),
),
]
|
StarcoderdataPython
|
3240253
|
import logging
import numpy as np
import rasterio
from pesto.ws.core.pesto_feature import PestoFeature
from pesto.ws.features.converter.image.bands import FullBand, Band
log = logging.getLogger(__name__)
# TODO: WIP class (the experimental methods are private)
# Potentially: revisit the rationale / rework this in a functional style
# In particular, should ROI handling live in pesto at all? => Normally the processing
# should receive already formatted tiles and not have to manage tiling itself (cf. spam lib etc...)
# To verify: why?
class ImageROI(object):
def __init__(self, roi: dict):
self.target_in = roi['target_in']
self.target_out = roi['target_out']
self.lines = Band(roi['lines']) if 'lines' in roi else FullBand()
self.columns = Band(roi['columns']) if 'columns' in roi else FullBand()
self.crop_infos = (0, 0, 0, 0)
def compute_crop_infos(self) -> PestoFeature:
return _ComputeCropInfos(self)
def remove_margin(self) -> PestoFeature:
return _RemoveMargin(self)
class DummyImageROI(ImageROI):
"""
This class is just a placeholder to initialize pipelines when no roi is provided ...
"""
def __init__(self):
"""
do not call super().__init__(roi) because no roi is provided
"""
pass
def compute_crop_infos(self):
return None
def remove_margin(self):
return None
class _ComputeCropInfos(PestoFeature):
def __init__(self, roi: ImageROI):
self.roi = roi
def process(self, payload: dict) -> dict:
self.check_payload(payload)
for _ in self.roi.target_in:
log.info('ROI preprocess: [{}]'.format(_))
image_path = payload[_]
with rasterio.open(image_path) as dataset:
shape = dataset.shape
top, bottom = self.roi.lines.compute_crop(shape[0])
left, right = self.roi.columns.compute_crop(shape[1])
self.roi.crop_infos = (top, bottom, left, right)
return payload
def check_payload(self, payload: dict):
ref = None
for _ in self.roi.target_in:
image_path = payload[_]
with rasterio.open(image_path) as dataset:
shape = dataset.shape
if ref is None:
ref = shape
if shape != ref:
raise ValueError('All images should have the same shape {} ! Wrong shape is {}'.format(ref, shape))
class _RemoveMargin(PestoFeature):
def __init__(self, roi: ImageROI):
self.roi = roi
def process(self, payload: dict) -> dict:
for x in payload:
log.info('payload: {}'.format(x))
top, bottom, left, right = self.roi.crop_infos
for _ in self.roi.target_out:
log.info('ROI postprocess: [{}]'.format(_))
image = payload[_]
h, w = _get_shape(image)
payload[_] = image[..., top:h - bottom, left:w - right]
return payload
def _get_shape(image: np.ndarray):
if len(image.shape) == 2:
return image.shape
return image.shape[1:3]
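# Added sketch, inferred from ImageROI.__init__ above (the Band payload format lives
# in .bands and is not shown here, so the commented keys are assumptions):
# roi = ImageROI({
#     'target_in': ['image_in'],
#     'target_out': ['image_out'],
#     # 'lines': {...},    # optional Band definition, otherwise FullBand()
#     # 'columns': {...},  # optional Band definition, otherwise FullBand()
# })
# pre_step, post_step = roi.compute_crop_infos(), roi.remove_margin()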
|
StarcoderdataPython
|
33957
|
<gh_stars>0
from django.shortcuts import render,redirect
from django.views.generic import View
from django.contrib.auth.models import User
from .forms import LoginUser,RegisterUser
from django.http import HttpResponse,Http404
from django.contrib.auth import authenticate,login,logout
class UserLogin(View):
form_class = LoginUser
def get(self,request):
return redirect('users:test')
def post(self,request):
form = self.form_class(request.POST)
if form.is_valid():
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username,password=password)
if user is not None:
login(request,user)
return redirect('drinks:index')
return redirect('users:test')
class UserRegister(View):
form_class = RegisterUser
def get(self,request):
return redirect('users:test')
def post(self,request):
form = self.form_class(request.POST)
if form.is_valid():
user = form.save(commit=False)
username = form.cleaned_data['username']
password = form.cleaned_data['password']
user.set_password(password)
user.save()
user = authenticate(username=username,password=password)
if user is not None:
login(request,user)
return redirect('drinks:index')
return redirect('users:test')
def LogoutView(request):
logout(request)
return redirect('users:test')
def test(request):
log_form = LoginUser
Reg_form = RegisterUser
template = 'users/login_test.html'
if request.user.is_authenticated:
return redirect('drinks:index')
context = {
'form' : log_form,
'tmp' : Reg_form,
}
return render(request , template ,context)
|
StarcoderdataPython
|
1766575
|
<filename>useraccount/migrations/0001_initial.py
# Generated by Django 4.0.3 on 2022-03-06 09:36
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('avatar', models.ImageField(blank=True, null=True, upload_to='')),
('email', models.EmailField(max_length=200)),
],
),
migrations.CreateModel(
name='Image',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(upload_to='')),
('name', models.CharField(max_length=60)),
('caption', models.TextField(max_length=200)),
('like', models.IntegerField(default=0)),
('comments', models.IntegerField(default=0)),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='useraccount.profile')),
],
),
]
|
StarcoderdataPython
|
1619969
|
import cv2 as cv
import numpy as np
import utilities
def empty(a):
pass
cv.namedWindow("Trackbars")
cv.resizeWindow("Trackbars", 640, 240)
cv.createTrackbar("Hue Min", "Trackbars", 55, 179,empty)
cv.createTrackbar("Hue Max", "Trackbars", 155, 179,empty)
cv.createTrackbar("Sat Min", "Trackbars", 21, 255,empty)
cv.createTrackbar("Sat Max", "Trackbars", 255, 255,empty)
cv.createTrackbar("Val Min", "Trackbars", 0, 255,empty)
cv.createTrackbar("Val Max", "Trackbars", 255, 255,empty)
# Config Webcam
frameWidth = 1920
frameHeight = 1080
cap = cv.VideoCapture(0)
cap.set(3, frameWidth)
cap.set(4, frameHeight)
cap.set(10, 150) #brightness
while True:
success, img = cap.read()
imgHSV = cv.cvtColor(img, cv.COLOR_BGR2HSV)
h_min = cv.getTrackbarPos("Hue Min", "Trackbars")
h_max = cv.getTrackbarPos("Hue Max", "Trackbars")
s_min = cv.getTrackbarPos("Sat Min", "Trackbars")
s_max = cv.getTrackbarPos("Sat Max", "Trackbars")
v_min = cv.getTrackbarPos("Val Min", "Trackbars")
v_max = cv.getTrackbarPos("Val Max", "Trackbars")
# print(h_min, h_max, s_min, s_max, v_min, v_max)
lower = np.array([h_min, s_min, v_min])
upper = np.array([h_max, s_max, v_max])
mask = cv.inRange(imgHSV, lower, upper)
imgResult = cv.bitwise_and(img, img, mask=mask)
imgStack = utilities.stackImages(0.5, ([img, imgHSV], [mask, imgResult]))
cv.imshow("stacked", imgStack)
if cv.waitKey(1) & 0xFF == ord('q'):
break
|
StarcoderdataPython
|
163159
|
# ======================================================================
# Air Duct Spelunking
# Advent of Code 2016 Day 24 -- <NAME> -- https://adventofcode.com
#
# Python implementation by Dr. <NAME> III
# ======================================================================
# ======================================================================
# r o b o t . p y
# ======================================================================
"A solver for the Advent of Code 2016 Day 24 puzzle"
# ----------------------------------------------------------------------
# import
# ----------------------------------------------------------------------
from itertools import permutations
import ducts
# ----------------------------------------------------------------------
# constants
# ----------------------------------------------------------------------
# ======================================================================
# Robot
# ======================================================================
class Robot(object): # pylint: disable=R0902, R0205
"Object for Air Duct Spelunking"
def __init__(self, text=None, part2=False):
# 1. Set the initial values
self.part2 = part2
self.text = text
self.ducts = None
self.steps = {}
# 2. Process text (if any)
if text is not None and len(text) > 0:
self.ducts = ducts.Ducts(text=text, part2=part2)
self.steps = self.ducts.all_steps()
def visit_all(self, verbose=False, loop=False):
"Return the number of steps to locations"
# 1. Start with a very bad guess
best_steps = None
best_route = None
# 2. Loop for all possible routes between locations
for route in permutations(range(1, max(self.steps) + 1)):
# 3. Get the number of steps in this route
full_route = [0]
full_route.extend(list(route))
if loop:
full_route.append(0)
steps = self.cost_of_route(full_route)
# 4. If better than the previous best, save it
if best_steps is None or steps < best_steps:
if verbose:
print("saving better route", steps, full_route)
best_steps = steps
best_route = full_route
# 5. Return the number of steps in the best route
if verbose:
print("Returning best", best_steps, best_route)
return best_steps
def cost_of_route(self, route):
"Return the cost of the route"
# 1. Start with nothing
result = 0
if route is None or len(route) == 0:
return result
# 2. Start at the first location
previous = route[0]
# 3. Loop for the rest of the locations
for loc in route[1:]:
# 4. Add in the distance to that location
result += self.steps[previous][loc]
# 5. Advance to that location
previous = loc
# 6. Return the total cost of the route
return result
def part_one(self, verbose=False, limit=0):
"Returns the solution for part one"
# 0. Precondition axioms
assert verbose in [True, False]
assert limit >= 0
# 1. Return the solution for part one
return self.visit_all(verbose=verbose)
def part_two(self, verbose=False, limit=0):
"Returns the solution for part two"
# 0. Precondition axioms
assert verbose in [True, False]
assert limit >= 0
# 1. Return the solution for part two
return self.visit_all(verbose=verbose, loop=True)
# ----------------------------------------------------------------------
# module initialization
# ----------------------------------------------------------------------
if __name__ == '__main__':
pass
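# Added illustration (appended; not part of the original solver): cost_of_route()
# only needs the precomputed steps table, so it can be exercised with a made-up one.
def _cost_demo():
    rbt = Robot()
    rbt.steps = {0: {1: 2, 2: 8}, 1: {0: 2, 2: 3}, 2: {0: 8, 1: 3}}
    assert rbt.cost_of_route([0, 1, 2]) == 5  # 0 -> 1 costs 2, 1 -> 2 costs 3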
# ======================================================================
# end r o b o t . p y end
# ======================================================================
|
StarcoderdataPython
|
1770731
|
<reponame>MiaRatkovic/LAMA
"""
LAMA produces lots of data. Sometimes we can get rid of much of it afterwards.
This script removes folders specified in a config file.
This is a work in progress
example yaml config.
-------------------
This will delete all folders named 'resolution_images',
and will delete all contents of 'registrations' except the subfolder named 'similarity'.
--------------------
folders_to_rm:
resolution_images: []
registrations: [similarity]
--------------------
python3 data_clean_up.py config root_dir
This will recursively search directories and delete any folder named in the list
"""
from pathlib import Path
import shutil
import yaml
from typing import Iterable, List
def is_subseq(x: Iterable, y: Iterable) -> bool:
"""
Check whether x is a subsequence of y.
For example
registrations/deformable is in output/registration/deformable/192_12
"""
it = iter(y)
return all(c in it for c in x)
def rm_by_name(root: Path, name: str, to_keep: List):
"""
Remove directories. If any path or partial path is in to_keep, delete the rest of the folder but keep that one.
"""
dirs = root.glob(f'**/{name}') # Get all matching directories
for d in dirs:
subfolders_to_keep = []
if not d.is_dir():
continue
for subdir in d.iterdir():
if not subdir.is_dir():
continue
for subseq in to_keep:
if is_subseq(Path(subseq).parts, subdir.parts):
subfolders_to_keep.append(subdir)
if not to_keep:
# There are no subfolders to keep; delete the whole directory
shutil.rmtree(d)
elif not subfolders_to_keep and to_keep:
# There is a folder we should be keeping, but it's not present. Maybe a typo?
# Just in case, do not delete
raise ValueError(f'Could not find specified subfolder to keep {to_keep} in {d}')
else:
# We have located the subdirs to keep. Now delete the rest of the folder
for subdir in d.iterdir():
if not subdir.is_dir():
continue
if subdir not in subfolders_to_keep:
shutil.rmtree(subdir)
def run(config_path: str, root_dir: Path):
with open(config_path) as fh:
config = yaml.safe_load(fh)
print(f"deleting {config['folders_to_rm']}")
for dir_, subdirs_to_keep in config['folders_to_rm'].items():
rm_by_name(root_dir, dir_, subdirs_to_keep)
if __name__ == '__main__':
import sys
config_path = sys.argv[1]
root_dir = sys.argv[2]
run(config_path, Path(root_dir))
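# Added self-contained sketch (illustration only): rm_by_name() applied to a throwaway
# tree, matching the yaml example in the module docstring; keep 'similarity' inside
# 'registrations', delete the rest.
def _demo(tmp_root: Path):
    (tmp_root / 'run1' / 'registrations' / 'similarity').mkdir(parents=True)
    (tmp_root / 'run1' / 'registrations' / 'affine').mkdir(parents=True)
    rm_by_name(tmp_root, 'registrations', ['similarity'])
    assert (tmp_root / 'run1' / 'registrations' / 'similarity').exists()
    assert not (tmp_root / 'run1' / 'registrations' / 'affine').exists()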
|
StarcoderdataPython
|
178053
|
<filename>colassigner/core.py<gh_stars>0
from .constants import PREFIX_SEP
from .meta_base import ColMeta
from .util import camel_to_snake
class ColAccessor(metaclass=ColMeta):
"""describe and access raw columns
useful for
- getting column names from static analysis
- documenting types
- dry describing nested structures
e. g.
class LocationCols(ColAccessor):
lon = float
lat = float
class TableCols(ColAccessor):
col1 = int
col2 = str
foreign_key1 = "name_of_key"
class NestedCols(ColAccessor):
s = str
x = float
start_loc = LocationCols
end_loc = LocationCols
>>> TableCols.start_loc.lat
'start_loc__lat'
"""
class ColAssigner(ColAccessor):
"""define functions that create columns in a dataframe
later the class attributes can be used to access the column
can be used to create nested structures of columns
either by assigning or inheriting within:
class MyStaticChildAssigner(ColAssigner):
pass
class MyAssigner(ColAssigner):
class MySubAssigner(ColAssigner):
pass
chass1 = MyStaticChildAssigner
"""
def __call__(self, df, carried_prefixes=()):
# dir() is alphabetised; object.__dir__ is not.
# This matters if assigned columns rely on each other.
for attid in self.__dir__():
if attid.startswith("_"):
continue
att = getattr(self, attid)
new_pref_arr = (*carried_prefixes, camel_to_snake(attid))
if isinstance(att, ColMeta):
if ChildColAssigner in att.mro():
inst = att(df, self)
else:
inst = att()
df = inst(df, carried_prefixes=new_pref_arr)
elif callable(att):
col_name = getattr(type(self), attid)
colname = PREFIX_SEP.join((*carried_prefixes, col_name))
df = df.assign(**{colname: self._call_att(att, df)})
return df
@staticmethod
def _call_att(att, df):
return att(df)
class ChildColAssigner(ColAssigner):
"""assigner specifically for nested structures
methods of these are not called with parameters
the dataframe and the parent assigner are passed
to the __init__ method as parameters
"""
def __init__(self, df, parent_assigner: ColAssigner) -> None:
pass
@staticmethod
def _call_att(att, _):
return att()
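# Added demo (wrapped in a function so importing this module stays side-effect free);
# it restates the accessor behaviour documented in ColAccessor's docstring, which is
# implemented by the ColMeta metaclass imported above.
def _accessor_demo():
    class LocationCols(ColAccessor):
        lon = float
        lat = float

    class TableCols(ColAccessor):
        start_loc = LocationCols
        end_loc = LocationCols

    assert TableCols.start_loc.lat == "start_loc__lat"  # per the docstring example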
|
StarcoderdataPython
|
142438
|
import os
from celery.result import AsyncResult
from fastapi import APIRouter, Depends, Request
from fastapi.responses import FileResponse, JSONResponse
from services.database.user_database import UserDatabase
from services.pandi import Pandi
from services.schemas import User
from tasks import fetch_for_accounting, fetch_profit_margin
services = UserDatabase()
reports = APIRouter(prefix="/reports")
@reports.get("/for_accounting/download_report")
async def download_report(
current_user: User = Depends(services.get_current_user), ):
path: str = os.path.abspath("apps/static/reports/accounting.xlsx")
filename: str = "accounting.xlsx"
return FileResponse(
path=path,
filename=filename,
media_type=
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
)
@reports.get("/for_accounting")
async def for_accounting(
start_date: str,
end_date: str,
request: Request,
current_user: User = Depends(services.get_current_user),
):
redis = request.app.redis
task = fetch_for_accounting.delay(start_date, end_date)
task_for_accounting = {"start": start_date, "end": end_date, "id": task.id}
await redis.dump_data("for_accounting_tasks", task_for_accounting)
return JSONResponse(status_code=200,
content=f"{task.id} has been created.")
@reports.get("/for_accounting/tasks")
async def get_tasks(request: Request,
current_user: User = Depends(services.get_current_user)):
redis = request.app.redis
return await redis.load_data("for_accounting_tasks")
@reports.get("/for_accounting/{task_id}")
async def get_report(task_id: str,
current_user: User = Depends(services.get_current_user)):
result = AsyncResult(task_id).result
status = AsyncResult(task_id).status
if result:
Pandi.save_xlsx(result.get("report"), "accounting")
return {"status": status, "result": result}
@reports.get("/for_accounting/tasks/delete")
async def delete_tasks(request: Request,
current_user: User = Depends(
services.get_current_user)):
await request.app.redis.delete_key("for_accounting_tasks")
@reports.get("/profit_margin/download_report")
async def download():
path: str = os.path.abspath("apps/static/reports/profit_margin.xlsx")
filename: str = "profit_margin.xlsx"
return FileResponse(
path=path,
filename=filename,
media_type=
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
)
@reports.get("/profit_margin/tasks")
async def get_tasks(request: Request):
redis = request.app.redis
return await redis.load_data("profit_margin_tasks")
@reports.get("/profit_margin/{task_id}")
async def get_report(task_id: str):
result = AsyncResult(task_id).result
status = AsyncResult(task_id).status
if result:
Pandi.save_xlsx(
result.get("report").get("profit_margin"), "profit_margin")
return {"status": status, "result": result}
@reports.get("/profit_margin/tasks/delete")
async def delete_tasks(request: Request):
await request.app.redis.delete_key("profit_margin_tasks")
@reports.get("/profit_margin")
async def profit_margin(request: Request, start_date, end_date):
redis = request.app.redis
task = fetch_profit_margin.delay(start_date, end_date)
task_profit_margin = {"start": start_date, "end": end_date, "id": task.id}
await redis.dump_data("profit_margin_tasks", task_profit_margin)
return JSONResponse(status_code=200,
content=f"{task.id} has been created.")
|
StarcoderdataPython
|
190953
|
<filename>pte_module.py
import pandas as pd
def main():
list_effectiveness = import_effectiveness()
list_types = get_types(list_effectiveness)
li = find_n_way_double(list_effectiveness, list_types, 16)
for row in li:
print(row)
print(len(li))
def import_effectiveness():
path = 'effectiveness.csv'
df = pd.read_csv(path, index_col=0)
return df
def get_types(df):
list_types = df['Attacking'].drop_duplicates()
return list_types.tolist()
def find_n_way_double(list_effectiveness, list_types, n):
li = []
list_types = ['Bug']
for t in list_types:
df = list_effectiveness.loc[list_effectiveness['Attacking'] == t]
res = super_effective(list_effectiveness, t, n, df, 1, [])
if type(res) is str:
li.append([res, t])
if type(res) is list:
if len(res) == 0:
continue
ali = []
for row in res:
ali.append(row + [t])
li = li + ali
return li
def super_effective(list_effectiveness, t, n, df, i, li_def0):
li = []
for index, row in df.iterrows():
attacking = row['Attacking']
defending = row['Defending']
effectiveness = row['Effectiveness']
if effectiveness == 'Super effective':
mask = list_effectiveness['Attacking'] == defending
li_def1 = li_def0 + [defending]
for defender in li_def1:
mask1 = list_effectiveness['Defending'] != defender
mask = mask1 & mask
df = list_effectiveness.loc[mask]
if defending == t and i == n:
print(li_def0)
return attacking
if i < n:
res = super_effective(list_effectiveness, t, n, df, i + 1, li_def1)
if type(res) is str:
li.append([attacking, res])
if type(res) is list:
if len(res) == 0:
continue
ali = []
for row in res:
ali.append([attacking] + row)
li = li + ali
return li
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1705235
|
from advanced_reports.defaults import Action
class BackOfficeAction(Action):
form_template = 'advanced_reports/backoffice/contrib/advanced-reports/bootstrap-modal-form.html'
def action(*args, **kwargs):
return BackOfficeAction(*args, **kwargs)
|
StarcoderdataPython
|
3373574
|
<filename>satchmo/apps/satchmo_store/accounts/urls.py
"""
URLConf for Django user registration.
Recommended usage is to use a call to ``include()`` in your project's
root URLConf to include this URLConf for any URL beginning with
'/accounts/'.
"""
from django.conf.urls import patterns
from satchmo_store.accounts.views import RegistrationComplete
# extending the urls in contacts
from satchmo_store.contact.urls import urlpatterns
from satchmo_utils.signals import collect_urls
from satchmo_store import accounts
# The following import of satchmo_store.contact.config should not be removed
# because it is sometimes indirectly important for loading config_value('SHOP', 'ACCOUNT_VERIFICATION')
import satchmo_store.contact.config
# Activation keys get matched by \w+ instead of the more specific
# [a-fA-F0-9]+ because a bad activation key should still get to the view;
# that way it can return a sensible "invalid key" message instead of a
# confusing 404.
urlpatterns += patterns('satchmo_store.accounts.views',
(r'^activate/(?P<activation_key>\w+)/$', 'activate', {}, 'registration_activate'),
(r'^login/$', 'emaillogin', {'template_name': 'registration/login.html'}, 'auth_login'),
(r'^register/$', 'register', {}, 'registration_register'),
(r'^secure/login/$', 'emaillogin', {'SSL' : True, 'template_name': 'registration/login.html'}, 'auth_secure_login'),
)
urlpatterns += patterns('',
('^logout/$','django.contrib.auth.views.logout', {'template_name': 'registration/logout.html'}, 'auth_logout'),
)
urlpatterns += patterns('',
(r'^register/complete/$',
RegistrationComplete.as_view(template_name='registration/registration_complete.html'), {},
'registration_complete'),
)
#Dictionary for authentication views
password_reset_dict = {
'template_name': 'registration/password_reset_form.html',
'email_template_name': 'registration/password_reset.txt',
}
# the "from email" in password reset is problematic... it is hard coded as None
urlpatterns += patterns('django.contrib.auth.views',
(r'^password_reset/$', 'password_reset', password_reset_dict, 'auth_password_reset'),
(r'^password_reset/done/$', 'password_reset_done', {'template_name':'registration/password_reset_done.html'}, 'auth_password_reset_done'),
(r'^password_change/$', 'password_change', {'template_name':'registration/password_change_form.html'}, 'auth_password_change'),
(r'^password_change/done/$', 'password_change_done', {'template_name':'registration/password_change_done.html'}, 'auth_change_done'),
(r'^reset/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/$', 'password_reset_confirm'),
(r'^reset/done/$', 'password_reset_complete'),
)
collect_urls.send(sender=accounts, patterns=urlpatterns)
|
StarcoderdataPython
|
74646
|
###############################################
# ZPEED: Z' Exclusions from Experimental Data #
###############################################
# By <NAME> and <NAME>, 2019
from __future__ import division
import numpy as np
import scipy.integrate as integrate
from chi2_CLs import get_likelihood
from ATLAS_13TeV_calibration import xi_function
from ATLAS_13TeV import calculate_chi2
import dileptons_functions as df
import time
start_time = time.perf_counter()  # time.clock() was removed in Python 3.8
#Step 1: Define model parameter point
Zp_model = {
'MZp': 1000., #Zp mass
'mDM': 100., #Dark matter mass
'gxv': 1., #Zp-DM vector coupling
'guv': 0.1, #Zp-up-type-quark vector coupling
'gdv': 0.1, #Zp-down-type-quark vector coupling
'glv': 0.01, #Zp-lepton vector coupling
'gxa': 0., #Zp-DM axial coupling
'gua': 0., #Zp-up-type-quark axial coupling
'gda': 0., #Zp-down-type-quark axial coupling
'gla': 0., #Zp-lepton axial coupling
}
# The couplings to neutrinos follow from SM gauge invariance and the fact that right-handed neutrinos do not exist
Zp_model['gnv'] = 0.5 * (Zp_model['glv'] - Zp_model['gla'])
Zp_model['gna'] = 0.5 * (Zp_model['gla'] - Zp_model['glv'])
Zp_model['Gamma'] = df.DecayWidth(Zp_model)
step1_time = time.perf_counter()
#Step 2: Calculate differential cross section (including detector efficiency)
ee_signal = lambda x : xi_function(x, "ee") * df.dsigmadmll(x, Zp_model, "ee")
mm_signal = lambda x : xi_function(x, "mm") * df.dsigmadmll(x, Zp_model, "mm")
ee_signal_with_interference = lambda x : xi_function(x, "ee") * df.dsigmadmll_wint(x, Zp_model, "ee")
mm_signal_with_interference = lambda x : xi_function(x, "mm") * df.dsigmadmll_wint(x, Zp_model, "mm")
step2_time = time.perf_counter()
#Step 3: Create likelihood functions
Mlow = Zp_model['MZp'] - 3.*Zp_model['Gamma']
Mhigh = Zp_model['MZp'] + 3.*Zp_model['Gamma']
sig_range = [Mlow,Mhigh]
chi2, chi2_Asimov = calculate_chi2(ee_signal, mm_signal, signal_range=sig_range)
chi2_with_interference, chi2_Asimov_with_interference = calculate_chi2(ee_signal_with_interference, mm_signal_with_interference, signal_range=sig_range)
step3_time = time.perf_counter()
# Step 4: Evaluate test statistic
result = get_likelihood(chi2, chi2_Asimov)
result_with_interference = get_likelihood(chi2_with_interference, chi2_Asimov_with_interference)
print("Without interference")
print("-2 log L: ", result[0])
print("-2 Delta log L: ", result[1])
print("CLs: ", result[2])
print("With interference")
print("-2 log L: ", result_with_interference[0])
print("-2 Delta log L: ", result_with_interference[1])
print("CLs: ", result_with_interference[2])
step4_time = time.perf_counter()
print("Timing information")
print("Step 1: ", step1_time - start_time)
print("Step 2: ", step2_time - step1_time)
print("Step 3: ", step3_time - step2_time)
print("Step 4: ", step4_time - step3_time)
print("Total: ", step4_time - start_time)
exit()
|
StarcoderdataPython
|
16122
|
<gh_stars>1-10
from pathlib import Path
import pandas
from muller.dataio import import_tables
from loguru import logger
DATA_FOLDER = Path(__file__).parent.parent / "data"
def test_filter_empty_trajectories():
input_column_0 = ['genotype-1', 'genotype-2', 'genotype-3', 'genotype-4', 'genotype-5', 'genotype-6']
input_column_1 = [0.000, 0.000, 0.000, 0.111, 0.000, 0.000]
input_column_2 = [0.000, 0.380, 0.000, 0.222, 0.000, 0.000]
input_column_3 = [0.261, 0.432, 0.000, 0.333, 0.000, 0.000]
input_column_4 = [1.000, 0.432, 0.000, 0.444, 1.470, 0.272]
expected_column_0 = ['genotype-1', 'genotype-2', 'genotype-4', 'genotype-5', 'genotype-6']
expected_column_1 = [0.000, 0.000, 0.111, 0.000, 0.000]
expected_column_2 = [0.000, 0.380, 0.222, 0.000, 0.000]
expected_column_3 = [0.261, 0.432, 0.333, 0.000, 0.000]
expected_column_4 = [1.000, 0.432, 0.444, 1.470, 0.272]
# Convert to a dataframe
input_dataframe_definition = {
'Genotype': input_column_0,
0:input_column_1,
1:input_column_2,
2:input_column_3,
3:input_column_4,
}
expected_dataframe_definition = {
'Genotype': expected_column_0,
0: expected_column_1,
1: expected_column_2,
2: expected_column_3,
3: expected_column_4
}
logger.debug(input_dataframe_definition)
input_dataframe = pandas.DataFrame(input_dataframe_definition)
input_dataframe = input_dataframe.set_index('Genotype')
logger.debug(input_dataframe)
expected_dataframe = pandas.DataFrame(expected_dataframe_definition).set_index('Genotype')
logger.debug(input_dataframe.to_string())
result = import_tables.filter_empty_trajectories(input_dataframe)
logger.debug(result.to_string())
assert list(result.columns) == list(expected_dataframe.columns)
assert len(result) == len(expected_dataframe)
assert list(result.index) == list(expected_dataframe.index)
#pandas.testing.assert_frame_equal(result, expected_dataframe)
|
StarcoderdataPython
|
4837333
|
<reponame>h4ck3rm1k3/scrapy<gh_stars>10-100
"""
This modules implements the CrawlSpider which is the recommended spider to use
for scraping typical web sites that requires crawling pages.
See documentation in docs/topics/spiders.rst
"""
import copy
from scrapy.http import Request, HtmlResponse
from scrapy.utils.spider import iterate_spider_output
from scrapy.spider import Spider
def identity(x):
return x
class Rule(object):
def __init__(self, link_extractor, callback=None, cb_kwargs=None, follow=None, process_links=None, process_request=identity):
self.link_extractor = link_extractor
self.callback = callback
self.cb_kwargs = cb_kwargs or {}
self.process_links = process_links
self.process_request = process_request
if follow is None:
self.follow = False if callback else True
else:
self.follow = follow
class CrawlSpider(Spider):
rules = ()
def __init__(self, *a, **kw):
super(CrawlSpider, self).__init__(*a, **kw)
self._compile_rules()
def parse(self, response):
return self._parse_response(response, self.parse_start_url, cb_kwargs={}, follow=True)
def parse_start_url(self, response):
return []
def process_results(self, response, results):
return results
def _requests_to_follow(self, response):
if not isinstance(response, HtmlResponse):
return
seen = set()
for n, rule in enumerate(self._rules):
links = [l for l in rule.link_extractor.extract_links(response) if l not in seen]
if links and rule.process_links:
links = rule.process_links(links)
for link in links:
seen.add(link)
r = Request(url=link.url, callback=self._response_downloaded)
r.meta.update(rule=n, link_text=link.text)
yield rule.process_request(r)
def _response_downloaded(self, response):
rule = self._rules[response.meta['rule']]
return self._parse_response(response, rule.callback, rule.cb_kwargs, rule.follow)
def _parse_response(self, response, callback, cb_kwargs, follow=True):
if callback:
cb_res = callback(response, **cb_kwargs) or ()
cb_res = self.process_results(response, cb_res)
for requests_or_item in iterate_spider_output(cb_res):
yield requests_or_item
if follow and self._follow_links:
for request_or_item in self._requests_to_follow(response):
yield request_or_item
def _compile_rules(self):
def get_method(method):
if callable(method):
return method
elif isinstance(method, basestring):
return getattr(self, method, None)
self._rules = [copy.copy(r) for r in self.rules]
for rule in self._rules:
rule.callback = get_method(rule.callback)
rule.process_links = get_method(rule.process_links)
rule.process_request = get_method(rule.process_request)
@classmethod
def from_crawler(cls, crawler, *args, **kwargs):
spider = super(CrawlSpider, cls).from_crawler(crawler, *args, **kwargs)
spider._follow_links = crawler.settings.getbool(
'CRAWLSPIDER_FOLLOW_LINKS', True)
return spider
def set_crawler(self, crawler):
super(CrawlSpider, self).set_crawler(crawler)
self._follow_links = crawler.settings.getbool('CRAWLSPIDER_FOLLOW_LINKS', True)
|
StarcoderdataPython
|
3240449
|
from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse
class Post(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
text = models.CharField(max_length=255)
    user = models.ForeignKey(User, blank=True, null=True, default=None, on_delete=models.SET_NULL)  # on_delete is required from Django 2.0 on
def __str__(self):
return self.text
|
StarcoderdataPython
|
1616924
|
<filename>DataUse/migrations/0004_datapull_author_datapull_detail_datapull_keyword_datapull_title.py<gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-01-17 18:54
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('DataUse', '0003_auto_20180117_1323'),
]
operations = [
migrations.CreateModel(
name='DataPull_Author',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('associatedid', models.CharField(max_length=255)),
('forename', models.CharField(max_length=1000)),
('lastname', models.CharField(max_length=1000)),
('contributortype', models.CharField(max_length=255)),
('contributorcontact', models.CharField(max_length=255)),
('affiliation', models.TextField()),
],
),
migrations.CreateModel(
name='DataPull_Detail',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('associatedid', models.CharField(max_length=255)),
('valuestore', models.CharField(choices=[('store', 'store'), ('duplicate', 'duplicate'), ('false positive', 'false positive')], default='', max_length=255)),
('note', models.CharField(max_length=1000)),
('pubtype', models.CharField(max_length=255)),
('pullid', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='DataUse.DataPull_ID')),
],
),
migrations.CreateModel(
name='DataPull_Keyword',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('associatedid', models.CharField(max_length=255)),
('keywordvalue', models.CharField(max_length=255)),
('category1', models.CharField(max_length=255)),
('category2', models.CharField(max_length=255)),
('category3', models.CharField(max_length=255)),
('category4', models.CharField(max_length=255)),
('category5', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='DataPull_Title',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('associatedid', models.CharField(max_length=255)),
('title', models.CharField(max_length=1000)),
('journal', models.CharField(max_length=500)),
('publicationdate', models.DateField()),
('optionalid01', models.CharField(max_length=500)),
('optionalid02', models.CharField(max_length=500)),
],
),
]
|
StarcoderdataPython
|
1795680
|
<reponame>gsi-luis/djangolearning
import json
from rest_framework import serializers
from django_elasticsearch_dsl_drf.serializers import DocumentSerializer
from learning_search_indexes.documents.tag import TagDocument
class TagDocumentSerializer(DocumentSerializer):
"""Serializer for the Book document."""
class Meta(object):
"""Meta options."""
# Specify the correspondent document class
document = TagDocument
# List the serializer fields. Note, that the order of the fields
# is preserved in the ViewSet.
fields = (
'id',
'title',
)
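# --- Hedged usage sketch (assumption, not taken from this project) ---
# How a serializer like this is typically exposed through
# django_elasticsearch_dsl_drf: the viewset name, filter backends and
# search fields below are illustrative.
from django_elasticsearch_dsl_drf.filter_backends import SearchFilterBackend
from django_elasticsearch_dsl_drf.viewsets import DocumentViewSet

class TagDocumentViewSet(DocumentViewSet):
    document = TagDocument
    serializer_class = TagDocumentSerializer
    lookup_field = 'id'
    filter_backends = [SearchFilterBackend]
    # Fields the full-text search endpoint should match against.
    search_fields = ('title',)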
|
StarcoderdataPython
|
1672893
|
from hierarc.LensPosterior.ddt_kin_constraints import DdtKinConstraints
from lenstronomy.Analysis.kinematics_api import KinematicsAPI
from hierarc.Likelihood.hierarchy_likelihood import LensLikelihood
from lenstronomy.Cosmo.lens_cosmo import LensCosmo
import numpy as np
import numpy.testing as npt
import pytest
class TestDdtKinGaussConstraints(object):
def setup(self):
pass
def test_likelihoodconfiguration_om(self):
anisotropy_model = 'OM'
kwargs_aperture = {'aperture_type': 'shell', 'r_in': 0, 'r_out': 3 / 2., 'center_ra': 0.0, 'center_dec': 0}
kwargs_seeing = {'psf_type': 'GAUSSIAN', 'fwhm': 1.4}
# numerical settings (not needed if power-law profiles with Hernquist light distribution is computed)
kwargs_numerics_galkin = {'interpol_grid_num': 1000, # numerical interpolation, should converge -> infinity
'log_integration': True,
# log or linear interpolation of surface brightness and mass models
'max_integrate': 100,
'min_integrate': 0.001} # lower/upper bound of numerical integrals
# redshift
z_lens = 0.5
z_source = 1.5
# lens model
from astropy.cosmology import FlatLambdaCDM
cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
lens_cosmo = LensCosmo(z_lens=z_lens, z_source=z_source, cosmo=cosmo)
ddt_mean = lens_cosmo.ddt
ddt_sigma = ddt_mean/50
ddt_samples = np.random.normal(loc=ddt_mean, scale=ddt_sigma, size=50000)
theta_E = 1.
r_eff = 1
gamma = 2.1
# kwargs_model
lens_light_model_list = ['HERNQUIST']
lens_model_list = ['SPP']
kwargs_model = {'lens_model_list': lens_model_list, 'lens_light_model_list': lens_light_model_list}
# settings for kinematics calculation with KinematicsAPI of lenstronomy
kwargs_kin_api_settings = {'multi_observations': False, 'kwargs_numerics_galkin': kwargs_numerics_galkin,
'MGE_light': False, 'kwargs_mge_light': None, 'sampling_number': 1000,
'num_kin_sampling': 1000, 'num_psf_sampling': 100}
kin_api = KinematicsAPI(z_lens, z_source, kwargs_model, kwargs_aperture, kwargs_seeing, anisotropy_model,
cosmo=cosmo, **kwargs_kin_api_settings)
# compute kinematics with fiducial cosmology
kwargs_lens = [{'theta_E': theta_E, 'gamma': gamma, 'center_x': 0, 'center_y': 0}]
kwargs_lens_light = [{'Rs': r_eff * 0.551, 'amp': 1.}]
kwargs_anisotropy = {'r_ani': r_eff}
sigma_v = kin_api.velocity_dispersion(kwargs_lens, kwargs_lens_light, kwargs_anisotropy, r_eff=r_eff,
theta_E=theta_E, gamma=gamma, kappa_ext=0)
# compute likelihood
kin_constraints = DdtKinConstraints(z_lens=z_lens, z_source=z_source, theta_E=theta_E, theta_E_error=0.01,
ddt_samples=ddt_samples, ddt_weights=None,
gamma=gamma, gamma_error=0.02, r_eff=r_eff, r_eff_error=0.05, sigma_v=[sigma_v],
sigma_v_error_independent=[10], sigma_v_error_covariant=0,
kwargs_aperture=kwargs_aperture, kwargs_seeing=kwargs_seeing,
kwargs_lens_light=kwargs_lens_light,
anisotropy_model=anisotropy_model, **kwargs_kin_api_settings)
kwargs_likelihood = kin_constraints.hierarchy_configuration(num_sample_model=5)
kwargs_likelihood['normalized'] = False
ln_class = LensLikelihood(**kwargs_likelihood)
kwargs_kin = {'a_ani': 1}
ln_likelihood = ln_class.lens_log_likelihood(cosmo, kwargs_lens={}, kwargs_kin=kwargs_kin)
npt.assert_almost_equal(ln_likelihood, 0, decimal=1)
if __name__ == '__main__':
pytest.main()
|
StarcoderdataPython
|
1695451
|
def prepare_scorecounter(scorenumber):
"""
:param scorenumber: ScoreNumber
:return: [PIL.Image]
"""
img = []
for image in scorenumber.score_images:
image.change_size(0.87, 0.87)
img.append(image.img)
return img
|
StarcoderdataPython
|
115257
|
# board/models.py
from django.contrib.auth.models import User
from django.db import models
class Article(models.Model):
title = models.CharField(max_length=120, null=False)
author = models.ForeignKey(User, on_delete=models.CASCADE)
content = models.TextField(null=False)
created_at = models.DateTimeField(auto_now_add=True)
modified_at = models.DateTimeField(auto_now=True)
# default=timezone.now
def __str__(self):
return f'{self.title} / {self.author}'
|
StarcoderdataPython
|
178840
|
# Program that reads an integer and says
# whether or not it is a prime number
tot = 0
num = int(input("Enter an integer: "))
for c in range(1, num + 1):
    if num % c == 0:
        print('\033[33m', end='')
        tot += 1
    else:
        print('\033[31m', end='')
    print(f'{c} ', end='')
print(f'\n\033[mThe number {num} was divisible {tot} times')
if tot == 2:
    print('Therefore, it IS PRIME')
else:
    print('Therefore, it is NOT PRIME')
|
StarcoderdataPython
|
196705
|
<filename>cron/__init__.py
import schedule
import settings
from .poll_pull_requests import poll_pull_requests as poll_pull_requests
from .restart_homepage import restart_homepage as restart_homepage
def schedule_jobs():
schedule.every(settings.PULL_REQUEST_POLLING_INTERVAL_SECONDS).seconds.do(poll_pull_requests)
schedule.every(120).seconds.do(restart_homepage)
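# Hedged sketch (not in the original module) of how these jobs are usually
# driven: the schedule library only fires jobs when run_pending() is polled,
# so some long-running caller has to loop over it, e.g.:
def run_pending_forever(poll_seconds=1):
    import time

    schedule_jobs()
    while True:
        schedule.run_pending()
        time.sleep(poll_seconds)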
|
StarcoderdataPython
|
22680
|
<gh_stars>1-10
import random
import database
from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
import uvicorn
# Instantiate FastAPI
app = FastAPI()
# Whitelist origins
app.add_middleware(
CORSMiddleware,
allow_origins = ["*"],
allow_credentials = True,
allow_methods = ["*"],
allow_headers = ["*"]
)
# POST
@app.post('/api/create', response_description = "Add new schedule")
async def create(req: Request) -> dict:
json = await req.json()
id = database.add_schedule(json)
return {"id": id}
# GET
@app.get('/api/{id}/tags', response_description = "Get tags associated with given id")
async def tags(id: str) -> list:
data = database.get_schedule(id)
tags = []
for elem in data:
tags += elem['tags']
    tags = list(set(tags))  # Remove duplicates, then back to a list for the JSON response
    return tags
# POST
@app.post('/api/{id}/schedule', response_description = "Get best schedule associated with given id and user chosen tags")
async def schedule(req: Request, id: str) -> list:
schedule = database.get_schedule(id)
tags = await req.json()
to_remove = []
colided = check_colide(schedule) # Returns a list of tuples containing events happening at the same time
for event in colided:
h1 = schedule[event[0]]
h1_sum = 0.0
h1_tags = 0
h2 = schedule[event[1]]
h2_sum = 0.0
h2_tags = 0
for tag in h1.get('tags', []):
h1_tags += 1
h1_sum += int(tags[tag])
for tag in h2.get('tags', []):
h2_tags += 1
h2_sum += int(tags[tag])
if h1_tags != 0:
h1_sum = h1_sum / h1_tags
if h1_sum == 0:
to_remove.append(h1)
if h2_tags != 0:
h2_sum = h2_sum / h2_tags
if h2_sum == 0:
to_remove.append(h2)
h1_len = len(h1.get('tags', []))
h2_len = len(h2.get('tags', []))
if (h1_sum > h2_sum) and (h2_len > 0):
to_remove.append(h2)
elif (h1_sum < h2_sum) and (h1_len > 0):
to_remove.append(h1)
elif (h1_sum == h2_sum) and (h1_len > 0) and (h2_len > 0):
# Chooses a random schedule and remove it
if (random.randint(0,1)) == 0:
to_remove.append(h1)
else:
to_remove.append(h2)
for elem in to_remove:
if elem in schedule:
schedule.remove(elem)
return schedule
# Checks for colliding events inside the main schedule
def check_colide(schedule: list) -> list:
colided = []
for i in range(len(schedule)):
for j in range(i + 1, len(schedule)):
if (check_colide_aux(schedule[i], schedule[j])):
colided.append((i,j))
return colided
def check_colide_aux(h1, h2) -> bool:
    start1 = h1['date_start']
    end1 = h1['date_end']
    start2 = h2['date_start']
    end2 = h2['date_end']
    if start1 == start2 and end1 == end2:
        return True
    if start1 < start2 and end1 > start2:
        return True
    if start1 > start2 and end1 < end2:
        return True
    return False
if __name__ == "__main__":
uvicorn.run("api:app", host = "0.0.0.0", port = 8000, reload = True)
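# --- Hedged client sketch (assumption, not part of the original API module) ---
# Payload shapes are inferred from what the handlers above read: /api/create
# stores whatever JSON it receives, each stored event is a dict carrying
# 'tags', 'date_start' and 'date_end', and /api/{id}/schedule expects a mapping
# of tag -> numeric rating. Host/port follow the __main__ block; event fields
# other than those read by the handlers are invented for illustration.
def demo_client(base="http://localhost:8000"):
    import requests

    events = [
        {"title": "Talk A", "tags": ["ai"], "date_start": "2021-05-01T10:00", "date_end": "2021-05-01T11:00"},
        {"title": "Talk B", "tags": ["web"], "date_start": "2021-05-01T10:30", "date_end": "2021-05-01T11:30"},
    ]
    schedule_id = requests.post(f"{base}/api/create", json=events).json()["id"]
    tags = requests.get(f"{base}/api/{schedule_id}/tags").json()
    ratings = {tag: 5 for tag in tags}  # rate every tag; colliding events keep the higher average
    best = requests.post(f"{base}/api/{schedule_id}/schedule", json=ratings).json()
    print(best)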
|
StarcoderdataPython
|
3238420
|
<filename>tests/unit/utils/test_permissions.py
"""
This test will use the default permissions found in
flaskbb.utils.populate
"""
from flaskbb.utils.permissions import *
def test_moderator_permissions_in_forum(
forum, moderator_user, topic, topic_moderator):
"""Test the moderator permissions in a forum where the user is a
moderator.
"""
assert moderator_user in forum.moderators
assert can_post_reply(moderator_user, forum)
assert can_post_topic(moderator_user, forum)
assert can_edit_post(moderator_user, topic.user_id, forum)
assert can_moderate(moderator_user, forum)
assert can_delete_post(moderator_user, topic.user_id, forum)
assert can_delete_topic(moderator_user, topic.user_id, forum)
def test_moderator_permissions_without_forum(
forum, moderator_user, topic, topic_moderator):
"""Test the moderator permissions in a forum where the user is not a
moderator.
"""
forum.moderators.remove(moderator_user)
assert not moderator_user in forum.moderators
assert not can_moderate(moderator_user, forum)
assert can_post_reply(moderator_user, forum)
assert can_post_topic(moderator_user, forum)
assert not can_edit_post(moderator_user, topic.user_id, forum)
assert not can_delete_post(moderator_user, topic.user_id, forum)
assert not can_delete_topic(moderator_user, topic.user_id, forum)
# Test with own topic
assert can_delete_post(moderator_user, topic_moderator.user_id, forum)
assert can_delete_topic(moderator_user, topic_moderator.user_id, forum)
assert can_edit_post(moderator_user, topic_moderator.user_id, forum)
# Test moderator permissions
assert can_edit_user(moderator_user)
assert can_ban_user(moderator_user)
def test_normal_permissions(forum, user, topic):
"""Test the permissions for a normal user."""
assert not can_moderate(user, forum)
assert can_post_reply(user, forum)
assert can_post_topic(user, forum)
assert can_edit_post(user, topic.user_id, forum)
assert not can_delete_post(user, topic.user_id, forum)
assert not can_delete_topic(user, topic.user_id, forum)
assert not can_edit_user(user)
assert not can_ban_user(user)
def test_admin_permissions(forum, admin_user, topic):
"""Test the permissions for a admin user."""
assert can_moderate(admin_user, forum)
assert can_post_reply(admin_user, forum)
assert can_post_topic(admin_user, forum)
assert can_edit_post(admin_user, topic.user_id, forum)
assert can_delete_post(admin_user, topic.user_id, forum)
assert can_delete_topic(admin_user, topic.user_id, forum)
assert can_edit_user(admin_user)
assert can_ban_user(admin_user)
def test_super_moderator_permissions(forum, super_moderator_user, topic):
"""Test the permissions for a super moderator user."""
assert can_moderate(super_moderator_user, forum)
assert can_post_reply(super_moderator_user, forum)
assert can_post_topic(super_moderator_user, forum)
assert can_edit_post(super_moderator_user, topic.user_id, forum)
assert can_delete_post(super_moderator_user, topic.user_id, forum)
assert can_delete_topic(super_moderator_user, topic.user_id, forum)
assert can_edit_user(super_moderator_user)
assert can_ban_user(super_moderator_user)
def test_can_moderate_without_permission(moderator_user):
"""Test can moderate for a moderator_user without a permission."""
assert can_moderate(moderator_user) == False
|
StarcoderdataPython
|
109656
|
<reponame>matham/kivy-trio
import trio
import random
from kivy.app import App
from kivy.lang import Builder
from kivy.properties import StringProperty
from kivy_trio.to_kivy import async_run_in_kivy, EventLoopStoppedError
from kivy_trio.context import kivy_trio_context_manager
kv = '''
Label:
text: 'trio sent: {}'.format(app.trio_msg)
'''
class DemoApp(App):
trio_msg = StringProperty('')
def build(self):
return Builder.load_string(kv)
@async_run_in_kivy
def send_kivy_message(self, packet):
ex = '!' * (packet % 3 + 1)
self.trio_msg = f'beetle juice {packet + 1} times{ex}'
async def send_msg_to_kivy_from_trio(self):
i = 0
while True:
try:
await self.send_kivy_message(i)
except EventLoopStoppedError:
# kivy stopped so nothing more to do
return
i += 1
await trio.sleep(1 + random.random())
async def run_app(self):
with kivy_trio_context_manager():
async with trio.open_nursery() as nursery:
nursery.start_soon(self.async_run, 'trio')
nursery.start_soon(self.send_msg_to_kivy_from_trio)
if __name__ == '__main__':
trio.run(DemoApp().run_app)
|
StarcoderdataPython
|
80532
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 25 10:44:24 2017
@author: wroscoe
"""
import time
from threading import Thread
import socket
from donkeycar.parts.controller import JoystickController, PS3JoystickController, PS3Joystick
from donkeycar.memory import Memory  # needed below when Vehicle() is constructed without an explicit mem
class Vehicle():
def __init__(self, mem=None):
if not mem:
mem = Memory()
self.mem = mem
self.parts = []
self.on = True
self.threads = []
def add(self, part, inputs=[], outputs=[],
threaded=False, run_condition=None):
"""
Method to add a part to the vehicle drive loop.
Parameters
----------
inputs : list
Channel names to get from memory.
            outputs : list
Channel names to save to memory.
threaded : boolean
If a part should be run in a separate thread.
"""
p = part
print('Adding part {}.'.format(p.__class__.__name__))
entry={}
entry['part'] = p
entry['inputs'] = inputs
entry['outputs'] = outputs
entry['run_condition'] = run_condition
if threaded:
t = Thread(target=part.update, args=())
t.daemon = True
entry['thread'] = t
self.parts.append(entry)
def start(self, rate_hz=10, max_loop_count=None):
"""
Start vehicle's main drive loop.
This is the main thread of the vehicle. It starts all the new
        threads for the threaded parts, then starts an infinite loop
that runs each part and updates the memory.
Parameters
----------
rate_hz : int
The max frequency that the drive loop should run. The actual
frequency may be less than this if there are many blocking parts.
max_loop_count : int
                Maximum number of loops the drive loop should execute. This is
                used for testing that all the parts of the vehicle work.
"""
try:
self.on = True
for entry in self.parts:
if entry.get('thread'):
#start the update thread
entry.get('thread').start()
#wait until the parts warm up.
print('Starting vehicle...')
time.sleep(1)
loop_count = 0
while self.on:
start_time = time.time()
loop_count += 1
self.update_parts()
#stop drive loop if loop_count exceeds max_loopcount
if max_loop_count and loop_count > max_loop_count:
self.on = False
sleep_time = 1.0 / rate_hz - (time.time() - start_time)
if sleep_time > 0.0:
time.sleep(sleep_time)
except KeyboardInterrupt:
pass
finally:
self.stop()
def update_parts(self):
'''
loop over all parts
'''
for entry in self.parts:
#don't run if there is a run condition that is False
run = True
if entry.get('run_condition'):
run_condition = entry.get('run_condition')
run = self.mem.get([run_condition])[0]
#print('run_condition', entry['part'], entry.get('run_condition'), run)
if run:
p = entry['part']
#get inputs from memory
inputs = self.mem.get(entry['inputs'])
#run the part
if entry.get('thread'):
outputs = p.run_threaded(*inputs)
else:
outputs = p.run(*inputs)
#save the output to memory
if outputs is not None:
self.mem.put(entry['outputs'], outputs)
def stop(self):
print('Shutting down vehicle and its parts...')
for entry in self.parts:
try:
entry['part'].shutdown()
except Exception as e:
print(e)
print(self.mem.d)
def sendCommand( steering, throttle, recording ):
#valid_commands = ["forward","backward","stop","left","right"]
min_ctrl = 0.1
direction = "stop"
if recording is not None:
direction = recording
print( "Recording: {}".format( direction ) )
else:
if throttle > min_ctrl:
direction = "forward"
elif throttle < -min_ctrl:
direction = "stop"
elif steering < -min_ctrl:
direction = "left"
elif steering > min_ctrl:
direction = "right"
else:
return
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host ="127.0.0.1"
port = 12347
s.connect((host,port))
s.send(direction.encode())
s.close()
except Exception as inst:
print( "Failed to send command" )
def sendContinuousCommand( left_throttle, right_throttle, recording, dk=False ):
#min_ctrl = 0.1
#if abs(left_throttle) < min_ctrl and abs(right_throttle) < min_ctrl:
# left_throttle = 0.0
# right_throttle = 0.0
if recording is not None:
direction = recording
print( "Recording: {}".format( direction ) )
elif dk:
direction = "dk {} {}".format(left_throttle, right_throttle)
else:
direction = "throttles {} {}".format(left_throttle, right_throttle)
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host ="127.0.0.1"
port = 12347
s.connect((host,port))
s.send(direction.encode())
s.close()
except Exception as inst:
print( "Failed to send continuous command" )
class MalpiJoystickController(JoystickController):
'''
A Controller object that maps inputs to actions
'''
def __init__(self, *args, **kwargs):
super(MalpiJoystickController, self).__init__(*args, **kwargs)
def init_js(self):
'''
attempt to init joystick
'''
try:
self.js = PS3Joystick(self.dev_fn)
if not self.js.init():
self.js = None
except FileNotFoundError:
print(self.dev_fn, "not found.")
self.js = None
return self.js is not None
def init_trigger_maps(self):
'''
init set of mapping from buttons to function calls
'''
self.button_down_trigger_map = {
'select' : self.toggle_mode,
'circle' : self.toggle_manual_recording,
'triangle' : self.erase_last_N_records,
'cross' : self.emergency_stop,
#'dpad_up' : self.increase_max_throttle,
#'dpad_down' : self.decrease_max_throttle,
'start' : self.toggle_constant_throttle,
#"R1" : self.chaos_monkey_on_right,
#"L1" : self.chaos_monkey_on_left,
}
self.button_up_trigger_map = {
#"R1" : self.chaos_monkey_off,
#"L1" : self.chaos_monkey_off,
}
self.axis_trigger_map = {
'left_stick_horz' : self.set_steering,
'right_stick_vert' : self.set_throttle,
}
if __name__ == "__main__":
JOYSTICK_MAX_THROTTLE = 1.0
JOYSTICK_STEERING_SCALE = 1.0
AUTO_RECORD_ON_THROTTLE = False
ctr = MalpiJoystickController(throttle_scale=JOYSTICK_MAX_THROTTLE,
steering_scale=JOYSTICK_STEERING_SCALE,
auto_record_on_throttle=AUTO_RECORD_ON_THROTTLE)
rate_hz=10
max_loop_count=None
recording_state = False
RAW_OUTPUTS = False
t = Thread(target=ctr.update, args=())
t.daemon = True
t.start()
try:
#wait until the parts warm up.
print('Starting vehicle...')
time.sleep(1)
loop_count = 0
done = False
while not done:
start_time = time.time()
loop_count += 1
if loop_count == 1:
ctr.print_controls()
ctr.js.show_map()
if RAW_OUTPUTS:
button, button_state, axis, axis_val = ctr.js.poll()
print( f"Raw: {button} {button_state} {axis} {axis_val}" )
else:
outputs = ctr.run_threaded()
print( "{}".format(outputs) )
if outputs[2] != "user":
break
if outputs[3] != recording_state:
recording_state = outputs[3]
rec = 'record_start' if recording_state else 'record_end'
else:
rec = None
#sendCommand( outputs[0], outputs[1], rec )
#sendContinuousCommand( outputs[4], outputs[1], rec )
#sendContinuousCommand( outputs[0], outputs[1], rec, dk=True )
#print( "L/R: {} {}".format(outputs[0],outputs[1]) )
#stop drive loop if loop_count exceeds max_loopcount
if max_loop_count and loop_count > max_loop_count:
print( "breaking for max count" )
break
sleep_time = 1.0 / rate_hz - (time.time() - start_time)
if sleep_time > 0.0:
time.sleep(sleep_time)
except KeyboardInterrupt:
pass
finally:
#sendContinuousCommand( 0.0, 0.0, 'record_end' )
ctr.shutdown()
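# --- Hedged sketch (not part of the original script) of driving the Vehicle
# loop defined above. DictMemory only mirrors the get()/put()/d interface the
# loop uses; real code would pass donkeycar's own Memory. The part classes and
# channel names are illustrative. Nothing below runs automatically.
class DictMemory:
    def __init__(self):
        self.d = {}

    def get(self, keys):
        return [self.d.get(k) for k in keys]

    def put(self, keys, values):
        if len(keys) == 1:
            self.d[keys[0]] = values
        else:
            for k, v in zip(keys, values):
                self.d[k] = v


class Clock:
    def run(self):
        return time.time()

    def shutdown(self):
        pass


class Printer:
    def run(self, ts):
        print('tick', ts)

    def shutdown(self):
        pass


def demo_vehicle():
    # One part writes a timestamp channel, the other reads and prints it.
    v = Vehicle(mem=DictMemory())
    v.add(Clock(), outputs=['timestamp'])
    v.add(Printer(), inputs=['timestamp'])
    v.start(rate_hz=2, max_loop_count=5)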
|
StarcoderdataPython
|
3229405
|
import os
import sentry_sdk
from dotenv import load_dotenv
from django.contrib.messages import constants as messages
from sentry_sdk.integrations.django import DjangoIntegration
# Environment variables
load_dotenv()
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv("SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
if os.getenv("ENV") == 'dev':
DEBUG = True
# DEBUG_PROPAGATE_EXCEPTIONS = True
else:
DEBUG = False
DEBUG_PROPAGATE_EXCEPTIONS = False
ALLOWED_HOSTS = [os.getenv("HOST")]
# Application definition
INSTALLED_APPS = [
'apps.bouygue',
'apps.users',
'apps.agenda',
'apps.blog',
'apps.activities',
'apps.info',
'apps.work',
'apps.budget',
'crispy_forms',
'captcha',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'client_side_image_cropping',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql",
"NAME": os.getenv("DB_NAME"),
"USER": os.getenv("DB_USER"),
"PASSWORD": os.getenv("DB_PASSWORD"),
"HOST": "localhost",
"PORT": "5432",
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTH_USER_MODEL = "users.MyUser"
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'fr'
TIME_ZONE = 'Europe/Paris'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATIC_URL = "/static/"
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
MEDIA_URL = "/media/"
CRISPY_TEMPLATE_PACK = 'bootstrap4'
LOGIN_REDIRECT_URL = 'bouygue-home'
LOGIN_URL = 'users-login'
if os.getenv("ENV") == "prod":
# Sentry
sentry_sdk.init(
dsn="https://[email protected]/5635710",
integrations=[DjangoIntegration()],
traces_sample_rate=1.0,
send_default_pii=True,
)
EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
EMAIL_HOST = "smtp.gmail.com"
EMAIL_PORT = 587
EMAIL_USE_TLS = True
EMAIL_HOST_USER = os.getenv("EMAIL_HOST_USER")
EMAIL_HOST_PASSWORD = os.getenv("EMAIL_HOST_PASSWORD")
MESSAGE_TAGS = {
messages.ERROR: 'danger',
}
RECAPTCHA_PUBLIC_KEY = os.getenv("RECAPTCHA_PUBLIC")
RECAPTCHA_PRIVATE_KEY = os.getenv("RECAPTCHA_PRIVATE")
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
StarcoderdataPython
|
39984
|
import numpy as np
import pydensecrf.densecrf as dcrf
from pydensecrf.utils import compute_unary, create_pairwise_bilateral, create_pairwise_gaussian, unary_from_softmax
def dense_crf(img, prob):
'''
input:
img: numpy array of shape (num of channels, height, width)
prob: numpy array of shape (9, height, width), neural network last layer sigmoid output for img
output:
res: (height, width)
Modified from:
http://warmspringwinds.github.io/tensorflow/tf-slim/2016/12/18/image-segmentation-with-tensorflow-using-cnns-and-conditional-random-fields/
https://github.com/yt605155624/tensorflow-deeplab-resnet/blob/e81482d7bb1ae674f07eae32b0953fe09ff1c9d1/inference_crf.py
'''
img = np.swapaxes(img, 0, 2)
# img.shape: (width, height, num of channels)(224,224,3)
num_iter = 50
prob = np.swapaxes(prob, 1, 2) # shape: (1, width, height) (9,224,224)
num_classes = 9 #2
d = dcrf.DenseCRF2D(img.shape[0] , img.shape[1], num_classes)
unary = unary_from_softmax(prob) # shape: (num_classes, width * height)
unary = np.ascontiguousarray(unary)
img = np.ascontiguousarray(img,dtype=np.uint8)
d.setUnaryEnergy(unary)
d.addPairwiseBilateral(sxy=5, srgb=3, rgbim=img, compat=3)
Q = d.inference(num_iter) # set the number of iterations
res = np.argmax(Q, axis=0).reshape((img.shape[0], img.shape[1]))
# res.shape: (width, height)
res = np.swapaxes(res, 0, 1) # res.shape: (height, width)
# res = res[np.newaxis, :, :] # res.shape: (1, height, width)
# func_end = time.time()
# print('{:.2f} sec spent on CRF with {} iterations'.format(func_end - func_start, num_iter))
# about 2 sec for a 1280 * 960 image with 5 iterations
return res
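# Hedged usage sketch (not part of the original module): array shapes follow
# the docstring above, channels/classes first; the inputs are random dummies.
if __name__ == '__main__':
    img = np.random.randint(0, 256, size=(3, 224, 224)).astype(np.uint8)  # (channels, height, width)
    prob = np.random.rand(9, 224, 224).astype(np.float32)                 # (num_classes, height, width)
    prob /= prob.sum(axis=0, keepdims=True)                               # make it softmax-like
    refined = dense_crf(img, prob)                                        # -> (height, width) label map
    print(refined.shape)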
|
StarcoderdataPython
|
3292841
|
<filename>contabilidad/contabilidad/celery.py
from celery import Celery
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'contabilidad.contabilidad.settings')
app = Celery('contabilidad')
app.config_from_object('django.conf:settings', namespace='CELERY')
app.autodiscover_tasks()
@app.on_after_configure.connect
def setup_periodic_tasks(sender, **kwargs):
sender.add_periodic_task(
10.0, say_hello.s(), name='Say hello')
@app.task
def say_hello():
pass
|
StarcoderdataPython
|
1678721
|
<reponame>serglit72/Python_exercises<gh_stars>0
thisdict = {
"brand": "Ford",
"model": "Mustang",
"year": 1964
}
for x, y in thisdict.items():
print(x, y)
print(type(x),type(y))
|
StarcoderdataPython
|
1784257
|
<filename>asyncserial/async_serial_wrapper.py<gh_stars>1-10
# :Filename:
# async_serial_wrapper.py
# :Authors:
# <NAME> <<EMAIL>>
# :License:
# Apache 2.0
import asyncio
import serial
from . import AbstractAsyncWrapper
class Serial(AbstractAsyncWrapper):
"""
asyncserial is a simple wrapper for the pyserial library to provide async functionality.
It is transparent to the pyserial interface and supports all parameters.
You can e.g. create a connection by:
    >>> test = Serial(loop, "/dev/ttyUSB0", baudrate=115200)
:param loop: The main eventloop
:param args: Arguments passed through to serial.Serial()
:param kwargs: Keyword arguments passed through to serial.Serial()
"""
def __init__(self, loop: asyncio.AbstractEventLoop, *args, **kwargs):
"""
Initializes the async wrapper and Serial interface
"""
self._serial_instance = serial.Serial(*args, **kwargs)
self._asyncio_sleep_time = 0.0005
super().__init__(loop)
def _init(self):
"""
        Sets up the serial connection for use with async (setting both read and write timeouts to 0)
"""
# Set the serial instance to non blocking
self._serial_instance.timeout = 0
self._serial_instance.write_timeout = 0
@property
def is_open(self) -> bool:
"""
True if the connection is open, false otherwise
"""
return self._serial_instance.isOpen()
@property
def serial_instance(self) -> serial.Serial:
"""
Serial instance
"""
return self._serial_instance
@property
def out_waiting(self) -> int:
"""
Number of not yet written bytes
"""
return self._serial_instance.out_waiting
@property
def in_waiting(self) -> int:
"""
Number of bytes available to be read
"""
return self._serial_instance.in_waiting
async def abort(self):
"""
Closes the serial connection immediately, output queue will be discarded
"""
self._serial_instance.close()
async def _write(self, towrite: bytes):
"""
Adds towrite to the write queue
:param towrite: Write buffer
"""
self._serial_instance.write(towrite)
async def _read(self, num_bytes) -> bytes:
"""
Reads a given number of bytes
:param num_bytes: How many bytes to read
:returns: incoming bytes
"""
while True:
if self.in_waiting < num_bytes:
await asyncio.sleep(self._asyncio_sleep_time)
else:
# Try to read bytes
inbytes = self._serial_instance.read(num_bytes)
# Just for safety, should never happen
if not inbytes:
await asyncio.sleep(self._asyncio_sleep_time)
else:
return inbytes
async def readline(self) -> bytes:
"""
Reads one line
        >>> # Keeps waiting for a linefeed in case there is none in the buffer
>>> await test.readline()
:returns: bytes forming a line
"""
while True:
line = self._serial_instance.readline()
if not line:
await asyncio.sleep(self._asyncio_sleep_time)
else:
return line
|
StarcoderdataPython
|
3231335
|
<filename>03 - Types/3.2 - InbuiltTypes-ListsTuples/28-named-tuple.py
# HEAD
# DataType - Named Tuples
# DESCRIPTION
# Working with Named Tuples
# RESOURCES
#
# https://docs.python.org/2/library/collections.html#collections.namedtuple
# https://stackoverflow.com/questions/39345995/how-does-python-return-multiple-values-from-a-function
import collections
# Create a tuple called NamedTuple
NamedTuple = collections.namedtuple('Point', ['x', 'y'])
p = NamedTuple(1, y=2)
# Access NamedTuple
print(p.x, p.y)
|
StarcoderdataPython
|
1646640
|
#!/usr/bin/env python
# md5: dd33245d9893bd42b01276a1b0a5b1cf
# coding: utf-8
from tmilib import *
from h2o_utils import *
import h2o
h2o.init()
import traceback
#print len(sdir_glob('*mtries_*_sample_rate_*'))
#classifier = load_h2o_model(sdir_path('binclassifier_catfeatures_gradientboost_v3.h2o'))
#print classifier
for model_file in sdir_glob('*mtries_*_sample_rate_*'):
    print(model_file)
    try:
        classifier = load_h2o_model(model_file)
        print(classifier)
    except:
        traceback.print_exc()
        continue
model_file = sdir_path('binclassifier_catfeatures_randomforest_v6.h2o')
classifier = load_h2o_model(model_file)
print(classifier)
test_data = h2o.import_file(sdir_path('catdata_test_second_v2.csv'))
#test_data_2[0] = test_data_2[0].asfactor()
#print test_data_2.describe()
#test_data = h2o.import_file(sdir_path('catdata_test_second.csv'))
#print test_data.describe()
#test_data[0] = test_data[0].asfactor()
#test_data[0,:] = 1
#test_predictions = classifier.predict(test_data)
#print classifier
#print h2o.confusion_matrix(test_predictions, )
print(classifier.model_performance(test_data))
#print classifier.confusion_matrix
#print test_data['label']
#print test_predictions
#print classifier.F1
#print test_data.describe()
testdata = h2o.import_file(sdir_path('catdata_test_insession_tensecond.csv'))
print(testdata.describe())
#h2o.export_file(h2o.get_frame('h2odata_test_threefeatures_insession.hex'), sdir_path('h2odata_test_threefeatures_insession.hex'))
#print classifier.confusion_matrix(test_data)
|
StarcoderdataPython
|
1642838
|
from argparse import ArgumentParser
from glob import glob
from importlib import import_module
import numpy as np
import tensorflow as tf
from common import load_labels, load_pickle_file
def run_prediction(args):
batch_size = args.batch_size
model_class = import_module("models.{}".format(args.model)).Model()
model = model_class.get_model()
model.compile(
optimizer=tf.keras.optimizers.Adam(),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()]
)
model.load_weights(args.checkpoint)
prediction_generator, _ = \
model_class.get_input_fn_and_steps_per_epoch('prediction', batch_size)
results = model.predict(prediction_generator, batch_size=None)
predicted_labels_id = np.argmax(results, axis=1)
id_to_labels, _ = load_labels()
predicted_labels = [id_to_labels[label_id] for label_id in predicted_labels_id]
test_filenames = list(sorted(list(load_pickle_file('test_filenames.pickle'))))
print("fname,label")
for filename, predicted_label in zip(test_filenames, predicted_labels):
print("{},{}".format(filename, predicted_label))
def main():
parser = ArgumentParser(description='DL-MAI project #2 (RNN) prediction script.')
available_models = [model_name.split("/")[1].split(".")[0] for model_name in glob("models/*.py")]
parser.add_argument('model', choices=available_models)
parser.add_argument('checkpoint', metavar="model.ckpt") # type=lambda x: is_valid_file(parser, x)
parser.add_argument('--batch-size', default=1024, type=int)
args = parser.parse_args()
run_prediction(args)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
3230256
|
<reponame>MilesWJ/Jokey<gh_stars>0
from datetime import datetime
import discord
from discord.ext import commands, tasks
from discord_slash import SlashCommand, SlashContext
from discord_slash.utils.manage_commands import create_option, create_choice
from json import loads
from itertools import cycle
from random import choice
from urllib import request
TOKEN = "YOUR TOKEN HERE"  # placeholder: keep the real bot token out of source control
GUILD_ID = 0  # placeholder: your guild (server) id as an int
# Slash commands enabled, use those instead. ("application.commands" on discord.com/developers)
Jokey = commands.Bot(command_prefix="/")
slash = SlashCommand(Jokey, sync_commands=True)
URL = "https://v2.jokeapi.dev/joke/Any?type=twopart"
status = cycle(
["Minecraft",
"Garry's Mod",
"Grand Theft Auto V",
"Terraria",
"League of Legends"]
)
# ------------------------------------------------------------- #
# Bot Presence Loop
@tasks.loop(seconds=3600)
async def status_loop():
await Jokey.change_presence(activity=discord.Game(next(status)))
# ------------------------------------------------------------- #
# Bot Running Indicator
@Jokey.event
async def on_ready():
print(f"\n{Jokey.user} is running! (Started at {datetime.now()})")
status_loop.start()
# ------------------------------------------------------------- #
# Help Command
@slash.slash(
name="help",
description="Returns a list of available commands.",
guild_ids=[GUILD_ID],
)
async def _help(ctx: SlashContext):
messages = ["Here you go!", "Hope this helps!"]
with open("command_list.txt", "r") as command_list:
all_commands = command_list.read()
help_command_embed = discord.Embed(
title="ALL AVAILABLE COMMANDS",
color=discord.Color.blue(),
description=all_commands,
)
help_command_embed.set_author(name="Jokey", icon_url=Jokey.user.avatar_url)
await ctx.send(embed=help_command_embed)
# ------------------------------------------------------------- #
# Ping Command
@slash.slash(
name="ping",
description="Returns bot latency.",
guild_ids=[GUILD_ID],
)
async def _ping(ctx: SlashContext):
await ctx.send(f"Pong! ({round(Jokey.latency*1000)}ms)")
# ------------------------------------------------------------- #
# Invite Command
@slash.slash(
name="invite",
description="Returns the bot invite link.",
guild_ids=[GUILD_ID],
)
async def _invite(ctx: SlashContext):
invite_link = "https://discord.com/api/oauth2/authorize?client_id=873627985327030284&permissions=2147560512&scope=bot%20applications.commands"
# Required Scopes: bot, application.commands
# Required Permissions: Use Slash Commands, Send Messages, Read Message History, Manage Messages, View Channels, Add Reactions
# Permissions Integer: 2147560512
invite_command_embed = discord.Embed(
title="BOT INVITE LINK",
color=discord.Color.blue(),
description=invite_link
)
invite_command_embed.set_author(
name="Jokey", icon_url=Jokey.user.avatar_url)
await ctx.send(embed=invite_command_embed)
# ------------------------------------------------------------- #
# Clear Command
@slash.slash(
name="clear",
description="Clears a suggested amount of messages.",
guild_ids=[GUILD_ID],
options=[
create_option(
name="amount",
description="How many messages would you like to clear?",
required=True,
option_type=4,
)
]
)
@commands.has_permissions(manage_messages=True)
async def _clear(ctx: SlashContext, amount: int):
# Required Permissions: Manage Messages
if amount > 0:
if amount == 1:
await ctx.send(f"Clearing **{amount}** message...")
else:
await ctx.send(f"Clearing **{amount}** messages...")
await ctx.channel.purge(limit=amount + 1)
else:
await ctx.send(f"{ctx.author.mention} clear amount must be greater than 0.")
# ------------------------------------------------------------- #
# Joke Command (1/2)
def request_joke(url):
r = request.urlopen(url)
data = r.read()
json_data = loads(data)
information = [json_data["setup"], json_data["delivery"]]
joke = f"{information[0]} {information[1]}"
return joke
# Joke Command (2/2)
@slash.slash(
name="joke",
description="Returns a random joke.",
guild_ids=[GUILD_ID],
)
async def _joke(ctx: SlashContext):
joke = await ctx.send(request_joke(URL))
await joke.add_reaction("👍")
await joke.add_reaction("👎")
# ------------------------------------------------------------- #
if __name__ == "__main__":
print(f"\nStarting bot...")
Jokey.run(TOKEN)
|
StarcoderdataPython
|
99005
|
<filename>BOJ/13000~13999/13800~13899/13871.py
N, C, S, *l = map(int, open(0).read().split())
c = 0
S -= 1
ans = 0
for i in l:
ans += c == S
c = (c+i+N)%N
ans += c == S
print(ans)
|
StarcoderdataPython
|