id (stringlengths 1–7) | text (stringlengths 6–1.03M) | dataset_id (stringclasses 1)
---|---|---|
3290941
|
<reponame>pengjunn/KD-GAN<gh_stars>0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from nltk.tokenize import RegexpTokenizer
from collections import defaultdict
from miscc.config import cfg
import torch
import torch.utils.data as data
from torch.autograd import Variable
import torchvision.transforms as transforms
import torchtext.vocab as vocab
import os
import sys
import numpy as np
import pandas as pd
from PIL import Image
from ipdb import set_trace
import numpy.random as random
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
def load_pkl(path):
with open(path, 'rb') as f:
kb = pickle.load(f)
return kb
# def load_prior():
# kb = load_pkl(cfg.KNOWLEDGE_BASE)
# kb = kb[cfg.DATASET_NAME] # {"class_id1": [img_name1, ..., img_name10]}
# kb_keys = []
# kb_values = []
# kb_chosen = []
# for k, v in kb.items():
# for image in v:
# kb_keys.append(np.load(os.path.join(cfg.KNOWLEDGE_FEAT, k, image[:-3]+'pool.npy')))
# kb_values.append(np.load(os.path.join(cfg.KNOWLEDGE_FEAT, k, image[:-3]+'layer4.npy')))
# kb_chosen.append(['%s/%s' % (k, image) for image in v])
# if cfg.CUDA:
# kb_keys = Variable(torch.from_numpy(np.array(kb_keys))).cuda()
# kb_values = Variable(torch.from_numpy(np.array(kb_values))).cuda()
# else:
# kb_keys = Variable(torch.from_numpy(np.array(kb_keys)))
# kb_values = Variable(torch.from_numpy(np.array(kb_values)))
# priors = (kb_keys, kb_values)
# return priors, kb_chosen
# def load_prior_batch(data, flag='rand'):
# imgs, captions, captions_lens, class_ids, keys = data
# # prepare knowledge from kb
# kb = load_pkl(cfg.KNOWLEDGE_BASE)
# if 'coco' in cfg.DATASET_NAME:
# name2classid = \
# load_pkl(os.path.join(cfg.DATA_DIR, 'pickles/imgName_to_classID.pkl'))
# kb_keys = []
# kb_values = []
# kb_chosen = []
# for i in range(len(keys)):
# if 'coco' in cfg.DATASET_NAME:
# class_name = list(name2classid[keys[i] + '.jpg'])[0]
# choices = kb[cfg.DATASET_NAME][str(class_name)]
# _names = []
# for _ in choices:
# split = 'train2014' if 'train' in _ else 'val2014'
# _names.append(os.path.join(cfg.KNOWLEDGE_FEAT, split, _))
# else:
# class_name = os.path.split(keys[i])[0]
# choices = kb[cfg.DATASET_NAME][class_name]
# _names = [os.path.join(cfg.KNOWLEDGE_FEAT, class_name, _) for _ in choices]
# if 'rand' in flag:
# # random choose
# rand_idx = random.randint(0, len(_names))
# img_name = _names[rand_idx]
# # print(class_name, img_name)
# kb_value_feat = np.load(img_name[:-3] + 'layer4.npy') # (2048,7,7)
# kb_chosen.append('%s/%s' % (class_name, img_name))
# elif 'sum' in flag:
# # sum all choices
# kb_value_feat = [np.load(_[:-3] + 'layer4.npy') for _ in _names]
# kb_value_feat = np.sum(np.array(kb_value_feat), axis=0)
# elif 'query_pool' in flag:
# # all keys and values, 10 in total
# # [(2048,1,1), ...]
# kb_key_feat = [np.load(_[:-3] + 'pool.npy') for _ in _names]
# kb_keys.append(kb_key_feat)
# # [(2048,7,7), ...]
# kb_value_feat = [np.load(_[:-3] + 'layer4.npy') for _ in _names]
# kb_chosen.append(['%s/%s' % (class_name, img_name) for img_name in choices])
# kb_values.append(kb_value_feat)
# kb_keys = np.squeeze(np.array(kb_keys)) # (N,K,2048)
# kb_values = np.array(kb_values) # (N,K,2048,7,7)
# if cfg.CUDA:
# kb_keys = Variable(torch.from_numpy(kb_keys)).cuda()
# kb_values = Variable(torch.from_numpy(kb_values)).cuda()
# else:
# kb_keys = Variable(torch.from_numpy(kb_keys))
# kb_values = Variable(torch.from_numpy(kb_values))
# priors = (kb_keys, kb_values)
# return priors, kb_chosen
def prepare_data(data):
imgs, captions, captions_lens, class_ids, keys, \
priors, kb_candi = data
# imgs: [(N,3,64,64), (N,3,128,128), (N,3,256,256)]
# captions: (N,seq_len)
# sort data by the length in a decreasing order
sorted_cap_lens, sorted_cap_indices = \
torch.sort(captions_lens, 0, True)
real_imgs = []
for i in range(len(imgs)): # 3
imgs[i] = imgs[i][sorted_cap_indices] #
if cfg.CUDA:
real_imgs.append(Variable(imgs[i]).cuda())
else:
real_imgs.append(Variable(imgs[i]))
captions = captions[sorted_cap_indices].squeeze() # sorted
class_ids = class_ids[sorted_cap_indices].numpy() # sorted
# sent_indices = sent_indices[sorted_cap_indices]
keys = [keys[i] for i in sorted_cap_indices.numpy()] # sorted
# print('keys', type(keys), keys[-1]) # list
if cfg.CUDA:
captions = Variable(captions).cuda()
sorted_cap_lens = Variable(sorted_cap_lens).cuda()
else:
captions = Variable(captions)
sorted_cap_lens = Variable(sorted_cap_lens)
kb_keys, kb_values = priors
if cfg.CUDA:
kb_keys = Variable(kb_keys).cuda()
kb_values = Variable(kb_values).cuda()
else:
kb_keys = Variable(kb_keys)
kb_values = Variable(kb_values)
priors = [kb_keys, kb_values]
return [real_imgs, captions, sorted_cap_lens,
class_ids, keys, priors, kb_candi]
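# --- Illustrative usage (assumption, not part of the original file) ---
# A minimal sketch of how prepare_data() is typically consumed in a training
# loop. The loader variable is a placeholder for a DataLoader built on the
# TextDataset defined further below; names here are not from this repository.
#
# for step, batch in enumerate(train_loader):
#     imgs, caps, cap_lens, cls_ids, keys, priors, kb_candi = prepare_data(batch)
#     # imgs[i] has shape (N, 3, 64 * 2**i, 64 * 2**i); every returned item is
#     # re-ordered by caption length (descending) and moved to GPU if cfg.CUDA.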
def get_imgs(img_path, imsize, bbox=None,
transform=None, normalize=None):
img = Image.open(img_path).convert('RGB')
width, height = img.size
if bbox is not None:
r = int(np.maximum(bbox[2], bbox[3]) * 0.75)
center_x = int((2 * bbox[0] + bbox[2]) / 2)
center_y = int((2 * bbox[1] + bbox[3]) / 2)
y1 = np.maximum(0, center_y - r)
y2 = np.minimum(height, center_y + r)
x1 = np.maximum(0, center_x - r)
x2 = np.minimum(width, center_x + r)
img = img.crop([x1, y1, x2, y2])
if transform is not None:
img = transform(img)
ret = []
if cfg.GAN.B_DCGAN:
ret = [normalize(img)]
else:
for i in range(cfg.TREE.BRANCH_NUM):
# print(imsize[i])
if i < (cfg.TREE.BRANCH_NUM - 1):
re_img = transforms.Scale(imsize[i])(img) # 64, 128
else:
re_img = img # 256
ret.append(normalize(re_img))
return ret # [(3,64,64), (3,128,128), (3,256,256)]
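# --- Illustrative usage (assumption, not part of the original file) ---
# A rough sketch of the transform/normalize pair that get_imgs() expects; the
# exact augmentation pipeline used by the project may differ.
#
# imsize = [64, 128, 256]
# image_transform = transforms.Compose([
#     transforms.RandomCrop(imsize[-1]),
#     transforms.RandomHorizontalFlip()])
# norm = transforms.Compose([
#     transforms.ToTensor(),
#     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
# imgs = get_imgs('bird.jpg', imsize, bbox=None,
#                 transform=image_transform, normalize=norm)
# # -> [Tensor(3,64,64), Tensor(3,128,128), Tensor(3,256,256)]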
class TextDataset(data.Dataset):
def __init__(self, data_dir, split='train',
base_size=64,
transform=None, target_transform=None):
self.transform = transform
self.norm = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
self.target_transform = target_transform
self.embeddings_num = cfg.TEXT.CAPTIONS_PER_IMAGE # 10
self.imsize = [] # [64,128,256]
for i in range(cfg.TREE.BRANCH_NUM): # 3
self.imsize.append(base_size)
base_size = base_size * 2
self.data = []
self.data_dir = data_dir # ../data/birds
if data_dir.find('birds') != -1: # 'birds' in data_dir
# {'001.Black_footed_Albatross/Black_Footed_Albatross_0046_18': [[60, 27, 325, 304]], ...}
self.bbox = self.load_bbox()
else:
self.bbox = None
self.split = split
split_dir = os.path.join(data_dir, split) # ../data/birds/train_or_test
self.glove = vocab.GloVe(name='6B', dim=cfg.TEXT.EMBEDDING_DIM)
self.word2ix = self.glove.stoi
self.ix2word = self.glove.itos
self.ix2vec = self.glove.vectors
print('Text embedding {}'.format(cfg.TEXT.EMBEDDING_DIM))
print('Loaded {} words from GloVe'.format(len(self.ix2word)))
# also update ix2word, word2ix, ix2vec
self.filenames, self.captions, self.n_words = \
self.load_text_data(data_dir, split)
# np.range(length) for coco
self.class_id = self.load_class_id(split_dir, len(self.filenames))
self.number_example = len(self.filenames)
# load knowledge base
if 'rand' in cfg.CONFIG_NAME:
if 'all' in cfg.CONFIG_NAME:
# rand sample from all images
# self.kb = load_pkl('%s/%s/filenames.pickle' % (cfg.DATA_DIR, 'train'))
kb_path = '%s/%s/filenames.pickle' % (cfg.DATA_DIR, 'train')
else:
# rand sample from fixed random 10 images
# self.kb = load_pkl(cfg.KNOWLEDGE_BASE.replace('.pickle', '_rand.pickle'))
kb_path = cfg.KNOWLEDGE_BASE.replace('.pickle', '_rand.pickle')
else:
# self.kb = load_pkl(cfg.KNOWLEDGE_BASE)
kb_path = cfg.KNOWLEDGE_BASE
self.kb = load_pkl(kb_path)
print('Load KB from: ', kb_path)
if 'coco' in cfg.DATASET_NAME:
self.name2classid = \
load_pkl(os.path.join(cfg.DATA_DIR, 'pickles/imgName_to_classID.pkl'))
else:
self.name2classid = None
def load_bbox(self):
data_dir = self.data_dir
bbox_path = os.path.join(data_dir, 'CUB_200_2011/bounding_boxes.txt')
df_bounding_boxes = pd.read_csv(bbox_path,
delim_whitespace=True,
header=None).astype(int)
#
filepath = os.path.join(data_dir, 'CUB_200_2011/images.txt')
df_filenames = \
pd.read_csv(filepath, delim_whitespace=True, header=None)
filenames = df_filenames[1].tolist() # 11788 class_id/image_name.jpg
print('Total filenames: ', len(filenames), filenames[0])
#
filename_bbox = {img_file[:-4]: [] for img_file in filenames}
numImgs = len(filenames)
for i in range(0, numImgs):
# bbox = [x-left, y-top, width, height]
bbox = df_bounding_boxes.iloc[i][1:].tolist()
key = filenames[i][:-4]
filename_bbox[key] = bbox
#
return filename_bbox
def load_captions(self, data_dir, filenames):
all_captions = []
for i in range(len(filenames)):
cap_path = '%s/text/%s.txt' % (data_dir, filenames[i])
with open(cap_path, "r", encoding="utf8") as f:
captions = f.read().split('\n')
cnt = 0
for cap in captions:
if len(cap) == 0:
continue
cap = cap.replace("\ufffd\ufffd", " ")
# picks out sequences of alphanumeric characters as tokens
# and drops everything else
tokenizer = RegexpTokenizer(r'\w+')
tokens = tokenizer.tokenize(cap.lower())
# print('tokens', tokens)
if len(tokens) == 0:
print('cap', cap, cap_path)
continue
tokens_new = []
for t in tokens:
t = t.encode('ascii', 'ignore').decode('ascii')
if len(t) > 0:
tokens_new.append(t)
all_captions.append(tokens_new)
cnt += 1
if cnt == self.embeddings_num:
break
if cnt < self.embeddings_num:
print('ERROR: %s has only %d captions (expected %d)'
% (filenames[i], cnt, self.embeddings_num))
return all_captions
def get_word_idx(self, word):
try:
ix = self.word2ix[word]
except:
ix = self.word2ix['unk']
return ix
def build_dictionary(self, train_captions, test_captions):
word_counts = defaultdict(float)
captions = train_captions + test_captions
for sent in captions:
for word in sent:
word_counts[word] += 1
w_threshold = cfg.TEXT.WORD_THRESHOLD
vocab = [w for w in word_counts if word_counts[w] >= w_threshold]
vocab.append('<end>')
first_s = self.ix2word[0] # 'the'
new_idx = len(self.ix2word) # 400000
self.ix2word.append(first_s) # 'the'
self.ix2word[0] = u'<end>'
self.word2ix[first_s] = new_idx
self.word2ix[u'<end>'] = 0
first_vec = self.ix2vec[0].unsqueeze(0)
self.ix2vec = torch.cat([self.ix2vec, first_vec], dim=0)
self.ix2vec[0] = torch.zeros([300])
train_captions_new = []
for t in train_captions:
rev = []
for w in t:
rev.append(self.get_word_idx(w))
# rev.append(0) # do not need '<end>' token
train_captions_new.append(rev)
test_captions_new = []
for t in test_captions:
rev = []
for w in t:
rev.append(self.get_word_idx(w))
# rev.append(0) # do not need '<end>' token
test_captions_new.append(rev)
return [train_captions_new, test_captions_new, vocab]
def load_text_data(self, data_dir, split):
filepath = os.path.join(data_dir, 'captions.pickle')
# [002.Laysan_Albatross/Laysan_Albatross_0002_1027, ...], length=8855
train_names = self.load_filenames(data_dir, 'train')
# [001.Black_footed_Albatross/Black_Footed_Albatross_0046_18, ...], length=2933
test_names = self.load_filenames(data_dir, 'test')
if not os.path.isfile(filepath):
train_captions = self.load_captions(data_dir, train_names)
test_captions = self.load_captions(data_dir, test_names)
train_captions, test_captions, vocab = \
self.build_dictionary(train_captions, test_captions)
with open(filepath, 'wb') as f:
pickle.dump([train_captions, test_captions,
self.ix2word, self.word2ix,
vocab], f, protocol=2)
print('Save to: ', filepath)
ix2vec_path = filepath.replace('captions.pickle', 'ix2vec.pickle')
if not os.path.isfile(ix2vec_path):
with open(ix2vec_path, 'wb') as f:
pickle.dump(self.ix2vec, f)
print('Save to: ', filepath)
else:
with open(filepath, 'rb') as f:
x = pickle.load(f)
train_captions, test_captions = x[0], x[1]
self.ix2word, self.word2ix = x[2], x[3]
vocab = x[4]
del x
print('Load from: ', filepath)
ix2vec_path = filepath.replace('captions.pickle', 'ix2vec.pickle')
with open(ix2vec_path, 'rb') as f:
self.ix2vec = pickle.load(f)
print('Load from: ', ix2vec_path)
if split == 'train':
# a list of list: each list contains
# the indices of words in a sentence
captions = train_captions
filenames = train_names
else: # split=='test'
captions = test_captions
filenames = test_names
return filenames, captions, len(vocab)
def load_class_id(self, data_dir, total_num):
if os.path.isfile(data_dir + '/class_info.pickle'):
with open(data_dir + '/class_info.pickle', 'rb') as f:
class_id = pickle.load(f)
else:
class_id = np.arange(total_num)
return class_id
def load_filenames(self, data_dir, split):
filepath = '%s/%s/filenames.pickle' % (data_dir, split)
if os.path.isfile(filepath):
with open(filepath, 'rb') as f:
filenames = pickle.load(f)
print('Load filenames from: %s (%d)' % (filepath, len(filenames)))
else:
filenames = []
return filenames
def get_caption(self, sent_ix):
# a list of indices for a sentence
sent_caption = np.asarray(self.captions[sent_ix]).astype('int64')
if (sent_caption == 0).sum() > 0:
print('ERROR: do not need END (0) token', sent_caption)
num_words = len(sent_caption)
# pad with 0s (i.e., '<end>')
x = np.zeros((cfg.TEXT.WORDS_NUM, 1), dtype='int64') # (18,1)
x_len = num_words
if num_words <= cfg.TEXT.WORDS_NUM:
x[:num_words, 0] = sent_caption
else:
ix = list(np.arange(num_words)) # 1, 2, 3,..., maxNum
np.random.shuffle(ix)
ix = ix[:cfg.TEXT.WORDS_NUM]
ix = np.sort(ix) # randomly remove words from raw sentence
x[:, 0] = sent_caption[ix]
x_len = cfg.TEXT.WORDS_NUM
return x, x_len
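# --- Worked example (illustration only) ---
# get_caption() pads short sentences with 0 ('<end>') and randomly drops words
# from long ones. With cfg.TEXT.WORDS_NUM = 18:
#   a 5-word caption  -> rows 0..4 of x hold the word indices, the rest are 0,
#                        x_len = 5
#   a 25-word caption -> 18 of the 25 positions are kept (relative order
#                        preserved), x_len = 18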
def get_knowledge(self, key):
# get reference images by class
_names = []
if self.name2classid:
cls_name = list(self.name2classid[key+'.jpg'])[0]
choices = self.kb[cfg.DATASET_NAME][str(cls_name)]
for _ in choices:
split = 'train2014' if 'train' in _ else 'val2014'
_names.append(os.path.join(cfg.KNOWLEDGE_FEAT, split, _))
else:
cls_name = os.path.split(key)[0]
choices = self.kb[cfg.DATASET_NAME][cls_name]
for _ in choices:
_names.append(os.path.join(cfg.KNOWLEDGE_FEAT, cls_name, _))
kb_keys = [np.load(_[:-3] + 'pool.npy') for _ in _names]
kb_values = [np.load(_[:-3] + 'layer4.npy') for _ in _names]
kb_keys = np.squeeze(np.array(kb_keys)) # (K,2048) # (K,300)
kb_values = np.array(kb_values) # (K,2048,7,7) # (K,300,17,17)
priors = [kb_keys, kb_values]
kb_candi = ['%s/%s' % (cls_name, img_name) for img_name in choices]
return priors, kb_candi
def get_knowledge_new(self, index):
# get reference images by text
if 'rand' in cfg.CONFIG_NAME and 'all' in cfg.CONFIG_NAME:
rand_image = self.kb[random.randint(0, len(self.kb))]
candidates = rand_image
if 'inception' in cfg.KNOWLEDGE_FEAT:
kb_path = os.path.join(cfg.KNOWLEDGE_FEAT, rand_image)
kb_global = np.load(kb_path + '.300g.npy') # (K,300)
kb_region = np.load(kb_path + '.300l.npy') # (K,300,17,17)
elif 'resnet' in cfg.KNOWLEDGE_FEAT:
if 'bird' in cfg.DATASET_NAME:
kb_path = os.path.join(cfg.KNOWLEDGE_FEAT, rand_image)
kb_global = np.load(kb_path + '.pool.npy') # (K,2048)
kb_region = np.load(kb_path + '.layer4.npy') # (K,2048,7,7)
else:
if 'train' in rand_image:
kb_path = os.path.join(cfg.KNOWLEDGE_FEAT, 'train2014', rand_image)
else:
kb_path = os.path.join(cfg.KNOWLEDGE_FEAT, 'val2014', rand_image)
kb_global = np.load(kb_path + '.pool.npy') # (K,2048)
kb_region = np.load(kb_path + '.layer4.npy') # (K,2048,7,7)
else:
kbase = self.kb[index] # [[top1_qid, top1_sim, filename], ..., [top10_.. ]]
candidates = [_[2] for _ in kbase]
if 'inception' in cfg.KNOWLEDGE_FEAT:
_names = [os.path.join(cfg.KNOWLEDGE_FEAT, _[2]) for _ in kbase]
kb_global = [np.load(_ + '.300g.npy') for _ in _names] # (K,300)
kb_region = [np.load(_ + '.300l.npy') for _ in _names] # (K,300,17,17)
elif 'resnet' in cfg.KNOWLEDGE_FEAT:
if 'bird' in cfg.DATASET_NAME:
_names = [os.path.join(cfg.KNOWLEDGE_FEAT, _[2]) for _ in kbase] # 'class_id/filename'
kb_global = [np.load(_ + '.pool.npy') for _ in _names] # (K,2048)
kb_region = [np.load(_ + '.layer4.npy') for _ in _names] # (K,2048,7,7)
else:
_names = []
for _ in kbase:
filename = _[2]
if 'train' in filename:
_names.append(os.path.join(cfg.KNOWLEDGE_FEAT, 'train2014', filename))
else:
_names.append(os.path.join(cfg.KNOWLEDGE_FEAT, 'val2014', filename))
kb_global = [np.load(_ + '.pool.npy') for _ in _names] # (K,2048)
kb_region = [np.load(_ + '.layer4.npy') for _ in _names] # (K,2048,7,7)
kb_global = np.squeeze(np.array(kb_global)) # (K,2048)
kb_region = np.array(kb_region) # (K,2048,7,7)
priors = [kb_global, kb_region]
return priors, candidates
def __getitem__(self, index):
if 'train' in self.split:
# sent
cap, cap_len = self.get_caption(index)
# img
img_ix = index // cfg.TEXT.CAPTIONS_PER_IMAGE
key = self.filenames[img_ix]
cls_id = self.class_id[img_ix]
q_idx = index
else:
# img
key = self.filenames[index]
cls_id = self.class_id[index]
# random select a sentence
sent_ix = random.randint(0, self.embeddings_num)
new_sent_ix = index * self.embeddings_num + sent_ix
cap, cap_len = self.get_caption(new_sent_ix)
q_idx = new_sent_ix
if self.bbox is not None:
bbox = self.bbox[key]
data_dir = '%s/CUB_200_2011' % self.data_dir
else:
bbox = None
data_dir = self.data_dir
#
img_name = '%s/images/%s.jpg' % (data_dir, key)
imgs = get_imgs(img_name, self.imsize,
bbox, self.transform, normalize=self.norm)
# priors, candidates = self.get_knowledge(key)
priors, candidates = self.get_knowledge_new(q_idx)
return imgs, cap, cap_len, cls_id, key, priors, candidates
def __len__(self):
if 'train' in self.split:
return len(self.captions)
else:
return len(self.filenames)
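# --- Illustrative note (assumption, not part of the original file) ---
# Rough shape of one training sample returned by __getitem__, inferred from
# the comments above; actual sizes depend on cfg:
#   imgs, cap, cap_len, cls_id, key, priors, candidates = dataset[index]
#   imgs       -> list of 3 tensors: (3,64,64), (3,128,128), (3,256,256)
#   cap        -> (cfg.TEXT.WORDS_NUM, 1) int64 word indices, zero-padded
#   priors     -> [kb_global (K,2048), kb_region (K,2048,7,7)] numpy arrays
#   candidates -> list of K reference image names from the knowledge base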
|
StarcoderdataPython
|
3244236
|
import torch
class AnchorGenerator(object):
def __init__(self, anchor_range, anchor_generator_config):
super().__init__()
self.anchor_generator_cfg = anchor_generator_config # list:3
# Anchor distribution range in the point cloud, e.g. [0, -39.68, -3, 69.12, 39.68, 1] or [0, -40, -3, 70.4, 40, 1]
self.anchor_range = anchor_range
# Length/width/height of every anchor scale taken from the config
# list:3 --> car, pedestrian, cyclist [[[3.9, 1.6, 1.56]],[[0.8, 0.6, 1.73]],[[1.76, 0.6, 1.73]]]
self.anchor_sizes = [config['anchor_sizes'] for config in anchor_generator_config]
# Anchor rotation angles, given in radians, i.e. 0 and 90 degrees
# list:3 --> [[0, 1.57],[0, 1.57],[0, 1.57]]
self.anchor_rotations = [config['anchor_rotations'] for config in anchor_generator_config]
# Initial z position of each anchor in the point cloud; in KITTI the z range is -3 m to 1 m
# list:3 --> [[-1.78],[-0.6],[-0.6]]
self.anchor_heights = [config['anchor_bottom_heights'] for config in anchor_generator_config]
# Whether each anchor should be placed at the center of its grid cell;
# e.g. for grid point [1, 1], center alignment adds 0.5 to give [1.5, 1.5]
# Defaults to False
# list:3 --> [False, False, False]
self.align_center = [config.get('align_center', False) for config in anchor_generator_config]
assert len(self.anchor_sizes) == len(self.anchor_rotations) == len(self.anchor_heights)
self.num_of_anchor_sets = len(self.anchor_sizes) # 3
def generate_anchors(self, grid_sizes):
assert len(grid_sizes) == self.num_of_anchor_sets
# 1. Initialization
all_anchors = []
num_anchors_per_location = []
# 2. Generate anchors for the three classes, one class at a time
for grid_size, anchor_size, anchor_rotation, anchor_height, align_center in zip(
grid_sizes, self.anchor_sizes, self.anchor_rotations, self.anchor_heights, self.align_center):
# 2 = 2x1x1 --> 2 anchors per location; the 2 corresponds to the two rotations
num_anchors_per_location.append(len(anchor_rotation) * len(anchor_size) * len(anchor_height))
# Branch on whether anchors are aligned to the center of each grid cell
if align_center:
x_stride = (self.anchor_range[3] - self.anchor_range[0]) / grid_size[0]
y_stride = (self.anchor_range[4] - self.anchor_range[1]) / grid_size[1]
# Center alignment: shift by half a grid cell
x_offset, y_offset = x_stride / 2, y_stride / 2
else:
# 2.1 Compute the real size of each grid cell in point-cloud space,
# used to map every anchor back to actual point-cloud coordinates
# (69.12 - 0) / (216 - 1) = 0.3214883848678234 (meters)
x_stride = (self.anchor_range[3] - self.anchor_range[0]) / (grid_size[0] - 1)
# (39.68 - (-39.68)) / (248 - 1) = 0.3212955490297634 (meters)
y_stride = (self.anchor_range[4] - self.anchor_range[1]) / (grid_size[1] - 1)
# Since there is no center alignment, every point's offset from the top-left corner is 0
x_offset, y_offset = 0, 0
# 2.2 Generate the 1-D x_shifts, y_shifts and z_shifts
# Step by x_stride from self.anchor_range[0] + x_offset to self.anchor_range[3] + 1e-5
# to produce the x coordinates --> 216 points in [0, 69.12]
x_shifts = torch.arange(
self.anchor_range[0] + x_offset, self.anchor_range[3] + 1e-5, step=x_stride, dtype=torch.float32,
).cuda()
# Produce the y coordinates --> 248 points [0, 79.36]
y_shifts = torch.arange(
self.anchor_range[1] + y_offset, self.anchor_range[4] + 1e-5, step=y_stride, dtype=torch.float32,
).cuda()
"""
new_tensor函数可以返回一个新的张量数据,该张量数据与指定的tensor具有相同的属性
如拥有相同的数据类型和张量所在的设备情况等属性;
并使用anchor_height数值个来填充这个张量
"""
# [-1.78]
z_shifts = x_shifts.new_tensor(anchor_height)
# num_anchor_size = 1
# num_anchor_rotation = 2
num_anchor_size, num_anchor_rotation = anchor_size.__len__(), anchor_rotation.__len__() # 1, 2
# [0, 1.57] in radians
anchor_rotation = x_shifts.new_tensor(anchor_rotation)
# [[3.9, 1.6, 1.56]]
anchor_size = x_shifts.new_tensor(anchor_size)
# 2.3 Build the grid coordinates with meshgrid
x_shifts, y_shifts, z_shifts = torch.meshgrid([
x_shifts, y_shifts, z_shifts
])
# meshgrid can be seen as broadcasting each axis to the full grid, e.g.:
# x: (216,) --> (216,1,1) --> (216,248,1)
# y: (248,) --> (1,248,1) --> (216,248,1)
# z: (1,)   --> (1,1,1)   --> (216,248,1)
# 2.4 Stack and combine all anchor dimensions to produce the final anchors (1,432,496,1,2,7)
# 2.4.1 Stack the anchor positions
# [x, y, z, 3] --> [216, 248, 1, 3]: the position of every anchor,
# where the 3 holds the (z, y, x) values of that point in the mapped tensor
anchors = torch.stack((x_shifts, y_shifts, z_shifts), dim=-1)
# 2.4.2 Combine anchor positions and sizes: expand and repeat the anchors to matching shapes (except the last dim), then concatenate
# (216, 248, 1, 3) --> (216, 248, 1, 1, 3)
# dims: z, y, x, number of anchor sizes for this class, anchor position
anchors = anchors[:, :, :, None, :].repeat(1, 1, 1, anchor_size.shape[0], 1)
# (1, 1, 1, 1, 3) --> (216, 248, 1, 1, 3)
anchor_size = anchor_size.view(1, 1, 1, -1, 3).repeat([*anchors.shape[0:3], 1, 1])
# The generated anchors need both position and size --> (216, 248, 1, 1, 6)
# The last dimension holds (z, y, x, l, w, h)
anchors = torch.cat((anchors, anchor_size), dim=-1)
# 2.4.3 Combine positions/sizes with the rotation angles
# Add a new dimension before the last one, then repeat it once
# (216, 248, 1, 1, 2, 6): length, width, depth, number of anchor sizes, number of rotations, 6 anchor parameters
anchors = anchors[:, :, :, :, None, :].repeat(1, 1, 1, 1, num_anchor_rotation, 1)
# (216, 248, 1, 1, 2, 1): rotation angles of the two anchor orientations
anchor_rotation = anchor_rotation.view(1, 1, 1, 1, -1, 1).repeat(
[*anchors.shape[0:3], num_anchor_size, 1, 1])
# [z, y, x, num_size, num_rot, 7] --> (216, 248, 1, 1, 2, 7)
# The last dimension is position + size + rotation: (z, y, x, l, w, h, theta)
anchors = torch.cat((anchors, anchor_rotation), dim=-1) # [z, y, x, num_size, num_rot, 7]
# 2.5 Permute the anchor dimensions
# [z, y, x, num_anchor_size, num_rot, 7] --> [x, y, z, num_anchor_size, num_rot, 7]
# The last dimension represents [x, y, z, dx, dy, dz, rot]
anchors = anchors.permute(2, 1, 0, 3, 4, 5).contiguous()
# Shift each class's anchors along z from the anchor bottom to the anchor center
# car: -1.78 + 1.56/2 = -1.0
# pedestrian/cyclist: -0.6 + 1.73/2 = 0.265
anchors[..., 2] += anchors[..., 5] / 2
all_anchors.append(anchors)
# all_anchors: [(1,248,216,1,2,7),(1,248,216,1,2,7),(1,248,216,1,2,7)]
# num_anchors_per_location:[2,2,2]
return all_anchors, num_anchors_per_location
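# --- Worked example (illustration, not from the original file) ---
# For the car class on a 216 x 248 BEV grid with 1 anchor size and 2 rotations,
# generate_anchors() produces a tensor of shape (1, 248, 216, 1, 2, 7), i.e.
# 216 * 248 * 1 * 2 = 107,136 anchors, each encoded as (x, y, z, dx, dy, dz, rot).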
if __name__ == '__main__':
from easydict import EasyDict
config = [
EasyDict({
'anchor_sizes': [[2.1, 4.7, 1.7], [0.86, 0.91, 1.73], [0.84, 1.78, 1.78]],
'anchor_rotations': [0, 1.57],
'anchor_heights': [0, 0.5]
})
]
A = AnchorGenerator(
anchor_range=[-75.2, -75.2, -2, 75.2, 75.2, 4],
anchor_generator_config=config
)
import pdb
pdb.set_trace()
A.generate_anchors([[188, 188]])
|
StarcoderdataPython
|
3378540
|
import os
import subprocess
from shulkr.minecraft.source import detect_mappings, generate_sources
class GitTree:
def __init__(self, name: str = None) -> None:
self.name = name
class SubprocessMock:
def __init__(self, returncode=0, stderr=None):
self.returncode = returncode
self.stderr = stderr
def test_detect_mappings_with_yarn_commit_returns_yarn(mocker, nonempty_repo):
mocker.patch.object(nonempty_repo.head.commit.tree, 'trees', [GitTree('src')])
assert detect_mappings() == 'yarn'
def test_detect_mappings_with_mojang_commit_returns_mojang(mocker, nonempty_repo):
mocker.patch.object(nonempty_repo.head.commit.tree, 'trees', [GitTree('client'), GitTree('server')])
assert detect_mappings() == 'mojang'
def test_generate_sources_with_yarn_runs_decompiler(mocker, empty_repo, versions):
subprocess_run = mocker.patch(
'subprocess.run',
return_value=SubprocessMock()
)
mocker.patch('shutil.rmtree')
mocker.patch('shutil.move')
mocker.patch('os.makedirs')
generate_sources(versions.snapshot, 'yarn')
decompiler_dir = os.path.join(empty_repo.working_tree_dir, '.yarn')
subprocess_run.assert_called_once_with(
['./gradlew', 'decompileCFR'],
stdout=subprocess.DEVNULL,
stderr=subprocess.PIPE,
cwd=decompiler_dir
)
def test_generate_sources_with_yarn_moves_sources_to_repo(mocker, empty_repo, versions):
mocker.patch(
'subprocess.run',
return_value=SubprocessMock()
)
mocker.patch('shutil.rmtree')
mocker.patch('os.makedirs')
move = mocker.patch('shutil.move')
generate_sources(versions.snapshot, 'yarn')
decompiler_dir = os.path.join(empty_repo.working_tree_dir, '.yarn')
move.assert_called_once_with(
os.path.join(decompiler_dir, 'namedSrc'),
os.path.join(empty_repo.working_tree_dir, 'src')
)
def test_generate_sources_with_mojang_runs_decompiler(mocker, empty_repo, versions):
subprocess_run = mocker.patch(
'subprocess.run',
return_value=SubprocessMock()
)
mocker.patch('shutil.rmtree')
mocker.patch('shutil.move')
mocker.patch('os.makedirs')
generate_sources(versions.snapshot, 'mojang')
decompiler_dir = os.path.join(empty_repo.working_tree_dir, '.DecompilerMC')
calls = [
mocker.call(
[
'python3',
'main.py',
'--mcv',
str(versions.snapshot),
'-s',
env,
'-c',
'-f',
'-q'
],
stderr=subprocess.PIPE,
cwd=decompiler_dir
)
for env in ('client', 'server')
]
subprocess_run.assert_has_calls(calls)
def test_generate_sources_with_mojang_moves_sources_to_repo(mocker, empty_repo, versions):
mocker.patch(
'subprocess.run',
return_value=SubprocessMock()
)
mocker.patch('shutil.rmtree')
mocker.patch('os.makedirs')
move = mocker.patch('shutil.move')
generate_sources(versions.snapshot, 'mojang')
decompiler_dir = os.path.join(empty_repo.working_tree_dir, '.DecompilerMC')
calls = [
mocker.call(
os.path.join(decompiler_dir, 'src', str(versions.snapshot), env),
os.path.join(empty_repo.working_tree_dir, env, 'src')
)
for env in ('client', 'server')
]
move.assert_has_calls(calls)
|
StarcoderdataPython
|
1681967
|
<filename>scripts/filter.py
#!/usr/bin/env python
#--------Include modules---------------
from copy import copy
import rospy
from visualization_msgs.msg import Marker
from geometry_msgs.msg import Point
from nav_msgs.msg import OccupancyGrid
from geometry_msgs.msg import PointStamped
import tf
from numpy import array,vstack,delete
from functions import gridValue,informationGain
from sklearn.cluster import MeanShift
from rrt_exploration.msg import PointArray
# Subscribers' callbacks------------------------------
mapData=OccupancyGrid()
frontiers=[]
globalmaps=[]
def callBack(data,args):
global frontiers,min_distance
transformedPoint=args[0].transformPoint(args[1],data)
x=[array([transformedPoint.point.x,transformedPoint.point.y])]
if len(frontiers)>0:
frontiers=vstack((frontiers,x))
else:
frontiers=x
def mapCallBack(data):
global mapData
mapData=data
def globalMap(data):
global global1,globalmaps,litraIndx,namespace_init_count,n_robots
global1=data
if n_robots>1:
indx=int(data._connection_header['topic'][litraIndx])-namespace_init_count
elif n_robots==1:
indx=0
globalmaps[indx]=data
# Node----------------------------------------------
def node():
global frontiers,mapData,global1,global2,global3,globalmaps,litraIndx,n_robots,namespace_init_count
rospy.init_node('filter', anonymous=False)
# fetching all parameters
map_topic= rospy.get_param('~map_topic','/map_merge/map')
threshold= rospy.get_param('~costmap_clearing_threshold',70)
info_radius= rospy.get_param('~info_radius',1.0) #this can be smaller than the laser scanner range, >> smaller >>less computation time>> too small is not good, info gain won't be accurate
goals_topic= rospy.get_param('~goals_topic','/detected_points')
n_robots = rospy.get_param('~n_robots',1)
namespace = rospy.get_param('~namespace','')
namespace_init_count = rospy.get_param('~namespace_init_count',1)
rateHz = rospy.get_param('~rate',2)
litraIndx=len(namespace)
rate = rospy.Rate(rateHz)
#-------------------------------------------
rospy.Subscriber(map_topic, OccupancyGrid, mapCallBack)
#---------------------------------------------------------------------------------------------------------------
for i in range(0,n_robots):
globalmaps.append(OccupancyGrid())
if len(namespace) > 0:
for i in range(0,n_robots):
rospy.Subscriber(namespace+str(i+namespace_init_count)+'/move_base/global_costmap/costmap', OccupancyGrid, globalMap)
elif len(namespace)==0:
rospy.Subscriber('/move_base/global_costmap/costmap', OccupancyGrid, globalMap)
#wait if map is not received yet
while (len(mapData.data)<1):
pass
#wait if any of robots' global costmap map is not received yet
for i in range(0,n_robots):
while (len(globalmaps[i].data)<1):
pass
global_frame="/"+mapData.header.frame_id
tfLisn=tf.TransformListener()
if len(namespace) > 0:
for i in range(0,n_robots):
tfLisn.waitForTransform(global_frame[1:], namespace+str(i+namespace_init_count)+'/base_footprint', rospy.Time(0),rospy.Duration(10.0))
elif len(namespace)==0:
tfLisn.waitForTransform(global_frame[1:], '/base_footprint', rospy.Time(0),rospy.Duration(10.0))
rospy.Subscriber(goals_topic, PointStamped, callback=callBack,callback_args=[tfLisn,global_frame[1:]])
pub = rospy.Publisher('frontiers', Marker, queue_size=10)
pub2 = rospy.Publisher('centroids', Marker, queue_size=10)
filterpub = rospy.Publisher('filtered_points', PointArray, queue_size=10)
rospy.loginfo("the map and global costmaps are received")
# wait if no frontier is received yet
while len(frontiers)<1:
pass
points=Marker()
points_clust=Marker()
#Set the frame ID and timestamp. See the TF tutorials for information on these.
points.header.frame_id= mapData.header.frame_id
points.header.stamp= rospy.Time.now()
points.ns= "markers2"
points.id = 0
points.type = Marker.POINTS
#Set the marker action for latched frontiers. Options are ADD, DELETE, and new in ROS Indigo: 3 (DELETEALL)
points.action = Marker.ADD;
points.pose.orientation.w = 1.0
points.scale.x=0.2
points.scale.y=0.2
points.color.r = 255.0/255.0
points.color.g = 255.0/255.0
points.color.b = 0.0/255.0
points.color.a=1;
points.lifetime = rospy.Duration();
p=Point()
p.z = 0;
pp=[]
pl=[]
points_clust.header.frame_id= mapData.header.frame_id
points_clust.header.stamp= rospy.Time.now()
points_clust.ns= "markers3"
points_clust.id = 4
points_clust.type = Marker.POINTS
#Set the marker action for centroids. Options are ADD, DELETE, and new in ROS Indigo: 3 (DELETEALL)
points_clust.action = Marker.ADD;
points_clust.pose.orientation.w = 1.0;
points_clust.scale.x=0.2;
points_clust.scale.y=0.2;
points_clust.color.r = 0.0/255.0
points_clust.color.g = 255.0/255.0
points_clust.color.b = 0.0/255.0
points_clust.color.a=1;
points_clust.lifetime = rospy.Duration();
temppoint=PointStamped()
temppoint.header.frame_id= mapData.header.frame_id
temppoint.header.stamp=rospy.Time(0)
temppoint.point.z=0.0
arraypoints=PointArray()
tempPoint=Point()
tempPoint.z=0.0
#-------------------------------------------------------------------------
#--------------------- Main Loop -------------------------------
#-------------------------------------------------------------------------
while not rospy.is_shutdown():
#-------------------------------------------------------------------------
#Clustering frontier points
centroids=[]
front=copy(frontiers)
if len(front)>1:
ms = MeanShift(bandwidth=0.3)
ms.fit(front)
centroids= ms.cluster_centers_ #centroids array is the centers of each cluster
#if there is only one frontier no need for clustering, i.e. centroids=frontiers
if len(front)==1:
centroids=front
frontiers=copy(centroids)
#-------------------------------------------------------------------------
#clearing old frontiers
z=0
while z<len(centroids):
cond=False
temppoint.point.x=centroids[z][0]
temppoint.point.y=centroids[z][1]
for i in range(0,n_robots):
transformedPoint=tfLisn.transformPoint(globalmaps[i].header.frame_id,temppoint)
x=array([transformedPoint.point.x,transformedPoint.point.y])
cond=(gridValue(globalmaps[i],x)>threshold) or cond
if (cond or (informationGain(mapData,[centroids[z][0],centroids[z][1]],info_radius*0.5))<0.2):
centroids=delete(centroids, (z), axis=0)
z=z-1
z+=1
#-------------------------------------------------------------------------
#publishing
arraypoints.points=[]
for i in centroids:
tempPoint.x=i[0]
tempPoint.y=i[1]
arraypoints.points.append(copy(tempPoint))
filterpub.publish(arraypoints)
pp=[]
for q in range(0,len(frontiers)):
p.x=frontiers[q][0]
p.y=frontiers[q][1]
pp.append(copy(p))
points.points=pp
pp=[]
for q in range(0,len(centroids)):
p.x=centroids[q][0]
p.y=centroids[q][1]
pp.append(copy(p))
points_clust.points=pp
pub.publish(points)
pub2.publish(points_clust)
rate.sleep()
#-------------------------------------------------------------------------
if __name__ == '__main__':
try:
node()
except rospy.ROSInterruptException:
pass
|
StarcoderdataPython
|
150763
|
import csv
import os
import time
from SignalModel import TrainingModel
from SignalModel import SignalsClassifier
import re
import pandas as pd
import SymSpell
from utils.utils import read_glove_vecs
from Spec import Specification
#0019_DTC, 0027_NET, 0054_HBA, 0057_HRB, 0058_HFC, 0061_HHC, 0062_AVH, 0068_AEB, 0069_CDP
#0106_PT, 0110_SAS, 0188_YRS, 0518_HMI,0614_ABS, 0618_SCM, 0865_BSM, 1048_EPM, 1138_LeanTCS
RELATIVE_SW_SPEC_PATH = "/zQC_Spec/SwT/"
RELATIVE_SYT_SPEC_PATH = "/zQC_Spec/SysT/"
SW_DataSPEC_PATH = "./zQC_Spec/z_SwT_Data/test.csv" #SwT_Data
SyT_DataSPEC_PATH = "./zQC_Spec/z_SysT_Data/test.csv"
EMBEDDED_PATH = "./Data/glove_6B_50d.txt"
NEWWORDS_PATH = "./Data/NewWords.txt"
FUNCTION_PATH = "./Signals/FunctionNames.txt"
ENUM_PATH = "./Signals/ENUMs.txt"
SHEET_NAME = "Test: Description"
cwd = os.getcwd() # Get the current working directory (cwd)
specs = []
def getExceptedWords(text, embeddedPath = EMBEDDED_PATH, newWordsPath = NEWWORDS_PATH):
NotInDictWords = []
setOfWords = set(text.split())
word_to_index, __, __ = read_glove_vecs(embeddedPath, newWordsPath)
all_words = word_to_index.keys()
NotInDictWords = [word for word in setOfWords if word not in all_words]
return NotInDictWords, all_words
def getSheetSpec(specPath, display = False):
df = pd.read_excel(specPath, sheet_name = 'Sheet1', encoding = "utf-8")
descriptions = df[SHEET_NAME]; sheet = ""
number_of_tests = 0
for des in descriptions:
spec = Specification(des)
specs.append(spec)
sheet = os.linesep.join([sheet, spec.preTrainingData + spec.stiTrainingData])
number_of_tests += 1
if display:
print("STT is {}".format(number_of_tests))
print(spec.preTrainingData + spec.stiTrainingData)
return sheet
def enum_function(labels):
label_dict = {}
remain_labels = labels.copy()
with open(FUNCTION_PATH, 'r') as file:
FUNCTION_TEXT = file.read().lower()
with open(ENUM_PATH, 'r') as file:
ENUM_TEXT = file.read().lower()
FUNCTION_TEXTs = FUNCTION_TEXT.split()
ENUM_TEXTs = ENUM_TEXT.split()
for label in labels:
if label.lower() in FUNCTION_TEXTs:
label_dict[label] = "function"
remain_labels.remove(label)
for label in labels:
if label.lower() in ENUM_TEXTs:
label_dict[label] = "value"
remain_labels.remove(label)
return label_dict, remain_labels
def allPassFilter(sheet):
signals = []
unknowWords, all_words = getExceptedWords(sheet.lower(), EMBEDDED_PATH, NEWWORDS_PATH)
accepted_signals_dict, resignals = enum_function(unknowWords)
spellchecked_dict = SymSpell.checker(resignals)
for key in spellchecked_dict:
spellchecked_dict[key] = None if spellchecked_dict[key] not in all_words else spellchecked_dict[key]
for key in spellchecked_dict:
if not spellchecked_dict[key]:
signals.append(key)
else:
accepted_signals_dict[key] = spellchecked_dict[key]
spellchecked_dict.clear()
proper_signals = SignalsClassifier(signals)
signals_dict = dict(zip(signals, proper_signals))
signals_dict.update(accepted_signals_dict)
return signals_dict
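# --- Illustrative trace (assumption, token names are made up) ---
# Rough flow of allPassFilter() for a single out-of-vocabulary token:
#   token not in GloVe -> not listed as a function/ENUM name ->
#   SymSpell proposes a correction; if the correction is a GloVe word it is
#   accepted, otherwise the raw token is handed to SignalsClassifier and
#   mapped to a predicted signal label in the returned signals_dict.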
def CollectData():
sheet = None
csvFile = open(SyT_DataSPEC_PATH, 'a')
for filename in os.listdir(os.getcwd() + RELATIVE_SYT_SPEC_PATH):
if filename in ('.DS_Store'):
pass
else:
print("Handling file {}".format(filename))
sheet = getSheetSpec(os.getcwd() + RELATIVE_SYT_SPEC_PATH + filename, True)
if not sheet:
continue
signals_dict = allPassFilter(sheet)
print(signals_dict)
for spec in specs:
for label, new_label in signals_dict.items():
spec.preTrainingData = re.sub(r'\b' + label + r'\b', new_label, spec.preTrainingData, flags = re.IGNORECASE)
spec.stiTrainingData = re.sub(r'\b' + label + r'\b', new_label, spec.stiTrainingData, flags=re.IGNORECASE)
preTextList = spec.preTrainingData.splitlines()
stiTextList = spec.stiTrainingData.splitlines()
for preText in preTextList:
csvFile.write("\"" + preText + "\"\n")
for preText in stiTextList:
csvFile.write("\"" + preText + "\"\n")
specs.clear()
csvFile.close()
def get_spec_list(relative_path, spec_display = False):
sheet = None
specs.clear()
sheet = getSheetSpec(os.getcwd() + relative_path, spec_display)
signals_dict = allPassFilter(sheet)
for spec in specs:
for label, new_label in signals_dict.items():
spec.preTrainingData = re.sub(r'\b' + label + r'\b', new_label, spec.preTrainingData, flags = re.IGNORECASE)
spec.stiTrainingData = re.sub(r'\b' + label + r'\b', new_label, spec.stiTrainingData, flags=re.IGNORECASE)
spec.preTextList = spec.preTrainingData.splitlines()
spec.stiTestList = spec.stiTrainingData.splitlines()
spec.preTextList = list(filter(None, spec.preTextList))
spec.stiTestList = list(filter(None, spec.stiTestList))
spec.content = None #don't need anymore.
return specs
if __name__ == "__main__":
#CollectData()
myspecs = get_spec_list('/zQC_Spec/DTC.xlsx')
|
StarcoderdataPython
|
106155
|
<reponame>jkingsman/mockmail.io
import smtpd
import random
import pprint
import asyncore
from email.parser import Parser
from twisted.internet import task
from Config import bindingPort, bindingIP, dropSize
staged = []
class MailboxHandler():
def __init__(self, queue):
self.binding = (bindingIP, bindingPort)
def stagedToQueue():
while len(staged) > 0:
queue.put(staged.pop())
lc = task.LoopingCall(stagedToQueue)
lc.start(2)
server = CustomSMTPServer(self.binding, None)
print 'SMTP starting on', self.binding[1]
asyncore.loop(timeout=1)
class CustomSMTPServer(smtpd.SMTPServer):
def process_message(self, peer, mailFrom, mailTo, data):
# handle drop conditions
if len(data) > dropSize:
# too big; drop
print 'Dropping message to', mailTo, ': too big'
return
# begin assembling email object
parser = Parser()
print 'Receiving message from:', mailFrom, peer, 'to', mailTo
email = parser.parsestr(data)
emailObj = {}
emailObj['raw'] = data
emailObj['from'] = email.get('From')
emailObj['fromIP'] = peer
emailObj['to'] = email.get('To')
emailObj['subject'] = email.get('Subject')
emailObj['transferEncoding'] = email.get('Content-Transfer-Encoding')
emailObj['attachments'] = []
if email.is_multipart():
# loop through each chunk of the body
for index, part in enumerate(email.get_payload()):
if index == 0:
# first object of multipart is probably body
emailObj['body'] = part.get_payload()
else:
attachment = {}
attachment['name'] = part.get_filename()
attachment['type'] = part.get_content_type()
attachment['data'] = part.get_payload()
attachment['transferEncoding'] = part.get('Content-Transfer-Encoding')
emailObj['attachments'].append(attachment)
else:
# not multipart; grab the body and run
emailObj['body'] = email.get_payload(decode=True)
staged.append(emailObj)
return
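# --- Illustrative test (assumption, not part of the original file) ---
# A quick way to exercise the server locally with the standard library; the
# host and addresses below are placeholders.
#
# import smtplib
# msg = "From: a@example.com\r\nTo: b@mockmail.io\r\nSubject: hi\r\n\r\nbody"
# smtplib.SMTP('localhost', bindingPort).sendmail(
#     'a@example.com', ['b@mockmail.io'], msg)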
|
StarcoderdataPython
|
1631103
|
<gh_stars>0
from prefixspan import PrefixSpan
w_shot = 2
w_pass = 0.5
w_other = 1
def getweight(e):
if e < 10 :
return w_other
elif e < 35:
return w_pass
else:
return w_shot
# Pattern class
class Pattern:
def __init__(self, freq, flow):
super(Pattern, self).__init__()
self.freq = freq
self.flow = flow
self.score = self.score()
# Compute the pattern's score directly in the constructor to make sorting easier
def score(self):
event_value = 0
for x in self.flow:
event_value += getweight(x)
return self.freq * event_value
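# --- Worked example (illustration) ---
# For a pattern with flow [5, 20, 40] and freq 10:
#   weights = w_other + w_pass + w_shot = 1 + 0.5 + 2 = 3.5
#   score   = 10 * 3.5 = 35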
def loadFile(filepath):
"从指定文件名载入数据到列表"
db = []
with open(filepath) as file:
line = file.readline()
while line:
db.append(eval(line))
line = file.readline()
return db
def generateFilename(tno,cno):
"根据队编号与聚类编号产生文件名"
return "Team"+str(tno)+"Cluster"+str(cno)+".txt";
path = "Cluster/"
list_p = []
# tno stands for the team number
# cno stands for the cluster number
for tno in range(1,3):
for cno in range(0,5):
filepath = path+generateFilename(tno,cno)
db = loadFile(filepath)
ps = PrefixSpan(db)
for x in range(0,10):
list_p.append( Pattern(ps.topk(10)[x][0], ps.topk(10)[x][1]) )
# Print the 10 most frequent patterns in the current cluster
# print(ps.topk(10))
# Sort patterns by score
list_p = sorted(list_p, key=lambda x:x.score, reverse=True)
print("#######################################")
for x in list_p:
print("score:", x.score, "freq:", x.freq)
|
StarcoderdataPython
|
158840
|
"""Windows base service implementation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import errno
import functools
import logging
import os
# Disable E0401: unable to import on linux
import win32security # pylint: disable=E0401
from treadmill import dirwatch
from treadmill import fs
from . import _base_service
_LOGGER = logging.getLogger(__name__)
class WindowsResourceService(_base_service.ResourceService):
"""Windows server class for all Treadmill services.
/service_dir/resources/<containerid>-<uid>/ ->
/apps/<containerid>/rsrc/req-<svc_name>/
/apps/<container>/rsrc/<svc_name>/
request.yml
reply.yml
svc_req_id
"""
__slots__ = ()
def status(self, timeout=30):
"""Query the status of the resource service.
"""
# TODO: implement status for windows
return None
def _run(self, impl, watchdog_lease):
"""Linux implementation of run.
"""
# Run initialization
impl.initialize(self._dir)
watcher = dirwatch.DirWatcher(self._rsrc_dir)
# Call all the callbacks with the implementation instance
watcher.on_created = functools.partial(self._on_created, impl)
watcher.on_deleted = functools.partial(self._on_deleted, impl)
# NOTE: A modified request is treated as a brand new request
watcher.on_modified = functools.partial(self._on_created, impl)
# Before starting, check the request directory
svcs = self._check_requests()
# and "fake" a created event on all the existing requests
for existing_svcs in svcs:
self._on_created(impl, existing_svcs)
# Before starting, make sure backend state and service state are
# synchronized.
impl.synchronize()
loop_timeout = impl.WATCHDOG_HEARTBEAT_SEC // 2
while not self._is_dead:
if watcher.wait_for_events(timeout=loop_timeout):
watcher.process_events()
# Clean up stale requests
self._check_requests()
# Heartbeat
watchdog_lease.heartbeat()
def clt_update_request(self, req_id):
"""Update an existing request.
This should only be called by the client instance.
"""
_update_request(self._rsrc_dir, req_id)
# Disable W0223: pylint thinks that it is not abstract
# pylint: disable=W0223
class WindowsBaseResourceServiceImpl(_base_service.BaseResourceServiceImpl):
"""Base interface of Resource Service implementations.
"""
__slots__ = ()
def retry_request(self, rsrc_id):
"""Force re-evaluation of a request.
"""
_update_request(self._service_rsrc_dir, rsrc_id)
def _update_request(rsrc_dir, req_id):
"""Update an existing request.
This should only be called by the client instance.
"""
svc_req_lnk = os.path.join(rsrc_dir, req_id)
_LOGGER.debug('Updating %r: %r', req_id, svc_req_lnk)
# Remove any reply if it exists
fs.rm_safe(os.path.join(svc_req_lnk, _base_service.REP_FILE))
# NOTE: This does the equivalent of a touch on the symlink
try:
sd = win32security.GetFileSecurity(
svc_req_lnk,
win32security.DACL_SECURITY_INFORMATION
)
win32security.SetFileSecurity(
svc_req_lnk,
win32security.DACL_SECURITY_INFORMATION,
sd
)
except OSError as err:
if err.errno != errno.ENOENT:
raise
|
StarcoderdataPython
|
3207569
|
###############################################################################
#Author: <NAME>
#Filename: Library.py
#Application: DragonShout
#Date: June 2014
#Description: Contain the class for handling the library file (saving and
# loading)
#
# Class Library:
# _name as string
# Contains the name of the library which also is the filename
# on the drive
# _categories as list
# Contains the list of categories (instances of Category class)
# _filepath as string
# Contains the path to the library file on the drive
#
#Last edited: January 31st 2018
###############################################################################
import os
import json
from classes.interface import MainWindow
from classes.library.Category import Category
from classes.interface.Sampler import Sampler
class Library:
"""Class Library:
_name as string
Contains the name of the library which also is the filename
on the drive
_categories as list
Contains the list of categories
_filepath as string
Contains the path to the library file on the drive
"""
def load(cls, mainWindow:MainWindow, filepath: str):
"""Used to load the library from the hard drive (JSON).
Takes one parameter:
- filepath as string
"""
try :
with open(filepath, "r", encoding="utf-8") as json_file:
completeJSON = json.load(json_file)
library_object = Library.unserialize(mainWindow,completeJSON["Library"])
library_object.filepath = filepath
return library_object
except :
return False
load = classmethod(load)
#class method
def unserialize(cls,mainWindow:MainWindow, data: dict):
"""Used to unserialize json data to set Library instance attributs and create Category
class instances.
Takes two parameters:
- mainWindow as MainWindow instance
- data as dictionary
"""
if "__class__" in data :
if data["__class__"] == "Library":
#creating Library instance
library_object = Library(mainWindow, data["name"],"")
#unserializing categories for this library
category_list = []
for category in data["categories"]:
category_list.append(Category.unserialize(category))
library_object.categories = category_list
return library_object
return data
unserialize = classmethod(unserialize)
#constructor
def __init__(self, mainWindow:MainWindow, name:str, filepath: str):
self._name = name
self._filepath = filepath
self._categories = []
self.mainWindow = mainWindow
#accessors
def _get_name(self):
return self._name
def _get_filepath(self):
return self._filepath
def _get_categories(self):
return self._categories
#mutators
def _set_name(self, new_name: str):
self._name = new_name
def _set_filepath(self, new_filepath: str):
self._filepath = new_filepath
def _set_categories(self,categories: list):
self._categories = categories
#destructors
def _del_name(self):
del self._name
def _del_filepath(self):
del self._filepath
def _del_categories(self):
del self._categories
#help
def _help_name():
return "Contains the name of the library which also is the filename on the drive"
def _help_filepath():
return "Contains the filepath to the library file"
def _help_categories():
return "Contains the list of categories"
#properties
name = property(_get_name, _set_name, _del_name, _help_name)
filepath = property(_get_filepath, _set_filepath, _del_filepath, _help_filepath)
categories = property(_get_categories, _set_categories, _del_categories, _help_categories)
#methods
def add_category(self,name: str, iconPath: str=''):
"""Used to add a category to the library.
Takes two parameters:
- name as string
- iconPath as string (optional)
"""
self._categories.append(Category(name,iconPath))
def remove_category(self, name:str):
"""Used to remove a category from the library.
Takes one parameter:
- name as string.
Returns nothing.
"""
category = self.get_category(name)
if category :
self.categories.remove(category)
def get_category(self,name: str):
"""Used to get a specific category from the library.
Takes one parameter:
- name as string
"""
for category in self.categories:
if category.name == name:
return category
return False
def gather_library(self):
"""Used to gather the categories and tracks for this library.
Takes no parameter
Returns one dictionary containing the lists of tracks:
[category1 => [track, track, track], category2 => [track]]
"""
library = {}
for category in self.categories:
track_list = []
for track in category.tracks:
track_list.append(track.name)
library[category.name] = track_list
return library
#file handling
def save(self,filepath:str='./new_library.json'):
"""Used to save the library on the hard drive (JSON).
Takes one parameter:
- filepath as string
"""
self.filepath = filepath
serial_library = self.serialize()
serial_sampler = self.mainWindow.sampler.serialize()
completeJSON = {"Library" : serial_library,
"SampleSet" : serial_sampler}
#if the file doesn't exist, create it
if not(os.path.isfile(filepath)):
jsonFile = open(filepath,"x", encoding="utf-8")
jsonFile.close()
with open(filepath,"w", encoding="utf-8") as json_file:
json.dump(completeJSON,json_file, indent=4)
json_file.close()
def serialize(self):
"""Used to serialize instance data to JSON format
Takes no parameter.
"""
category_list = []
for category in self.categories:
category_list.append(category.serialize())
return {"__class__": "Library",
"name": self.name,
"categories": category_list}
|
StarcoderdataPython
|
3365702
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import print_function
import datetime
import os
import pyoozie
import pytest
import pywebhdfs.webhdfs
import six
@pytest.mark.skipif(not bool(os.environ.get(str('INTERACTIVE'))), reason='Requires INTERACTIVE=1 env var')
def test_pyoozie_typical_use_case():
# Configure
workflow_path = '/user/oozie/test_workflow.xml'
coord_path = '/user/oozie/test_coordinator.xml'
hadoop_user_name = 'root'
name_node = 'hdfs://hdfs-namenode:9000'
job_tracker = 'resourcemanager:8032'
configuration = {
'oozie.launcher.mapreduce.job.ubertask.enable': 'false',
}
# Create XML
workflow_xml = pyoozie.WorkflowApp(
name='integration',
entities=pyoozie.Action(
action=pyoozie.Shell(
exec_command="echo",
arguments="test",
name_node=name_node,
job_tracker=job_tracker,
configuration=configuration,
)
)
).xml(indent=True)
print('Created workflow XML')
print(workflow_xml.decode('utf-8'))
coord_xml = pyoozie.CoordinatorApp(
name='integration',
workflow_app_path=workflow_path,
frequency=5,
start=datetime.datetime.now(),
end=datetime.datetime(2115, 1, 1, 10, 56),
concurrency=1,
timeout=5,
execution_order=pyoozie.ExecutionOrder.LAST_ONLY,
).xml(indent=True)
print('Created coordinator XML')
print(coord_xml.decode('utf-8'))
# Store on HDFS
hdfs_client = pywebhdfs.webhdfs.PyWebHdfsClient(host='localhost', port='14000', user_name=hadoop_user_name)
for path, data in {workflow_path: workflow_xml, coord_path: coord_xml}.items():
hdfs_client.create_file(path=path, file_data=data, overwrite=True)
status = hdfs_client.get_file_dir_status(path)
assert status and status['FileStatus']['type'] == 'FILE'
print('Wrote to HDFS %s' % path)
# Submit coordinator to Oozie
oozie_client = pyoozie.OozieClient(
url='http://localhost:11000',
user=hadoop_user_name,
)
coord_config = {
'user.name': hadoop_user_name,
'custom.config': '😢',
}
print('Submitting coordinator to Oozie')
coordinator = oozie_client.jobs_submit_coordinator(
coord_path,
coord_config,
)
# Test that all is well
print('Coordinator %s created' % coordinator.coordJobId)
assert coordinator.status.is_active
# Prompt
six.moves.input("Press enter to delete coordinator")
print('Deleting %s' % coordinator.coordJobId)
assert oozie_client.job_coordinator_kill(coordinator.coordJobId)
|
StarcoderdataPython
|
3359638
|
<reponame>xashru/robust-vad<gh_stars>1-10
from .cnn import *
from .dnn import DNN20
from .lstm import LSTM
from .preact_resnet import PreActResNet18
|
StarcoderdataPython
|
1636186
|
<filename>settings/diffractometer/NIH Diffractometer_settings.py
phi_motor_name = 'SamplePhi'
phi_scale = 1.0
rotation_center_x = -1.2333
rotation_center_y = 2.0598
x_motor_name = 'SampleX'
x_scale = 1.0
xy_rotating = False
y_motor_name = 'SampleY'
y_scale = 1.0
z_motor_name = 'SampleZ'
z_scale = 1.0
|
StarcoderdataPython
|
1667560
|
<reponame>NengLu/topopy
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 10 09:32:58 2018
@author: vicen
"""
import warnings
warnings.filterwarnings('ignore')
import sys
import numpy as np
from scipy import ndimage
# Add to the path code folder and data folder
sys.path.append("../")
from topopy import DEM
from topopy.ext.distance import cost
from mcompare import compare_indexes, compare_row_cols, compare_arrays, load_array
from skimage import graph
import scipy.io as sio
dem = DEM("../data/tunez.tif")
# USED TO DEBUG
dims = dem._array.shape
ncells = dem._array.size
def _get_aux_topography(dem):
"""
This function calculates the auxiliary topography by using the topography in
depressions to derive the most realistic flow paths. It uses as weights the
differences between the filled and the raw DEM.
References:
-----------
This algorithm is adapted to Python from TopoToolbox MATLAB code by <NAME>
(version 2017-09-02). It is equivalent to use the preprocessing option
'carve' in that code (default preprocessing option).
<NAME>., <NAME>., 2010. TopoToolbox: A set of Matlab functions
for topographic analysis. Environ. Model. Softw. 25, 770–781.
https://doi.org/10.1016/j.envsoft.2009.12.002
<NAME>., <NAME>., 2014. Short Communication: TopoToolbox 2 -
MATLAB-based software for topographic analysis and modeling in Earth
surface sciences. Earth Surf. Dyn. 2, 1–7. https://doi.org/10.5194/esurf-2-1-2014
"""
# Fill the DEM *
fill = dem.fill_sinks()
diff = fill.read_array() - dem.read_array()
dem = fill
# Identify flats and sills *
flats, sills = dem.identify_flats(nodata=False)
# Derive the cost of routing through sills *
carvemin = 0.1
struct = np.ones((3, 3), dtype="int8")
lbl_arr, nlbl = ndimage.label(flats.read_array(), structure=struct)
lbls = np.arange(1, nlbl + 1)
tweight = 2
for lbl in lbls:
diff[lbl_arr == lbl] = (diff[lbl_arr==lbl].max() - diff[lbl_arr == lbl])**tweight + carvemin
del lbl_arr
# Get presill pixels i.e. pixels immediately upstream to sill pixels *
zvals = dem.read_array()
flats_arr = flats.read_array().astype("bool")
dims = zvals.shape
row, col = sills.find()
rowadd = np.array([-1, -1, 0, 1, 1, 1, 0, -1])
coladd = np.array([ 0, 1, 1, 1, 0, -1, -1, -1])
presill_rows = np.array([], dtype="int")
presill_cols = np.array([], dtype="int")
for n in range(8):
rowp = row + rowadd[n]
colp = col + coladd[n]
# Avoid neighbors outside array (remove cells and their neighbors)
valid_rc = (rowp >= 0) & (colp >= 0) & (rowp < dims[0]) & (colp < dims[1])
rowp = rowp[valid_rc]
colp = colp[valid_rc]
# Discard cells (row-col pairs) that do not fullfill both conditions
cond01 = zvals[row[valid_rc], col[valid_rc]] == zvals[rowp, colp]
cond02 = flats_arr[rowp, colp]
valid_pix = np.logical_and(cond01, cond02)
presill_rows = np.append(presill_rows, rowp[valid_pix])
presill_cols = np.append(presill_cols, colp[valid_pix])
starts = [xx for xx in zip(presill_rows, presill_cols)]
# Calculate auxiliary topography, i.e. the cost surface seeded at presill pixels *
flats_arr = np.invert(flats_arr)
diff[flats_arr] = 99999
lg = graph.MCP_Geometric(diff)
diff = lg.find_costs(starts=starts)[0] + 1
diff[flats_arr] = -99999
return diff
def sort_pixels(dem, aux_topo):
dem = dem.fill_sinks().read_array()
rdem = dem.ravel()
# if aux_topo:
diff = aux_topo.ravel()
ix_sorted_flats = np.argsort(-diff, kind='mergesort')
# ### TEST
# ix_sorted_flats = sio.loadmat("IXSortedFlats.mat")["IXSortedFlats"] - 1
# row, col = np.unravel_index(ix_sorted_flats, dims, "F")
# ix_sorted_flats = np.ravel_multi_index((row, col), dims)
# ix_sorted_flats = ix_sorted_flats.reshape((ncells,))
# ### TEST
# ix_sorted_flats_z = np.append(rdem[ix_sorted_flats].reshape(ncells, 1),
# ix_sorted_flats.reshape(ncells, 1), axis=1).reshape((ncells, 2))
#
# ind = np.argsort(ix_sorted_flats_z[:,0])[::-1]
# ix = np.array(ix_sorted_flats_z[:,1][ind], dtype=np.int32)
ndx = np.arange(ncells)
ndx = ndx[ix_sorted_flats]
# This F#@#@ line took me three days
# Numpy has not 'descent', but you CANNOT invert the index array!
# Since you'll mess up all the previous sorting!!!
#ix = np.argsort(rdem[ndx])[::-1] # My F@#@¢@ mistake!!
ix = np.argsort(-rdem[ndx], kind='mergesort')
ix = ndx[ix]
return ix
aux_topo = _get_aux_topography(dem)
ix = sort_pixels(dem, aux_topo)
#del aux_topo
#ix = sio.loadmat('ix.mat')['ix'] - 1
#ix = ix.reshape((ncells, ))
#row, col = np.unravel_index(ix, dims, "F")
#ix = np.ravel_multi_index((row, col), dims)
pp = np.zeros(dims, dtype=np.int32)
IX = np.arange(ncells, dtype=np.int32)
pp = pp.ravel()
pp[ix] = IX
pp = pp.reshape(dims)
demarr = dem.fill_sinks().read_array()
f_dem = demarr.ravel()
# CARDINAL NEIGHBORS
footprint= np.array([[0, 1, 0],
[1, 1, 1],
[0, 1, 0]], dtype=np.int)
IXC1 = ndimage.morphology.grey_dilation(pp, footprint=footprint)
xxx1 = np.copy(IXC1)
IX = IXC1.ravel()[ix]
IXC1 = ix[IX]
G1 = (f_dem[ix]-f_dem[IXC1])/(dem.get_cellsize())
"""
% cardinal neighbors
IXC1 = imdilate(pp,[0 1 0; 1 1 1; 0 1 0]>0);
xxx1 = IXC1;
IX = IXC1(FD.ix);
IXC1 = FD.ix(IX);
G1 = (dem(FD.ix)-dem(IXC1))/(FD.cellsize);
G1(FD.ix == IXC1) = -inf;
"""
# DIAGONAL NEIGHBORS
footprint= np.array([[1, 0, 1],
[0, 1, 0],
                     [1, 0, 1]], dtype=int)  # np.int is removed in modern NumPy
IXC2 = ndimage.morphology.grey_dilation(pp, footprint=footprint)
xxx2 = np.copy(IXC2)
IX = IXC2.ravel()[ix]
IXC2 = ix[IX]
G2 = (f_dem[ix]-f_dem[IXC2])/(dem.get_cellsize() * np.sqrt(2))
"""
IXC2 = imdilate(pp,[1 0 1; 0 1 0; 1 0 1]>0);
xxx2 = IXC2;
IX = IXC2(FD.ix);
IXC2 = FD.ix(IX);
G2 = (dem(FD.ix)-dem(IXC2))/(norm([FD.cellsize,FD.cellsize]));
"""
I = (G1<=G2) & (xxx2.ravel()[ix]>xxx1.ravel()[ix])
ixc = IXC1
ixc[I] = IXC2[I]
I = ixc == ix
I = np.invert(I)
ix = ix[I]
ixc = ixc[I]
"""
% choose the steeper one
I = G1<=G2 & xxx2(FD.ix)>xxx1(FD.ix);
FD.ixc = IXC1;
FD.ixc(I) = IXC2(I);
I = FD.ixc == FD.ix;
FD.ix(I) = [];
FD.ixc(I) = [];
"""
#ix = sio.loadmat('ix.mat')['ix'] - 1
#ix = ix.reshape((304980, ))
#row, col = np.unravel_index(ix, dims, "F")
#ix = np.ravel_multi_index((row, col), dims)
#ixc = sio.loadmat('ixc.mat')['ixc'] - 1
#ixc = ixc.reshape((304980, ))
#row, col = np.unravel_index(ixc, dims, "F")
#ixc = np.ravel_multi_index((row, col), dims)
nix = len(ix)
A = np.ones(ncells)
for n in range(nix):
A[ixc[n]] = A[ix[n]] + A[ixc[n]]
A = A.reshape(dims)
acc = DEM()
acc.copy_layout(dem)
acc.set_array(A)
acc.save("flow_Arr.tif")
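# Editor's note (illustration, not part of the original script): the loop above
# is D8 flow accumulation over the topologically ordered giver/receiver lists:
# because ix runs from high to low elevation, every cell has already passed its
# accumulated area on to its receiver (ixc) before that receiver is visited.
# A tiny sketch of the idea for a 3-cell chain 0 -> 1 -> 2:
#   A = [1, 1, 1]
#   A[1] += A[0]   # -> [1, 2, 1]
#   A[2] += A[1]   # -> [1, 2, 3]   (cell 2 drains the whole chain)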
##del aux_topo
#
#aux_topo = _get_aux_topography(dem)
#
#
#dem = dem.fill_sinks().read_array()
#f_dem = dem.ravel()
#
#IXSortedFlats = np.argsort(aux_topo.ravel())[::-1]
##del aux_topo
#
#ndx = np.arange(ncells, dtype=np.int32)
#ndx = ndx[IXSortedFlats]
##del IXSortedFlats
#
#ix = np.argsort(f_dem[ndx])[::-1]
#ix = ndx[ix]
#
#resta = ix2 - ix
#ix = np.argsort(rdem[ix_sorted_flats])[::-1]
#
#sdem = dem.ravel()[ix]
#fdem = np.ravel(mdem, "F")
#ix = np.argsort(fdem[ix_sorted_flats])[::-1]
## Sort pixels (givers)
#aux_topo = diff.ravel()
#dem = dem.fill_sinks()
#
#f_dem = dem.read_array().ravel("F")
#ix = np.argsort(f_dem[ix_sorted_flats])[::-1]
#ndx = np.arange(ncells, dtype = np.int32)[ix_sorted_flats]
#ix = ndx[np.argsort(f_dem[ndx])] ## FD.IX (Givers)
#
#r, c = np.unravel_index(ix, dims)
#ind = np.ravel_multi_index((r, c), dims, order = "F")
#
##
##ncells = dims[0] * dims[1]
##
#pp = np.zeros(dims, dtype=np.int32)
#iixx = np.arange(dims[0] * dims[1])
#
#pp[np.unravel_index(ix, dims)] = iixx
##pp = pp.reshape(dims)
|
StarcoderdataPython
|
3358137
|
<reponame>psorus/f
from param1 import *
from collector import *
from transform import addtrafo,addinv
import fmath
import math
class atanh(param1):
def __init__(s,p):
param1.__init__(s)
s.q=p
def diff(s,by)->'mult':
return s.q.diff(by)/(fmath.value(1)-fmath.square(s.q))
def eval(s,**v)->float:
return math.atanh(s.q.eval(**v))
def gettyp(s)->str:
return "atanh"
def _copywithparam(s,p)->"param1":
return atanh(p)
def mininp(s)->"float(possibly inf)":
return -1.0
def maxinp(s)->"float(possibly inf)":
return 1.0
def minpos(s)->"float(possibly inf)":
mp=s.q.minpos()
mp=max(s.mininp(),mp)
mp=min(s.maxinp(),mp)
return math.atanh(mp)
def maxpos(s)->"float(possibly inf)":
mp=s.q.maxpos()
mp=max(s.mininp(),mp)
mp=min(s.maxinp(),mp)
return math.atanh(mp)
register(atanh(0.5))
addinv("tanh","atanh")
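# Editor's note (illustration, not part of the original library): diff() above
# encodes the standard derivative rule d/dx atanh(u(x)) = u'(x) / (1 - u(x)**2),
# which is what s.q.diff(by) / (1 - s.q**2) expresses with the fmath primitives.
# A plain-float sanity check, independent of this framework:
#   >>> import math
#   >>> h = 1e-6
#   >>> (math.atanh(0.5 + h) - math.atanh(0.5 - h)) / (2 * h)   # ~1.333333
#   >>> 1 / (1 - 0.5 ** 2)                                      # 1.3333...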
|
StarcoderdataPython
|
110514
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""health check reposnser tests."""
from http.server import HTTPServer
import threading
import unittest
import mock
import requests
from python.bot.startup.health_check_responder import EXPECTED_SCRIPTS
from python.bot.startup.health_check_responder import RequestHandler
from python.bot.startup.health_check_responder import RESPONDER_IP
from python.bot.startup.health_check_responder import RESPONDER_PORT
RESPONDER_ADDR = f'http://{RESPONDER_IP}:{RESPONDER_PORT}'
class HealthCheckResponderTest(unittest.TestCase):
"""Test health check responder."""
def setUp(self):
"""Prepare mock processes and start the responder server thread."""
self.mock_run_process = mock.MagicMock()
self.mock_run_process.cmdline.return_value = ['./' + EXPECTED_SCRIPTS[0]]
self.mock_run_bot_process = mock.MagicMock()
self.mock_run_bot_process.cmdline.return_value = [
'./' + EXPECTED_SCRIPTS[1]
]
self.health_check_responder_server = HTTPServer(
(RESPONDER_IP, RESPONDER_PORT), RequestHandler)
server_thread = threading.Thread(
target=self.health_check_responder_server.serve_forever)
server_thread.start()
def tearDown(self):
self.health_check_responder_server.shutdown()
self.health_check_responder_server.server_close()
@mock.patch(
'python.bot.startup.health_check_responder.process_handler.psutil')
def test_healthy(self, mock_psutil):
"""Testcase for both scripts are running."""
mock_psutil.process_iter.return_value = [
self.mock_run_process, self.mock_run_bot_process
]
self.assertEqual(200, requests.get(f'{RESPONDER_ADDR}').status_code)
@mock.patch(
'python.bot.startup.health_check_responder.process_handler.psutil')
def test_run_terminated(self, mock_psutil):
"""Testcase for only the run script is running."""
mock_psutil.process_iter.return_value = [self.mock_run_process]
self.assertEqual(500, requests.get(f'{RESPONDER_ADDR}').status_code)
@mock.patch(
'python.bot.startup.health_check_responder.process_handler.psutil')
def test_run_bot_terminated(self, mock_psutil):
"""Testcase for only the run_bot script is running."""
mock_psutil.process_iter.return_value = [self.mock_run_bot_process]
self.assertEqual(500, requests.get(f'{RESPONDER_ADDR}').status_code)
@mock.patch(
'python.bot.startup.health_check_responder.process_handler.psutil')
def test_both_terminated(self, mock_psutil):
"""Testcase for neither script is running."""
mock_psutil.process_iter.return_value = []
self.assertEqual(500, requests.get(f'{RESPONDER_ADDR}').status_code)
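# Editor's sketch (assumption, not the actual health_check_responder module):
# the tests above only require a handler that answers 200 when every expected
# script has a live process and 500 otherwise. A minimal stand-in illustrating
# that contract could look like the following; _running_script_names and
# _SketchHealthHandler are hypothetical names, not part of the real codebase.
from http.server import BaseHTTPRequestHandler
def _running_script_names():
    """Hypothetical helper: collect script names from live process command lines."""
    import psutil
    names = set()
    for proc in psutil.process_iter():
        try:
            names.update(part.lstrip('./') for part in proc.cmdline())
        except psutil.Error:
            continue
    return names
class _SketchHealthHandler(BaseHTTPRequestHandler):
    expected_scripts = ['run.py', 'run_bot.py']  # placeholder script names
    def do_GET(self):
        running = _running_script_names()
        healthy = all(s in running for s in self.expected_scripts)
        self.send_response(200 if healthy else 500)
        self.end_headers()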
|
StarcoderdataPython
|
1675332
|
class Solution:
def bitwiseComplement(self, n: int) -> int:
if n == 0:
return 1
elif n == 1:
return 0
b = "".join("0" if x == "1" else "1" for x in bin(n)[2:])
return int(b, 2)
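# Editor's note: an equivalent bit-twiddling variant (a sketch, not part of the
# original solution) builds a mask of n.bit_length() ones and XORs it in,
# avoiding the string round-trip through bin():
def bitwise_complement_masked(n: int) -> int:
    if n == 0:
        return 1
    mask = (1 << n.bit_length()) - 1  # e.g. n=5 (0b101) -> mask=0b111
    return n ^ mask                   # 0b101 ^ 0b111 = 0b010 = 2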
|
StarcoderdataPython
|
100170
|
# -*- coding: utf-8 -*-
from pyleecan.Classes.NodeMat import NodeMat
import numpy as np
def get_all_node_coord(self, group=None):
"""Return a matrix of nodes coordinates and the vector of nodes tags corresponding to group.
If no group specified, it returns all the nodes of the mesh.
Parameters
----------
self : Mesh
an Mesh object
group : numpy.array
Vector of targeted group
Returns
-------
coordinate: numpy.array
Nodes coordinates
tag : numpy.array
Nodes tags
"""
if type(self.node) is NodeMat:
if group is None:
return self.node.coordinate, self.node.tag
else:
for key in self.element:
element_group = self.element[key].get_group(group)
node_tags = element_group.get_all_node_tags()
coord = self.node.get_coord(node_tags)
return coord, node_tags
else:
return None
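# Editor's usage note (a hedged sketch, not part of the original module): with a
# NodeMat-based mesh this method would typically be called as, e.g.,
#   coords, tags = mesh.get_all_node_coord()              # every node
#   coords, tags = mesh.get_all_node_coord(group=my_grp)  # nodes of one group
# where `mesh` and `my_grp` stand for an existing Mesh object and a group
# vector; when self.node is not a NodeMat the method returns None.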
|
StarcoderdataPython
|
3213767
|
import gym
env = gym.make('CartPole-v0')  # the sample outputs below correspond to CartPole, not MsPacman
print(env.action_space)
#> Discrete(2)
print(env.observation_space)
#> Box(4,)
print(env.observation_space.high)
#> array([ 2.4 , inf, 0.20943951, inf])
print(env.observation_space.low)
#> array([-2.4 , -inf, -0.20943951, -inf])
from gym import spaces
space = spaces.Discrete(8) # Set with 8 elements {0, 1, 2, ..., 7}
x = space.sample()
assert space.contains(x)
assert space.n == 8
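# Editor's addition (a minimal sketch using the classic pre-0.26 Gym API, to
# match the style of the snippet above; not part of the original file): a
# typical random-agent interaction loop over these spaces.
def run_random_episode(env_id='CartPole-v0', max_steps=200):
    env = gym.make(env_id)
    obs = env.reset()
    total_reward = 0.0
    for _ in range(max_steps):
        action = env.action_space.sample()          # random action from the space
        obs, reward, done, info = env.step(action)  # classic 4-tuple step API
        total_reward += reward
        if done:
            break
    env.close()
    return total_reward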
|
StarcoderdataPython
|
86376
|
<reponame>flyflyinit/GUI-admin-tool
from PyQt5.QtCore import Qt
try:
from PyQt5.QtWidgets import QWidget, QVBoxLayout, QProgressBar, QPushButton, QSpinBox, QLabel, QLineEdit, \
QFormLayout, \
QHBoxLayout, QListWidget, QMessageBox, QCheckBox
except ImportError as e:
print(
f'package PyQt5 Not Found\n{e}\ntry :\npip3 install --user pyqt5\nOR\ndnf install python3-pyqt5, yum install python3-pyqt5\n')
try:
import subprocess
import concurrent.futures
from datetime import datetime
except ImportError as e:
print(f'package not found\n{e}\n')
class CreateUsersWindow(QWidget):
def __init__(self):
super().__init__()
self.setGeometry(200, 50, 300, 400)
self.setWindowTitle("Add Users")
self.layouts()
self.widgets()
def layouts(self):
self.mainLayout = QVBoxLayout()
self.topLayout = QVBoxLayout()
self.topLayout.setContentsMargins(20, 20, 20, 20)
self.bottomLayout = QHBoxLayout()
self.progeesBar = QProgressBar()
self.progeesBar.setHidden(True)
self.okBtn = QPushButton("Ok")
self.okBtn.clicked.connect(self.cancelAction)
self.okBtn.setStyleSheet("color: #ecf0f1; background-color: #27ae60 ; border: 0px solid #27ae60")
self.okBtn.setHidden(True)
self.submitBtn = QPushButton("Submit")
self.submitBtn.clicked.connect(self.submitAction)
self.cancelBtn = QPushButton("Cancel")
self.cancelBtn.clicked.connect(self.cancelAction)
self.submitBtn.setHidden(False)
self.cancelBtn.setHidden(False)
self.okBtn.setFixedHeight(30)
self.submitBtn.setFixedHeight(30)
self.cancelBtn.setFixedHeight(30)
self.submitBtn.setStyleSheet("color: #ecf0f1; background-color: #27ae60 ; border: 0px solid #27ae60")
self.cancelBtn.setStyleSheet("color: #ecf0f1; background-color: #e74c3c; border: 0px solid #e74c3c")
self.bottomLayout.addWidget(self.okBtn)
self.bottomLayout.addWidget(self.submitBtn)
self.bottomLayout.addWidget(self.cancelBtn)
self.mainLayout.addLayout(self.topLayout)
self.mainLayout.addStretch()
self.mainLayout.addWidget(self.progeesBar)
self.mainLayout.addLayout(self.bottomLayout)
self.setLayout(self.mainLayout)
def widgets(self):
self.usersNbr = QSpinBox(self)
self.usersNbr.setMinimum(1)
self.usersNbr.setMaximum(1000)
self.usersNbr.setSuffix(" user")
self.createHomeDir = QCheckBox('Create Home Directory')
self.form = QFormLayout()
self.editLineUsername = QLineEdit('')
self.editLineUsername.setPlaceholderText('enter username')
self.form.addRow(QLabel('Username :'), self.editLineUsername)
self.editLineUserShell = QLineEdit('')
self.editLineUserShell.setPlaceholderText('enter shell')
self.form.addRow(QLabel('User Shell :'), self.editLineUserShell)
self.editLineUserComment = QLineEdit('')
self.editLineUserComment.setPlaceholderText('enter comment')
self.form.addRow(QLabel('Comment :'), self.editLineUserComment)
self.note = QLabel('')
self.topLayout.addWidget(self.usersNbr)
self.topLayout.addWidget(self.editLineUsername)
self.topLayout.addWidget(self.editLineUserShell)
self.topLayout.addWidget(self.editLineUserComment)
self.topLayout.addWidget(self.createHomeDir)
self.topLayout.addWidget(self.note)
def submitAction(self):
self.setCursor(Qt.WaitCursor)
self.progeesBar.setHidden(False)
self.progeesBar.setMaximum(self.usersNbr.value())
self.progeesBar.setValue(0)
usersList = self.generateList()
txt = ''
nbr = 0
with concurrent.futures.ThreadPoolExecutor() as executor:
results = executor.map(self.createuserThreading, usersList)
for result in results:
nbr += 1
self.progeesBar.setValue(nbr)
txt = txt + result + "\n"
self.note.setText(txt)
self.setCursor(Qt.ArrowCursor)
self.okBtn.setHidden(False)
self.submitBtn.setHidden(True)
self.cancelBtn.setHidden(True)
def generateList(self):
usersList = []
homeDir = 'False'
if self.createHomeDir.isChecked():
homeDir = 'True'
if int(self.usersNbr.value()) == 1:
usersList.append(
[self.editLineUsername.text(), self.editLineUserComment.text(), self.editLineUserShell.text(), homeDir])
else:
for user in range(self.usersNbr.value()):
usersList.append(
[self.editLineUsername.text() + str(user + 1), self.editLineUserComment.text() + str(user + 1),
self.editLineUserShell.text(), homeDir])
return usersList
def createuserThreading(self, user):
if user[3] == 'True':
homedir = '-m'
else:
homedir = ''
try:
c = f'useradd {homedir} -s {user[2]} -c "{user[1]}" {user[0]}'
subprocess.run(c, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, shell=True, check=True)
except subprocess.CalledProcessError:
return f"error occured during creating {user[0]} "
else:
return f"{user[0]} has been created succesfully!"
def cancelAction(self):
self.close()
class EditUsersWindow(QWidget):
def __init__(self, userDetails):
super().__init__()
self.setGeometry(200, 50, 500, 500)
self.setWindowTitle("Edit User")
self.userDetails = userDetails
self.layouts()
self.widgets()
def layouts(self):
self.mainLayout = QVBoxLayout()
self.topLayout = QVBoxLayout()
self.middleLayout = QHBoxLayout()
self.topLayout.setContentsMargins(20, 20, 20, 20)
self.bottomLayout = QHBoxLayout()
self.text = QLabel('')
self.progeesBar = QProgressBar()
self.progeesBar.setHidden(True)
self.submitBtn = QPushButton("Submit")
self.submitBtn.clicked.connect(self.submitAction)
self.cancelBtn = QPushButton("Cancel")
self.cancelBtn.clicked.connect(self.cancelAction)
self.okBtn = QPushButton("Ok")
self.okBtn.clicked.connect(self.okAction)
self.okBtn.setHidden(True)
self.submitBtn.setFixedHeight(30)
self.cancelBtn.setFixedHeight(30)
self.okBtn.setFixedHeight(30)
self.submitBtn.setStyleSheet("color: #ecf0f1; background-color: #27ae60 ; border: 0px")
self.okBtn.setStyleSheet("color: #ecf0f1; background-color: #27ae60 ; border: 0px")
self.cancelBtn.setStyleSheet("color: #ecf0f1; background-color: #e74c3c; border: 0px")
self.bottomLayout.addWidget(self.submitBtn)
self.bottomLayout.addWidget(self.cancelBtn)
self.bottomLayout.addWidget(self.okBtn)
self.mainLayout.addLayout(self.topLayout)
self.mainLayout.addStretch()
self.mainLayout.addLayout(self.bottomLayout)
self.setLayout(self.mainLayout)
def widgets(self):
self.form = QFormLayout()
print(self.userDetails)
self.username = QLineEdit(self.userDetails[0])
self.form.addRow(QLabel('Username :'), self.username)
self.id = QLineEdit(self.userDetails[1])
self.form.addRow(QLabel('User ID :'), self.id)
self.primaryGroup = self.userDetails[2].split('(')[1].split(')')[0]
self.priGroup = QLineEdit(self.primaryGroup)
self.form.addRow(QLabel('Primary Group :'), self.priGroup)
self.comment = QLineEdit(self.userDetails[4])
self.form.addRow(QLabel('Comment :'), self.comment)
self.homeDir = QLineEdit(self.userDetails[5])
self.form.addRow(QLabel('Home Directory :'), self.homeDir)
self.shell = QLineEdit(self.userDetails[6])
self.form.addRow(QLabel('Shell :'), self.shell)
if self.userDetails[7] == 'never':
self.expirationDate = QLineEdit()
else:
import dateutil.parser as parser
self.expirationDate_adapted = datetime.strptime(self.userDetails[7], '%b %d, %Y').strftime('%Y-%m-%d')
date = parser.parse(self.expirationDate_adapted)
self.expirationDate = QLineEdit(date.isoformat().split('T')[0])
self.form.addRow(QLabel('Expiration Date :'), self.expirationDate)
self.groupsBtns = QVBoxLayout()
self.lineEditAddGroup = QLineEdit()
self.lineEditAddGroup.setPlaceholderText('enter group name')
self.addGroupBtn = QPushButton('Add')
self.addGroupBtn.clicked.connect(self.addGroup)
self.deleteGroupBtn = QPushButton('Delete')
self.deleteGroupBtn.clicked.connect(self.deleteGroup)
self.deleteAllGroupsBtn = QPushButton('Delete All')
self.deleteAllGroupsBtn.clicked.connect(self.deleteAllGroups)
self.groupsBtns.addWidget(self.lineEditAddGroup)
self.groupsBtns.addWidget(self.addGroupBtn)
self.groupsBtns.addWidget(self.deleteGroupBtn)
self.groupsBtns.addWidget(self.deleteAllGroupsBtn)
self.groupsBtns.addStretch()
self.listGroups = QListWidget()
self.form.addRow(QLabel('Groups :'), self.middleLayout)
groups = self.userDetails[3].split(',')
for group in groups:
grp = group.split('(')[1].split(')')[0]
if grp == self.primaryGroup:
continue
else:
self.listGroups.addItem(grp)
self.middleLayout.addWidget(self.listGroups)
self.middleLayout.addLayout(self.groupsBtns)
self.topLayout.addLayout(self.form)
self.topLayout.addWidget(self.text)
self.topLayout.addWidget(self.progeesBar)
def addGroup(self):
group = self.lineEditAddGroup.text()
if group == "":
pass
else:
self.listGroups.addItem(group)
def deleteGroup(self):
listGroups = self.listGroups.selectedItems()
if not listGroups: return
for group in listGroups:
self.listGroups.takeItem(self.listGroups.row(group))
def deleteAllGroups(self):
self.listGroups.clear()
def submitAction(self):
try:
self.setCursor(Qt.WaitCursor)
self.progeesBar.setHidden(False)
self.progeesBar.setMaximum(1)
self.progeesBar.setValue(0)
self.edituser()
except subprocess.CalledProcessError:
            QMessageBox.warning(self, 'warning', f"error occurred while editing this user\n")
else:
self.setCursor(Qt.ArrowCursor)
self.submitBtn.setHidden(True)
self.cancelBtn.setHidden(True)
self.okBtn.setHidden(False)
def okAction(self):
self.close()
def edituser(self):
usernamee = self.username.text()
idd = self.id.text()
priGroupp = self.priGroup.text()
commentt = self.comment.text()
homeDirr = self.homeDir.text()
shelll = self.shell.text()
expirationDatee = self.expirationDate.text()
txt = ''
groupsitems = []
for index in range(self.listGroups.count()):
groupsitems.append(str(self.listGroups.item(index).text()))
groupsitemsstring = ",".join(groupsitems)
print(groupsitemsstring)
if expirationDatee == "never":
QMessageBox.warning(self, 'expiration field error', "expiration field can't be 'never' ")
return 0
elif expirationDatee == '':
pass
else:
try:
subprocess.run(f'usermod -e {expirationDatee} {self.userDetails[0]}', stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL, check=True, shell=True)
except subprocess.CalledProcessError:
txt = txt + "error occured during editing expiration date for this user\n"
self.text.setText(txt)
else:
txt = txt + "expiration date edited succesfully\n"
self.text.setText(txt)
try:
subprocess.run(f'usermod -g {priGroupp} {self.userDetails[0]}', stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL, check=True, shell=True)
except subprocess.CalledProcessError:
txt = txt + "error occured during editing primary group for this user\n"
self.text.setText(txt)
else:
txt = txt + "primary group edited succesfully\n"
self.text.setText(txt)
try:
subprocess.run(f'usermod -G {groupsitemsstring} {self.userDetails[0]}', stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL, check=True, shell=True)
except subprocess.CalledProcessError:
txt = txt + "error occured during editing supplementary groups for this user\n"
self.text.setText(txt)
else:
txt = txt + "supplementary groups edited succesfully\n"
self.text.setText(txt)
try:
subprocess.run(f'usermod -s {shelll} {self.userDetails[0]}', stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL, check=True, shell=True)
except subprocess.CalledProcessError:
txt = txt + "error occured during editing shell for this user\n"
self.text.setText(txt)
else:
txt = txt + "shell edited succesfully\n"
self.text.setText(txt)
try:
subprocess.run(f'usermod -d {homeDirr} {self.userDetails[0]}', stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL, check=True, shell=True)
except subprocess.CalledProcessError:
txt = txt + "error occured during editing home directory for this user\n"
self.text.setText(txt)
else:
txt = txt + "home directory edited succesfully\n"
self.text.setText(txt)
try:
subprocess.run(f"usermod -c '{commentt}' {self.userDetails[0]}", stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL, check=True, shell=True)
except subprocess.CalledProcessError:
txt = txt + "error occured during editing comment for this user\n"
self.text.setText(txt)
else:
txt = txt + "comment edited succesfully\n"
self.text.setText(txt)
try:
subprocess.run(f"usermod -u {idd} {self.userDetails[0]}", stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL, check=True, shell=True)
except subprocess.CalledProcessError:
txt = txt + "error occured during editing user id for this user\n"
self.text.setText(txt)
else:
txt = txt + "user id edited succesfully\n"
self.text.setText(txt)
try:
subprocess.run(f'usermod -l {usernamee} {self.userDetails[0]}', stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL, check=True, shell=True)
except subprocess.CalledProcessError:
txt = txt + "error occured during editing username for this user\n"
self.text.setText(txt)
else:
txt = txt + "username edited succesfully\n"
self.text.setText(txt)
self.progeesBar.setValue(1)
def cancelAction(self):
self.close()
class DeleteUsersWindow(QWidget):
def __init__(self, d):
super().__init__()
self.setGeometry(200, 50, 300, 300)
self.setWindowTitle("Delete Users")
self.listUsersToDelete = d
self.layouts()
self.widgets()
def layouts(self):
self.mainLayout = QVBoxLayout()
self.topLayout = QVBoxLayout()
self.topLayout.setContentsMargins(20, 20, 20, 20)
self.bottomLayout = QHBoxLayout()
self.progeesBar = QProgressBar()
self.progeesBar.setHidden(True)
self.submitBtn = QPushButton("Submit")
self.submitBtn.clicked.connect(self.submitAction)
self.cancelBtn = QPushButton("Cancel")
self.cancelBtn.clicked.connect(self.cancelAction)
self.okBtn = QPushButton("Ok")
self.okBtn.clicked.connect(self.okAction)
self.okBtn.setHidden(True)
self.submitBtn.setFixedHeight(30)
self.cancelBtn.setFixedHeight(30)
self.okBtn.setFixedHeight(30)
self.submitBtn.setStyleSheet("color: #ecf0f1; background-color: #27ae60 ; border: 0px")
self.okBtn.setStyleSheet("color: #ecf0f1; background-color: #27ae60 ; border: 0px")
self.cancelBtn.setStyleSheet("color: #ecf0f1; background-color: #e74c3c; border: 0px")
self.bottomLayout.addWidget(self.submitBtn)
self.bottomLayout.addWidget(self.cancelBtn)
self.bottomLayout.addWidget(self.okBtn)
self.mainLayout.addLayout(self.topLayout)
self.mainLayout.addStretch()
self.mainLayout.addLayout(self.bottomLayout)
self.setLayout(self.mainLayout)
def widgets(self):
self.a = ', '.join(self.listUsersToDelete)
self.text = QLabel(f'Are You Sure You want To Delete The Following Users :\n\n{self.a}')
self.text2 = QLabel()
self.topLayout.addWidget(self.text)
self.topLayout.addWidget(self.text2)
self.topLayout.addWidget(self.progeesBar)
def submitAction(self):
try:
self.setCursor(Qt.WaitCursor)
self.progeesBar.setHidden(False)
self.progeesBar.setMaximum(len(self.listUsersToDelete))
self.progeesBar.setValue(0)
self.deleteuser()
except subprocess.CalledProcessError:
            QMessageBox.warning(self, 'warning', f"error occurred while deleting these users\n")
else:
self.setCursor(Qt.ArrowCursor)
self.submitBtn.setHidden(True)
self.cancelBtn.setHidden(True)
self.okBtn.setHidden(False)
def okAction(self):
self.close()
def deleteuserThreading(self, username):
try:
subprocess.run(f'userdel -r {username}', stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True,
shell=True)
except subprocess.CalledProcessError:
return f"error occured during deleting {username}"
else:
return f"{username} deleted succesfully!"
def deleteuser(self):
with concurrent.futures.ThreadPoolExecutor() as executor:
results = executor.map(self.deleteuserThreading, self.listUsersToDelete)
i = 0
r = ''
for result in results:
i = i + 1
r = r + "\n" + result
self.progeesBar.setValue(i)
self.text2.setText(r)
def cancelAction(self):
self.close()
class MoreUsersWindow(QWidget):
def __init__(self, text, username):
super().__init__()
self.setGeometry(200, 50, 300, 300)
self.setWindowTitle(username)
self.text = text
self.layouts()
def layouts(self):
self.mainLayout = QVBoxLayout()
self.topLayout = QVBoxLayout()
self.topLayout.setContentsMargins(20, 20, 20, 20)
self.bottomLayout = QHBoxLayout()
self.label = QLabel(self.text)
self.okBtn = QPushButton("Ok")
self.okBtn.clicked.connect(self.okAction)
self.okBtn.setFixedHeight(30)
self.okBtn.setStyleSheet("color: #ecf0f1; background-color: #27ae60 ; border: 0px")
self.topLayout.addWidget(self.label)
self.bottomLayout.addWidget(self.okBtn)
self.mainLayout.addLayout(self.topLayout)
self.mainLayout.addStretch()
self.mainLayout.addLayout(self.bottomLayout)
self.setLayout(self.mainLayout)
def okAction(self):
self.close()
|
StarcoderdataPython
|
5463
|
<gh_stars>1-10
import re
regex = re.compile(r'[\n\r\t]')
def acm_digital_library(soup):
try:
keywords = set()
keywords_parent_ol = soup.find('ol', class_="rlist organizational-chart")
keywords_divs = keywords_parent_ol.findChildren('div', recursive=True)
for kw_parent in keywords_divs:
kw = kw_parent.text
keywords.add(regex.sub("", kw.split(",")[0]))
return list(keywords)
except Exception as e:
print(e)
return None
def graphics_interface_proceedings(soup):
return None
def ieee_explore(soup):
try:
keywords = set()
ggp_ul = soup.find('ul', class_="doc-keywords-list stats-keywords-list")
gp_li = ggp_ul.findChildren("li", class_="doc-keywords-list-item", recursive=False)
for p_li in gp_li:
if p_li.find('strong').text in ["IEEE Keywords", "INSPEC: Controlled Indexing", "INSPEC: Non-Controlled Indexing", "MeSH Terms"]:
for keywords_l in p_li.find('ul').findChildren("li", recursive=False):
a_tag = keywords_l.find("a", class_="stats-keywords-list-item")
if a_tag is not None:
keywords.add(str(regex.sub("", a_tag.text.split(",")[0])))
else:
keywords.add(str(regex.sub("", str(keywords_l.text).split(",")[0])))
return list(keywords)
except Exception as e:
print(e)
return None
def eurographics_digital_library(soup):
try:
keywords_set = set()
p_tablebody = soup.find('table', class_="detailtable").find("tbody")
p_trs = p_tablebody.findChildren('tr')
for tr in p_trs:
label = tr.find("td", class_="label-cell")
if label.text == "dc.subject":
keywords = tr.find("td", class_="word-break")
# e.g. CASE 1: ['Categories and Subject Descriptors (according to ACM CCS): I.4.1 [Image Processing and Computer Vision]: Enhancement-Filtering I.3.3 [Computer Graphics]: Picture/Image Generation-Bitmap and framebuffer operations']
# e.g. CASE 2 [TODO: Not taken care of yet] Categories and Subject Descriptors (according to ACM CCS): Information Interfaces And Presentation (e.g., HCI) [H.5.2]: User Interfaces-Graphical user interfaces (GUI)
# Step 1: Remove annoying substrings
# Step 2: Choose to take ONLY Categories, not the Subject Descriptors > Write a REGEX to take substrings between [].
# Step 3: Split the string by , or ; or :
to_replaces = ["CCS Concepts", "Categories and Subject Descriptors", "Categories and subject descriptors", "Categories and Subject Descriptors (according to ACM CCS)", "according to ACM CCS"]
keywords_str = keywords.text
for to_replace in to_replaces:
keywords_str = keywords_str.replace(to_replace, "")
keywords_extracted = re.findall(r'\[(.*?)\]', keywords_str)
if keywords_extracted:
keywords_set.update(keywords_extracted)
else:
keywords_set.update(re.split(',|:|;', keywords_str))
return list(keywords_set)
except Exception as e:
print(e)
return None
def springer_v2(soup):
try:
keywords = set()
keywords_parent_div = soup.find('div', class_="KeywordGroup")
keywords_span = keywords_parent_div.findChildren("span", class_="Keyword")
for k in keywords_span:
keywords.add(k.text)
return list(keywords)
except Exception as e:
print(e)
return None
def dagstuhl(soup):
try:
keywords_label = soup.find('b', text="Keywords:")
keywords_parent_font = keywords_label.parent
keywords_parent_td = keywords_parent_font.parent
keywords_font = keywords_parent_td.find_next('td').find_next('td').find("font")
if keywords_font is not None:
return re.split(',', keywords_font.text)
except Exception as e:
print(e)
return None
def springer_v1(soup):
try:
keywords = set()
keywords_parent_section = soup.find('ul', class_="c-article-subject-list")
keywords_li = keywords_parent_section.findChildren("li", class_="c-article-subject-list__subject")
for k in keywords_li:
kw = k.find("span").text
keywords.add(str(regex.sub("", kw)).strip())
return list(keywords)
except Exception as e:
print(e)
return None
def wiley_online_library(soup):
try:
keywords_parent_section = soup.find('section', class_="keywords")
keywords_ul = keywords_parent_section.find('ul')
keywords_lis = keywords_ul.findChildren("li")
keywords_set = set()
for keywords_li in keywords_lis:
# e.g. Case 1: "[3.1.1] Human-Centered Computing" and so on
# e.g. Case 2: CCS Concepts don't have '[' and ']' but they have strings such as "• Human‐centered computing → Graph drawings"
# Step 1: Remove annoying substrings
# Step 2: Choose to take ONLY Categories, not the Subject Descriptors > Write a REGEX to take substrings between [].
# Step 3: Split the string by , or ; or :
to_replaces = ["CCS Concepts", "Categories and Subject Descriptors", "Categories and subject descriptors", "Categories and Subject Descriptors (according to ACM CCS)", "according to ACM CCS"]
keywords_str = keywords_li.find("a").text
for to_replace in to_replaces:
keywords_str = keywords_str.replace(to_replace, "")
keywords_extracted = re.findall(r'\[(.*?)\]', keywords_str)
if keywords_extracted:
keywords_set.update(keywords_extracted)
else:
# CCS Concepts don't have '[' and ']' but they have strings such as "• Human‐centered computing → Graph drawings"
regex_find = r'•(.*)→(.*)'
regex_replace = r'\1;\2' # set the delimiter to either , : ; (as is used below to split)
keywords_str = re.sub(regex_find, regex_replace, keywords_str)
keywords_set.update(re.split(',|:|;', keywords_str))
return list(keywords_set)
except Exception as e:
print(e)
return None
def cogsci(soup):
return None
def scitepress(soup):
try:
keywords_set = set()
keywords_span = soup.find('span', id="ContentPlaceHolder1_LinkPaperPage_LinkPaperContent_LabelPublicationDetailKeywords")
for kw in keywords_span.text.split(","):
keywords_set.add(kw)
return list(keywords_set)
except Exception as e:
print(e)
return None
def scienceopen(soup):
try:
keywords_set = set()
for span_label in soup.find_all('span', class_="so-metadata-label"):
if "Keywords" in span_label.text:
for keyword_a in span_label.find_next_siblings('a'):
keywords_set.add(keyword_a.text)
return list(keywords_set)
except Exception as e:
pass
return None
def aaai(soup):
return None
def get_keywords(publisher, soup):
keywords_list = None
if publisher == "acm_digital_library":
keywords_list = acm_digital_library(soup)
elif publisher == "graphics_interface_proceedings":
keywords_list = graphics_interface_proceedings(soup)
elif publisher == "ieee_explore":
keywords_list = ieee_explore(soup)
elif publisher == "cogsci":
keywords_list = cogsci(soup)
elif publisher == "springer_v1":
keywords_list = springer_v1(soup)
elif publisher == "springer_v2":
keywords_list = springer_v2(soup)
elif publisher == "scitepress":
keywords_list = scitepress(soup)
elif publisher == "scienceopen":
keywords_list = scienceopen(soup)
elif publisher == "eurographics_digital_library":
keywords_list = eurographics_digital_library(soup)
elif publisher == "wiley_online_library":
keywords_list = wiley_online_library(soup)
elif publisher == "dagstuhl":
keywords_list = dagstuhl(soup)
elif publisher == "aaai":
keywords_list = aaai(soup)
    return keywords_list if keywords_list else None
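# Editor's sketch (an alternative layout, not part of the original module): the
# if/elif chain above can be written as a dictionary dispatch over the parser
# functions already defined in this file; get_keywords_v2 is a hypothetical name.
_PARSERS = {
    "acm_digital_library": acm_digital_library,
    "graphics_interface_proceedings": graphics_interface_proceedings,
    "ieee_explore": ieee_explore,
    "cogsci": cogsci,
    "springer_v1": springer_v1,
    "springer_v2": springer_v2,
    "scitepress": scitepress,
    "scienceopen": scienceopen,
    "eurographics_digital_library": eurographics_digital_library,
    "wiley_online_library": wiley_online_library,
    "dagstuhl": dagstuhl,
    "aaai": aaai,
}
def get_keywords_v2(publisher, soup):
    parser = _PARSERS.get(publisher)
    keywords_list = parser(soup) if parser else None
    return keywords_list if keywords_list else None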
|
StarcoderdataPython
|
147619
|
## data folder: D:\work\project\ITA Refresh\Session4 Oil Prediction
# -*- coding: utf-8 -*-
from __future__ import print_function
import time
import warnings
import numpy as np
import time
import matplotlib.pyplot as plt
from numpy import newaxis
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.models import Sequential
warnings.filterwarnings("ignore")
def load_data(filename, seq_len, normalise_window):
f = open(filename, 'rb').read()
print("f:",type(f))
# data = f.decode().split("\n")
data = f.decode().replace("b'","").split("\r\n")
print("data:",data)
# data = f;
print('data len:',len(data))
print('sequence len:',seq_len)
sequence_length = seq_len + 1
result = []
for index in range(len(data) - sequence_length):
        result.append(data[index: index + sequence_length])  # each window is seq_len+1 long; the last value is the label
print('result len:',len(result))
print('result shape:',np.array(result).shape)
print(result[:1])
if normalise_window:
result = normalise_windows(result)
print(result[:1])
print('normalise_windows result shape:',np.array(result).shape)
result = np.array(result)
    # split into train and test sets
row = round(0.9 * result.shape[0])
train = result[:row, :]
np.random.shuffle(train)
x_train = train[:, :-1]
y_train = train[:, -1]
x_test = result[row:, :-1]
y_test = result[row:, -1]
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))
return [x_train, y_train, x_test, y_test]
def normalise_windows(window_data):
normalised_data = []
    for window in window_data:  # window shape: (sequence_length,) i.e. (51,)
normalised_window = [((float(p) / float(window[0])) - 1) for p in window]
normalised_data.append(normalised_window)
return normalised_data
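# Editor's worked example (not in the original file): each window is rescaled
# relative to its first value, n_i = p_i / p_0 - 1, so for instance
#   [100.0, 105.0, 110.0]  ->  [0.0, 0.05, 0.10]
# and a prediction can be mapped back to a price with p_i = p_0 * (n_i + 1).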
def build_model(layers): #layers [1,50,100,1]
model = Sequential()
model.add(LSTM(input_dim=layers[0],output_dim=layers[1],return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(layers[2],return_sequences=False))
model.add(Dropout(0.2))
model.add(Dense(output_dim=layers[3]))
model.add(Activation("linear"))
start = time.time()
model.compile(loss="mse", optimizer="rmsprop")
print("Compilation Time : ", time.time() - start)
return model
# predict every point in a single pass
def predict_point_by_point(model, data):
predicted = model.predict(data)
print('predicted shape:',np.array(predicted).shape) #(412L,1L)
predicted = np.reshape(predicted, (predicted.size,))
return predicted
# rolling prediction: feed each prediction back in as the next input
def predict_sequence_full(model, data, window_size): #data X_test
curr_frame = data[0] #(50L,1L)
predicted = []
for i in range(len(data)):
#x = np.array([[[1],[2],[3]], [[4],[5],[6]]]) x.shape (2, 3, 1) x[0,0] = array([1]) x[:,np.newaxis,:,:].shape (2, 1, 3, 1)
predicted.append(model.predict(curr_frame[newaxis,:,:])[0,0]) #np.array(curr_frame[newaxis,:,:]).shape (1L,50L,1L)
curr_frame = curr_frame[1:]
curr_frame = np.insert(curr_frame, [window_size-1], predicted[-1], axis=0) #numpy.insert(arr, obj, values, axis=None)
return predicted
def predict_sequences_multiple(model, data, window_size, prediction_len): #window_size = seq_len
prediction_seqs = []
    for i in range(len(data) // prediction_len):  # integer division needed under Python 3
curr_frame = data[i*prediction_len]
predicted = []
for j in range(prediction_len):
predicted.append(model.predict(curr_frame[newaxis,:,:])[0,0])
curr_frame = curr_frame[1:]
curr_frame = np.insert(curr_frame, [window_size-1], predicted[-1], axis=0)
prediction_seqs.append(predicted)
return prediction_seqs
def plot_results(predicted_data, true_data, filename):
fig = plt.figure(facecolor='white')
ax = fig.add_subplot(111)
ax.plot(true_data, label='True Data')
# plt.ylim(min(predicted_data),max(predicted_data))
# print("min(predicted_data):",min(predicted_data))
plt.plot(predicted_data, label='Prediction')
plt.legend()
plt.show()
plt.savefig(filename+'.png')
def plot_results_multiple(predicted_data, true_data, prediction_len):
fig = plt.figure(facecolor='white')
ax = fig.add_subplot(111)
ax.plot(true_data, label='True Data')
    #Pad the list of predictions to shift it in the graph to its correct start
for i, data in enumerate(predicted_data):
padding = [None for p in range(i * prediction_len)]
plt.plot(padding + data, label='Prediction')
plt.legend()
plt.show()
plt.savefig('plot_results_multiple.png')
if __name__=='__main__':
global_start_time = time.time()
epochs = 1
seq_len = 50
print('> Loading data... ')
data_file_name = 'D:/software/Python/data/oil prediction data/Cushing_OK_WTI_Spot_Price_FOB __Day_Price_Only.csv'
# data_file_name1 = 'D:/software/Python/data/oil prediction data/sp500.csv'
X_train, y_train, X_test, y_test = load_data(data_file_name, seq_len, True)
print('X_train shape:',X_train.shape) #(3709L, 50L, 1L)
print('y_train shape:',y_train.shape) #(3709L,)
print('X_test shape:',X_test.shape) #(412L, 50L, 1L)
print('y_test shape:',y_test.shape) #(412L,)
print('> Data Loaded. Compiling...')
model = build_model([1, 50, 100, 1])
model.fit(X_train,y_train,batch_size=512,nb_epoch=epochs,validation_split=0.05)
# multiple_predictions = predict_sequences_multiple(model, X_test, seq_len, prediction_len=50)
# print('multiple_predictions shape:',np.array(multiple_predictions).shape) #(8L,50L)
# full_predictions = predict_sequence_full(model, X_test, seq_len)
# print('full_predictions shape:',np.array(full_predictions).shape) #(412L,)
point_by_point_predictions = predict_point_by_point(model, X_test)
print('point_by_point_predictions shape:',np.array(point_by_point_predictions).shape) #(412L)
print('Training duration (s) : ', time.time() - global_start_time)
# plot_results_multiple(multiple_predictions, y_test, 50)
# plot_results(full_predictions,y_test,'full_predictions')
plot_results(point_by_point_predictions,y_test,'point_by_point_predictions')
|
StarcoderdataPython
|
1782891
|
<gh_stars>0
from setuptools import setup
package_name = 'perception_genie'
setup(
name=package_name,
version='0.1.0',
packages=[package_name],
data_files=[
('share/ament_index/resource_index/packages',
['resource/' + package_name]),
('share/' + package_name, ['package.xml'])
],
install_requires=['setuptools'],
zip_safe=True,
maintainer='<NAME>',
maintainer_email='<EMAIL>',
description='publish groundtruth relative info ',
license='MIT',
tests_require=['pytest'],
entry_points={
'console_scripts': [
'ground_truth = perception_genie.perception_ground_truth:main'
],
},
)
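# Editor's note (hedged, not part of this file): once this ament_python package
# is built and the workspace is sourced, the console_scripts entry point above
# would typically be launched with
#   ros2 run perception_genie ground_truth
# assuming a standard ROS 2 workspace layout.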
|
StarcoderdataPython
|
62498
|
# Copyright (C) 2018 DataArt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from devicehive import DeviceError, SubscriptionError
from devicehive import ApiResponseError
def test_save(test):
device_hive_api = test.device_hive_api()
device_id = test.generate_id('d-s', test.DEVICE_ENTITY)
device = device_hive_api.put_device(device_id)
name = '%s-name' % device_id
data = {'data_key': 'data_value'}
device.name = name
device.data = data
device.is_blocked = True
device.save()
device = device_hive_api.get_device(device_id)
assert device.id == device_id
assert device.name == name
assert device.data == data
assert isinstance(device.network_id, int)
assert isinstance(device.device_type_id, int)
assert device.is_blocked
device.remove()
try:
device.save()
assert False
except DeviceError:
pass
def test_remove(test):
device_hive_api = test.device_hive_api()
device_id = test.generate_id('d-r', test.DEVICE_ENTITY)
device = device_hive_api.put_device(device_id)
device_1 = device_hive_api.get_device(device_id)
device.remove()
assert not device.id
assert not device.name
assert not device.data
assert not device.network_id
assert not device.device_type_id
assert not device.is_blocked
try:
device.remove()
assert False
except DeviceError:
pass
try:
device_1.remove()
assert False
except ApiResponseError as api_response_error:
if test.is_user_admin:
assert api_response_error.code == 404
else:
assert api_response_error.code == 403
def test_subscribe_insert_commands(test):
def init_data(handler):
device_id = test.generate_id('d-s-i-c', test.DEVICE_ENTITY)
command_names = ['%s-name-%s' % (device_id, i) for i in range(2)]
device = handler.api.put_device(device_id)
return device, command_names, []
def send_data(handler, device, command_names):
for command_name in command_names:
command = device.send_command(command_name)
handler.data['command_ids'].append(command.id)
def set_handler_data(handler, device, command_names, command_ids):
handler.data['device'] = device
handler.data['command_names'] = command_names
handler.data['command_ids'] = command_ids
def handle_connect(handler):
device, command_names, command_ids = init_data(handler)
set_handler_data(handler, device, command_names, command_ids)
send_data(handler, device, command_names)
handler.data['subscription'] = device.subscribe_insert_commands()
def handle_command_insert(handler, command):
assert command.id in handler.data['command_ids']
handler.data['command_ids'].remove(command.id)
if handler.data['command_ids']:
return
handler.data['subscription'].remove()
handler.data['device'].remove()
handler.disconnect()
test.run(handle_connect, handle_command_insert)
def handle_connect(handler):
device, command_names, command_ids = init_data(handler)
command_name = command_names[:1]
set_handler_data(handler, device, command_names, command_ids)
send_data(handler, device, command_name)
handler.data['subscription'] = device.subscribe_insert_commands(
names=command_name)
def handle_command_insert(handler, command):
assert command.id == handler.data['command_ids'][0]
handler.data['subscription'].remove()
handler.data['device'].remove()
handler.disconnect()
test.run(handle_connect, handle_command_insert)
def handle_connect(handler):
device, commands, command_ids = init_data(handler)
device_1 = handler.api.get_device(device.id)
device.remove()
try:
device_1.subscribe_insert_commands()
assert False
except ApiResponseError as api_response_error:
if test.is_user_admin:
assert api_response_error.code == 404
else:
assert api_response_error.code == 403
test.run(handle_connect)
def test_unsubscribe_insert_commands(test):
def handle_connect(handler):
device_id = test.generate_id('d-u-i-c', test.DEVICE_ENTITY)
device = handler.api.put_device(device_id)
subscription = device.subscribe_insert_commands()
subscription.remove()
try:
subscription.remove()
assert False
except SubscriptionError:
pass
device.remove()
test.run(handle_connect)
def test_subscribe_update_commands(test):
def init_data(handler):
device_id = test.generate_id('d-s-u-c', test.DEVICE_ENTITY)
command_names = ['%s-name-%s' % (device_id, i) for i in range(2)]
device = handler.api.put_device(device_id)
return device, command_names, []
def send_data(handler, device, command_names):
for command_name in command_names:
command = device.send_command(command_name)
handler.data['command_ids'].append(command.id)
command.status = 'status'
command.save()
def set_handler_data(handler, device, command_names, command_ids):
handler.data['device'] = device
handler.data['command_names'] = command_names
handler.data['command_ids'] = command_ids
def handle_connect(handler):
device, command_names, command_ids = init_data(handler)
set_handler_data(handler, device, command_names, command_ids)
send_data(handler, device, command_names)
handler.data['subscription'] = device.subscribe_update_commands()
def handle_command_update(handler, command):
assert command.id in handler.data['command_ids']
handler.data['command_ids'].remove(command.id)
if handler.data['command_ids']:
return
handler.data['subscription'].remove()
handler.data['device'].remove()
handler.disconnect()
test.run(handle_connect, handle_command_update=handle_command_update)
def handle_connect(handler):
device, command_names, command_ids = init_data(handler)
command_name = command_names[:1]
set_handler_data(handler, device, command_names, command_ids)
send_data(handler, device, command_name)
handler.data['subscription'] = device.subscribe_update_commands(
names=command_name)
def handle_command_update(handler, command):
assert command.id == handler.data['command_ids'][0]
handler.data['subscription'].remove()
handler.data['device'].remove()
handler.disconnect()
test.run(handle_connect, handle_command_update=handle_command_update)
def handle_connect(handler):
device, commands, command_ids = init_data(handler)
device_1 = handler.api.get_device(device.id)
device.remove()
try:
device_1.subscribe_update_commands()
assert False
except ApiResponseError as api_response_error:
if test.is_user_admin:
assert api_response_error.code == 404
else:
assert api_response_error.code == 403
test.run(handle_connect)
def test_unsubscribe_update_commands(test):
def handle_connect(handler):
device_id = test.generate_id('d-u-u-c', test.DEVICE_ENTITY)
device = handler.api.put_device(device_id)
subscription = device.subscribe_update_commands()
subscription.remove()
try:
subscription.remove()
assert False
except SubscriptionError:
pass
device.remove()
test.run(handle_connect)
def test_list_commands(test):
device_hive_api = test.device_hive_api()
server_timestamp = device_hive_api.get_info()['server_timestamp']
test_id = test.generate_id('d-l-c', test.DEVICE_ENTITY)
options = [{'command': '%s-name-1' % test_id, 'lifetime': 100,
'status': '1'},
{'command': '%s-name-2' % test_id, 'lifetime': 100,
'status': '2'}]
device = device_hive_api.put_device(test_id)
for option in options:
device.send_command(option['command'], lifetime=option['lifetime'],
status=option['status'])
commands = device.list_commands()
assert len(commands) == len(options)
commands = device.list_commands(start=server_timestamp)
assert len(commands) == len(options)
assert not device.list_commands(start=server_timestamp,
end=server_timestamp)
command_name = options[0]['command']
command, = device.list_commands(command=command_name)
assert command.command == command_name
status = options[0]['status']
command, = device.list_commands(status=status)
assert command.status == status
command_0, command_1 = device.list_commands(sort_field='command',
sort_order='ASC')
assert command_0.command == options[0]['command']
assert command_1.command == options[1]['command']
command_0, command_1 = device.list_commands(sort_field='command',
sort_order='DESC')
assert command_0.command == options[1]['command']
assert command_1.command == options[0]['command']
command, = device.list_commands(sort_field='command', sort_order='ASC',
take=1)
assert command.command == options[0]['command']
command, = device.list_commands(sort_field='command', sort_order='ASC',
take=1, skip=1)
assert command.command == options[1]['command']
device_1 = device_hive_api.get_device(test_id)
device.remove()
try:
device.list_commands()
assert False
except DeviceError:
pass
try:
device_1.list_commands()
assert False
except ApiResponseError as api_response_error:
if test.is_user_admin:
assert api_response_error.code == 404
else:
assert api_response_error.code == 403
def test_send_command(test):
device_hive_api = test.device_hive_api()
device_id = test.generate_id('d-s-c', test.DEVICE_ENTITY)
command_name = test.generate_id('d-s-c')
device = device_hive_api.put_device(device_id)
command = device.send_command(command_name)
assert command.device_id == device_id
assert isinstance(command.id, int)
assert isinstance(command.user_id, int)
assert command.command == command_name
assert not command.parameters
assert not command.lifetime
assert command.timestamp
assert command.last_updated
assert not command.status
assert not command.result
command_name = test.generate_id('d-s-c')
parameters = {'parameters_key': 'parameters_value'}
lifetime = 10
status = 'status'
result = {'result_key': 'result_value'}
command = device.send_command(command_name, parameters=parameters,
lifetime=lifetime, status=status,
result=result)
assert command.device_id == device_id
assert isinstance(command.id, int)
assert isinstance(command.user_id, int)
assert command.command == command_name
assert command.parameters == parameters
assert command.lifetime == lifetime
assert command.timestamp
assert command.last_updated
assert command.status == status
assert command.result == result
device_1 = device_hive_api.get_device(device_id)
device.remove()
try:
device.send_command(command_name)
assert False
except DeviceError:
pass
try:
device_1.send_command(command_name)
assert False
except ApiResponseError as api_response_error:
if test.is_user_admin:
assert api_response_error.code == 404
else:
assert api_response_error.code == 403
def test_subscribe_notifications(test):
def init_data(handler):
device_id = test.generate_id('d-s-n', test.DEVICE_ENTITY)
notification_names = ['%s-name-%s' % (device_id, i) for i in range(2)]
device = handler.api.put_device(device_id)
return device, notification_names, []
def send_data(handler, device, notification_names):
for notification_name in notification_names:
notification = device.send_notification(notification_name)
handler.data['notification_ids'].append(notification.id)
def set_handler_data(handler, device, notification_names,
notification_ids):
handler.data['device'] = device
handler.data['notification_names'] = notification_names
handler.data['notification_ids'] = notification_ids
def handle_connect(handler):
device, notification_names, notification_ids = init_data(handler)
set_handler_data(handler, device, notification_names, notification_ids)
send_data(handler, device, notification_names)
handler.data['subscription'] = device.subscribe_notifications()
def handle_notification(handler, notification):
assert notification.id in handler.data['notification_ids']
handler.data['notification_ids'].remove(notification.id)
if handler.data['notification_ids']:
return
handler.data['subscription'].remove()
handler.data['device'].remove()
handler.disconnect()
test.run(handle_connect, handle_notification=handle_notification)
def handle_connect(handler):
device, notification_names, notification_ids = init_data(handler)
notification_name = notification_names[:1]
set_handler_data(handler, device, notification_names, notification_ids)
send_data(handler, device, notification_name)
handler.data['subscription'] = device.subscribe_notifications(
names=notification_name)
def handle_notification(handler, notification):
assert notification.id == handler.data['notification_ids'][0]
handler.data['subscription'].remove()
handler.data['device'].remove()
handler.disconnect()
test.run(handle_connect, handle_notification=handle_notification)
def handle_connect(handler):
device, notification_names, notification_ids = init_data(handler)
device_1 = handler.api.get_device(device.id)
device.remove()
try:
device_1.subscribe_notifications()
assert False
except ApiResponseError as api_response_error:
if test.is_user_admin:
assert api_response_error.code == 404
else:
assert api_response_error.code == 403
test.run(handle_connect)
def test_unsubscribe_notifications(test):
def handle_connect(handler):
device_id = test.generate_id('d-u-n', test.DEVICE_ENTITY)
device = handler.api.put_device(device_id)
subscription = device.subscribe_notifications()
subscription.remove()
try:
subscription.remove()
assert False
except SubscriptionError:
pass
device.remove()
test.run(handle_connect)
def list_notifications(device, **params):
notifications = device.list_notifications(**params)
return [notification for notification in notifications
if notification.notification[0] != '$']
def test_list_notifications(test):
device_hive_api = test.device_hive_api()
server_timestamp = device_hive_api.get_info()['server_timestamp']
test_id = test.generate_id('d-l-n', test.DEVICE_ENTITY)
options = [{'notification': '%s-name-1' % test_id,
'parameters': {'parameters_key': '1'}},
{'notification': '%s-name-2' % test_id,
'parameters': {'parameters_key': '2'}}]
device = device_hive_api.put_device(test_id)
for option in options:
device.send_notification(option['notification'],
parameters=option['parameters'])
notifications = list_notifications(device)
assert len(notifications) == len(options)
notifications = list_notifications(device, start=server_timestamp)
assert len(notifications) == len(options)
assert not list_notifications(device, start=server_timestamp,
end=server_timestamp)
notification_name = options[0]['notification']
notification, = list_notifications(device,
notification=notification_name)
assert notification.notification == notification_name
notification_0, notification_1 = list_notifications(
device, sort_field='notification', sort_order='ASC')
assert notification_0.notification == options[0]['notification']
assert notification_1.notification == options[1]['notification']
notification_0, notification_1 = list_notifications(
device, sort_field='notification', sort_order='DESC')
assert notification_0.notification == options[1]['notification']
assert notification_1.notification == options[0]['notification']
notification_name = test_id
notification_0 = device.send_notification(notification_name)
notification_1 = device.send_notification(notification_name)
notification, = device.list_notifications(
notification=notification_name, sort_field='timestamp',
sort_order='ASC', take=1)
assert notification.id == notification_0.id
notification, = device.list_notifications(
notification=notification_name, sort_field='timestamp',
sort_order='ASC', take=1, skip=1)
assert notification.id == notification_1.id
device_1 = device_hive_api.get_device(test_id)
device.remove()
try:
device.list_notifications()
assert False
except DeviceError:
pass
try:
        device_1.list_notifications()
assert False
except ApiResponseError as api_response_error:
if test.is_user_admin:
assert api_response_error.code == 404
else:
assert api_response_error.code == 403
def test_send_notification(test):
device_hive_api = test.device_hive_api()
device_id = test.generate_id('d-s-n', test.DEVICE_ENTITY)
notification_name = test.generate_id('d-s-n')
device = device_hive_api.put_device(device_id)
notification = device.send_notification(notification_name)
assert notification.device_id == device_id
assert isinstance(notification.id, int)
assert notification.notification == notification_name
assert not notification.parameters
assert notification.timestamp
parameters = {'parameters_key': 'parameters_value'}
notification = device.send_notification(notification_name,
parameters=parameters)
assert notification.device_id == device_id
assert isinstance(notification.id, int)
assert notification.notification == notification_name
assert notification.parameters == parameters
assert notification.timestamp
device_1 = device_hive_api.get_device(device_id)
device.remove()
try:
device.send_notification(notification_name)
assert False
except DeviceError:
pass
try:
device_1.send_notification(notification_name)
assert False
except ApiResponseError as api_response_error:
if test.is_user_admin:
assert api_response_error.code == 404
else:
assert api_response_error.code == 403
|
StarcoderdataPython
|
171543
|
#!/usr/bin/env python3
"""scapy-dhcp-listener.py
Listen for DHCP packets using scapy to learn when LAN
hosts request IP addresses from DHCP Servers.
Copyright (C) 2018 <NAME>
https://jcutrer.com/python/scapy-dhcp-listener
License Dual MIT, 0BSD
Extended by jkulawik, 2020
"""
from __future__ import print_function
from scapy.all import *
from scapy.layers.dhcp import DHCP
from scapy.layers.dhcp import Ether
from scapy.layers.dhcp import BOOTP
from scapy.layers.dhcp import IP
# Logging
from datetime import date
from inspect import getsourcefile
from sc_utils import mac_vendor
from scapy.layers.l2 import getmacbyip
whitelist_file = 'MAC_whitelist.txt'
__version__ = "0.0.4"
def add_zero_to_time(time):
    if time < 10:
        return '0' + str(time)
    else:
        return str(time)
# print_and_log current time
def pal_time():
todays_date = datetime.now()
hour = add_zero_to_time(todays_date.hour)
minute = add_zero_to_time(todays_date.minute)
curr_time = '\n{}:{}'.format(hour, minute)
print_and_log(curr_time)
def check_whitelist(mac):
with open(whitelist_file, 'r') as file:
whitelist = file.read()
return mac in whitelist
def print_and_log(message):
print(message)
log(message)
# Print a message into today's log in /logs
def log(message):
dir = 'logs'
if not os.path.exists(dir):
# The folder gets created in the run directory automatically...
os.makedirs(dir)
# ...but for file writing a full path is needed
current_dir = os.path.dirname(getsourcefile(lambda: 0))
path = os.path.join(current_dir, 'logs')
file_name = str(date.today()) + '-log.txt'
file_path = os.path.join(path, file_name)
file = open(file_path, "a")
file.write(message + '\n')
file.close()
# Fixup function to extract dhcp_options by key
def get_option(dhcp_options, key):
must_decode = ['hostname', 'domain', 'vendor_class_id']
try:
for i in dhcp_options:
if i[0] == key:
# If DHCP Server Returned multiple name servers
# return all as comma separated string.
if key == 'name_server' and len(i) > 2:
return ",".join(i[1:])
# domain and hostname are binary strings,
# decode to unicode string before returning
elif key in must_decode:
return i[1].decode()
else:
return i[1]
except:
pass
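# Editor's illustration (not part of the original script): packet[DHCP].options
# is a list that mixes (key, value) tuples with padding/terminator strings,
# roughly like
#   [('message-type', 1), ('hostname', b'my-laptop'),
#    ('vendor_class_id', b'MSFT 5.0'), 'end']
# which is why get_option() walks the list, matches i[0] == key, and decodes the
# byte-string values listed in must_decode.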
def handle_dhcp_packet(packet):
# Match DHCP discover
if DHCP in packet and packet[DHCP].options[0][1] == 1:
print('---')
print('New DHCP Discover')
#print(packet.summary())
#print(ls(packet))
hostname = get_option(packet[DHCP].options, 'hostname')
mac = packet[Ether].src
if check_whitelist(mac):
print(f'Whitelisted host {hostname} asked for an IP.')
print(f'Host vendor: {mac_vendor.get_str(mac)}')
print(f'Host MAC: {mac}')
return
pal_time()
print_and_log(f"Unknown host {hostname} asked for an IP.")
print_and_log(f'Host vendor: {mac_vendor.get_str(mac)}')
print_and_log(f'Host MAC: {mac}')
# Match DHCP ack
elif DHCP in packet and packet[DHCP].options[0][1] == 5\
and packet[BOOTP].yiaddr != '0.0.0.0':
print('---')
print('New DHCP Ack')
#print(packet.summary())
#print(ls(packet))
subnet_mask = get_option(packet[DHCP].options, 'subnet_mask')
lease_time = get_option(packet[DHCP].options, 'lease_time')
router = get_option(packet[DHCP].options, 'router')
name_server = get_option(packet[DHCP].options, 'name_server')
server_mac = packet[Ether].src
server_ip = packet[IP].src
sus_ip = packet[BOOTP].yiaddr
sus_mac = str(getmacbyip(sus_ip))
sus_vendor = mac_vendor.get_str(sus_mac)
if check_whitelist(sus_mac):
print(f"DHCP Server {server_ip} ({server_mac}) acknowledged a whitelisted device on IP {sus_ip}")
            print(f'Host vendor: {sus_vendor}')
print(f'Host MAC: {sus_mac}\n')
return
pal_time()
print_and_log(f"DHCP Server {server_ip} ({server_mac}) acknowledged unknown device on IP {sus_ip}")
        print_and_log(f'Unknown host vendor: {sus_vendor}')
print_and_log(f'Unknown host MAC: {sus_mac}\n')
print(f"DHCP Options: subnet_mask: {subnet_mask}, lease_time: "
f"{lease_time}, router: {router}, name_server: {name_server}")
# Match DHCP inform
elif DHCP in packet and packet[DHCP].options[0][1] == 8:
print('---')
print('New DHCP Inform')
#print(packet.summary())
#print(ls(packet))
hostname = get_option(packet[DHCP].options, 'hostname')
vendor_class_id = get_option(packet[DHCP].options, 'vendor_class_id')
print(f"DHCP Inform from {packet[IP].src} ({packet[Ether].src}) "
f"hostname: {hostname}, vendor_class_id: {vendor_class_id}")
else:
print('---')
print('Some Other DHCP Packet')
print(packet.summary())
#print(ls(packet))
return
# This is just to use this script as a dependency
def start_sniffing():
# Create MAC whitelist
if not os.path.exists(whitelist_file):
open(whitelist_file, "w+")
print('Sniffing DHCP broadcasts...')
print('Press Ctrl+C to stop.')
sniff(filter="udp and (port 67 or 68)", prn=handle_dhcp_packet)
if __name__ == "__main__":
sniff(filter="udp and (port 67 or 68)", prn=handle_dhcp_packet)
|
StarcoderdataPython
|
45293
|
<filename>Problems/Dynamic Programming/Easy/BuySellStock1/buy_sell_stock_1.py
from typing import List
def max_profit_1(prices: List[int]) -> int:
min_price, max_profit = prices[0], 0
for price in prices:
min_price = min(min_price, price)
profit = price - min_price
max_profit = max(max_profit, profit)
return max_profit
def max_profit_2(prices: List[int]) -> int:
ans, dt = 0, 0
for i in range(0, len(prices) - 1):
q = prices[i + 1] - prices[i]
dt = max(dt + q, q)
ans = max(ans, dt)
return ans
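# Quick sanity check (added illustration, not part of the original solution file):
# both implementations should report a best profit of 5 for this classic example
# (buy at 1, sell at 6).
if __name__ == "__main__":
    sample_prices = [7, 1, 5, 3, 6, 4]
    print(max_profit_1(sample_prices))  # expected: 5
    print(max_profit_2(sample_prices))  # expected: 5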
|
StarcoderdataPython
|
101975
|
#!/usr/local/bin/python3
from random import randint
def sortea_numero():
return randint(1, 6)
def eh_impar(numero: float):
return numero % 2 != 0
def acertou(numero_sorteado: float, numero: float):
return numero_sorteado == numero
if __name__ == '__main__':
numero_sorteado = sortea_numero()
for it in range(1, 7):
if eh_impar(it):
continue
if acertou(numero_sorteado, it):
print('ACERTOU!', numero_sorteado)
break
else:
print('NÃO ACERTOU O NUMERO!')
|
StarcoderdataPython
|
1663544
|
#!/usr/bin/env python
# encoding: utf-8
"""
@version: 0.1
@author: feikon
@license: Apache Licence
@contact: <EMAIL>
@site: https://github.com/feikon
@software: PyCharm
@file: exceptin_handle.py
@time: 2017/6/8 21:20
"""
import logging
try:
print('try...')
r = 10 / 0
print('result:', r)
except ZeroDivisionError as e:
print('except:', e)
finally:  # a finally clause, if present, always executes
print('finally...')
print('END')
# case 1: log the error: all error information is shown and execution continues
def foo1(s):
return 10/int(s)
def bar1(s):
return foo1(s) * 2
def main1():
try:
bar1('0')
except Exception as e:
logging.exception(e)
main1()
print('GO ON 1')
# case 2: for comparison, handle without logging: not every error location is shown, execution continues
def foo2(s):
return 10/int(s)
def bar2(s):
return foo2(s) * 2
def main2():
try:
bar2('0')
except ZeroDivisionError as e:
print('ZeroDivisionError:', e)
main2()
print('GO ON 2')
# case 3: execution does not continue; the full traceback with every error location is shown
def foo3(s):
return 10/int(s)
def bar3(s):
return foo3(s)*2
def main3():
bar3('0')
main3()
print('GO ON 3')
# raise
def foo4(s):
n = int(s)
if n == 0:
raise ValueError('invalid value: %s' % s)
return 10 / n
def bar4():
try:
foo4('0')
except ValueError as e:
print('ValueError!')
        raise  # a bare raise re-raises the current error as-is
bar4()
print('GO ON 4')
# Conclusions:
# Execution continues when the error is caught and handled, e.g. except ZeroDivisionError..
# The full error trace is shown when:
# 1. using logging
# 2. doing no error handling at all
|
StarcoderdataPython
|
128266
|
<filename>CloudflareAPI/core/base.py
#!/usr/bin/env python3
from requests import Session
from typing import Dict, Optional
from .network import Request
from .configuration import Config
config = Config()
class CFBase:
def verify_token(self, token) -> bool:
url = "https://api.cloudflare.com/client/v4/user/tokens/verify"
session = Session()
session.headers.update({"Authorization": f"Bearer {token}"})
response = session.get(url).json()
if response["success"] and "result" in response:
result = response["result"]
if "status" in result:
return result["status"] == "active"
return False
def validate(self, token: Optional[str] = None):
if token is not None:
config.token = token
while not self.verify_token(config.token):
print("Error: Invalid API Token")
config.read_from_user()
print("Valid API Token found")
def props(self) -> Dict[str, str]:
return self.__dict__
def get_request(self, path: str) -> Request:
return Request(token=config.token, path=path)
def parse_params(self, params: Dict[str, str]) -> Dict[str, str]:
parsed = {}
if params is not None:
for key, value in params.items():
if value:
parsed.update({key: value})
return parsed
|
StarcoderdataPython
|
1780216
|
import requests
import string
url = 'https://cat-step.disasm.me/'
prefix = 'spbctf{'
suffix = '}'
flag = '#' * 28
alpha = string.ascii_letters + string.digits + '_-'
left = 0
while left != len(flag):
for a in alpha:
flag = flag[:left] + a + flag[left + 1:]
attempt = prefix + flag + suffix
print(f'Sending {attempt}')
r = requests.post(url, data={'flag': attempt}).json()
if r['length'] < len(flag) - left:
left += 1
break
print('The flag is', prefix + flag + suffix)
|
StarcoderdataPython
|
3230368
|
from collections.abc import MutableSequence
from typing import Iterable, Union, Sequence
from google.protobuf.pyext._message import RepeatedCompositeContainer
from ...proto.jina_pb2 import DocumentProto
if False:
from ..document import Document
__all__ = ['DocumentSet']
class DocumentSet(MutableSequence):
""":class:`DocumentSet` is a mutable sequence of :class:`Document`,
it gives an efficient view of a list of Document. One can iterate over it like
a generator but ALSO modify it, count it, get item.
"""
def __init__(self, docs_proto: Union['RepeatedCompositeContainer', Sequence['Document']]):
super().__init__()
self._docs_proto = docs_proto
self._docs_map = {}
def insert(self, index: int, doc: 'Document') -> None:
self._docs_proto.insert(index, doc.as_pb_object)
def __setitem__(self, key, value: 'Document'):
if isinstance(key, int):
self._docs_proto[key].CopyFrom(value.as_pb_object)
elif isinstance(key, str):
return self._docs_map[key].CopyFrom(value.as_pb_object)
else:
raise IndexError(f'do not support this index {key}')
def __delitem__(self, index):
del self._docs_proto[index]
def __len__(self):
return len(self._docs_proto)
def __iter__(self):
from ..document import Document
for d in self._docs_proto:
yield Document(d)
def __getitem__(self, item):
from ..document import Document
if isinstance(item, int):
return Document(self._docs_proto[item])
elif isinstance(item, str):
return Document(self._docs_map[str(item)])
else:
raise IndexError(f'do not support this index {item}')
def append(self, doc: 'Document'):
self._docs_proto.append(doc.as_pb_object)
def add(self, doc: 'Document'):
"""Shortcut to :meth:`append`, do not override this method """
self.append(doc)
def extend(self, iterable: Iterable['Document']) -> None:
self._docs_proto.extend(doc.as_pb_object for doc in iterable)
def clear(self):
del self._docs_proto[:]
def reverse(self):
"""In-place reverse the sequence """
if isinstance(self._docs_proto, RepeatedCompositeContainer):
size = len(self._docs_proto)
hi_idx = size - 1
for i in range(int(size / 2)):
tmp = DocumentProto()
tmp.CopyFrom(self._docs_proto[hi_idx])
self._docs_proto[hi_idx].CopyFrom(self._docs_proto[i])
self._docs_proto[i].CopyFrom(tmp)
hi_idx -= 1
elif isinstance(self._docs_proto, list):
self._docs_proto.reverse()
def build(self):
"""Build a doc_id to doc mapping so one can later index a Document using
doc_id as string key
"""
self._docs_map = {d.id: d for d in self._docs_proto}
def sort(self, *args, **kwargs):
self._docs_proto.sort(*args, **kwargs)
|
StarcoderdataPython
|
39267
|
<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf8 -*-
from setuptools import setup
setup(
name='certbot_adc',
version='0.1',
description="perform certbot auto dns challenge with DNS provider's API",
url='http://github.com/btpka3/certbot-auto-dns-challenge',
author='btpka3',
author_email='<EMAIL>',
license='Apache License v2.0',
packages=['certbot_adc'],
install_requires=[
"aliyun-python-sdk-core>=2.0.7",
"aliyun-python-sdk-alidns>=2.0.7",
"PyYAML>=3.12",
"validate_email>=1.3",
"qcloudapi-sdk-python>=2.0.9"
],
scripts=[
'bin/certbot-adc-check-conf',
'bin/certbot-adc-manual-auth-hook',
'bin/certbot-adc-manual-cleanup-hook',
],
zip_safe=False
)
|
StarcoderdataPython
|
46906
|
import json
from bson import ObjectId
from pymongo import ReturnDocument
from .exceptions import DBException
class DBActionsMixin:
def __init__(self, model, db):
self._model_cls = model
self._db = db
def add(self, item):
db_obj = self._collection.insert_one(item.prepare_for_db())
model = self._model_cls(id=str(db_obj.inserted_id))
return self.query(**model.db_key()).pop()
def query(self, **query_params):
return [
self._model_cls.from_db_object(db_obj) for db_obj in self._collection.find(query_params)
]
def remove(self, todo_item):
db_obj = self._collection.find_one_and_delete(todo_item.db_key())
if db_obj:
return self._model_cls.from_db_object(db_obj)
raise DBException(f'Unable to remove. Object with id={todo_item.id} is absent.')
def update(self, old_todo_item, new_todo_item):
if not self.query(**old_todo_item.db_key()):
raise DBException(f'Unable to update. Object with id={old_todo_item.id} is absent.')
db_obj = self._collection.find_one_and_update(
old_todo_item.db_key(),
{'$set': new_todo_item.prepare_for_db(with_empty_fields=False)},
return_document=ReturnDocument.AFTER
)
return self._model_cls.from_db_object(db_obj)
class ToDBModelMixin:
    def prepare_for_db(self, with_empty_fields=True):
        # copy the instance dict so the original object keeps its `id` attribute
        obj = dict(vars(self))
        del obj['id']
        # keep all fields when empty values are allowed, otherwise drop falsy values
        filter_func = (lambda x: True) if with_empty_fields else (lambda x: x)
        obj = {key: str(value) for key, value in obj.items() if filter_func(value)}
        return obj
def db_key(self):
return {'_id': ObjectId(self.id)}
class ModelSerializeMixin:
def __str__(self):
return json.dumps(self)
class ModelContentMixin:
@property
def is_empty(self):
return not bool([v for v in vars(self).values() if v])
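# Usage sketch (added; the TodoItem model, Mongo client and `_collection` attribute
# below are hypothetical -- the mixin only assumes the concrete class provides
# `self._collection` and a `_model_cls` with from_db_object/prepare_for_db/db_key):
#
#   class TodoStorage(DBActionsMixin):
#       def __init__(self, model, db):
#           super().__init__(model, db)
#           self._collection = db.todo_items
#
#   storage = TodoStorage(TodoItem, pymongo_client.todo_db)
#   created = storage.add(TodoItem(text='buy milk'))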
|
StarcoderdataPython
|
4842928
|
import numpy as np
from typing import List, Dict
def calculate_term_frequencies(split_document: List[str], index_map: Dict[str, int]) -> np.ndarray:
occurrences = np.zeros((len(index_map),), dtype=np.uint32)
for word in split_document:
if word in index_map:
occurrences[index_map[word]] += 1
return occurrences / len(split_document)
def calculate_inverse_document_frequencies(documents: List[str], index_map: Dict[str, int]) -> np.ndarray:
if len(documents) == 1:
# TODO: is this a valid action for one-document collection?
return np.ones((len(index_map),), dtype=np.uint32)
occurrences = np.zeros((len(index_map),), dtype=np.uint32)
for document in documents:
split_document = document.split()
added_words = set()
for word in split_document:
if word in index_map and word not in added_words:
added_words.add(word)
occurrences[index_map[word]] += 1
with np.errstate(divide='ignore'):
idfs = np.log(len(documents) / occurrences)
idfs[idfs == np.inf] = 0.0
return idfs
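# Minimal usage sketch (added for illustration; the corpus, vocabulary and index_map
# below are made up). It builds a toy vocabulary index and prints the term-frequency
# vector of one document and the IDF vector of the whole corpus.
if __name__ == "__main__":
    corpus = ["the cat sat", "the dog sat", "the cat ran"]
    vocabulary = ["the", "cat", "dog", "sat", "ran"]
    index_map = {word: i for i, word in enumerate(vocabulary)}
    print(calculate_term_frequencies(corpus[0].split(), index_map))
    print(calculate_inverse_document_frequencies(corpus, index_map))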
|
StarcoderdataPython
|
3210352
|
<reponame>gmjustforfun/code
import time
import matplotlib.pyplot as plt
import numpy as np
from pso.APSO import APSO
from pso.PSO import PSO
import pandas as pd
def sphere2dim(x):
'''
    this function is the target function of "population_Distribution_Information_Of_PSO"
    r is initially 5 and changes when iter == 50
    :param x: variable
    :param r: parameter, the center of the sphere
:return: result
'''
x1, x2 = x
return (x1+5)**2+(x2+5)**2
def sphere(p):
    # Sphere function
out_put = 0
for i in p:
out_put += (i+5) ** 2
return out_put
def sphere01(p):
    # Sphere function
out_put = 0
for i in p:
out_put += i ** 2
return out_put
def schwefels(x):
out_put = 0
out_put01 = 1
for i in x:
out_put += abs(i+5)
out_put01 = abs(i+5)*out_put01
out_put = out_put01+out_put
return out_put
def rosenbrock(p):
n_dim = len(p)
res = 0
for i in range(n_dim - 1):
res += 100 * np.square(np.square(p[i]+5) - p[i + 1]+5) + np.square(p[i]+5 - 1)
return res
def schewel(x):
out_put = 0
for i in x:
out_put += -(i+5)*np.sin(np.sqrt(abs(i+5)))
return out_put
"----------------------------------"
# f1 done
def Sphere(p):
    # Sphere function
out_put = 0
for i in p:
out_put += i ** 2
return out_put
# f2 done
def Sch222(x):
out_put = 0
out_put01 = 1
for i in x:
out_put += abs(i)
out_put01 = abs(i)*out_put01
out_put = out_put01+out_put
return out_put
# f3 done
def Quadric(x):
output = 0
# print(x.shape[0])
for i in range(x.shape[0]):
output += np.square(np.sum(x[0:i+1]))
# print(np.square(np.sum(x[0:i+1])))
return output
# f4 done
def Schl(x):
# print(np.max(np.abs(x)))
return np.max(np.abs(x))
# f5 done
def Step(x):
output = 0
for i in x:
output += (np.floor(i+0.5))**2
return output
# f6 done
def Noise(x):
output = 0
cnt = 1
for i in x:
output = cnt * (i**4) + output
cnt += 1
output += np.random.rand()
return output
# f7 done
def Rosenbrock(p):
'''
-2.048<=xi<=2.048
    The global optimum lies in a smooth, long and narrow parabolic valley, which makes it hard
    for the algorithm to determine the search direction and locate the optimum.
    The minimum value 0 is found at (1,...,1)
:param p:
:return:
'''
n_dim = len(p)
res = 0
for i in range(n_dim - 1):
res += 100 * np.square(np.square(p[i]) - p[i + 1]) + np.square(p[i] - 1)
return res
# f8 is problematic, skip it
# this is f8 from the APSO paper
def Schewel(x):
out_put = 0
for i in x:
out_put += -i*np.sin(np.sqrt(abs(i)))
return out_put
# f9 done
def Rastrigin(p):
'''
    Multimodal function, a typical nonlinear multimodal benchmark
    -5.12<=xi<=5.12
    There are 10n local minima in this range with peaks of varying height, so the global optimum is hard to find
has a global minimum at x = 0 where f(x) = 0
'''
return np.sum([np.square(x) - 10 * np.cos(2 * np.pi * x) + 10 for x in p])
# f10
def Ackley(x):
part1 = 0
part2 = 0
for i in x:
part1 += (i**2)
part2 += np.cos(2 * np.pi * i)
left = 20 * np.exp(-0.2 * ((part1 / x.shape[0]) ** .5))
right = np.exp(part2 / x.shape[0])
return -left - right + 20 + np.e
# f11 ok
def Griewank(p):
'''
    There are many local minima; their number depends on the problem dimension.
    A typical nonlinear multimodal function with a large search space, a complex multimodal problem that optimizers find hard.
    The global minimum 0 is reached at (0,...,0)
-600<=xi<=600
'''
part1 = [np.square(x) / 4000 for x in p]
part2 = [np.cos(x / np.sqrt(i + 1)) for i, x in enumerate(p)]
return np.sum(part1) - np.prod(part2) + 1
class DemoTrailForAPSO:
def __init__(self, test_question=None):
        self.test_question = test_question  # identifies which experiment to run
def population_Distribution_Information_Of_PSO(self):
"""
        Experiment 1:
        This function is used to track the population distribution of PSO.
        It runs the GPSO proposed in the article "A modified PSO".
        The article does not give the specific parameter settings. We set w=0.8, c1=c2=2.
        MAXITER=100.
        The target function: f = (x1-r)^2+(x2-r)^2
        The constraint: xi in [-10,10]
        This function plots three figures.
        Note: in the reference, r changes from -5 to 5 at generation 50.
        A check is added in the PSO main loop: once the iteration count reaches that point, self.func is swapped.
        The numerical results were correct, but the population distribution plots showed the swarm failing to escape at generation 50???????
        Solved!!!
:return:
"""
"----------------------------1.计算,得到进化曲线图和结果"
pso = PSO(func=sphere2dim, dim=2, pop=100, max_iter=100, lb=[-10, -10], ub=[10, 10], w=0.8, c1=1.5, c2=1)
pso.run()
# print(pso.record_value['gbest_each_generation']) # 给出每一代的适应值,可以绘制进化曲线图
print(pso.gbest_y_hist)
plt.plot(pso.gbest_y_hist)
plt.show() # 绘制进化曲线图
"------------------------------2.各代绘制种群分布图"
plt.figure(1)
        ax1 = plt.subplot(2, 3, 1)
        plt.title('gen 1')
        ax2 = plt.subplot(2, 3, 2)
        plt.title('gen 25')
        ax3 = plt.subplot(2, 3, 3)
        plt.title('gen 49')
        ax4 = plt.subplot(2, 3, 4)
        plt.title('gen 50')
        ax5 = plt.subplot(2, 3, 5)
        plt.title('gen 60')
        ax6 = plt.subplot(2, 3, 6)
        plt.title('gen 80')
        # select ax1
        plt.sca(ax1)
        # these two lines set a default font so that non-ASCII labels display correctly in matplotlib
        plt.rcParams['font.sans-serif'] = ['SimHei']
        plt.rcParams['axes.unicode_minus'] = False
        # axes: set labels and ranges
        plt.xlabel('x1')
        plt.ylabel('x2')
        plt.xlim(xmax=10, xmin=-10)
        plt.ylim(ymax=10, ymin=-10)
        # set the color
        color = '#00CED1'
        # set the marker areas
        area = np.pi * 2.5
        area1 = np.pi * 5
        "-------- static snapshots (optional)"
        # generation 0, initial distribution
for particle_no in range(pso.pop):
plt.scatter(pso.record_value['X'][0][particle_no][0], pso.record_value['X'][0][particle_no][1], s=area, c=color, alpha=0.4)
plt.scatter(pso.record_value['gbest_each_generation'][0][0],
pso.record_value['gbest_each_generation'][0][1], s=area1, c='#ff0000', alpha=0.4)
        # select ax2, generation 25
plt.sca(ax2)
for particle_no in range(pso.pop):
plt.scatter(pso.record_value['X'][24][particle_no][0], pso.record_value['X'][24][particle_no][1], s=area,
c=color, alpha=0.4)
plt.scatter(pso.record_value['gbest_each_generation'][24][0],
pso.record_value['gbest_each_generation'][24][1], s=area1, c='#ff0000', alpha=0.4)
        # select ax3, generation 49
plt.sca(ax3)
for particle_no in range(pso.pop):
plt.scatter(pso.record_value['X'][48][particle_no][0], pso.record_value['X'][48][particle_no][1], s=area,
c=color, alpha=0.4)
plt.scatter(pso.record_value['gbest_each_generation'][48][0],
pso.record_value['gbest_each_generation'][48][1], s=area1, c='#ff0000', alpha=0.4)
        # select ax4, generation 50
        plt.sca(ax4)
        for particle_no in range(pso.pop):
            plt.scatter(pso.record_value['X'][49][particle_no][0], pso.record_value['X'][49][particle_no][1], s=area,
c=color, alpha=0.4)
plt.scatter(pso.record_value['gbest_each_generation'][49][0],
pso.record_value['gbest_each_generation'][49][1], s=area1, c='#ff0000', alpha=0.4)
        # select ax5, generation 60
plt.sca(ax5)
for particle_no in range(pso.pop):
plt.scatter(pso.record_value['X'][59][particle_no][0], pso.record_value['X'][59][particle_no][1], s=area,
c=color, alpha=0.4)
plt.scatter(pso.record_value['gbest_each_generation'][59][0],
pso.record_value['gbest_each_generation'][59][1], s=area1, c='#ff0000', alpha=0.4)
        # select ax6, generation 80
plt.sca(ax6)
for particle_no in range(pso.pop):
plt.scatter(pso.record_value['X'][79][particle_no][0], pso.record_value['X'][79][particle_no][1], s=area,
c=color, alpha=0.4)
plt.scatter(pso.record_value['gbest_each_generation'][79][0],
pso.record_value['gbest_each_generation'][79][1], s=area1, c='#ff0000', alpha=0.4)
plt.show()
"--------绘制动态图,完成!"
plt.ion() # 开启交互模式
plt.subplots()
plt.xlim(-10, 10)
plt.ylim(-10, 10)
plt.title(str(1))
plt.scatter(pso.record_value['gbest_each_generation'][0][0],
pso.record_value['gbest_each_generation'][0][1], s=area1, c='#ff0000', alpha=0.4)
for particle_no in range(pso.pop):
plt.scatter(pso.record_value['X'][0][particle_no][0], pso.record_value['X'][0][particle_no][1], s=area, c=color, alpha=0.4)
        for generation_no in range(pso.max_iter-1):  # for each generation, indices 0-98, i.e. generations 2-100
plt.clf()
plt.xlim(-10, 10)
plt.ylim(-10, 10)
plt.title(str(generation_no+2))
plt.scatter(pso.record_value['gbest_each_generation'][generation_no+1][0], pso.record_value['gbest_each_generation'][generation_no+1][1], s=area1, c='#ff0000', alpha=0.4)
            for particle_no in range(pso.pop):  # scatter-plot each particle
plt.scatter(pso.record_value['X'][generation_no+1][particle_no][0], pso.record_value['X'][generation_no+1][particle_no][1],
s=area,
c=color, alpha=0.4)
plt.pause(0.1)
plt.ioff()
plt.show()
def evolutionary_information_f(self):
"""
        Experiment 2
        This function is used to plot the evolutionary factor f
        no.   name          dim     range        optima     acceptance
        the benchmark functions are f1: Sphere      30dim  [-100,100]       0        0.01   unimodal, time-varying
                                    f2: Schwefel's  30dim  [-10,10]         0        0.01   unimodal, time-varying
                                    f4: Rosenbrock  30dim  [-10,10]         0        100    unimodal, time-varying
                                    f7: Schwefel    30dim  [-500,500]  -12569.5   -10000    multimodal
        :return:
        Plot the change of f for the four functions
:return:
"""
        # trend of the evolutionary factor for the 2-D time-varying f1 function
pso = APSO(func=sphere2dim, dim=2, pop=100, max_iter=100, lb=np.ones(2)*(-10), ub=np.ones(2)*10, w=0.9, c1=2, c2=2, acceptance=0.01)
pso.run()
        print(pso.record_value['f'])  # f value of each generation, can be used to plot the trend
        plt.plot(pso.record_value['f'])
        plt.show()  # plot the curve
        # trend of the evolutionary factor for the 2-D time-varying f2 function
# pso = APSO(func=schwefels, dim=2, pop=100, max_iter=100, lb=np.ones(2) * (-10), ub=np.ones(2) * 10, w=0.9,
# c1=2, c2=2, acceptance=None)
# pso.run()
# print(pso.record_value['f']) # 给出每一代的适应值,可以绘制进化曲线图
# plt.plot(pso.record_value['f'])
# plt.show() # 绘制进化曲线图
        # trend of the evolutionary factor for the 2-D time-varying f4 function
# pso = APSO(func=rosenbrock, dim=2, pop=100, max_iter=100, lb=np.ones(2) * (-10), ub=np.ones(2) * 10, w=0.9,
# c1=2, c2=2, acceptance=None)
# pso.run()
# print(pso.record_value['f']) # 给出每一代的适应值,可以绘制进化曲线图
# plt.plot(pso.record_value['f'])
# plt.show() # 绘制进化曲线图
        # evolutionary factor curve for the multimodal f7 function
# pso = APSO(func=schewel, dim=2, pop=100, max_iter=100, lb=np.ones(2) * (-500), ub=np.ones(2) * 500, w=0.9,
# c1=2, c2=2, acceptance=None)
# pso.run()
# print(pso.record_value['f']) # 给出每一代的适应值,可以绘制进化曲线图
# plt.plot(pso.record_value['f'])
# plt.show() # 绘制进化曲线图
def lala(self):
# vec1 = np.array([2,3,2])
# vec2 = np.array([4,8,1])
# d = np.sqrt(np.sum(np.square(vec2 - vec1)))
# print(d)
# d = np.sqrt(30)
# print(d)
# a = np.array([2,3,1,5,6])
# print(a.min())
# print(a.argmin())
# zoom_o2_5 = Interval(2, 5) #闭区间
#
# print(zoom_o2_5)
#
# print(2 in zoom_o2_5)
#
# print(5 in zoom_o2_5)
# param = random.uniform(0.05, 0.1)
# print(param)
X = np.random.uniform(low=-10, high=10, size=(10, 5))
print(X)
print(X[1][:])
X[2][:]=X[1][:]
def trail_APSO_mean_FEs(self):
"""
        Collect the mean number of iterations GPSO needs to reach the acceptance accuracy; max iterations 200000, 30 trials
:return:
"""
# FE = [] # 记录30次试验的达到精度的迭代次数,需要统计平均,表3
# BEST = [] # 记录30次试验的最好解 用于统计均值和标准差 表VI
#
# arange = (i for i in range(1))
# time_start = time.process_time()
# for x in arange:
# print("正在运行中----------------------")
# pso = APSO(func=sphere01, dim=30, pop=20, max_iter=10000, lb=np.ones(30) * (-100), ub=np.ones(30) * 100, w=0.9, c1=2, c2=2, acceptance=0.01)
# pso.run()
# FE.append(pso.acceptance_iter)
# BEST.append(pso.gbest_y)
# time_end = time.process_time()
# print('the time for 30 GPSO trials of f1 is %d ' %(time_end-time_start))
# print('MEAN FEs IN OBTAINING ACCEPTABLE SOLUTIONS BY GPSO WITHOUT PARAMETER ADAPTATION %d ' % (np.mean(FE)))
# print('MEAN SOLUTIONS OF THE 30 TRIALS OBTAINED BY GPSO WITHOUT PARAMETER ADAPTATION F1:%d'%(np.mean(BEST)))
# print('STD OF THE 30 TRIALS OBTAINED BY GPSO WITHOUT PARAMETER ADAPTATION F1:%d' % (np.std(BEST)))
# print('Best SOLUTIONS OF THE 30 TRIALS OBTAINED BY GPSO WITHOUT PARAMETER ADAPTATION F1:%d' % (np.min(BEST)))
# print(FE)
# print(BEST)
g = 10000
times = 30
table = np.zeros((2, 10))
for i in range(times):
optimizer = APSO(func=sphere01, dim=30, pop=20, max_iter=g, lb=np.ones(30) * (-100), ub=np.ones(30) * 100,
w=0.9, c1=2, c2=2, acceptance=0.01)
start = time.time()
optimizer.run()
end = time.time()
table[0, 0] += optimizer.gbest_y
table[1, 0] += end - start
optimizer = APSO(func=Sch222, dim=30, pop=20, max_iter=g, lb=np.ones(30) * (-10), ub=np.ones(30) * 10,
w=0.9, c1=2, c2=2, acceptance=0.01)
start = time.time()
optimizer.run()
end = time.time()
table[0, 1] += optimizer.gbest_y
table[1, 1] += end - start
optimizer = APSO(func=Quadric, dim=30, pop=20, max_iter=g, lb=np.ones(30) * (-100), ub=np.ones(30) * 100,
w=0.9, c1=2, c2=2, acceptance=100)
start = time.time()
optimizer.run()
end = time.time()
table[0, 2] += optimizer.gbest_y
table[1, 2] += end - start
optimizer = APSO(func=Rosenbrock, dim=30, pop=20, max_iter=g, lb=np.ones(30) * (-10), ub=np.ones(30) * 10,
w=0.9, c1=2, c2=2, acceptance=100)
start = time.time()
optimizer.run()
end = time.time()
table[0, 3] += optimizer.gbest_y
table[1, 3] += end - start
optimizer = APSO(func=Step, dim=30, pop=20, max_iter=g, lb=np.ones(30) * (-100), ub=np.ones(30) * 100,
w=0.9, c1=2, c2=2, acceptance=0)
start = time.time()
optimizer.run()
end = time.time()
table[0, 4] += optimizer.gbest_y
table[1, 4] += end - start
optimizer = APSO(func=Noise, dim=30, pop=20, max_iter=g, lb=np.ones(30) * (-1.28), ub=np.ones(30) * 1.28,
w=0.9, c1=2, c2=2, acceptance=0.01)
start = time.time()
optimizer.run()
end = time.time()
table[0, 5] += optimizer.gbest_y
table[1, 5] += end - start
optimizer = APSO(func=Schewel, dim=30, pop=20, max_iter=g, lb=np.ones(30) * (-500), ub=np.ones(30) * 500,
w=0.9, c1=2, c2=2, acceptance=-10000)
start = time.time()
optimizer.run()
end = time.time()
table[0, 6] += optimizer.gbest_y
table[1, 6] += end - start
optimizer = APSO(func=Rastrigin, dim=30, pop=20, max_iter=g, lb=np.ones(30) * (-5.12), ub=np.ones(30) * 5.12,
w=0.9, c1=2, c2=2, acceptance=50)
start = time.time()
optimizer.run()
end = time.time()
table[0, 7] += optimizer.gbest_y
table[1, 7] += end - start
# x_max = 5.12 * np.ones(d)
# x_min = -5.12 * np.ones(d)
# optimizer = APSO(fit_func=Noncontinuous_Rastrigin, num_dim=d, num_particle=p, max_iter=g, x_max=x_max, x_min=x_min)
# start = time.time()
# optimizer.opt()
# end = time.time()
# table[0, 8] += optimizer.gbest_y
# table[1, 8] += end - start
#
#
optimizer = APSO(func=Ackley, dim=30, pop=20, max_iter=g, lb=np.ones(30) * (-32), ub=np.ones(30) * 32,
w=0.9, c1=2, c2=2, acceptance=0.01)
start = time.time()
optimizer.run()
end = time.time()
table[0, 8] += optimizer.gbest_y
table[1, 8] += end - start
optimizer = APSO(func=Griewank, dim=30, pop=20, max_iter=g, lb=np.ones(30) * (-600), ub=np.ones(30) * 600,
w=0.9, c1=2, c2=2, acceptance=0.01)
start = time.time()
optimizer.run()
end = time.time()
table[0, 9] += optimizer.gbest_y
table[1, 9] += end - start
table = table / times
print(table)
table = pd.DataFrame(table)
table.columns = ['Sphere', 'Schwefel_P222', 'Quadric', 'Rosenbrock', 'Step', 'Quadric_Noise', 'Schwefel',
'Rastrigin', 'Ackley', 'Griewank']
table.index = ['mean score', 'mean time']
print(table)
if __name__ == "__main__":
# pso_TVIW()
demo_01 = DemoTrailForAPSO()
    # Experiment 1: GPSO population distribution
    # demo_01.population_Distribution_Information_Of_PSO()
    # test detail
    # demo_01.lala()
    # Experiment 2: evolution of the GPSO evolutionary factor
    # Note: for experiment 2, comment out the adaptive part in the APSO code
    # demo_01.evolutionary_information_f()
    # Experiment 3
demo_01.trail_APSO_mean_FEs()
|
StarcoderdataPython
|
3203137
|
# -*- coding: utf-8 -*-
# @Time: 2020/3/21 22:48
# @Author: GraceKoo
# @File: 76_minimum-window-substring.py
# @Desc:https://leetcode-cn.com/problems/minimum-window-substring
from collections import Counter
from collections import defaultdict
class Solution:
def minWindow(self, s: str, t: str) -> str:
left, right = 0, 0
length_s = len(s)
        result = s + s  # use an arbitrary string longer than s as a sentinel for later comparison
        t_dict = Counter(t)  # occurrence count of each character in t
        count_dict = defaultdict(lambda: 0)  # dictionary with default value 0
        # check whether the current window dict covers the lengths and counts of the target dict
def contains(t_dict, count_dict):
if len(count_dict) < len(t_dict):
return False
for key in count_dict:
if count_dict[key] < t_dict[key] or key not in t_dict:
return False
return True
        # advance the right pointer
for right in range(length_s):
if s[right] in t_dict:
count_dict[s[right]] += 1
            # shrink from the left while the current window still covers all characters of t
while left < length_s and contains(t_dict, count_dict):
                # update the result
if right - left + 1 < len(result):
result = s[left : right + 1]
if s[left] in t_dict:
count_dict[s[left]] -= 1
left += 1
return "" if result == s + s else result
so = Solution()
print(so.minWindow("ADOBECODEBANC", "ABC"))
|
StarcoderdataPython
|
1620584
|
from typing import Generator, Optional, Sequence, Union
from libcst import (
Assign,
AssignTarget,
Decorator,
FlattenSentinel,
ImportFrom,
ImportStar,
Module,
Name,
RemovalSentinel,
)
from libcst import matchers as m
from django_codemod.constants import DJANGO_1_9, DJANGO_2_0
from django_codemod.visitors.base import BaseDjCodemodTransformer, import_from_matches
class AssignmentTagTransformer(BaseDjCodemodTransformer):
"""Replace `assignment_tag` by `simple_tag`."""
deprecated_in = DJANGO_1_9
removed_in = DJANGO_2_0
ctx_key_prefix = "AssignmentTagTransformer"
ctx_key_library_call_matcher = f"{ctx_key_prefix}-library_call_matcher"
ctx_key_decorator_matcher = f"{ctx_key_prefix}-decorator_matcher"
@property
def library_call_matcher(self) -> Optional[m.Call]:
return self.context.scratch.get(self.ctx_key_library_call_matcher, None)
@property
def decorators_matcher(self) -> Optional[m.BaseMatcherNode]:
return self.context.scratch.get(self.ctx_key_decorator_matcher, None)
def leave_Module(self, original_node: Module, updated_node: Module) -> Module:
"""Clear context when leaving module."""
self.context.scratch.pop(self.ctx_key_library_call_matcher, None)
self.context.scratch.pop(self.ctx_key_decorator_matcher, None)
return super().leave_Module(original_node, updated_node)
def visit_ImportFrom(self, node: ImportFrom) -> Optional[bool]:
"""Record whether an interesting import is detected."""
import_matcher = (
# django.template
self._template_import_matcher(node)
# django.template.Library
or self._library_import_matcher(node)
)
if import_matcher:
self.context.scratch[self.ctx_key_library_call_matcher] = import_matcher
return None
def _template_import_matcher(self, node: ImportFrom) -> Optional[m.Call]:
"""Return matcher if django.template is imported."""
imported_name_str = self._get_imported_name(node, "django.template")
if not imported_name_str:
return None
# Build the `Call` matcher to look out for, e.g. `template.Library()`
return m.Call(
func=m.Attribute(
attr=m.Name("Library"), value=m.Name(value=imported_name_str)
)
)
def _library_import_matcher(self, node: ImportFrom) -> Optional[m.Call]:
"""Return matcher if django.template.Library is imported."""
imported_name_str = self._get_imported_name(node, "django.template.Library")
if not imported_name_str:
return None
# Build the `Call` matcher to look out for, e.g. `Library()`
return m.Call(func=m.Name(imported_name_str))
@staticmethod
def _get_imported_name(node: ImportFrom, import_path: str) -> Optional[str]:
"""Resolve the imported name if present."""
if isinstance(node.names, ImportStar):
return None
*modules, name = import_path.split(".")
if not import_from_matches(node, modules):
return None
for import_alias in node.names:
if m.matches(import_alias, m.ImportAlias(name=m.Name(name))):
# We're visiting the import statement we're looking for
# Get the actual name it's imported as (in case of import alias)
imported_name_str = (
import_alias.evaluated_alias or import_alias.evaluated_name
)
return imported_name_str
return None
def visit_Assign(self, node: Assign) -> Optional[bool]:
"""Record variable name the `Library()` call is assigned to."""
if self.library_call_matcher and m.matches(
node,
m.Assign(value=self.library_call_matcher),
):
# Visiting a `register = template.Library()` statement
# Generate decorator matchers based on left hand side names
decorator_matchers = self._gen_decorator_matchers(node.targets)
# should match if any of the decorator matches
self.context.scratch[self.ctx_key_decorator_matcher] = m.OneOf(
*decorator_matchers
)
return super().visit_Assign(node)
@staticmethod
def _gen_decorator_matchers(
assign_targets: Sequence[AssignTarget],
) -> Generator[m.Decorator, None, None]:
"""Generate matchers for all possible decorators."""
for assign_target in assign_targets:
# for each variable it's assigned to
if isinstance(assign_target.target, Name):
# get the name of the target
target_str = assign_target.target.value
# matcher we should use for finding decorators to modify
yield m.Decorator(
decorator=m.Attribute(
value=m.Name(target_str),
attr=m.Name("assignment_tag"),
)
)
def leave_Decorator(
self, original_node: Decorator, updated_node: Decorator
) -> Union[Decorator, FlattenSentinel[Decorator], RemovalSentinel]:
"""Update decorator call if all conditions are met."""
if self.decorators_matcher and m.matches(updated_node, self.decorators_matcher):
# If we have a decorator matcher, and it matches,
# then update the node with new name
updated_decorator = updated_node.decorator.with_changes(
attr=Name("simple_tag")
)
return updated_node.with_changes(decorator=updated_decorator)
return super().leave_Decorator(original_node, updated_node)
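# Rough usage sketch (added for illustration; django-codemod transformers are normally
# run through the djcodemod CLI, and it is assumed here that the transformer can be
# instantiated with a libcst CodemodContext):
#
#   import libcst as cst
#   from libcst.codemod import CodemodContext
#
#   source = (
#       "from django import template\n"
#       "register = template.Library()\n"
#       "@register.assignment_tag\n"
#       "def tag():\n"
#       "    ...\n"
#   )
#   updated = cst.parse_module(source).visit(AssignmentTagTransformer(CodemodContext()))
#   print(updated.code)  # the decorator should now read register.simple_tag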
|
StarcoderdataPython
|
131776
|
<reponame>mozillazg/-bustard<gh_stars>10-100
# -*- coding: utf-8 -*-
import json
import os
import pytest
from bustard.app import Bustard
from bustard.utils import to_bytes, to_text
app = Bustard()
current_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.yield_fixture
def client():
yield app.test_client()
@app.route('/echo', methods=['POST'])
def echo(request):
files = request.files
data = {
'hello': to_text(files['hello'].read()),
}
data.update(request.form)
return json.dumps(data)
@app.route('/bin', methods=['POST'])
def echo_bin(request):
files = request.files
return files['file'].read()
def test_upload(client):
content = to_bytes('你好吗')
files = {
'hello': {
'file': to_text(content),
'filename': 'hello.txt',
}
}
data = {
'abc': 'a',
'a': 'b',
}
expect_data = {}
expect_data.update(data)
expect_data.update({k: f['file'] for k, f in files.items()})
response = client.post('/echo', data=data, files=files)
assert response.json() == expect_data
def test_upload_bin(client):
content = b''
with open(os.path.join(current_dir, 'test.png'), 'rb') as f:
content = f.read()
f.seek(0)
files = {
'file': {
'file': f.read(),
'name': f.name,
}
}
response = client.post('/bin', files=files)
assert response.content == content
|
StarcoderdataPython
|
3378832
|
# Copyright 2015-2017, Truveris Inc. All Rights Reserved.
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
from overpunch import __version__
setup(
name="overpunch",
version=__version__,
description="Overpunch Parser/Formatter",
author="Truveris Inc.",
author_email="<EMAIL>",
url="http://github.com/truveris/overpunch",
test_suite="nose.collector",
packages=find_packages(exclude=["ez_setup"]),
classifiers=[
"Intended Audience :: Healthcare Industry",
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Topic :: Text Processing",
],
)
|
StarcoderdataPython
|
168237
|
<filename>twitter_axie_giveaway/gift_axie_ocv_lib.py
from __future__ import print_function
from __future__ import division
import cv2 as cv
import numpy as np
import argparse
import os
import config
def compareImages(src, samples_dir):
src_img = cv.imread(src)
hsv_base = cv.cvtColor(src_img, cv.COLOR_BGR2HSV)
h_bins = 50
s_bins = 60
histSize = [h_bins, s_bins]
# hue varies from 0 to 179, saturation from 0 to 255
h_ranges = [0, 180]
s_ranges = [0, 256]
ranges = h_ranges + s_ranges # concat lists
# Use the 0-th and 1-st channels
channels = [0, 1]
hist_base = cv.calcHist([hsv_base], channels, None, histSize, ranges, accumulate=False)
cv.normalize(hist_base, hist_base, alpha=0, beta=1, norm_type=cv.NORM_MINMAX)
min_distance = 1
max_distance = 0
for file in os.listdir(samples_dir):
print(f"filename: {file}")
        image = cv.imread(os.path.join(samples_dir, file))
hsv_sample = cv.cvtColor(image, cv.COLOR_BGR2HSV)
hist_sample = cv.calcHist([hsv_sample], channels, None, histSize, ranges, accumulate=False)
cv.normalize(hist_sample, hist_sample, alpha=0, beta=1, norm_type=cv.NORM_MINMAX)
distance = cv.compareHist(hist_base, hist_sample, 3)
min_distance = min(distance, min_distance)
max_distance = max(distance, max_distance)
print(f"dist: {distance}")
print(f"min dist: {min_distance}")
print(f"max dist: {max_distance}")
return (min_distance, max_distance)
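# Example call (added sketch; the query image path is a placeholder -- samples_dir is
# expected to contain the reference images listed by os.listdir):
#
#   min_d, max_d = compareImages('axie_query.png', config.DEC_IMGS_DIR)
#   print(min_d, max_d)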
|
StarcoderdataPython
|
1628817
|
<filename>scripts/draw_pics.py
#!/usr/bin/env python
#
# Copyright 2013 <NAME> and <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################
# Write R skript to generate the field pictures using ggplot2
#
from sys import *
import subprocess,os,time
import os.path
filename_body = argv[1]
fp = open(filename_body+"_makepics.r","w")
fp.write("library(ggplot2)\n") # Diff Fields first
fp.write("xydiffs = read.table('"+filename_body+"_xydiffs.txt',na.strings='********')\n")
fp.write("colnames(xydiffs) = c('X','Y','Diff')\n")
fp.write("postscript('"+filename_body+"_xydiffs.eps')\n")
fp.write("v <- ggplot(xydiffs, aes(Y,X,z=Diff))\n")
fp.write("v + geom_tile(aes(fill=Diff)) + scale_fill_gradientn(colour=c('white','yellow','green','orange','red'),limits=c(0,40)) + stat_contour() + xlab('Z [Bohr]') + ylab('Y [Bohr]') + opts(axis.title.x = theme_text(face = 'bold',size = 14),axis.title.y = theme_text(face = 'bold',size = 14,angle = 90),axis.text.x = theme_text(size = 14,vjust=1),axis.text.y = theme_text(size = 14,hjust = 1),legend.text = theme_text(size = 12),legend.title = theme_text(size = 12, face = 'bold', hjust = 0))\n")
fp.write("dev.off()\n")
fp.write("xzdiffs = read.table('"+filename_body+"_xzdiffs.txt',na.strings='********')\n")
fp.write("colnames(xzdiffs) = c('X','Z','Diff')\n")
fp.write("postscript('"+filename_body+"_xzdiffs.eps')\n")
fp.write("v <- ggplot(xzdiffs, aes(Z,X,z=Diff))\n")
fp.write("v + geom_tile(aes(fill=Diff)) + scale_fill_gradientn(colour=c('white','yellow','green','orange','red'),limits=c(0,40)) + stat_contour() + xlab('Z [Bohr]') + ylab('Y [Bohr]') + opts(axis.title.x = theme_text(face = 'bold',size = 14),axis.title.y = theme_text(face = 'bold',size = 14,angle = 90),axis.text.x = theme_text(size = 14,vjust=1),axis.text.y = theme_text(size = 14,hjust = 1),legend.text = theme_text(size = 12),legend.title = theme_text(size = 12, face = 'bold', hjust = 0))\n")
fp.write("dev.off()\n")
fp.write("yzdiffs = read.table('"+filename_body+"_yzdiffs.txt',na.strings='********')\n")
fp.write("colnames(yzdiffs) = c('Y','Z','Diff')\n")
fp.write("postscript('"+filename_body+"_yzdiffs.eps')\n")
fp.write("v <- ggplot(yzdiffs, aes(Z,Y,z=Diff))\n")
fp.write("v + geom_tile(aes(fill=Diff)) + scale_fill_gradientn(colour=c('white','yellow','green','orange','red'),limits=c(0,40)) + stat_contour() + xlab('Z [Bohr]') + ylab('Y [Bohr]') + opts(axis.title.x = theme_text(face = 'bold',size = 14),axis.title.y = theme_text(face = 'bold',size = 14,angle = 90),axis.text.x = theme_text(size = 14,vjust=1),axis.text.y = theme_text(size = 14,hjust = 1),legend.text = theme_text(size = 12),legend.title = theme_text(size = 12, face = 'bold', hjust = 0))\n")
fp.write("dev.off()\n")
fp.write("xy_gauss = read.table('"+filename_body+"_xy-gauss-en.txt',na.strings='********')\n") # Potential_fields from Gaussian second
fp.write("colnames(xy_gauss) = c('X','Y','MEP')\n")
fp.write("postscript('"+filename_body+"_xy_gauss.eps')\n")
fp.write("v <- ggplot(xy_gauss, aes(Y,X,z=MEP))\n")
fp.write("v + geom_tile(aes(fill=MEP)) + scale_fill_gradientn(colour=rainbow(10),limits=c(-50,50)) + stat_contour() + xlab('Z [Bohr]') + ylab('Y [Bohr]') + opts(axis.title.x = theme_text(face = 'bold',size = 14),axis.title.y = theme_text(face = 'bold',size = 14,angle = 90),axis.text.x = theme_text(size = 14,vjust=1),axis.text.y = theme_text(size = 14,hjust = 1),legend.text = theme_text(size = 12),legend.title = theme_text(size = 12, face = 'bold', hjust = 0))\n")
fp.write("dev.off()\n")
fp.write("xz_gauss = read.table('"+filename_body+"_xz-gauss-en.txt',na.strings='********')\n")
fp.write("colnames(xz_gauss) = c('X','Z','MEP')\n")
fp.write("postscript('"+filename_body+"_xz_gauss.eps')\n")
fp.write("v <- ggplot(xz_gauss, aes(Z,X,z=MEP))\n")
fp.write("v + geom_tile(aes(fill=MEP)) + scale_fill_gradientn(colour=rainbow(10),limits=c(-50,50)) + stat_contour() + xlab('Z [Bohr]') + ylab('Y [Bohr]') + opts(axis.title.x = theme_text(face = 'bold',size = 14),axis.title.y = theme_text(face = 'bold',size = 14,angle = 90),axis.text.x = theme_text(size = 14,vjust=1),axis.text.y = theme_text(size = 14,hjust = 1),legend.text = theme_text(size = 12),legend.title = theme_text(size = 12, face = 'bold', hjust = 0))\n")
fp.write("dev.off()\n")
fp.write("yz_gauss = read.table('"+filename_body+"_yz-gauss-en.txt',na.strings='********')\n")
fp.write("colnames(yz_gauss) = c('Y','Z','MEP')\n")
fp.write("postscript('"+filename_body+"_yz_gauss.eps')\n")
fp.write("v <- ggplot(yz_gauss, aes(Z,Y,z=MEP))\n")
fp.write("v + geom_tile(aes(fill=MEP)) + scale_fill_gradientn(colour=rainbow(10),limits=c(-50,50)) + stat_contour() + xlab('Z [Bohr]') + ylab('Y [Bohr]') + opts(axis.title.x = theme_text(face = 'bold',size = 14),axis.title.y = theme_text(face = 'bold',size = 14,angle = 90),axis.text.x = theme_text(size = 14,vjust=1),axis.text.y = theme_text(size = 14,hjust = 1),legend.text = theme_text(size = 12),legend.title = theme_text(size = 12, face = 'bold', hjust = 0))\n")
fp.write("dev.off()\n")
fp.write("xy_mtp = read.table('"+filename_body+"_xy-mult-en.txt',na.strings='********')\n") # Potential_fields from Multipoles last
fp.write("colnames(xy_mtp) = c('X','Y','MEP')\n")
fp.write("postscript('"+filename_body+"_xy_mtp.eps')\n")
fp.write("v <- ggplot(xy_mtp, aes(Y,X,z=MEP))\n")
fp.write("v + geom_tile(aes(fill=MEP)) + scale_fill_gradientn(colour=rainbow(10),limits=c(-50,50)) + stat_contour() + xlab('Z [Bohr]') + ylab('Y [Bohr]') + opts(axis.title.x = theme_text(face = 'bold',size = 14),axis.title.y = theme_text(face = 'bold',size = 14,angle = 90),axis.text.x = theme_text(size = 14,vjust=1),axis.text.y = theme_text(size = 14,hjust = 1),legend.text = theme_text(size = 12),legend.title = theme_text(size = 12, face = 'bold', hjust = 0))\n")
fp.write("dev.off()\n")
fp.write("xz_mtp = read.table('"+filename_body+"_xz-mult-en.txt',na.strings='********')\n")
fp.write("colnames(xz_mtp) = c('X','Z','MEP')\n")
fp.write("postscript('"+filename_body+"_xz_mtp.eps')\n")
fp.write("v <- ggplot(xz_mtp, aes(Z,X,z=MEP))\n")
fp.write("v + geom_tile(aes(fill=MEP)) + scale_fill_gradientn(colour=rainbow(10),limits=c(-50,50)) + stat_contour() + xlab('Z [Bohr]') + ylab('Y [Bohr]') + opts(axis.title.x = theme_text(face = 'bold',size = 14),axis.title.y = theme_text(face = 'bold',size = 14,angle = 90),axis.text.x = theme_text(size = 14,vjust=1),axis.text.y = theme_text(size = 14,hjust = 1),legend.text = theme_text(size = 12),legend.title = theme_text(size = 12, face = 'bold', hjust = 0))\n")
fp.write("dev.off()\n")
fp.write("yz_mtp = read.table('"+filename_body+"_yz-mult-en.txt',na.strings='********')\n")
fp.write("colnames(yz_mtp) = c('Y','Z','MEP')\n")
fp.write("postscript('"+filename_body+"_yz_mtp.eps')\n")
fp.write("v <- ggplot(yz_mtp, aes(Z,Y,z=MEP))\n")
fp.write("v + geom_tile(aes(fill=MEP)) + scale_fill_gradientn(colour=rainbow(10),limits=c(-50,50)) + stat_contour() + xlab('Z [Bohr]') + ylab('Y [Bohr]') + opts(axis.title.x = theme_text(face = 'bold',size = 14),axis.title.y = theme_text(face = 'bold',size = 14,angle = 90),axis.text.x = theme_text(size = 14,vjust=1),axis.text.y = theme_text(size = 14,hjust = 1),legend.text = theme_text(size = 12),legend.title = theme_text(size = 12, face = 'bold', hjust = 0))\n")
fp.write("dev.off()\n")
fp.write("q()\n")
fp.close()
print()
print(" Run 'R CMD BATCH "+filename_body+"_makepics.r' to generate the pictures")
print()
exit(0)
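# How this script is meant to be used (added note): it only writes the R source file,
# so a typical run would be something like
#   python draw_pics.py mymolecule
#   R CMD BATCH mymolecule_makepics.r
# where "mymolecule" is the common prefix of the *_xydiffs.txt / *-gauss-en.txt tables.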
|
StarcoderdataPython
|
1787993
|
'''
@File :dataloader.py
@Author:Morton
@Date :2020/6/18 16:04
@Desc :The basic loading function to extract raw content and mention graph information from raw data "user_info.xxx.gz".
'''
# -*- coding:utf-8 -*-
import os
import re
import csv
import kdtree
import gensim
import numpy as np
import pandas as pd
import networkx as nx
from haversine import haversine
from collections import defaultdict, OrderedDict
from sklearn.neighbors import NearestNeighbors
class DataLoader:
def __init__(self, data_home, bucket_size=50, encoding='utf-8', celebrity_threshold=10, one_hot_labels=False,
mindf=10, maxdf=0.2, norm='l2', idf=True, btf=True, tokenizer=None, subtf=False, stops=None,
token_pattern=r'(?u)(?<![#@])\b\w\w+\b', vocab=None):
self.data_home = data_home
self.bucket_size = bucket_size
self.encoding = encoding
self.celebrity_threshold = celebrity_threshold
self.one_hot_labels = one_hot_labels
self.mindf = mindf
self.maxdf = maxdf
self.norm = norm
self.idf = idf
self.btf = btf
self.tokenizer = tokenizer
self.subtf = subtf
self.stops = stops if stops else 'english'
self.token_pattern = r'(?u)(?<![#@|,.-_+^……$%&*(); :`,。?、:;;《》{}“”~#¥])\b\w\w+\b'
self.vocab = vocab
def load_data(self):
print('loading the dataset from: {}'.format(self.data_home))
train_file = os.path.join(self.data_home, 'user_info.train.gz')
dev_file = os.path.join(self.data_home, 'user_info.dev.gz')
test_file = os.path.join(self.data_home, 'user_info.test.gz')
df_train = pd.read_csv(train_file, delimiter='\t', encoding=self.encoding, names=['user', 'lat', 'lon', 'text'],
quoting=csv.QUOTE_NONE, error_bad_lines=False)
df_dev = pd.read_csv(dev_file, delimiter='\t', encoding=self.encoding, names=['user', 'lat', 'lon', 'text'],
quoting=csv.QUOTE_NONE, error_bad_lines=False)
df_test = pd.read_csv(test_file, delimiter='\t', encoding=self.encoding, names=['user', 'lat', 'lon', 'text'],
quoting=csv.QUOTE_NONE, error_bad_lines=False)
df_train.dropna(inplace=True)
df_dev.dropna(inplace=True)
df_test.dropna(inplace=True)
df_train['user'] = df_train['user'].apply(lambda x: str(x).lower())
df_train.drop_duplicates(['user'], inplace=True, keep='last')
df_train.set_index(['user'], drop=True, append=False, inplace=True)
df_train.sort_index(inplace=True)
df_dev['user'] = df_dev['user'].apply(lambda x: str(x).lower())
df_dev.drop_duplicates(['user'], inplace=True, keep='last')
df_dev.set_index(['user'], drop=True, append=False, inplace=True)
df_dev.sort_index(inplace=True)
df_test['user'] = df_test['user'].apply(lambda x: str(x).lower())
df_test.drop_duplicates(['user'], inplace=True, keep='last')
df_test.set_index(['user'], drop=True, append=False, inplace=True)
df_test.sort_index(inplace=True)
self.df_train = df_train
self.df_dev = df_dev
self.df_test = df_test
def get_graph(self):
g = nx.Graph()
nodes = set(self.df_train.index.tolist() + self.df_dev.index.tolist() + self.df_test.index.tolist())
assert len(nodes) == len(self.df_train) + len(self.df_dev) + len(self.df_test), 'duplicate target node'
nodes_list = self.df_train.index.tolist() + self.df_dev.index.tolist() + self.df_test.index.tolist()
node_id = {node: id for id, node in enumerate(nodes_list)}
g.add_nodes_from(node_id.values())
for node in nodes:
g.add_edge(node_id[node], node_id[node])
pattern = '(?<=^|(?<=[^a-zA-Z0-9-_\\.]))@([A-Za-z]+[A-Za-z0-9_]+)'
pattern = re.compile(pattern)
print('start adding the train graph')
externalNum = 0
for i in range(len(self.df_train)):
user = self.df_train.index[i]
user_id = node_id[user]
mentions = [m.lower() for m in pattern.findall(self.df_train.text[i])]
idmentions = set()
for m in mentions:
if m in node_id:
idmentions.add(node_id[m])
else:
id = len(node_id)
node_id[m] = id
idmentions.add(id)
externalNum += 1
if len(idmentions) > 0:
g.add_nodes_from(idmentions)
for id in idmentions:
g.add_edge(user_id, id)
print('start adding the dev graph')
externalNum = 0
for i in range(len(self.df_dev)):
user = self.df_dev.index[i]
user_id = node_id[user]
mentions = [m.lower() for m in pattern.findall(self.df_dev.text[i])]
idmentions = set()
for m in mentions:
if m in node_id:
idmentions.add(node_id[m])
else:
id = len(node_id)
node_id[m] = id
idmentions.add(id)
externalNum += 1
if len(idmentions) > 0:
g.add_nodes_from(idmentions)
for id in idmentions:
g.add_edge(id, user_id)
print('start adding the test graph')
externalNum = 0
for i in range(len(self.df_test)):
user = self.df_test.index[i]
user_id = node_id[user]
mentions = [m.lower() for m in pattern.findall(self.df_test.text[i])]
idmentions = set()
for m in mentions:
if m in node_id:
idmentions.add(node_id[m])
else:
id = len(node_id)
node_id[m] = id
idmentions.add(id)
externalNum += 1
if len(idmentions) > 0:
g.add_nodes_from(idmentions)
for id in idmentions:
g.add_edge(id, user_id)
print('#nodes: %d, #edges: %d' % (nx.number_of_nodes(g), nx.number_of_edges(g)))
celebrities = []
for i in range(len(nodes_list), len(node_id)):
deg = len(g[i])
if deg == 1 or deg > self.celebrity_threshold:
celebrities.append(i)
print('removing %d celebrity nodes with degree higher than %d' % (len(celebrities), self.celebrity_threshold))
g.remove_nodes_from(celebrities)
print('projecting the graph')
projected_g = self.efficient_collaboration_weighted_projected_graph2(g, range(len(nodes_list)))
print('#nodes: %d, #edges: %d' % (nx.number_of_nodes(projected_g), nx.number_of_edges(projected_g)))
self.graph = projected_g
def efficient_collaboration_weighted_projected_graph2(self, B, nodes):
# B: the whole graph including known nodes and mentioned nodes --large graph
# nodes: the node_id of known nodes --small graph node
nodes = set(nodes)
G = nx.Graph()
G.add_nodes_from(nodes)
all_nodes = set(B.nodes())
for m in all_nodes:
nbrs = B[m]
target_nbrs = [t for t in nbrs if t in nodes]
# add edge between known nodesA(m) and known nodesB(n)
if m in nodes:
for n in target_nbrs:
if m < n:
if not G.has_edge(m, n):
# Morton added for exclude the long edges
G.add_edge(m, n)
# add edge between known n1 and known n2,
# just because n1 and n2 have relation to m, why ? ? ? Yes, it's right.
for n1 in target_nbrs:
for n2 in target_nbrs:
if n1 < n2:
if not G.has_edge(n1, n2):
G.add_edge(n1, n2)
return G
def get_raw_content_and_save(self, save_file_path):
# Morton add for save the raw content data into files.
if os.path.exists(save_file_path):
print("content already saved.")
return None
data = list(self.df_train.text.values) + list(self.df_dev.text.values) + list(self.df_test.text.values)
file = open(save_file_path, 'w', encoding='utf-8')
for i in range(len(data)):
file.write(str(data[i]) + '\n')
file.close()
print("content saved in {}".format(save_file_path))
def load_doc2vec_feature(self, doc2vec_model_file):
"""
doc2vec_model_file: the file that including all doc2vec features of the raw content.
"""
# load model
model = gensim.models.doc2vec.Doc2Vec.load(doc2vec_model_file)
# train data features
feature_list = list()
index_l = 0
index_r = len(self.df_train.text)
for i in range(index_l, index_r):
feature_list.append(model.docvecs[i])
self.X_train = np.array(feature_list)
# dev data features
feature_list = list()
index_l = len(self.df_train.text)
index_r = len(self.df_train.text) + len(self.df_dev.text)
for i in range(index_l, index_r):
feature_list.append(model.docvecs[i])
self.X_dev = np.array(feature_list)
# test data features
feature_list = list()
index_l = len(self.df_train.text) + len(self.df_dev.text)
index_r = len(self.df_train.text) + len(self.df_dev.text) + len(self.df_test.text)
for i in range(index_l, index_r):
feature_list.append(model.docvecs[i])
self.X_test = np.array(feature_list)
print("training n_samples: %d, n_features: %d" % self.X_train.shape)
print("development n_samples: %d, n_features: %d" % self.X_dev.shape)
print("test n_samples: %d, n_features: %d" % self.X_test.shape)
def assignClasses(self):
"""
get labels of all samples. label == index number of cluster.
"""
clusterer = kdtree.KDTreeClustering(bucket_size=self.bucket_size)
train_locs = self.df_train[['lat', 'lon']].values
clusterer.fit(train_locs)
clusters = clusterer.get_clusters()
cluster_points = defaultdict(list)
for i, cluster in enumerate(clusters):
cluster_points[cluster].append(train_locs[i])
print('# the number of clusterer labels is: %d' % len(cluster_points))
self.cluster_median = OrderedDict()
for cluster in sorted(cluster_points):
points = cluster_points[cluster]
median_lat = np.median([p[0] for p in points])
median_lon = np.median([p[1] for p in points])
self.cluster_median[cluster] = (median_lat, median_lon)
dev_locs = self.df_dev[['lat', 'lon']].values
test_locs = self.df_test[['lat', 'lon']].values
nnbr = NearestNeighbors(n_neighbors=1, algorithm='brute', leaf_size=1, metric=haversine, n_jobs=4)
nnbr.fit(np.array(list(self.cluster_median.values())))
self.dev_classes = nnbr.kneighbors(dev_locs, n_neighbors=1, return_distance=False)[:, 0]
self.test_classes = nnbr.kneighbors(test_locs, n_neighbors=1, return_distance=False)[:, 0]
self.train_classes = clusters
if self.one_hot_labels:
num_labels = np.max(self.train_classes) + 1
y_train = np.zeros((len(self.train_classes), num_labels), dtype=np.float32)
y_train[np.arange(len(self.train_classes)), self.train_classes] = 1
y_dev = np.zeros((len(self.dev_classes), num_labels), dtype=np.float32)
y_dev[np.arange(len(self.dev_classes)), self.dev_classes] = 1
y_test = np.zeros((len(self.test_classes), num_labels), dtype=np.float32)
y_test[np.arange(len(self.test_classes)), self.test_classes] = 1
self.train_classes = y_train
self.dev_classes = y_dev
self.test_classes = y_test
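# Usage sketch (added; the data_home path and doc2vec model file are placeholders --
# the loader expects gzipped user_info.{train,dev,test}.gz files under data_home as
# described in the docstring above):
#
#   loader = DataLoader(data_home='./data/cmu', bucket_size=50, celebrity_threshold=5)
#   loader.load_data()
#   loader.get_graph()
#   loader.assignClasses()
#   loader.load_doc2vec_feature('./models/doc2vec.model')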
|
StarcoderdataPython
|
187615
|
# -*- coding: utf-8 -*-
import pyfits
from pylab import *
import Marsh
import numpy
import scipy
def getSpectrum(filename,b,Aperture,minimum_column,maximum_column):
hdulist = pyfits.open(filename) # Here we obtain the image...
data=hdulist[0].data # ... and we obtain the image matrix.
Result=Marsh.SimpleExtraction((data.flatten()).astype(double),scipy.polyval(b,numpy.arange(data.shape[1])).astype(double),data.shape[0],data.shape[1],data.shape[1],Aperture,minimum_column,maximum_column)
FinalMatrix=asarray(Result) # After the function, we convert our list to a Numpy array.
return FinalMatrix
# Main function:
FileName='../../../transmission_spectroscopy/WASP6/data.fits' # Filename of the image of the spectrum.
b=pyfits.getdata('../../../transmission_spectroscopy/WASP6/trace_coeffs.fits') # We write our trace...
Aperture=15
Spectrum=getSpectrum(FileName,b,Aperture,0,0) # In this example we take our spectrum from x=50 to 1500,
# where x is in our matrix coordinates.
x=arange(len(Spectrum))
plot(x,Spectrum,'-') # Plot the results.
show()
|
StarcoderdataPython
|
1603609
|
<reponame>arefmalek/Demographics_Disenfranchisement
# TODO: YES I KNOW THEY BASICALLY ARE ALL THE SAME CODE ITS SUPER REDUNDANT
# ILL FIX SOON
import torch
import torch.nn as nn
import torch.nn.functional as F
class Age(nn.Module):
def __init__(self):
super(Age, self).__init__()
if torch.cuda.is_available(): self.cuda()
self.conv1 = nn.Conv2d(3, 6, 4)
self.pool = nn.MaxPool2d(8, 8)
self.conv2 = nn.Conv2d(6, 16, 4)
self.fc1 = nn.Linear(16 * 2 * 2, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84,10) #age (0 to 1 * 100), gender (<0.5 -> F, >=0.5 -> M), race ((0 to 1) / nraces)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(x.size(0), -1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
class Race(nn.Module):
def __init__(self):
super(Race, self).__init__()
if torch.cuda.is_available(): self.cuda()
self.conv1 = nn.Conv2d(3, 6, 4)
self.pool = nn.MaxPool2d(8, 8)
self.conv2 = nn.Conv2d(6, 16, 4)
self.fc1 = nn.Linear(16 * 2 * 2, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84,5)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(x.size(0), -1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
class Sex(nn.Module):
def __init__(self):
super(Sex, self).__init__()
if torch.cuda.is_available(): self.cuda()
self.conv1 = nn.Conv2d(3, 6, 4)
self.pool = nn.MaxPool2d(8, 8)
self.conv2 = nn.Conv2d(6, 16, 4)
self.fc1 = nn.Linear(16 * 2 * 2, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84,2) #age (0 to 1 * 100), gender (<0.5 -> F, >=0.5 -> M), race ((0 to 1) / nraces)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(x.size(0), -1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
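# Smoke test (added sketch): with the conv/pool sizes above, the fully connected layers
# line up for roughly 200x200 RGB inputs (UTKFace-sized crops are an assumption here,
# not something stated in the original file).
if __name__ == "__main__":
    dummy = torch.randn(1, 3, 200, 200)
    print(Age()(dummy).shape)   # torch.Size([1, 10])
    print(Race()(dummy).shape)  # torch.Size([1, 5])
    print(Sex()(dummy).shape)   # torch.Size([1, 2])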
|
StarcoderdataPython
|
131176
|
<filename>doubly_linked_list_with_tail.py<gh_stars>1-10
"""
Author: PyDev
Description: A Doubly Linked List with a Tail consists of elements, where each element is the
             skeleton and holds a value plus next and previous references.
             There is a Head pointer that refers to the front of the Linked List.
             The Tail pointer refers to the end of the Linked List / the last element.
"""
class Element:
""" A class that represent single skeleton of the Linked List"""
def __init__(self, value):
"""
        initialize a new element of the linked list with the given value
Arguments:
- value: any object reference to assign new element to Linked List
Instance varabiles:
- value: an object value
- next: a reference/pointer to next element in the linked list and default value is None
- previous: a reference/pointer to previous element in the linked list and default value is None
"""
self.value = value
self.next = None
self.previous = None
class DoublyLinkedListWithTail:
"""
    A class of Doubly-Linked List which has the following attributes and properties:
- Doubly Linked List
- With tail
Methods:
- pushFront(new_element)
- topFront()
- popFront()
- pushBack(new_element)
- topBack()
- popBack()
- find()
- erase()
- isEmpty()
- addBefore()
- addAfter()
"""
def __init__(self, head = None):
"""
initilize DoublyLinkedListWithoutTail object instance
Arguments:
- head: default to None
Instance variables:
- head: an object value which refer to head/start of the Linked List
- Tail: an object value which refer to the back/end of the Linked List
"""
self.head = self.tail = head
def pushFront(self, new_element):
"""
add new element to the front
Arguments:
- new_element: an object that reference to new element to be added
"""
if self.tail:
new_element.next = self.head
self.head = new_element
new_element.next.previous = self.head
else:
self.head = self.tail = new_element
def topFront(self):
"""
return front element/item
Returns:
- the front element/item object
"""
return self.head
def popFront(self):
"""remove front element/item"""
if self.head:
next_element = self.head.next
if next_element:
next_element.previous = None
else:
self.tail = None
            previous_head_object = self.head
            self.head = next_element
            previous_head_object.next = None  # fully detach the removed element
else:
print("Doubly Linked List With Tail is empty!")
def pushBack(self, new_element):
"""
        add to back, also known as append
Arguments:
- new_element: an object that reference to new element to be added
"""
if self.tail:
self.tail.next = new_element
new_element.previous = self.tail
self.tail = new_element
else:
self.head = self.tail = new_element
new_element.previous = None
def topBack(self):
"""
return back/last element/item
Returns:
- the back/last element/item object
"""
if self.tail:
return self.tail
else:
print("Doubly Linked List With Tail is empty!")
def popBack(self):
"""
remove back element/item
"""
if self.head:
if self.head == self.tail:
self.head = self.tail = None
else:
self.tail = self.tail.previous
self.tail.next = None
else:
print("Error! Doubly Linked List With Tail is empty!")
def find(self, value):
"""
find if the value of an object is available in the current Linked List
Arguments:
- value: an object that represent a value we want to look for
Returns:
- boolean object
"""
current = self.head
if self.head:
while current.value != value and current.next:
current = current.next
if current.value == value:
return True
else:
return False
else:
            print("Doubly Linked List With Tail is empty!")
def erase(self, value):
"""
remove an element/item from Linked List
Arguments:
- value: an object that represent a value we want to look for
"""
current = self.head
while current.value != value and current.next:
current = current.next
        if current.value == value:
            if self.head.value == value:
                # We can use self.popFront() or
                self.head = current.next
                current.next = None
                if self.head:
                    self.head.previous = None
                else:
                    # the erased element was the only one, so the list is now empty
                    self.tail = None
            elif not current.next:
                # We can use self.popBack() or
                self.tail = current.previous
                current.previous = None
                self.tail.next = None
else:
next_element = current.next
previous_element = current.previous
previous_element.next = next_element
next_element.previous = previous_element
current.next = current.previous = None
def isEmpty(self):
"""
check if the Linked List is empty or not
        Returns:
- boolean object
"""
if self.head:
return False
else:
return True
def addBefore(self, new_element, node):
"""
add new element/item before a position in the Linked List
Arguments:
- new_element: an object that reference to a new element to be added
        - node: an existing element of the Linked List before which the new
                element/item will be placed
"""
new_element.next = node
new_element.previous = node.previous
node.previous = new_element
if new_element.previous:
new_element.previous.next = new_element
if self.head == node:
self.head = new_element
def addAfter(self, new_element, node):
"""
add new element/item after a node/element/item in the Linked List
Arguments:
- new_element: an object that reference to a new element to be added
- node: an object that is part of the Linked List elements
"""
new_element.next = node.next
new_element.previous = node
node.next = new_element
if new_element.next:
new_element.next.previous = new_element
if self.tail == node:
self.tail = new_element
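# Minimal usage sketch (not part of the original module) exercising the main operations:
if __name__ == "__main__":
    dll = DoublyLinkedListWithTail()
    dll.pushBack(Element(1))
    dll.pushBack(Element(2))
    dll.pushFront(Element(0))      # list is now 0 <-> 1 <-> 2
    print(dll.topFront().value)    # 0
    print(dll.topBack().value)     # 2
    print(dll.find(1))             # True
    dll.erase(1)                   # list is now 0 <-> 2
    dll.popBack()                  # list is now 0
    print(dll.isEmpty())           # False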
|
StarcoderdataPython
|
44908
|
<gh_stars>1-10
#!/usr/bin/env python2
from setuptools import setup
setup(name='indCAPS',
version='0.1',
description='OpenShift App',
author='<NAME>',
author_email='<EMAIL>',
# install_requires=['Flask==0.10.1'],
)
|
StarcoderdataPython
|
3244925
|
class ColumnInfo:
'Data object that holds information about a column and the unique values it has'
    def __init__(self, name, dataType, uniqueValues=None):
        # Use None as the default to avoid sharing one mutable list between instances
        self.name = name
        self.dataType = dataType
        self.uniqueValues = uniqueValues if uniqueValues is not None else []
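# Illustrative usage (names and values are assumptions, not from the original source):
# column = ColumnInfo("country", "str", ["US", "DE", "JP"])
# print(column.name, column.dataType, column.uniqueValues)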
|
StarcoderdataPython
|
1771559
|
from tensorflow import keras
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.models import Model
from tensorflow.keras.applications.inception_v3 import InceptionV3
import tensorflow as tf
import numpy as np
from glob import glob
import os, sys
# import necessary libraries and configurations
config = ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.5
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
folders = glob(os.path.join(sys.path[0], "train/*"))
data_dir = os.path.join(sys.path[0], "train/")
batch_size = 32
image_height = 512
image_width = 1024
image_size = [image_height, image_width]
train_set = tf.keras.preprocessing.image_dataset_from_directory(
data_dir,
validation_split=0.1,
subset="training",
seed=123,
image_size = (image_height, image_width),
batch_size = batch_size)
val_set = tf.keras.preprocessing.image_dataset_from_directory(
data_dir,
validation_split=0.1,
subset="validation",
seed=123,
image_size = (image_height, image_width),
batch_size = batch_size)
normalization_layer = tf.keras.layers.experimental.preprocessing.Rescaling(1./255)
train_set = train_set.map(lambda x, y: (normalization_layer(x), y))
val_set = val_set.map(lambda x, y: (normalization_layer(x), y))
inception = InceptionV3(input_shape=image_size + [3], weights='imagenet', include_top=False)
for layer in inception.layers:
layer.trainable = False
x = Flatten()(inception.output)
prediction = Dense(len(folders), activation='softmax')(x)
model = Model(inputs=inception.input, outputs=prediction)
opt = keras.optimizers.Adam(learning_rate=0.0002)
model.compile(optimizer=opt,
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
metrics=['accuracy'])
history = model.fit(
train_set,
validation_data = val_set,
epochs = 15,
verbose = 1
)
np.save(os.path.join(sys.path[0], "history.npy"),history.history)
model.save(os.path.join(sys.path[0], "trained_model.h5"))
model.save_weights(os.path.join(sys.path[0], "checkpoint"))
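# Hedged sketch (not in the original script): how the model saved above could be reloaded
# for inference. The file name matches the save call above; the image path parameter is a
# placeholder assumption. Defined as a helper only, it is not called here.
def predict_single_image(image_path):
    reloaded = tf.keras.models.load_model(os.path.join(sys.path[0], "trained_model.h5"))
    img = tf.keras.preprocessing.image.load_img(image_path, target_size=(image_height, image_width))
    arr = tf.keras.preprocessing.image.img_to_array(img) / 255.0  # same rescaling as training
    probs = reloaded.predict(np.expand_dims(arr, axis=0))         # shape (1, len(folders))
    return int(np.argmax(probs))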
|
StarcoderdataPython
|
1726725
|
<gh_stars>1-10
# The network config (links to the net) we use for our simulation
sumoConfig = "A9_conf.sumocfg"
# The network net we use for our simulation
sumoNet = "A9.net.xml"
mqttUpdates = False
mqttHost = "localhost"
mqttPort = "1883"
# should it use kafka for config changes & publishing data (else it uses json file)
kafkaUpdates = True
# the kafka host we want to send our messages to
kafkaHost = "kafka:9092"
# the topics we send the kafka messages to
kafkaTopicMeanCarData = "platooning-car-data"
kafkaTopicDurations = "platooning-trip-durations"
kafkaTopicPlatooningData = "platooning-data"
# where we receive system changes
kafkaPlatoonConfigTopic = "platooning-config"
# Initial wait time before publishing data, should not be changed
ignore_first_n_results = 350
# True if we want to use the SUMO GUI
sumoUseGUI = False
# startEdgeID & lastEdgeID denotes lower & upper edges, i.e. extreme points of the map
startEdgeID = "11S"
lastEdgeID = "23805795"
''' one of these will be selected (in randomized manner) as exit edge of each car '''
# edgeIDsForExit = ["135586672#0", "12N", "286344111", "286344110", "23805795"]
edgeIDsAndNumberOfLanesForExit = {
"135586672#0": 4
}
# TODO: uncomment following for production?
# edgeIDsAndNumberOfLanesForExit = {
# "135586672#0": 4,
# "12N": 5,
# "286344111": 3,
# "286344110": 4,
# "23805795": 3
# }
# you can also set contextual parameters
parameters = dict(
contextual=dict(
lookAheadDistance=500.0, # distance to find a leader vehicle in the simulation
switchImpatienceFactor=0.1,
platoonCarCounter=250,
totalCarCounter=250, # set totalCarCounter as platoonCarCounter, other scenario is not tested excessively
extended_simpla_logic=True
),
changeable=dict(
maxVehiclesInPlatoon=10,
catchupDistance=500.0,
maxPlatoonGap=500.0,
platoonSplitTime=10.0,
joinDistance=3000.0 # to find extreme positions (-+d) of platoon
)
)
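# Illustrative access pattern (an assumption about how simulation code might read this
# module; the import name "config" is a placeholder, not taken from the repository):
# import config
# gap = config.parameters["changeable"]["maxPlatoonGap"]
# broker = config.kafkaHost if config.kafkaUpdates else None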
|
StarcoderdataPython
|
3348193
|
<reponame>dmartinpro/microhomie
import settings
from homie.constants import FALSE, TRUE, BOOLEAN
from homie.device import HomieDevice
from homie.node import HomieNode
from homie.property import HomieNodeProperty
from machine import Pin
# Reversed values for the ESP8266 board's onboard LED (the pin is active-low)
ONOFF = {FALSE: 1, TRUE: 0, 1: FALSE, 0: TRUE}
class LED(HomieNode):
def __init__(self, name="Onboard LED", pin=2):
super().__init__(id="led", name=name, type="LED")
self.pin = pin
self.led = Pin(pin, Pin.OUT, value=0)
self.power_property = HomieNodeProperty(
id="power",
name="LED Power",
settable=True,
datatype=BOOLEAN,
default=TRUE,
)
self.add_property(self.power_property, self.on_power_msg)
def on_power_msg(self, topic, payload, retained):
self.led(ONOFF[payload])
self.power_property.data = ONOFF[self.led()]
def main():
# Homie device setup
homie = HomieDevice(settings)
# Add LED node to device
homie.add_node(LED())
# run forever
homie.run_forever()
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
197712
|
import argparse
import json
import pandas as pd
from scipy.stats import pearsonr
def main(human_ann_file, metrics_file):
metrics = json.load(open(metrics_file))
metrics['gold_1'] = metrics['g1']
metrics['gold_2'] = metrics['g2']
metrics['gold_3'] = metrics['g3']
human_ann_df = pd.read_csv(human_ann_file)
unique_models = list(human_ann_df["Input.MODEL"].unique())
eval_categories = [
'Answer.counterfactual',
'Answer.ending',
'Answer.plot',
'Answer.premise',
'Answer.second'
]
print("{}\t{}\t{}\t{}".format("Model Name", "Human Acc", "Drift Similarity", "CFR"))
for ec in eval_categories:
        print("======= {}".format(ec))
human_eval_numbers = []
drift_similarities = []
cfr_metrics = []
for um in unique_models:
if um == "gold_1" or um == "gold_2":
continue
model_df = human_ann_df[human_ann_df["Input.MODEL"] == um]
model_story_ann = model_df.groupby("Input.STORY").aggregate("mean")
ec_accuracy = (model_story_ann[ec] >= 2).mean()
original_name = um.split(".")[0]
if original_name not in metrics:
print("SKIPPING {}".format(original_name))
continue
print("{}\t{}\t{}\t{}".format(um, ec_accuracy, metrics[original_name]["drift_similarity"], metrics[original_name]["CFR_METRIC"]))
human_eval_numbers.append(ec_accuracy)
drift_similarities.append(metrics[original_name]["drift_similarity"])
cfr_metrics.append(metrics[original_name]["CFR_METRIC"])
drift_correl = pearsonr(human_eval_numbers, drift_similarities)
cfr_correl = pearsonr(human_eval_numbers, cfr_metrics)
print("DRIFT:\tCorrelation:\t{}\tP-value\t{}".format(drift_correl[0], drift_correl[1]))
print("CFR:\tCorrelation:\t{}\tP-value\t{}".format(cfr_correl[0], cfr_correl[1]))
print("\n\n")
if __name__ == '__main__':
parser = argparse.ArgumentParser(
prog='evaluate.py',
        usage='%(prog)s --human-ann-file FILE --metrics-file FILE',
description='Evaluate story rewrite'
)
parser.add_argument('--human-ann-file', type=str,
dest="human_ann_file",
help='Location of human annotation file. Usually obtained from mturk download and named *.csv',
default=None)
parser.add_argument('--metrics-file', type=str,
dest="metrics_file",
help='Location of metrics file. Usually named metrics.json',
default=None)
args = parser.parse_args()
# Run seed selection if args valid
print('====Input Arguments====')
print(json.dumps(vars(args), indent=2, sort_keys=True))
print("=======================")
main(args.human_ann_file, args.metrics_file)
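# Example invocation (the file names are placeholders for illustration):
#   python evaluate.py --human-ann-file mturk_annotations.csv --metrics-file metrics.json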
|
StarcoderdataPython
|
1735820
|
<reponame>KyleVaughn/ThermalAnalysisPlots
import matplotlib.pyplot as plt
import matplotlib.font_manager
import numpy as np
import sys
from scipy.signal import savgol_filter
# plots
ExportData_bool = False
plotTempvsMass_bool = False
plotTempvsdmdt_bool = True
plotTempvsdmdt_normalized_bool = False
plotinvKvsdmdt_bool = False
plotinvKvsdmdt_stepwise_normalized_bool = False
plotStepMassvsTime_bool = True
plotStepdmdtvsTime_bool = True
#Title
custom_title = False
custom_title_names = ['DrugName']
# Legend
custom_legend = False
custom_legend_names = ['TGA pan, step','DSC pan, step','TGA pan, ramp','PE pan, ramp','DSC pan, ramp']
#'Stepwise: 100-200 C','Stepwise: 180-330 C']
#, '26.3 mg','35.7 mg'
turn_legend_linefit_off = False
# Linear fit for enthalpy calc
linear = False # Perform the linear fit
linear_ranges = [(230,270)] # Temperature range in C for the linear fit
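# Note on the "Enthalpy" reported by the linear fit: the fit regresses ln(-dm/dt) against
# 1/T, i.e. ln(-dm/dt) = m*(1/T) + b, which for an Arrhenius/Clausius-Clapeyron-type
# mass-loss model gives slope m = -dH/R. The code therefore prints
# dH = -m * 8.3145e-3 kJ/mol (R expressed in kJ/(mol*K)).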
# Smoothing using Savgol filter
smooth = False # plot smoothed data
smooth_ramp_only_and_I_DEMAND_THE_SAME_COLORS = False # If only plotting smoothed ramp data and you want the lines to keep the original data colors
smooth_window = 31 # points in smoothing window. must be an odd number
smooth_order = 1 # order of polynomial fit
# Scatter plot
scatter = True # Plot the full data as points
scatter_smoothed_plt4 = True # Plot the smoothed ramp data for the K^-1 vs -dmdt plot as point data
pt_size = 1 # point size
# Isotherm
# Assumes monotonic heating. Isotherm starts when |T - T_iso| < match tolerance
# Isotherm ends when |T - T_iso| > deviation tolerance
# Since temperature will fluctuate, and is output at discrete intervals, we need tolerances.
isotherm_match_tolerance = 2e-1 # if within this many degrees of isotherm, start isotherm
isotherm_deviation_tolerance = 3.5 # if isotherm deviates this many degrees, consider it stopped
# Fonts
fontname = "Arial"
fontsize = 12
fontsize_title = 14
################################################################################################
class data:
def __init__(self, title, isRamp, time, temp, weight, iso_temps, iso_timeSteps):
self.title = title
self.isRamp = isRamp
self.time = time
self.temp = temp
self.temp_K = temp + 273.15
self.weight = weight
self.iso_temps = np.array(iso_temps)
if not isRamp:
self.iso_temps_K = self.iso_temps + 273.15
self.iso_timeSteps = np.array(iso_timeSteps)
self.iso_dmdt = []
self.iso_start_weight = []
self.iso_stop_weight = []
ref_weight = weight[0]
self.weight_percent = 100.0 + 100.0*(weight-ref_weight)/ref_weight
self.dmdt = np.gradient(self.weight, self.time)
        self.dmdt_smooth = savgol_filter(self.dmdt, smooth_window, smooth_order)  # use the configurable smoothing settings above
print(f"Reference weight for {title} is {ref_weight:.5f} mg")
def printAll(self):
print("Title: ", self.title)
print("isRamp: ", self.isRamp)
print("Time: ", self.time)
print("Temp in C: ", self.temp)
print("Temp in K: ", self.temp_K)
print("Weight: ", self.weight)
print("Weight %: ", self.weight_percent)
print("Iso Temps: ", self.iso_temps)
print("ISo Timesteps: ", self.iso_timeSteps)
###############################################################################################
def CtoK(T):
return T + 273.15
def KtoC(T):
return T - 273.15
def inv2C(Tinv):
return 1.0/Tinv - 273.15
def C2inv(T):
return 1/(T + 273.15)
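# Quick sanity checks for the conversion helpers above (values rounded):
# CtoK(25.0) -> 298.15, KtoC(298.15) -> 25.0,
# C2inv(25.0) -> 1/298.15 ~ 3.354e-3, inv2C(3.354e-3) ~ 25.0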
###############################################################################################
def plotTempvsMass(datalist):
fig = plt.figure(1)
for d in datalist:
plt.plot(d.temp, d.weight_percent, label=d.title)
plt.xlabel(r'Temperature ($\degree$C)', fontname=fontname, fontsize=fontsize)
plt.ylabel('% Mass', fontname=fontname, fontsize=fontsize)
plt.title("Percent Mass vs. Time", fontname=fontname, fontsize=fontsize_title)
plt.tick_params(labelright=True, right=True, labeltop=True, top=True, which='both', direction='in')
plt.minorticks_on()
plt.legend()
fig.tight_layout()
###############################################################################################
def plotTempvsdmdt(datalist):
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
color_idx = 0
fig = plt.figure(2)
ax = fig.add_subplot(111)
secax = ax.secondary_xaxis('top', functions=(KtoC, CtoK))
for d in datalist:
if d.isRamp:
if scatter:
ax.scatter(d.temp_K[:-1], d.dmdt, s=pt_size, color=colors[color_idx], label=d.title)
else:
ax.plot(d.iso_temps_K, d.iso_dmdt, '-o', color=colors[color_idx], label=d.title)
if scatter:
ax.scatter(d.temp_K, d.dmdt, s=pt_size, color=colors[color_idx+1])
color_idx = color_idx + 1
color_idx = color_idx + 1
if smooth_ramp_only_and_I_DEMAND_THE_SAME_COLORS:
color_idx = 0
for d in datalist:
if d.isRamp:
if smooth:
ax.plot(d.temp_K[:-1], d.dmdt_smooth, color=colors[color_idx], label=d.title + ' smoothed')
color_idx = color_idx + 1
ax.set_xlabel('Temperature (K)', fontname=fontname, fontsize=fontsize)
secax.set_xlabel(r'Temperature ($\degree$C)', fontname=fontname, fontsize=fontsize)
ax.set_ylabel(r'$\frac{dm}{dt}$ (mg/min)', fontname=fontname, fontsize=fontsize)
plt.title(r"$\frac{dm}{dt}$ vs. Temperature", fontname=fontname, fontsize=fontsize_title)
ax.tick_params(labelright=True, right=True, which='both', direction='in')
secax.tick_params(which='both', direction='in')
ax.minorticks_on()
secax.minorticks_on()
ax.legend()
fig.tight_layout()
###############################################################################################
def plotTempvsdmdt_normalized(datalist):
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
color_idx = 0
fig = plt.figure(3)
ax = fig.add_subplot(111)
secax = ax.secondary_xaxis('top', functions=(KtoC, CtoK))
for d in datalist:
if d.isRamp:
if scatter:
ax.scatter(d.temp_K[:-1], 100.0*d.dmdt/d.weight[0], s=pt_size, color=colors[color_idx], label=d.title)
else:
ax.plot(d.iso_temps_K, 100.0*d.iso_dmdt/d.weight[0], '-o', color=colors[color_idx], label=d.title)
color_idx = color_idx + 1
if smooth_ramp_only_and_I_DEMAND_THE_SAME_COLORS:
color_idx = 0
for d in datalist:
if d.isRamp:
if smooth:
ax.plot(d.temp_K[:-1], 100.0*d.dmdt_smooth/d.weight[0], color=colors[color_idx], label=d.title + ' smoothed')
color_idx = color_idx + 1
ax.set_xlabel('Temperature (K)', fontname=fontname, fontsize=fontsize)
secax.set_xlabel(r'Temperature ($\degree$C)', fontname=fontname, fontsize=fontsize)
ax.set_ylabel(r'$\frac{100}{m_0}\frac{dm}{dt}$ (%/min)', fontname=fontname, fontsize=fontsize)
plt.title(r"Mass Loss Rate per mg Loaded vs. Temperature", fontname=fontname, fontsize=fontsize_title)
ax.tick_params(labelright=True, right=True, which='both', direction='in')
secax.tick_params(which='both', direction='in')
ax.minorticks_on()
secax.minorticks_on()
ax.legend()
fig.tight_layout()
########################################################################################################
def plotinvKvsdmdt(datalist):
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
colors = colors + colors
color_idx = 0
fig = plt.figure(4)
ax = fig.add_subplot(111)
ax.set_yscale('log')
secax = ax.secondary_xaxis('top', functions=(inv2C, C2inv))
for d in datalist:
if d.isRamp:
if scatter:
ax.plot(1.0/np.array(d.temp_K[:-1]), -d.dmdt, 'o', ms=pt_size, color=colors[color_idx], label=d.title)
else:
ax.plot(1.0/np.array(d.iso_temps_K), -d.iso_dmdt, '-o', color=colors[color_idx], label=d.title)
color_idx = color_idx + 1
if smooth_ramp_only_and_I_DEMAND_THE_SAME_COLORS:
color_idx = 0
for d in datalist:
if d.isRamp:
if smooth:
if scatter_smoothed_plt4:
# Note that matplotlib has a problem with log scale scatter plots, hence using plot instead of scatter
# for figure 4 only.
ax.plot(1.0/np.array(d.temp_K[:-1]), -d.dmdt_smooth, 'o', ms=pt_size, color=colors[color_idx], label=d.title + ' smoothed')
else:
ax.plot(1.0/np.array(d.temp_K[:-1]), -d.dmdt_smooth, color=colors[color_idx], label=d.title + ' smoothed')
color_idx = color_idx + 1
# Linear fit
if linear:
if len(linear_ranges) < 1:
            print("Linear range requires at least one tuple of temperatures: [(min temp0, max temp0), (min temp1, max temp1), etc.]")
print("If the linear fit is not desired, set 'linear = False' in the options")
sys.exit()
for tup in linear_ranges:
if len(tup) != 2:
print("Linear range requires exactly two arguments per tuple: [(min temp, max temp)]")
print("If the linear fit is not desired, set 'linear = False' in the options")
sys.exit()
if tup[0] >= tup[1]:
print("Linear range requires [(min temp, max temp)], with min temp < max temp")
sys.exit()
if len(datalist) != len(linear_ranges):
print(f"Length of datalist ({len(datalist)}) does not equal length of linear ranges ({len(linear_ranges)})")
print("Linear range requires: [(min temp0, max temp0), (min temp1, max temp1), etc.]")
print("If the linear fit is not desired, set 'linear = False' in the options")
sys.exit()
for i,d in enumerate(datalist):
linear_range = linear_ranges[i]
print(f'The linear range is {linear_range[0]}-{linear_range[1]}C')
if d.isRamp:
lin_idx = (linear_range[0] < d.temp) & (d.temp < linear_range[1])
# Check that this range has 2 valid points
valid_pts= 0
for tf in lin_idx:
if tf:
valid_pts = valid_pts + 1
if valid_pts < 2:
print('Cannot find the necessary 2 points in the linear data range for a line fit.'
+ ' Using entire data set for line fit.')
lin_temp_inv0 = 1.0/np.array(d.temp_K)
lin_dmdt = -d.dmdt
else:
lin_temp_inv0 = 1.0/np.array(d.temp_K[lin_idx])
lin_dmdt = -d.dmdt[lin_idx[:-1]]
else:
lin_idx = (linear_range[0] < d.iso_temps) & (d.iso_temps < linear_range[1])
# Check that this range has 2 valid points
valid_pts= 0
for tf in lin_idx:
if tf:
valid_pts = valid_pts + 1
if valid_pts < 2:
print('Cannot find the necessary 2 points in the linear data range for a line fit.' \
+ ' Using entire data set for line fit.')
lin_temp_inv0 = 1.0/d.iso_temps_K
lin_dmdt = -d.iso_dmdt
else:
lin_temp_inv0 = 1.0/d.iso_temps_K[lin_idx]
lin_dmdt = -d.iso_dmdt[lin_idx]
# Handle negative values
pos_idx = (lin_dmdt > 0.0)
neg_ctr = 0
for tf in pos_idx:
if not tf:
neg_ctr = neg_ctr + 1
            if neg_ctr > 0:
                print(f"{neg_ctr} negative values encountered in -dm/dt." \
                    + " Cannot take log of these values, so I'm ignoring them")
lin_temp_inv = lin_temp_inv0[pos_idx]
lin_logdmdt = np.log(lin_dmdt[pos_idx])
TT = np.linspace(lin_temp_inv[0], lin_temp_inv[-1], 100)
results = np.polyfit(lin_temp_inv, lin_logdmdt, 1, full=True)
m = results[0][0]
b = results[0][1]
if not results[1].size > 0:
RSS = 0.0
else:
RSS = results[1][0]
y = lin_logdmdt
ybar = np.sum(y)/len(y)
TSS = np.sum((y - ybar)**2)
Rsquared = 1 - RSS/TSS
print(f"For: {d.title}")
if b > 0:
print(f"Line: y={m}x + {b}")
else:
print(f"Line: y={m}x - {-b}")
print(f"RSS: {RSS}")
print(f"R^2: {Rsquared}")
print(f"Enthalpy = {m*(-8.3145e-3):.3f} kJ/mol")
line = np.poly1d(results[0])
if b > 0:
ax.plot(TT, np.exp(line(TT)), color=colors[color_idx],
label=r"$\ln\left(-\frac{dm}{dt}\right)$" \
+ f"={m:.1f}" \
+ r"$T^{-1}$+" \
+ f"{b:.1f}," \
+ f" $R^2$={Rsquared:.4f}")
else:
ax.plot(TT, np.exp(line(TT)), color=colors[color_idx],
label=r"$\ln\left(-\frac{dm}{dt}\right)$" \
+ f"={m:.1f}" \
+ r"$T^{-1}$-" \
+ f"{-b:.1f}," \
+ f" $R^2$={Rsquared:.4f}")
color_idx = color_idx + 1
ax.set_xlabel('1/T (K$^{-1}$)', fontname=fontname, fontsize=fontsize)
secax.set_xlabel(r'Temperature ($\degree$C)', fontname=fontname, fontsize=fontsize)
ax.set_ylabel(r'$-\frac{dm}{dt}$ (mg/min)', fontname=fontname, fontsize=fontsize)
plt.title(r"$-\frac{dm}{dt}$ vs. 1/T", fontname=fontname, fontsize=fontsize_title)
ax.tick_params(labelright=True, right=True, which='both', direction='in')
secax.tick_params(which='both', direction='in')
ax.minorticks_on()
secax.minorticks_on()
ax.legend()
fig.tight_layout()
###############################################################################################
def ExportData(datalist):
# Temp, inv_t, wt, wt%, dmdt,
for d in datalist:
f = open(d.title + "_export.txt", "w")
if d.isRamp:
# write the header
f.write("Title: " + d.title + "\n")
            f.write("Temp (K), Inverse Temp (1/K), Weight (mg), Weight %, dm/dt (mg/min) \n")
# For each value in the range of dmdt (temp - 1), write the line
for i in range(len(d.temp)-1):
line = [d.temp_K[i], 1.0/d.temp_K[i], d.weight[i], d.weight_percent[i], d.dmdt[i]]
str_line = str(line[0])
for item in line[1:]:
str_line = str_line + ", " + str(item)
f.write(str_line + "\n")
# Write the last value for everything but dmdt, which is already exhausted
i = len(d.temp) - 1
line = [d.temp_K[i], 1.0/d.temp_K[i], d.weight[i], d.weight_percent[i]]
str_line = str(line[0])
for item in line[1:]:
str_line = str_line + ", " + str(item)
f.write(str_line + "\n")
else: # it's not a ramp file
print("Title: ", d.title)
            print("Temp (C), Temp (K), Inverse Temp (1/K), Start Weight (mg), Stop Weight (mg), dm/dt (mg/min)")
for i in range(len(d.iso_temps_K) - 1):
print(d.iso_temps[i],
d.iso_temps_K[i],
1.0/d.iso_temps_K[i],
d.iso_start_weight[i],
d.iso_stop_weight[i],
d.iso_dmdt[i]
)
i = len(d.iso_temps_K) - 1
print(d.iso_temps[i],
d.iso_temps_K[i],
1.0/d.iso_temps_K[i],
d.iso_start_weight[i],
d.iso_stop_weight[i],
)
f.close()
###############################################################################################
def importData(custom_legend_names):
datalist = []
filenames = sys.argv[1:]
# Did they give any files?
if not filenames:
print("No files given. Please specify files to process with 'python3 processTGA.py file1.txt file2.txt etc.'")
sys.exit()
# Are custom legend names being used?
if custom_legend:
if( len(custom_legend_names) != len(filenames) ):
print("Custom legend names must correspond to same number of files")
sys.exit()
# Read data from each file
for index, filename in enumerate(filenames):
try:
f = open(filename, 'r', encoding='utf-16')
lines = f.readlines()
except IOError:
print("Could not open/read file:", filename)
sys.exit()
# Determine if ramp or step data
isRamp = False
for ctr, line in enumerate(lines):
if 'ProcName' in line:
words = line.split()
if words[1] == 'Ramp':
isRamp = True
# Import data
if isRamp:
iso_temps = []
iso_timeSteps = []
for ctr, line in enumerate(lines):
if 'StartOfData' in line:
startOfDataLine = ctr
elif 'Sample' in line:
words = line.split()
if words[0] == 'Sample':
title = words[1]
else:
iso_temps = []
iso_timeSteps = []
for ctr, line in enumerate(lines):
if 'StartOfData' in line:
startOfDataLine = ctr
elif 'Sample' in line:
words = line.split()
if words[0] == 'Sample':
title = words[1]
                elif 'OrgMethod' in line and 'Ramp' in line:
if 'Isothermal' in lines[ctr+1]:
# temp
words = line.split()
iso_temps.append(float(words[-2]))
# time step
words = lines[ctr+1].split()
iso_timeSteps.append(float(words[-2]))
filedata = np.loadtxt(filename,
skiprows = startOfDataLine + 1,
delimiter = '\t',
encoding = 'utf-16',
)
time = filedata[:,0]
temp = filedata[:,1]
weight = filedata[:,2]
if custom_legend:
title = custom_legend_names[index]
# Initialize data objects
datalist.append( data(title, isRamp, time, temp, weight, iso_temps, iso_timeSteps) )
return datalist
###############################################################################################
def processStepData(datalist):
# Assumed monotonic heating
for d in datalist:
if not d.isRamp:
assert(len(d.iso_temps) == len(d.iso_timeSteps))
startStopIndices = []
for i in range(len(d.iso_temps)):
iso_temp = d.iso_temps[i]
iso_timeStep = d.iso_timeSteps[i]
iso_start, iso_stop = getIsothermStartStopIndices(d.temp, d.time, iso_temp, iso_timeStep)
startStopIndices.append((iso_start, iso_stop))
iso_dmdt = np.zeros(len(d.iso_temps))
temp_inv = np.zeros(len(d.iso_temps))
for i in range(len(d.iso_temps)):
mass_start = d.weight[startStopIndices[i][0]]
mass_stop = d.weight[startStopIndices[i][1]]
d.iso_start_weight.append(mass_start)
d.iso_stop_weight.append(mass_stop)
time_start = d.time[startStopIndices[i][0]]
time_stop = d.time[startStopIndices[i][1]]
iso_dmdt[i] = (mass_stop - mass_start)/(time_stop - time_start)
d.iso_dmdt = iso_dmdt
###############################################################################################
def getIsothermStartStopIndices(temp_data, time_data, iso_temp, iso_timeStep):
# Return first value within tolerance.
# If no values in tolerance, return closest match
error = abs(temp_data - iso_temp)
start = len(temp_data)-1
stop = len(temp_data)-1
# start index
for index, value in enumerate(error):
if value < isotherm_match_tolerance:
start = index
break
if start == len(temp_data)-1:
start = error.argmin()
# stop index
time_stop = len(temp_data)-1
temp_stop = len(temp_data)-1
time_start = time_data[start]
# temp stop
for index, value in enumerate(error[start:]):
if value > isotherm_deviation_tolerance:
temp_stop = index + start
break
# time stop
elapsed_time = time_data - time_start
for index, value in enumerate(elapsed_time[start:]):
if value >= iso_timeStep:
time_stop = index + start
break
stop = min(time_stop, temp_stop)
return start, stop
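# Worked example of the logic above (illustrative numbers, not from a real data file):
# with iso_temp = 100 C, isotherm_match_tolerance = 0.2 and isotherm_deviation_tolerance = 3.5,
# the start index is the first sample within 0.2 C of 100 C (or the closest sample if none
# qualifies), and the stop index is whichever comes first of (a) the first later sample that
# drifts more than 3.5 C from 100 C or (b) the first sample at least iso_timeStep minutes
# after the start.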
########################################################################################################
def plotinvKvsdmdt_stepwise_normalized(datalist):
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
colors = colors + colors
color_idx = 0
fig = plt.figure(5)
ax = fig.add_subplot(111)
ax.set_yscale('log')
secax = ax.secondary_xaxis('top', functions=(inv2C, C2inv))
for d in datalist:
if d.isRamp:
if scatter:
ax.plot(1.0/np.array(d.temp_K[:-1]), np.divide(-d.dmdt, d.weight[:-1]), 'o', ms=pt_size, color=colors[color_idx], label=d.title)
else:
ax.plot(1.0/np.array(d.iso_temps_K), np.divide(-d.iso_dmdt, d.weight[:-1]), '-o', color=colors[color_idx], label=d.title)
color_idx = color_idx + 1
if smooth_ramp_only_and_I_DEMAND_THE_SAME_COLORS:
color_idx = 0
for d in datalist:
if d.isRamp:
if smooth:
if scatter_smoothed_plt4:
# Note that matplotlib has a problem with log scale scatter plots, hence using plot instead of scatter
# for figure 4 only.
ax.plot(1.0/np.array(d.temp_K[:-1]), np.divide(-d.dmdt_smooth, d.weight[:-1]), 'o', ms=pt_size, color=colors[color_idx], label=d.title + ' smoothed')
else:
ax.plot(1.0/np.array(d.temp_K[:-1]), np.divide(-d.dmdt_smooth, d.weight[:-1]), color=colors[color_idx], label=d.title + ' smoothed')
color_idx = color_idx + 1
# Linear fit
if linear:
if len(linear_ranges) < 1:
            print("Linear range requires at least one tuple of temperatures: [(min temp0, max temp0), (min temp1, max temp1), etc.]")
print("If the linear fit is not desired, set 'linear = False' in the options")
sys.exit()
for tup in linear_ranges:
if len(tup) != 2:
print("Linear range requires exactly two arguments per tuple: [(min temp, max temp)]")
print("If the linear fit is not desired, set 'linear = False' in the options")
sys.exit()
if tup[0] >= tup[1]:
print("Linear range requires [(min temp, max temp)], with min temp < max temp")
sys.exit()
if len(datalist) != len(linear_ranges):
print(f"Length of datalist ({len(datalist)}) does not equal length of linear ranges ({len(linear_ranges)})")
print("Linear range requires: [(min temp0, max temp0), (min temp1, max temp1), etc.]")
print("If the linear fit is not desired, set 'linear = False' in the options")
sys.exit()
for i,d in enumerate(datalist):
linear_range = linear_ranges[i]
print(f'The linear range is {linear_range[0]}-{linear_range[1]}C')
if d.isRamp:
lin_idx = (linear_range[0] < d.temp) & (d.temp < linear_range[1])
# Check that this range has 2 valid points
valid_pts= 0
for tf in lin_idx:
if tf:
valid_pts = valid_pts + 1
if valid_pts < 2:
print('Cannot find the necessary 2 points in the linear data range for a line fit.'
+ ' Using entire data set for line fit.')
lin_temp_inv0 = 1.0/np.array(d.temp_K)
lin_dmdt = np.divide(-d.dmdt, d.weight)
else:
lin_temp_inv0 = 1.0/np.array(d.temp_K[lin_idx])
lin_dmdt = np.divide(-d.dmdt[lin_idx[:-1]], d.weight[lin_idx])
else:
lin_idx = (linear_range[0] < d.iso_temps) & (d.iso_temps < linear_range[1])
# Check that this range has 2 valid points
valid_pts= 0
for tf in lin_idx:
if tf:
valid_pts = valid_pts + 1
if valid_pts < 2:
print('Cannot find the necessary 2 points in the linear data range for a line fit.' \
+ ' Using entire data set for line fit.')
lin_temp_inv0 = 1.0/d.iso_temps_K
lin_dmdt = np.divide(-d.iso_dmdt, d.iso_start_weight)
else:
lin_temp_inv0 = 1.0/d.iso_temps_K[lin_idx]
lin_dmdt = np.divide(-d.iso_dmdt[lin_idx], d.iso_start_weight[lin_idx])
# Handle negative values
pos_idx = (lin_dmdt > 0.0)
neg_ctr = 0
for tf in pos_idx:
if not tf:
neg_ctr = neg_ctr + 1
            if neg_ctr > 0:
                print(f"{neg_ctr} negative values encountered in -dm/dt." \
                    + " Cannot take log of these values, so I'm ignoring them")
lin_temp_inv = lin_temp_inv0[pos_idx]
lin_logdmdt = np.log(lin_dmdt[pos_idx])
TT = np.linspace(lin_temp_inv[0], lin_temp_inv[-1], 100)
results = np.polyfit(lin_temp_inv, lin_logdmdt, 1, full=True)
m = results[0][0]
b = results[0][1]
if not results[1].size > 0:
RSS = 0.0
else:
RSS = results[1][0]
y = lin_logdmdt
ybar = np.sum(y)/len(y)
TSS = np.sum((y - ybar)**2)
Rsquared = 1 - RSS/TSS
print(f"For: {d.title}")
if b > 0:
print(f"Line: y={m}x + {b}")
else:
print(f"Line: y={m}x - {-b}")
print(f"RSS: {RSS}")
print(f"R^2: {Rsquared}")
print(f"Enthalpy = {m*(-8.3145e-3):.3f} kJ/mol")
line = np.poly1d(results[0])
if b > 0:
ax.plot(TT, np.exp(line(TT)), color=colors[color_idx],
label=r"$\ln\left(-\frac{dm}{dt}\right)$" \
+ f"={m:.1f}" \
+ r"$T^{-1}$+" \
+ f"{b:.1f}," \
+ f" $R^2$={Rsquared:.4f}")
else:
ax.plot(TT, np.exp(line(TT)), color=colors[color_idx],
label=r"$\ln\left(-\frac{dm}{dt}\right)$" \
+ f"={m:.1f}" \
+ r"$T^{-1}$-" \
+ f"{-b:.1f}," \
+ f" $R^2$={Rsquared:.4f}")
color_idx = color_idx + 1
ax.set_xlabel('1/T (K$^{-1}$)', fontname=fontname, fontsize=fontsize)
secax.set_xlabel(r'Temperature ($\degree$C)', fontname=fontname, fontsize=fontsize)
    ax.set_ylabel(r'$-\frac{1}{m}\frac{dm}{dt}$ (min$^{-1}$)', fontname=fontname, fontsize=fontsize)
    plt.title(r"Normalized $-\frac{dm}{dt}$ vs. 1/T", fontname=fontname, fontsize=fontsize_title)
ax.tick_params(labelright=True, right=True, which='both', direction='in')
secax.tick_params(which='both', direction='in')
ax.minorticks_on()
secax.minorticks_on()
ax.legend()
fig.tight_layout()
###############################################################################################
def plotStepMassvsTime(datalist):
fig = plt.figure(6)
for d in datalist:
if d.isRamp:
print("PlotStepMassvsTime is only valid for step files!")
sys.exit()
else:
assert(len(d.iso_temps) == len(d.iso_timeSteps))
startStopIndices = []
for i in range(len(d.iso_temps)):
iso_temp = d.iso_temps[i]
iso_timeStep = d.iso_timeSteps[i]
iso_start, iso_stop = getIsothermStartStopIndices(d.temp, d.time, iso_temp, iso_timeStep)
startStopIndices.append((iso_start, iso_stop))
iso_mass = np.zeros(startStopIndices[i][1] - startStopIndices[i][0] + 1)
iso_time = np.zeros(startStopIndices[i][1] - startStopIndices[i][0] + 1)
for i in range(len(d.iso_temps)):
iso_mass = d.weight[startStopIndices[i][0] : startStopIndices[i][1] + 1]
iso_time = d.time[startStopIndices[i][0] : startStopIndices[i][1] + 1]
plt.plot(iso_time, iso_mass, label=r"" + d.title + " " + str(d.iso_temps[i]) + "$\degree$C")
plt.xlabel(r'Time (min)', fontname=fontname, fontsize=fontsize)
    plt.ylabel('Mass (mg)', fontname=fontname, fontsize=fontsize)
plt.title("Mass vs. Time", fontname=fontname, fontsize=fontsize_title)
plt.tick_params(labelright=True, right=True, labeltop=True, top=True, which='both', direction='in')
plt.minorticks_on()
plt.legend()
fig.tight_layout()
fig = plt.figure(7)
for j, d in enumerate(datalist):
if d.isRamp:
print("PlotStepMassvsTime is only valid for step files!")
sys.exit()
else:
assert(len(d.iso_temps) == len(d.iso_timeSteps))
startStopIndices = []
for i in range(len(d.iso_temps)):
iso_temp = d.iso_temps[i]
iso_timeStep = d.iso_timeSteps[i]
iso_start, iso_stop = getIsothermStartStopIndices(d.temp, d.time, iso_temp, iso_timeStep)
startStopIndices.append((iso_start, iso_stop))
iso_mass = np.zeros(startStopIndices[i][1] - startStopIndices[i][0] + 1)
iso_time = np.zeros(startStopIndices[i][1] - startStopIndices[i][0] + 1)
for i in range(len(d.iso_temps)):
if linear_ranges[j][0] <= d.iso_temps[i] and d.iso_temps[i] <= linear_ranges[j][1]:
iso_mass = d.weight[startStopIndices[i][0] : startStopIndices[i][1] + 1]
iso_time = d.time[startStopIndices[i][0] : startStopIndices[i][1] + 1] - d.time[startStopIndices[i][0]]
plt.plot(iso_time, iso_mass, label=r"" + d.title + " " + str(d.iso_temps[i]) + "$\degree$C")
plt.xlabel(r'Time (min)', fontname=fontname, fontsize=fontsize)
    plt.ylabel('Mass (mg)', fontname=fontname, fontsize=fontsize)
plt.title("Mass vs. Time", fontname=fontname, fontsize=fontsize_title)
plt.tick_params(labelright=True, right=True, labeltop=True, top=True, which='both', direction='in')
plt.minorticks_on()
plt.legend()
fig.tight_layout()
###############################################################################################
def plotStepdmdtvsTime(datalist):
fig = plt.figure(8)
for j, d in enumerate(datalist):
if d.isRamp:
print("PlotStepMassvsTime is only valid for step files!")
sys.exit()
else:
assert(len(d.iso_temps) == len(d.iso_timeSteps))
startStopIndices = []
for i in range(len(d.iso_temps)):
iso_temp = d.iso_temps[i]
iso_timeStep = d.iso_timeSteps[i]
iso_start, iso_stop = getIsothermStartStopIndices(d.temp, d.time, iso_temp, iso_timeStep)
startStopIndices.append((iso_start, iso_stop))
iso_dmdt = np.zeros(startStopIndices[i][1] - startStopIndices[i][0] + 1)
iso_time = np.zeros(startStopIndices[i][1] - startStopIndices[i][0] + 1)
for i in range(len(d.iso_temps)):
if linear_ranges[j][0] <= d.iso_temps[i] and d.iso_temps[i] <= linear_ranges[j][1]:
iso_dmdt = d.dmdt[startStopIndices[i][0] : startStopIndices[i][1] + 1]
iso_time = d.time[startStopIndices[i][0] : startStopIndices[i][1] + 1] - d.time[startStopIndices[i][0]]
plt.plot(iso_time, iso_dmdt, label=r"" + d.title + " " + str(d.iso_temps[i]) + "$\degree$C")
for i in range(len(d.iso_temps)):
if linear_ranges[j][0] <= d.iso_temps[i] and d.iso_temps[i] <= linear_ranges[j][1]:
iso_dmdt = d.dmdt_smooth[startStopIndices[i][0] : startStopIndices[i][1] + 1]
iso_time = d.time[startStopIndices[i][0] : startStopIndices[i][1] + 1] - d.time[startStopIndices[i][0]]
plt.plot(iso_time, iso_dmdt, label=r"" + d.title + " " + str(d.iso_temps[i]) + "$\degree$C smooth")
plt.xlabel(r'Time (min)', fontname=fontname, fontsize=fontsize)
plt.ylabel(r'$\frac{dm}{dt}$ (mg/min)', fontname=fontname, fontsize=fontsize)
plt.title(r"$\frac{dm}{dt}$ vs. Time", fontname=fontname, fontsize=fontsize_title)
plt.tick_params(labelright=True, right=True, labeltop=True, top=True, which='both', direction='in')
plt.minorticks_on()
plt.legend()
fig.tight_layout()
fig = plt.figure(9)
for j, d in enumerate(datalist):
if d.isRamp:
print("PlotStepMassvsTime is only valid for step files!")
sys.exit()
else:
assert(len(d.iso_temps) == len(d.iso_timeSteps))
startStopIndices = []
for i in range(len(d.iso_temps)):
iso_temp = d.iso_temps[i]
iso_timeStep = d.iso_timeSteps[i]
iso_start, iso_stop = getIsothermStartStopIndices(d.temp, d.time, iso_temp, iso_timeStep)
startStopIndices.append((iso_start, iso_stop))
iso_dmdt = np.zeros(startStopIndices[i][1] - startStopIndices[i][0] + 1)
iso_time = np.zeros(startStopIndices[i][1] - startStopIndices[i][0] + 1)
for i in range(len(d.iso_temps)):
if linear_ranges[j][0] <= d.iso_temps[i] and d.iso_temps[i] <= linear_ranges[j][1]:
iso_dmdt = d.dmdt_smooth[startStopIndices[i][0] : startStopIndices[i][1] + 1]
iso_time = d.time[startStopIndices[i][0] : startStopIndices[i][1] + 1] - d.time[startStopIndices[i][0]]
plt.plot(iso_time, np.gradient(iso_dmdt, iso_time),
label=r"" + d.title + " " + str(d.iso_temps[i]) + "$\degree$C")
plt.xlabel(r'Time (min)', fontname=fontname, fontsize=fontsize)
plt.ylabel(r'$\frac{d^2m}{dt^2}$ (mg/min$^2$)', fontname=fontname, fontsize=fontsize)
plt.title(r"$\frac{d^2m}{dt^2}$ vs. Time", fontname=fontname, fontsize=fontsize_title)
plt.tick_params(labelright=True, right=True, labeltop=True, top=True, which='both', direction='in')
plt.minorticks_on()
plt.legend()
fig.tight_layout()
###############################################################################################
if __name__ == "__main__":
datalist = importData(custom_legend_names)
processStepData(datalist)
if ExportData_bool:
ExportData(datalist)
if plotTempvsMass_bool:
plotTempvsMass(datalist)
if plotTempvsdmdt_bool:
plotTempvsdmdt(datalist)
if plotTempvsdmdt_normalized_bool:
plotTempvsdmdt_normalized(datalist)
if plotinvKvsdmdt_bool:
plotinvKvsdmdt(datalist)
if plotinvKvsdmdt_stepwise_normalized_bool:
plotinvKvsdmdt_stepwise_normalized(datalist)
if plotStepMassvsTime_bool:
plotStepMassvsTime(datalist)
if plotStepdmdtvsTime_bool:
plotStepdmdtvsTime(datalist)
plt.show()
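# Example invocation (file names are placeholders; the script expects TGA export files as
# command-line arguments, as checked in importData above):
#   python3 processTGA.py ramp_run.txt stepwise_run.txt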
|
StarcoderdataPython
|
1785427
|
<filename>src/corpus/__init__.py
from corpus.corpus import CorpusAnalyzer
from corpus.cran_corpus import CranCorpusAnalyzer
from corpus.cisi_corpus import CisiCorpusAnalyzer
from corpus.lisa_corpus import LisaCorpusAnalyzer
from corpus.npl_corpus import NplCorpusAnalyzer
from corpus.union_corpus import UnionCorpusAnalyzer
from corpus.wiki_corpus import WikiCorpusAnalyzer
|
StarcoderdataPython
|
18554
|
<reponame>samuelvp360/Microbiological-Assay-Calculator<filename>MainController.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import os
from pathlib import Path
from datetime import datetime
from PyQt5 import QtCore as qtc
from PyQt5 import QtWidgets as qtw
from PyQt5 import uic
import numpy as np
from Models import AssaysModel, SamplesModel
from DB.AssaysDB import MyZODB
import matplotlib
from matplotlib import pyplot as plt
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg, NavigationToolbar2QT as NavigationToolbar
from WellProcessor import WellProcessor
from Assay import Assay
matplotlib.use('Qt5Agg')
p = Path(__file__)
print(p)
isLink = os.path.islink(p)
if isLink:
theLink = os.readlink(p)
path = Path(theLink).resolve().parent
path = f'{path}/'
print('linked')
else:
path = ''
print('unlinked')
class PlotCanvas(FigureCanvasQTAgg):
"""
docstring
"""
def __init__(self, parent=None):
self.fig = Figure(
figsize=(12, 8), dpi=100, facecolor='#2d2a2e', tight_layout=True
)
self.ax = self.fig.add_subplot(111)
super().__init__(self.fig)
class MainWindow(qtw.QMainWindow):
def __init__(self, path):
super().__init__()
self.path = path
uic.loadUi(f'{path}Views/uiMainWindow.ui', self)
self.database = MyZODB(path)
self.canvas = PlotCanvas(self)
self.toolbar = NavigationToolbar(self.canvas, self)
self.uiToolbarLayout.addWidget(self.toolbar)
self.uiPlotLayout.addWidget(self.canvas)
self.assaysList = self.LoadAssays()
self.assaysToRemove = []
self.model = AssaysModel(self.assaysList)
self.uiAssaysTableView.setModel(self.model)
self.uiAssaysTableView.resizeColumnsToContents()
self.uiAssaysTableView.resizeRowsToContents()
self.selectedAssay = None
self.selectedSamples = None
# ---------------- SIGNALS ---------------
self.uiCommitButton.clicked.connect(self.StoreChanges)
self.uiAddAssayButton.clicked.connect(self.AddAssay)
self.uiDelAssayButton.clicked.connect(self.RemoveAssay)
self.uiAddSampleButton.clicked.connect(self.AddSample)
self.uiDelSampleButton.clicked.connect(self.RemoveSample)
self.uiDiscardButton.clicked.connect(self.DiscardChanges)
self.uiPlotButton.clicked.connect(lambda: self.Plot(ext=True))
self.uiAssaysTableView.selectionModel().selectionChanged.connect(self.SetSelectedAssay)
self.uiSamplesTableView.doubleClicked.connect(self.TriggerWell)
def Plot(self, ext=False):
if ext:
fig, ax = plt.subplots()
self.canvas.ax.clear()
assay = self.assaysList[self.selectedAssay]
samples = [assay.samples[i] for i in self.selectedSamples]
n = len(samples)
x = np.arange(len(assay.conc))
limit = 0.4
width = 2 * limit / n
if n == 1:
factor = np.zeros(1)
else:
factor = np.linspace(-limit + width / 2, limit - width / 2, n)
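        # factor spreads the n selected samples evenly across a band of total width 2*limit
        # (0.8 of a tick) centred on each concentration position, so grouped bars do not overlap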
for i, sample in enumerate(samples):
mean = sample['Inhibition'].loc['Mean', ::-1]
std = sample['Inhibition'].loc['Std', ::-1]
if ext:
ax.bar(
x + factor[i], mean, width, label=sample['Name'],
yerr=std, capsize=15 / n, edgecolor='black'
)
ax.axhline(
100, color='black', linestyle='--', linewidth=0.8
)
self.canvas.ax.bar(
x + factor[i], mean, width, label=sample['Name'],
yerr=std, capsize=15 / n, edgecolor='black'
)
self.canvas.ax.axhline(
100, color='black', linestyle='--', linewidth=0.8
)
self.canvas.ax.set_title(assay.name, color='#ae81ff')
self.canvas.ax.set_xlabel(u'Concentration (\u00B5g/mL)', color='#f92672')
self.canvas.ax.set_ylabel('%Inhibition', color='#f92672')
self.canvas.ax.set_xticks(x)
self.canvas.ax.set_xticklabels(assay.conc[::-1])
self.canvas.ax.tick_params(axis='x', colors='#66d9ef')
self.canvas.ax.tick_params(axis='y', colors='#66d9ef')
self.canvas.ax.legend()
self.canvas.draw()
if ext:
ax.set_title(assay.name, color='#ae81ff')
ax.set_xlabel(u'Concentrations (\u00B5g/mL)', color='#f92672')
ax.set_ylabel('%Inhibition', color='#f92672')
ax.set_xticks(x)
ax.set_xticklabels(assay.conc)
ax.tick_params(axis='x', colors='#66d9ef')
ax.tick_params(axis='y', colors='#66d9ef')
ax.legend()
fig.tight_layout()
plt.show()
def LoadAssays(self):
DB = self.database.FetchDB()
if not len(DB):
return []
else:
assayNames = DB.keys()
return [DB.get(i) for i in assayNames]
def SetSelectedAssay(self):
indexes = self.uiAssaysTableView.selectedIndexes()
if indexes:
self.selectedAssay = indexes[0].row()
assay = self.assaysList[self.selectedAssay]
self.samplesModel = SamplesModel(assay.samples, assay.conc)
self.uiSamplesTableView.setModel(self.samplesModel)
self.uiSamplesTableView.resizeColumnsToContents()
self.samplesModel.layoutChanged.emit()
self.uiSamplesTableView.selectionModel().selectionChanged.connect(self.SetSelectedSamples)
else:
self.selectedAssay = None
def SetSelectedSamples(self):
indexes = self.uiSamplesTableView.selectedIndexes()
if indexes:
self.selectedSamples = tuple(set([i.row() for i in indexes]))
if self.selectedAssay is not None:
self.Plot()
else:
qtw.QMessageBox.warning(
                    self, 'No Assay Selected!',
'Please select the corresponding assay before showing the plot'
)
else:
self.selectedSamples = None
def SetConcentrations(self):
value, ok = qtw.QInputDialog.getText(
self, 'Concentrations', 'Please enter the highest concentration'
)
if ok:
try:
                # the number 6 can be changed according to the number of
                # serial (two-fold) dilutions
conc = [str(float(value.replace(',', '.')) / 2 ** i) for i in range(6)]
return conc
except ValueError:
qtw.QMessageBox.warning(
self, 'Not a valid number!',
                    'You have not entered a valid number, please try again'
)
return False
def AddAssay(self):
items = ('MIC', 'MTT')
typeOfAssay, ok = qtw.QInputDialog.getItem(
self, 'Type of Assay', 'Choose the type of assay to add',
items, 0, False
)
name = self.SetAssayName()
conc = self.SetConcentrations()
while not conc:
conc = self.SetConcentrations()
date = datetime.now()
assay = Assay(typeOfAssay, name, conc, date)
self.assaysList.append(assay)
self.model.layoutChanged.emit()
self.uiAssaysTableView.resizeColumnsToContents()
self.uiAssaysTableView.resizeRowsToContents()
def AddSample(self):
items = ['1', '2', '3', '4']
if self.selectedAssay is not None:
numOfSamples, ok1 = qtw.QInputDialog.getItem(
self, 'Number of Samples', 'Choose the number of samples per plate',
items, 0, False
)
if int(numOfSamples) == 3:
del items[3]
elif int(numOfSamples) == 4:
del items[2:]
replicas, ok2 = qtw.QInputDialog.getItem(
self, 'Number of Samples', 'Choose the number of replicas',
items, 0, False
)
if ok1 and ok2:
self.wellProcessor = WellProcessor(
self.path, self.assaysList[self.selectedAssay].name,
self.assaysList[self.selectedAssay].conc,
int(numOfSamples), int(replicas)
)
self.wellProcessor.submitted.connect(self.SampleProcessor)
self.wellProcessor.show()
else:
return False
else:
qtw.QMessageBox.warning(
self, 'No Assay Selection',
'You have not selected an assay, please choose one assay before adding a sample'
)
def RemoveAssay(self):
if self.selectedAssay is not None:
self.assaysToRemove.append(self.assaysList[self.selectedAssay].name)
if self.assaysList[self.selectedAssay].stored:
self.database.RemoveAssay(self.assaysList[self.selectedAssay].name)
del self.assaysList[self.selectedAssay]
self.selectedAssay = self.selectedAssay - 1 if self.selectedAssay - 1 >= 0 else 0
if len(self.assaysList) > 0:
index = self.uiAssaysTableView.model().index(self.selectedAssay, 0, qtc.QModelIndex())
self.uiAssaysTableView.setCurrentIndex(index)
self.uiAssaysTableView.selectionModel().selectionChanged.connect(self.SetSelectedAssay)
self.model.layoutChanged.emit()
def RemoveSample(self):
if self.selectedAssay is not None and self.selectedSamples is not None:
self.assaysList[self.selectedAssay].RemoveSample(self.selectedSamples)
self.model.layoutChanged.emit()
assay = self.assaysList[self.selectedAssay]
self.samplesModel = SamplesModel(assay.samples, assay.conc)
self.uiSamplesTableView.setModel(self.samplesModel)
self.selectedSamples = [self.selectedSamples[0] - 1 if self.selectedSamples[0] - 1 >= 0 else 0]
if len(self.assaysList[self.selectedAssay].samples) > 0:
index = self.uiSamplesTableView.model().index(self.selectedSamples[0], 0, qtc.QModelIndex())
self.uiSamplesTableView.setCurrentIndex(index)
self.uiSamplesTableView.selectionModel().selectionChanged.connect(self.SetSelectedSamples)
self.samplesModel.layoutChanged.emit()
@qtc.pyqtSlot(list, list, list, object, object)
def SampleProcessor(self, samples, sampleNames, samplesPositions, TF, T0):
assay = self.assaysList[self.selectedAssay]
for index, name in enumerate(sampleNames):
exist = [True if s['Name'] == name else False for s in assay.samples]
if True in exist:
reply = qtw.QMessageBox.question(
self, 'Existing Sample',
f'The sample {name} already exists in {assay.name}. Do you want to overwrite it?',
qtw.QMessageBox.Yes | qtw.QMessageBox.No,
qtw.QMessageBox.No
)
if reply == qtw.QMessageBox.Yes:
for idx, value in enumerate(exist):
if value:
del assay.samples[idx]
assay.StoreSample(samples[index], index, sampleNames, samplesPositions, TF, T0)
elif reply == qtw.QMessageBox.No:
continue
else:
assay.StoreSample(samples[index], index, sampleNames, samplesPositions, TF, T0)
self.samplesModel = SamplesModel(assay.samples, assay.conc)
self.uiSamplesTableView.setModel(self.samplesModel)
self.uiSamplesTableView.selectionModel().selectionChanged.connect(self.SetSelectedSamples)
self.samplesModel.layoutChanged.emit()
self.uiSamplesTableView.resizeColumnsToContents()
def SetAssayName(self):
text, ok = qtw.QInputDialog.getText(
self, 'Assay Name', 'Please enter the name of the assay'
)
if ok:
return text
def TriggerWell(self):
if self.selectedAssay is not None and self.selectedSamples is not None:
assay = self.assaysList[self.selectedAssay]
sample = assay.samples[self.selectedSamples[0]]
self.wellProcessor = WellProcessor(
self.path, assay.name, assay.conc, len(sample['Name of samples']),
sample['TF'].shape[0], sample['T0'], sample['TF'],
sample['Name of samples'], sample['Positions']
)
self.wellProcessor.submitted.connect(self.SampleProcessor)
self.wellProcessor.show()
def TrackChanges(self):
assaysToStore = [index for index, assay in enumerate(self.assaysList) if not assay.stored]
assaysToUpdate = [index for index, assay in enumerate(self.assaysList) if assay._p_changed]
assaysToRemove = self.assaysToRemove
return assaysToStore, assaysToUpdate, assaysToRemove
def StoreChanges(self):
assaysToStore, assaysToUpdate, assaysToRemove = self.TrackChanges()
toStore = len(assaysToStore)
toUpdate = len(assaysToUpdate)
toRemove = len(assaysToRemove)
message = qtw.QMessageBox()
message.setWindowTitle('Changes to save')
message.setStandardButtons(qtw.QMessageBox.Ok | qtw.QMessageBox.Cancel)
text = []
if toStore > 0:
text1 = ['\nTo Store: ' + self.assaysList[i].name for i in assaysToStore]
text.extend(text1)
if toUpdate > 0:
text2 = ['\nTo Update: ' + self.assaysList[i].name for i in assaysToUpdate]
text.extend(text2)
if toRemove > 0:
text3 = ['\nTo Remove: ' + name for name in assaysToRemove]
text.extend(text3)
if toStore + toUpdate + toRemove > 0:
message.setText(
'The following assays will be stored, removed or updated:{}'.format(''.join(text))
)
returnValue = message.exec()
if returnValue == qtw.QMessageBox.Ok:
for index in assaysToStore:
self.database.StoreAssay(self.assaysList[index])
if len(assaysToStore) == 0 and len(assaysToUpdate) > 0:
self.database.Commit()
if len(assaysToStore) == 0 and len(assaysToRemove) > 0:
self.database.Commit()
else:
self.database.Abort()
else:
message2 = qtw.QMessageBox()
message2.setText('There are no changes to be saved')
message2.exec()
def DiscardChanges(self):
self.database.Abort()
self.assaysList = self.LoadAssays()
self.model = AssaysModel(self.assaysList)
self.uiAssaysTableView.setModel(self.model)
self.uiAssaysTableView.resizeColumnsToContents()
self.uiAssaysTableView.resizeRowsToContents()
if len(self.assaysList) > 0:
index = self.uiAssaysTableView.model().index(self.selectedAssay, 0, qtc.QModelIndex())
self.uiAssaysTableView.setCurrentIndex(index)
self.uiAssaysTableView.selectionModel().selectionChanged.connect(self.SetSelectedAssay)
self.model.layoutChanged.emit()
if len(self.assaysList[self.selectedAssay].samples) > 0:
index = self.uiSamplesTableView.model().index(self.selectedSamples[0], 0, qtc.QModelIndex())
self.uiSamplesTableView.setCurrentIndex(index)
assay = self.assaysList[self.selectedAssay]
self.samplesModel = SamplesModel(assay.samples, assay.conc)
self.uiSamplesTableView.setModel(self.samplesModel)
self.uiSamplesTableView.selectionModel().selectionChanged.connect(self.SetSelectedSamples)
self.samplesModel.layoutChanged.emit()
self.uiSamplesTableView.resizeColumnsToContents()
def closeEvent(self, event):
try:
assaysToStore, assaysToUpdate, assaysToRemove = self.TrackChanges()
toStore = len(assaysToStore)
toUpdate = len(assaysToUpdate)
toRemove = len(assaysToRemove)
if toStore + toUpdate + toRemove > 0:
reply = qtw.QMessageBox.question(
self, 'Window Close',
                    'Some changes have not been stored yet, do you want to save them?',
qtw.QMessageBox.Yes | qtw.QMessageBox.No | qtw.QMessageBox.Cancel,
qtw.QMessageBox.No
)
if reply == qtw.QMessageBox.Yes:
self.StoreChanges()
self.database.Close()
event.accept()
elif reply == qtw.QMessageBox.No:
self.database.Abort()
self.database.Close()
event.accept()
else:
event.ignore()
else:
self.database.Close()
event.accept()
except AttributeError:
self.database.Close()
event.accept()
if __name__ == '__main__':
app = qtw.QApplication(sys.argv)
window = MainWindow(path)
window.show()
sys.exit(app.exec_())
|
StarcoderdataPython
|
3382914
|
<gh_stars>0
import csv
import os
class GameState:
def __init__(self):
"""
Initialize game state variables.
"""
self.__quit_game = False
# score parameters
self.__bonus = 20
self.__multiplier = 1
self.__score = 0
self.__nick = 'AAA'
# obstacle parameters
self.__obstacle_speed = 5
self.__obstacle_prob = 1
        # when power_up_obstacles exceeds 10000, reset the counter and power up the obstacle parameters
self.__power_up_obstacles = 0
# list of current obstacles and powerups in the game
self.__obstacles = []
self.__powerups = []
# load nick names and scores from highscore file
home = os.environ['HOME']
self.__highscore_path = home+'/.config/aaaaaa/highscore.csv'
if not os.path.exists(self.__highscore_path):
self.__initialize_highscore()
self.__nicks, self.__scores = self.__load_highscore(self.__highscore_path)
# update highscore
self.__update_highscore = True
# rank of new highscore
self.__highscore_pos = None
# check if player reached new highscore
self.__new_highscore = False
# check if player needs to input nick
self.__input_nick = True
# character position while nick input
self.__nick_pos = 0
# possible nick characters
self.__chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890!#$%*+=-'
# pause game
self.__pause = False
# show title screen
self.__title = True
@property
def title(self):
"""
Boolean. If True, show title screen.
"""
return self.__title
@property
def bonus(self):
"""
Bonus that is added to the score every time the player hits the boundaries.
"""
return self.__bonus
@property
def pause(self):
"""
If True, game is paused.
"""
return self.__pause
@property
def multiplier(self):
"""
Score multiplier.
"""
return self.__multiplier
@property
def obstacles(self):
"""
List of current obstacles
"""
return self.__obstacles
@property
def powerups(self):
"""
List of current powerups.
"""
return self.__powerups
@property
def obstacle_prob(self):
"""
"Probability" for spawning new obstacles
"""
return self.__obstacle_prob
@property
def obstacle_speed(self):
"""
Obstacle movement speed
"""
return self.__obstacle_speed
@property
def power_up_obstacles(self):
"""
Score counter until next obstacle power up.
"""
return self.__power_up_obstacles
@property
def score(self):
"""
Player score.
"""
return self.__score
@property
def quit(self):
"""
Check if game has ended.
"""
return self.__quit_game
@property
def new_highscore(self):
"""
Player reached new highscore.
"""
# if new_highscore is already set to True do not check again
if self.__new_highscore:
return self.__new_highscore
self.__new_highscore = self.score > self.scores[-1]
return self.__new_highscore
@property
def highscore_pos(self):
"""
Position of new highscore in highscore list
"""
return self.__highscore_pos
@property
def scores(self):
"""
Highscore scores
"""
return self.__scores
@property
def nicks(self):
"""
Highscore nicks
"""
return self.__nicks
@property
def input_nick(self):
"""
Check if player needs to input nick
"""
return self.__input_nick
@property
def nick_pos(self):
"""
Current character position for nick input
"""
return self.__nick_pos
@property
def update_highscore(self):
"""
        True if the highscore wasn't updated yet, False otherwise
"""
return self.__update_highscore
def next_char(self):
"""
Go to next character in nick at nick_pos
"""
char_pos = self.__chars.index(self.__nick[self.__nick_pos]) + 1
if char_pos >= len(self.__chars):
char_pos = 0
prefix = self.__nick[:self.__nick_pos]
postfix = self.__nick[self.__nick_pos+1:]
self.__nick = prefix + self.__chars[char_pos] + postfix
# update highscore
self.add_highscore_entry()
def prev_char(self):
"""
Go to previous character in nick at nick_pos
"""
char_pos = self.__chars.index(self.__nick[self.__nick_pos]) - 1
if char_pos < 0:
char_pos = len(self.__chars) - 1
prefix = self.__nick[:self.__nick_pos]
postfix = self.__nick[self.__nick_pos+1:]
self.__nick = prefix + self.__chars[char_pos] + postfix
# update highscore
self.add_highscore_entry()
def next_nick_pos(self):
"""
Increase character position for nick input
"""
if self.__nick_pos < 2:
self.__nick_pos += 1
def prev_nick_pos(self):
"""
Decrease character position for nick input
"""
if self.__nick_pos > 0:
self.__nick_pos -= 1
def add_powerup(self, powerup):
"""
Add powerup to list of current powerups.
:param powerup:
New powerup object.
"""
self.__powerups.append(powerup)
def add_obstacle(self, obstacle):
"""
Add obstacle to list of current obstacles.
:param obstacle:
New obstacle object
"""
self.__obstacles.append(obstacle)
def remove_powerup(self, powerup):
"""
Remove powerup from list of current powerups.
:param powerup:
Powerup object
"""
self.__powerups.remove(powerup)
def remove_obstacle(self, obstacle):
"""
Remove obstacle from list of current obstacles.
:param obstacle:
Obstacle object
"""
self.__obstacles.remove(obstacle)
def reset_multiplier(self):
"""
Reset multiplier to 1.
"""
self.__multiplier = 1
def reset_power_up_obstacles(self):
"""
Decrease power_up_obstacles counter by 10000
"""
self.__power_up_obstacles -= 10000
def set_multiplier(self, multiplier):
"""
Set score multiplier.
:param multiplier:
New score multiplier.
"""
self.__multiplier = multiplier
def increase_multiplier(self, x):
"""
Increase score multiplier by x.
:param x:
Integer
"""
self.__multiplier += x
def increase_obstacle_speed(self):
"""
Increase obstacle speed by 1
"""
self.__obstacle_speed += 1
def increase_score(self):
"""
Increase player score by multiplier * bonus
"""
self.__score += self.__multiplier * self.__bonus
self.__power_up_obstacles += self.__multiplier * self.__bonus
def increase_obstacle_prob(self):
"""
Increase obstacle_prob by 1
"""
self.__obstacle_prob += 1
def add_highscore_entry(self):
"""
Add new highscore entry (nick, score).
"""
if self.__highscore_pos is None:
self.__highscore_pos = [self.__score > s for s in self.__scores].index(True)
self.__nicks.insert(self.__highscore_pos, self.__nick)
self.__nicks = self.__nicks[:-1]
self.__scores.insert(self.__highscore_pos, self.__score)
self.__scores = self.__scores[:-1]
else:
self.__nicks[self.__highscore_pos] = self.__nick
self.__scores[self.__highscore_pos] = self.__score
def quit_game(self):
"""
Set quit_game to True.
"""
self.__quit_game = True
def highscore_updated(self):
"""
Set update_highscore to False
"""
self.__update_highscore = False
def set_nick(self):
"""
Set input_nick to False and update highscore file
"""
self.__input_nick = False
self.__write_highscore(self.__highscore_path)
def __write_highscore(self, file):
"""
Write highscore to csv file
:param file:
csv file containing nicknames and highscore
"""
with open(file, 'w') as f:
writer = csv.writer(f)
# write entries to file
for entry in zip(self.__nicks, self.__scores):
                writer.writerow(entry)
f.close()
def __load_highscore(self, file):
"""
        Load highscore from csv file.
:param file:
csv file containing nicknames and highscores
:return:
Loaded highscore.
"""
nicks = []
scores = []
with open(file) as f:
top_ten = csv.reader(f)
# read first ten entries and ignore the remaining
for nick, score in top_ten:
nicks.append(nick)
scores.append(int(score))
if len(nicks) == 10:
break
f.close()
return nicks, scores
def pause_game(self):
"""
Set pause game to True
"""
self.__pause = True
def resume_game(self):
"""
Set pause game to False
"""
self.__pause = False
def start_game(self):
"""
Set title to False
"""
self.__title = False
def back_to_title(self):
"""
Set title to True
"""
self.__title = True
def destroy_obstacles_powerups(self):
"""
Reset obstacles and powerups list
"""
self.__obstacles = []
self.__powerups = []
def reset(self):
"""
Reset game state for new game.
"""
self.__quit_game = False
# score parameters
self.__bonus = 20
self.__multiplier = 1
self.__score = 0
self.__nick = 'AAA'
# obstacle parameters
self.__obstacle_speed = 5
self.__obstacle_prob = 1
        # when power_up_obstacles exceeds 10000, reset it and power up the obstacle parameters
self.__power_up_obstacles = 0
# list of current obstacles and powerups in the game
self.__obstacles = []
self.__powerups = []
# update highscore
self.__update_highscore = True
# rank of new highscore
self.__highscore_pos = None
# check if player reached new highscore
self.__new_highscore = False
# check if player needs to input nick
self.__input_nick = True
# character position while nick input
self.__nick_pos = 0
def __initialize_highscore(self):
"""
Initialize highscore csv file.
"""
rows = [['AAA', 10000],
['AAA', 9000],
['AAA', 8000],
['AAA', 7000],
['AAA', 6000],
['AAA', 5000],
['AAA', 4000],
['AAA', 3000],
['AAA', 2000],
['AAA', 1000]]
with open(self.__highscore_path, 'w') as f:
writer = csv.writer(f)
writer.writerows(rows)
f.close()
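# Hedged usage sketch (the driver loop below is a made-up illustration of the
# score / power-up bookkeeping described in the comments above, not code from
# the original game):
#
#     state = GameState()
#     state.start_game()
#     for _ in range(600):                       # say, one boundary hit per frame
#         state.increase_score()                 # adds multiplier * bonus to score and power_up_obstacles
#         if state.power_up_obstacles > 10000:
#             state.reset_power_up_obstacles()   # subtracts 10000
#             state.increase_obstacle_speed()    # the caller decides how to "power up" obstacles
#             state.increase_obstacle_prob()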
|
StarcoderdataPython
|
100517
|
from typing import Dict, List, Any
import numpy as np
from overrides import overrides
from .instance import TextInstance, IndexedInstance
from ..data_indexer import DataIndexer
class QuestionPassageInstance(TextInstance):
"""
A QuestionPassageInstance is a base class for datasets that consist primarily of a question
text and a passage, where the passage contains the answer to the question. This class should
not be used directly due to the missing ``_index_label`` function, use a subclass instead.
"""
def __init__(self, question_text: str, passage_text: str, label: Any, index: int=None):
super(QuestionPassageInstance, self).__init__(label, index)
self.question_text = question_text
self.passage_text = passage_text
def __str__(self):
return ('QuestionPassageInstance(' + self.question_text +
', ' + self.passage_text + ', ' +
str(self.label) + ')')
@overrides
def words(self) -> Dict[str, List[str]]:
words = self._words_from_text(self.question_text)
passage_words = self._words_from_text(self.passage_text)
for namespace in words:
words[namespace].extend(passage_words[namespace])
return words
def _index_label(self, label: Any) -> List[int]:
"""
Index the labels. Since we don't know what form the label takes,
we leave it to subclasses to implement this method.
"""
raise NotImplementedError
@overrides
def to_indexed_instance(self, data_indexer: DataIndexer):
question_indices = self._index_text(self.question_text, data_indexer)
passage_indices = self._index_text(self.passage_text, data_indexer)
label_indices = self._index_label(self.label)
return IndexedQuestionPassageInstance(question_indices,
passage_indices, label_indices,
self.index)
class IndexedQuestionPassageInstance(IndexedInstance):
"""
This is an indexed instance that is used for (question, passage) pairs.
"""
def __init__(self,
question_indices: List[int],
passage_indices: List[int],
label: List[int],
index: int=None):
super(IndexedQuestionPassageInstance, self).__init__(label, index)
self.question_indices = question_indices
self.passage_indices = passage_indices
@classmethod
@overrides
def empty_instance(cls):
return IndexedQuestionPassageInstance([], [], label=None, index=None)
@overrides
def get_lengths(self) -> Dict[str, int]:
"""
We need to pad at least the question length, the passage length, and the
word length across all the questions and passages. Subclasses that
add more arguments should also override this method to enable padding on said
arguments.
"""
question_lengths = self._get_word_sequence_lengths(self.question_indices)
passage_lengths = self._get_word_sequence_lengths(self.passage_indices)
lengths = {}
# the number of words to pad the question to
lengths['num_question_words'] = question_lengths['num_sentence_words']
# the number of words to pad the passage to
lengths['num_passage_words'] = passage_lengths['num_sentence_words']
if 'num_word_characters' in question_lengths and 'num_word_characters' in passage_lengths:
# the length of the longest word across the passage and question
lengths['num_word_characters'] = max(question_lengths['num_word_characters'],
passage_lengths['num_word_characters'])
return lengths
@overrides
def pad(self, max_lengths: Dict[str, int]):
"""
In this function, we pad the questions and passages (in terms of number of words in each),
as well as the individual words in the questions and passages themselves.
"""
max_lengths_tmp = max_lengths.copy()
max_lengths_tmp['num_sentence_words'] = max_lengths_tmp['num_question_words']
self.question_indices = self.pad_word_sequence(self.question_indices, max_lengths_tmp)
max_lengths_tmp['num_sentence_words'] = max_lengths_tmp['num_passage_words']
self.passage_indices = self.pad_word_sequence(self.passage_indices, max_lengths_tmp,
truncate_from_right=False)
@overrides
def as_training_data(self):
question_array = np.asarray(self.question_indices, dtype='int32')
passage_array = np.asarray(self.passage_indices, dtype='int32')
return (question_array, passage_array), np.asarray(self.label)
|
StarcoderdataPython
|
1614077
|
<filename>DataWrangling/scraping_web.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Please note that the function 'make_request' is provided for
# your reference only.
# You will not be able to actually use it from within the Udacity web UI.
# Your task is to process the HTML using BeautifulSoup, extract the hidden
# form field values for "__EVENTVALIDATION" and "__VIEWSTATE" and set the
# appropriate values in the data dictionary.
# All your changes should be in the 'extract_data' function
from bs4 import BeautifulSoup
import requests
import os
import json
# setup the location files
DATADIR = "../Data/"
DATAFILEOUT = "airport.html"
html_page = 'http://www.transtats.bts.gov/Data_Elements.aspx?Data=2'
# extract the hidden form fields needed to post the request
def extract_data(page):
data = {"eventvalidation": "",
"viewstate": ""}
soup = BeautifulSoup(page, "lxml")
ev = soup.find(id="__EVENTVALIDATION")
data["eventvalidation"] = ev["value"]
vs = soup.find(id="__VIEWSTATE")
data["viewstate"] = vs["value"]
return data
# post web page
def make_request(data, s):
eventvalidation = data["eventvalidation"]
viewstate = data["viewstate"]
r = s.post("http://www.transtats.bts.gov/Data_Elements.aspx?Data=2",
data={'AirportList': "BOS",
'CarrierList': "VX",
'Submit': 'Submit',
"__EVENTTARGET": "",
"__EVENTARGUMENT": "",
"__EVENTVALIDATION": eventvalidation,
"__VIEWSTATE": viewstate
})
return r.text
def test(webFile):
s = requests.Session()
r = s.get(html_page)
data = extract_data(r.text)
f = open(webFile, 'w')
f.write(make_request(data, s))
assert data["eventvalidation"] != ""
assert data["eventvalidation"].startswith("/wEdAMUJQSsANZr")
assert data["viewstate"].startswith("/wEPDwULLTE")
if __name__ == '__main__':
webFile = os.path.join(DATADIR, DATAFILEOUT)
test(webFile)
|
StarcoderdataPython
|
1711762
|
from argparse import ArgumentParser,FileType
import subprocess
import sys
# Auxiliary variables
parser = ArgumentParser("RSolver. SYPER Tool for RSA Challenges")
argumentsinputs = parser.add_argument_group('ARGUMENTS INPUTS')
filesinputs = parser.add_argument_group('FILE INPUTS')
private = parser.add_argument_group('INPUT PRIVATE PARTIAL KEYS')
ciphertexts=parser.add_argument_group('CIPHERTEXT INPUTS')
debuggroup = parser.add_argument_group('General')
# #Files input
filesinputs.add_argument("--publickey", dest="pem", type=FileType('r'), nargs='*', help="Path of Public(s) key(s) in PEM format (space separated)")
filesinputs.add_argument("--inputfile", dest="file", type=FileType('r'), help="file with the challenge data in decimals/hex")
private.add_argument("--partialkey", "-pk", dest="partialkey", help="Input base64 priv key filling with * the first or last character")
private.add_argument("--partialkeyfile", "-pkf", dest="partialkeyfile", type=FileType('r'), help="Filepath with base64 priv key filling with * the first or last character")
# Arguments line input
argumentsinputs.add_argument("-p", dest="p", help="Input p value in decimal/hex")
argumentsinputs.add_argument("-q", dest="q", help="Input q value in decimal/hex")
argumentsinputs.add_argument("-n", dest="n", help="Input n value in decimal/hex")
argumentsinputs.add_argument("-e", dest="e", help="Input e value in decimal/hex")
argumentsinputs.add_argument("-f", dest="phi", help="Input phi value in decimal/hex")
argumentsinputs.add_argument("-d", dest="d", help="Input d value in decimal/hex")
argumentsinputs.add_argument("-dp", dest="dp", help="Input dp value in decimal/hex")
argumentsinputs.add_argument("-dq", dest="dq", help="Input dq value in decimal/hex")
argumentsinputs.add_argument("-qinv", dest="qinv", help="Input qinv value in decimal/hex")
argumentsinputs.add_argument("-blind", dest="blind", action="store_true", help="Set this for blind RSA is possible (Oracle Attack)")
argumentsinputs.add_argument("-ucp", dest="ucp", help="Set this for blind RSA. ucp value")
ciphertexts.add_argument("-c", dest="c", help="Input c value in decimal/hex")
ciphertexts.add_argument("--c64file", dest="c64file", type=FileType('r'), nargs='*', help="Input crypted(s) file(s) in base64")
ciphertexts.add_argument("--cfile", "-x", dest="cryptfile", type=FileType('rb'), nargs='*', help="Input encrypted/s file/s")
ciphertexts.add_argument("-c64", dest="c64", help="Input c in base64")
#Debug
debuggroup.add_argument("--debug", dest="debug", help="Debug mode")
debuggroup.add_argument("--timeout", dest="timeout", help="Timeout for each script, default 120segs")
debuggroup.add_argument("--scripts", dest="scripts", help="Optional: Launch only custom scripts. Valids are: n_is_prime,multin,tokyo,zfermat,boneh_durfee,getpqefromdpdqqinv,withdp,common_modulus,common_prime,yafu,ces1,zmultiprimeYafu,wiener,pyqdadod,blind,wiener2,hastad,hastadwithouthE,factordb,lowq,wiener3,boneh_durfee", type=str)
def main():
subprocess.call(["rsolver"]+sys.argv[1:])
return 0
if __name__ == "__main__":
sys.exit(main())
|
StarcoderdataPython
|
3276057
|
from siteswapClass import siteswap
while True:
swapString = input('siteswap?: ')
if swapString == '':
continue
swap = siteswap(swapString)
if swap.isValid():
print('Valid siteswap: ', end='')
if swap.isMultiplex():
print('M', end='')
if swap.isSync():
print('S', end='')
print()
print(' loops: ', end='')
swap.printLoops()
print(' loop time: ', end='')
swap.printLoopTime()
print(' Siteswap array: ', end='')
swap.printArray()
swap.printSite()
else:
print('Invalid siteswap: ', end='')
if swap.isMultiplex():
print('M', end='')
if swap.isSync():
print('S', end='')
print('\n Siteswap array: ', end='')
swap.printArray()
|
StarcoderdataPython
|
91759
|
from __future__ import absolute_import, division, print_function
import cmath
import math
from six.moves import zip
class least_squares:
def __init__(self, obs, calc):
self.obs = obs
self.calc = calc
a, b = self.calc.real, self.calc.imag
self.abs_calc = math.sqrt(a**2 + b**2)
self.delta = self.obs - self.abs_calc
def f(self):
"Mathematica: f=(obs-Sqrt[a^2+b^2])^2"
return self.delta**2
def da(self):
"Mathematica: D[f,a]"
if (self.abs_calc == 0): return 0
return -2 * self.delta * self.calc.real / self.abs_calc
def db(self):
"Mathematica: D[f,b]"
if (self.abs_calc == 0): return 0
return -2 * self.delta * self.calc.imag / self.abs_calc
def daa(self):
"Mathematica: FortranForm[FullSimplify[D[f,a,a]]]"
ac = self.abs_calc
if (ac == 0):
if (self.obs == 0): return 2
return -1.e160
return 2 - (2*self.calc.imag**2*self.obs)/ac/ac/ac
def dbb(self):
"Mathematica: FortranForm[FullSimplify[D[f,b,b]]]"
ac = self.abs_calc
if (ac == 0):
if (self.obs == 0): return 2
return -1.e160
return 2 - (2*self.calc.real**2*self.obs)/ac/ac/ac
def dab(self):
"Mathematica: FortranForm[FullSimplify[D[f,a,b]]]"
ac = self.abs_calc
if (ac == 0):
if (self.obs == 0): return 0
return 1.e160
return (2*self.calc.real*self.calc.imag*self.obs)/ac/ac/ac
class exp_i_alpha_sum:
def __init__(self, alphas):
self.alphas = alphas
def f(self):
"Mathematica: f=Exp[I alpha]"
result = 0
for alpha in self.alphas:
result += cmath.exp(1j*alpha)
return result
def d_alphas(self):
"Mathematica: D[f,alpha]"
return [1j*cmath.exp(1j*alpha) for alpha in self.alphas]
def d2_alphas(self):
"Mathematica: D[f,alpha,alpha]"
return [-cmath.exp(1j*alpha) for alpha in self.alphas]
def d_target_d_alphas(self, target):
    "Chain rule: combine d(target)/d(a,b) with the derivatives of the sum of exp(i*alpha) terms."
da, db = target.da(), target.db()
return [da * d.real + db * d.imag for d in self.d_alphas()]
def d2_target_d_alphas(self, target):
"Product rule applied to da * d.real + db * d.imag."
result = []
da, db = target.da(), target.db()
daa, dbb, dab = target.daa(), target.dbb(), target.dab()
d = self.d_alphas()
d2 = self.d2_alphas()
for di,d2i in zip(d, d2):
row = []
for dj in d:
sum = daa * di.real * dj.real \
+ dbb * di.imag * dj.imag \
+ dab * (di.real * dj.imag + di.imag * dj.real)
if (di is dj):
sum += da * d2i.real + db * d2i.imag
row.append(sum)
result.append(row)
return result
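# Hedged usage sketch (not part of the original module; the numeric inputs are
# arbitrary): cross-check the analytic gradient returned by
# exp_i_alpha_sum.d_target_d_alphas() against a finite difference of the
# least_squares target with respect to the first phase angle.
if __name__ == "__main__":
  alphas = [0.1, 0.7, -1.3]
  obs = 2.5
  eps = 1.0e-6
  terms = exp_i_alpha_sum(alphas)
  target = least_squares(obs=obs, calc=terms.f())
  analytic = terms.d_target_d_alphas(target)[0]
  # perturb only the first angle and recompute the target
  shifted = exp_i_alpha_sum([alphas[0] + eps] + alphas[1:])
  numeric = (least_squares(obs=obs, calc=shifted.f()).f() - target.f()) / eps
  print("d(target)/d(alpha_0): analytic %.6f, finite difference %.6f"
        % (analytic, numeric))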
|
StarcoderdataPython
|
3385875
|
<filename>tools/codegen/serialize_json.py
import json
import os
import sys
import re
import glob
from mako import exceptions
from mako.template import Template
from pathlib import Path
class Field(object):
def __init__(self, name, type):
self.name = name
self.type = type
self.getter = None
self.setter = None
class Record(object):
def __init__(self, name, fields, bases, fileName):
self.name = name
self.luaName = name.replace("::", ".")
var = str.rsplit(name, "::", 1)
self.short_name = var[-1]
if len(var) > 1:
self.namespace = var[0]
self.export_to_c = not "::" in name
self.fields = fields
self.bases = bases
self.fileName = fileName
def allFields(self):
result = []
result.extend(self.fields)
for base in self.bases:
result.extend(base.allFields())
return result
def parseRecord(name, json):
fields = []
if shouldSkip(json):
return
for key, value in json["fields"].items():
attr = value["attrs"]
if "transient" in attr:
continue
field = Field(key, value["type"])
fields.append(field)
bases = []
for value in json["bases"]:
bases.append(value)
return Record(name, fields, bases, json["fileName"])
class Enumerator(object):
def __init__(self, name, value):
self.name = name
self.short_name = str.rsplit(name, "::", 1)[-1]
self.value = value
self.export_to_c = not "::" in name
class Enum(object):
def __init__(self, name, underlying_type, enumerators, fileName):
self.name = name
if underlying_type == "unfixed":
abort(name + " is not fixed enum!")
self.postfix = ": " + underlying_type
var = str.rsplit(name, "::", 1)
self.short_name = var[-1]
if len(var) > 1:
self.namespace = var[0]
self.enumerators = enumerators
self.export_to_c = not "::" in name
self.fileName = fileName
for enumerator in enumerators:
if not enumerator.export_to_c:
self.export_to_c = False
break
def parseEnum(name, json):
if shouldSkip(json):
return
enumerators = []
for key2, value2 in json["values"].items():
enumerators.append(Enumerator(
key2, value2["value"]))
return Enum(name, json["underlying_type"], enumerators, json["fileName"])
class Database(object):
def __init__(self):
self.records = []
self.enums = []
self.name_to_record = {}
self.name_to_enum = {}
def resolve_base(self):
for record in self.records:
bases = []
for base in record.bases:
if base in self.name_to_record:
bases.append(self.name_to_record[base])
record.bases = bases
def add_record(self, record):
if not record:
return
self.records.append(record)
self.name_to_record[record.name] = record
def add_enum(self, enum):
if not enum:
return
self.enums.append(enum)
self.name_to_enum[enum.name] = enum
class Binding(object):
def __init__(self):
self.records = []
self.enums = []
self.headers = set()
BASE = os.path.dirname(os.path.realpath(__file__).replace("\\", "/"))
def shouldSkip(value):
attr = value["attrs"]
if not "serialize" in attr:
return True
serialize = attr["serialize"]
if isinstance(serialize, list):
if not "json" in serialize:
return True
else:
if serialize != "json":
return True
return False
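# Hedged illustration (the dicts below are made-up minimal meta entries, not
# from a real .h.meta file): shouldSkip() accepts the "serialize" attribute
# either as a plain string or as a list of formats.
#
#     shouldSkip({"attrs": {"serialize": "json"}})           -> False  (keep)
#     shouldSkip({"attrs": {"serialize": ["bin", "json"]}})  -> False  (keep)
#     shouldSkip({"attrs": {"serialize": "bin"}})            -> True   (skip)
#     shouldSkip({"attrs": {}})                              -> True   (skip)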
def main():
db = Database()
data = Binding()
root = sys.argv[1]
outdir = sys.argv[2]
api = sys.argv[3]
config = api.lower()+"_configure.h"
api = api.upper()+"_API"
includes = sys.argv[4:].copy()
includes.append(root)
for path in includes:
metas = glob.glob(os.path.join(path, "**", "*.h.meta"), recursive=True)
for meta in metas:
try:
meta = json.load(open(meta))
except json.decoder.JSONDecodeError as e:
print(e)
abort(meta)
for key, value in meta["records"].items():
db.add_record(parseRecord(key, value))
for key, value in meta["enums"].items():
db.add_enum(parseEnum(key, value))
db.resolve_base()
metas = glob.glob(os.path.join(root, "**", "*.h.meta"), recursive=True)
for meta in metas:
meta = json.load(open(meta))
for key, value in meta["records"].items():
if key in db.name_to_record:
record = db.name_to_record[key]
data.records.append(record)
data.headers.add(GetInclude(record.fileName))
for key, value in meta["enums"].items():
if key in db.name_to_enum:
enum = db.name_to_enum[key]
data.enums.append(enum)
data.headers.add(GetInclude(enum.fileName))
template = os.path.join(BASE, "json_writer.cpp.mako")
content = render(template, db=data)
output = os.path.join(outdir, "json_writer.generated.cpp")
write(output, content)
template = os.path.join(BASE, "json_reader.cpp.mako")
content = render(template, db=data)
output = os.path.join(outdir, "json_reader.generated.cpp")
write(output, content)
template = os.path.join(BASE, "json_writer.h.mako")
content = render(template, db=data, api=api, config=config)
output = os.path.join(outdir, "json_writer.generated.h")
write(output, content)
template = os.path.join(BASE, "json_reader.h.mako")
content = render(template, db=data, api=api, config=config)
output = os.path.join(outdir, "json_reader.generated.h")
write(output, content)
def GetInclude(path):
return os.path.normpath(path).replace(os.sep, "/")
def render(filename, **context):
try:
template = Template(
open(filename, "rb").read(),
filename=filename,
input_encoding="utf8",
strict_undefined=True,
)
return template.render(**context)
except Exception:
# Uncomment to see a traceback in generated Python code:
# raise
abort(exceptions.text_error_template().render())
def write(path, content):
RE_PYTHON_ADDR = re.compile(r"<.+? object at 0x[0-9a-fA-F]+>")
directory = os.path.dirname(path)
if not os.path.exists(directory):
os.makedirs(directory, exist_ok=True)
open(path, "wb").write(content.encode("utf-8"))
python_addr = RE_PYTHON_ADDR.search(content)
if python_addr:
abort('Found "{}" in {} ({})'.format(
python_addr.group(0), os.path.basename(path), path))
def abort(message):
print(message, file=sys.stderr)
sys.exit(1)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
4808493
|
"""
do pnet train
"""
import tensorflow as tf
def pnet_train():
print ('do pnet train')
input = tf.keras.layers.Input(shape=[12, 12, 3])
x = tf.keras.layers.Conv2D(10, (3, 3), strides=1, padding='valid', name='conv1')(input)
x = tf.keras.layers.PReLU(shared_axes=[1, 2], name='prelu1')(x)
x = tf.keras.layers.MaxPool2D(pool_size=2)(x)
    x = tf.keras.layers.Conv2D(16, (3, 3), strides=1, padding='valid', name='conv2')(x)
x = tf.keras.layers.PReLU(shared_axes=[1, 2], name='prelu2')(x)
x = tf.keras.layers.Conv2D(32, (3, 3), strides=1, padding='valid', name='conv3')(x)
    x = tf.keras.layers.PReLU(shared_axes=[1, 2], name='prelu3')(x)
classifier = tf.keras.layers.Conv2D(2, (1, 1), activation='softmax', name='classifier1')(x)
classifier = tf.keras.layers.Reshape((2,))(classifier)
bbox_regress = tf.keras.layers.Conv2D(4, (1, 1), name='bbox1')(x)
bbox_regress = tf.keras.layers.Reshape((4,))(bbox_regress)
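    # Hedged sketch (an assumption, not in the original snippet): the function
    # stops after defining the two output heads; a typical PNet would wrap them
    # in a Model so it can be compiled and trained, e.g.:
    model = tf.keras.models.Model(inputs=[input], outputs=[classifier, bbox_regress])
    model.summary()
    return model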
if __name__ == '__main__':
"""test for tensorflow.keras
"""
print('tf version: %s, keras version: %s' % (tf.__version__, tf.keras.__version__))
pnet_train()
|
StarcoderdataPython
|
1702501
|
<filename>params.py
#environment list for ez switching
envs = ['CartPole-v0', 'Acrobot-v1', 'MountainCar-v0',
'BipedalWalker-v2', 'Pong-v4', 'SpaceInvaders-v0',
'Breakout-v0']
#all retro envs
#retro.list_games()
#retro.list_states(game)
env_name = envs[0]#'SonicTheHedgehog-Genesis'
env_state = 'GreenHillZone.Act1' #only valid for retro environments
#training
train_episodes = 1000
train_print_interval = 100
train_max_steps = 500
save_interval = 10 #episode interval to save the model
#testing
test_episodes = 100
test_print_interval = 10
test_max_steps = 500
test_records = 4 #number of episodes to dump to video, disabled for retro
#hyper
train_batch = 20 #replay memory batch size
reward_decay = 0.99
learn_rate = 1e-4
#reward_offset = -1 #scalar added to raw environment rewards
#done_reward = 1000 #scalar for reaching done state
#misc
seed = 42
out_dir = './logs' #base folder for model, any recordings, etc
downsample = 'slow' #slow, fast, none. 'fast' sacrifices quality for speed
recover = False
|
StarcoderdataPython
|
12151
|
import abc
class SpecSource(abc.ABC):
@abc.abstractmethod
def describe(self) -> str:
"""
Returns:
            str to print when there is an error constructing the extractor, for tracing back to the spec source
"""
raise NotImplementedError()
class UnknownSource(SpecSource):
def describe(self) -> str:
return 'Unknown Source'
|
StarcoderdataPython
|
3304916
|
<gh_stars>1-10
from conans import ConanFile, tools
from conans.errors import ConanInvalidConfiguration
required_conan_version = ">=1.33.0"
class DbgMacroConan(ConanFile):
name = "c-dbg-macro"
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/eerimoq/dbg-macro"
license = "MIT"
description = "A dbg(...) macro for C"
topics = ("conan", "debugging", "macro", "pretty-printing", "header-only")
settings = ("compiler", "build_type", "os", "arch")
no_copy_source = True
@property
def _source_subfolder(self):
return "source_subfolder"
def validate(self):
if self.settings.os == "Windows":
raise ConanInvalidConfiguration("This library is not compatible with Windows")
def source(self):
tools.get(**self.conan_data["sources"][self.version], destination=self._source_subfolder, strip_root=True)
def package(self):
self.copy("include/dbg.h", dst=".", src=self._source_subfolder)
self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
def package_id(self):
self.info.header_only()
|
StarcoderdataPython
|
144336
|
from id.trafficmon.objectblob.ObjectBlob import ObjectBlob
import numpy as np
__author__ = 'Luqman'
class ObjectBlobManager(object):
blob_map = None
image_reference = None
max_id = 0
def __init__(self, contour_list, image):
# print type(image)
if (contour_list is None) and (image is None):
self.blob_map = {}
else:
self.image_reference = image
new_blob_list = []
for contour in contour_list:
new_blob = ObjectBlob(contour, image)
new_blob_list.append(new_blob)
self.list_evaluate(new_blob_list)
def list_evaluate(self, blob_list):
self.blob_map = {}
i = self.max_id
for blob in blob_list:
self.blob_map[i] = blob
i += 1
self.max_id = i
def get_image_reference(self):
return self.image_reference
def get_next_id(self):
self.max_id += 1
return self.max_id
def entity_evaluate(self, blob):
th = 20
th_color = 25
result = False
for k in self.blob_map:
cur_blob = self.blob_map[k]
dist = cur_blob.get_blob_distance(blob)
if (dist <= th) and (dist > 0):
color_dist = cur_blob.get_blob_color_distance(blob)
if color_dist <= th_color:
result = True
return result
def set_blob_map(self, blob_map):
self.blob_map = blob_map.copy()
def spatial_evaluation(self):
th = 30
th_color = 25
merge_list = []
removed_list = []
blob_map_1 = self.blob_map.copy()
blob_manager_reference = self.copy()
for k in blob_map_1:
cur_blob = blob_map_1[k]
blob_map_reference = blob_manager_reference.blob_map
for k1 in blob_map_reference:
cur_blob_reference = blob_map_reference[k1]
dist = cur_blob_reference.get_blob_distance(cur_blob)
if (dist <= th) and (dist > 0):
color_dist = cur_blob_reference.get_blob_color_distance(cur_blob)
if color_dist <= th_color:
if k not in removed_list:
removed_list.append(k)
if k1 not in removed_list:
removed_list.append(k1)
overlap_tuple1 = k, k1
overlap_tuple2 = k1, k
if (overlap_tuple1 not in merge_list) and (overlap_tuple2 not in merge_list):
merge_list.append(overlap_tuple1)
return merge_list, removed_list
def get_contour_list(self):
contour_list = []
for blob in self.blob_map.values():
contour_list.append(blob.get_contour())
return contour_list
def get_blob_count(self):
return len(self.blob_map)
def copy(self):
new_blob_manager = ObjectBlobManager(None, None)
new_blob_manager.blob_map = self.blob_map.copy()
new_blob_manager.image_reference = np.copy(self.image_reference)
new_blob_manager.max_id = self.max_id
return new_blob_manager
def remove_and_merge(self, remove_list, merge_list):
new_contour_list = []
# remove
for k in self.blob_map:
if k not in remove_list:
new_contour_list.append(self.blob_map[k].get_contour())
# merge
for merge_idx in merge_list:
k1, k2 = merge_idx
blob1 = self.blob_map[k1]
blob2 = self.blob_map[k2]
new_blob = blob1.merge_blob(blob2, self.image_reference)
new_contour_list.append(new_blob.get_contour())
new_blob_manager = ObjectBlobManager(new_contour_list, self.image_reference)
return new_blob_manager
def temporal_evaluation(self, prev_blob_manager, current_image):
        # build correlation & correspondence between current and previous blobs
th = 12
th_color = 25
cur_blob = self.blob_map.copy()
prev_blob = prev_blob_manager.blob_map.copy()
pairs_cur_blob = {}
pairs_prev_blob = {}
for k in cur_blob:
if k not in pairs_cur_blob:
pairs_cur_blob[k] = []
cur_blob_k = cur_blob[k]
for k1 in prev_blob:
if k1 not in pairs_prev_blob:
pairs_prev_blob[k1] = []
prev_blob_k1 = prev_blob[k1]
dist = prev_blob_k1.get_blob_distance(cur_blob_k)
if (dist <= th) and (dist > 0):
pairs_cur_blob[k].append(k1)
pairs_prev_blob[k1].append(k)
# got correlation of every blob / object
# next: process blobs based on correlation
# 1 to n -> n to 1 -> 1 to 1 -> 1 to 0 -> 0 to 1
one_to_n = {}
n_to_one = {}
one_to_one_cur = {}
one_to_one_prev = {}
one_to_zero = {}
zero_to_one = {}
for key in pairs_cur_blob:
cur_pair = pairs_cur_blob[key]
if len(cur_pair) == 0:
one_to_zero[key] = cur_pair
elif len(cur_pair) == 1:
one_to_one_cur[key] = cur_pair
else:
one_to_n[key] = cur_pair
for key in pairs_prev_blob:
prev_pair = pairs_prev_blob[key]
if len(prev_pair) == 0:
zero_to_one[key] = prev_pair
elif len(prev_pair) == 1:
one_to_one_prev[key] = prev_pair
else:
n_to_one[key] = prev_pair
new_blob_map = {}
destroy_keys = []
# 1 to n
for key in one_to_n:
new_blob = cur_blob[key]
ref_blob_keys = one_to_n[key]
            # # check each blob individually; recheck later
# for ref_key in ref_blob_keys:
# ref_blob = prev_blob[ref_key]
#
            # check the merged blob
merged_blob = None
key_used = 0
for ref_key in ref_blob_keys:
key_used = ref_key
if merged_blob is None:
merged_blob = prev_blob[ref_key]
else:
merged_blob = merged_blob.merge_blob(prev_blob[ref_key], prev_blob_manager.get_image_reference())
if new_blob.is_similar(merged_blob):
for ref_key in ref_blob_keys:
if ref_key != key_used:
destroy_keys.append(key_used)
new_blob_map[key_used] = prev_blob[key_used].track(new_blob, self.image_reference)
else:
                # occlusion
for ref_key in ref_blob_keys:
occlusion_blob = prev_blob[ref_key].move_blob(self.image_reference)
new_blob_map[ref_key] = prev_blob[ref_key].track(occlusion_blob, self.image_reference)
one_to_one_prev.pop(key, None)
# n to 1
for key in n_to_one:
ref_blob = prev_blob[key]
cur_blob_keys = n_to_one[key]
            # check the merged blob
merged_blob = None
for cur_key in cur_blob_keys:
if merged_blob is None:
merged_blob = cur_blob[cur_key]
else:
merged_blob = merged_blob.merge_blob(cur_blob[cur_key], self.get_image_reference())
# if ref_blob.is_similar(merged_blob):
new_blob_map[key] = ref_blob.track(merged_blob, self.image_reference)
destroy_keys.append(key)
# else:
            # # occlusion splitting apart
# destroy_keys.append(key)
# for cur_key in cur_blob_keys:
# new_key = prev_blob_manager.get_next_id()
# new_blob_map[new_key] = ref_blob.track(cur_blob[cur_key], self.image_reference)
one_to_one_cur.pop(key, None)
# 0 to 1
for key in zero_to_one:
new_blob_map[key] = prev_blob[key]
            # check whether the blob is still in the same place
blob = prev_blob[key]
new_blob = ObjectBlob(blob.get_contour(), current_image)
next_blob = blob.track(new_blob, self.image_reference)
new_blob_map[key] = next_blob
if new_blob.get_blob_color_distance(blob) > th_color:
# destroy
destroy_keys.append(key)
if next_blob.get_n_frames_in_map() > 15:
destroy_keys.append(key)
            # check the estimated next location  # revisit later to see whether this is still needed
# 1 to 0
for key in one_to_zero:
ref_key = prev_blob_manager.get_next_id()
new_blob_map[ref_key] = cur_blob[key]
            # insert it into the manager directly
# 1 to 1
for key in one_to_one_cur:
ref_key = one_to_one_cur[key][0]
next_blob = prev_blob[ref_key].track(cur_blob[key], self.image_reference)
new_blob_map[ref_key] = next_blob
one_to_one_prev.pop(ref_key, None)
for key in one_to_one_prev:
next_key = one_to_one_prev[key][0]
next_blob = prev_blob[key].track(cur_blob[next_key], self.image_reference)
new_blob_map[key] = next_blob
for dest in destroy_keys:
new_blob_map.pop(dest, None)
# print prev_blob_manager.max_id
return self.set_next_blob_map(new_blob_map, prev_blob_manager.max_id)
# print pairs_cur_blob, pairs_prev_blob
def draw_contours(self, image, is_temporal):
image_used = np.copy(image)
image_result = np.copy(image)
for k in self.blob_map:
cur_blob = self.blob_map[k]
image_result = cur_blob.draw(image_used, k, is_temporal)
image_used = np.copy(image_result)
return image_result
def destroy_blob(self, key):
blob = self.blob_map.pop(key, None)
print "object no. "+str(key)+" is destroyed"
if blob is not None:
print "it has been in your screen for "+str(blob.get_n_frames_in_map())
def set_next_blob_map(self, blob_map, max_id):
next_blob_manager = self.copy()
next_blob_manager.set_blob_map(blob_map)
next_blob_manager.max_id = max_id
return next_blob_manager
def check(self):
print "--- checking ---"
print "number of blob: ", len(self.blob_map)
for k in self.blob_map:
print self.blob_map[k].get_area()
|
StarcoderdataPython
|
161072
|
import glob
import logging
from . import Reader
LOG = logging.getLogger(__name__)
def load_all(path, recursive=True, scene_type=None, sample=None):
"""Parsed scenes at the given path returned as a generator.
Each scene contains a list of `Row`s where the first pedestrian is the
pedestrian of interest.
The path supports `**` when the `recursive` argument is True (default).
:param scene_type: see Reader
"""
LOG.info('loading dataset from %s', path)
filenames = glob.iglob(path, recursive=recursive)
for filename in filenames:
sample_rate = None
if sample is not None:
for k, v in sample.items():
if k in filename:
sample_rate = v
yield from Reader(filename, scene_type=scene_type).scenes(sample=sample_rate)
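# Hedged usage sketch (the file pattern and sampling rates below are made-up
# illustrations, not from the original module):
#
#     scenes = load_all('data/train/**/*.ndjson', sample={'crowds': 0.2})
#     for scene in scenes:
#         ...   # the first Row in each scene is the pedestrian of interest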
|
StarcoderdataPython
|
109100
|
<reponame>Dawars/stereo-magnification
#!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Quantitative evaluation of view synthesis results.
Read in dumped json data and compute various statistics.
"""
import os
import json
import numpy as np
from scipy.stats import wilcoxon
from scipy.stats.mstats import rankdata
from pyglib import app
from pyglib import flags
FLAGS = flags.FLAGS
flags.DEFINE_string('root', 'evaluation', 'Evaluation directory')
flags.DEFINE_string(
'model_names',
'v4_1024,v4_1024_alpha,v4_1024_singleRGB,v4_1024_fgbg,v4_1024_all',
'model names')
flags.DEFINE_string('data_split', 'test', 'split of the data')
flags.DEFINE_string('stats', 'mean,rank,diff,wilcoxon',
'which stats to compute')
def load_data(root, model):
with open(root + '/json/' + model + '.json') as f:
data = json.load(f)
return data
def merge_into(data, d):
if data == {}:
data['models'] = []
data['examples'] = d['examples']
n = len(data['examples'])
data['ssim'] = [[]] * n
data['psnr'] = [[]] * n
for m in d['model_names']:
assert m not in data['models']
data['models'].append(str(m))
assert d['examples'] == data['examples']
assert len(data['ssim']) == len(d['ssim'])
assert len(data['psnr']) == len(d['psnr'])
data['ssim'] = [a + b for (a, b) in zip(data['ssim'], d['ssim'])]
data['psnr'] = [a + b for (a, b) in zip(data['psnr'], d['psnr'])]
def compute_mean(data):
print '\nMEAN + STD\n'
ssim = np.array(data['ssim'])
psnr = np.array(data['psnr'])
for i, m in enumerate(data['models']):
print '%30s ssim %.3f ± %.3f psnr %.2f ± %.2f' % (
m, np.mean(ssim[:, i]), np.std(ssim[:, i]), np.mean(psnr[:, i]),
np.std(psnr[:, i]))
def compute_rank(data):
print '\nRANK\n'
# rankdata assigns rank 1 to the lowest element, so
# we need to negate before ranking.
ssim_rank = rankdata(np.array(data['ssim']) * -1.0, axis=1)
psnr_rank = rankdata(np.array(data['psnr']) * -1.0, axis=1)
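  # Illustration with made-up values: rankdata([[-0.70, -0.90, -0.80]], axis=1)
  # gives [[3., 1., 2.]], i.e. the highest raw score ends up with rank 1.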
# Rank mean + std.
for i, m in enumerate(data['models']):
print '%30s ssim-rank %.2f ± %.2f psnr-rank %.2f ± %.2f' % (
m, np.mean(ssim_rank[:, i]), np.std(ssim_rank[:, i]),
np.mean(psnr_rank[:, i]), np.std(psnr_rank[:, i]))
# Rank frequencies
print '\n SSIM rank freqs'
print_rank_freqs(data, ssim_rank)
print '\n PSNR rank freqs'
print_rank_freqs(data, psnr_rank)
def print_rank_freqs(data, rank):
e = len(data['examples'])
m = len(data['models'])
freqs = []
for i in range(m):
one_rank = np.count_nonzero(
np.logical_and(np.less_equal(i + 1.0, rank), np.less(rank, i + 2.0)),
axis=0) * 1.0 / e
freqs.append(one_rank)
freqs = np.array(freqs)
print '%30s %s' % ('', ''.join('%4.0f ' % (x + 1) for x in range(m)))
for i, m in enumerate(data['models']):
print '%30s %s' % (m, ''.join(
'%4.0f%%' % (100 * x) for x in freqs[:, i]))
def compute_diff(data):
print '\nDIFF\n'
# We take the first model as the best!
ssim = np.array(data['ssim'])
psnr = np.array(data['psnr'])
ssim_diff = ssim - ssim[:, 0:1]
psnr_diff = psnr - psnr[:, 0:1]
for i, m in enumerate(data['models']):
print '%30s ssim-diff %.3f ± %.3f psnr-diff %.2f ± %.2f' % (
m, np.mean(ssim_diff[:, i]), np.std(ssim_diff[:, i]),
np.mean(psnr_diff[:, i]), np.std(psnr_diff[:, i]))
def compute_wilcoxon(data):
print '\nWILCOXON SIGNED-RANK TEST\n'
# We take the first model as the basis for each comparison.
ssim = np.array(data['ssim'])
psnr = np.array(data['psnr'])
for i, m in enumerate(data['models']):
if i == 0:
print ' [differences from %s]' % m
continue
ssim_v, ssim_p = wilcoxon(ssim[:, i], ssim[:, 0])
psnr_v, psnr_p = wilcoxon(psnr[:, i], psnr[:, 0])
print '%30s ssim %.3f, p %.1e psnr %.2f, p %.1e' % (m, ssim_v, ssim_p,
psnr_v, psnr_p)
def main(_):
stats = FLAGS.stats.split(',')
root = FLAGS.root
model_names = FLAGS.model_names.split(',')
data = {}
for m in model_names:
d = load_data(root, m)
merge_into(data, d)
print '\nLOADED %d models, %d examples' % (len(data['models']),
len(data['examples']))
if 'mean' in stats:
compute_mean(data)
if 'rank' in stats:
compute_rank(data)
if 'diff' in stats:
compute_diff(data)
if 'wilcoxon' in stats:
compute_wilcoxon(data)
print
if __name__ == '__main__':
app.run()
|
StarcoderdataPython
|
1661988
|
<filename>src/gerel/debug/model_validator.py
def validate_model(model, *args, **kwargs):
if not model.inputs:
raise ValueError('Model has no input nodes')
if not model.outputs:
raise ValueError('Model has no output nodes')
|
StarcoderdataPython
|
172449
|
<reponame>vztpv/Python-Study
# 5-5 Problems
print(all([1, 2, abs(-3)-3]))
print(chr(ord('a')) == 'a')
x = [1, -2, 3, -5, 8, -3]
print(list(filter(lambda val: val > 0, x)))
x = hex(234)
print(int(x, 16))
x = [1, 2, 3, 4]
print(list(map(lambda a: a * 3, x)))
x = [-8, 2, 7, 5, -3, 5, 0, 1]
print(max(x) + min(x))
x = 17 / 3
print(round(x, 4))
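# Expected outputs, worked by hand for reference (worth re-running to confirm):
# all([1, 2, abs(-3)-3])               -> False   (abs(-3)-3 == 0, which is falsy)
# chr(ord('a')) == 'a'                 -> True
# list(filter(lambda val: val > 0, x)) -> [1, 3, 8]      for x = [1, -2, 3, -5, 8, -3]
# int(hex(234), 16)                    -> 234
# list(map(lambda a: a * 3, x))        -> [3, 6, 9, 12]  for x = [1, 2, 3, 4]
# max(x) + min(x)                      -> -1             for x = [-8, 2, 7, 5, -3, 5, 0, 1]
# round(17 / 3, 4)                     -> 5.6667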
|
StarcoderdataPython
|
3329045
|
# -*- coding: utf-8 -*-
import os,sys,shutil
import zipfile,glob,subprocess
import traceback
def u(text):
return unicode(text,"utf-8")
def myjoin(*args):
path = os.path.join(*args)
path = os.path.abspath(path)
return path
def luaCompileWithLuac(desc,path):
try:
for dirname, dirnames, filenames in os.walk(path):
print "current dirname >> " + dirname.replace(path, '')
for filename in filenames:
currentFileName = os.path.join(dirname, os.path.basename(filename));
if '.lua' in filename:
execuateCommand = "luac -s -o " + currentFileName + " " + currentFileName;
os.system(execuateCommand);
else:
shutil.copy(currentFileName,output);
print desc + " lua compile finish"
except:
print desc + " lua compile fail"
def luaCompileWithCocos(desc,path):
try:
execuateCommand = "cocos luacompile -s {0} -d {0}".format(path)
os.system(execuateCommand);
print desc + " lua compile finish"
except:
print desc + " lua compile fail"
try:
option = {
"stdout":subprocess.PIPE,
"stderr":subprocess.PIPE
}
cwd = os.path.abspath(os.path.dirname(__file__))
print "c",cwd,__file__
if len(cwd) == 0:
cwd = "."
sys.path.append(myjoin(cwd,"..","Engine"))
from android_compile import compile_run
from android_compile import orientation_portrait
from android_compile import orientation_landscape
dist_root = myjoin(cwd,"pini_distribute")
pini_root = myjoin(cwd,"pini")
updator_root = myjoin(cwd,"updator")
if sys.platform == "darwin" :
pass
else:
os.chdir(cwd)
if os.path.exists(dist_root) :
shutil.rmtree(dist_root)
os.chdir(pini_root)
proc = subprocess.Popen(["python","setup.py","py2exe"],**option)
out, err = proc.communicate()
errcode = proc.returncode
print out,err,errcode
shutil.move(myjoin(os.curdir,"dist"), dist_root )
shutil.rmtree(myjoin(os.curdir,"build"))
shutil.copyfile(myjoin(os.curdir,"atl.so"),myjoin(dist_root,"atl.so"))
shutil.copyfile(myjoin(os.curdir,"compiler.py"),myjoin(dist_root,"compiler.py"))
shutil.copyfile(myjoin(os.curdir,"libgcc_s_dw2-1.dll"),myjoin(dist_root,"libgcc_s_dw2-1.dll"))
shutil.copyfile(myjoin(os.curdir,"libstdc++-6.dll"),myjoin(dist_root,"libstdc++-6.dll"))
shutil.copytree(myjoin(os.curdir,"resource"),myjoin(dist_root,"resource"))
shutil.copytree(myjoin(os.curdir,"imageformats"),myjoin(dist_root,"imageformats"))
shutil.copytree(myjoin(os.pardir,os.pardir,"Engine","VisNovel","src"), myjoin(dist_root,"lua"))
shutil.copytree(myjoin(os.pardir,os.pardir,"Engine","window64"), myjoin(dist_root,"window"))
try:
shutil.rmtree(myjoin(dist_root,"window","src"))
except Exception, e:
pass
try:
shutil.rmtree(myjoin(dist_root,"window","res"))
except Exception, e:
pass
shutil.copytree(myjoin(os.pardir,os.pardir,"Engine","VisNovel","src"),myjoin(dist_root,"window","src"))
shutil.copytree(myjoin(os.pardir,os.pardir,"Engine","VisNovel","res"),myjoin(dist_root,"window","res"))
# launcher lua compile - dist_root+"\\window\\src"
# launcher_lua = myjoin(dist_root,"window","src")
# luaCompileWithCocos("launcher",launcher_lua)
### tool lua compile - dist_root+"\\lua"
# tool_lua = myjoin(dist_root,"lua")
# luaCompileWithLuac("tool",tool_lua)
### cocos lua remove
# for root, dirs, files in os.walk(myjoin(dist_root,"window","src"), topdown=False):
# for name in files:
# path = os.path.join(root, name).replace("\\","/")
# p,ext= os.path.splitext(path)
# if ext == ".lua" :
# print "remove plain-lua ",path
# os.remove(path)
#################################################################
### android compile
orientation_portrait()
compile_run(False,True,False,"-portrait.apk")
orientation_landscape()
compile_run(False,True,False,"-landscape.apk",True)
#################################################################
apkdistpath = myjoin(dist_root,"resource","android")
try:
shutil.rmtree(apkdistpath)
except Exception, e:
pass
os.mkdir(apkdistpath)
srcapk1 = myjoin(dist_root,"..","..","Engine","android","PiniRemote-portrait.apk")
srcapk2 = myjoin(dist_root,"..","..","Engine","android","PiniRemote-landscape.apk")
distapk1= myjoin(apkdistpath,"PiniRemote-portrait.apk")
distapk2= myjoin(apkdistpath,"PiniRemote-landscape.apk")
shutil.copy(srcapk1,distapk1)
shutil.copy(srcapk2,distapk2)
distapk1 = myjoin(dist_root,"..","pini","resource","android","PiniRemote-portrait.apk")
distapk2 = myjoin(dist_root,"..","pini","resource","android","PiniRemote-landscape.apk")
try:
os.remove(distapk1)
except Exception, e:
pass
try:
os.remove(distapk2)
except Exception, e:
pass
shutil.copy(srcapk1,distapk1)
shutil.copy(srcapk2,distapk2)
#################################################################
os.chdir(updator_root)
proc = subprocess.Popen(["python","setup.py","py2exe"],**option)
out, err = proc.communicate()
errcode = proc.returncode
print out,err,errcode
os.chdir(myjoin(updator_root,"dist"))
#myjoin(os.pardir,os.pardir,"novel","VisNovel","src")
if os.path.exists(myjoin(os.curdir,"resource")) :
shutil.rmtree(myjoin(os.curdir,"resource"))
if os.path.exists(myjoin(os.curdir,"pygit2")) :
shutil.rmtree(myjoin(os.curdir,"pygit2"))
shutil.copytree(myjoin(os.pardir,"resource"),myjoin(os.curdir,"resource"))
shutil.copytree(myjoin(os.pardir,"pygit2"), myjoin(os.curdir,"pygit2"))
'''
_zip = zipfile.ZipFile(myjoin(os.pardir,os.pardir,"pini_launcher.zip"), 'w')
for root, dirs, files in os.walk(os.curdir, topdown=False):
for name in files:
path = os.path.join(root, name).replace("\\","/")
_zip.write(path)
_zip.close()
'''
os.chdir(updator_root)
#shutil.rmtree(myjoin(os.curdir,"dist"))
nsisMaster = myjoin(os.curdir,"nsis","Master")
if os.path.isdir(nsisMaster) :
shutil.rmtree(nsisMaster)
shutil.move(myjoin(os.curdir,"dist"),nsisMaster)
shutil.rmtree(myjoin(os.curdir,"build"))
shutil.copyfile("nsis/Master/Updator.exe","../Updator.exe")
os.rename("nsis/Master/Updator.exe",u("nsis/Master/piniengine.exe"))
shutil.copytree("../pini/imageformats","nsis/Master/imageformats")
os.system("makensis.exe nsis\\Master.nsi")
try:
os.remove("../PiniInstaller.exe")
except Exception, e:
pass
os.rename("nsis/installer.exe","../PiniInstaller.exe")
shutil.rmtree(nsisMaster)
except Exception, e:
print e
traceback.print_exc()
|
StarcoderdataPython
|
24031
|
<filename>add_admin.py
import sqlite3
from config import DB_PATH
def exe_query(query):
con_obj = sqlite3.connect(DB_PATH)
courser = con_obj.execute(query)
res = courser.fetchall()
con_obj.commit()
con_obj.close()
return res
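# Side note (a sketch only, not part of the original script): exe_query()
# interpolates the value directly into the SQL string; if anything other than
# an int were ever passed in, the parameterised form would be the safer pattern:
#
#     con_obj.execute('INSERT INTO Admin (telegram_id) VALUES (?);', (admin_id,))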
try:
admin_id = int(input('Enter admin id: '))
exe_query(f'INSERT INTO Admin (telegram_id) VALUES ({admin_id});')
print(f'Admin ({admin_id}) added successfully!')
except ValueError:
print('Invalid admin id')
|
StarcoderdataPython
|
185162
|
# Author: <NAME>
# E-mail: <EMAIL>
# Author: <NAME>
# E-mail: <EMAIL>
# Author: <NAME>
# E-mail: <EMAIL>
try:
from exam.utils.baseline_studies import EMBEDDINGvsTRAIN_EMBEDDINGS
from exam.utils.baseline_studies import NUM_LAYERSvsHIDDEN_DIM
from exam.utils.baseline_studies import LEARNING_RATEvsDROPOUT
from exam.utils.baseline_studies import BATCH_SIZEvsEPOCHS
except:
from utils.baseline_studies import EMBEDDINGvsTRAIN_EMBEDDINGS
from utils.baseline_studies import NUM_LAYERSvsHIDDEN_DIM
from utils.baseline_studies import LEARNING_RATEvsDROPOUT
from utils.baseline_studies import BATCH_SIZEvsEPOCHS
import torch
non_pos_tagged_corporas = {
'58': 100,
'77': 100,
'79': 100,
'81': 100,
'84': 100,
'86': 100,
'88': 100,
'90': 100,
'92': 100,
'94': 100,
'96': 100,
'98': 100,
'100': 100,
'102': 100,
'104': 100,
'106': 100,
'108': 100,
'110': 100,
'112': 100,
'114': 100,
'116': 100,
'118': 100,
'120': 100,
'122': 100,
'124': 100,
'126': 100,
'127': 50,
'128': 300,
'129': 600
}
pos_tagged_corporas = {
'76': 100,
'78': 100,
'80': 100,
'83': 100,
'85': 100,
'87': 100,
'89': 100,
'91': 100,
'93': 100,
'95': 100,
'97': 100,
'99': 100,
'101': 100,
'103': 100,
'105': 100,
'107': 100,
'109': 100,
'111': 100,
'113': 100,
'115': 100,
'117': 100,
'119': 100,
'121': 100,
'123': 100,
'125': 100,
'130': 500,
'131': 300,
'132': 600,
'133': 50,
'134': 300,
'135': 600,
'189': 300
}
bert_corporas = {'216': 768}
elmo_corporas = {'217': 2048, '218': 2048}
# dict with current best params (updated after every study)
params = {
'TRAIN_DATA': "/cluster/projects/nn9851k/IN5550/fabior/train.conll",
'DEV_DATA': "/cluster/projects/nn9851k/IN5550/fabior/dev.conll",
'TEST_DATA': "/cluster/projects/nn9851k/IN5550/fabior/test.conll",
'verbose': True,
'random_state': 1,
'BATCH_SIZE': 32,
'HIDDEN_DIM': 50,
'device': "cuda" if torch.cuda.is_available() else "cpu",
'output_dim': 5,
'NUM_LAYERS': 1,
'DROPOUT': 0.1,
'LEARNING_RATE': 0.01,
'TRAIN_EMBEDDINGS': True,
'EPOCHS': 20,
# 'EMBEDDINGS_DIR': "exam/saga/",
'EMBEDDINGS_DIR': "/cluster/shared/nlpl/data/vectors/latest/",
'EMBEDDINGS': "58"
}
# parameter space for each study
space = {
'EMBEDDINGvsTRAIN_EMBEDDINGS': {
'par_1': list(non_pos_tagged_corporas.keys()),
'par_2': [True, False],
},
'NUM_LAYERSvsHIDDEN_DIM': {
'par_1': [1, 3, 5],
'par_2': [5, 10, 50, 100, 500],
},
'LEARNING_RATEvsDROPOUT': {
'par_1': [0.1, 0.01, 0.001, 0.0001],
'par_2': [0.1, 0.2, 0.3],
},
'BATCH_SIZEvsEPOCHS': {
'par_1': [25, 32, 36, 40, 44, 50],
'par_2': [5, 10, 25, 50],
}
}
################# 1st study
print('First study #=#=#=#=#')
params.pop('EMBEDDINGS')
params.pop('TRAIN_EMBEDDINGS')
params['EMBEDDINGS'], params['TRAIN_EMBEDDINGS'] = \
EMBEDDINGvsTRAIN_EMBEDDINGS(
par_1=space['EMBEDDINGvsTRAIN_EMBEDDINGS']['par_1'],
par_2=space['EMBEDDINGvsTRAIN_EMBEDDINGS']['par_2'],
out_path_filename="outputs/baseline_EMBEDDINGvsTRAIN_EMBEDDINGS",
**params
).run()._best_params()
#####################################
# # ################# 2nd study
# print('Second study #=#=#=#=#')
# params.pop('NUM_LAYERS')
# params.pop('HIDDEN_DIM')
# params['NUM_LAYERS'], params['HIDDEN_DIM'] = NUM_LAYERSvsHIDDEN_DIM(
# par_1=space['NUM_LAYERSvsHIDDEN_DIM']['par_1'],
# par_2=space['NUM_LAYERSvsHIDDEN_DIM']['par_2'],
# out_path_filename="outputs/baseline_NUM_LAYERSvsHIDDEN_DIM",
# **params
# ).run()._best_params()
# #####################################
# print('Third study #=#=#=#=#')
# # ################# 3rd study
# params.pop('LEARNING_RATE')
# params.pop('DROPOUT')
# params['LEARNING_RATE'], params['DROPOUT'] = LEARNING_RATEvsDROPOUT(
# par_1=space['LEARNING_RATEvsDROPOUT']['par_1'],
# par_2=space['LEARNING_RATEvsDROPOUT']['par_2'],
# out_path_filename="outputs/baseline_LEARNING_RATEvsDROPOUT",
# **params
# ).run()._best_params()
# # #####################################
print('Fourth study #=#=#=#=#')
# ################# 4th study
params.pop('BATCH_SIZE')
params.pop('EPOCHS')
params['BATCH_SIZE'], params['EPOCHS'] = BATCH_SIZEvsEPOCHS(
par_1=space['BATCH_SIZEvsEPOCHS']['par_1'],
par_2=space['BATCH_SIZEvsEPOCHS']['par_2'],
out_path_filename="outputs/baseline_BATCH_SIZEvsEPOCHS",
**params
).run()._best_params()
# #####################################
|
StarcoderdataPython
|
1678426
|
#!/usr/bin/env python
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
# standard library modules, , ,
import unittest
import os
import tempfile
# internal modules:
from yotta.lib.folders import globalInstallDirectory
from yotta.test.cli import cli
from yotta.test.cli import util
Test_Target = 'x86-linux-native'
class TestCLILink(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.prefix_dir = tempfile.mkdtemp()
os.environ['YOTTA_PREFIX'] = cls.prefix_dir
@classmethod
def tearDownClass(cls):
util.rmRf(cls.prefix_dir)
cls.prefix_dir = None
def testLink(self):
linked_in_module = util.writeTestFiles(util.Test_Trivial_Lib, True)
stdout, stderr, statuscode = cli.run(['-t', Test_Target, '--plain', 'link'], cwd=linked_in_module)
self.assertEqual(statuscode, 0)
self.assertTrue(os.path.exists(os.path.join(globalInstallDirectory(), 'test-trivial-lib')))
test_module = util.writeTestFiles(util.Test_Testing_Trivial_Lib_Dep, True)
stdout, stderr, statuscode = cli.run(['-t', Test_Target, '--plain', 'list'], cwd=test_module)
self.assertIn('missing', stdout+stderr)
stdout, stderr, statuscode = cli.run(['-t', Test_Target, '--plain', 'link', 'test-trivial-lib'], cwd=test_module)
self.assertEqual(statuscode, 0)
self.assertNotIn('broken', stdout+stderr)
stdout, stderr, statuscode = cli.run(['-t', Test_Target, '--plain', 'list'], cwd=test_module)
self.assertNotIn('missing', stdout+stderr)
util.rmRf(test_module)
util.rmRf(linked_in_module)
@unittest.skipIf(not util.canBuildNatively(), "can't build natively on this platform yet")
def testLinkedBuild(self):
linked_in_module = util.writeTestFiles(util.Test_Trivial_Lib, True)
test_module = util.writeTestFiles(util.Test_Testing_Trivial_Lib_Dep, True)
stdout, stderr, statuscode = cli.run(['-t', util.nativeTarget(), '--plain', 'link'], cwd=linked_in_module)
self.assertEqual(statuscode, 0)
stdout, stderr, statuscode = cli.run(['-t', util.nativeTarget(), '--plain', 'link', 'test-trivial-lib'], cwd=test_module)
self.assertEqual(statuscode, 0)
stdout, stderr, statuscode = cli.run(['-t', util.nativeTarget(), '--plain', 'build'], cwd=test_module)
self.assertEqual(statuscode, 0)
util.rmRf(test_module)
util.rmRf(linked_in_module)
@unittest.skipIf(not util.canBuildNatively(), "can't build natively on this platform yet")
def testLinkedReBuild(self):
# test that changing which module is linked triggers a re-build
linked_in_module_1 = util.writeTestFiles(util.Test_Trivial_Lib, True)
linked_in_module_2 = util.writeTestFiles(util.Test_Trivial_Lib, True)
test_module = util.writeTestFiles(util.Test_Testing_Trivial_Lib_Dep, True)
stdout, stderr, statuscode = cli.run(['-t', util.nativeTarget(), '--plain', 'link'], cwd=linked_in_module_1)
self.assertEqual(statuscode, 0)
stdout, stderr, statuscode = cli.run(['-t', util.nativeTarget(), '--plain', 'link', 'test-trivial-lib'], cwd=test_module)
self.assertEqual(statuscode, 0)
stdout, stderr, statuscode = cli.run(['-t', util.nativeTarget(), '--plain', 'build'], cwd=test_module)
self.assertEqual(statuscode, 0)
# check that rebuild is no-op
stdout, stderr, statuscode = cli.run(['-t', util.nativeTarget(), '--plain', 'build'], cwd=test_module)
self.assertIn('no work to do', stdout+stderr)
self.assertEqual(statuscode, 0)
stdout, stderr, statuscode = cli.run(['-t', util.nativeTarget(), '--plain', 'link'], cwd=linked_in_module_2)
self.assertEqual(statuscode, 0)
stdout, stderr, statuscode = cli.run(['-t', util.nativeTarget(), '--plain', 'build'], cwd=test_module)
self.assertNotIn('no work to do', stdout+stderr)
self.assertEqual(statuscode, 0)
util.rmRf(test_module)
util.rmRf(linked_in_module_1)
util.rmRf(linked_in_module_2)
@unittest.skipIf(not util.canBuildNatively(), "can't build natively on this platform yet")
def testTargetLinkedBuild(self):
linked_in_target = util.writeTestFiles(util.getNativeTargetDescription(), True)
test_module = util.writeTestFiles(util.Test_Testing_Trivial_Lib_Dep_Preinstalled, True)
stdout, stderr, statuscode = cli.run(['-t', 'test-native-target', '--plain', 'link-target'], cwd=linked_in_target)
self.assertEqual(statuscode, 0)
stdout, stderr, statuscode = cli.run(['-t', 'test-native-target', '--plain', 'link-target', 'test-native-target'], cwd=test_module)
self.assertEqual(statuscode, 0)
stdout, stderr, statuscode = cli.run(['-t', 'test-native-target', '--plain', 'build'], cwd=test_module)
self.assertEqual(statuscode, 0)
util.rmRf(test_module)
util.rmRf(linked_in_target)
|
StarcoderdataPython
|
4819228
|
<reponame>epm0dev/Lens-dev
from django.test import TestCase, Client
# A test case for the contact page's GET and POST requests.
class ContactPageTestCase(TestCase):
# Set up the class for testing.
@classmethod
def setUpClass(cls):
# Call the superclass' setUpClass() method.
super().setUpClass()
# Create a client object for the class to make requests.
cls.client = Client()
# Define valid post data for the contact form.
cls.valid_data = {
'first-name': 'Ethan',
'last-name': 'Mancini',
'middle-initial': 'P',
'email-address': '<EMAIL>',
'phone-number': '330-322-6010',
'message': 'Message'
}
# Define invalid post data for the contact form.
cls.invalid_data = {
'first-name': '',
'last-name': '',
'middle-initial': None,
'email-address': 'invalid.edu',
'phone-number': '111-2222-33A',
'message': ''
}
# Test get requests for the contact page.
def test_get(self):
# Send a get request without a trailing forward slash.
response = self.client.get('/contact')
# Ensure that the response has the correct status code and the proper template was rendered.
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'lens/base.html')
self.assertTemplateUsed(response, 'contact/contact.html')
# Send a get request with a trailing forward slash.
response = self.client.get('/contact/')
# Ensure that the response has the correct status code and the proper template was rendered.
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'lens/base.html')
self.assertTemplateUsed(response, 'contact/contact.html')
# Test post requests for the contact page.
def test_post(self):
# Attempt to make a post request with each combination of mostly valid data with one invalid field.
for key in self.invalid_data:
# If the value in invalid_data associated with the current key is None, the field cannot be invalid, so
# continue to the next iteration of the loop.
if self.invalid_data[key] is None:
continue
# Copy the valid data to a new dictionary.
post_data = self.valid_data.copy()
# Replace the valid value at the current key with an invalid one.
post_data[key] = self.invalid_data[key]
# Send a post request containing the data and ensure that the response has a status code of 400.
response = self.client.post('/contact', data=post_data)
self.assertEqual(response.status_code, 400)
# Send a post request containing the valid data and ensure that the response has an error code less than 400.
response = self.client.post('/contact', data=self.valid_data)
self.assertLess(response.status_code, 400)
|
StarcoderdataPython
|
1621264
|
"""
=======
License
=======
Copyright (c) 2017 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies
or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import unittest
from concept.tools.serialize import Serializable
from concept.tools.decorator import validate_test_responsibility_for
@validate_test_responsibility_for(Serializable)
class TestSerializable(unittest.TestCase):
"""testing of class pydemo.tools.serialize.Serializable class."""
def test_init(self):
"""Testing of Serializable.__init__ method."""
serializable = Serializable()
self.assertEqual("serializable", serializable.get_serializable_name())
self.assertRaises(AttributeError, serializable.get_serializable_fields)
def test_is_derived(self):
"""Testing of Serializable.is_derived function."""
class A(object):
pass
class B(A):
def __init__(self):
super(B, self).__init__()
b = B()
self.assertEqual(True, Serializable.is_derived(b, A))
self.assertEqual(False, Serializable.is_derived(1234, A))
def test_get_serializable_fields_with_no_field(self):
"""Testing of Serializable.get_Serializable_fields with no field."""
class Data(Serializable):
pass
data = Data()
self.assertRaises(AttributeError, data.get_serializable_fields)
def test_get_serializable_fields(self):
"""Testing of Serializable.getSerializableFields with fields (as usual)."""
class Data(Serializable):
def __init__(self):
super(Data, self).__init__()
self.firstName = "Agatha"
self.surName = "Christie"
data = Data()
self.assertEqual(["firstName", "surName"], sorted(data.get_serializable_fields()))
def test_get_serializable_name(self):
"""Testing of Serializable.getSerializableName."""
class Data(Serializable):
pass
data = Data()
self.assertEqual("data", data.get_serializable_name())
def test_is_enabled_for_attributes(self):
"""Testing of Serializable.is_enabled_for_attributes."""
class DataA(Serializable):
pass
class DataB(Serializable):
def __init__(self, title):
super(DataB, self).__init__()
self.title = title
def is_enabled_for_attributes(self):
return True
data = DataA()
self.assertEqual(False, data.is_enabled_for_attributes())
data = DataB("hello world")
expectedXML = """<datab title="hello world"/>"""
self.assertEqual(expectedXML, data.to_xml())
def test_to_xml(self):
"""Testing of Serializable.to_xml method."""
class Person(Serializable):
def __init__(self, firstName, surName):
super(Person, self).__init__()
self.firstName = firstName
self.surName = surName
person = Person("Agatha", "Christie")
expectedXML = "<person><firstName>Agatha</firstName><surName>Christie</surName></person>"
self.assertEqual(expectedXML, person.to_xml())
def test_to_xml_with_standard_list(self):
"""Testing of Serializable.to_xml with a standard list."""
class Values(Serializable):
def __init__(self, values):
super(Values, self).__init__()
self.values = values
def get_serializable_name(self):
return "list"
values = Values([10, 20, 30])
expectedXML = "<list><values><value>10</value><value>20</value><value>30</value></values></list>"
self.assertEqual(expectedXML, values.to_xml())
def test_to_xml_with_serializable_field(self):
""" Testing of Serializable.to_xml with a serializable field """
class Field(Serializable):
def __init__(self, first_name, sur_name):
super(Field, self).__init__()
self.first_name = first_name
self.sur_name = sur_name
def is_enabled_for_attributes(self):
return True
class Data(Serializable):
def __init__(self, first_name, sur_name):
super(Data, self).__init__()
self.field = Field(first_name, sur_name)
data = Data("Agatha", "Christie")
expected = """<data><field first_name="Agatha" sur_name="Christie"/></data>"""
self.assertEqual(expected, data.to_xml())
def test_to_xml_with_bad_type_for_fields(self):
""" Testing of Serializable.to_xml for failures with getSerializableFields """
class PersonNoFieldList(Serializable):
def get_serializable_fields(self):
return "not a list"
person = PersonNoFieldList()
self.assertRaises(TypeError, person.to_xml)
def test_to_xml_with_missing_field_in_class(self):
""" Testing of Serializable.to_Xml for failures with getSerializableFields """
class PersonFieldDoesNotExist(Serializable):
def get_serializable_fields(self):
return ["firstName"]
person = PersonFieldDoesNotExist()
self.assertRaises(NameError, person.to_xml)
def test_register_class(self):
""" Testing of Serializable.registerClass method """
class TestRegisterClass:
def __init__(self):
pass
self.assertTrue(Serializable.register_class(TestRegisterClass))
self.assertFalse(Serializable.register_class(TestRegisterClass))
self.assertTrue(Serializable.register_class(TestRegisterClass, "otherName"))
self.assertFalse(Serializable.register_class(TestRegisterClass, "otherName"))
def test_from_xml(self):
"""Testing of Serializable.to_xml method (simple)."""
class Person:
def __init__(self, name=""):
self.name = name
def __eq__(self, other):
return self.name == other.name
Serializable.register_class(Person)
person = Serializable.from_xml("""<person><name><NAME></name></person>""")
self.assertTrue(isinstance(person, Person))
self.assertEqual(Person("<NAME>"), person)
|
StarcoderdataPython
|
1610909
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012, <NAME>
#
# Visvis is distributed under the terms of the (new) BSD License.
# The full license can be found in 'license.txt'.
import numpy as np
from visvis.wobjects.polygonalModeling import BaseMesh
def combineMeshes(meshes):
""" combineMeshes(meshes)
Given a list of mesh objects, produces a combined mesh.
"""
if not meshes:
raise ValueError('No meshes or empty meshes given')
    # Check mesh similarity
vpf = 0
hasNormals = True
hasFaces = True
hasValues = True
#
for mesh in meshes:
if vpf == 0:
# First mesh: init
hasFaces = (mesh._faces is not None)
vpf = mesh._verticesPerFace
else:
# Compare with first
if mesh._verticesPerFace != vpf:
raise ValueError('Cannot combine meshes with different verticesPerFace.')
if (mesh._faces is not None) != hasFaces:
raise ValueError('Cannot combine meshes with and without face data.')
if True:
# Compare always
hasNormals = hasNormals and (mesh._normals is not None)
hasValues = hasValues and (mesh._values is not None)
# Combine vertices
vertices = np.concatenate( [m._vertices for m in meshes] )
# Combine faces
faces = None
if hasFaces:
facesList = []
startIndex = 0
for mesh in meshes:
facesList.append( mesh._faces + startIndex )
startIndex += mesh._vertices.shape[0]
faces = np.concatenate( facesList )
# Combine normals
normals = None
if hasNormals:
normals = np.concatenate( [m._normals for m in meshes] )
# Combine values
values = None
if hasValues:
values = np.concatenate( [m._values for m in meshes] )
# Done
return BaseMesh(vertices, faces, normals, values, vpf)
|
StarcoderdataPython
|
1747121
|
<reponame>breadtech/interface<gh_stars>0
import json
with open('menu.json') as fp:
    s = fp.read()
# implement security here
# - bad to just pass in a string from a config file
menu = json.loads(s)
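
# A possible hardening sketch for the concern noted above (illustrative only;
# load_menu_safely is a hypothetical helper, not part of this repo, and it assumes
# the menu is expected to be a JSON object): parse with explicit error handling
# and check the top-level shape before trusting the data.
def load_menu_safely(path='menu.json'):
    """Load the menu config, failing loudly on malformed or unexpected JSON."""
    with open(path) as f:
        try:
            data = json.load(f)
        except json.JSONDecodeError as exc:
            raise ValueError("menu.json is not valid JSON: %s" % exc)
    if not isinstance(data, dict):
        raise ValueError("menu.json must contain a JSON object at the top level")
    return data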
|
StarcoderdataPython
|
110217
|
# -*- coding: utf-8 -*-
"""Example for sending batch information to InfluxDB via UDP."""
"""
INFO: In order to use UDP, one should enable the UDP service from the
`influxdb.conf` under section
[[udp]]
enabled = true
bind-address = ":8089" # port number for sending data via UDP
database = "udp1" # name of database to be stored
[[udp]]
enabled = true
bind-address = ":8090"
database = "udp2"
"""
import argparse
from influxdb import InfluxDBClient
def main(uport):
"""Instantiate connection to the InfluxDB."""
# NOTE: structure of the UDP packet is different than that of information
# sent via HTTP
json_body = {
"tags": {
"host": "server01",
"region": "us-west"
},
"time": "2009-11-10T23:00:00Z",
"points": [{
"measurement": "cpu_load_short",
"fields": {
"value": 0.64
}
},
{
"measurement": "cpu_load_short",
"fields": {
"value": 0.67
}
}]
}
# make `use_udp` True and add `udp_port` number from `influxdb.conf` file
# no need to mention the database name since it is already configured
client = InfluxDBClient(use_udp=True, udp_port=uport)
# Instead of `write_points` use `send_packet`
client.send_packet(json_body)
def parse_args():
"""Parse the args."""
parser = argparse.ArgumentParser(
description='example code to play with InfluxDB along with UDP Port')
parser.add_argument('--uport', type=int, required=True,
help=' UDP port of InfluxDB')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
main(uport=args.uport)
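# To try this out (assuming the [[udp]] block shown in the module docstring is
# enabled in influxdb.conf and InfluxDB is listening on UDP port 8089), run
# something along the lines of:
#
#   python influxdb_udp_example.py --uport 8089
#
# The script filename above is only a placeholder for however this file is saved.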
|
StarcoderdataPython
|
3268617
|
<gh_stars>0
# -*- mode: python; -*-
# Execute this script using Bash script of the same name but without a file
# extension. Use ../pdbwrapper/pdbwrapper instead of pdb for debugging.
import os
import sys
import argparse
description = r"""
the_name_of_script_goes_here -- Short description line goes here
Multi-line description goes here.
Multi-line description goes here.
Multi-line description goes here.
"""
def main():
"""
Main function
"""
# Parse command-line arguments:
# https://docs.python.org/2/howto/argparse.html
parser = argparse.ArgumentParser(
prog=os.path.basename(os.path.splitext(sys.argv[0])[0]), # Avoid showing the .py file extension in the usage help
description=description,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("-theint", type=int,
help="some integer")
args = parser.parse_args(sys.argv[1:])
print("\n{}\nargs\n{}".format('-' * 80, args))
if __name__ == '__main__':
sys.exit(0 if main() else 1) # Return non-zero exit codes upon failure
# import pdb
# pdb.set_trace()
|
StarcoderdataPython
|
104744
|
<filename>game.py
import pygame
import time
import random
def game_loop(window):
clock = pygame.time.Clock()
stop = False
circle_coords = [250, 250]
circle_radius = 10
velocity = [0,0]
painted = pygame.Surface((500, 500))
drawing = True
event_vel_map = {
pygame.K_UP: [0, -1],
pygame.K_DOWN: [0, 1],
pygame.K_RIGHT: [1, 0],
pygame.K_LEFT: [-1, 0]
}
colors = [
(255, 0, 0),
(0, 255, 0),
(0, 0, 255),
(255, 255, 255)
]
color = random.choice(colors)
while not stop:
window.fill((0, 0, 0))
for event in pygame.event.get():
if event.type == pygame.QUIT:
print("Ended")
stop = True
elif event.type == pygame.KEYDOWN and (event.key == pygame.K_q or event.key == pygame.K_ESCAPE):
print("Q pressed")
stop = True
elif event.type == pygame.KEYDOWN and event.key in event_vel_map:
dv = event_vel_map[event.key]
velocity[0] += dv[0]
velocity[1] += dv[1]
elif event.type == pygame.KEYUP and event.key in event_vel_map:
dv = event_vel_map[event.key]
velocity[0] -= dv[0]
velocity[1] -= dv[1]
elif event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:
drawing = not drawing
elif event.type == pygame.KEYDOWN and event.key == pygame.K_r:
oldColor = color
while color == oldColor:
color = random.choice(colors)
elif event.type == pygame.KEYDOWN and event.key == pygame.K_c:
painted.fill((0, 0, 0))
circle_coords[0] = min(max(circle_coords[0] + velocity[0], circle_radius), 500 - circle_radius)
circle_coords[1] = min(max(circle_coords[1] + velocity[1], circle_radius), 500 - circle_radius)
if drawing:
pygame.draw.circle(painted, color, circle_coords, circle_radius)
window.blit(painted, (0, 0))
pygame.draw.circle(window, (127, 127, 127), circle_coords, circle_radius)
pygame.display.update()
clock.tick(60)
pygame.quit()
def main():
pygame.init()
window = pygame.display.set_mode((500, 500))
pygame.display.set_caption("Pygame test")
game_loop(window)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
4808513
|
from django.apps import AppConfig
class ProductsalesuiConfig(AppConfig):
name = 'productSalesUi'
|
StarcoderdataPython
|
1709397
|
<reponame>uincore/lane-finder
import sys
sys.path.append("../")
from image_processor import ImageProcessor
from lane_detector import LaneDetector
from camera import Camera
from image_operations.threshold import ColorAndGradientThresholdOperation
from image_operations.color_threshold import WhiteAndYellowColorThresholdOperation
from image_operations.transformation_parameters import TransformationParameters
from image_operations.perspective_transformation import PerspectiveTransformationOperation
from lane.lane import Lane
from lane.lane_validator import LaneValidator
from line_factory.sliding_window.sliding_window_container import SlidingWindowsContainer
from line_factory.sliding_window.sliding_window_line_detector import SlidingWindowLineDetector
from line_factory.sliding_window.curved_line_factory import CurvedLineFactory
from lane.lane_mask_factory import LaneMaskFactory
from logger import Logger
import matplotlib.pyplot as plt
import cv2
import numpy as np
from lane.drawing import Drawing
from line_factory.curved_line_coordinates_factory import CurvedLineCoordinatesFactory
from line_factory.polynomial import Polynomial
# distance from image bottom to lane lines crossing in pixels, depends on camera position
vanishing_point_distance = 310
x_meters_per_pixel = 3.7 / 700
# max distance will be a result of vanishing_point_distance * max_distance_coefficient multiplication
max_distance_coefficient = 0.85
# allow or deny lane mask construction for known lane width and one valid lane line case
allow_line_projection = True
# valid lane width boundaries
lane_width_min_max = (650, 1000)
lane_width_deviation_tolerance = 60
w, h = 1280, 720
sliding_window_container = SlidingWindowsContainer()
sliding_window_line_detector = SlidingWindowLineDetector(sliding_window_container)
curved_line_factory = CurvedLineFactory(sliding_window_line_detector)
lane = Lane(curved_line_factory, x_meters_per_pixel)
validator = LaneValidator(lane_width_min_max, lane_width_deviation_tolerance, allow_line_projection)
def convert(image):
return image[..., [2, 1, 0]]
bgr_frame = cv2.imread("curvature_test_image.png")
bw_frame = cv2.cvtColor(bgr_frame, cv2.COLOR_BGR2GRAY)
lane.update(bw_frame)
validation_result = validator.validate(lane)
print("Lane curvature radius: {:.0f}m".format(lane.radius_m))
print("Lane curvature radius: {:.0f}pixels".format(lane.radius_m/x_meters_per_pixel))
print("1000 pixels is {:.0f} meters".format(1000*x_meters_per_pixel))
print("Lane width is: {:.2f}m".format(lane.width*x_meters_per_pixel))
print("-"*20)
print("Left line radius: {:.0f}m".format(lane.line_left.radius*x_meters_per_pixel))
print("Right line radius: {:.0f}m".format(lane.line_right.radius*x_meters_per_pixel))
mask = Drawing.create_mask_image(bw_frame.shape, lane.line_left.coordinates, lane.line_right.coordinates, 255)
line_points = mask.nonzero()
avg_line, coefficients = CurvedLineCoordinatesFactory.create(line_points, bw_frame.shape[0])
y0 = 1600
x0 = avg_line[y0][0]
radius = Polynomial.radius(coefficients, y0)
distance = Polynomial.distance(coefficients, 0, 1600)
print("-"*30)
print("Lane radius: {:.0f}m".format(radius * x_meters_per_pixel))
print("Mask distance: {:.1f}m".format(distance * x_meters_per_pixel))
rgb_frame = convert(bgr_frame)
plt.imshow(rgb_frame)
x2, y2 = avg_line.T
plt.plot(x2, y2, "b")
tangent_fn = Polynomial.get_tangent_fn(coefficients, x0, y0)
pt1y, pt2y = 10, 1600
pt1x = tangent_fn(pt1y)
pt2x = tangent_fn(pt2y)
normal_fn = Polynomial.get_normal_fn(coefficients, x0, y0)
pn1y, pn2y = 1500, 1604
pn1x = normal_fn(pn1y)
pn2x = normal_fn(pn2y)
cx1, cy1 = Polynomial.center(coefficients, x0, y0, radius)
plt.plot(cx1, cy1, "ro")
plt.plot([x0, cx1], [y0, cy1], "b-")
plt.plot([pt1x, pt2x], [pt1y, pt2y], "g-")
plt.plot([pn1x, pn2x], [pn1y, pn2y], "r-")
# x, y = lane.line_left.coordinates.T
# plt.plot(x, y, "g")
# x1, y1 = lane.line_right.coordinates.T
# plt.plot(x1, y1, "b")
plt.show()
|
StarcoderdataPython
|
1723061
|
<gh_stars>100-1000
import collections
from collections import OrderedDict, namedtuple
class deque:
def __init__(self, iterable, maxlen, flags=0):
assert iterable == ()
self.maxlen = maxlen
self.flags = flags
self.d = collections.deque(iterable, maxlen)
def __len__(self):
return len(self.d)
def append(self, x):
if self.flags & 1:
if len(self.d) == self.maxlen:
raise IndexError
self.d.append(x)
def popleft(self):
return self.d.popleft()
|
StarcoderdataPython
|
168375
|
<gh_stars>0
import sys
import numpy as np
import math
from OpenGL.GL import *
from OpenGL.GL import shaders
from OpenGL.GLUT import *
vao = None
vbo = None
shaderProgram = None
uniColor = None
# generates the vertices of the circle
# returns an array with the vertices
def circleVertex(raio, cx, cy):
vertices = []
for i in range(360):
ang = (i * math.pi) / 180
x = cx + (math.cos(ang) * raio)
y = cy + (math.sin(ang) * raio)
vertices.append([x,y,0])
return np.array(vertices, dtype='f')
def readShaderFile(filename):
with open('shader/' + filename, 'r') as myfile:
return myfile.read()
def init():
global shaderProgram
global vao
global vbo
global uniColor
glClearColor(0, 0, 0, 0);
vertex_code = readShaderFile('ice.vp')
fragment_code = readShaderFile('ice.fp')
# compile shaders and program
vertexShader = shaders.compileShader(vertex_code, GL_VERTEX_SHADER)
fragmentShader = shaders.compileShader(fragment_code, GL_FRAGMENT_SHADER)
shaderProgram = shaders.compileProgram(vertexShader, fragmentShader)
# Create and bind the Vertex Array Object
vao = GLuint(0)
glGenVertexArrays(1, vao)
glBindVertexArray(vao)
# Create and bind the Vertex Buffer Object
    # create the ice cream scoops
sorvete = np.concatenate((
circleVertex(0.3,-0.3,0),
circleVertex(0.3, 0,0.2),
circleVertex(0.3, 0.3,0)
))
    # create the cone
casquinha = np.array([[-0.3, 0, 0], [0, -1, 0], [0.3, 0, 0]], dtype='f')
    # join the arrays into a single one
vertices = np.concatenate((casquinha,sorvete))
vbo = glGenBuffers(1)
glBindBuffer(GL_ARRAY_BUFFER, vbo)
glBufferData(GL_ARRAY_BUFFER, vertices, GL_STATIC_DRAW)
glVertexAttribPointer(0, 3, GL_FLOAT, False, 0, None) # first 0 is the location in shader
glBindAttribLocation(shaderProgram, 0, 'vertexPosition') # name of attribute in shader
    glEnableVertexAttribArray(0) # 0 = attribute location; every attribute must be enabled explicitly since they are disabled by default
    # get the location of the uniform variable used for the color
uniColor = glGetUniformLocation(shaderProgram, "uColor")
# Note that this is allowed, the call to glVertexAttribPointer registered VBO
# as the currently bound vertex buffer object so afterwards we can safely unbind
glBindBuffer(GL_ARRAY_BUFFER, 0)
# Unbind VAO (it's always a good thing to unbind any buffer/array to prevent strange bugs)
glBindVertexArray(0);
def display():
global shaderProgram
global vao
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
# load everthing back
glUseProgram(shaderProgram)
glBindVertexArray(vao)
glBindBuffer(GL_ARRAY_BUFFER, vbo)
# glDrawArrays( mode , first, count)
    # draw the objects
    # draw the cone
    glUniform3f(uniColor, 0.9, 0.6,0.25) # change the color through the uniform variable
glDrawArrays(GL_TRIANGLES, 0, 3)
    # draw the ice cream scoops
    # middle scoop
glUniform3f(uniColor, 1, 0.3,0.3)
glDrawArrays(GL_POLYGON, 363, 360)
    # left scoop
glUniform3f(uniColor, 1, 1,0.5)
glDrawArrays(GL_POLYGON, 3, 360)
    # right scoop
glUniform3f(uniColor, 0.3, 0.5,1)
glDrawArrays(GL_POLYGON, 363+360, 360)
#clean things up
glBindBuffer(GL_ARRAY_BUFFER, 0)
glBindVertexArray(0)
glUseProgram(0)
    glutSwapBuffers() # required on Windows!
def reshape(width, height):
glViewport(0, 0, width, height)
if __name__ == '__main__':
glutInit(sys.argv)
glutInitContextVersion(3, 0)
glutInitContextProfile(GLUT_CORE_PROFILE);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH)
glutInitWindowSize(640, 640);
glutCreateWindow(b'Ice Cream!')
glutReshapeFunc(reshape)
glutDisplayFunc(display)
init()
glutMainLoop()
|
StarcoderdataPython
|
1673512
|
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import uuid
from telemetry.backend.backend import TelemetryBackend
from telemetry.utils.message import Message, MessageType
from telemetry.utils.guid import get_or_generate_uid
class GABackend(TelemetryBackend):
backend_url = 'https://www.google-analytics.com/collect'
id = 'ga'
def __init__(self, tid: str = None, app_name: str = None, app_version: str = None):
super(GABackend, self).__init__(tid, app_name, app_version)
if tid is None:
tid = 'UA-17808594-29'
self.tid = tid
self.uid = get_or_generate_uid('openvino_ga_uid', lambda: str(uuid.uuid4()), is_valid_uuid4)
self.app_name = app_name
self.app_version = app_version
self.default_message_attrs = {
'v': '1', # API Version
'tid': self.tid,
'cid': self.uid,
'an': self.app_name,
'av': self.app_version,
'ua': 'Opera/9.80 (Windows NT 6.0) Presto/2.12.388 Version/12.14' # dummy identifier of the browser
}
def send(self, message: Message):
try:
import requests
requests.post(self.backend_url, message.attrs, timeout=1.0)
except Exception:
pass
def build_event_message(self, event_category: str, event_action: str, event_label: str, event_value: int = 1,
**kwargs):
data = self.default_message_attrs.copy()
data.update({
't': 'event',
'ec': event_category,
'ea': event_action,
'el': event_label,
'ev': event_value,
})
return Message(MessageType.EVENT, data)
def build_session_start_message(self, **kwargs):
data = self.default_message_attrs.copy()
data.update({
'sc': 'start',
't': 'event',
'ec': 'session',
'ea': 'control',
'el': 'start',
'ev': 1,
})
return Message(MessageType.SESSION_START, data)
def build_session_end_message(self, **kwargs):
data = self.default_message_attrs.copy()
data.update({
'sc': 'end',
't': 'event',
'ec': 'session',
'ea': 'control',
'el': 'end',
'ev': 1,
})
return Message(MessageType.SESSION_END, data)
def build_error_message(self, error_msg: str, **kwargs):
pass
def build_stack_trace_message(self, error_msg: str, **kwargs):
pass
def is_valid_uuid4(uid: str):
try:
uuid.UUID(uid, version=4)
except ValueError:
return False
return True
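# Minimal usage sketch (the tracking id and app identifiers below are hypothetical):
#
#   backend = GABackend(tid='UA-00000000-0', app_name='my_tool', app_version='1.0')
#   backend.send(backend.build_session_start_message())
#   backend.send(backend.build_event_message('converter', 'run', 'success'))
#   backend.send(backend.build_session_end_message())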
|
StarcoderdataPython
|
15361
|
import flask
import telebot
import words
from os import environ
from dotenv import load_dotenv
load_dotenv()
app = flask.Flask(__name__)
bot = telebot.TeleBot(environ.get("TG_TOKEN"), threaded=False)
WEBHOOK_URL_PATH = "/%s/" % (environ.get("TG_TOKEN"))
# # Remove webhook, it fails sometimes the set if there is a previous webhook
# bot.remove_webhook()
# time.sleep(1)
# # Set webhook
# bot.set_webhook(url=environ.get("WEBHOOK_URL") + WEBHOOK_URL_PATH)
@bot.message_handler(commands=['ping'])
def ping(message):
return bot.reply_to(message, "pong")
@bot.message_handler(commands=['start_game'])
def start_game(message):
if "group" in message.chat.type:
admins = bot.get_chat_administrators(message.chat.id)
w = words.Words()
for a in admins:
if message.from_user.id == a.user.id:
return bot.reply_to(message, w.start_game())
return bot.reply_to(message, "Only admins can do that!")
@bot.message_handler(commands=['ranks'])
def ranks(message):
w = words.Words()
return bot.reply_to(message, "`" + w.rankings() + "`", parse_mode="Markdown")
@bot.message_handler(commands=['ans'])
def answer(message):
if message.chat.id == message.from_user.id:
return bot.reply_to(message, "Sorry, its command work only on public chats.")
w = words.Words()
ans = message.text.split(' ')
if len(ans) == 2:
return bot.reply_to(message, w.check(message.from_user.first_name, ans[1]), parse_mode="Markdown")
return bot.reply_to(message, "Wrong command. You should use /ans <pkm_name>")
|
StarcoderdataPython
|
4822648
|
# for backwards compat
from redis_cache import RedisCache
from redis_cache import ShardedRedisCache
from redis_cache.backends.base import ImproperlyConfigured
from redis_cache.connection import pool
|
StarcoderdataPython
|
90480
|
from datetime import timedelta
from django.utils import timezone
from django.contrib.contenttypes.models import ContentType
from rest_framework.test import APITestCase
from blitz_api.factories import UserFactory
from ..models import Membership, Order, OrderLine, Refund
class RefundTests(APITestCase):
@classmethod
def setUpClass(cls):
super(RefundTests, cls).setUpClass()
cls.membership_type = ContentType.objects.get_for_model(Membership)
cls.membership = Membership.objects.create(
name="basic_membership",
details="1-Year student membership",
available=True,
price=50,
duration=timedelta(days=365),
)
cls.user = UserFactory()
cls.order = Order.objects.create(
user=cls.user,
transaction_date=timezone.now(),
authorization_id=1,
settlement_id=1,
)
cls.orderline = OrderLine.objects.create(
order=cls.order,
quantity=999,
content_type=cls.membership_type,
object_id=cls.membership.id,
)
def test_create(self):
"""
Ensure that we can create a membership.
"""
refund = Refund.objects.create(
orderline=self.orderline,
refund_date=timezone.now(),
amount=10.00,
details="Refund details",
)
self.assertEqual(str(refund), 'basic_membership, qt:999, 10.0$')
|
StarcoderdataPython
|
3248795
|
<reponame>Sitcode-Zoograf/storyboard<gh_stars>0
# Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class MasterBranchHelper(object):
name = "master"
project_id = None
expired = False
expiration_date = None
autocreated = False
restricted = True
def __init__(self, project_id):
self.project_id = project_id
def as_dict(self):
master_branch_dict = {
"name": self.name,
"project_id": self.project_id,
"expired": self.expired,
"expiration_date": self.expiration_date,
"autocreated": self.autocreated,
"restricted": self.restricted
}
return master_branch_dict
|
StarcoderdataPython
|
3383896
|
<reponame>dtom90/pygotham_boiler_miner
import urllib.request
from bs4 import BeautifulSoup
import os
outputPathBase = os.path.join(os.path.dirname(__file__), 'DEPData/')
urlPrefix = "file://"+outputPathBase
def getDEPData( appNum ):
url = requestToDEPUrl( appNum )
soup = urlToSoup( url )
if hasDEPData( soup ):
return extractDEPDataFromSoup( soup )
else:
return "No Boiler Data For Given Application Number"
def requestToDEPUrl( appNum ):
"This assumes the appNumber is known to be a valid DEP application number."
appYear = int( appNum[-2:] )
if appYear > 65:
return ( urlPrefix + "19xx/19" + appNum[-2] + "x/"+
"19" + appNum[-2:] + "/" + appNum + ".html")
else:
return ( urlPrefix + "20xx/20" + appNum[-2] + "x/"+
"20" + appNum[-2:] + "/" + appNum + ".html")
def urlToSoup( url ):
"Takes in URL and returns a soup object of the contents."
webpage = urllib.request.urlopen( url )
soup = BeautifulSoup( webpage, "html.parser" )
soup.unicode
return soup
def hasDEPData( soup ):
"Checks to see whether DEP data exist for a given application number."
tables = soup.find_all("table")
return tables[1].get_text().find( "NO RECORD" ) == -1
def extractDEPDataFromSoup( soup ):
"""
Takes in data structure from BeautifulSoup and parses for DEP Boiler Data.
We assume that the soup has been prescreened to ensure that data exist.
"""
tables = soup.find_all( "table" )
#get premise address, boro name, BIN, block #, and lot #
#This part has the following format:
#'\n\n\n\r\n PREMISES: [address] \xa0\xa0[boro name]\r\n...
#\xa0\xa0 BIN: [BIN, last 6]\xa0\xa0BLOCK:\r\n [block #]...
#\xa0\xa0LOT: [lot #]\r\n \n\n\n'
locationData = tables[ 1 ].get_text()
locationData = locationData.replace( '\n', '' )#removes '\n's
locationData = locationData.replace( '\r' , '' )#removes '\r's
locDataSplit = locationData.split( ": " )
locDataSplit2 = locDataSplit[ 1 ].split( "\xa0" )
appAddress = locDataSplit2[ 0 ][ 0:-1 ]
appBoro = locDataSplit2[2].partition( ' ')[0]
#check for case where BIN, Block, Lot are missing
appBIN = "NA"
appBlock = "NA"
appLot = "NA"
try:
appBIN = int( locDataSplit[2].partition( '\xa0' )[ 0 ] )
except:
pass
try:
appBlock = int( locDataSplit[3].partition( '\xa0' )[ 0 ] )
except:
pass
try:
appLot = int( locDataSplit[4].partition( '\xa0' )[ 0 ] )
except:
pass
allLocationData = [ appAddress, appBoro, appBIN, appBlock, appLot ]
#get DEP Application Data
applicationData = tables[2].find_all("td") #Grab individual table entries.
allDEPData = []
for i in applicationData:
txt = i.get_text() # Get the text,
if ':' in txt:
allDEPData.append(txt
.replace('\r', '') # then remove the '\r's,
.replace('\n', '') # then remove the '\n's,
.partition(':')[2] # then remove everything before ":",
.strip() # strip it
)
return allLocationData + allDEPData
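# Rough shape of the returned list (values below are hypothetical): the first five
# entries come from the premises table, followed by one entry per "key: value"
# cell of the DEP application table, e.g.
#   ['123 MAIN STREET', 'MANHATTAN', 123456, 100, 25, ...]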
|
StarcoderdataPython
|
1770149
|
<filename>spark_auto_mapper_fhir/backbone_elements/plan_definition_target.py
from __future__ import annotations
from typing import Optional, TYPE_CHECKING
from spark_auto_mapper_fhir.fhir_types.list import FhirList
from spark_auto_mapper_fhir.fhir_types.string import FhirString
from spark_auto_mapper_fhir.extensions.extension_base import ExtensionBase
from spark_auto_mapper_fhir.base_types.fhir_backbone_element_base import (
FhirBackboneElementBase,
)
if TYPE_CHECKING:
pass
# id_ (string)
# extension (Extension)
# modifierExtension (Extension)
# measure (CodeableConcept)
from spark_auto_mapper_fhir.complex_types.codeable_concept import CodeableConcept
# End Import for References for measure
# Import for CodeableConcept for measure
from spark_auto_mapper_fhir.value_sets.loinc_codes import LOINCCodesCode
# End Import for CodeableConcept for measure
# detailQuantity (Quantity)
from spark_auto_mapper_fhir.complex_types.quantity import Quantity
# detailRange (Range)
from spark_auto_mapper_fhir.complex_types.range import Range
# detailCodeableConcept (CodeableConcept)
# End Import for References for detailCodeableConcept
# Import for CodeableConcept for detailCodeableConcept
from spark_auto_mapper_fhir.value_sets.generic_type import GenericTypeCode
# End Import for CodeableConcept for detailCodeableConcept
# due (Duration)
from spark_auto_mapper_fhir.complex_types.duration import Duration
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class PlanDefinitionTarget(FhirBackboneElementBase):
"""
PlanDefinition.Target
This resource allows for the definition of various types of plans as a sharable, consumable, and executable artifact. The resource is general enough to support the description of a broad range of clinical artifacts such as clinical decision support rules, order sets and protocols.
"""
# noinspection PyPep8Naming
def __init__(
self,
*,
id_: Optional[FhirString] = None,
extension: Optional[FhirList[ExtensionBase]] = None,
modifierExtension: Optional[FhirList[ExtensionBase]] = None,
measure: Optional[CodeableConcept[LOINCCodesCode]] = None,
detailQuantity: Optional[Quantity] = None,
detailRange: Optional[Range] = None,
detailCodeableConcept: Optional[CodeableConcept[GenericTypeCode]] = None,
due: Optional[Duration] = None,
) -> None:
"""
This resource allows for the definition of various types of plans as a
sharable, consumable, and executable artifact. The resource is general enough
to support the description of a broad range of clinical artifacts such as
clinical decision support rules, order sets and protocols.
:param id_: None
:param extension: May be used to represent additional information that is not part of the basic
definition of the element. To make the use of extensions safe and manageable,
there is a strict set of governance applied to the definition and use of
extensions. Though any implementer can define an extension, there is a set of
requirements that SHALL be met as part of the definition of the extension.
:param modifierExtension: May be used to represent additional information that is not part of the basic
definition of the element and that modifies the understanding of the element
in which it is contained and/or the understanding of the containing element's
descendants. Usually modifier elements provide negation or qualification. To
make the use of extensions safe and manageable, there is a strict set of
governance applied to the definition and use of extensions. Though any
implementer can define an extension, there is a set of requirements that SHALL
be met as part of the definition of the extension. Applications processing a
resource are required to check for modifier extensions.
Modifier extensions SHALL NOT change the meaning of any elements on Resource
or DomainResource (including cannot change the meaning of modifierExtension
itself).
:param measure: The parameter whose value is to be tracked, e.g. body weight, blood pressure,
or hemoglobin A1c level.
:param detailQuantity: None
:param detailRange: None
:param detailCodeableConcept: None
:param due: Indicates the timeframe after the start of the goal in which the goal should
be met.
"""
super().__init__(
id_=id_,
extension=extension,
modifierExtension=modifierExtension,
measure=measure,
detailQuantity=detailQuantity,
detailRange=detailRange,
detailCodeableConcept=detailCodeableConcept,
due=due,
)
|
StarcoderdataPython
|
4809543
|
<reponame>danielzhaotongliu/cs348_project<filename>backend/exampleapp/models.py<gh_stars>1-10
from __future__ import unicode_literals
from django.db import models
from django.core.validators import MinValueValidator, MaxValueValidator
from phone_field import PhoneField
# Create your models here.
class Customer(models.Model):
uid = models.AutoField(primary_key=True)
username = models.CharField(max_length=100, unique=True)
password = models.CharField(max_length=100)
phone = PhoneField(null=True, blank=True)
email = models.EmailField(null=True, blank=True)
class Shoe(models.Model):
sid = models.AutoField(primary_key=True)
# Please see here for difference between db_index and unique
# https://www.reddit.com/r/django/comments/8tlscw/what_is_the_difference_between_db_indextrue_and/
style = models.CharField(max_length=100, db_index=True)
price = models.FloatField(db_index=True)
brand = models.CharField(max_length=100, db_index=True)
stock = models.IntegerField(validators=[MinValueValidator(0)])
size = models.IntegerField(
validators=[MinValueValidator(0)], db_index=True)
image_url = models.URLField()
colour = models.CharField(max_length=100, db_index=True)
name = models.CharField(max_length=100)
release_date = models.DateField()
description = models.CharField(max_length=1000)
def __str__(self):
# default return value when querying for readability
return self.name
class Review(models.Model):
uid = models.ForeignKey(Customer, null=True, on_delete=models.SET_NULL)
sid = models.ForeignKey(Shoe, null=True, on_delete=models.SET_NULL)
rating = models.IntegerField(
validators=[MinValueValidator(1), MaxValueValidator(5)])
comment = models.TextField()
# Django does not allow multi-attribute primary key columns see below
# https://stackoverflow.com/questions/16800375/how-can-set-two-primary-key-fields-for-my-models-in-django
# The declaration below only adds constraints to data
class Meta:
unique_together = (('uid', 'sid'),)
class PaymentMethod(models.Model):
CARD_TYPE = [('VISA', 'VISA'), ('MASTERCARD',
'MASTERCARD'), ('AMEX', 'AMEX')]
uid = models.ForeignKey(Customer, null=True, on_delete=models.SET_NULL)
cardNumber = models.CharField(max_length=16)
type = models.CharField(max_length=10, choices=CARD_TYPE)
isDefault = models.BooleanField()
class Meta:
unique_together = (('uid', 'cardNumber'),)
class Transaction(models.Model):
# since Transaction is not a weak entity, tid itself is enough for primary key
tid = models.AutoField(primary_key=True)
uid = models.ForeignKey(Customer, null=True, on_delete=models.SET_NULL)
sid = models.ForeignKey(Shoe, null=True, on_delete=models.SET_NULL)
datetime = models.DateTimeField(null=True)
quantity = models.IntegerField(validators=[MinValueValidator(1)])
address = models.TextField(null=True, blank=True)
payMethod = models.ForeignKey(
PaymentMethod, null=True, on_delete=models.SET_NULL)
class AddressBook(models.Model):
uid = models.ForeignKey(Customer, null=True, on_delete=models.SET_NULL)
address = models.TextField()
isDefault = models.BooleanField()
class Meta:
unique_together = (('uid', 'address'),)
|
StarcoderdataPython
|
1645815
|
<filename>retropath2_wrapper/RetroPath2.py
#!/usr/bin/env python3
"""
Created on January 16 2020
@author: <NAME>, <NAME>
@description: Python wrapper to run RetroPath2.0 KNIME workflow
"""
from os import (
mkdir as os_mkdir,
path as os_path,
rename,
devnull,
# geteuid,
# getegid
)
from getpass import getuser
from shutil import copyfile
from sys import platform as sys_platform
from subprocess import (
run,
STDOUT,
TimeoutExpired
) # nosec
from brs_utils import (
download_and_extract_tar_gz,
extract_gz,
chown_r
)
from filetype import guess
from tempfile import TemporaryDirectory
from typing import (
Dict,
List,
Tuple
)
from logging import (
Logger,
getLogger
)
from re import match
from csv import reader as csv_reader
from colored import fg, bg, attr
from logging import StreamHandler
from csv import reader
__KNIME_VER__ = '4.3.0'
__RETROPATH2_KWF__ = 'RetroPath2.0_r20210127.knwf'
def set_vars(
kexec: str,
kver: str,
kpkg_install: bool,
workflow: str
) -> Dict:
"""
Set variables and store them into a dictionary.
Parameters
----------
kexec : str
Path to KNIME executable.
kver : str
Version of KNIME to install.
kpkg_install : bool
Boolean to know if KNIME packages have to be installed.
workflow: str
Path to workflow to process.
logger : Logger
The logger object.
"""
# Take workflow in the package if not passed as an argument
if workflow is None:
workflow = os_path.join(
os_path.dirname(os_path.abspath(__file__)),
'workflows',
__RETROPATH2_KWF__
)
# Setting kexec, kpath, kinstall, kver
kexec_install = False
if kexec is None:
kinstall = os_path.dirname(os_path.abspath(__file__))
if not kver:
kver = __KNIME_VER__
kpath = os_path.join(
kinstall,
'knime_'
) + kver
kexec = os_path.join(
kpath,
'knime'
)
if not os_path.exists(kexec):
kexec_install = True
else:
kpath = kexec[:kexec.rfind('/')]
kinstall = kpath[:kpath.rfind('/')]
# Build a dict to store KNIME vars
return {
'kexec' : kexec,
'kexec_install' : kexec_install,
'kver' : kver,
'kpath' : kpath,
'kinstall' : kinstall,
'kpkg_install' : kpkg_install,
'workflow' : workflow
}
def retropath2(
sink_file: str, source_file: str, rules_file: str,
outdir: str,
kexec: str = None, kpkg_install: bool = True, kver: str = None,
workflow: str = None,
kvars: Dict = None,
max_steps: int = 3,
topx: int = 100,
dmin: int = 0, dmax: int = 100,
mwmax_source: int = 1000, mwmax_cof: int = 1000,
timeout: int = 30,
logger: Logger = getLogger(__name__)
) -> Tuple[str, Dict]:
if kvars is None:
# Store KNIME vars into a dictionary
kvars = set_vars(
kexec,
kver,
kpkg_install,
workflow
)
logger.debug('kvars: ' + str(kvars))
# Store RetroPath2 params into a dictionary
rp2_params = {
'max_steps' : max_steps,
'topx' : topx,
'dmin' : dmin,
'dmax' : dmax,
'mwmax_source' : mwmax_source,
'mwmax_cof' : mwmax_cof
}
logger.debug('rp2_params: ' + str(rp2_params))
r_code, inchi = check_input(source_file, sink_file)
if r_code != 'OK':
return str(r_code), None
# Install KNIME
# if kexec is not specified
# and executable not detected in default path
if kvars['kexec_install']:
install_knime(
kvars['kinstall'],
kvars['kver'],
logger
)
r_code = install_knime_pkgs(
kvars['kpath'],
kvars['kver'],
logger
)
if r_code > 0:
return str(r_code), None
elif r_code == -1:
return 'OSError', None
else:
# Add packages to KNIME
if kvars['kpkg_install']:
r_code = install_knime_pkgs(
kvars['kpath'],
kvars['kver'],
logger
)
if r_code > 0:
return str(r_code), None
elif r_code == -1:
return 'OSError', None
logger.info('{attr1}Initializing{attr2}'.format(attr1=attr('bold'), attr2=attr('reset')))
with TemporaryDirectory() as tempd:
# Format files for KNIME
files = format_files_for_knime(
sink_file, source_file, rules_file,
tempd, outdir,
logger
)
logger.debug(files)
# Create outdir if does not exist
if not os_path.exists(outdir):
os_mkdir(outdir)
# Call KNIME
r_code = call_knime(
kvars,
files,
rp2_params,
timeout,
logger
)
if r_code > 0:
return str(r_code), files
elif r_code == -1:
return 'TimeLimit', files
elif r_code == -2:
return 'OSError', None
r_code = check_src_in_sink_2(
source_inchi = inchi,
src_in_sink_file = os_path.join(files['outdir'], files['src-in-sk']),
logger = logger
)
if r_code > 0:
return str(r_code), files
elif r_code == -1:
return 'SrcInSink', files
elif r_code == -2:
return 'FileNotFound', files
return 'OK', files
def check_input(
source_file: str,
sink_file: str,
logger: Logger = getLogger(__name__)
) -> Tuple[str, str]:
logger.info('{attr1}Checking input data{attr2}'.format(attr1=attr('bold'), attr2=attr('reset')))
# Check if InChI is well-formed
inchi = check_inchi_from_file(source_file, logger)
if inchi == '':
return 'InChI', None
# Check if source is in sink
r_code = check_src_in_sink_1(inchi, sink_file, logger)
if r_code == -1:
return 'SrcInSink', None
elif r_code == -2:
return 'FileNotFound', None
return 'OK', inchi
def check_src_in_sink_1(
source_inchi: str,
sink_file: str,
logger: Logger = getLogger(__name__)
) -> int:
"""
Check if source is present in sink file. InChIs have to be strictly equal.
Parameters
----------
source_inchi: str
Path to file containing the source.
sink_file: str
Path to file containing the sink.
logger : Logger
The logger object.
Returns
-------
int Return code.
"""
logger.info(' |- Source in Sink (simple)')
try:
with open(sink_file, 'r') as f:
for row in csv_reader(f, delimiter=',', quotechar='"'):
if source_inchi == row[1]:
logger.error(' source has been found in sink')
return -1
except FileNotFoundError as e:
logger.error(e)
return -2
return 0
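# Illustration of what this check reads (hypothetical sink contents): the InChI is
# taken from the second CSV column and compared by strict string equality, e.g.
#
#   "cmpd_1","InChI=1S/C2H6/c1-2/h1-2H3"
#
# check_src_in_sink_1('InChI=1S/C2H6/c1-2/h1-2H3', 'sink.csv') would return -1 here,
# while any other source InChI would return 0.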
def check_inchi_from_file(
file: str,
logger: Logger = getLogger(__name__)
) -> str:
logger.info(' |- InChI')
try:
with open(file, 'r') as f:
f_reader = reader(f)
header = next(f_reader)
if [_.strip().lower() for _ in header[:2]] != ['name', 'inchi']:
logger.error(header)
                return ''
compound_id, inchi = next(f_reader)[:2] # Sniff first inchi
inchi = inchi.strip() # Remove trailing spaces
# Match
#
# InChI=
# -----
# matches 'InChI='
#
# 1(S)?
# -----
# matches:
# 1 --> version number, currently 1
# (S)? --> standard or not
#
# /(([a-z|[A-Z])\d+)+
# ------------------
# Main layer/Chemical formula, only mandatory sublayer
# matches:
# / --> layer separator
# (([a-z|[A-Z])\d+)+ --> a letter followed by at least one number, at least one time
#
# (/.+)?
# ------
# Other (sub-)layers
# matches:
# (/.+)? --> if '/' is present, then at least one character/symbol is mandatory
# if match('InChI=1(S)?/(([a-z|[A-Z])\d+)+(/.+)?$', inchi) is None:
if match('InChI=1(S)?/(([a-z|[A-Z])+\d+)+(/.+)?$', inchi) is None:
logger.error(' {inchi} is not a valid InChI notation'.format(inchi=inchi))
return ''
except FileNotFoundError as e:
logger.error(e)
return ''
return inchi
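# Illustration of a source file this check accepts (hypothetical compound): the
# header must read 'Name,InChI' (compared case-insensitively) and the InChI sits
# in the second column of the first data row, e.g.
#
#   Name,InChI
#   target,InChI=1S/C2H6/c1-2/h1-2H3
#
# Note that the regular expression above only validates the version/formula prefix
# (plus optional sublayers), not full chemical correctness.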
def check_src_in_sink_2(
source_inchi: str,
src_in_sink_file: str,
logger: Logger = getLogger(__name__)
) -> int:
"""
Check if source is present in sink file. InChIs could differ.
Parameters
----------
source_inchi: str
Path to file containing the source.
sink_file: str
Path to file containing the sink.
logger : Logger
The logger object.
Returns
-------
int Return code.
"""
logger.info(' |- Checking Source in Sink (advanced)')
try:
count = 0
with open(src_in_sink_file, 'r') as f:
for i in csv_reader(f, delimiter=',', quotechar='"'):
count += 1
if count > 1:
logger.error(' |- source has been found in sink')
return -1
except FileNotFoundError as e:
logger.error(e)
return -2
return 0
def install_knime(
kinstall: str,
kver: str,
logger: Logger = getLogger(__name__)
) -> None:
"""
Install KNIME.
Parameters
----------
kinstall : str
Path where install KNIME into.
kver : str
Version of KNIME to install.
logger : Logger
The logger object.
"""
logger.info('{attr1}Downloading KNIME {kver}...{attr2}'.format(attr1=attr('bold'), kver=kver, attr2=attr('reset')))
if sys_platform == 'linux':
kurl = 'http://download.knime.org/analytics-platform/linux/knime_'+kver+'.linux.gtk.x86_64.tar.gz'
elif sys_platform == 'darwin':
# kurl = 'https://download.knime.org/analytics-platform/macosx/knime-'+kver+'-app.macosx.cocoa.x86_64.dmg'
kurl = 'https://download.knime.org/analytics-platform/macosx/knime-latest-app.macosx.cocoa.x86_64.dmg'
else:
kurl = 'https://download.knime.org/analytics-platform/win/knime-'+kver+'-installer-win32.win32.x86_64.exe'
logger.info(' |--url: '+kurl)
logger.info(' |--install_dir: '+kinstall)
download_and_extract_tar_gz(kurl, kinstall)
chown_r(kinstall, getuser())
# chown_r(kinstall, geteuid(), getegid())
def gunzip_to_csv(filename: str, indir: str) -> str:
"""
Uncompress gzip file into indir.
Parameters
----------
filename : str
Path of file to deflate.
indir : str
Path where install.
"""
new_f = os_path.join(
indir,
os_path.basename(filename)+'.gz'
)
copyfile(filename, new_f)
filename = extract_gz(new_f, indir)
rename(filename, filename+'.csv')
filename += '.csv'
return filename
def format_files_for_knime(
sinkfile: str, sourcefile: str, rulesfile: str,
indir: str, outdir: str,
logger: Logger = getLogger(__name__)
) -> Dict:
"""
Format files according to KNIME expectations.
Parameters
----------
sinkfile : str
Path of sink file.
sourcefile : str
Path of source file.
rulesfile : str
Path of rules file.
indir : str
Path where install.
outdir : str
Path to output the resuts.
logger : Logger
The logger object.
Returns
-------
Dict Dictionary containing filenames.
"""
logger.info(' |- Formatting files for KNIME')
# If 'rulesfile' is a pure gzip archive without tar
kind = guess(rulesfile)
if kind:
if kind.mime == 'application/gzip':
rulesfile = gunzip_to_csv(rulesfile, indir)
files = {
'sink' : os_path.abspath(sinkfile),
'source' : os_path.abspath(sourcefile),
'rules' : os_path.abspath(rulesfile),
'results' : 'results'+'.csv',
'src-in-sk' : 'source-in-sink'+'.csv',
'outdir' : outdir
}
# Because KNIME accepts only '.csv' file extension,
# files have to be renamed
for key in ['sink', 'source', 'rules']:
if os_path.splitext(files[key])[-1] != '.csv':
new_f = os_path.join(
indir,
os_path.basename(files[key])+'.csv'
)
copyfile(files[key], new_f)
files[key] = new_f
return files
def install_knime_pkgs(
kpath: str,
kver: str,
logger: Logger = getLogger(__name__)
) -> int:
"""
Install KNIME packages needed to execute RetroPath2.0 workflow.
Parameters
----------
kpath : str
Path that contains KNIME executable.
kver : str
Version of KNIME installed.
logger : Logger
The logger object.
Returns
-------
int Return code.
"""
StreamHandler.terminator = ""
logger.info( ' |- Checking KNIME packages...')
logger.debug(' + kpath: '+kpath)
logger.debug(' + kver: '+kver)
args = \
' -application org.eclipse.equinox.p2.director' \
+ ' -nosplash -consolelog' \
+ ' -r http://update.knime.org/community-contributions/trunk,' \
+ 'http://update.knime.com/community-contributions/trusted/'+kver[:3]+',' \
+ 'http://update.knime.com/analytics-platform/'+kver[:3] \
+ ' -i org.knime.features.chem.types.feature.group,' \
+ 'org.knime.features.datageneration.feature.group,' \
+ 'org.knime.features.python.feature.group,' \
+ 'org.rdkit.knime.feature.feature.group' \
+ ' -bundlepool ' + kpath + ' -d ' + kpath
if ' ' in kpath:
cmd = '"'+os_path.join(kpath, 'knime')+'"' \
+ args
else:
cmd = os_path.join(kpath, 'knime') \
+ args
try:
printout = open(devnull, 'wb') if logger.level > 10 else None
CPE = run(
cmd.split(),
stdout=printout,
stderr=printout,
shell=False
) # nosec
logger.debug(CPE)
StreamHandler.terminator = "\n"
logger.info(' OK')
return CPE.returncode
except OSError as e:
logger.error(e)
return -1
def call_knime(
kvars: Dict,
files: Dict,
params: Dict,
timeout: int,
logger: Logger = getLogger(__name__)
) -> int:
"""
Install KNIME packages needed to execute RetroPath2.0 workflow.
Parameters
----------
kvars: Dict
KNIME variables.
files: Dict
Paths of sink, source, rules files.
params: Dict
Parameters of the workflow to process.
timeout: int
Time after which the run returns.
logger : Logger
The logger object.
Returns
-------
int Return code.
"""
StreamHandler.terminator = ""
logger.info('{attr1}Running KNIME...{attr2}'.format(attr1=attr('bold'), attr2=attr('reset')))
args = ' -nosplash -nosave -reset --launcher.suppressErrors -application org.knime.product.KNIME_BATCH_APPLICATION ' \
+ ' -workflowFile=' + kvars['workflow'] \
+ ' -workflow.variable=input.dmin,"' + str(params['dmin']) + '",int' \
+ ' -workflow.variable=input.dmax,"' + str(params['dmax']) + '",int' \
+ ' -workflow.variable=input.max-steps,"' + str(params['max_steps']) + '",int' \
+ ' -workflow.variable=input.sourcefile,"' + files['source'] + '",String' \
+ ' -workflow.variable=input.sinkfile,"' + files['sink'] + '",String' \
+ ' -workflow.variable=input.rulesfile,"' + files['rules'] + '",String' \
+ ' -workflow.variable=input.topx,"' + str(params['topx']) + '",int' \
+ ' -workflow.variable=input.mwmax-source,"' + str(params['mwmax_source']) + '",int' \
+ ' -workflow.variable=input.mwmax-cof,"' + str(params['mwmax_cof']) + '",int' \
+ ' -workflow.variable=output.dir,"' + files['outdir'] + '",String' \
+ ' -workflow.variable=output.solutionfile,"' + files['results'] + '",String' \
+ ' -workflow.variable=output.sourceinsinkfile,"' + files['src-in-sk'] + '",String'
logger.debug(kvars['kexec'] + ' ' + args)
try:
printout = open(devnull, 'wb') if logger.level > 10 else None
CPE = run(
[kvars['kexec']] + args.split(),
stdout=printout,
stderr=printout,
timeout=timeout*60,
shell=False
) # nosec
logger.debug(CPE)
StreamHandler.terminator = "\n"
logger.info(' {bold}OK{reset}'.format(bold=attr('bold'), reset=attr('reset')))
return CPE.returncode
except TimeoutExpired as e:
logger.warning(' |- Time limit ({timeout} min) is reached'.format(timeout=timeout))
logger.warning(' Results collected until now are available')
return -1
except OSError as e:
logger.error(e)
return -2
|
StarcoderdataPython
|
1605830
|
from app import db
from app.helpers.graphene_types import BaseSQLAlchemyObjectType
from app.helpers.mail_type import EmailType
from app.models.base import BaseModel
from app.models.utils import enum_column
class Email(BaseModel):
mailjet_id = db.Column(db.TEXT, unique=True, nullable=False)
address = db.Column(db.TEXT, nullable=False)
type = enum_column(EmailType, nullable=False)
user_id = db.Column(
db.Integer, db.ForeignKey("user.id"), nullable=True, index=True
)
user = db.relationship("User", backref="emails")
employment_id = db.Column(
db.Integer, db.ForeignKey("employment.id"), nullable=True, index=True
)
employment = db.relationship("Employment", backref="invite_emails")
class EmailOutput(BaseSQLAlchemyObjectType):
class Meta:
model = Email
only_fields = (
"id",
"creation_time",
"type",
"address",
)
|
StarcoderdataPython
|
1648699
|
<gh_stars>1-10
"""
API calls for flowcharts
"""
from seamm_datastore.database.models import Flowchart
from seamm_datastore.database.schema import FlowchartSchema
from flask import Response
from flask_jwt_extended import jwt_required
from seamm_dashboard import authorize
import json
__all__ = ["get_flowcharts", "get_flowchart", "get_cytoscape"]
@jwt_required(optional=True)
def get_flowcharts(description=None, limit=None):
    # If limit is not set, set limit to all flowcharts in the DB.
if limit is None:
limit = Flowchart.query.count()
if description is not None:
flowcharts = Flowchart.query.filter(
Flowchart.description.contains(description)
).limit(limit)
else:
flowcharts = Flowchart.query.limit(limit)
authorized_flowcharts = []
for flowchart in flowcharts:
if authorize.read(flowchart):
authorized_flowcharts.append(flowchart)
else:
for project in flowchart.projects:
if authorize.read(project):
authorized_flowcharts.append(flowchart)
break
flowcharts_schema = FlowchartSchema(many=True)
return flowcharts_schema.dump(authorized_flowcharts), 200
@jwt_required(optional=True)
def get_flowchart(id):
"""
Function for api endpoint api/flowcharts/{id}
Parameters
----------
id : the ID of the flowchart to return
"""
flowchart = Flowchart.query.get(id)
if flowchart is None:
return Response(status=404)
authorized = False
if authorize.read(flowchart):
authorized = True
if not authorized:
for project in flowchart.projects:
if authorize.read(project):
authorized = True
break
if not authorized:
return Response(status=401)
flowchart_schema = FlowchartSchema(many=False)
return flowchart_schema.dump(flowchart), 200
@jwt_required(optional=True)
def get_cytoscape(id, flowchartKeys=None):
"""
Function for getting cytoscape elements for a flowchart.
"""
flowchart = Flowchart.query.get(id)
if flowchart is None:
return Response(status=404)
authorized = False
if authorize.read(flowchart):
authorized = True
if not authorized:
for project in flowchart.projects:
if authorize.read(project):
authorized = True
break
if not authorized:
return Response(status=401)
important_stuff = {}
important_stuff = flowchart.json
if isinstance(important_stuff, str):
important_stuff = json.loads(flowchart.json)
elements = []
for node_number, node in enumerate(important_stuff["nodes"]):
url = "#"
# Build elements for cytoscape
elements.append(
{
"data": {
"id": node["attributes"]["_uuid"],
"name": node["attributes"]["_title"],
"url": url,
},
"position": {
"x": node["attributes"]["x"],
"y": node["attributes"]["y"],
},
"description": "",
}
)
for edge in important_stuff["edges"]:
node1_id = edge["node1"]
node2_id = edge["node2"]
edge_data = {
"data": {
"id": str(node1_id) + "_" + str(node2_id),
"source": node1_id,
"target": node2_id,
},
}
elements.append(edge_data)
return elements, 201
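# For reference, the elements returned above have this rough shape (values hypothetical):
#   [
#     {"data": {"id": "<node uuid>", "name": "<node title>", "url": "#"},
#      "position": {"x": 0.0, "y": 0.0}, "description": ""},
#     {"data": {"id": "<node1>_<node2>", "source": "<node1>", "target": "<node2>"}},
#   ]
# returned together with the HTTP status code 201.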
|
StarcoderdataPython
|
74018
|
default_app_config = "example.apps.ExampleConfig"
|
StarcoderdataPython
|
111939
|
<filename>utilities/utilities.py
import numpy as np
from math import log
# sigmoid activation as per example
def sigmoid_activation(x):
return 1.0 / (1.0+np.exp(-x))
# inverted sigmoid activation
def inv_sigmoid_activation(x):
return -1.0 * np.log((1.0-x)/x) if x > 0.0 else 0.0
# corresponding derivative
def sigmoid_prime(x):
return np.multiply(sigmoid_activation(x), (1-sigmoid_activation(x)))
# loss function
def dot_loss(p, b):
e = p - b
esq = e.dot(e.T)
return np.sum(np.array(esq)), e
# vector of partial derivatives for the output activations
def cost_derivative(out_act, y):
return out_act-y
# softmax
def softmax(x):
return np.exp(x) / np.sum(np.exp(x))
# Sample generator
def next_batch(x, y, batchsize):
for i in np.arange(0, x.shape[0], batchsize):
yield (x[i:i + batchsize], y[i:i + batchsize])
|
StarcoderdataPython
|
3212747
|
"""
Lists: list
insert(i,x)
remove(x)
sort()
count()
append(x)
reverse()
index(x)  returns the index of the first element equal to x in the list; raises an error if no match is found
"""
# a=[66.25,333,333,1,1234.5]
# print(a.count(333),a.count(66.25),a.count("x"))
# a.insert(2,-1)
# a.append(333)
# print(a)
#
# print(a.index(333))
# a.remove(333)
# print(a)
# a.reverse()
# print(a)
# a.sort()
# print(a)
"""
Using a list as a stack: append(), pop()
"""
# stack=[3,4,5]
# stack.append(6)
# print(stack)
#
# print(stack.pop())
# print(stack)
"""
Using a list as a queue
"""
# from collections import deque
#
# queue = deque(["John", "make", "dawei"])
# queue.append("Terry")
# queue.append("Graham")
# queue.popleft()
# queue.popleft()
# print(queue)
"""
List comprehensions
"""
# vec = [2, 4, 6]
# print([[x, x ** 2] for x in vec])
#
# vec1 = [2, 4, 6]
# vec2 = [4, 3, -9]
# print([x * y for x in vec1 for y in vec2])
# print([x + y for x in vec1 for y in vec2])
# print([vec1[i] * vec2[i] for i in range(len(vec1))])
"""
Nested list comprehensions
Transpose a 3x4 matrix (list of lists) into a 4x3 one
"""
matrix = [
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]
]
# first approach
for i in range(4):
print("值为:",[row[i] for row in matrix])
print([row[1] for row in matrix])
print([[row[i] for row in matrix]for i in range(4)])
# second approach
# transposed=[]
# for i in range(4):
# transposed.append([row[i] for row in matrix])
#
# print(transposed)
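# An alternative worth noting (not used above): zip(*matrix) transposes directly,
# yielding one tuple per column of the original matrix.
print(list(zip(*matrix)))   # [(1, 5, 9), (2, 6, 10), (3, 7, 11), (4, 8, 12)]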
"""
The del statement
removes an element from a list by index rather than by value
"""
a = [-1, 1, 66.25, 333, 333, 1234.5]
del a[0]
print(a)
del a[2:4]
print(a)
del a[:]
print(a)
|
StarcoderdataPython
|
1725075
|
<filename>discord_dice_roller/main.py
"""Main file for our bot"""
# Built-in
import os
import random
# Third-party
from discord.ext import commands
from dotenv import load_dotenv
# Application
from cogs import DiceRollingCog, GuildConfigCog, UserConfigCog, UtilityCog
from utils.logging import setup_logging
from utils.settings import get_command_prefix, init_settings_files
# --------------------------------------------------------------------------------
# > Main
# --------------------------------------------------------------------------------
if __name__ == "__main__":
# Env setup
random.seed()
load_dotenv()
setup_logging()
init_settings_files()
# Bot setup
bot = commands.Bot(command_prefix=get_command_prefix, help_command=None)
for cog_class in [DiceRollingCog, GuildConfigCog, UserConfigCog, UtilityCog]:
bot.add_cog(cog_class(bot))
# Execute
TOKEN = os.getenv("DISCORD_TOKEN")
bot.run(TOKEN)
|
StarcoderdataPython
|
3291236
|
# Lowercase letter k printed with asterisks, using functions
def for_k():
""" *'s printed in the shape of k """
for row in range(9):
for col in range(9):
if col ==0 or row+col ==5 and row >1 or row -col ==3:
print('*',end=' ')
else:
print(' ',end=' ')
print()
def while_k():
""" *'s printed in the Shape of Small k """
row =0
while row <9:
col =0
while col <9:
if col ==0 or row+col ==5 and row >1 or row -col ==3:
print('*',end=' ')
else:
print(' ',end=' ')
col+=1
print()
row +=1
|
StarcoderdataPython
|