<filename>lib/loaders/gt_mrcn_loader.py
"""
data_json has
0. refs: [{ref_id, ann_id, box, image_id, split, category_id, sent_ids, att_wds}]
1. images: [{image_id, ref_ids, file_name, width, height, h5_id}]
2. anns: [{ann_id, category_id, image_id, box, h5_id}]
3. sentences: [{sent_id, tokens, h5_id}]
4. word_to_ix: {word: ix}
5. att_to_ix : {att_wd: ix}
6. att_to_cnt: {att_wd: cnt}
7. label_length: L
Note, box in [xywh] format
label_h5 has
/labels is (M, max_length) uint32 array of encoded labels, zeros padded
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path as osp
import numpy as np
import h5py
import json
import random
from loaders.loader import Loader
import torch
from torch.autograd import Variable
from functools import cmp_to_key
# mrcn path
from mrcn import inference_no_imdb
# box functions
def xywh_to_xyxy(boxes):
"""Convert [x y w h] box format to [x1 y1 x2 y2] format."""
return np.hstack((boxes[:, 0:2], boxes[:, 0:2] + boxes[:, 2:4] - 1))
def xyxy_to_xywh(boxes):
"""Convert [x1 y1 x2 y2] box format to [x y w h] format."""
return np.hstack((boxes[:, 0:2], boxes[:, 2:4] - boxes[:, 0:2] + 1))
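# Quick sanity check for the converters above (a sketch, not part of the original file;
# boxes are (N, 4) float arrays):
#   b = np.array([[10., 20., 30., 40.]])           # [x, y, w, h]
#   xywh_to_xyxy(b)                                # -> [[10., 20., 39., 59.]]
#   np.allclose(xyxy_to_xywh(xywh_to_xyxy(b)), b)  # round-trip recovers the input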
class GtMRCNLoader(Loader):
def __init__(self, data_json, data_h5):
# parent loader instance
Loader.__init__(self, data_json, data_h5)
# prepare attributes
self.att_to_ix = self.info['att_to_ix']
self.ix_to_att = {ix: wd for wd, ix in self.att_to_ix.items()}
self.num_atts = len(self.att_to_ix)
self.att_to_cnt = self.info['att_to_cnt']
# img_iterators for each split
self.split_ix = {}
self.iterators = {}
for image_id, image in self.Images.items():
# use its ref's split (we assume each image belongs to exactly one split)
split = self.Refs[image['ref_ids'][0]]['split']
if split not in self.split_ix:
self.split_ix[split] = []
self.iterators[split] = 0
self.split_ix[split] += [image_id]
for k, v in self.split_ix.items():
print('assigned %d images to split %s' % (len(v), k))
def prepare_mrcn(self, head_feats_dir, args):
"""
Arguments:
head_feats_dir: cache/feats/dataset_splitBy/net_imdb_tag, containing all image conv_net feats
args: imdb_name, net_name, iters, tag
"""
self.head_feats_dir = head_feats_dir
self.mrcn = inference_no_imdb.Inference(args)
assert args.net_name == 'res101'
self.pool5_dim = 1024
self.fc7_dim = 2048
# load different kinds of feats
def loadFeats(self, Feats):
# Feats = {feats_name: feats_path}
self.feats = {}
self.feat_dim = None
for feats_name, feats_path in Feats.items():
if osp.isfile(feats_path):
self.feats[feats_name] = h5py.File(feats_path, 'r')
self.feat_dim = self.feats[feats_name]['fc7'].shape[1]
assert self.feat_dim == self.fc7_dim
print('FeatLoader loading [%s] from %s [feat_dim %s]' % \
(feats_name, feats_path, self.feat_dim))
# shuffle split
def shuffle(self, split):
random.shuffle(self.split_ix[split])
# reset iterator
def resetIterator(self, split):
self.iterators[split] = 0
# expand list by seq_per_ref, i.e., [a,b], 3 -> [aaabbb]
def expand_list(self, L, n):
out = []
for l in L:
out += [l] * n
return out
# get batch of data
def getBatch(self, split, opt):
# options
batch_size = opt.get('batch_size', 5)
seq_per_ref = opt.get('seq_per_ref', 3)
sample_ratio = opt.get('visual_sample_ratio', 0.3) # sample ratio, st vs dt
split_ix = self.split_ix[split]
max_index = len(split_ix) - 1 # don't forget to -1
wrapped = False
# fetch image_ids
batch_image_ids = []
for i in range(batch_size):
ri = self.iterators[split]
ri_next = ri + 1
if ri_next > max_index:
ri_next = 0
wrapped = True
self.iterators[split] = ri_next
image_id = split_ix[ri]
batch_image_ids += [image_id]
# fetch feats
batch_ref_ids = []
batch_pos_ann_ids, batch_pos_sent_ids, batch_pos_pool5, batch_pos_fc7 = [], [], [], []
batch_pos_cxt_fc7, batch_pos_cxt_lfeats = [], []
batch_neg_ann_ids, batch_neg_sent_ids, batch_neg_pool5, batch_neg_fc7 = [], [], [], []
batch_neg_cxt_fc7, batch_neg_cxt_lfeats = [], []
for image_id in batch_image_ids:
ref_ids = self.Images[image_id]['ref_ids']
batch_ref_ids += self.expand_list(ref_ids, seq_per_ref)
# fetch head and im_info
head, im_info = self.image_to_head(image_id)
head = Variable(torch.from_numpy(head).cuda())
# get image related ids
image_pos_ann_ids, image_neg_ann_ids = [], []
for ref_id in ref_ids:
ref_ann_id = self.Refs[ref_id]['ann_id']
# pos ids
pos_ann_ids = [ref_ann_id] * seq_per_ref
pos_sent_ids = self.fetch_sent_ids_by_ref_id(ref_id, seq_per_ref)
# neg ids
neg_ann_ids, neg_sent_ids = self.sample_neg_ids(ref_ann_id, seq_per_ref, sample_ratio)
# add to image and batch
image_pos_ann_ids += pos_ann_ids
image_neg_ann_ids += neg_ann_ids
batch_pos_sent_ids += pos_sent_ids
batch_neg_sent_ids += neg_sent_ids
# fetch feats
pos_ann_boxes = xywh_to_xyxy(np.vstack([self.Anns[ann_id]['box'] for ann_id in image_pos_ann_ids]))
image_pos_pool5, image_pos_fc7 = self.fetch_grid_feats(pos_ann_boxes, head, im_info) # (num_pos, k, 7, 7)
batch_pos_pool5 += [image_pos_pool5]
batch_pos_fc7 += [image_pos_fc7]
neg_ann_boxes = xywh_to_xyxy(np.vstack([self.Anns[ann_id]['box'] for ann_id in image_neg_ann_ids]))
image_neg_pool5, image_neg_fc7 = self.fetch_grid_feats(neg_ann_boxes, head, im_info) # (num_neg, k, 7, 7)
batch_neg_pool5 += [image_neg_pool5]
batch_neg_fc7 += [image_neg_fc7]
# add to batch
batch_pos_ann_ids += image_pos_ann_ids
batch_neg_ann_ids += image_neg_ann_ids
# get feats and labels
pos_fc7 = torch.cat(batch_pos_fc7, 0).detach()
pos_pool5 = torch.cat(batch_pos_pool5, 0).detach()
pos_lfeats = self.compute_lfeats(batch_pos_ann_ids)
pos_dif_lfeats = self.compute_dif_lfeats(batch_pos_ann_ids)
pos_labels = np.vstack([self.fetch_seq(sent_id) for sent_id in batch_pos_sent_ids])
neg_fc7 = torch.cat(batch_neg_fc7, 0).detach()
neg_pool5 = torch.cat(batch_neg_pool5, 0).detach()
neg_lfeats = self.compute_lfeats(batch_neg_ann_ids)
neg_dif_lfeats = self.compute_dif_lfeats(batch_neg_ann_ids)
neg_labels = np.vstack([self.fetch_seq(sent_id) for sent_id in batch_neg_sent_ids])
# fetch cxt_fc7 and cxt_lfeats
pos_cxt_fc7, pos_cxt_lfeats, pos_cxt_ann_ids = self.fetch_cxt_feats(batch_pos_ann_ids, opt)
neg_cxt_fc7, neg_cxt_lfeats, neg_cxt_ann_ids = self.fetch_cxt_feats(batch_neg_ann_ids, opt)
pos_cxt_fc7 = Variable(torch.from_numpy(pos_cxt_fc7).cuda())
pos_cxt_lfeats = Variable(torch.from_numpy(pos_cxt_lfeats).cuda())
neg_cxt_fc7 = Variable(torch.from_numpy(neg_cxt_fc7).cuda())
neg_cxt_lfeats = Variable(torch.from_numpy(neg_cxt_lfeats).cuda())
# fetch attributes for batch_pos_ann_ids ONLY
att_labels, select_ixs = self.fetch_attribute_label(batch_pos_ann_ids)
# convert to Variable
pos_lfeats = Variable(torch.from_numpy(pos_lfeats).cuda())
pos_dif_lfeats = Variable(torch.from_numpy(pos_dif_lfeats).cuda())
pos_labels = Variable(torch.from_numpy(pos_labels).long().cuda())
neg_lfeats = Variable(torch.from_numpy(neg_lfeats).cuda())
neg_dif_lfeats = Variable(torch.from_numpy(neg_dif_lfeats).cuda())
neg_labels = Variable(torch.from_numpy(neg_labels).long().cuda())
# chunk pos_labels and neg_labels using max_len
max_len = max((pos_labels != 0).sum(1).max().data[0],
(neg_labels != 0).sum(1).max().data[0])
pos_labels = pos_labels[:, :max_len]
neg_labels = neg_labels[:, :max_len]
# return
data = {}
data['ref_ids'] = batch_ref_ids
data['ref_ann_ids'] = batch_pos_ann_ids
data['ref_sent_ids'] = batch_pos_sent_ids
data['ref_cxt_ann_ids'] = pos_cxt_ann_ids
data['Feats'] = {'fc7': pos_fc7, 'pool5': pos_pool5, 'lfeats': pos_lfeats, 'dif_lfeats': pos_dif_lfeats,
'cxt_fc7': pos_cxt_fc7, 'cxt_lfeats': pos_cxt_lfeats}
data['labels'] = pos_labels
data['neg_ann_ids'] = batch_neg_ann_ids
data['neg_sent_ids'] = batch_neg_sent_ids
data['neg_Feats'] = {'fc7': neg_fc7, 'pool5': neg_pool5, 'lfeats': neg_lfeats, 'dif_lfeats': neg_dif_lfeats,
'cxt_fc7': neg_cxt_fc7, 'cxt_lfeats': neg_cxt_lfeats}
data['neg_labels'] = neg_labels
data['neg_cxt_ann_ids'] = neg_cxt_ann_ids
data['att_labels'] = att_labels # (num_pos_ann_ids, num_atts)
data['select_ixs'] = select_ixs # variable size
data['bounds'] = {'it_pos_now': self.iterators[split], 'it_max': max_index, 'wrapped': wrapped}
return data
def sample_neg_ids(self, ann_id, seq_per_ref, sample_ratio):
"""Return
- neg_ann_ids : list of ann_ids that are negative to target ann_id
- neg_sent_ids: list of sent_ids that are negative to target ann_id
"""
st_ref_ids, st_ann_ids, dt_ref_ids, dt_ann_ids = self.fetch_neighbour_ids(ann_id)
# neg ann
neg_ann_ids, neg_sent_ids = [], []
for k in range(seq_per_ref):
# neg_ann_id for negative visual representation: mainly from same-type objects
if len(st_ann_ids) > 0 and np.random.uniform(0, 1, 1) < sample_ratio:
neg_ann_id = random.choice(st_ann_ids)
elif len(dt_ann_ids) > 0:
neg_ann_id = random.choice(dt_ann_ids)
else:
neg_ann_id = random.choice(list(self.Anns.keys()))
neg_ann_ids += [neg_ann_id]
# neg_ref_id for negative language representations: mainly from same-type "referred" objects
if len(st_ref_ids) > 0 and np.random.uniform(0, 1, 1) < sample_ratio:
neg_ref_id = random.choice(st_ref_ids)
elif len(dt_ref_ids) > 0:
neg_ref_id = random.choice(dt_ref_ids)
else:
neg_ref_id = random.choice(list(self.Refs.keys()))
neg_sent_id = random.choice(self.Refs[neg_ref_id]['sent_ids'])
neg_sent_ids += [neg_sent_id]
return neg_ann_ids, neg_sent_ids
def fetch_neighbour_ids(self, ref_ann_id):
"""
For a given ref_ann_id, we return
- st_ann_ids: same-type neighbouring ann_ids (not including itself)
- dt_ann_ids: different-type neighbouring ann_ids
Ordered by distance to the input ann_id
"""
ref_ann = self.Anns[ref_ann_id]
x, y, w, h = ref_ann['box']
rx, ry = x+w/2, y+h/2
def compare(ann_id0, ann_id1):
x, y, w, h = self.Anns[ann_id0]['box']
ax0, ay0 = x+w/2, y+h/2
x, y, w, h = self.Anns[ann_id1]['box']
ax1, ay1 = x+w/2, y+h/2
# closer --> former
if (rx-ax0)**2 + (ry-ay0)**2 <= (rx-ax1)**2 + (ry-ay1)**2:
return -1
else:
return 1
image = self.Images[ref_ann['image_id']]
ann_ids = list(image['ann_ids']) # copy in case the raw list is changed
ann_ids = sorted(ann_ids, key=cmp_to_key(compare))
st_ref_ids, st_ann_ids, dt_ref_ids, dt_ann_ids = [], [], [], []
for ann_id in ann_ids:
if ann_id != ref_ann_id:
if self.Anns[ann_id]['category_id'] == ref_ann['category_id']:
st_ann_ids += [ann_id]
if ann_id in self.annToRef:
st_ref_ids += [self.annToRef[ann_id]['ref_id']]
else:
dt_ann_ids += [ann_id]
if ann_id in self.annToRef:
dt_ref_ids += [self.annToRef[ann_id]['ref_id']]
return st_ref_ids, st_ann_ids, dt_ref_ids, dt_ann_ids
def fetch_sent_ids_by_ref_id(self, ref_id, num_sents):
"""
Sample #num_sents sents for each ref_id.
"""
sent_ids = list(self.Refs[ref_id]['sent_ids'])
if len(sent_ids) < num_sents:
append_sent_ids = [random.choice(sent_ids) for _ in range(num_sents - len(sent_ids))]
sent_ids += append_sent_ids
else:
random.shuffle(sent_ids)
sent_ids = sent_ids[:num_sents]
assert len(sent_ids) == num_sents
return sent_ids
def combine_feats(self, feats0, feats1):
feats = {}
for k, v in feats0.items():
feats[k] = torch.cat([feats0[k], feats1[k]])
return feats
def image_to_head(self, image_id):
"""Returns
head: float32 (1, 1024, H, W)
im_info: float32 [[im_h, im_w, im_scale]]
"""
feats_h5 = osp.join(self.head_feats_dir, str(image_id)+'.h5')
feats = h5py.File(feats_h5, 'r')
head, im_info = feats['head'], feats['im_info']
return np.array(head), np.array(im_info)
def fetch_grid_feats(self, boxes, net_conv, im_info):
"""returns
- pool5 (n, 1024, 7, 7)
- fc7 (n, 2048, 7, 7)
"""
pool5, fc7 = self.mrcn.box_to_spatial_fc7(net_conv, im_info, boxes)
return pool5, fc7
def compute_lfeats(self, ann_ids):
# return ndarray float32 (#ann_ids, 5)
lfeats = np.empty((len(ann_ids), 5), dtype=np.float32)
for ix, ann_id in enumerate(ann_ids):
ann = self.Anns[ann_id]
image = self.Images[ann['image_id']]
x, y, w, h = ann['box']
ih, iw = image['height'], image['width']
lfeats[ix] = np.array([[x/iw, y/ih, (x+w-1)/iw, (y+h-1)/ih, w*h/(iw*ih)]], np.float32)
return lfeats
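# Worked example (numbers chosen for illustration): a box [x, y, w, h] = [50, 100, 100, 50]
# in a 500x250 image gives
#   [x/W, y/H, (x+w-1)/W, (y+h-1)/H, w*h/(W*H)] = [0.1, 0.4, 0.298, 0.596, 0.04]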
def compute_dif_lfeats(self, ann_ids, topK=5):
# return ndarray float32 (#ann_ids, 5*topK)
dif_lfeats = np.zeros((len(ann_ids), 5*topK), dtype=np.float32)
for i, ref_ann_id in enumerate(ann_ids):
# reference box
rbox = self.Anns[ref_ann_id]['box']
rcx, rcy, rw, rh = rbox[0]+rbox[2]/2, rbox[1]+rbox[3]/2, rbox[2], rbox[3]
# candidate boxes
_, st_ann_ids, _, _ = self.fetch_neighbour_ids(ref_ann_id)
for j, cand_ann_id in enumerate(st_ann_ids[:topK]):
cbox = self.Anns[cand_ann_id]['box']
cx1, cy1, cw, ch = cbox[0], cbox[1], cbox[2], cbox[3]
dif_lfeats[i, j*5:(j+1)*5] = \
np.array([(cx1-rcx)/rw, (cy1-rcy)/rh, (cx1+cw-rcx)/rw, (cy1+ch-rcy)/rh, cw*ch/(rw*rh)])
return dif_lfeats
def fetch_cxt_feats(self, ann_ids, opt):
"""
Return
- cxt_feats : ndarray (#ann_ids, topK, fc7_dim)
- cxt_lfeats : ndarray (#ann_ids, topK, 5)
- cxt_ann_ids: [[ann_id]] of size (#ann_ids, topK), padded with -1
Note: only neighbouring "different" (and optionally "same") objects are used as context; unused slots are zero-padded.
"""
topK = opt['num_cxt']
cxt_feats = np.zeros((len(ann_ids), topK, self.fc7_dim), dtype=np.float32)
cxt_lfeats = np.zeros((len(ann_ids), topK, 5), dtype=np.float32)
cxt_ann_ids = [[-1 for _ in range(topK)] for _ in range(len(ann_ids))] # (#ann_ids, topK)
for i, ref_ann_id in enumerate(ann_ids):
# reference box
rbox = self.Anns[ref_ann_id]['box']
rcx, rcy, rw, rh = rbox[0]+rbox[2]/2, rbox[1]+rbox[3]/2, rbox[2], rbox[3]
# candidate boxes
_, st_ann_ids, _, dt_ann_ids = self.fetch_neighbour_ids(ref_ann_id)
if opt['with_st'] > 0:
cand_ann_ids = dt_ann_ids + st_ann_ids
else:
cand_ann_ids = dt_ann_ids
cand_ann_ids = cand_ann_ids[:topK]
for j, cand_ann_id in enumerate(cand_ann_ids):
cand_ann = self.Anns[cand_ann_id]
cbox = cand_ann['box']
cx1, cy1, cw, ch = cbox[0], cbox[1], cbox[2], cbox[3]
cxt_lfeats[i, j, :] = np.array([(cx1-rcx)/rw, (cy1-rcy)/rh, (cx1+cw-rcx)/rw, (cy1+ch-rcy)/rh, cw*ch/(rw*rh)])
cxt_feats[i, j, :] = self.feats['ann']['fc7'][cand_ann['h5_id'], :]
cxt_ann_ids[i][j] = cand_ann_id
return cxt_feats, cxt_lfeats, cxt_ann_ids
# weights = 1/sqrt(cnt)
def get_attribute_weights(self, scale=10):
# return weights for each concept, ordered by cpt_ix
cnts = [self.att_to_cnt[self.ix_to_att[ix]] for ix in range(self.num_atts)]
cnts = np.array(cnts)
weights = 1/cnts**0.5
weights = (weights - np.min(weights)) / (np.max(weights) - np.min(weights))
weights = weights * (scale-1) + 1
return torch.from_numpy(weights).float()
def fetch_attribute_label(self, ref_ann_ids):
"""Return
- labels : Variable float (N, num_atts)
- select_ixs: Variable long (n, )
"""
labels = np.zeros((len(ref_ann_ids), self.num_atts))
select_ixs = []
for i, ref_ann_id in enumerate(ref_ann_ids):
ref = self.annToRef[ref_ann_id]
if len(ref['att_wds']) > 0:
select_ixs += [i]
for wd in ref['att_wds']:
labels[i, self.att_to_ix[wd]] = 1
return Variable(torch.from_numpy(labels).float().cuda()), Variable(torch.LongTensor(select_ixs).cuda())
def decode_attribute_label(self, scores):
"""Inputs
- scores: Variable (cuda) (n, num_atts), after sigmoid range [0,1]
- labels: list of [[att, sc], [att, sc], ...]
"""
scores = scores.data.cpu().numpy()
N = scores.shape[0]
labels = []
for i in range(N):
label = []
score = scores[i] # (num_atts, )
for j, sc in enumerate(list(score)):
label += [(self.ix_to_att[j], sc)]
labels.append(label)
return labels
def getAttributeBatch(self, split):
wrapped = False
split_ix = self.split_ix[split]
max_index = len(split_ix) - 1
ri = self.iterators[split]
ri_next = ri+1
if ri_next > max_index:
ri_next = 0
wrapped = True
self.iterators[split] = ri_next
image_id = split_ix[ri]
image = self.Images[image_id]
# fetch head and im_info
head, im_info = self.image_to_head(image_id)
head = Variable(torch.from_numpy(head).cuda())
# fetch ann_ids owning attributes
ref_ids = image['ref_ids']
ann_ids = [self.Refs[ref_id]['ann_id'] for ref_id in ref_ids]
ann_boxes = xywh_to_xyxy(np.vstack([self.Anns[ann_id]['box'] for ann_id in ann_ids]))
pool5, fc7 = self.fetch_grid_feats(ann_boxes, head, im_info) # (#ann_ids, k, 7, 7)
lfeats = self.compute_lfeats(ann_ids)
dif_lfeats = self.compute_dif_lfeats(ann_ids)
# move to Variable
lfeats = Variable(torch.from_numpy(lfeats).cuda())
dif_lfeats = Variable(torch.from_numpy(dif_lfeats).cuda())
# return data
data = {}
data['image_id'] = image_id
data['ref_ids'] = ref_ids
data['ann_ids'] = ann_ids
data['Feats'] = {'pool5': pool5, 'fc7': fc7, 'lfeats': lfeats, 'dif_lfeats': dif_lfeats}
data['bounds'] = {'it_pos_now': self.iterators[split], 'it_max': max_index, 'wrapped': wrapped}
return data
def getSentBatch(self, sent_id, opt):
# Fetch feats according to the sent_id
ref = self.sentToRef[sent_id]
image_id = ref['image_id']
image = self.Images[image_id]
# fetch head and im_info
head, im_info = self.image_to_head(image_id)
head = Variable(torch.from_numpy(head).cuda())
# fetch feats
ann_ids = image['ann_ids']
ann_boxes = xywh_to_xyxy(np.vstack([self.Anns[ann_id]['box'] for ann_id in ann_ids]))
pool5, fc7 = self.fetch_grid_feats(ann_boxes, head, im_info) # (#ann_ids, k, 7, 7)
lfeats = self.compute_lfeats(ann_ids)
dif_lfeats = self.compute_dif_lfeats(ann_ids)
cxt_fc7, cxt_lfeats, cxt_ann_ids = self.fetch_cxt_feats(ann_ids, opt)
labels = np.array([self.fetch_seq(sent_id)]).astype(np.int32)
# move to Variable
lfeats = Variable(torch.from_numpy(lfeats).cuda())
labels = Variable(torch.from_numpy(labels).long().cuda())
dif_lfeats = Variable(torch.from_numpy(dif_lfeats).cuda())
cxt_fc7 = Variable(torch.from_numpy(cxt_fc7).cuda())
cxt_lfeats = Variable(torch.from_numpy(cxt_lfeats).cuda())
# return data
data = {}
data['image_id'] = image_id
data['ann_ids'] = ann_ids
data['cxt_ann_ids'] = cxt_ann_ids
data['Feats'] = {'pool5': pool5, 'fc7': fc7, 'lfeats': lfeats, 'dif_lfeats': dif_lfeats,
'cxt_fc7': cxt_fc7, 'cxt_lfeats': cxt_lfeats}
data['labels'] = labels
return data
def getTestBatch(self, split, opt):
# Fetch feats according to the image_split_ix
# current image
wrapped = False
split_ix = self.split_ix[split]
max_index = len(split_ix) - 1
ri = self.iterators[split]
ri_next = ri+1
if ri_next > max_index:
ri_next = 0
wrapped = True
self.iterators[split] = ri_next
image_id = split_ix[ri]
image = self.Images[image_id]
# fetch head and im_info
head, im_info = self.image_to_head(image_id)
head = Variable(torch.from_numpy(head).cuda())
# fetch feats
ann_ids = image['ann_ids']
ann_boxes = xywh_to_xyxy(np.vstack([self.Anns[ann_id]['box'] for ann_id in ann_ids]))
pool5, fc7 = self.fetch_grid_feats(ann_boxes, head, im_info) # (#ann_ids, k, 7, 7)
lfeats = self.compute_lfeats(ann_ids)
dif_lfeats = self.compute_dif_lfeats(ann_ids)
cxt_fc7, cxt_lfeats, cxt_ann_ids = self.fetch_cxt_feats(ann_ids, opt)
# fetch sents
sent_ids = []
gd_ixs = []
for ref_id in image['ref_ids']:
ref = self.Refs[ref_id]
for sent_id in ref['sent_ids']:
sent_ids += [sent_id]
gd_ixs += [ann_ids.index(ref['ann_id'])]
labels = np.vstack([self.fetch_seq(sent_id) for sent_id in sent_ids])
# move to Variable
lfeats = Variable(torch.from_numpy(lfeats).cuda())
labels = Variable(torch.from_numpy(labels).long().cuda())
dif_lfeats = Variable(torch.from_numpy(dif_lfeats).cuda())
cxt_fc7 = Variable(torch.from_numpy(cxt_fc7).cuda())
cxt_lfeats = Variable(torch.from_numpy(cxt_lfeats).cuda())
# return data
data = {}
data['image_id'] = image_id
data['ann_ids'] = ann_ids
data['cxt_ann_ids'] = cxt_ann_ids
data['sent_ids'] = sent_ids
data['gd_ixs'] = gd_ixs
data['Feats'] = {'pool5': pool5, 'fc7': fc7, 'lfeats': lfeats, 'dif_lfeats': dif_lfeats,
'cxt_fc7': cxt_fc7, 'cxt_lfeats': cxt_lfeats}
data['labels'] = labels
data['bounds'] = {'it_pos_now': self.iterators[split], 'it_max': max_index, 'wrapped': wrapped}
return data
def getImageBatch(self, image_id, sent_ids=None, opt={}):
# fetch head and im_info
image = self.Images[image_id]
head, im_info = self.image_to_head(image_id)
head = Variable(torch.from_numpy(head).cuda())
# fetch feats
ann_ids = image['ann_ids']
ann_boxes = xywh_to_xyxy(np.vstack([self.Anns[ann_id]['box'] for ann_id in ann_ids]))
pool5, fc7 = self.fetch_grid_feats(ann_boxes, head, im_info) # (#ann_ids, k, 7, 7)
lfeats = self.compute_lfeats(ann_ids)
dif_lfeats = self.compute_dif_lfeats(ann_ids)
cxt_fc7, cxt_lfeats, cxt_ann_ids = self.fetch_cxt_feats(ann_ids, opt)
# fetch sents
gd_ixs = []
if sent_ids is None:
sent_ids = []
for ref_id in image['ref_ids']:
ref = self.Refs[ref_id]
for sent_id in ref['sent_ids']:
sent_ids += [sent_id]
gd_ixs += [ann_ids.index(ref['ann_id'])]
else:
# given sent_id, we find the gd_ix
for sent_id in sent_ids:
ref = self.sentToRef[sent_id]
gd_ixs += [ann_ids.index(ref['ann_id'])]
labels = np.vstack([self.fetch_seq(sent_id) for sent_id in sent_ids])
# move to Variable
lfeats = Variable(torch.from_numpy(lfeats).cuda())
labels = Variable(torch.from_numpy(labels).long().cuda())
dif_lfeats = Variable(torch.from_numpy(dif_lfeats).cuda())
cxt_fc7 = Variable(torch.from_numpy(cxt_fc7).cuda())
cxt_lfeats = Variable(torch.from_numpy(cxt_lfeats).cuda())
# return data
data = {}
data['image_id'] = image_id
data['ann_ids'] = ann_ids
data['cxt_ann_ids'] = cxt_ann_ids
data['sent_ids'] = sent_ids
data['gd_ixs'] = gd_ixs
data['Feats'] = {'pool5': pool5, 'fc7': fc7, 'lfeats': lfeats, 'dif_lfeats': dif_lfeats,
'cxt_fc7': cxt_fc7, 'cxt_lfeats': cxt_lfeats}
data['labels'] = labels
return data
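# Minimal usage sketch for this loader (the paths, args and opt values below are
# illustrative assumptions, not part of this file):
#   loader = GtMRCNLoader(data_json='cache/prepro/dataset_splitBy/data.json',
#                         data_h5='cache/prepro/dataset_splitBy/data.h5')
#   loader.prepare_mrcn(head_feats_dir='cache/feats/dataset_splitBy/net_imdb_tag', args=args)
#   loader.loadFeats({'ann': 'cache/feats/dataset_splitBy/ann_feats.h5'})
#   data = loader.getBatch('train', {'batch_size': 5, 'seq_per_ref': 3,
#                                    'visual_sample_ratio': 0.3, 'num_cxt': 5, 'with_st': 1})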
# -*- coding:utf-8 -*-
'''
Compute the maximum profit
author: zhangyu
date: 2020/3/24
'''
from typing import List
class Solution:
def massage(self, nums: List[int]) -> int:
'''
Compute the maximum total profit from non-adjacent appointments
Args:
nums: list of profits
Returns:
the maximum profit
'''
if not nums:
return 0
if len(nums) == 1:
return nums[0]
length = len(nums)
dp = [0] * length
dp[0] = nums[0]
dp[1] = max(nums[0], nums[1])
for i in range(2, length):
dp[i] = max(nums[i] + dp[i - 2], dp[i - 1])
return dp[length - 1]
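# Worked example: for nums = [1, 2, 3, 1] the dp table becomes [1, 2, 4, 4],
# since dp[2] = max(3 + dp[0], dp[1]) = 4 and dp[3] = max(1 + dp[1], dp[2]) = 4,
# so the maximum profit is 4.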
if __name__ == '__main__':
nums = [1, 2, 3, 1]
solution = Solution()
max_profit = solution.massage(nums)
assert max_profit == 4
<reponame>DLR-SC/tigl
from tigl3.geometry import CTiglPointsToBSplineInterpolation
from tigl3.occ_helpers.containers import float_array, int_array, point_array
from OCC.Core.Geom import Geom_BSplineCurve
def interpolate_points(points, params=None, degree=3, close_continuous=False):
"""
Creates a b-spline that passes through the given points
using b-spline interpolation.
:param points: Array of points (numpy array also works!). First dimension over number of points, second must be 3!
:param params: Optional list of parameters (list of floats), at which the points should be interpolated.
This has a strong effect on the final curve shape.
:param degree: Polynomial degree of the resulting b-spline curve.
:param close_continuous: If True, the start and end of the curve will be continuous
(only if the first and last points are equal!)
:return: The resulting curve (Geom_BSplineCurve)
"""
occ_points_array = point_array(points)
if params is None:
interp = CTiglPointsToBSplineInterpolation(occ_points_array, degree, close_continuous)
else:
if len(params) != len(points):
raise RuntimeError("Number of parameters doesn't match number of points")
interp = CTiglPointsToBSplineInterpolation(occ_points_array, params, degree, close_continuous)
curve = interp.curve()
return curve
def bspline_curve(cp, knots, mults, degree):
"""
Creates a BSplineCurve from the control points, knots, multiplicities and degree
:param cp: Array of control points (numpy array also works!). First dimension over number of points, second must be 3!
:param knots: Knot vector (not flattened)
:param mults: Multiplicity vector. Each entry defines the multiplicity of the corresponding knot
:param degree: Polynomial degree of the resulting b-spline curve.
:return: The resulting curve (Geom_BSplineCurve)
"""
assert(len(knots) == len(mults))
assert(degree >= 1)
occ_cp = point_array(cp)
occ_knots = float_array(knots)
occ_mults = int_array(mults)
curve = Geom_BSplineCurve(occ_cp.Array1(), occ_knots.Array1(), occ_mults.Array1(), degree)
return curve
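# Minimal usage sketch (an illustrative assumption, not part of the original module;
# requires tigl3 and pythonocc-core to be installed):
if __name__ == "__main__":
    pnts = [[0.0, 0.0, 0.0], [1.0, 1.0, 0.0], [2.0, 0.0, 0.0], [3.0, 1.0, 0.0]]
    crv = interpolate_points(pnts, degree=3)
    # Geom_BSplineCurve exposes its degree and parameter range
    print("degree:", crv.Degree())
    print("parameter range:", crv.FirstParameter(), crv.LastParameter())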
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for full license information.
import numpy as np
from MiniFramework.EnumDef_6_0 import *
from MiniFramework.Layer import *
class DropoutLayer(CLayer):
def __init__(self, input_size, ratio=0.5):
self.dropout_ratio = ratio # the larger the ratio, the more units are dropped
self.mask = None
self.input_size = input_size
self.output_size = input_size
def forward(self, input, train=True):
assert(input.ndim == 2)
if train:
self.mask = np.random.rand(*input.shape) > self.dropout_ratio
self.z = input * self.mask
else:
self.z = input * (1.0 - self.dropout_ratio)
return self.z
def backward(self, delta_in, idx):
delta_out = self.mask * delta_in
return delta_out
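# Minimal usage sketch (illustrative assumption, not part of the original module):
#   layer = DropoutLayer(input_size=128, ratio=0.5)
#   z_train = layer.forward(x, train=True)    # randomly zeroes ~50% of the units
#   z_infer = layer.forward(x, train=False)   # scales activations by (1 - ratio)
#   delta = layer.backward(delta_in, idx=0)   # gradient flows only through kept units
# where x and delta_in are 2-D numpy arrays of shape (batch, 128).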
from typing import (
List,
Tuple,
)
from hummingbot.strategy.market_symbol_pair import MarketSymbolPair
from hummingbot.strategy.simple_trade import (
SimpleTradeStrategy
)
from hummingbot.strategy.simple_trade.simple_trade_config_map import simple_trade_config_map
def start(self):
try:
order_amount = simple_trade_config_map.get("order_amount").value
order_type = simple_trade_config_map.get("order_type").value
is_buy = simple_trade_config_map.get("is_buy").value
time_delay = simple_trade_config_map.get("time_delay").value
market = simple_trade_config_map.get("market").value.lower()
raw_market_symbol = simple_trade_config_map.get("market_symbol_pair").value.upper()
order_price = None
cancel_order_wait_time = None
if order_type == "limit":
order_price = simple_trade_config_map.get("order_price").value
cancel_order_wait_time = simple_trade_config_map.get("cancel_order_wait_time").value
try:
assets: Tuple[str, str] = self._initialize_market_assets(market, [raw_market_symbol])[0]
except ValueError as e:
self._notify(str(e))
return
market_names: List[Tuple[str, List[str]]] = [(market, [raw_market_symbol])]
self._initialize_wallet(token_symbols=list(set(assets)))
self._initialize_markets(market_names)
self.assets = set(assets)
maker_data = [self.markets[market], raw_market_symbol] + list(assets)
self.market_symbol_pairs = [MarketSymbolPair(*maker_data)]
strategy_logging_options = SimpleTradeStrategy.OPTION_LOG_ALL
self.strategy = SimpleTradeStrategy(market_infos=[MarketSymbolPair(*maker_data)],
order_type=order_type,
order_price=order_price,
cancel_order_wait_time=cancel_order_wait_time,
is_buy=is_buy,
time_delay=time_delay,
order_amount=order_amount,
logging_options=strategy_logging_options)
except Exception as e:
self._notify(str(e))
self.logger().error("Unknown error during initialization.", exc_info=True)
<reponame>xcsp3team/pycsp3
"""
See https://en.wikipedia.org/wiki/Shikaku
See "Shikaku as a Constraint Problem" by <NAME>
Example of Execution:
python3 Shikaku.py -data=Shikaku_grid1.json
"""
from pycsp3 import *
nRows, nCols, rooms = data
nRooms = len(rooms)
def no_overlapping(i, j):
leftmost = i if rooms[i].col <= rooms[j].col else j
rightmost = j if leftmost == i else i
p = r[leftmost] <= l[rightmost]
if rooms[leftmost].row == rooms[rightmost].row:
return p
if rooms[leftmost].row > rooms[rightmost].row:
return p | (t[leftmost] >= b[rightmost])
return p | (b[leftmost] <= t[rightmost])
# l[i] is the position of the left border of the ith room
l = VarArray(size=nRooms, dom=range(nCols + 1))
# r[i] is the position of the right border of the ith room
r = VarArray(size=nRooms, dom=range(nCols + 1))
# t[i] is the position of the top border of the ith room
t = VarArray(size=nRooms, dom=range(nRows + 1))
# b[i] is the position of the bottom border of the ith room
b = VarArray(size=nRooms, dom=range(nRows + 1))
satisfy(
# each room must be surrounded by its borders
[(l[i] <= col, r[i] > col, t[i] <= row, b[i] > row) for i, (row, col, _) in enumerate(rooms)],
# respecting the surface of each room
[(r[i] - l[i]) * (b[i] - t[i]) == val for i, (_, _, val) in enumerate(rooms)],
# rooms must not overlap
[no_overlapping(i, j) for i, j in combinations(range(nRooms), 2)]
)
""" Comments
1) it is also possible to write (but this is less compact):
[l[i] <= rooms[i].col for i in range(nRooms)],
[r[i] > rooms[i].col for i in range(nRooms)],
[t[i] <= rooms[i].row for i in range(nRooms)],
[b[i] > rooms[i].row for i in range(nRooms)],
"""
__copyright__ = """This file is part of SCINE Utilities.
This code is licensed under the 3-clause BSD license.
Copyright ETH Zurich, Laboratory of Physical Chemistry, Reiher Group.
See LICENSE.txt for details.
"""
from conans import ConanFile
class TestPackageConan(ConanFile):
def build(self):
pass
def test(self):
if self.options["scine_xtb_wrapper"].python:
self.output.info("Trying to import 'scine_xtb_wrapper'")
import scine_xtb_wrapper
self.output.info("Import worked")
import numpy as np
from torch import nn
from torch.nn import init
from torch.nn.functional import elu
from braindecode.torch_ext.init import glorot_weight_zero_bias
from braindecode.torch_ext.modules import Expression
from braindecode.torch_ext.util import np_to_var
class EEGNet(object):
"""
EEGNet model from [EEGNet]_.
Notes
-----
This implementation is not guaranteed to be correct, has not been checked
by original authors, only reimplemented from the paper description.
References
----------
.. [EEGNet] <NAME>., <NAME>., <NAME>., Gordon,
<NAME>., <NAME>., & <NAME>. (2016).
EEGNet: A Compact Convolutional Network for EEG-based
Brain-Computer Interfaces.
arXiv preprint arXiv:1611.08024.
"""
def __init__(self, in_chans,
n_classes,
final_conv_length='auto',
input_time_length=None,
pool_mode='max',
second_kernel_size=(2,32),
third_kernel_size=(8,4),
drop_prob=0.25
):
if final_conv_length == 'auto':
assert input_time_length is not None
self.__dict__.update(locals())
del self.self
def create_network(self):
pool_class = dict(max=nn.MaxPool2d, mean=nn.AvgPool2d)[self.pool_mode]
model = nn.Sequential()
n_filters_1 = 16
model.add_module('conv_1', nn.Conv2d(
self.in_chans, n_filters_1, (1, 1), stride=1, bias=True))
model.add_module('bnorm_1', nn.BatchNorm2d(
n_filters_1, momentum=0.01, affine=True, eps=1e-3),)
model.add_module('elu_1', Expression(elu))
# transpose to examples x 1 x (virtual, not EEG) channels x time
model.add_module('permute_1', Expression(lambda x: x.permute(0,3,1,2)))
model.add_module('drop_1', nn.Dropout(p=self.drop_prob))
n_filters_2 = 4
# Keras pads unequal kernels with more padding in front, so padding
# that is slightly too large should be ok.
# Not padding in time so that cropped training makes sense
# https://stackoverflow.com/questions/43994604/padding-with-even-kernel-size-in-a-convolutional-layer-in-keras-theano
model.add_module('conv_2', nn.Conv2d(
1, n_filters_2, self.second_kernel_size, stride=1,
padding=(self.second_kernel_size[0] // 2, 0),
bias=True))
model.add_module('bnorm_2',nn.BatchNorm2d(
n_filters_2, momentum=0.01, affine=True, eps=1e-3),)
model.add_module('elu_2', Expression(elu))
model.add_module('pool_2', pool_class(
kernel_size=(2, 4), stride=(2, 4)))
model.add_module('drop_2', nn.Dropout(p=self.drop_prob))
n_filters_3 = 4
model.add_module('conv_3', nn.Conv2d(
n_filters_2, n_filters_3, self.third_kernel_size, stride=1,
padding=(self.third_kernel_size[0] // 2, 0),
bias=True))
model.add_module('bnorm_3',nn.BatchNorm2d(
n_filters_3, momentum=0.01, affine=True, eps=1e-3),)
model.add_module('elu_3', Expression(elu))
model.add_module('pool_3', pool_class(
kernel_size=(2, 4), stride=(2, 4)))
model.add_module('drop_3', nn.Dropout(p=self.drop_prob))
out = model(np_to_var(np.ones(
(1, self.in_chans, self.input_time_length, 1),
dtype=np.float32)))
n_out_virtual_chans = out.cpu().data.numpy().shape[2]
if self.final_conv_length == 'auto':
n_out_time = out.cpu().data.numpy().shape[3]
self.final_conv_length = n_out_time
model.add_module('conv_classifier', nn.Conv2d(
n_filters_3, self.n_classes,
(n_out_virtual_chans, self.final_conv_length,), bias=True))
model.add_module('softmax', nn.LogSoftmax())
# Transpose back to the logic of braindecode,
# so time is in the third dimension (axis=2)
model.add_module('permute_2', Expression(lambda x: x.permute(0,1,3,2)))
# remove empty dim at end and potentially remove empty time dim
# do not just use squeeze as we never want to remove first dim
def squeeze_output(x):
assert x.size()[3] == 1
x = x[:,:,:,0]
if x.size()[2] == 1:
x = x[:,:,0]
return x
model.add_module('squeeze', Expression(squeeze_output))
glorot_weight_zero_bias(model)
return model
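# Minimal usage sketch (channel/class counts are illustrative assumptions):
#   net = EEGNet(in_chans=22, n_classes=4, input_time_length=500).create_network()
#   out = net(np_to_var(np.ones((1, 22, 500, 1), dtype=np.float32)))
# `out` holds log-probabilities with time in the third dimension (axis=2).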
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import django_tables2 as tables
from rapidsms.contrib.messagelog.models import Message
class MessageTable(tables.Table):
class Meta:
model = Message
exclude = ('id', )
order_by = ('-date', )
attrs = {
'class': 'table table-striped table-bordered table-condensed'
}
<filename>FizzBuzz.py
"""
Created by akiselev on 2019-06-21
"""
#!/usr/bin/python
for num in range(1,21):
string=""
if num % 3 == 0:
string += "fizz"
if num % 5 == 0:
string += "buzz"
if (num % 3 != 0) and (num % 5 != 0) :
string = str(num)
print (string)
<reponame>trevor-ngugi/instagram-clone
# -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2020-02-09 09:03
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('gram', '0002_profile'),
]
operations = [
migrations.AddField(
model_name='image',
name='post_time',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='image',
name='profile',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='gram.Profile'),
preserve_default=False,
),
]
<reponame>bemxio/osu-replay-parser
import lzma
import struct
from datetime import datetime, timezone, timedelta
from typing import List, Optional
import base64
from dataclasses import dataclass
from osrparse.utils import (Mod, GameMode, ReplayEvent, ReplayEventOsu,
ReplayEventCatch, ReplayEventMania, ReplayEventTaiko, Key, KeyMania,
KeyTaiko, LifeBarState)
class _Unpacker:
"""
Helper class for dealing with the ``.osr`` format. Not intended to be used
by consumers.
"""
def __init__(self, replay_data):
self.replay_data = replay_data
self.offset = 0
def string_length(self, binarystream):
result = 0
shift = 0
while True:
byte = binarystream[self.offset]
self.offset += 1
result = result |((byte & 0b01111111) << shift)
if (byte & 0b10000000) == 0x00:
break
shift += 7
return result
def unpack_string(self):
if self.replay_data[self.offset] == 0x00:
self.offset += 1
elif self.replay_data[self.offset] == 0x0b:
self.offset += 1
string_length = self.string_length(self.replay_data)
offset_end = self.offset + string_length
string = self.replay_data[self.offset:offset_end].decode("utf-8")
self.offset = offset_end
return string
else:
raise ValueError("Expected the first byte of a string to be 0x00 "
f"or 0x0b, but got {self.replay_data[self.offset]}")
def unpack_once(self, specifier):
# always use little endian
specifier = f"<{specifier}"
unpacked = struct.unpack_from(specifier, self.replay_data, self.offset)
self.offset += struct.calcsize(specifier)
# `struct.unpack_from` always returns a tuple, even if there's only one
# element
return unpacked[0]
def unpack_timestamp(self):
ticks = self.unpack_once("q")
timestamp = datetime.min + timedelta(microseconds=ticks/10)
timestamp = timestamp.replace(tzinfo=timezone.utc)
return timestamp
def unpack_play_data(self, mode):
replay_length = self.unpack_once("i")
offset_end = self.offset + replay_length
data = self.replay_data[self.offset:offset_end]
data = lzma.decompress(data, format=lzma.FORMAT_AUTO)
data = data.decode("ascii")
(replay_data, rng_seed) = self.parse_replay_data(data, mode)
self.offset = offset_end
return (replay_data, rng_seed)
@staticmethod
def parse_replay_data(replay_data_str, mode):
# remove trailing comma to make splitting easier
replay_data_str = replay_data_str[:-1]
events = [event.split('|') for event in replay_data_str.split(',')]
rng_seed = None
play_data = []
for event in events:
time_delta = int(event[0])
x = event[1]
y = event[2]
keys = int(event[3])
if time_delta == -12345 and event == events[-1]:
rng_seed = keys
continue
if mode is GameMode.STD:
keys = Key(keys)
event = ReplayEventOsu(time_delta, float(x), float(y), keys)
if mode is GameMode.TAIKO:
event = ReplayEventTaiko(time_delta, int(x), KeyTaiko(keys))
if mode is GameMode.CTB:
event = ReplayEventCatch(time_delta, float(x), int(keys) == 1)
if mode is GameMode.MANIA:
event = ReplayEventMania(time_delta, KeyMania(keys))
play_data.append(event)
return (play_data, rng_seed)
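# Example frames after lzma decompression (values are illustrative): a std event is
# encoded as "15|320.5|240.0|1" (time_delta|x|y|keys), and a trailing
# "-12345|0|0|<seed>" frame, when present, carries the rng seed in the keys slot.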
def unpack_replay_id(self):
# old replays had replay_id stored as a 4-byte int instead of an
# 8-byte long, so fall back to the int format if necessary.
# lazer checks against the gameversion before trying to parse as a
# short, but there may be some weirdness with replays that were set
# during this time but downloaded later having actually correct (long)
# replay_ids, since they were likely manually migrated at some point
# after the switch to long took place.
# See:
# https://github.com/ppy/osu/blob/84e1ff79a0736aa6c7a44804b585ab1c54a843
# 99/osu.Game/Scoring/Legacy/LegacyScoreDecoder.cs#L78-L81
try:
replay_id = self.unpack_once("q")
except struct.error:
replay_id = self.unpack_once("l")
return replay_id
def unpack_life_bar(self):
life_bar = self.unpack_string()
if not life_bar:
return None
# remove trailing comma to make splitting easier
life_bar = life_bar[:-1]
states = [state.split("|") for state in life_bar.split(",")]
return [LifeBarState(int(s[0]), float(s[1])) for s in states]
def unpack(self):
mode = GameMode(self.unpack_once("b"))
game_version = self.unpack_once("i")
beatmap_hash = self.unpack_string()
username = self.unpack_string()
replay_hash = self.unpack_string()
count_300 = self.unpack_once("h")
count_100 = self.unpack_once("h")
count_50 = self.unpack_once("h")
count_geki = self.unpack_once("h")
count_katu = self.unpack_once("h")
count_miss = self.unpack_once("h")
score = self.unpack_once("i")
max_combo = self.unpack_once("h")
perfect = self.unpack_once("?")
mods = Mod(self.unpack_once("i"))
life_bar_graph = self.unpack_life_bar()
timestamp = self.unpack_timestamp()
(replay_data, rng_seed) = self.unpack_play_data(mode)
replay_id = self.unpack_replay_id()
return Replay(mode, game_version, beatmap_hash, username,
replay_hash, count_300, count_100, count_50, count_geki, count_katu,
count_miss, score, max_combo, perfect, mods, life_bar_graph,
timestamp, replay_data, replay_id, rng_seed)
class _Packer:
def __init__(self, replay, *, dict_size=None, mode=None):
self.replay = replay
self.dict_size = dict_size or 1 << 21
self.mode = mode or lzma.MODE_FAST
def pack_byte(self, data):
return struct.pack("<B", data)
def pack_short(self, data):
return struct.pack("<H", data)
def pack_int(self, data):
return struct.pack("<I", data)
def pack_long(self, data):
return struct.pack("<Q", data)
def pack_ULEB128(self, data):
# https://github.com/mohanson/leb128
r, i = [], len(data)
while True:
byte = i & 0x7f
i = i >> 7
if (i == 0 and byte & 0x40 == 0) or (i == -1 and byte & 0x40 != 0):
r.append(byte)
return b"".join(map(self.pack_byte, r))
r.append(0x80 | byte)
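# For example, a 300-character string gets the two-byte ULEB128 length prefix
# 0xAC 0x02: 300 = 0b100101100, whose low 7 bits (0x2C) are emitted with the
# continuation bit set (0xAC), followed by the remaining bits 0b10 (0x02).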
def pack_string(self, data):
if data:
return (self.pack_byte(11) + self.pack_ULEB128(data) +
data.encode("utf-8"))
return self.pack_byte(11) + self.pack_byte(0)
def pack_timestamp(self):
# Windows ticks start at year 0001, in contrast to unix time (1970).
# 62135596800 is the number of seconds between these two years and is
# added to account for this difference.
# The factor of 10000000 converts seconds to ticks.
ticks = (62135596800 + self.replay.timestamp.timestamp()) * 10000000
ticks = int(ticks)
return self.pack_long(ticks)
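# For example, the unix epoch (1970-01-01T00:00:00Z) packs to
# (62135596800 + 0) * 10000000 = 621355968000000000 ticks.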
def pack_life_bar(self):
data = ""
if self.replay.life_bar_graph is None:
return self.pack_string(data)
for state in self.replay.life_bar_graph:
life = state.life
# store 0 or 1 instead of 0.0 or 1.0
if int(life) == life:
life = int(state.life)
data += f"{state.time}|{life},"
return self.pack_string(data)
def pack_replay_data(self):
data = ""
for event in self.replay.replay_data:
t = event.time_delta
if isinstance(event, ReplayEventOsu):
data += f"{t}|{event.x}|{event.y}|{event.keys.value},"
elif isinstance(event, ReplayEventTaiko):
data += f"{t}|{event.x}|0|{event.keys.value},"
elif isinstance(event, ReplayEventCatch):
data += f"{t}|{event.x}|0|{int(event.dashing)},"
elif isinstance(event, ReplayEventMania):
data += f"{t}|{event.keys.value}|0|0,"
if self.replay.rng_seed:
data += f"-12345|0|0|{self.replay.rng_seed},"
filters = [
{
"id": lzma.FILTER_LZMA1,
"dict_size": self.dict_size,
"mode": self.mode
}
]
data = data.encode("ascii")
compressed = lzma.compress(data, format=lzma.FORMAT_ALONE,
filters=filters)
return self.pack_int(len(compressed)) + compressed
def pack(self):
r = self.replay
data = b""
data += self.pack_byte(r.mode.value)
data += self.pack_int(r.game_version)
data += self.pack_string(r.beatmap_hash)
data += self.pack_string(r.username)
data += self.pack_string(r.replay_hash)
data += self.pack_short(r.count_300)
data += self.pack_short(r.count_100)
data += self.pack_short(r.count_50)
data += self.pack_short(r.count_geki)
data += self.pack_short(r.count_katu)
data += self.pack_short(r.count_miss)
data += self.pack_int(r.score)
data += self.pack_short(r.max_combo)
data += self.pack_byte(r.perfect)
data += self.pack_int(r.mods.value)
data += self.pack_life_bar()
data += self.pack_timestamp()
data += self.pack_replay_data()
data += self.pack_long(r.replay_id)
return data
@dataclass
class Replay:
"""
A replay found in a ``.osr`` file, or following the osr format. To create a
replay, you likely want ``Replay.from_path``, ``Replay.from_file``, or
``Replay.from_string``.
Attributes
----------
mode: GameMode
The game mode this replay was played on.
game_version: int
The game version this replay was played on.
beatmap_hash: str
The hash of the beatmap this replay was played on.
username: str
The user that played this replay.
replay_hash:
The hash of this replay.
count_300: int
The number of 300 judgments in this replay.
count_100: int
The number of 100 judgments in this replay.
count_50: int
The number of 50 judgments in this replay.
count_geki: int
The number of geki judgments in this replay.
count_katu: int
The number of katu judgments in this replay.
count_miss: int
The number of misses in this replay.
score: int
The score of this replay.
max_combo: int
The maximum combo attained in this replay.
perfect: bool
Whether this replay was perfect or not.
mods: Mod
The mods this replay was played with.
life_bar_graph: Optional[List[LifeBarState]]
The life bar of this replay over time.
replay_data: List[ReplayEvent]
The replay data of the replay, including cursor position and keys
pressed.
replay_id: int
The replay id of this replay, or 0 if not submitted.
rng_seed: Optional[int]
The rng seed of this replay, or ``None`` if not present (typically not
present on older replays).
"""
mode: GameMode
game_version: int
beatmap_hash: str
username: str
replay_hash: str
count_300: int
count_100: int
count_50: int
count_geki: int
count_katu: int
count_miss: int
score: int
max_combo: int
perfect: bool
mods: Mod
life_bar_graph: Optional[List[LifeBarState]]
timestamp: datetime
replay_data: List[ReplayEvent]
replay_id: int
rng_seed: Optional[int]
@staticmethod
def from_path(path):
"""
Creates a new ``Replay`` object from the ``.osr`` file at the given
``path``.
Parameters
----------
path: str or os.PathLike
The path to the osr file to read from.
Returns
-------
Replay
The parsed replay object.
"""
with open(path, "rb") as f:
return Replay.from_file(f)
@staticmethod
def from_file(file):
"""
Creates a new ``Replay`` object from an open file object.
Parameters
----------
file: file-like
The file object to read from.
Returns
-------
Replay
The parsed replay object.
"""
data = file.read()
return Replay.from_string(data)
@staticmethod
def from_string(data):
"""
Creates a new ``Replay`` object from a string containing ``.osr`` data.
Parameters
----------
data: str
The data to parse.
Returns
-------
Replay
The parsed replay object.
"""
return _Unpacker(data).unpack()
def write_path(self, path, *, dict_size=None, mode=None):
"""
Writes the replay to the given ``path``.
Parameters
----------
path: str or os.PathLike
The path to where to write the replay.
Notes
-----
This uses the current values of any attributes, and so can be used to
create an edited version of a replay, by first reading a replay, editing
an attribute, then writing the replay back to its file.
"""
with open(path, "wb") as f:
self.write_file(f, dict_size=dict_size, mode=mode)
def write_file(self, file, *, dict_size=None, mode=None):
"""
Writes the replay to an open file object.
Parameters
----------
file: file-like
The file object to write to.
"""
packed = self.pack(dict_size=dict_size, mode=mode)
file.write(packed)
def pack(self, *, dict_size=None, mode=None):
"""
Returns the text representing this ``Replay``, in ``.osr`` format.
The text returned by this method is suitable for writing to a file as a
valid ``.osr`` file.
Returns
-------
str
The text representing this ``Replay``, in ``.osr`` format.
"""
return _Packer(self, dict_size=dict_size, mode=mode).pack()
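# Minimal round-trip sketch (the file paths are placeholder assumptions):
#   replay = Replay.from_path("example.osr")
#   replay.username = "edited username"
#   replay.write_path("example_edited.osr")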
def parse_replay_data(data_string, *, decoded=False, decompressed=False,
mode=GameMode.STD) -> List[ReplayEvent]:
"""
Parses the replay data portion of a replay from a string. This method is
suitable for use with the replay data returned by api v1's ``/get_replay``
endpoint, for instance.
Parameters
----------
data_string: str or bytes
The replay data to parse.
decoded: bool
Whether ``data_string`` has already been decoded from a b64
representation. Api v1 returns a base 64 encoded string, for instance.
decompressed: bool
Whether ``data_string`` has already been both decompressed from lzma,
and decoded to ascii.
|br|
For instance, the following two calls are equivalent:
```
>>> parse_replay_data(lzma_string, decoded=True)
>>> ...
>>> lzma_string = lzma.decompress(lzma_string).decode("ascii")
>>> parse_replay_data(lzma_string, decompressed=True)
```
|br|
If ``decompressed`` is ``True``, ``decoded`` is automatically set to
``True`` as well (ie, if ``decompressed`` is ``True``, we will assume
``data_string`` is not base 64 encoded).
mode: GameMode
What mode to parse the replay data as.
"""
# assume the data is already decoded if it's been decompressed
if not decoded and not decompressed:
data_string = base64.b64decode(data_string)
if not decompressed:
data_string = lzma.decompress(data_string, format=lzma.FORMAT_AUTO)
data_string = data_string.decode("ascii")
(replay_data, _seed) = _Unpacker.parse_replay_data(data_string, mode)
return replay_data
"""
"""
import time, zlib, re
from calendar import timegm
from .rtlibRepl import minidom
import wx
from WikiExceptions import *
import Consts
from .MiscEvent import MiscEventSourceMixin, KeyFunctionSink
from . import Exporters, Serialization
from . import StringOps
# from ..StringOps import applyBinCompact, getBinCompactForDiff, \
# fileContentToUnicode, BOM_UTF8, formatWxDate
#
# from ..Serialization import serToXmlUnicode, serFromXmlUnicode, serToXmlInt, \
# serFromXmlInt, iterXmlElementFlat
#
# from ..DocPages import AbstractWikiPage
DAMAGED = object()
class TrashBag(object):
"""
A trash bag contains all parts of a wikiword, wikipage content itself and
dependent datablocks (e.g. old versions). It provides also a small subset
of WikiData API to allow read-only access to the items in the bag.
"""
__slots__ = ("trashcan", "trashTimeStamp", "bagId",
"contentStorageMode", "originalUnifiedName", "xmlNode")
def __init__(self, trashcan): # , contentStorageMode = u"single"
self.trashcan = trashcan
self.trashTimeStamp = time.time()
self.bagId = 0 # Invalid, numbers start with 1
# self.contentStorageMode = contentStorageMode
# Unified name of main content before it was trashed.
# Normally this is a "wikipage/..." but the trash bag can contain
# additional items
self.originalUnifiedName = None
self.xmlNode = None
def getTrashcan(self):
return self.trashcan
def getFormattedTrashDate(self, formatStr):
return StringOps.formatWxDate(formatStr, wx.DateTimeFromTimeT(
self.trashTimeStamp))
def serializeOverviewToXmlProd(self, xmlDoc):
"""
Create XML node to contain all overview information (not content)
about this object.
"""
xmlNode = self.xmlNode
if xmlNode is None:
xmlNode = xmlDoc.createElement(u"trashBag")
self.serializeOverviewToXml(xmlNode, xmlDoc)
return xmlNode
def serializeOverviewToXml(self, xmlNode, xmlDoc):
"""
Create XML node to contain all overview information (not content)
about this object.
"""
Serialization.serToXmlInt(xmlNode, xmlDoc, u"bagId", self.bagId,
replace=True)
Serialization.serToXmlUnicode(xmlNode, xmlDoc, u"originalUnifiedName",
self.originalUnifiedName, replace=True)
Serialization.serToXmlUnicode(xmlNode, xmlDoc, u"trashTime", unicode(time.strftime(
"%Y-%m-%d/%H:%M:%S", time.gmtime(self.trashTimeStamp))),
replace=True)
# Serialization.serToXmlUnicode(xmlNode, xmlDoc, u"contentStorageMode",
# self.contentStorageMode, replace=True)
def serializeOverviewFromXml(self, xmlNode):
"""
Set object state from data in xmlNode.
"""
self.xmlNode = xmlNode
self.bagId = Serialization.serFromXmlInt(xmlNode, u"bagId")
self.originalUnifiedName = Serialization.serFromXmlUnicode(xmlNode,
u"originalUnifiedName")
timeStr = Serialization.serFromXmlUnicode(xmlNode, u"trashTime")
self.trashTimeStamp = timegm(time.strptime(timeStr,
"%Y-%m-%d/%H:%M:%S"))
def getPacketUnifiedName(self):
if self.bagId == 0:
return None
else:
return u"trashcan/trashBag/packet/bagId/%s" % self.bagId
def getPacketData(self):
unifName = self.getPacketUnifiedName()
if unifName is None:
return None
return self.trashcan.getWikiDocument().retrieveDataBlock(unifName, None)
def deletePacket(self):
"""
Delete associated packet (if any). Should only be called from Trashcan
"""
unifName = self.getPacketUnifiedName()
if unifName is None:
return
self.trashcan.getWikiDocument().deleteDataBlock(unifName)
self.bagId = 0
class Trashcan(MiscEventSourceMixin):
def __init__(self, wikiDocument):
MiscEventSourceMixin.__init__(self)
self.wikiDocument = wikiDocument
self.trashBags = []
self.trashBagIds = set()
self.xmlNode = None
self.__sinkWikiDoc = KeyFunctionSink((
("changed wiki configuration", self.onChangedWikiConfiguration),
))
self.wikiDocument.getMiscEvent().addListener(self.__sinkWikiDoc)
def getWikiDocument(self):
return self.wikiDocument
def close(self):
self.wikiDocument.getMiscEvent().removeListener(self.__sinkWikiDoc)
def isInDatabase(self):
"""
Can be called before readOverview() to check if the version overview
is already in database.
"""
unifName = u"trashcan/overview"
return self.wikiDocument.retrieveDataBlock(unifName) is not None
def onChangedWikiConfiguration(self, miscEvt):
self._removeOldest()
def _removeOldest(self):
"""
Remove oldest trashbags if there are more in the can than configuration
setting allows
"""
remCount = len(self.trashBags) - self.wikiDocument.getWikiConfig()\
.getint("main", "trashcan_maxNoOfBags", 200)
if remCount <= 0:
return
for bag in self.trashBags[:remCount]:
self.trashBagIds.discard(bag.bagId)
del self.trashBags[:remCount]
def _addTrashBag(self, trashBag):
"""
Adds bag to trashcan. Also checks if there are too many bags according
to settings and removes the oldest one(s). The bag must already have a
unique bagId
"""
assert trashBag.bagId > 0
self.trashBags.append(trashBag)
self.trashBagIds.add(trashBag.bagId)
self._removeOldest()
self.writeOverview()
def storeWikiWord(self, word):
"""
Store wikiword (including versions) in a trash bag and return bag id
"""
bag = TrashBag(self)
for bagId in xrange(1, len(self.trashBagIds) + 2):
if not bagId in self.trashBagIds:
break
else:
raise InternalError(u"Trashcan: No free bagId???")
bag.bagId = bagId
data = Exporters.getSingleWikiWordPacket(self.wikiDocument, word)
self.wikiDocument.storeDataBlock(bag.getPacketUnifiedName(),
data, storeHint=self.getStorageHint())
bag.originalUnifiedName = u"wikipage/" + word
self._addTrashBag(bag)
return bagId
def deleteBag(self, bag):
"""
Deletes bag from trashcan. Only the bagId of the bag parameter
is used so to delete a bag with a particular bagId just create
a "fake" bag and set bagId accordingly.
"""
bagId = bag.bagId
if bag.bagId == 0:
return
for i, tb in enumerate(self.trashBags):
if tb.bagId == bagId:
del self.trashBags[i]
self.trashBagIds.discard(bagId)
tb.deletePacket()
return
def readOverview(self):
"""
Read and decode overview from database. Most functions can be called
only after this was called (exception: isInDatabase())
"""
unifName = u"trashcan/overview"
content = self.wikiDocument.retrieveDataBlock(unifName, default=DAMAGED)
if content is DAMAGED:
raise Exception(_(u"Trashcan data damaged")) # TODO: Specific exception
elif content is None:
self.trashBags = []
self.trashBagIds = set()
self.xmlNode = None
return
xmlDoc = minidom.parseString(content)
xmlNode = xmlDoc.firstChild
self.serializeFromXml(xmlNode)
# def getDependentDataBlocks(self):
# assert not self.isInvalid()
#
# unifiedPageName = self.basePage.getUnifiedPageName()
#
# result = [u"versioning/overview/" + unifiedPageName]
#
# for entry in self.versionEntries:
# result.append(u"versioning/packet/versionNo/%s/%s" % (entry.versionNumber,
# unifiedPageName))
#
# return result
@staticmethod
def deleteBrokenData(wikiDocument):
"""
Delete all trashcan data in case existing data is broken and can't
be deleted in regular ways.
"""
dataBlocks = wikiDocument.getDataBlockUnifNamesStartingWith(
u"trashcan/")
for db in dataBlocks:
wikiDocument.deleteDataBlock(db)
def writeOverview(self):
unifName = u"trashcan/overview"
if len(self.trashBags) == 0:
self.wikiDocument.deleteDataBlock(unifName)
return
xmlDoc = minidom.getDOMImplementation().createDocument(None, None, None)
xmlNode = self.serializeToXmlProd(xmlDoc)
xmlDoc.appendChild(xmlNode)
content = xmlDoc.toxml("utf-8")
self.wikiDocument.storeDataBlock(unifName, content,
storeHint=self.getStorageHint())
def clear(self):
"""
Delete all data from trashcan (called when user empties trashcan)
"""
self.trashBags = []
self.trashBagIds = set()
self.xmlNode = None
self.deleteBrokenData(self.wikiDocument)
def getTrashBags(self):
return self.trashBags
def getStorageHint(self):
"""
Return appropriate storage hint according to option settings.
"""
if self.wikiDocument.getWikiConfig().getint("main",
"trashcan_storageLocation", 0) != 1:
return Consts.DATABLOCK_STOREHINT_INTERN
else:
return Consts.DATABLOCK_STOREHINT_EXTERN
# @staticmethod
# def decodeContent(encContent, encoding):
# if encoding is None:
# return encContent
# if encoding == "zlib":
# return zlib.decompress(encContent)
#
# @staticmethod
# def encodeContent(content, encoding):
# if encoding is None:
# return content
# if encoding == "zlib":
# return zlib.compress(content)
def serializeToXmlProd(self, xmlDoc):
"""
Create XML node to contain all information about this object.
"""
xmlNode = self.xmlNode
if xmlNode is None:
xmlNode = xmlDoc.createElement(u"trashcanOverview")
self.serializeToXml(xmlNode, xmlDoc)
return xmlNode
def serializeToXml(self, xmlNode, xmlDoc):
"""
Modify XML node to contain all information about this object.
"""
xmlNode.setAttribute(u"formatVersion", u"0")
xmlNode.setAttribute(u"readCompatVersion", u"0")
xmlNode.setAttribute(u"writeCompatVersion", u"0")
for xmlEntry in Serialization.iterXmlElementFlat(xmlNode, u"trashBag"):
xmlNode.removeChild(xmlEntry)
for entry in self.trashBags:
entryNode = entry.serializeOverviewToXmlProd(xmlDoc)
xmlNode.appendChild(entryNode)
def serializeFromXml(self, xmlNode):
"""
Set object state from data in xmlNode.
"""
formatVer = int(xmlNode.getAttribute(u"writeCompatVersion"))
if formatVer > 0:
raise SerializationException("Wrong version no. %s for trashcan overview" %
formatVer)
self.xmlNode = xmlNode
trashBags = []
trashBagIds = set()
for xmlEntry in Serialization.iterXmlElementFlat(xmlNode, u"trashBag"):
entry = TrashBag(self)
entry.serializeOverviewFromXml(xmlEntry)
trashBags.append(entry)
trashBagIds.add(entry.bagId)
# Order trash bags by trash date
trashBags.sort(key=lambda entry: entry.trashTimeStamp)
self.trashBags = trashBags
self.trashBagIds = trashBagIds
# def getVersionContentRaw(self, versionNumber):
# if len(self.trashBags) == 0:
# raise InternalError(u"Tried to retrieve non-existing "
# u"version number %s from empty list." % versionNumber)
#
# if versionNumber == -1:
# versionNumber = self.trashBags[-1].versionNumber
#
# base = None
# workList = []
# for i in xrange(len(self.trashBags) - 1, -1, -1):
# entry = self.trashBags[i]
# if entry.contentDifferencing == u"complete":
# workList = []
# base = entry
# else:
# workList.append(entry)
#
# if entry.versionNumber == versionNumber:
# break
# else:
# raise InternalError(u"Tried to retrieve non-existing "
# u"version number %s." % versionNumber)
#
# if base is None:
# raise InternalError(u"No base version found for getVersionContent(%s)" %
# versionNumber)
#
# unifName = u"versioning/packet/versionNo/%s/%s" % (base.versionNumber,
# self.basePage.getUnifiedPageName())
#
# content = self.wikiDocument.retrieveDataBlock(unifName, default=DAMAGED)
# if content is DAMAGED:
# raise VersioningException(_(u"Versioning data damaged"))
# elif content is None:
# raise InternalError(u"Tried to retrieve non-existing "
# u"packet for version number %s" % versionNumber)
#
# content = self.decodeContent(content, entry.contentEncoding)
#
# for entry in workList:
# unifName = u"versioning/packet/versionNo/%s/%s" % (entry.versionNumber,
# self.basePage.getUnifiedPageName())
# packet = self.wikiDocument.retrieveDataBlock(unifName, default=None)
# if content is DAMAGED:
# raise VersioningException(_(u"Versioning data damaged"))
# elif content is None:
# raise InternalError(u"Tried to retrieve non-existing "
# u"packet for version number %s" % versionNumber)
#
#
# content = applyBinCompact(content, packet)
#
# return content
#
#
# def getVersionContent(self, versionNumber):
# return fileContentToUnicode(self.getVersionContentRaw(versionNumber))
#
#
# def addVersion(self, content, entry):
# """
# entry.versionNumber is assumed invalid and will be filled by this function.
# """
# if isinstance(content, unicode):
# content = BOM_UTF8 + content.encode("utf-8")
# assert isinstance(content, str)
#
# completeStep = max(self.wikiDocument.getWikiConfig().getint("main",
# "versioning_completeSteps", 10), 0)
#
# if completeStep == 0:
# asRevDiff = True
# else:
# if len(self.trashBags) < completeStep:
# asRevDiff = True
# else:
# asRevDiff = False
# for e in reversed(self.trashBags[-completeStep:-1]):
# if e.contentDifferencing == "complete":
# asRevDiff = True
# break
#
# self.maxVersionNumber += 1
# newHeadVerNo = self.maxVersionNumber
#
# newHeadUnifName = u"versioning/packet/versionNo/%s/%s" % \
# (newHeadVerNo, self.basePage.getUnifiedPageName())
#
# self.wikiDocument.storeDataBlock(newHeadUnifName, content,
# storeHint=self.getStorageHint())
#
# entry.versionNumber = newHeadVerNo
# entry.contentDifferencing = "complete"
# entry.contentEncoding = None
# self.trashBags.append(entry)
#
# if len(self.trashBags) > 1:
# if asRevDiff:
# prevHeadEntry = self.trashBags[-2]
# prevHeadContent = self.getVersionContentRaw(prevHeadEntry.versionNumber)
#
# unifName = u"versioning/packet/versionNo/%s/%s" % (prevHeadEntry.versionNumber,
# self.basePage.getUnifiedPageName())
# diffPacket = getBinCompactForDiff(content, prevHeadContent)
#
# if len(diffPacket) < len(prevHeadContent):
# prevHeadEntry.contentDifferencing = "revdiff"
# prevHeadEntry.contentEncoding = None
# self.wikiDocument.storeDataBlock(unifName, diffPacket,
# storeHint=self.getStorageHint())
#
# self.fireMiscEventKeys(("appended version", "changed version overview"))
#
#
# def deleteVersion(self, versionNumber):
# if len(self.trashBags) == 0:
# raise InternalError("Non-existing version %s to delete (empty list)." %
# versionNumber)
#
# if versionNumber == -1:
# versionNumber = self.trashBags[-1].versionNumber
#
# if versionNumber == self.trashBags[0].versionNumber:
# # Delete oldest
# unifName = u"versioning/packet/versionNo/%s/%s" % (versionNumber,
# self.basePage.getUnifiedPageName())
#
# self.wikiDocument.deleteDataBlock(unifName)
# del self.trashBags[0]
# self.fireMiscEventKeys(("deleted version", "changed version overview"))
#
# return
#
# if versionNumber == self.trashBags[-1].versionNumber:
# # Delete newest
#
# # We can assume here that len(self.trashBags) >= 2 otherwise
# # previous "if" would have been true.
#
# prevHeadEntry = self.trashBags[-2]
# newContent = self.getVersionContentRaw(prevHeadEntry.versionNumber)
#
# unifName = u"versioning/packet/versionNo/%s/%s" % (prevHeadEntry.versionNumber,
# self.basePage.getUnifiedPageName())
# prevHeadEntry.contentDifferencing = "complete"
# self.wikiDocument.storeDataBlock(unifName, newContent,
# storeHint=self.getStorageHint())
#
# unifName = u"versioning/packet/versionNo/%s/%s" % (versionNumber,
# self.basePage.getUnifiedPageName())
# self.wikiDocument.deleteDataBlock(unifName)
# del self.trashBags[-1]
# self.fireMiscEventKeys(("deleted version", "changed version overview"))
#
# return
#
# # Delete some version in-between: Not supported yet.
# raise InternalError("In-between version %s to delete." %
# versionNumber)
# class WikiPageSnapshot(AbstractWikiPage):
# def __init__(self, wikiDocument, baseWikiPage, versionNo):
# AbstractWikiPage.__init__(self, wikiDocument, baseWikiPage.getWikiWord())
#
# self.baseWikiPage = baseWikiPage
# self.versionNumber = versionNo
#
# self.content = self.baseWikiPage.getVersionOverview().getVersionContent(
# versionNo)
#
#
# def getSnapshotBaseDocPage(self):
# return self.baseWikiPage
#
# def getSnapshotVersionNumber(self):
# return self.versionNumber
#
#
# def getContent(self):
# return self.content
#
#
# def getUnifiedPageName(self):
# if self.versionNumber == 0:
# return None
#
# return u"versioning/version/versionNo/%s/%s" % (self.versionNumber,
# self.baseWikiPage.getWikiWord())
#
#
# def isReadOnlyEffect(self):
# """
# Return true if page is effectively read-only, this means
# "for any reason", regardless if error or intention.
# """
# return True
#
#
# def getVersionOverview(self):
# return self.baseWikiPage.getVersionOverview()
#
# def getExistingVersionOverview(self):
# return self.baseWikiPage.getExistingVersionOverview()
#
# def setPresentation(self, data, startPos):
# """
# Set (a part of) the presentation tuple. This is silently ignored
# if the "write access failed" or "read access failed" flags are
# set in the wiki document.
# data -- tuple with new presentation data
# startPos -- start position in the presentation tuple which should be
# overwritten with data.
# """
# pass # TODO?
|
StarcoderdataPython
|
774
|
<reponame>zhiqwang/mmdeploy
_base_ = ['../_base_/base_tensorrt_static-300x300.py']
|
StarcoderdataPython
|
93209
|
# Import the converted model's class
import numpy as np
import random
import tensorflow as tf
from tensorflow.python.ops import rnn, rnn_cell
from posenet import GoogLeNet as PoseNet
import cv2
from tqdm import tqdm
import math
batch_size = 75
max_iterations = 30000
# Set this path to your project directory
path = 'path_to_project/'
# Set this path to your dataset directory
directory = 'path_to_datasets/KingsCollege/'
dataset = 'dataset_test.txt'
class datasource(object):
def __init__(self, images, poses):
self.images = images
self.poses = poses
class vecsource(object):
def __init__(self, vecs, poses):
self.vecs = vecs
self.poses = poses
def centeredCrop(img, output_side_length):
height, width, depth = img.shape
new_height = output_side_length
new_width = output_side_length
if height > width:
new_height = output_side_length * height / width
else:
new_width = output_side_length * width / height
height_offset = (new_height - output_side_length) / 2
width_offset = (new_width - output_side_length) / 2
cropped_img = img[height_offset:height_offset + output_side_length,
width_offset:width_offset + output_side_length]
return cropped_img
def preprocess(images):
images_out = [] #final result
#Resize and crop and compute mean!
images_cropped = []
for i in tqdm(range(len(images))):
X = cv2.imread(images[i])
X = cv2.resize(X, (455, 256))
X = centeredCrop(X, 224)
images_cropped.append(X)
#compute images mean
N = 0
mean = np.zeros((1, 3, 224, 224))
for X in tqdm(images_cropped):
X = np.transpose(X,(2,0,1))
mean[0][0] += X[:,:,0]
mean[0][1] += X[:,:,1]
mean[0][2] += X[:,:,2]
N += 1
mean[0] /= N
#Subtract mean from all images
for X in tqdm(images_cropped):
X = np.transpose(X,(2,0,1))
X = X - mean
X = np.squeeze(X)
X = np.transpose(X, (1,2,0))
Y = np.expand_dims(X, axis=0)
images_out.append(Y)
return images_out
def get_data():
poses = []
images = []
with open(directory+dataset) as f:
next(f) # skip the 3 header lines
next(f)
next(f)
for line in f:
fname, p0,p1,p2,p3,p4,p5,p6 = line.split()
p0 = float(p0)
p1 = float(p1)
p2 = float(p2)
p3 = float(p3)
p4 = float(p4)
p5 = float(p5)
p6 = float(p6)
poses.append((p0,p1,p2,p3,p4,p5,p6))
images.append(directory+fname)
images = preprocess(images)
return datasource(images, poses)
def gen_data(source):
while True:
indices = range(len(source.images))
random.shuffle(indices)
for i in indices:
image = source.images[i]
pose_x = source.poses[i][0:3]
pose_q = source.poses[i][3:7]
yield image, pose_x, pose_q
def gen_data_batch(source):
data_gen = gen_data(source)
while True:
image_batch = []
pose_x_batch = []
pose_q_batch = []
for _ in range(batch_size):
image, pose_x, pose_q = next(data_gen)
image_batch.append(image)
pose_x_batch.append(pose_x)
pose_q_batch.append(pose_q)
yield np.array(image_batch), np.array(pose_x_batch), np.array(pose_q_batch)
def main():
image = tf.placeholder(tf.float32, [1, 224, 224, 3])
datasource = get_data()
results = np.zeros((len(datasource.images),2))
net = PoseNet({'data': image})
p3_x = net.layers['cls3_fc_pose_xyz']
p3_q = net.layers['cls3_fc_pose_wpqr']
init = tf.initialize_all_variables()
outputFile = "PoseNet.ckpt"
saver = tf.train.Saver()
with tf.Session() as sess:
# Load the data
sess.run(init)
saver.restore(sess, path + 'PoseNet.ckpt')
data_gen = gen_data_batch(datasource)
for i in range(len(datasource.images)):
np_image = datasource.images[i]
feed = {image: np_image}
pose_q= np.asarray(datasource.poses[i][3:7])
pose_x= np.asarray(datasource.poses[i][0:3])
predicted_x, predicted_q = sess.run([p3_x, p3_q], feed_dict=feed)
pose_q = np.squeeze(pose_q)
pose_x = np.squeeze(pose_x)
predicted_q = np.squeeze(predicted_q)
predicted_x = np.squeeze(predicted_x)
#Compute Individual Sample Error
q1 = pose_q / np.linalg.norm(pose_q)
q2 = predicted_q / np.linalg.norm(predicted_q)
d = abs(np.sum(np.multiply(q1,q2)))
theta = 2 * np.arccos(d) * 180/math.pi
error_x = np.linalg.norm(pose_x-predicted_x)
results[i,:] = [error_x,theta]
print 'Iteration: ', i, ' Error XYZ (m): ', error_x, ' Error Q (degrees): ', theta
median_result = np.median(results,axis=0)
print 'Median error ', median_result[0], 'm and ', median_result[1], 'degrees.'
if __name__ == '__main__':
main()
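# Hedged illustration (added; not part of the original script): the rotational
# error computed in main() is the angle between unit quaternions,
# theta = 2 * arccos(|<q1, q2>|), reported in degrees. The helper below wraps
# that formula for clarity; identical quaternions give 0 degrees.
def quaternion_angle_deg(q1, q2):
    q1 = np.asarray(q1, dtype=np.float64)
    q2 = np.asarray(q2, dtype=np.float64)
    q1 = q1 / np.linalg.norm(q1)
    q2 = q2 / np.linalg.norm(q2)
    d = abs(np.sum(np.multiply(q1, q2)))
    return 2 * np.arccos(min(d, 1.0)) * 180 / math.pi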
|
StarcoderdataPython
|
1643300
|
ram=int(input("enter the RAM size"))
tb=int(input("enter the size of the RAM blocks"))
y=int(ram/tb)
print("the value of Y is:",y)
if(ram % tb != 0):
    y= y + 1
    print("the value of Y is:",y)
datos={}
i=0
bloq=0
while i<=y:
    aux=bloq
    tar=input("enter the task name")
    bloq= int(input("enter the number of blocks for the task"))
    bloq=i+bloq
    if(bloq<=y):
        for i in range(i,bloq):
            datos[i]=tar
        i=bloq
        print("value of i "+str(i))
    else :
        print("The task could not be added")
        i=bloq
print(datos)
band1 = 1
j=0
print("y "+str(y))
while band1==1:
    tare = input("enter the task to delete")
    for j in range(j,y):
        print("j="+str(j)+" element:"+datos[j])
        if datos[j] == tare:
            print(tare)
            datos[j]=""
        else:
            print("else"+datos[j])
    j=0
    #i=bloq+1
    print(datos)
    band1 = 0
|
StarcoderdataPython
|
3354513
|
<reponame>raface/python-bizdays_calendar
class BizdaysException(Exception):
pass
class FormattingException(Exception):
pass
class FileException(Exception):
pass
class ConfigException(Exception):
pass
class ConnectionException(Exception):
pass
|
StarcoderdataPython
|
17892
|
## Copyright 2014 Cognitect. All Rights Reserved.
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS-IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
import itertools
from transit.pyversion import imap, izip
def mapcat(f, i):
return itertools.chain.from_iterable(imap(f, i))
def pairs(i):
return izip(*[iter(i)] * 2)
cycle = itertools.cycle
def take(n, i):
return itertools.islice(i, 0, n)
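# Hedged usage sketch (added; not part of the original module). Assuming the
# imap/izip shims above behave like map/zip, the helpers compose as follows:
if __name__ == "__main__":
    assert list(mapcat(lambda x: (x, x), [1, 2])) == [1, 1, 2, 2]
    assert list(pairs([1, 2, 3, 4])) == [(1, 2), (3, 4)]
    assert list(take(3, cycle(["a", "b"]))) == ["a", "b", "a"]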
|
StarcoderdataPython
|
3205708
|
<filename>src/hepqpr/qallse/seeding/storage.py
from .utils import *
class DoubletInfo:
"""
    Holds information about the doublets of a given spacepoint when it is considered as the middle spacepoint.
    The doubletCounting step writes its output here.
"""
def __init__(self, nbSpacepoints):
# number of inner doublet (other spacepoint has a smaller radius)
self.nInner = np.zeros(nbSpacepoints, dtype=np.int64)
# number of outer doublet (other spacepoint has a bigger radius)
self.nOuter = np.zeros(nbSpacepoints, dtype=np.int64)
# True if at least one inner and one outer doublet, false otherwise
self.good = np.zeros(nbSpacepoints, dtype=np.bool8)
class DoubletStorage:
"""
Store the doublets generated by the algorithm for a given event
"""
def __init__(self):
self.inner = []
self.outer = []
def make_dataframe(self):
self.doublets = pd.DataFrame({'inner': self.inner, 'outer': self.outer})
class SpacepointLayerRange:
"""
Class holding the startIndex and endIndex of each layer for each phi slice in the object SpacepointStorage
"""
def __init__(self, nLayers):
# starting idx of each layer for a given phi slice
self.layerBegin = np.zeros(nLayers, dtype=np.int64)
# one element after the last idx
self.layerEnd = np.zeros(nLayers, dtype=np.int64)
class SpacepointStorage:
"""
    Class holding all the spacepoints and information about the phi slices
"""
def __init__(self, spacepoints, config):
"""
Init the spacepoints storage. Spacepoints are expected to be given as a pandas dataframe.
Only spacepoints from a single event and modules/layers according to the DetectorModel should be given
"""
# Contains the type of the spacepoints (Pixel = true, SCT = False)
self.type = np.zeros(spacepoints.shape[0], dtype=np.bool8)
# Contains the z coordinate of the spacepoints
self.z = np.zeros(spacepoints.shape[0])
# Contains the r coordinate of the spacepoints
self.r = np.zeros(spacepoints.shape[0])
# Contains the x coordinate of the spacepoints
self.x = np.zeros(spacepoints.shape[0])
# Contains the y coordinate of the spacepoints
self.y = np.zeros(spacepoints.shape[0])
# Contains the hit_id of the spacepoints
self.hit_id = np.zeros(spacepoints.shape[0])
        # id given to a spacepoint, used only for computing efficiency between the standard and modified seeding;
        # it is not needed for the seeding itself
self.idsp = np.zeros(spacepoints.shape[0], dtype=np.int64)
        # covariance placeholders; proper filling of these arrays is not implemented
self.covZ = np.ones(spacepoints.shape[0]) * 100
self.covR = np.ones(spacepoints.shape[0]) * 100
# start/end index of each phi slice/layer
self.phiSlices = [SpacepointLayerRange(config.nLayers) for _ in range(config.nPhiSlices)]
spacepoints['phi'] = calc_phi(spacepoints['x'], spacepoints['y'])
spacepoints['bin_phi'] = scale_phi(spacepoints['phi'], config.nPhiSlices)
spacepoints['r'] = calc_r(spacepoints['x'], spacepoints['y'])
self.module_ids = np.zeros(spacepoints.shape[0])
# define the position of a given detector layer
layNoToIdx = {2: 0, 4: 1, 6: 2, 8: 3}
volToOffset = {8: 0, 13: 4, 17: 8}
crtIdx = 0
for (sliceid, volid, layid), df in spacepoints.groupby(['bin_phi', 'volume_id', 'layer_id'], sort=False):
layIdx = volToOffset[volid] + layNoToIdx[layid]
nbHits = df.shape[0]
self.phiSlices[sliceid].layerBegin[layIdx] = crtIdx
nextIdx = crtIdx + nbHits
self.phiSlices[sliceid].layerEnd[layIdx] = nextIdx
pixel = df['volume_id'] == 8
self.type[crtIdx:nextIdx] = pixel.values
self.x[crtIdx:nextIdx] = df['x'].values
self.y[crtIdx:nextIdx] = df['y'].values
self.z[crtIdx:nextIdx] = df['z'].values
self.r[crtIdx:nextIdx] = df['r'].values
self.hit_id[crtIdx:nextIdx] = df['hit_id'].values
self.idsp[crtIdx:nextIdx] = df['hit_id'].values
self.module_ids[crtIdx:nextIdx] = df['module_id'].values
crtIdx = nextIdx
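# Hedged usage sketch (added; not part of the original module): the phi-slice /
# layer bookkeeping above stores, for every (slice, layer) pair, a contiguous
# index range into the flat coordinate arrays. A helper like the one below
# (illustrative only) would return the hit ids of one layer in one phi slice.
def hits_in_slice_layer(storage, slice_idx, layer_idx):
    rng = storage.phiSlices[slice_idx]
    begin = rng.layerBegin[layer_idx]
    end = rng.layerEnd[layer_idx]
    return storage.hit_id[begin:end]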
|
StarcoderdataPython
|
4833397
|
<reponame>zywek123/accessible_output2<filename>build/lib/accessible_output2/platform_utils/paths.py
import inspect
import platform
import os
import sys
from functools import wraps
def merge_paths(func):
@wraps(func)
def merge_paths_wrapper(*a, **k):
return unicode(os.path.join(func(**k), *a))
return merge_paths_wrapper
def windows_path(path_id):
import ctypes
path = ctypes.create_unicode_buffer(260)
if ctypes.windll.shell32.SHGetFolderPathW(0, path_id, 0, 0, ctypes.byref(path)) != 0:
raise ctypes.WinError()
return path.value
def app_data_path(app_name=None):
"""Cross-platform method for determining where to put application data."""
"""Requires the name of the application"""
plat = platform.system()
if plat == 'Windows':
path = windows_path(0x01a)
elif plat == 'Darwin':
path = os.path.join(os.path.expanduser('~'), 'Library', 'Application Support')
elif plat == 'Linux':
path = os.path.expanduser('~')
app_name = '.%s' % app_name.replace(' ', '_')
return os.path.join(path, app_name)
def prepare_app_data_path(app_name):
"""Creates the application's data directory, given its name."""
dir = app_data_path(app_name)
if not os.path.exists(dir):
os.mkdir(dir)
def is_frozen():
"""Return a bool indicating if application is compressed"""
import imp
return hasattr(sys, 'frozen') or imp.is_frozen("__main__")
def get_executable():
if is_frozen():
return sys.executable
return sys.argv[0]
def get_module():
return inspect.stack()[2][1]
def app_path():
"""Always determine the path to the main module, even when run with py2exe or otherwise frozen"""
return os.path.abspath(os.path.dirname(get_executable()))
def module_path():
return os.path.abspath(os.path.dirname(get_module()))
def executable_path():
return os.path.join(app_path(), get_executable())
def ensure_path(path):
if not os.path.exists(path):
os.makedirs(path)
def documents_path():
"""On windows, returns the path to My Documents. On OSX, returns the user's Documents folder. For anything else, returns the user's home directory."""
plat = platform.system()
if plat == 'Windows':
return windows_path(0x005)
elif plat == 'Darwin':
path = os.path.join(os.path.expanduser('~'), 'Documents')
else:
path = os.path.expanduser('~')
return path
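# Hedged usage sketch (added; not part of the original module): typical calls when
# an application needs a writable settings directory and the user's documents
# folder. "Demo App" is a placeholder application name.
if __name__ == "__main__":
    prepare_app_data_path("Demo App")          # e.g. ~/.demo_app on Linux
    print(app_data_path(app_name="Demo App"))
    print(documents_path())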
|
StarcoderdataPython
|
1763487
|
<filename>api/tacticalrmm/accounts/migrations/0007_update_agent_primary_key.py<gh_stars>100-1000
# Generated by Django 3.1.2 on 2020-11-01 22:54
from django.db import migrations
def link_agents_to_users(apps, schema_editor):
Agent = apps.get_model("agents", "Agent")
User = apps.get_model("accounts", "User")
for agent in Agent.objects.all():
user = User.objects.filter(username=agent.agent_id).first()
if user:
user.agent = agent
user.save()
class Migration(migrations.Migration):
dependencies = [
("accounts", "0006_user_agent"),
]
operations = [
migrations.RunPython(link_agents_to_users, migrations.RunPython.noop),
]
|
StarcoderdataPython
|
1740055
|
<reponame>ipendlet/ord-schema
# Copyright 2020 Open Reaction Database Project Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates reaction-related badges for ord-data."""
import glob
import os
import requests
from absl import app
from absl import flags
from absl import logging
from ord_schema.proto import dataset_pb2
FLAGS = flags.FLAGS
flags.DEFINE_string('root', None, 'ORD root.')
flags.DEFINE_string('output', None, 'Output SVG filename.')
def main(argv):
del argv # Only used by app.run().
num_reactions = 0
for filename in glob.glob(os.path.join(FLAGS.root, '*', '*.pb')):
with open(filename, 'rb') as f:
dataset = dataset_pb2.Dataset.FromString(f.read())
logging.info('%s:\t%d', filename, len(dataset.reactions))
num_reactions += len(dataset.reactions)
args = {
'label': 'Reactions',
'message': num_reactions,
'color': 'informational',
}
response = requests.get('https://img.shields.io/static/v1', params=args)
with open(FLAGS.output, 'w') as f:
f.write(response.text)
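# Hedged note (added): with the args above, the GET request resolves to a URL of
# the form https://img.shields.io/static/v1?label=Reactions&message=<N>&color=informational
# and the response body is the badge SVG that gets written to FLAGS.output.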
if __name__ == '__main__':
flags.mark_flags_as_required(['root', 'output'])
app.run(main)
|
StarcoderdataPython
|
3351449
|
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 6 10:13:52 2018
@author: slauniai
DEMO HOW TO RUN POINT-SCALE MODEL FOR A SINGLE OR MULTIPLE SITES.
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# import model and functions to read data
from spafhy_point import SpaFHy_point
from spafhy_io import read_HydeDaily # , read_FMI_weather
eps = np.finfo(float).eps # machine epsilon
"""
Select demo version:
1 - run for one site,
2 = several runs with varying parameters. reproduce Fig. 3
3 = as demo 1 but for 3 different soil types
4 = as demo 3 but in single call
"""
demo = 1
#%%
""" demo 1: """
if demo == 1:
# set up model for single site; here use FIHy as example
from spafhy_parameters import parameters_FIHy
# read parameters
pgen, pcpy, pbu = parameters_FIHy()
# read forcing data to dataframe
dat, FORC = read_HydeDaily(pgen['forcing_file'])
FORC['Prec'] = FORC['Prec'] / pgen['dt'] # mms-1
FORC['T'] = FORC['Ta'].copy()
# np.array cmask is needed to apply model components at a single point
cmask = np.ones(1)
# create model instance
model = SpaFHy_point(pgen, pcpy, pbu, FORC, cmask=cmask,
cpy_outputs=True, bu_outputs=True)
# simple model run with default parameters:
Nsteps = len(model.FORC) # length of forcing
model._run(0, Nsteps) # calls class function _run to execute model from t=0 to t=Nsteps
    # during the model run, results are stored in two dictionaries:
    # canopy grid outputs: model.cpy.results
    # bucket grid outputs: model.bu.results
# extract them, convert to dataframe and save to csv
cres = model.cpy.results
bres = model.bu.results
del bres['ET'] # also bucket returns ET but in [m] so remove key
res = {**cres, **bres} # combine into one dict
res = {key: np.ravel(res[key]) for key in res.keys()} # and ravel
# convert to dataframe and save
results = pd.DataFrame(data=res, columns=res.keys(), index=model.FORC.index, dtype=float)
results.to_csv(pgen['output_file'] + '.csv', sep=';')
del res, cres, bres
#%% now, let's draw timeseries of root zone and organic layer water content and ET
# components to Fig. 2
plt.figure()
plt.subplot(211)
plt.plot(results[['Wliq', 'Wliq_top']])
plt.legend([r'$\theta$', r'$\theta_{org}$'])
plt.ylabel(r'$\theta$ (m$^{3}$ m$^{-3}$)')
plt.subplot(212)
plt.plot(results[['Evap', 'Transpi', 'Efloor']])
plt.legend(['E', 'Tr', 'Ef'])
plt.ylabel('mm d$^{-1}$')
#%%
""" demo 2: override parameters and run 3 simulations """
if demo == 2:
# set up model for single site; here use FIHy as example
from spafhy_parameters import parameters_FIHy
# read parameters
pgen, pcpy, pbu = parameters_FIHy()
# read forcing data to dataframe
dat, FORC = read_HydeDaily(pgen['forcing_file'])
FORC['Prec'] = FORC['Prec'] / pgen['dt'] # mms-1
FORC['T'] = FORC['Ta'].copy()
# np.array cmask is needed to apply model components at a single point
cmask = np.ones(1)
# run model for 3 different parameter combinations: vary
# g1_conif, g1_decid, wmax, wmaxsnow by +/- 20%, 20%, 20%, 30%
p =[
[1.0, 1.0, 1.0, 1.0], # nominal
[0.8, 0.8, 0.8, 0.7], # low-ET case
[1.2, 1.2, 1.2, 1.3] # high-ET case
]
# save results to list
out = []
for k in range(3):
a = p[k]
# read nominal parameters and modify some of them
pgen, pcpy, pbu = parameters_FIHy()
pcpy['physpara']['g1_conif'] *= a[0]
pcpy['physpara']['g1_decid'] *= a[1]
pcpy['interc']['wmax'] *= a[2]
pcpy['interc']['wmaxsnow'] *= a[3]
# create model instance and run:
model = SpaFHy_point(pgen, pcpy, pbu, FORC, cmask=cmask, cpy_outputs=True, bu_outputs=True)
nsteps=len(FORC)
model._run(0, nsteps)
# extract results, convert to dataframe, print to file and append to out
cres = model.cpy.results
bres = model.bu.results
del bres['ET']
res = {**cres, **bres} # combine into one dict
res = {key: np.ravel(res[key]) for key in res.keys()} # and convert each variable into 1D array
results = pd.DataFrame(data=res, columns=res.keys(), index=model.FORC.index)
results.to_csv(pgen['output_file'] + '_sim_' + str(k) + '.csv', sep=';')
out.append(results)
del model, res, cres, bres, results, pcpy, pgen, pbu
# plot Fig 3 equivalent
from make_Fig3 import draw_Fig3
draw_Fig3(dat, out)
#%%
""" demo 3: as demo2 but run for 3 different soil types defined in soil_properties"""
if demo == 3:
# soil classes
soilclass = np.array([1, 2, 3]) # coarse, medium, fine
cmask = np.ones(1)
# set up model for single site; here use FIHy as example
from spafhy_parameters import parameters_FIHy, soil_properties
from spafhy_io import preprocess_soildata
    # read nominal parameters first so that the forcing file path is available
    pgen, pcpy, pbu = parameters_FIHy()
    # read forcing data to dataframe
    dat, FORC = read_HydeDaily(pgen['forcing_file'])
    FORC['Prec'] = FORC['Prec'] / pgen['dt'] # mms-1
    FORC['T'] = FORC['Ta'].copy()
out = []
    # run for 3 soil classes
for k in range(3):
# read parameters and soil properties
pgen, pcpy, pbu = parameters_FIHy()
psoil = soil_properties()
# get soil properties based on soilclass and update pbu
pbu = preprocess_soildata(pbu, psoil, soilclass[k], cmask=cmask, spatial=True)
print(pbu)
# create model instance and run:
model = SpaFHy_point(pgen, pcpy, pbu, FORC, cmask=cmask, cpy_outputs=True, bu_outputs=True)
nsteps=len(FORC)
model._run(0, nsteps)
# extract results, convert to dataframe, print to file and append to out
cres = model.cpy.results
bres = model.bu.results
del bres['ET']
res = {**cres, **bres} # combine into one dict
res = {key: np.ravel(res[key]) for key in res.keys()} # and convert each variable into 1D array
results = pd.DataFrame(data=res, columns=res.keys(), index=model.FORC.index)
results.to_csv(pgen['output_file'] + '_sim_' + str(k) + '.csv', sep=';')
out.append(results)
del model, res, cres, bres, results, pcpy, pgen, pbu
# plot figure of soil water content and Transpiration at each soil class
plt.figure()
ax1 = plt.subplot(211)
ax1.plot(out[2]['Wliq'], label='fine text')
ax1.plot(out[1]['Wliq'], label='medium text')
ax1.plot(out[0]['Wliq'], label='coarse text')
ax1.legend()
ax1.set_ylabel(r'$\theta$ (m$^{3}$ m$^{-3}$)')
ax2 = plt.subplot(212, sharex=ax1)
ax2.plot(out[2]['Transpi'], label='fine text')
ax2.plot(out[1]['Transpi'], label='medium text')
ax2.plot(out[0]['Transpi'], label='coarse text')
ax2.legend()
ax2.set_ylabel('mm d$^{-1}$')
#%%
""" demo 4: as demo2 but run all 3 different soil types at once"""
if demo == 4:
# soil classes
soilclass = np.array([1, 2, 3]) # coarse, medium, fine
cmask = np.ones(3)
# set up model for single site; here use FIHy as example
from spafhy_parameters import parameters_FIHy, soil_properties
from spafhy_io import preprocess_soildata
# read parameters and soil properties
pgen, pcpy, pbu = parameters_FIHy()
psoil = soil_properties()
# read forcing data to dataframe
dat, FORC = read_HydeDaily(pgen['forcing_file'])
FORC['Prec'] = FORC['Prec'] / pgen['dt'] # mms-1
FORC['T'] = FORC['Ta'].copy()
# get soil properties based on soilclass and update pbu
pbu = preprocess_soildata(pbu, psoil, soilclass, cmask=cmask, spatial=True)
#print(pbu)
# create model instance and run:
model = SpaFHy_point(pgen, pcpy, pbu, FORC, cmask=cmask, cpy_outputs=True, bu_outputs=True)
nsteps=len(FORC)
model._run(0, nsteps)
# extract results, convert to dataframe, print to file and append to out
cres = model.cpy.results
bres = model.bu.results
del bres['ET']
res = {**cres, **bres} # combine into one dict
res = {key: np.array(res[key]) for key in res.keys()}
# now res is dict where each key contains np.array which shape is (nsteps,3)
    # to save each column into a separate csv file and plot figures, we do the following:
out = []
n = 0
for s in soilclass:
dummy = {key: res[key][:,n] for key in res.keys()}
results = pd.DataFrame(data=dummy, columns=res.keys(), index=model.FORC.index)
results.to_csv(pgen['output_file'] + '_sim_' + str(s) + '.csv', sep=';')
n += 1
out.append(results)
del model, res, cres, bres, results, pcpy, pgen, pbu
# plot figure of soil water content and Transpiration at each soil class
plt.figure()
ax1 = plt.subplot(211)
ax1.plot(out[2]['Wliq'], label='fine text')
ax1.plot(out[1]['Wliq'], label='medium text')
ax1.plot(out[0]['Wliq'], label='coarse text')
ax1.legend()
ax1.set_ylabel(r'$\theta$ (m$^{3}$ m$^{-3}$)')
ax2 = plt.subplot(212, sharex=ax1)
ax2.plot(out[2]['Transpi'], label='fine text')
ax2.plot(out[1]['Transpi'], label='medium text')
ax2.plot(out[0]['Transpi'], label='coarse text')
ax2.legend()
ax2.set_ylabel('mm d$^{-1}$')
|
StarcoderdataPython
|
1757711
|
<reponame>daintlab/unknown-detection-benchmarks
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
def conv3x3(in_planes, out_planes, stride=1):
" 3x3 convolution with padding "
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
class BasicBlock(nn.Module):
expansion=1
def __init__(self, inplanes, planes, stride=1, downsample=None, dropRate=0.0):
super(BasicBlock, self).__init__()
self.dropRate = dropRate
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
if self.dropRate > 0:
out = F.dropout(out, p=self.dropRate, inplace=False, training=True)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
if self.dropRate > 0:
out = F.dropout(out, p=self.dropRate, inplace=False, training=True)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion=4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes*4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes*4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet_Cifar(nn.Module):
def __init__(self, block, layers, num_classes=40, dropRate=0.0):
super(ResNet_Cifar, self).__init__()
self.dropRate = dropRate
self.inplanes = 16
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(block, 16, layers[0])
self.layer2 = self._make_layer(block, 32, layers[1], stride=2)
self.layer3 = self._make_layer(block, 64, layers[2], stride=2)
self.avgpool = nn.AvgPool2d(8, stride=1)
self.fc = nn.Linear(64 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion)
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, dropRate=self.dropRate))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, dropRate=self.dropRate))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
if self.dropRate > 0:
x = F.dropout(x, p=0.1, inplace=False, training=True)
x = self.fc(x)
return x
# function to extract the multiple features
def feature_list(self, x):
out_list = []
out = self.relu(self.bn1(self.conv1(x)))
out_list.append(out)
out = self.layer1(out)
out_list.append(out)
out = self.layer2(out)
out_list.append(out)
out = self.layer3(out)
out_list.append(out)
out = self.avgpool(out)
out = out.view(out.size(0), -1)
y = self.fc(out)
return y, out_list
# function to extract a specific feature
def intermediate_forward(self, x, layer_index):
out = F.relu(self.bn1(self.conv1(x)))
if layer_index == 1:
out = self.layer1(out)
elif layer_index == 2:
out = self.layer1(out)
out = self.layer2(out)
elif layer_index == 3:
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
return out
# function to extract the penultimate features
def penultimate_forward(self, x):
out = self.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
penultimate = self.layer3(out)
out = self.avgpool(penultimate)
out = out.view(out.size(0), -1)
y = self.fc(out)
return y, penultimate
def resnet20(**kwargs):
model = ResNet_Cifar(BasicBlock, [3, 3, 3], **kwargs)
return model
def resnet110(**kwargs):
model = ResNet_Cifar(BasicBlock, [18, 18, 18], **kwargs)
return model
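# Hedged usage sketch (added; not part of the original module): build the small
# CIFAR variant and run a dummy 32x32 batch through the regular forward pass and
# the feature extractor used for unknown detection.
if __name__ == "__main__":
    net = resnet20(num_classes=40, dropRate=0.1)
    x = torch.randn(2, 3, 32, 32)
    logits = net(x)                      # shape (2, 40)
    y, feats = net.feature_list(x)       # logits plus 4 intermediate feature maps
    print(logits.shape, [f.shape for f in feats])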
|
StarcoderdataPython
|
97540
|
import re
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from random import seed
from random import random
from random import randint
class Scraper(object):
"""Able to start up a browser, to authenticate to Instagram and get
followers and people following a specific user."""
def __init__(self):
self.driver = webdriver.Chrome('drivers/chromedriver2')
def close(self):
"""Close the browser."""
self.driver.close()
def authenticate(self, username, password):
"""Log in to Instagram with the provided credentials."""
print('\nLogging in…')
self.driver.get('https://www.instagram.com')
# Go to log in
login_link = WebDriverWait(self.driver, 5).until(
EC.presence_of_element_located((By.LINK_TEXT, 'Forgot password?'))
)
# Authenticate
username_input = self.driver.find_element_by_xpath(
'//input[@name="username"]'
)
password_input = self.driver.find_element_by_xpath(
'//input[@name="password"]'
)
username_input.send_keys(username)
password_input.send_keys(password)
password_input.send_keys(Keys.RETURN)
time.sleep(10)
    def get_users(self, group, target, link, verbose = False):
        """Return a list of links to the user profiles found."""
        f = open("links.txt", "a")
if link is None:
wblink = self._get_link(group, target)
if wblink is None:
return []
self._open_dialog(wblink)
if self.users_list_container is None:
return None
else:
wblink = self._get_link_by_link(group, link)
if wblink is None:
return []
self._open_dialog(wblink)
if self.users_list_container is None:
return None
print('\nGetting {} users…{}'.format(
self.expected_number,
'\n' if verbose else ''
))
links = []
last_user_index = 0
updated_list = self._get_updated_user_list()
initial_scrolling_speed = 5
retry = 2
# While there are more users scroll and save the results
while updated_list[last_user_index] is not updated_list[-1] or retry > 0:
self._scroll(self.users_list_container, initial_scrolling_speed)
for index, user in enumerate(updated_list):
if index < last_user_index:
continue
if index > 100:
return links
try:
link_to_user = user.find_element(By.TAG_NAME, 'a').get_attribute('href')
last_user_index = index
if link_to_user not in links:
links.append(link_to_user)
if verbose:
f.write(link_to_user + "\n")
print(
'{0:.2f} {1:s}'.format(
index,
link_to_user
)
)
except:
if (initial_scrolling_speed > 1):
initial_scrolling_speed -= 1
pass
updated_list = self._get_updated_user_list()
if updated_list[last_user_index] is updated_list[-1]:
retry -= 1
f.close()
print('100% Complete')
return links
def _open_dialog(self, link):
"""Open a specific dialog and identify the div containing the users
list."""
link.click()
self.expected_number = int(
re.search('(\d+)', link.text).group(1)
)
time.sleep(1)
try:
self.users_list_container = self.driver.find_element_by_xpath(
'//div[@role="dialog"]//ul/parent::div'
)
except:
self.users_list_container = None
def _get_link(self, group, target):
"""Return the element linking to the users list dialog."""
print('\nNavigating to %s profile…' % target)
self.driver.get('https://www.instagram.com/%s/' % target)
try:
return WebDriverWait(self.driver, 5).until(
EC.presence_of_element_located((By.PARTIAL_LINK_TEXT, group))
)
except:
return None
def _get_link_by_link(self, group, link):
"""Return the element linking to the users list dialog."""
print('\nNavigating to %s profile…' % link)
self.driver.get(link)
try:
return WebDriverWait(self.driver, 5).until(
EC.presence_of_element_located((By.PARTIAL_LINK_TEXT, group))
)
except:
return None
def _get_updated_user_list(self):
"""Return all the list items included in the users list."""
return self.users_list_container.find_elements(By.XPATH, 'ul//li')
def _scroll(self, element, times = 1):
"""Scroll a specific element one or more times with small delay between
them."""
while times > 0:
self.driver.execute_script(
'arguments[0].scrollTop = arguments[0].scrollHeight',
element
)
time.sleep(random() * randint(2, 5))
times -= 1
|
StarcoderdataPython
|
100270
|
<gh_stars>0
import sklearn.base
import numpy as np
class ProbT(sklearn.base.BaseEstimator, sklearn.base.TransformerMixin):
"""Wraps a sklearn classifier (ClassifierMixin) to use the output
of their .predict_proba method.
Args:
model (sklearn.base.ClassifierMixin): A sklearn classification model
drop (bool): Flag if to drop the column with the 0 probabilties
Example:
from sklearn.pipeline import Pipeline
from pipelinetweak.pipe import ProbT
from sklearn.linear_model import LinearRegression
from sklearn.dummy import DummyRegressor
model = Pipeline(steps=[
('trans', ProbT(LogisticRegression(), drop=False)),
('pred', DummyRegressor())
])
model.fit(X, y)
Y_pred = model.predict(X)
"""
def __init__(self,
model: sklearn.base.ClassifierMixin,
drop: bool = True) -> None:
self.model = model
self.drop = drop
def fit(self, *args, **kwargs) -> 'ProbT':
self.model.fit(*args, **kwargs)
return self
def transform(self, X: np.ndarray, **transform_params) -> np.ndarray:
Z = self.model.predict_proba(X)
if self.drop:
return Z[:, 1:]
else:
return Z
|
StarcoderdataPython
|
138467
|
<reponame>esorot/upb<gh_stars>0
# copybara:strip_for_google3_begin
def pyproto_test_wrapper(name):
src = name + "_wrapper.py"
native.py_test(
name = name,
srcs = [src],
legacy_create_init = False,
main = src,
data = ["@com_google_protobuf//:testdata"],
deps = [
"//python:message_ext",
"@com_google_protobuf//:python_common_test_protos",
"@com_google_protobuf//:python_specific_test_protos",
"@com_google_protobuf//:python_srcs",
],
)
# copybara:replace_for_google3_begin
#
# def pyproto_test_wrapper(name):
# src = name + "_wrapper.py"
# native.py_test(
# name = name,
# srcs = [src],
# main = src,
# deps = [
# "//net/proto2/python/internal:" + name + "_for_deps",
# "//net/proto2/python/public:use_upb_protos",
# ],
# )
#
# copybara:replace_for_google3_end
|
StarcoderdataPython
|
4817270
|
from __future__ import print_function
import torch
from api import config_fun
import train_helper
cfg = config_fun.config()
model = train_helper.get_model(cfg, pretrained=False)
model_pa = torch.nn.DataParallel(model)
model_cuda = model.cuda()
print(type(model))
print(type(model_cuda))
print(type(model_pa))
print(type(model_pa.module))
print(isinstance(model, torch.nn.DataParallel))
print(isinstance(model, torch.nn.Module))
print(isinstance(model_cuda, torch.nn.DataParallel))
print(isinstance(model_cuda, torch.nn.Module))
print(isinstance(model_pa, torch.nn.DataParallel))
print(isinstance(model_pa.module, torch.nn.DataParallel))
print(isinstance(model_pa.module, torch.nn.Module))
|
StarcoderdataPython
|
3290069
|
<filename>Text-Based-Browser/browser.py
import sys
import os
from pathlib import Path
import requests
from bs4 import BeautifulSoup
from colorama import init, Fore, Back, Style
def get_url_file(dir, url):
url_key = url.replace(".", "_").replace("_com", "").replace("_org", "")
return os.sep.join([dir, url_key + ".txt"])
def read_file(url):
with open(get_url_file(dir, url), "r", encoding='UTF-8') as file:
for line in file.readlines():
print(line, end="")
file.close()
def write_file(url, page):
with open(get_url_file(dir, url), "w", encoding='UTF-8') as file:
file.write(page)
file.close()
def get_page(url):
url_path = "http://" + url
page = requests.get(url_path)
soup = BeautifulSoup(page.content, 'html.parser')
page = soup.find_all(text=True)
output = ''
whitelist = [
'title',
'p',
'a',
'ul',
'ol',
'li',
'headers',
]
for t in page:
if t.parent.name in whitelist and len(t) > 1:
if t.parent.name == 'a':
print(Fore.BLUE + '{} '.format(t))
else:
print(Fore.BLACK + '{} '.format(t))
output += '{} '.format(t)
return output
if __name__ == '__main__':
init()
if len(sys.argv) > 1:
dir = sys.argv[1]
Path(dir).mkdir(parents=True, exist_ok=True)
visited_pages = []
url = input()
while url != 'exit':
if url == 'back':
if len(visited_pages) > 0:
url = visited_pages.pop(0)
read_file(url)
else:
try:
read_file(url)
visited_pages.append(url)
except FileNotFoundError:
try:
output = get_page(url)
write_file(url, output)
except Exception as e:
print("Error - invalid link")
print(e)
url = input()
|
StarcoderdataPython
|
123823
|
<filename>Notatki/5_GUI/1_Tkinter/przyklad.py
# Programming I R
# Graphical user interface: Tkinter - example
#***********************************************************************************
# Import the required modules
#***********************************************************************************
# Tkinter package: core functionality
from tkinter import *
# Tkinter package: modern-looking widgets
from tkinter.ttk import *
# Tkinter package: MessageBox window
from tkinter import messagebox
#***********************************************************************************
# Design the main application window
#***********************************************************************************
# Main application window.
MainForm = Tk()
MainForm.title("Tkinter - example")
MainForm.geometry("300x100")
# Widget 1: label.
lblInfo = Label(MainForm, text = "Enter some text: ")
lblInfo.grid(column = 0, row = 0)
# Widget 2: text entry field.
txtText = Entry(MainForm, width = 30)
txtText.grid(column = 1, row = 0)
# Widget 3: label.
lblTextInfo = Label(MainForm, text = "Entered text: ")
lblTextInfo.grid(column = 0, row = 1)
# Widget 4: label.
lblText = Label(MainForm, text = "(none)")
lblText.grid(column = 1, row = 1)
# Widget 5: button.
def btnOK_Clicked(): # Function called when the btnOK button is clicked.
    # Change the text on the lblText label.
    # The txtText.get() method returns the text typed into the txtText field.
    lblText.configure(text = txtText.get())
    # Show a MessageBox window.
    messagebox.showinfo("Tkinter - example", txtText.get())
btnOK = Button(MainForm, text = "OK", command = btnOK_Clicked)
btnOK.grid(column = 1, row = 2)
#***********************************************************************************
# Hand over control of the application to its main window
#***********************************************************************************
MainForm.mainloop()
|
StarcoderdataPython
|
140384
|
# -- Project information -----------------------------------------------------
project = "2i2c Pilot Hubs Infrastructure"
copyright = "2020, 2i2c.org"
author = "2<EMAIL>"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"myst_parser",
"sphinx.ext.intersphinx",
"sphinx_panels",
]
intersphinx_mapping = {
"z2jh": ("https://zero-to-jupyterhub.readthedocs.io/en/latest/", None),
"tc": ("https://team-compass.2i2c.org/en/latest/", None),
}
# -- MyST configuration ---------------------------------------------------
myst_enable_extensions = [
    "colon_fence",
    "deflist",
    "linkify",
]
source_suffix = [".rst", ".md"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for MyST -------------------------------------------------
panels_add_bootstrap_css = False
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_book_theme"
html_theme_options = {
"repository_url": "https://github.com/2i2c-org/pilot-hubs",
"use_issues_button": True,
"use_repository_button": True,
}
# -- Custom scripts -----------------------------------------
# Pull latest list of communities served by pilot-hubs/
from yaml import safe_load
import pandas as pd
from pathlib import Path
import subprocess
def render_hubs():
# Grab the latest list of clusters defined in pilot-hubs/
clusters = Path("../config/hubs").glob("*")
# Add list of repos managed outside pilot-hubs
hub_list = [{
'name': 'University of Toronto',
'domain': 'jupyter.utoronto.ca',
'id': 'utoronto',
'template': 'base-hub ([deployment repo](https://github.com/utoronto-2i2c/jupyterhub-deploy/))'
}]
for cluster_info in clusters:
if "schema" in cluster_info.name:
continue
        # For each cluster, grab its YAML with the config for each hub
yaml = cluster_info.read_text()
cluster = safe_load(yaml)
# For each hub in cluster, grab its metadata and add it to the list
for hub in cluster['hubs']:
config = hub['config']
# Config is sometimes nested
if 'basehub' in config:
hub_config = config['basehub']['jupyterhub']
else:
hub_config = config['jupyterhub']
# Domain can be a list
if isinstance(hub['domain'], list):
hub['domain'] = hub['domain'][0]
hub_list.append({
'name': hub_config['custom']['homepage']['templateVars']['org']['name'],
'domain': f"[{hub['domain']}](https://{hub['domain']})",
"id": hub['name'],
"template": hub['template'],
})
df = pd.DataFrame(hub_list)
path_tmp = Path("tmp")
path_tmp.mkdir(exist_ok=True)
path_table = path_tmp / "hub-table.csv"
df.to_csv(path_table, index=None)
def render_tfdocs():
tf_path = Path('../terraform')
# Output path is relative to terraform directory
output_path = Path('../docs/reference/terraform.md')
# Template for output file is in ../terraform/.terraform-docs.yml
subprocess.check_call([
'terraform-docs', 'markdown',
f"--output-file={output_path}",
f'--config={str(tf_path / ".terraform-docs.yml")}',
str(tf_path)
])
render_hubs()
render_tfdocs()
|
StarcoderdataPython
|
32033
|
<gh_stars>1-10
import matplotlib
import numpy as np
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import random
from sympy import symbols, diff, N
def fun(X, Y):
return 2*(np.exp(-X**2 - Y**2))# - np.exp(-(X - 1)**2 - (Y - 1)**2))
def symfun(X, Y):
return 2*(np.exp(1)**(-X**2 - Y**2))# - np.exp(1)**(-(X - 1)**2 - (Y - 1)**2))
delta = 0.025
x = np.arange(-3.0, 3.0, delta)
y = np.arange(-2.0, 2.0, delta)
X, Y = np.meshgrid(x, y)
Z1 = np.exp(-X**2 - Y**2)
Z2 = np.exp(-(X - 1)**2 - (Y - 1)**2)
Z = fun(X, Y)
fig, ax = plt.subplots()
CS = ax.contour(X, Y, Z)
ax.clabel(CS, inline=1, fontsize=10)
ax.set_title('Gradient ascent on a 2-D Gaussian')
o=[(random.random()-0.5)*4, (random.random()-0.5)*4]
x, y = symbols('x y', real=True)
dx=diff(symfun(x, y), x)
dy=diff(symfun(x,y), y)
d=[dx.subs({x:o[0], y:o[1]}), dy.subs({x:o[0], y:o[1]})]
alpha=0.7
i=0
while bool((d[0]**2+d[1]**2)>=1e-4) and i<1000:
d=[dx.subs({x:o[0], y:o[1]}), dy.subs({x:o[0], y:o[1]})]
no=[o[0]+d[0]*alpha, o[1]+d[1]*alpha]
#plt.plot(np.array([o[0], no[0]]), np.array([o[1], no[1]]), color="#"+hex(i)[2:]+""+hex(i)[2:]+""+hex(i)[2:])
plt.plot(o[0], o[1], color="#"+hex(i%(256-16)+16)[2:]+""+hex(i%(256-16)+16)[2:]+""+hex(i%(256-16)+16)[2:], marker='o')
o=no
i+=1
plt.plot(o[0], o[1], color="#"+hex(i%(256-16)+16)[2:]+""+hex(i%(256-16)+16)[2:]+""+hex(i%(256-16)+16)[2:], marker='o')
plt.show()
|
StarcoderdataPython
|
156496
|
import copy
from collections import OrderedDict
from typing import List
import numpy as np
from opticverge.core.chromosome.abstract_chromosome import AbstractChromosome
from opticverge.core.chromosome.function_chromosome import FunctionChromosome
from opticverge.core.generator.int_distribution_generator import rand_int
from opticverge.core.generator.options_generator import rand_options
from opticverge.core.generator.real_generator import rand_real
class RandArrayChromosome(FunctionChromosome):
""" The chromosome for generating fixed or dynamic arrays """
def __init__(self, generator: AbstractChromosome, length: int, fixed: bool = False):
""" The constructor for this class
Args:
generator (AbstractChromosome): An instance of a class derived from an AbstractChromosome
length (int): The length of the array
fixed (bool): Whether the array length is fixed
"""
super(RandArrayChromosome, self).__init__(generator, OrderedDict())
"""
The generator is responsible for producing each entry within the array
"""
self.__generator = generator
"""
The length represents the initial size of the array if fixed is True
"""
self.__length = length
"""
Fixed represents whether the array is subject to change during the
evolutionary process.
"""
self.__fixed = fixed
def generate(self, **kwargs):
# determine the length of the array to generate
length: int = self.__length if self.__fixed is True else rand_int(1, self.__length)
# generate each of the positions of the array using the generator
for i in range(length):
self.genotype[i] = self.__generator.generate(**kwargs)
# the phenotype of an array represents the values from the genotype,
# since we use an OrderedDict as our base representation we are safe to
# use list(self.genotype.values())
self.phenotype = list(self.genotype.values())
return self.phenotype
def mutate(self, mutation_probability: float, **kwargs):
""" The mutation function for the array_chromosome
When mutating the ArrayChromosome we use a number of techniques to
modify the contents.
1. Iterate through each entry in the array and with some probability
change the value based on the generator
2. Then select pairs of positions at random with some probability and swap
their values
3. If fixed is set to False then:
a) Attempt to add an item to the array with some probability
b) Attempt to remove an item from the array with some probability
Args:
mutation_probability: The likelihood that we will change the array
**kwargs:
Returns:
"""
# 1. Attempt to mutate each value
for key, val in self.genotype.items():
if rand_real() < mutation_probability:
self.genotype[key] = self.__generator.generate(**kwargs)
# 2. Attempt to swap positions of the array
keys: List[str or int] = list(self.genotype.keys())
if len(keys) > 1:
# select the number of items to modify in the list
items_to_select: int = rand_int(2, len(keys))
selected: List[str or int] = rand_options(keys, items_to_select)
shuffled: List[np.int64] = copy.copy(selected)
np.random.shuffle(shuffled)
for i, key in enumerate(selected):
if rand_real() < mutation_probability:
self.genotype[selected[i]], self.genotype[shuffled[i]] = self.genotype[shuffled[i]], self.genotype[
selected[i]]
# TODO: Sensibly define how to insert/update an OrderedDict
# 3. Attempt to add and remove items to the array
# if self.__fixed is False:
#
# # Add
# num_positions = rand_int(1, len(keys))
#
# # create a temporary placeholder to update the OrderedDict
# temp_phenotype = list(self.genotype.values())
#
# for i in range(num_positions):
#
# if rand_real() < mutation_probability:
# position = rand_int(0, len(self.genotype.keys()))
# temp_phenotype.insert(position, self.__generator.generate(**kwargs))
#
# # Remove
self.phenotype = list(self.genotype.values())
return self.phenotype
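# Hedged standalone illustration (added; not part of the original module): the
# swap step in mutate() pairs a random selection of genotype keys with a shuffled
# copy of itself and exchanges the corresponding values. The snippet below
# reproduces that idea on a plain OrderedDict, without the chromosome and
# generator classes of the package.
if __name__ == "__main__":
    genotype = OrderedDict(enumerate([10, 20, 30, 40]))
    keys = list(genotype.keys())
    selected = list(np.random.choice(keys, size=2, replace=False))
    shuffled = copy.copy(selected)
    np.random.shuffle(shuffled)
    for a, b in zip(selected, shuffled):
        genotype[a], genotype[b] = genotype[b], genotype[a]
    print(list(genotype.values()))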
|
StarcoderdataPython
|
194915
|
<filename>packages/augur-core/tests/trading/test_claimTradingProceeds.py
#!/usr/bin/env python
from eth_tester.exceptions import TransactionFailed
from pytest import raises, fixture, mark
from utils import fix, AssertLog, EtherDelta, TokenDelta, BuyWithCash, nullAddress
from constants import YES, NO
def captureLog(contract, logs, message):
translated = contract.translator.listen(message)
if not translated: return
logs.append(translated)
def acquireLongShares(kitchenSinkFixture, cash, market, outcome, amount, approvalAddress, sender):
if amount == 0: return
shareToken = kitchenSinkFixture.contracts["ShareToken"]
cost = amount * market.getNumTicks()
with BuyWithCash(cash, cost, sender, "complete set buy"):
assert shareToken.publicBuyCompleteSets(market.address, amount, sender = sender)
for otherOutcome in range(0, market.getNumberOfOutcomes()):
if otherOutcome == outcome: continue
shareToken.safeTransferFrom(sender, kitchenSinkFixture.accounts[8], shareToken.getTokenId(market.address, otherOutcome), amount, "", sender = sender)
def acquireShortShareSet(kitchenSinkFixture, cash, market, outcome, amount, approvalAddress, sender):
if amount == 0: return
cost = amount * market.getNumTicks()
shareToken = kitchenSinkFixture.contracts["ShareToken"]
with BuyWithCash(cash, cost, sender, "complete set buy"):
assert shareToken.publicBuyCompleteSets(market.address, amount, sender = sender)
shareToken.safeTransferFrom(sender, kitchenSinkFixture.accounts[8], shareToken.getTokenId(market.address, outcome), amount, "", sender = sender)
def finalizeMarket(fixture, market, payoutNumerators):
prepare_finalize_market(fixture, market, payoutNumerators)
assert market.finalize()
def prepare_finalize_market(fixture, market, payoutNumerators):
# set timestamp to after market end
fixture.contracts["Time"].setTimestamp(market.getEndTime() + 1)
# have kitchenSinkFixture.accounts[0] submit designated report
market.doInitialReport(payoutNumerators, "", 0)
# set timestamp to after designated dispute end
disputeWindow = fixture.applySignature('DisputeWindow', market.getDisputeWindow())
fixture.contracts["Time"].setTimestamp(disputeWindow.getEndTime() + 1)
# finalize the market
def test_helpers(kitchenSinkFixture, scalarMarket):
market = scalarMarket
shareToken= kitchenSinkFixture.contracts['ShareToken']
finalizeMarket(kitchenSinkFixture, market, [0,0,40*10**4])
assert shareToken.calculateCreatorFee(market.address, fix('3')) == fix('0.03')
assert shareToken.calculateReportingFee(market.address, fix('5')) == fix('0.05')
assert shareToken.calculateProceeds(market.address, YES, 7) == 7 * market.getNumTicks()
assert shareToken.calculateProceeds(market.address, NO, fix('11')) == fix('0')
(proceeds, shareholderShare, creatorShare, reporterShare) = shareToken.divideUpWinnings(market.address, YES, 13)
assert proceeds == 13.0 * market.getNumTicks()
assert reporterShare == 13.0 * market.getNumTicks() * 0.01
assert creatorShare == 13.0 * market.getNumTicks() * 0.01
assert shareholderShare == 13.0 * market.getNumTicks() * 0.98
def test_redeem_shares_in_yesNo_market(kitchenSinkFixture, universe, cash, market):
shareToken = kitchenSinkFixture.contracts["ShareToken"]
expectedValue = 1 * market.getNumTicks()
expectedReporterFees = expectedValue / universe.getOrCacheReportingFeeDivisor()
expectedMarketCreatorFees = expectedValue / market.getMarketCreatorSettlementFeeDivisor()
expectedSettlementFees = expectedReporterFees + expectedMarketCreatorFees
expectedPayout = expectedValue - expectedSettlementFees
assert universe.getOpenInterestInAttoCash() == 0
# get YES shares with a1
acquireLongShares(kitchenSinkFixture, cash, market, YES, 1, shareToken.address, sender = kitchenSinkFixture.accounts[1])
assert universe.getOpenInterestInAttoCash() == 1 * market.getNumTicks()
# get NO shares with a2
acquireShortShareSet(kitchenSinkFixture, cash, market, YES, 1, shareToken.address, sender = kitchenSinkFixture.accounts[2])
assert universe.getOpenInterestInAttoCash() == 2 * market.getNumTicks()
finalizeMarket(kitchenSinkFixture, market, [0, 0, 10**2])
tradingProceedsClaimedLog = {
'market': market.address,
'numPayoutTokens': expectedPayout,
'numShares': 1,
'sender': kitchenSinkFixture.accounts[1],
'fees': 2,
}
with TokenDelta(cash, expectedMarketCreatorFees, market.getOwner(), "market creator fees not paid"):
with TokenDelta(cash, expectedReporterFees, universe.getOrCreateNextDisputeWindow(False), "Reporter fees not paid"):
# redeem shares with a1
with AssertLog(kitchenSinkFixture, "TradingProceedsClaimed", tradingProceedsClaimedLog):
shareToken.claimTradingProceeds(market.address, kitchenSinkFixture.accounts[1], nullAddress)
# redeem shares with a2
shareToken.claimTradingProceeds(market.address, kitchenSinkFixture.accounts[2], nullAddress)
# assert a1 ends up with cash (minus fees) and a2 does not
assert cash.balanceOf(kitchenSinkFixture.accounts[1]) == expectedPayout
assert shareToken.balanceOfMarketOutcome(market.address, YES, kitchenSinkFixture.accounts[1]) == 0
assert shareToken.balanceOfMarketOutcome(market.address, YES, kitchenSinkFixture.accounts[2]) == 0
assert shareToken.balanceOfMarketOutcome(market.address, NO, kitchenSinkFixture.accounts[1]) == 0
assert shareToken.balanceOfMarketOutcome(market.address, NO, kitchenSinkFixture.accounts[2]) == 0
def test_redeem_shares_in_categorical_market(kitchenSinkFixture, universe, cash, categoricalMarket):
market = categoricalMarket
shareToken = kitchenSinkFixture.contracts["ShareToken"]
numTicks = market.getNumTicks()
expectedValue = numTicks
expectedSettlementFees = expectedValue * 0.02
expectedPayout = expectedValue - expectedSettlementFees
assert universe.getOpenInterestInAttoCash() == 0
# get long shares with a1
acquireLongShares(kitchenSinkFixture, cash, market, 3, 1, shareToken.address, sender = kitchenSinkFixture.accounts[1])
assert universe.getOpenInterestInAttoCash() == 1 * numTicks
# get short shares with a2
acquireShortShareSet(kitchenSinkFixture, cash, market, 3, 1, shareToken.address, sender = kitchenSinkFixture.accounts[2])
assert universe.getOpenInterestInAttoCash() == 2 * numTicks
prepare_finalize_market(kitchenSinkFixture, market, [0, 0, 0, numTicks])
# redeem shares with a1
shareToken.claimTradingProceeds(market.address, kitchenSinkFixture.accounts[1], nullAddress)
assert market.isFinalized()
# redeem shares with a2
shareToken.claimTradingProceeds(market.address, kitchenSinkFixture.accounts[2], nullAddress)
assert universe.getOpenInterestInAttoCash() == 0
# assert both accounts are paid (or not paid) accordingly
assert cash.balanceOf(kitchenSinkFixture.accounts[1]) == expectedPayout
assert cash.balanceOf(kitchenSinkFixture.accounts[2]) == 0
assert shareToken.balanceOfMarketOutcome(market.address, 2, kitchenSinkFixture.accounts[1]) == 0
assert shareToken.balanceOfMarketOutcome(market.address, 2, kitchenSinkFixture.accounts[2]) == 0
assert shareToken.balanceOfMarketOutcome(market.address, 1, kitchenSinkFixture.accounts[1]) == 0
assert shareToken.balanceOfMarketOutcome(market.address, 1, kitchenSinkFixture.accounts[2]) == 0
assert shareToken.balanceOfMarketOutcome(market.address, 0, kitchenSinkFixture.accounts[1]) == 0
assert shareToken.balanceOfMarketOutcome(market.address, 0, kitchenSinkFixture.accounts[2]) == 0
def test_redeem_shares_in_scalar_market(kitchenSinkFixture, universe, cash, scalarMarket):
market = scalarMarket
shareToken = kitchenSinkFixture.contracts["ShareToken"]
expectedValue = 1 * market.getNumTicks()
expectedSettlementFees = expectedValue * 0.02
expectedPayout = expectedValue - expectedSettlementFees
assert universe.getOpenInterestInAttoCash() == 0
# get YES shares with a1
acquireLongShares(kitchenSinkFixture, cash, market, YES, 1, shareToken.address, sender = kitchenSinkFixture.accounts[1])
assert universe.getOpenInterestInAttoCash() == 1 * market.getNumTicks()
# get NO shares with a2
acquireShortShareSet(kitchenSinkFixture, cash, market, YES, 1, shareToken.address, sender = kitchenSinkFixture.accounts[2])
assert universe.getOpenInterestInAttoCash() == 2 * market.getNumTicks()
finalizeMarket(kitchenSinkFixture, market, [0, 10**5, 3*10**5])
# redeem shares with a1
shareToken.claimTradingProceeds(market.address, kitchenSinkFixture.accounts[1], nullAddress)
# redeem shares with a2
shareToken.claimTradingProceeds(market.address, kitchenSinkFixture.accounts[2], nullAddress)
# assert a1 ends up with cash (minus fees) and a2 does not
assert cash.balanceOf(kitchenSinkFixture.accounts[1]) == expectedPayout * 3 / 4
assert cash.balanceOf(kitchenSinkFixture.accounts[2]) == expectedPayout * 1 / 4
assert shareToken.balanceOfMarketOutcome(market.address, YES, kitchenSinkFixture.accounts[1]) == 0
assert shareToken.balanceOfMarketOutcome(market.address, YES, kitchenSinkFixture.accounts[2]) == 0
assert shareToken.balanceOfMarketOutcome(market.address, NO, kitchenSinkFixture.accounts[1]) == 0
assert shareToken.balanceOfMarketOutcome(market.address, NO, kitchenSinkFixture.accounts[2]) == 0
def test_redeem_failure(kitchenSinkFixture, cash, market):
shareToken = kitchenSinkFixture.contracts['ShareToken']
# get YES shares with a1
acquireLongShares(kitchenSinkFixture, cash, market, YES, 1, shareToken.address, sender = kitchenSinkFixture.accounts[1])
# get NO shares with a2
acquireShortShareSet(kitchenSinkFixture, cash, market, YES, 1, shareToken.address, sender = kitchenSinkFixture.accounts[2])
# can't claim trading proceeds before market ends
with raises(TransactionFailed):
shareToken.claimTradingProceeds(market.address, kitchenSinkFixture.accounts[1], nullAddress)
# set timestamp to after market end
kitchenSinkFixture.contracts["Time"].setTimestamp(market.getEndTime() + 1)
    # have kitchenSinkFixture.accounts[0] submit the designated report
market.doInitialReport([0, 0, 100], "", 0)
# set timestamp to after designated dispute end
disputeWindow = kitchenSinkFixture.applySignature('DisputeWindow', market.getDisputeWindow())
kitchenSinkFixture.contracts["Time"].setTimestamp(disputeWindow.getEndTime() + 1)
# validate that everything else is OK
assert shareToken.claimTradingProceeds(market.address, kitchenSinkFixture.accounts[1], nullAddress)
assert market.isFinalized()
def test_redeem_shares_in_multiple_markets(kitchenSinkFixture, universe, cash, market, scalarMarket):
shareToken = kitchenSinkFixture.contracts['ShareToken']
augurTrading = kitchenSinkFixture.contracts['AugurTrading']
# Get scalar LONG shares with a1
expectedValue = 1 * scalarMarket.getNumTicks() * 3 / 4
expectedSettlementFees = expectedValue * 0.02
expectedPayout = expectedValue - expectedSettlementFees
acquireLongShares(kitchenSinkFixture, cash, scalarMarket, YES, 1, shareToken.address, sender = kitchenSinkFixture.accounts[1])
finalizeMarket(kitchenSinkFixture, scalarMarket, [0, 10**5, 3*10**5])
# get YES shares with a1
expectedValue = 1 * market.getNumTicks()
expectedSettlementFees = expectedValue * 0.02
expectedPayout += expectedValue - expectedSettlementFees
acquireLongShares(kitchenSinkFixture, cash, market, YES, 1, shareToken.address, sender = kitchenSinkFixture.accounts[1])
finalizeMarket(kitchenSinkFixture, market, [0, 0, 10**2])
with TokenDelta(cash, expectedPayout, kitchenSinkFixture.accounts[1], "Claiming multiple markets did not give expected payout"):
assert augurTrading.claimMarketsProceeds([market.address, scalarMarket.address], kitchenSinkFixture.accounts[1], nullAddress)
def test_redeem_shares_affiliate(kitchenSinkFixture, universe, cash, market):
shareToken = kitchenSinkFixture.contracts['ShareToken']
expectedValue = 100 * market.getNumTicks()
expectedReporterFees = expectedValue / universe.getOrCacheReportingFeeDivisor()
expectedMarketCreatorFees = expectedValue / market.getMarketCreatorSettlementFeeDivisor()
expectedSettlementFees = expectedReporterFees + expectedMarketCreatorFees
expectedPayout = expectedValue - expectedSettlementFees
expectedAffiliateFees = expectedMarketCreatorFees / market.affiliateFeeDivisor()
expectedMarketCreatorFees = expectedMarketCreatorFees - expectedAffiliateFees
assert universe.getOpenInterestInAttoCash() == 0
affiliateAddress = kitchenSinkFixture.accounts[5]
# get YES shares with a1
acquireLongShares(kitchenSinkFixture, cash, market, YES, 100, shareToken.address, sender = kitchenSinkFixture.accounts[1])
# get NO shares with a2
acquireShortShareSet(kitchenSinkFixture, cash, market, YES, 100, shareToken.address, sender = kitchenSinkFixture.accounts[2])
finalizeMarket(kitchenSinkFixture, market, [0, 0, 10**2])
with TokenDelta(cash, expectedMarketCreatorFees, market.getOwner(), "market creator fees not paid"):
with TokenDelta(cash, expectedAffiliateFees, affiliateAddress, "affiliate fees not paid"):
# redeem shares with a1
shareToken.claimTradingProceeds(market.address, kitchenSinkFixture.accounts[1], affiliateAddress)
# redeem shares with a2
shareToken.claimTradingProceeds(market.address, kitchenSinkFixture.accounts[2], affiliateAddress)
# assert a1 ends up with cash (minus fees) and a2 does not
assert cash.balanceOf(kitchenSinkFixture.accounts[1]) == expectedPayout
assert shareToken.balanceOfMarketOutcome(market.address, YES, kitchenSinkFixture.accounts[1]) == 0
assert shareToken.balanceOfMarketOutcome(market.address, YES, kitchenSinkFixture.accounts[2]) == 0
assert shareToken.balanceOfMarketOutcome(market.address, NO, kitchenSinkFixture.accounts[1]) == 0
assert shareToken.balanceOfMarketOutcome(market.address, NO, kitchenSinkFixture.accounts[2]) == 0
|
StarcoderdataPython
|
1669056
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
""" # noqa
from __future__ import unicode_literals
import datetime
import time
import unicodedata
import urllib
from django.conf import settings
from django.utils.translation import ugettext as _
# Avoid shadowing the login() and logout() views below.
from django.contrib.auth import (login as auth_login,
logout as auth_logout)
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.views import redirect_to_login
from django.contrib.sites.shortcuts import get_current_site
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import resolve_url
from django.template.response import TemplateResponse
from django.utils.six.moves.urllib.parse import urlparse
from django.utils import timezone
from common.log import logger
from bkaccount.encryption import encrypt, decrypt, salt
from bkaccount.models import Loignlog, BkToken, UserInfo
class AccountSingleton(object):
"""
    Singleton base class
"""
_instance = None
def __new__(cls, *args, **kwargs):
if not isinstance(cls._instance, cls):
cls._instance = object.__new__(cls, *args, **kwargs)
return cls._instance
class Account(AccountSingleton):
"""
    Base Account class for the account system.
    Provides common account functionality.
"""
    # cookie name
BK_COOKIE_NAME = settings.BK_COOKIE_NAME
    # cookie lifetime, 1 day by default
BK_COOKIE_AGE = settings.BK_COOKIE_AGE
    # login callback URL
REDIRECT_FIELD_NAME = 'c_url'
    # login URL
BK_LOGIN_URL = str(settings.LOGIN_URL)
    # allowed clock skew to tolerate time differences across machines, 1 minute
BK_TOKEN_OFFSET_ERROR_TIME = settings.BK_TOKEN_OFFSET_ERROR_TIME
def is_safe_url(self, url, host=None):
"""
        Check whether the url's root domain matches that of the current host.
        Returns False when:
        1) the root domains differ
        2) the url scheme is not http(s)
        3) the url is empty
"""
if url is not None:
url = url.strip()
if not url:
return False
# Chrome treats \ completely as /
url = url.replace('\\', '/')
# Chrome considers any URL with more than two slashes to be absolute, but
# urlparse is not so flexible. Treat any url with three slashes as unsafe.
if url.startswith('///'):
return False
url_info = urlparse(url)
# Forbid URLs like http:///example.com - with a scheme, but without a hostname.
# In that URL, example.com is not the hostname but, a path component. However,
# Chrome will still consider example.com to be the hostname, so we must not
# allow this syntax.
if not url_info.netloc and url_info.scheme:
return False
# Forbid URLs that start with control characters. Some browsers (like
# Chrome) ignore quite a few control characters at the start of a
# URL and might consider the URL as scheme relative.
if unicodedata.category(url[0])[0] == 'C':
return False
url_domain = url_info.netloc.split(':')[0].split('.')[-2] if url_info.netloc else ''
host_domain = host.split(':')[0].split('.')[-2] if host else ''
return ((not url_info.netloc or url_domain == host_domain) and
(not url_info.scheme or url_info.scheme in ['http', 'https']))
def get_bk_token(self, username):
"""
        Generate the user's login token (bk_token)
"""
bk_token = ''
expire_time = int(time.time())
        # retry up to 5 times
retry_count = 0
while not bk_token and retry_count < 5:
now_time = int(time.time())
expire_time = now_time + self.BK_COOKIE_AGE
plain_token = '%s|%s|%s' % (expire_time, username, salt())
bk_token = encrypt(plain_token)
try:
BkToken.objects.create(token=bk_token)
except Exception as error:
logger.exception('Login ticket failed to be saved during ticket generation, error: {}'.format(error))
                # clear bk_token so it is regenerated on the next retry (kept on the final attempt)
bk_token = '' if retry_count < 4 else bk_token
retry_count += 1
return bk_token, datetime.datetime.fromtimestamp(expire_time, timezone.get_current_timezone())
def _is_bk_token_valid(self, bk_token):
"""
        Validate the user's login token (bk_token)
"""
if not bk_token:
error_msg = _("缺少参数bk_token")
return False, None, error_msg
try:
plain_bk_token = decrypt(bk_token)
except Exception as error:
plain_bk_token = ''
logger.exception("Parameter parse failed, error: {}".format(error))
            # the bk_token parameter is invalid
error_msg = _("参数bk_token非法")
if not plain_bk_token:
return False, None, error_msg
token_info = plain_bk_token.split('|')
if not token_info or len(token_info) < 3:
return False, None, error_msg
try:
is_logout = BkToken.objects.get(token=bk_token).is_logout
except BkToken.DoesNotExist:
error_msg = _("不存在该bk_token的记录")
return False, None, error_msg
expire_time = int(token_info[0])
now_time = int(time.time())
        # the token has been logged out
if is_logout:
error_msg = _("登录态已注销")
return False, None, error_msg
        # the token has expired
if now_time > expire_time + self.BK_TOKEN_OFFSET_ERROR_TIME:
error_msg = _("登录态已过期")
return False, None, error_msg
        # the token expiry exceeds the maximum allowed lifetime
if expire_time - now_time > self.BK_COOKIE_AGE + self.BK_TOKEN_OFFSET_ERROR_TIME:
error_msg = _("登录态有效期不合法")
return False, None, error_msg
username = token_info[1]
return True, username, ""
def is_bk_token_valid(self, request):
bk_token = request.COOKIES.get(self.BK_COOKIE_NAME)
return self._is_bk_token_valid(bk_token)
def set_bk_token_invalid(self, request, response=None):
"""
        Invalidate the login ticket
"""
bk_token = request.COOKIES.get(self.BK_COOKIE_NAME)
if bk_token:
BkToken.objects.filter(token=bk_token).update(is_logout=True)
if response is not None:
# delete cookie
response.delete_cookie(self.BK_COOKIE_NAME, domain=settings.BK_COOKIE_DOMAIN)
return response
return None
def record_login_log(self, request, user, app_id):
"""
        Record the user's login log
"""
host = request.get_host()
login_browser = request.META.get('HTTP_USER_AGENT') or 'unknown'
        # get the user's IP
        login_ip = request.META.get('HTTP_X_FORWARDED_FOR') or request.META.get('REMOTE_ADDR')
Loignlog.objects.record_login(user, login_browser, login_ip, host, app_id)
def redirect_login(self, request):
"""
        Redirect to the login page.
        Called when login-state validation fails.
"""
if request.is_ajax():
return HttpResponse(status=401)
path = request.build_absolute_uri()
resolved_login_url = resolve_url(self.BK_LOGIN_URL)
# If the login url is the same scheme and net location then just
# use the path as the "next" url.
login_scheme, login_netloc = urlparse(resolved_login_url)[:2]
current_scheme, current_netloc = urlparse(path)[:2]
if ((not login_scheme or login_scheme == current_scheme) and
(not login_netloc or login_netloc == current_netloc)):
path = settings.SITE_URL[:-1] + request.get_full_path()
return redirect_to_login(
path, resolved_login_url, self.REDIRECT_FIELD_NAME)
def login(self, request, template_name='login/login.html',
authentication_form=AuthenticationForm,
current_app=None, extra_context=None):
"""
        Login page and login action
"""
redirect_field_name = self.REDIRECT_FIELD_NAME
redirect_to = request.POST.get(redirect_field_name,
request.GET.get(redirect_field_name, ''))
app_id = request.POST.get('app_id', request.GET.get('app_id', ''))
if request.method == 'POST':
form = authentication_form(request, data=request.POST)
if form.is_valid():
return self.login_success_response(request, form, redirect_to, app_id)
else:
form = authentication_form(request)
current_site = get_current_site(request)
context = {
'form': form,
redirect_field_name: redirect_to,
'site': current_site,
'site_name': current_site.name,
'app_id': app_id,
}
if extra_context is not None:
context.update(extra_context)
if current_app is not None:
request.current_app = current_app
response = TemplateResponse(request, template_name, context)
response = self.set_bk_token_invalid(request, response)
return response
def logout(self, request, next_page=None):
"""
        Log out and redirect to the login page
"""
redirect_field_name = self.REDIRECT_FIELD_NAME
auth_logout(request)
if (redirect_field_name in request.POST or redirect_field_name in request.GET):
next_page = request.POST.get(redirect_field_name,
request.GET.get(redirect_field_name))
# Security check -- don't allow redirection to a different host.
if not self.is_safe_url(url=next_page, host=request.get_host()):
next_page = request.path
if next_page:
# Redirect to this page until the session has been cleared.
response = HttpResponseRedirect(next_page)
else:
# Redirect to login url.
response = HttpResponseRedirect("{}?{}".format(self.BK_LOGIN_URL, "is_from_logout=1"))
        # invalidate the login ticket
response = self.set_bk_token_invalid(request, response)
return response
def login_failed_response(self, request, redirect_to, app_id):
"""
        Redirect on login failure; currently redirects to the login page, custom error pages may be supported later
"""
redirect_url = self.BK_LOGIN_URL
query = {}
if redirect_to:
query[self.REDIRECT_FIELD_NAME] = redirect_to
if app_id:
query['app_id'] = app_id
if len(query):
redirect_url = "{}?{}".format(self.BK_LOGIN_URL, urllib.urlencode(query))
response = HttpResponseRedirect(redirect_url)
response = self.set_bk_token_invalid(request, response)
return response
def login_success_response(self, request, user_or_form, redirect_to, app_id):
"""
        Handle login after the user has been successfully authenticated
"""
        # determine whether we were given a form or a user
if isinstance(user_or_form, AuthenticationForm):
user = user_or_form.get_user()
username = user_or_form.cleaned_data.get('username', '')
else:
user = user_or_form
username = user.username
        # check that the callback URL is safe, to prevent phishing
if not self.is_safe_url(url=redirect_to, host=request.get_host()):
redirect_to = resolve_url('{}accounts/user/list/'.format(settings.SITE_URL))
        # log the user in
auth_login(request, user)
        # record the login log
self.record_login_log(request, user, app_id)
bk_token, expire_time = self.get_bk_token(username)
response = HttpResponseRedirect(redirect_to)
response.set_cookie(self.BK_COOKIE_NAME, bk_token,
expires=expire_time,
domain=settings.BK_COOKIE_DOMAIN,
httponly=True)
# set cookie for app or platform
bk_user_info, is_created = UserInfo.objects.get_or_create(user=user)
response.set_cookie(settings.LANGUAGE_COOKIE_NAME, bk_user_info.language,
# max_age=settings.LANGUAGE_COOKIE_AGE,
expires=expire_time,
path=settings.LANGUAGE_COOKIE_PATH,
domain=settings.LANGUAGE_COOKIE_DOMAIN)
return response
def login_redirect_response(self, request, redirect_url, is_from_logout):
"""
        Login redirect
"""
response = HttpResponseRedirect(redirect_url)
        # if coming from a logout, the BlueKing bk_token must be invalidated
if is_from_logout:
response = self.set_bk_token_invalid(request, response)
return response
|
StarcoderdataPython
|
1775609
|
#! /usr/bin/env python
from typing import Optional
import numpy as np # type: ignore
from scipy.constants import g # type: ignore
def compact(
dz: np.ndarray,
porosity: np.ndarray,
c: float = 5e-8,
rho_grain: float = 2650.0,
excess_pressure: float = 0.0,
porosity_min: float = 0.0,
porosity_max: float = 1.0,
rho_void: float = 1000.0,
gravity: float = g,
return_dz: Optional[np.ndarray] = None,
) -> np.ndarray:
"""Compact a column of sediment.
Parameters
----------
dz : ndarray of float
Array of sediment thicknesses with depth (the first element is
the top of the sediment column) [meters].
porosity : ndarray or number
Sediment porosity [-].
c : ndarray or number, optional
Compaction coefficient that describes how easily the sediment is to
compact [Pa^-1].
rho_grain : ndarray or number, optional
Grain density of the sediment [kg / m^3].
excess_pressure : ndarray or number, optional
Excess pressure with depth [Pa].
porosity_min : ndarray or number, optional
Minimum porosity that can be achieved by the sediment. This is the
porosity of the sediment in its closest-compacted state [-].
porosity_max : ndarray or number, optional
Maximum porosity of the sediment. This is the porosity of the sediment
without any compaction [-].
rho_void : ndarray or number, optional
Density of the interstitial fluid [kg / m^3].
gravity : float, optional
Acceleration due to gravity [m / s^2].
return_dz : ndarray of float, optional
If provided, an output array into which to place the calculated
compacted layer thicknesses.
Returns
-------
porosity : ndarray
New porosities after compaction.
"""
dz, porosity = np.asarray(dz, dtype=float), np.asarray(porosity, dtype=float)
load = (rho_grain - rho_void) * dz * (1.0 - porosity) * gravity
overlying_load = np.cumsum(load, axis=0) - load - excess_pressure
porosity_new = porosity_min + (porosity_max - porosity_min) * np.exp(
-c * overlying_load
)
np.minimum(porosity_new, porosity, out=porosity_new)
if return_dz is not None:
if return_dz.dtype is dz.dtype and return_dz.shape == dz.shape:
contains_sediment = porosity_new < 1.0
np.divide(
dz * (1.0 - porosity),
1.0 - porosity_new,
where=contains_sediment,
out=return_dz,
)
return_dz[~contains_sediment] = 0.0
else:
raise TypeError(
"size and shape of return_dz ({0}, {1}) must be that of dz ({2}, {3})".format(
return_dz.dtype, return_dz.shape, dz.dtype, dz.shape
)
)
return porosity_new
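if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): compact a
    # five-layer column of 1 m thick sediment with a uniform starting
    # porosity. All numbers here are illustrative assumptions.
    dz = np.full(5, 1.0)
    porosity = np.full(5, 0.5)
    dz_new = np.empty_like(dz)
    porosity_new = compact(dz, porosity, porosity_max=0.5, return_dz=dz_new)
    print("compacted porosity:", porosity_new)
    print("compacted thickness:", dz_new)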
|
StarcoderdataPython
|
3329495
|
<reponame>computerboy0555/GBVision
from .recording_opencv_window import RecordingOpenCVWindow
from .feed_window import FeedWindow
class RecordingFeedWindow(RecordingOpenCVWindow, FeedWindow):
"""
a basic window that displays the stream from a stream receiver
"""
|
StarcoderdataPython
|
144318
|
import os
import requests
import json
class SimulationManagerException(Exception):
def __init__(self, status_code, message):
self.status_code = status_code
self.message = message
def createSimulation(incident_id, num_nodes, requested_walltime, kind, executable, queuestate_callbacks={}, directory=None, template_dir=None, comment=None, number_instances=1, associated_datasets=[]):
arguments = { 'incident_id': incident_id,
'num_nodes':num_nodes,
'requested_walltime' : requested_walltime,
'kind':kind,
'executable':executable,
'number_instances':number_instances,
'queuestate_calls':queuestate_callbacks,
'associated_datasets':associated_datasets }
if directory is not None:
arguments["directory"]=directory
if comment is not None:
arguments["comment"]=comment
if template_dir is not None:
arguments["template_dir"]=template_dir
createResponse = requests.post(_get_SM_URL()+'/create', json=arguments)
if createResponse.status_code == 201:
return createResponse.json()["simulation_id"]
else:
raise SimulationManagerException(createResponse.status_code, createResponse.text)
def submitSimulation(sim_id):
submitobj = {'simulation_uuid' : sim_id}
response = requests.post(_get_SM_URL()+'/submit', json=submitobj)
if response.status_code != 200:
raise SimulationManagerException(response.status_code, response.text)
def groupSimulations(sim_ids):
submitobj = {'simulation_uuids' : sim_ids}
response = requests.post(_get_SM_URL()+'/group', json=submitobj)
if response.status_code != 200:
raise SimulationManagerException(response.status_code, response.text)
def refreshSimilation(sim_id):
response = requests.post(_get_SM_URL()+'/refresh/'+sim_id)
if response.status_code != 200:
raise SimulationManagerException(response.status_code, response.text)
def cancelSimulation(sim_id):
response = requests.delete(_get_SM_URL()+'/simulation/'+sim_id)
if response.status_code != 200:
raise SimulationManagerException(response.status_code, response.text)
#returns information about the simulation with sim_id as a dictionary
def getSimulationInfo(sim_id):
response = requests.get(_get_SM_URL()+'/info/'+sim_id)
if response.status_code != 200:
raise SimulationManagerException(response.status_code, response.text)
return json.loads(response.text)
def getSMHealth():
try:
health_status = requests.get(_get_SM_URL() + '/health')
return health_status.status_code == 200
except:
return False
def _get_SM_URL():
if "VESTEC_SM_URI" in os.environ:
return os.environ["VESTEC_SM_URI"]
else:
return 'http://localhost:5500/SM'
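if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module). It assumes a
    # Simulation Manager is reachable via VESTEC_SM_URI (or the localhost
    # default); getSMHealth() simply returns False otherwise. The incident id,
    # walltime, kind and executable below are hypothetical example values.
    if getSMHealth():
        sim_id = createSimulation(
            incident_id="example-incident",
            num_nodes=1,
            requested_walltime="00:10:00",
            kind="test",
            executable="run.sh",
        )
        submitSimulation(sim_id)
        print(getSimulationInfo(sim_id))
    else:
        print("Simulation Manager is not reachable")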
|
StarcoderdataPython
|
3380687
|
import os
import uuid
import unittest
from spaceone.core.unittest.runner import RichTestRunner
from spaceone.tester.unittest import TestCase, to_json, print_json
def random_string():
return uuid.uuid4().hex
TOKEN = os.environ.get('KEYCLOAK_TOKEN', 'export KEYCLOAK_TOKEN=<KEY>')
OPENID_CONFIGURATION = os.environ.get('OPENID_CONFIGURATION', 'export OPENID_CONFIGURATION=https://yyyyy')
CLIENT_ID = os.environ.get('CLIENT_ID','export CLIENT_ID=zzzzzzz')
CLIENT_SECRET = os.environ.get('CLIENT_SECRET', 'export CLIENT_SECRET=aaaaaaaaa')
OPTIONS = {
'openid-configuration': OPENID_CONFIGURATION,
'auth_type': 'keycloak_oidc',
'client_id': CLIENT_ID
}
SECRET_DATA = {
'client_id': CLIENT_ID,
'client_secret': CLIENT_SECRET
}
SCHEMA = 'oauth2_client_credentials'
class TestOAuth(TestCase):
    @unittest.skip('WRONG ACCESS_TOKEN')
def test_login(self):
credentials = SECRET_DATA
user_credentials = {
'access_token': TOKEN
}
user_info = self.identity.Auth.login({'options':OPTIONS, 'secret_data':credentials, 'schema': SCHEMA, 'user_credentials':user_credentials})
user_info_json = to_json(user_info)
print(user_info_json)
self.assertEqual(user_info_json['state'], 'ENABLED')
def test_init(self):
credentials = {}
auth_v_info = self.identity.Auth.init({'options':OPTIONS})
j = to_json(auth_v_info)
print(j)
def test_verify(self):
credentials = SECRET_DATA
auth_v_info = self.identity.Auth.verify({'options':OPTIONS, 'secret_data': credentials, 'schema': SCHEMA})
j = to_json(auth_v_info)
print(j)
def test_find_user_id(self):
credentials = SECRET_DATA
user_id = '<EMAIL>'
keyword = 'mz.co.kr'
users_info = self.identity.Auth.find({'options':OPTIONS, 'secret_data':credentials, 'schema': SCHEMA, 'user_id':user_id})
j = to_json(users_info)
print(j)
self.assertEqual(j['total_count'], 1)
def test_find_keyword(self):
credentials = SECRET_DATA
keyword = 'mz.co.kr'
users_info = self.identity.Auth.find({'options':OPTIONS, 'secret_data':credentials, 'schema': SCHEMA, 'keyword':keyword})
j = to_json(users_info)
print(j)
self.assertGreaterEqual(j['total_count'], 1)
def test_find_failure(self):
""" not found users
"""
credentials = SECRET_DATA
user_id = '<EMAIL>'
try:
users_info = self.identity.Auth.find({'options':OPTIONS, 'secret_data':credentials, 'schema': SCHEMA, 'user_id':user_id})
except Exception as e:
print(e)
if __name__ == "__main__":
unittest.main(testRunner=RichTestRunner)
|
StarcoderdataPython
|
1757540
|
<filename>dival/util/plot.py<gh_stars>10-100
# -*- coding: utf-8 -*-
"""Provides utility functions for visualization."""
from warnings import warn
from math import ceil
import matplotlib.pyplot as plt
# import mpl_toolkits.axes_grid.axes_size as Size
# from mpl_toolkits.axes_grid import Divider
import numpy as np
def plot_image(x, fig=None, ax=None, **kwargs):
"""Plot image using matplotlib's :meth:`imshow` method.
Parameters
----------
x : array-like or PIL image
The image data. For further information see `imshow documentation
<https://matplotlib.org/api/_as_gen/matplotlib.pyplot.imshow.html>`_.
fig : :class:`matplotlib.figure.Figure`, optional
The figure to plot the image in. If ``fig is None``, but `ax` is given,
it is retrieved from `ax`. If both ``fig is None`` and ``ax is None``,
a new figure is created.
ax : :class:`matplotlib.axes.Axes`, optional
The axes to plot the image in. If `None`, an axes object is created
in `fig`.
kwargs : dict, optional
Keyword arguments passed to ``ax.imshow``.
Returns
-------
im : :class:`matplotlib.image.AxesImage`
The image that was plotted.
ax : :class:`matplotlib.axes.Axes`
The axes the image was plotted in.
"""
if fig is None:
if ax is None:
fig = plt.figure()
else:
fig = ax.get_figure()
if ax is None:
ax = fig.add_subplot(111)
kwargs.setdefault('cmap', 'gray')
xticks = kwargs.pop('xticks', None)
yticks = kwargs.pop('yticks', None)
if xticks is not None:
ax.set_xticks(xticks)
if yticks is not None:
ax.set_yticks(yticks)
im = ax.imshow(np.asarray(x).T, **kwargs)
return im, ax
def plot_images(x_list, nrows=1, ncols=-1, fig=None, vrange='equal',
cbar='auto', rect=None, fig_size=None, **kwargs):
"""Plot multiple images using matplotlib's :meth:`imshow` method in
subplots.
Parameters
----------
x_list : sequence of (array-like or PIL image)
List of the image data. For further information see `imshow
documentation
<https://matplotlib.org/api/_as_gen/matplotlib.pyplot.imshow.html>`_.
nrows : int, optional
The number of subplot rows (the default is 1). If -1, it is computed by
``ceil(len(x_list)/ncols)``, or set to 1 if `ncols` is not given.
ncols : int, optional
The number of subplot columns. If -1, it is computed by
``ceil(len(x_list)/nrows)`` (default). If both `nrows` and `ncols` are
given, the value of `ncols` is ignored.
vrange : {``'equal'``, ``'individual'``} or [list of ](float, float),\
optional
Value ranges for the colors of the images.
If a string is passed, the range is auto-computed:
``'equal'``
The same colors are used for all images.
``'individual'``
The colors differ between the images.
If a tuple of floats is passed, it is used for all images.
If a list of tuples of floats is passed, each tuple is used for one
image.
cbar : {``'one'``, ``'many'``, ``'auto'``, ``'none'``}, optional
Colorbar option.
If ``cbar=='one'``, one colorbar is shown. Only possible if the value
ranges used for the colors (cf. `vrange`) are the same for all images.
If ``cbar=='many'``, a colorbar is shown for every image.
If ``cbar=='auto'``, either ``'one'`` or ``'many'`` is chosen,
depending on whether `vrange` is equal for all images.
If ``cbar=='none'``, no colorbars are shown.
fig : :class:`matplotlib.figure.Figure`, optional
The figure to plot the images in. If `None`, a new figure is created.
kwargs : dict, optional
Keyword arguments passed to `plot_image`, which in turn passes them to
``imshow``.
Returns
-------
im : ndarray of :class:`matplotlib.image.AxesImage`
The images that were plotted.
ax : ndarray of :class:`matplotlib.axes.Axes`
The axes the images were plotted in.
"""
try:
x_list = list(x_list)
except TypeError:
raise TypeError('x_list must be iterable. Pass a sequence or use '
'`plot_image` to plot single images.')
for i in range(len(x_list)):
x_list[i] = np.asarray(x_list[i])
if fig is None:
fig = plt.figure()
if nrows is None or nrows == -1:
if ncols is None or ncols == -1:
nrows = 1
else:
nrows = ceil(len(x_list)/ncols)
ncols = ceil(len(x_list)/nrows)
if rect is None:
rect = [0.1, 0.1, 0.8, 0.8]
if fig_size is not None:
fig.set_size_inches(fig_size)
if isinstance(vrange, str):
if vrange == 'equal':
vrange_ = [(min((np.min(x) for x in x_list)),
max((np.max(x) for x in x_list)))] * len(x_list)
VRANGE_EQUAL = True
elif vrange == 'individual':
vrange_ = [(np.min(x), np.max(x)) for x in x_list]
VRANGE_EQUAL = False
else:
raise ValueError("`vrange` must be 'equal' or 'individual'")
elif isinstance(vrange, tuple) and len(vrange) == 2:
vrange_ = [vrange] * len(x_list)
VRANGE_EQUAL = True
else:
vrange_ = vrange
VRANGE_EQUAL = False
if not VRANGE_EQUAL:
if cbar == 'one':
warn("cannot use cbar='one' when vrange is not equal for all"
"images, falling back to cbar='many'")
if cbar != 'none':
cbar = 'many'
elif cbar == 'auto':
cbar = 'one'
ax = fig.subplots(nrows, ncols)
if isinstance(ax, plt.Axes):
ax = np.atleast_1d(ax)
im = np.empty(ax.shape, dtype=object)
for i, (x, ax_, v) in enumerate(zip(x_list, ax.flat, vrange_)):
im_, _ = plot_image(x, ax=ax_, vmin=v[0], vmax=v[1], **kwargs)
im.flat[i] = im_
if cbar == 'many':
fig.colorbar(im_, ax=ax_)
if cbar == 'one':
fig.colorbar(im[0], ax=ax)
return im, ax
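if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): plot two random
    # images side by side with a single shared colorbar. Assumes a usable
    # matplotlib backend; the random data is purely illustrative.
    rng = np.random.default_rng(0)
    images = [rng.random((64, 64)), 2.0 * rng.random((64, 64))]
    im, ax = plot_images(images, ncols=2, vrange='equal', cbar='one')
    plt.show()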
|
StarcoderdataPython
|
157375
|
default_app_config = 'business.staff_accounts.apps.UserManagementConfig'
"""
This APP is for management of users
Functions:-
Adding staff Users and giving them initial details
-Department
-Staff Type
-Departmental,General Managers have predefined roles depending on the departments they can access
"""
|
StarcoderdataPython
|
7173
|
import torch.nn as nn
from .basic import *
class squeeze_excitation_2d(nn.Module):
"""Squeeze-and-Excitation Block 2D
Args:
channel (int): number of input channels.
channel_reduction (int): channel squeezing factor.
spatial_reduction (int): pooling factor for x,y axes.
"""
def __init__(self, channel, channel_reduction=4, spatial_reduction=4, norm_mode='bn', act_mode='elu'):
super(squeeze_excitation_2d, self).__init__()
self.pool_size = (spatial_reduction, spatial_reduction)
layers = [nn.AvgPool2d(kernel_size=self.pool_size, stride=self.pool_size)]
layers += conv2d_norm_act(channel, channel // channel_reduction, kernel_size=1, padding=0, norm_mode=norm_mode, act_mode=act_mode, return_list=True)
layers += conv2d_norm_act(channel // channel_reduction, channel, kernel_size=1, padding=0, norm_mode=norm_mode, return_list=True)
        layers += [nn.Sigmoid(),
                   nn.Upsample(scale_factor=self.pool_size, mode='bilinear', align_corners=False)]
self.se = nn.Sequential(*layers)
def forward(self, x):
y = self.se(x)
z = x + y*x
return z
class squeeze_excitation_3d(nn.Module):
"""Squeeze-and-Excitation Block 3D
Args:
channel (int): number of input channels.
channel_reduction (int): channel squeezing factor.
spatial_reduction (int): pooling factor for x,y axes.
z_reduction (int): pooling factor for z axis.
"""
def __init__(self, channel, channel_reduction=4, spatial_reduction=4, z_reduction=1, norm_mode='bn', act_mode='elu'):
super(squeeze_excitation_3d, self).__init__()
self.pool_size = (z_reduction, spatial_reduction, spatial_reduction)
layers = [nn.AvgPool3d(kernel_size=self.pool_size, stride=self.pool_size)]
layers += conv3d_norm_act(channel, channel//channel_reduction, kernel_size=1, padding=0, norm_mode=norm_mode, act_mode=act_mode, return_list=True)
layers += conv3d_norm_act(channel//channel_reduction, channel, kernel_size=1, padding=0, norm_mode=norm_mode, return_list=True)
layers += [nn.Sigmoid(),
nn.Upsample(scale_factor=self.pool_size, mode='trilinear', align_corners=False)]
self.se = nn.Sequential(*layers)
def forward(self, x):
y = self.se(x)
z = x + y*x
return z
|
StarcoderdataPython
|
42480
|
"""
An AWS Lambda function used to run periodic background jobs on ECS.
The complication with running these tasks is that we need to run them on
the same version of the Docker image that the web servers are currently
running on.
"""
import logging
import boto3
from utils import env_list, env_param
# Logging setup is done by Lambda
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# Initialize clients for the required services.
ecs = boto3.client('ecs')
def handler(event, context):
"""
The entry point into the lambda function.
This function finds the version of the Docker image running a
specific service on a specific ECS cluster and then launches a new
task on the same ECS cluster running an overridden version of the
same Docker image.
Args:
event:
A dictionary containing information provided when the
function was invoked.
context:
An unknown object containing additional context for the
request.
Returns:
A dictionary containing a response code and status message.
"""
# The name of the ECS cluster running the existing API task.
cluster = env_param('CLUSTER')
# The name of the service running on the cluster whose image should
# be used to run the background jobs.
service = env_param('SERVICE')
# The name of the container within the service to override.
container_name = env_param('CONTAINER_NAME')
# A list of security group IDs that the migration task should be
# placed in.
security_groups = env_list('SECURITY_GROUPS')
# A list of subnet IDs corresponding to the subnets the migration
# task may be placed in.
subnet_ids = env_list('SUBNETS')
logger.info('Beginning process of running background tasks.')
logger.info(
'Searching cluster "%s" for "%s" service...',
cluster,
service,
)
logger.info(
'Task to be executed with security groups %s in subnets %s',
security_groups,
subnet_ids,
)
# The first step is to describe the service so we can get access to
# the task definition being used.
services_info = ecs.describe_services(cluster=cluster, services=[service])
assert len(services_info['services']) == 1, (
'Received multiple services. Aborting!'
)
logger.info('Received information about "%s" service.', service)
service_info = services_info['services'][0]
task_definition_arn = service_info['taskDefinition']
logger.info(
'ARN of task definition from service is %s',
task_definition_arn,
)
# Pull roles from task definition information. We run the background
# task with the same roles that the API web server tasks normally
# run under.
task_definition = ecs.describe_task_definition(
taskDefinition=task_definition_arn,
)
execution_role = task_definition['taskDefinition']['executionRoleArn']
task_role = task_definition['taskDefinition']['taskRoleArn']
logger.info(
'Will execute task with role %s as role %s',
task_role,
execution_role,
)
# Using the parameters we have pulled from the previous steps, we
# launch what is essentially a modified version of the webserver
# task that performs the tasks required to migrate between versions
# of the codebase.
ecs.run_task(
cluster=cluster,
launchType='FARGATE',
overrides={
'containerOverrides': [
{
'command': ['background-jobs'],
'name': container_name,
},
],
# The role used by the ECS agent.
'executionRoleArn': execution_role,
# The role our code runs under.
'taskRoleArn': task_role,
},
networkConfiguration={
'awsvpcConfiguration': {
# Need to assign a public IP so the image can be pulled.
'assignPublicIp': 'ENABLED',
'securityGroups': security_groups,
'subnets': subnet_ids,
},
},
taskDefinition=task_definition_arn,
)
return {
'body': 'Success',
'statusCode': 200
}
|
StarcoderdataPython
|
1693003
|
from django.contrib import admin
from .models import Tag, Article
# admin.site.register(Tag)
# admin.site.register(Article)
def set_active(modeladmin, request, queryset):
queryset.update(is_active = True)
class ArticleAdmin(admin.ModelAdmin):
list_display = ['article_name','article_text', 'is_active','article_rating']
actions = [set_active]
class TagAdmin(admin.ModelAdmin):
list_display = ['tag_name','is_active']
actions = [set_active]
admin.site.register(Tag, TagAdmin)
admin.site.register(Article, ArticleAdmin)
|
StarcoderdataPython
|
139396
|
<filename>mooiter/account.py<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Mooiter
# Copyright 2010 <NAME>
# See LICENCE for details.
import sys
import base64
#Test third party modules
try:
import tweepy
from PyQt4 import QtGui
from PyQt4 import QtCore
except ImportError as e:
print "Import Error" + e
class TwitterAccount(QtGui.QDialog):
def __init__(self, Parent=None):
super(TwitterAccount, self).__init__(Parent)
#Garbage collect on dialog close
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.settings = QtCore.QSettings("cutiepie4", "Mooiter")
self.setWindowTitle("Account")
vbox = QtGui.QVBoxLayout()
hbox = QtGui.QHBoxLayout()
vboxlabels = QtGui.QVBoxLayout()
vboxedits = QtGui.QVBoxLayout()
hboxbuttons = QtGui.QHBoxLayout()
delete = QtGui.QPushButton('&Delete')
buttonbox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Save|
QtGui.QDialogButtonBox.Close)
#Create horizontal line
seperator = QtGui.QFrame()
seperator.setFrameShape(QtGui.QFrame.HLine)
seperator.setFrameShadow(QtGui.QFrame.Sunken)
self.useredit = QtGui.QLineEdit()
self.passwordedit = QtGui.QLineEdit()
self.useredit.setMinimumWidth(200)
self.passwordedit.setMinimumWidth(200)
self.passwordedit.setEchoMode(QtGui.QLineEdit.Password)
labeluser = QtGui.QLabel("&Username:")
labelpassword = QtGui.QLabel("&Password:")
labeluser.setBuddy(self.useredit)
labelpassword.setBuddy(self.passwordedit)
vboxlabels.addWidget(labeluser)
vboxlabels.addWidget(labelpassword)
vboxedits.addWidget(self.useredit)
vboxedits.addWidget(self.passwordedit)
hboxbuttons.addStretch()
hboxbuttons.addWidget(delete)
hboxbuttons.addWidget(buttonbox)
hbox.addLayout(vboxlabels)
hbox.addLayout(vboxedits)
vbox.addLayout(hbox)
vbox.addWidget(seperator)
vbox.addLayout(hboxbuttons)
self.setLayout(vbox)
self.useredit.setFocus()
self.setTabOrder(self.useredit, self.passwordedit)
self.setTabOrder(delete, buttonbox)
self.connect(buttonbox.button(QtGui.QDialogButtonBox.Save),
QtCore.SIGNAL("clicked()"), self.new_account)
self.connect(buttonbox, QtCore.SIGNAL("rejected()"),
self, QtCore.SLOT("reject()"))
self.connect(delete, QtCore.SIGNAL('clicked()'), self.delete_account)
#Find out if an account already exists
if self.settings.contains("User") and self.settings.contains("use"):
username = base64.b64decode(self.settings.value("User").toString())
            password = base64.b64decode(self.settings.value("use").toString())
self.useredit.setText(unicode(username))
self.passwordedit.setText(unicode(password))
def new_account(self):
"""Verfiy and store twitter account details"""
username = self.useredit.text()
password = <PASSWORD>()
#Verfiy twitter account exists on twitter.
auth = tweepy.BasicAuthHandler(username, password)
api = tweepy.API(auth)
if not api.verify_credentials():
QtGui.QMessageBox.warning(self, 'Warning',
"Could not authenticate twitter account",
QtGui.QMessageBox.Ok)
else:
#Store username and password
self.settings.setValue("User", (QtCore.QVariant\
(base64.b64encode(str(username)))))
self.settings.setValue("use", (QtCore.QVariant\
(base64.b64encode(str(password)))))
#Signal account change to main window
self.emit(QtCore.SIGNAL("changed"))
print "pie"
def delete_account(self):
"""Remove all twitter account details"""
self.settings.remove("User")
self.settings.remove("use")
self.useredit.setText("")
self.passwordedit.setText("")
#Signal account change to main window
self.emit(QtCore.SIGNAL("changed"))
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
meep = TwitterAccount()
meep.show()
sys.exit(app.exec_())
|
StarcoderdataPython
|
20407
|
# "javascript" section for javascript. see @app.route('/config.js') in app/views.py
# oauth constants
HOSTNAME = "http://hackathon.chinacloudapp.cn" # host name of the UI site
QQ_OAUTH_STATE = "openhackathon" # todo state should be constant. Actually it should be unguessable to prevent CSFA
HACkATHON_API_ENDPOINT = "http://hackathon.chinacloudapp.cn:15000"
Config = {
"environment": "local",
"login": {
"github": {
"access_token_url": 'https://github.com/login/oauth/access_token?client_id=a10e2290ed907918d5ab&client_secret=5b240a2a1bed6a6cf806fc2f34eb38a33ce03d75&redirect_uri=%s/github&code=' % HOSTNAME,
"user_info_url": 'https://api.github.com/user?access_token=',
"emails_info_url": 'https://api.github.com/user/emails?access_token='
},
"qq": {
"access_token_url": 'https://graph.qq.com/oauth2.0/token?grant_type=authorization_code&client_id=101192358&client_secret=d94f8e7baee4f03371f52d21c4400cab&redirect_uri=%s/qq&code=' % HOSTNAME,
"openid_url": 'https://graph.qq.com/oauth2.0/me?access_token=',
"user_info_url": 'https://graph.qq.com/user/get_user_info?access_token=%s&oauth_consumer_key=%s&openid=%s'
},
"gitcafe": {
"access_token_url": 'https://api.gitcafe.com/oauth/token?client_id=25ba4f6f90603bd2f3d310d11c0665d937db8971c8a5db00f6c9b9852547d6b8&client_secret=e3d821e82d15096054abbc7fbf41727d3650cab6404a242373f5c446c0918634&redirect_uri=%s/gitcafe&grant_type=authorization_code&code=' % HOSTNAME
},
"provider_enabled": ["github", "qq", "gitcafe"],
"session_minutes": 60,
"token_expiration_minutes": 60 * 24
},
"hackathon-api": {
"endpoint": HACkATHON_API_ENDPOINT
},
"javascript": {
"renren": {
"clientID": "client_id=7e0932f4c5b34176b0ca1881f5e88562",
"redirect_url": "redirect_uri=%s/renren" % HOSTNAME,
"scope": "scope=read_user_message+read_user_feed+read_user_photo",
"response_type": "response_type=token",
},
"github": {
"clientID": "client_id=a10e2290ed907918d5ab",
"redirect_uri": "redirect_uri=%s/github" % HOSTNAME,
"scope": "scope=user",
},
"google": {
"clientID": "client_id=304944766846-7jt8jbm39f1sj4kf4gtsqspsvtogdmem.apps.googleusercontent.com",
"redirect_url": "redirect_uri=%s/google" % HOSTNAME,
"scope": "scope=https://www.googleapis.com/auth/userinfo.profile+https://www.googleapis.com/auth/userinfo.email",
"response_type": "response_type=token",
},
"qq": {
"clientID": "client_id=101192358",
"redirect_uri": "redirect_uri=%s/qq" % HOSTNAME,
"scope": "scope=get_user_info",
"state": "state=%s" % QQ_OAUTH_STATE,
"response_type": "response_type=code",
},
"gitcafe": {
"clientID": "client_id=25ba4f6f90603bd2f3d310d11c0665d937db8971c8a5db00f6c9b9852547d6b8",
"clientSecret": "client_secret=<KEY>",
"redirect_uri": "redirect_uri=http://hackathon.chinacloudapp.cn/gitcafe",
"response_type": "response_type=code",
"scope": "scope=public"
},
"hackathon": {
"name": "open-xml-sdk",
"endpoint": HACkATHON_API_ENDPOINT
}
}
}
|
StarcoderdataPython
|
1781639
|
<reponame>amplify-education/bcfg2
from django.contrib.auth.models import User
import sys
from nisauth import *
class NISBackend(object):
    def authenticate(self, username=None, password=None):
try:
print("start nis authenticate")
n = nisauth(username, password)
temp_pass = User.objects.make_random_password(100)
nis_user = dict(username=username,
)
user_session_obj = dict(email=username,
first_name=None,
last_name=None,
uid=n.uid)
user, created = User.objects.get_or_create(username=username)
return user
except NISAUTHError:
e = sys.exc_info()[1]
print(e)
return None
def get_user(self, user_id):
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
e = sys.exc_info()[1]
print(e)
return None
|
StarcoderdataPython
|
1674106
|
<filename>source/lp_solver.py
"""
Solves the battery control problem using the LP
formulation
"""
import numpy as np
import pulp
def solve_lp(load, price_buy, price_sell, h, b_0, b_max, b_min, eff_c, eff_d, d_max, d_min, commitment=None, eps=1e-5):
"""
Solves the LP asociated with controlling a battery
Params:
load, np.array: net consumption in KW (positive if consuming and negative if surplus or renewable)
in each timeslot
price_buy, np.array: price of buying each kWh in each timeslot
price_sell, np.array: price of selling each kWh in each timeslot
h, float: duration of each timeslot in hours
b_0, float: initial energy in the battery in kWh
b_max, float: maximum capacity of the battery in kWh
b_min, float: minimum capacity of the battery in kWh
eff_c, float: efficiency of charging the battery (0, 1]
eff_d, float: efficiency of discharging the battery (0, 1]
d_max, float: maximum amount of power that the battery can charge in kW
d_min, float: maximum amount of power that the battery can discharge in kW
    Returns:
        status, objective, xs, xc, xd: the LP status string, the optimal cost,
        and the per-timeslot net exchange, charge and discharge energy in kWh
"""
N = load.shape[0]
energy_cons = load * h
keys = [str(i) for i in range(N)]
inv_ec = 1.0 / eff_c
x_c = pulp.LpVariable.dicts('xc', keys, 0, d_max * h) # 0 <= xc <= d_max * h
x_d = pulp.LpVariable.dicts('xd', keys, 0, -d_min * h) # 0 <= xc <= - d_min * h
ts = pulp.LpVariable.dicts('ts', keys)
prob = pulp.LpProblem("Battery control",pulp.LpMinimize)
prob += pulp.lpSum([ts[k] for k in keys]) # Objective functions E t_i
for i in range(N):
si = str(i)
prob += (price_buy[i] * (x_c[si] * inv_ec - x_d[si] * eff_d + energy_cons[i])) <= ts[si]
prob += (price_sell[i] * (x_c[si] * inv_ec - x_d[si] * eff_d + energy_cons[i])) <= ts[si]
prob += pulp.lpSum([x_c[str(j)] - x_d[str(j)] for j in range(i + 1)]) <= b_max - b_0
prob += pulp.lpSum([x_c[str(j)] - x_d[str(j)] for j in range(i + 1)]) >= b_min - b_0
i = 0
si = '0'
if commitment is not None:
if commitment > 0:
prob += (x_c[si] * inv_ec - x_d[si] * eff_d + energy_cons[i]) >= (commitment - eps)
else:
prob += (x_c[si] * inv_ec - x_d[si] * eff_d + energy_cons[i]) <= (commitment + eps)
prob.solve(pulp.GLPK(msg=0))
#prob.solve()
xs = np.array([x_c[k].varValue - x_d[k].varValue for k in keys])
xc = np.array([x_c[k].varValue for k in keys])
xd = np.array([x_d[k].varValue for k in keys])
return pulp.LpStatus[prob.status], pulp.value(prob.objective), xs, xc, xd
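if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): a four-slot
    # horizon with a flat 1 kW load and a cheap-then-expensive buy price, so
    # charging early and discharging later should be cheaper. All values are
    # illustrative assumptions; the GLPK solver used above must be installed.
    load = np.array([1.0, 1.0, 1.0, 1.0])
    price_buy = np.array([0.10, 0.10, 0.40, 0.40])
    price_sell = np.array([0.05, 0.05, 0.05, 0.05])
    status, cost, xs, xc, xd = solve_lp(
        load, price_buy, price_sell,
        h=1.0, b_0=0.0, b_max=5.0, b_min=0.0,
        eff_c=0.95, eff_d=0.95, d_max=2.0, d_min=-2.0)
    print(status, cost)
    print("net battery energy per slot (kWh):", xs)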
|
StarcoderdataPython
|
73901
|
from concurrent.futures import Future, ThreadPoolExecutor
import logging
from vonx.indy.messages import StoredCredential
from vonx.web.view_helpers import (
IndyCredentialProcessor,
IndyCredentialProcessorException,
)
from api_indy.indy.credential import Credential, CredentialException, CredentialManager
from .boot import run_django_proc
LOGGER = logging.getLogger(__name__)
class CredentialProcessorQueue(IndyCredentialProcessor):
def __init__(self, max_threads=10):
super(CredentialProcessorQueue, self).__init__()
self._max_threads = max_threads
def setup(self, app):
app["credqueue"] = self
app.on_startup.append(self.app_start)
app.on_cleanup.append(self.app_stop)
async def app_start(self, _app=None):
self.start()
async def app_stop(self, _app=None):
self.stop()
def start(self):
self._executor = ThreadPoolExecutor(max_workers=self._max_threads)
def stop(self):
self._executor.shutdown(True)
def start_batch(self) -> object:
"""
May return batch info used for caching and/or scheduling
"""
return {"manager": CredentialManager()}
def get_manager(self, batch_info):
if batch_info:
return batch_info["manager"]
return CredentialManager()
def process_credential(
self, stored: StoredCredential, origin_did: str = None, batch_info=None) -> Future:
"""
Perform credential processing and create related objects.
Processing can be deferred until end_batch to determine appropriate chunk size,
currently using naive :class:`ThreadPoolExecutor`
"""
cred = Credential(stored.cred.cred_data, stored.cred.cred_req_metadata, stored.cred_id)
credential_manager = self.get_manager(batch_info)
LOGGER.info("Processing credential %s for DID %s", stored.cred_id, origin_did)
def proc():
try:
return credential_manager.process(cred, origin_did)
except CredentialException as e:
raise IndyCredentialProcessorException(str(e)) from e
return self._executor.submit(run_django_proc, proc)
def end_batch(self, batch_info):
"""
Ensure that processing has been kicked off
"""
pass
|
StarcoderdataPython
|
1628259
|
# -*- coding: UTF-8 -*-
# pep8: disable-msg=E501
# pylint: disable=C0301
import os
import logging
import getpass
import tempfile
__version__ = '0.0.5'
__author__ = '<NAME>'
__author_username__ = 'marco.lovato'
__author_email__ = '<EMAIL>'
__description__ = 'A command-line tool to create projects \
from templates, to start your python work.'
log_filename = os.path.join(tempfile.gettempdir(),
'machete-' + getpass.getuser() + '.log')
log = logging
log.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(levelname)s %(message)s',
filename=log_filename,
filemode='a')
def __path(filename):
return os.path.join(os.path.dirname(os.path.realpath(__file__)), filename)
# Jenkins
if os.getenv("BUILD_NUMBER"):
file_ = open(__path('build.info'), 'w')
file_.write(os.getenv("TRAVIS_BUILD_NUMBER"))
file_.close()
# Travis
if os.getenv("TRAVIS_BUILD_NUMBER"):
file_ = open(__path('build.info'), 'w')
file_.write(os.getenv("TRAVIS_BUILD_NUMBER"))
file_.close()
__build__ = '0'
if os.path.exists(__path('build.info')):
__build__ = open(__path('build.info')).read().strip()
__version__ = __version__ + '.' + __build__
|
StarcoderdataPython
|
149682
|
from django.db import models
import uuid
from django.db.models.deletion import CASCADE
from django.utils.translation import ugettext_lazy as _
from registry.models import Aircraft, Contact
class AerobridgeCredential(models.Model):
''' A class to store tokens from Digital Sky '''
KEY_ENVIRONMENT = ((0, _('DIGITAL SKY OPERATOR')),(1, _('DIGITAL SKY MANUFACTURER')),(2, _('DIGITAL SKY PILOT')),(3, _('RFM')),(4, _('DSC / eMudra Token')),)
TOKEN_TYPE= ((0, _('PUBLIC_KEY')),(1, _('PRIVATE_KEY')),(2, _('AUTHENTICATION TOKEN')),(4, _('OTHER')),)
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
name = models.CharField(max_length=100, help_text="Enter a description for the type of credential you are storing")
token_type = models.IntegerField(choices=TOKEN_TYPE)
association = models.IntegerField(choices=KEY_ENVIRONMENT, default = 4)
token = models.BinaryField()
is_active = models.BooleanField(default = True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def token_type_verbose(self):
return dict(AerobridgeCredential.TOKEN_TYPE)[self.token_type]
|
StarcoderdataPython
|
3316379
|
<reponame>lunastorm/wissbi
#!/usr/bin/env python
import sys
import re
import socket
sep_re = re.compile("^==> (\/.*) <==$")
current_logger = ""
hostname = socket.gethostname()
while True:
line = sys.stdin.readline()
if len(line) == 0:
break
line = line.strip()
if len(line) == 0:
continue
matcher = sep_re.match(line)
if matcher != None:
current_logger = matcher.group(1)
continue
sys.stdout.write("%s\t%s\t%s\n" % (hostname, current_logger, line))
sys.stdout.flush()
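# Usage sketch (assumed, not from the original): feed this script the output of
# a multi-file tail, e.g.  tail -F /var/log/app/*.log | ./this_script.py
# Each line is re-emitted as "<hostname>\t<log file>\t<line>".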
|
StarcoderdataPython
|
3209966
|
from napalm import get_network_driver
import pprint as pp
driver = get_network_driver('eos')
device = driver('sw-2', 'admin', 'alta3')
device.open()
pp.pprint(device.compliance_report("/home/student/pyna/sw2_validate01.yml"))
device.close()
|
StarcoderdataPython
|
1704177
|
<gh_stars>0
import asyncio
import os
import re
import sqlite3
import click
import pyperclip
from salmon import config
from salmon.common import AliasedCommands, commandgroup
from salmon.database import DB_PATH
from salmon.errors import ImageUploadFailed
from salmon.images import imgur, mixtape, ptpimg, vgy
loop = asyncio.get_event_loop()
HOSTS = {
"ptpimg": ptpimg,
"mixtape": mixtape,
"vgy.me": vgy,
"imgur": imgur,
}
def validate_image_host(ctx, param, value):
try:
return HOSTS[value]
except KeyError:
raise click.BadParameter(f"{value} is not a valid image host")
@commandgroup.group(cls=AliasedCommands)
def images():
"""Create and manage uploads to image hosts"""
pass
@images.command()
@click.argument(
"filepaths",
type=click.Path(exists=True, dir_okay=False, resolve_path=True),
nargs=-1,
)
@click.option(
"--image-host",
"-i",
help="The name of the image host to upload to",
default=config.IMAGE_UPLOADER,
callback=validate_image_host,
)
def up(filepaths, image_host):
"""Upload images to an image host"""
with sqlite3.connect(DB_PATH) as conn:
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
urls = []
try:
tasks = [
loop.run_in_executor(None, lambda f=f: image_host.upload_file(f))
for f in filepaths
]
for url, deletion_url in loop.run_until_complete(asyncio.gather(*tasks)):
cursor.execute(
"INSERT INTO image_uploads (url, deletion_url) VALUES (?, ?)",
(url, deletion_url),
)
click.secho(url)
urls.append(url)
conn.commit()
if config.COPY_UPLOADED_URL_TO_CLIPBOARD:
pyperclip.copy("\n".join(urls))
except (ImageUploadFailed, ValueError) as error:
click.secho(f"Image Upload Failed. {error}", fg="red")
raise ImageUploadFailed("Failed to upload image") from error
@images.command()
@click.option(
"--limit", "-l", type=click.INT, default=20, help="The number of images to show"
)
@click.option(
"--offset",
"-o",
type=click.INT,
default=0,
help="The number of images to offset by",
)
def ls(limit, offset):
"""View previously uploaded images"""
with sqlite3.connect(DB_PATH) as conn:
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
cursor.execute(
"SELECT id, url, deletion_url, time FROM image_uploads "
"ORDER BY id DESC LIMIT ? OFFSET ?",
(limit, offset),
)
for row in cursor.fetchall():
click.secho(f"{row['id']:04d}. ", fg="yellow", nl=False)
click.secho(f"{row['time']} ", fg="green", nl=False)
click.secho(f"{row['url']} ", fg="cyan", nl=False)
if row["deletion_url"]:
click.secho(f"Delete: {row['deletion_url']}", fg="red")
def chunker(seq, size=4):
for pos in range(0, len(seq), size):
yield seq[pos : pos + size]
def upload_cover(path):
"""
Search a folder for a cover image, and if found, upload it.
The image url is returned, otherwise None.
"""
for filename in os.listdir(path):
if re.match(r"^(cover|folder)\.(jpe?g|png)$", filename, flags=re.IGNORECASE):
click.secho(
f"Uploading cover to {config.COVER_UPLOADER}...", fg="yellow", nl=False
)
try:
fpath = os.path.join(path, filename)
try:
url = loop.run_until_complete(
loop.run_in_executor(
None,
lambda: HOSTS[config.COVER_UPLOADER].upload_file(fpath)[0],
)
)
except (ImageUploadFailed, ValueError) as error:
click.secho(f"Image Upload Failed. {error}", fg="red")
raise ImageUploadFailed("Failed to upload image") from error
except ImageUploadFailed:
return click.secho(" failed :(", fg="red")
click.secho(f" done! {url}", fg="yellow")
return url
click.secho(
f"Did not find a cover to upload to {config.IMAGE_UPLOADER}...", fg="red"
)
def upload_spectrals(spectrals, uploader=HOSTS[config.SPECS_UPLOADER], successful=None):
"""
    Given the spectrals list of (spec_id, filename, [spectral_path, ..]) tuples,
    send them to the coroutine upload handler and return a dictionary mapping
    spec ids to lists of spectral urls.
"""
response = {}
successful = successful or set()
one_failed = False
for specs_block in chunker(spectrals):
tasks = [
_spectrals_handler(sid, filename, sp, uploader)
for sid, filename, sp in specs_block
if sid not in successful
]
for sid, urls in loop.run_until_complete(asyncio.gather(*tasks)):
if urls:
response = {**response, sid: urls}
successful.add(sid)
else:
one_failed = True
if one_failed:
return {**response, **_handle_failed_spectrals(spectrals, successful)}
return response
def _handle_failed_spectrals(spectrals, successful):
while True:
host = click.prompt(
click.style(
"Some spectrals failed to upload. Which image host would you like to retry "
f'with? (Options: {", ".join(HOSTS.keys())})',
fg="magenta",
bold=True,
),
default="ptpimg",
).lower()
if host not in HOSTS:
click.secho(
f"{host} is an invalid image host. Please choose another one.", fg="red"
)
else:
return upload_spectrals(
spectrals, uploader=HOSTS[host], successful=successful
)
async def _spectrals_handler(spec_id, filename, spectral_paths, uploader):
try:
click.secho(f"Uploading spectrals for {filename}...", fg="yellow")
tasks = [
loop.run_in_executor(None, lambda f=f: uploader.upload_file(f)[0])
for f in spectral_paths
]
return spec_id, await asyncio.gather(*tasks)
except ImageUploadFailed as e:
click.secho(f"Failed to upload spectrals for {filename}: {e}", fg="red")
return spec_id, None
|
StarcoderdataPython
|
3205261
|
'''
Given an integer n, count the total number of digit 1 appearing in all non-negative integers less than or equal to n.
For example:
Given n = 13,
Return 6, because digit 1 occurred in the following numbers: 1, 10, 11, 12, 13.
'''
class Solution(object):
def countDigitOne(self, n):
"""
:type n: int
:rtype: int
"""
x, m, count = n, 1, 0
while x > 0:
lastDigit = x % 10
x //= 10
count += x * m
if lastDigit == 1:
count += n % m + 1
elif lastDigit > 1:
count += m
m *= 10
return count
if __name__ == "__main__":
assert Solution().countDigitOne(13) == 6
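    # A few extra sanity checks (illustrative additions):
    assert Solution().countDigitOne(0) == 0
    assert Solution().countDigitOne(99) == 20
    assert Solution().countDigitOne(100) == 21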
|
StarcoderdataPython
|
64126
|
<gh_stars>0
# Databricks notebook source
# MAGIC %md # Using the DDS package on Databricks
# MAGIC
# MAGIC This notebook shows you a few features of the [dds package](https://github.com/tjhunter/dds_py). Install `dds-py` to your cluster via PyPI to run this notebook.
# COMMAND ----------
dbutils.fs.rm("/data_managed", recurse=True)
dbutils.fs.rm("/data_cache", recurse=True)
# COMMAND ----------
# MAGIC %md ## Set up DDS for Databricks
# COMMAND ----------
import dds
dds.set_store("dbfs", data_dir="/data_managed", internal_dir="/data_cache")
# COMMAND ----------
# MAGIC %md ## Hello world example
# COMMAND ----------
@dds.data_function("/hello_data")
def data():
print("Executing data()")
return "Hello, world"
data()
# COMMAND ----------
# MAGIC %md The data is now on DBFS
# COMMAND ----------
# MAGIC %fs ls /data_managed/
# COMMAND ----------
# MAGIC %fs head /data_managed/hello_data
# COMMAND ----------
# MAGIC %md The data is also cached in the `/data_cache` directory:
# COMMAND ----------
# MAGIC %fs ls /data_cache/blobs
# COMMAND ----------
# Calling this function again does not trigger the computation; the data is already stored on DBFS
data()
# COMMAND ----------
# MAGIC %md ## Plotting dependencies
# MAGIC
# MAGIC The following code introduces dependencies between multiple data functions. We can plot the dependencies with the `dds.codecs.databricks.displayGraph` function:
# COMMAND ----------
# Try modifying this variable to see what happens
outside_var = 3
@dds.data_function("/f1")
def f1():
print("eval f1")
return 1
@dds.data_function("/f2")
def f2():
print("eval f2")
return outside_var + f1()
@dds.data_function("/f3")
def f3():
print("eval f3")
return f1() + f2()
# This is the first time we evaluate it, so everything will be evaluated.
dds.codecs.databricks.displayGraph(f3)
# COMMAND ----------
f3()
# COMMAND ----------
# MAGIC %md Now see what happens when we modify the `outside_var` variable. DDS will detect that the `f2` function (and hence the `f3` function) has changed and needs to be rerun.
# COMMAND ----------
outside_var = 5
dds.codecs.databricks.displayGraph(f3)
# COMMAND ----------
f3()
|
StarcoderdataPython
|
3374111
|
<reponame>ajayhk/quant<filename>algos/Catch-Dips-the-fishing-algo-Live-Trade.py
'''
This algorithm buys and keeps stocks when they suddenly dip below a certain threshold
For example, buy only when the stock dips 20% below the moving average (20)
Sell happens when it is 2X of what we bought it at
Performance:
Works pretty well, mostly beating the buy-and-hold strategy or coming close to it
Does not work when we run the Nasdaq 100 due to a runtime error
'''
########### IMPORT THE LIBRARIES USED IN THE ALGORITHM ####################################
import datetime
import pytz
import pandas as pd
from zipline.utils.tradingcalendar import get_early_closes
########### INITIALIZE() IS RUN ONCE (OR IN LIVE TRADING ONCE EACH DAY BEFORE TRADING) #####
def initialize(context):
# context.stock = sid(21724)
context.max = 10000
context.min = 0
context.profit = 0.01
# stocks = [sid(8554), sid(3951), sid(24), sid(23709), sid(18522), sid(6295)]
stocks = [sid(21724), sid(22257), sid(18522), sid(351), sid(6295), sid(20914)]
# stocks = [sid(24), sid(114), sid(122), sid(630), sid(67), \
# sid(20680), sid(328), sid(14328), sid(368), sid(16841), \
# sid(9883), sid(337), sid(38650), sid(739), sid(27533), \
# sid(3806), sid(18529), sid(1209), sid(1406), sid(1419), \
# sid(15101), sid(17632), sid(39095), sid(1637), sid(1900), \
# sid(32301), sid(18870), sid(14014), sid(25317), sid(36930), \
# sid(12652), sid(26111), sid(24819), sid(24482), sid(2618), \
# sid(2663), sid(27543), sid(1787) , sid(2696), sid(42950), \
# sid(20208), sid(2853), sid(8816), sid(12213), sid(3212), \
# sid(9736), sid(23906), sid(26578), sid(22316), sid(13862), \
# sid(3951), sid(8655), sid(25339), sid(4246), sid(43405), \
# sid(27357), sid(32046), sid(4485), sid(43919), sid(4668), \
# sid(8677), sid(22802), sid(3450), sid(5061), sid(5121), \
# sid(5149), sid(5166), sid(23709), sid(13905), sid(19926), \
# sid(19725), sid(8857), sid(5767), sid(5787), sid(19917), \
# sid(6295), sid(6413), sid(6546), sid(20281), sid(6683), \
# sid(26169), sid(6872), sid(11901), sid(13940), sid(7061), \
# sid(15581), sid(24518), sid(7272), sid(39840), sid(7671), \
# sid(27872), sid(8017), sid(38817), sid(8045), sid(8132), \
# sid(8158), sid(24124), sid(8344), sid(8352), sid(14848)]
context.stocks = stocks # add some specific securities
context.no_of_stocks = 6
context.buy_and_hold_number = [0]*context.no_of_stocks
context.run_once = 1
context.lastbuymonth = [0]*context.no_of_stocks
context.lastbuyyear = [0]*context.no_of_stocks
context.last_bought_date = [0]*context.no_of_stocks
context.last_sold_date = [0]*context.no_of_stocks
set_commission(commission.PerShare(cost=0.005))
set_slippage(slippage.FixedSlippage(spread=0.00))
########### HANDLE_DATA() IS RUN ONCE PER MINUTE #######################
def handle_data(context, data):
    # If the stock has not yet started trading, skip it
for stock in context.stocks :
if stock not in data:
log.info(stock)
continue
# Get the current exchange time, in local timezone:
exchange_time = pd.Timestamp(get_datetime()).tz_convert('US/Eastern')
today = exchange_time.day + exchange_time.month*30 + exchange_time.year*365
# This is to compare against the buy and hold strategy
# So buy the first time when the algo runs, and then never sell
if context.run_once == 1:
i = 0
for stock in context.stocks :
context.buy_and_hold_number[i] = (context.max/context.no_of_stocks)/data[stock].price
log.info(stock)
log.info(context.buy_and_hold_number[i])
context.run_once = 0
i = i + 1
i = 0
total_buy_and_hold = 0
for stock in context.stocks :
        # Accumulate the value of the buy-and-hold portfolio
total_buy_and_hold = total_buy_and_hold + context.buy_and_hold_number[i] * data[stock].price
i = i + 1
# This is the graph of what would happen if we had just bought and kept
record(BuyAndHold=total_buy_and_hold)
# All the records
i = 0
for stock in context.stocks :
# This is the Price of the stock today
record(PRICE=data[stock].price)
# This is the value of the portfolio including current value of stock + cash we have
record(PortfolioValue=context.portfolio.positions_value \
+ int(context.portfolio.cash))
# this is the max of capital, to compare against the buy and hold value and portfolio values
#record(InitialCapital=context.max)
i = i + 1
i = -1
for stock in context.stocks :
i = i + 1
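        # Buy rule: the price has dipped at least 20% below the 20-period VWAP
        # and we have not bought this stock within the last 5 days.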
if data[stock].price < 0.8*data[stock].vwap(20) and \
(int(today) > int(context.last_bought_date[i]) + 5) :
# do all the buying here
# amount to buy is lesser of the current cash we have and the
# money remaining of the total value we allocate for this stock
# amount_to_buy = min(context.portfolio.cash, \
# (context.max/context.no_of_stocks - context.portfolio.positions[stock].amount*data[stock].price))
if (context.portfolio.positions[stock].amount == 0) :
amount_to_buy = min(context.portfolio.cash, (context.max/context.no_of_stocks))
else :
amount_to_buy = min(context.portfolio.cash, \
(context.max/context.no_of_stocks) - context.portfolio.positions[stock].amount*data[stock].price)
context.order_id = order_value(stock, 0.3*(amount_to_buy))
# Check the order to make sure that it has bought. Right now the filled below returns zero
stock_order = get_order(context.order_id)
# The check below shows if the object exists. Only if it exists, should you
# refer to it. Otherwise you will get a runtime error
if stock_order:
message = ',buy,stock={stock},amount to buy={amount_to_buy},price={price},amount={amount}'
message = message.format(stock=stock,amount=stock_order.amount, price=data[stock].price,amount_to_buy=amount_to_buy)
log.info(message)
record(BUY=data[stock].price)
context.last_bought_date[i] = today
continue
continue
    # This doesn't give returns as good as the loop below, except when the sell threshold is set to three times the cost basis
# i = 0
# for stock in context.stocks :
# if data[stock].price > 3*context.portfolio.positions[stock].cost_basis and \
# (int(today) > int(context.last_bought_date[i]) + 5) :
# # do all the selling here
# context.order_id = order(stock, -0.4*(context.portfolio.positions[stock].amount))
# # Check the order to make sure that it has bought. Right now the filled below returns zero
# stock_order = get_order(context.order_id)
# # The check below shows if the object exists. Only if it exists, should you
# # refer to it. Otherwise you will get a runtime error
# if stock_order:
# message = ',sell,stock={stock},price={price},amount={amount}'
# message = message.format(stock=stock,amount=stock_order.amount, price=data[stock].price)
# log.info(message)
# #record(SELL=data[stock].price)
# context.last_sold_date[i] = today
# continue
#
# continue
# i = i + 1
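    # Sell rule: once the price exceeds twice the cost basis, sell 40% of the position.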
i = 0
for stock in context.stocks :
if data[stock].price > 2*context.portfolio.positions[stock].cost_basis :
context.order_id = order(stock, -0.4*(context.portfolio.positions[stock].amount))
# update the date. This is used to make sure we trade only once a day
# Check the order to make sure that it has bought.
# Right now the filled below returns zero
stock_order = get_order(context.order_id)
# The check below shows if the object exists. Only if it exists, should you
# refer to it. Otherwise you will get a runtime error
if stock_order:
# log the order amount and the amount that is filled
context.last_sold_date[i] = today
message = ',sell,sid={sid}.stocks={stock},amount={amount}'
message = message.format(sid=stock,amount=stock_order.amount, stock=stock_order.filled)
log.info(message)
record(SELL=data[stock].price)
i = i + 1
|
StarcoderdataPython
|
3210470
|
class Student:
def __init__(self, first, last, courses=None):
self.first_name = first
self.last_name = last
if courses == None:
self.courses = []
else:
self.courses = courses
def add_course(self, course):
if course not in self.courses:
self.courses.append(course)
else:
print(f"{self.first_name} is already enrolled in the {course}")
def remove_course(self, course):
if course in self.courses:
self.courses.remove(course)
else:
print(F"{course} not found.")
def find_in_file(self, filename):
with open(filename) as f:
for line in f:
first_name, last_name, course_details = Student.prep_record(line.strip())
Student_read_in = Student(first_name, last_name, course_details)
if self == Student_read_in:
return True
return False
    # This is the method that checks for, adds, or modifies an existing record in the file.
def add_to_file(self, filename):
        # Check whether the record already exists
if self.find_in_file(filename):
            # If the record exists, ask whether to update the record or cancel
print( f"Record already exists, do you want to update {self.first_name.capitalize()} {self.last_name.capitalize()}'s record?")
with open(filename) as o:
for line in o:
old_record = line
u_response = input("Enter u to Update, c to Cancel -> ")
record_to_update = Student.prep_to_write(self.first_name, self.last_name, self.courses)
            # If you choose to update, write the updated record to the file
if u_response == "u":
with open(filename, "w+") as to_update:
to_update.write(record_to_update+"\n")
return f"Record updated \nFrom: {old_record} \nTo: {record_to_update}"
            # If you choose to cancel, no changes are made
else:
return "No Changes made"
        # If the record does not exist, add a new record.
else:
record_to_add = Student.prep_to_write(self.first_name, self.last_name, self.courses)
with open(filename, "a+") as to_write:
to_write.write(record_to_add+"\n")
return f"New record added: {record_to_add}"
@staticmethod
def prep_record(line):
line = line.split(":")
first_name, last_name = line[0].split(",")
course_details = line[1].rstrip().split(",")
return first_name, last_name, course_details
@staticmethod
def prep_to_write(first_name, last_name, courses):
full_name = first_name+','+last_name
courses =",".join(courses)
return full_name+':'+courses
def __eq__(self, other):
return self.first_name == other.first_name and self.last_name == other.last_name
def __len__(self):
return len(self.courses)
def __str__(self):
return f"First Name: {self.first_name.capitalize()}\nLast Name: {self.last_name.capitalize()}\nCourses: {', '.join(map(str.capitalize, self.courses))}"
# This is the file we will be working with
file_name = "data.txt"
# These are sample records to be added
# mashrur = Student("mashrur", "hossain", ["python", "rubyon","javascript"])
# print(mashrur.add_to_file(file_name))
# joe = Student("joe", "schmo", ["python", "rubyoo", "javascript"])
# print(joe.add_to_file(file_name))
class StudentAthlete(Student):
pass
courses = ["python", "rubyon","javascript"]
jane = StudentAthlete("jane", "doe",courses)
print(jane.courses)
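# Illustrative usage of the classes above (added example, not from the original script):
jane.add_course("go")
print(len(jane)) # number of enrolled courses via __len__
print(jane) # formatted summary via __str__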
|
StarcoderdataPython
|
3312465
|
import tensorflow as tf
import tensorflow.keras as ks
from kgcnn.ops.partition import change_partition_by_name, partition_row_indexing
from kgcnn.layers.base import GraphBaseLayer
@tf.keras.utils.register_keras_serializable(package='kgcnn',name='PoolingTopK')
class PoolingTopK(GraphBaseLayer):
"""Layer for pooling of nodes. Disjoint representation including length tensor.
This implements a learnable score vector plus gate. Implements gPool of Gao et al.
Args:
k (float): relative number of nodes to remove. Default is 0.1
kernel_initializer (str): Score initialization. Default is 'glorot_uniform',
kernel_regularizer (str): Score regularization. Default is None.
kernel_constraint (bool): Score constrain. Default is None.
"""
def __init__(self,
k=0.1,
kernel_initializer='glorot_uniform',
kernel_regularizer=None,
kernel_constraint=None,
**kwargs):
"""Initialize Layer."""
super(PoolingTopK, self).__init__(**kwargs)
self.k = k
self.kernel_initializer = ks.initializers.get(kernel_initializer)
self.kernel_regularizer = ks.regularizers.get(kernel_regularizer)
self.kernel_constraint = ks.constraints.get(kernel_constraint)
self.units_p = None
self.kernel_p = None
def build(self, input_shape):
"""Build Layer."""
super(PoolingTopK, self).build(input_shape)
self.units_p = int(input_shape[0][-1])
self.kernel_p = self.add_weight('score',
shape=[1, self.units_p],
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
dtype=self.dtype,
trainable=True)
def call(self, inputs, **kwargs):
"""Forward pass.
Args:
            inputs (list): [nodes, edges, edge_indices]
                - nodes (tf.RaggedTensor): Node embeddings of shape (batch, [N], F)
                - edges (tf.RaggedTensor): Edge embeddings of shape (batch, [M], F)
                - edge_indices (tf.RaggedTensor): Edge index list of shape (batch, [M], 2)
Returns:
tuple: [nodes, edges, edge_indices], [map_nodes, map_edges]
- nodes (tf.RaggedTensor): Pooled node feature tensor
- edges (tf.RaggedTensor): Pooled edge feature list
- edge_indices (tf.RaggedTensor): Pooled edge index list
- map_nodes (tf.RaggedTensor): Index map between original and pooled nodes
- map_edges (tf.RaggedTensor): Index map between original and pooled edges
"""
self.assert_ragged_input_rank(inputs)
dyn_inputs = inputs
# We cast to values here
node, nodelen = dyn_inputs[0].values, dyn_inputs[0].row_lengths()
edgefeat, edgelen = dyn_inputs[1].values, dyn_inputs[1].row_lengths()
edgeindref, _ = dyn_inputs[2].values, dyn_inputs[2].row_lengths()
index_dtype = edgeindref.dtype
# Get node properties
nvalue = node
nrowlength = tf.cast(nodelen, dtype=index_dtype)
erowlength = tf.cast(edgelen, dtype=index_dtype)
nids = tf.repeat(tf.range(tf.shape(nrowlength)[0], dtype=index_dtype), nrowlength)
# Use kernel p to get score
norm_p = ks.backend.sqrt(ks.backend.sum(ks.backend.square(self.kernel_p), axis=-1, keepdims=True))
nscore = ks.backend.sum(nvalue * self.kernel_p / norm_p, axis=-1)
# Sort nodes according to score
        # Then sort by the former node ids -> stable=True keeps the previous order
sort1 = tf.argsort(nscore, direction='ASCENDING', stable=False)
nids_sorted1 = tf.gather(nids, sort1)
sort2 = tf.argsort(nids_sorted1, direction='ASCENDING', stable=True) # Must be stable=true here
        sort12 = tf.gather(sort1, sort2) # index goes from 0 to batch*N, not in-batch indexing
nvalue_sorted = tf.gather(nvalue, sort12, axis=0)
nscore_sorted = tf.gather(nscore, sort12, axis=0)
# Make Mask
nremove = tf.cast(tf.math.round(self.k * tf.cast(nrowlength, dtype=tf.keras.backend.floatx())),
dtype=index_dtype)
nkeep = nrowlength - nremove
n_remove_keep = ks.backend.flatten(
tf.concat([ks.backend.expand_dims(nremove, axis=-1), ks.backend.expand_dims(nkeep, axis=-1)], axis=-1))
mask_remove_keep = ks.backend.flatten(tf.concat(
[ks.backend.expand_dims(tf.zeros_like(nremove, dtype=tf.bool), axis=-1),
ks.backend.expand_dims(tf.ones_like(nkeep, tf.bool), axis=-1)], axis=-1))
mask = tf.repeat(mask_remove_keep, n_remove_keep)
# Apply Mask to remove lower score nodes
pooled_n = nvalue_sorted[mask]
pooled_score = nscore_sorted[mask]
pooled_id = nids[mask] # nids should not have changed by final sorting
pooled_len = nkeep # shape=(batch,)
pooled_index = tf.cast(sort12[mask], dtype=index_dtype) # the index goes from 0 to N*batch
removed_index = tf.cast(sort12[tf.math.logical_not(mask)],
dtype=index_dtype) # the index goes from 0 to N*batch
# Pass through gate
gated_n = pooled_n * ks.backend.expand_dims(tf.keras.activations.sigmoid(pooled_score), axis=-1)
# Make index map for new nodes towards old index
index_new_nodes = tf.range(tf.shape(pooled_index)[0], dtype=index_dtype)
old_shape = tf.cast(ks.backend.expand_dims(tf.shape(nvalue)[0]), dtype=index_dtype)
map_index = tf.scatter_nd(ks.backend.expand_dims(pooled_index, axis=-1), index_new_nodes, old_shape)
# Shift index if necessary
edge_ids = tf.repeat(tf.range(tf.shape(edgelen)[0], dtype=index_dtype), edgelen)
shiftind = partition_row_indexing(edgeindref, nrowlength, edge_ids,
partition_type_target="row_length", partition_type_index="value_rowids",
from_indexing=self.node_indexing, to_indexing="batch")
shiftind = tf.cast(shiftind, dtype=index_dtype) # already shifted by batch offset (sub-graphs)
# Remove edges that were from filtered nodes via mask
mask_edge = ks.backend.expand_dims(shiftind, axis=-1) == ks.backend.expand_dims(
            ks.backend.expand_dims(removed_index, axis=0), axis=0) # this creates a large tensor of shape (batch*#edges, 2, #removed)
mask_edge = tf.math.logical_not(ks.backend.any(ks.backend.any(mask_edge, axis=-1), axis=-1))
clean_shiftind = shiftind[mask_edge]
clean_edge_ids = edge_ids[mask_edge]
# clean_edge_len = tf.math.segment_sum(tf.ones_like(clean_edge_ids), clean_edge_ids)
clean_edge_len = tf.scatter_nd(tf.expand_dims(clean_edge_ids, axis=-1), tf.ones_like(clean_edge_ids),
tf.cast(tf.shape(erowlength), dtype=index_dtype))
# Map edgeindex to new index
new_edge_index = tf.concat([ks.backend.expand_dims(tf.gather(map_index, clean_shiftind[:, 0]), axis=-1),
ks.backend.expand_dims(tf.gather(map_index, clean_shiftind[:, 1]), axis=-1)],
axis=-1)
batch_order = tf.argsort(new_edge_index[:, 0], axis=0, direction='ASCENDING', stable=True)
new_edge_index_sorted = tf.gather(new_edge_index, batch_order, axis=0)
# Remove the batch offset from edge_indices again for indexing type
out_indexlist = partition_row_indexing(new_edge_index_sorted,
pooled_len, clean_edge_ids,
partition_type_target="row_length",
partition_type_index="value_rowids",
from_indexing="batch",
to_indexing=self.node_indexing)
# Correct edge features the same way (remove and reorder)
edge_feat = edgefeat
clean_edge_feat = edge_feat[mask_edge]
clean_edge_feat_sorted = tf.gather(clean_edge_feat, batch_order, axis=0)
# Make edge feature map for new edge features
edge_position_old = tf.range(tf.shape(edgefeat)[0], dtype=index_dtype)
edge_position_new = edge_position_old[mask_edge]
edge_position_new = tf.gather(edge_position_new, batch_order, axis=0)
# Collect output tensors
out_node = gated_n
out_edge = clean_edge_feat_sorted
out_edge_index = out_indexlist
# Change length to partition required
out_np = pooled_len # row_length
out_ep = clean_edge_len # row_length
# Collect reverse pooling info
        # Remove batch offset for old indices -> but with new length
out_pool = partition_row_indexing(pooled_index,
nrowlength, pooled_len,
partition_type_target="row_length",
partition_type_index="row_length",
from_indexing="batch",
to_indexing=self.node_indexing)
out_pool_edge = partition_row_indexing(edge_position_new,
erowlength, clean_edge_ids,
partition_type_target="row_length",
partition_type_index="value_rowids",
from_indexing="batch",
to_indexing=self.node_indexing)
out = [tf.RaggedTensor.from_row_lengths(out_node, out_np, validate=self.ragged_validate),
tf.RaggedTensor.from_row_lengths(out_edge, out_ep, validate=self.ragged_validate),
tf.RaggedTensor.from_row_lengths(out_edge_index, out_ep, validate=self.ragged_validate)]
out_map = [tf.RaggedTensor.from_row_lengths(out_pool, out_np, validate=self.ragged_validate),
tf.RaggedTensor.from_row_lengths(out_pool_edge, out_ep, validate=self.ragged_validate)]
return out, out_map
def get_config(self):
"""Update layer config."""
config = super(PoolingTopK, self).get_config()
config.update({"k": self.k})
config.update({
'kernel_initializer':
ks.initializers.serialize(self.kernel_initializer),
'kernel_regularizer':
ks.regularizers.serialize(self.kernel_regularizer),
'kernel_constraint':
ks.constraints.serialize(self.kernel_constraint),
})
return config
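# A minimal usage sketch for PoolingTopK (untested illustration; input shapes follow the
# call() docstring above and assume a working kgcnn/TensorFlow installation):
#
#   nodes = tf.ragged.constant([[[0.1, 0.2], [0.3, 0.4], [0.5, 0.6], [0.7, 0.8]]], ragged_rank=1)
#   edges = tf.ragged.constant([[[1.0], [1.0], [1.0]]], ragged_rank=1)
#   edge_indices = tf.ragged.constant([[[0, 1], [1, 2], [2, 3]]], ragged_rank=1)
#   (pooled_nodes, pooled_edges, pooled_indices), maps = PoolingTopK(k=0.25)([nodes, edges, edge_indices])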
@tf.keras.utils.register_keras_serializable(package='kgcnn',name='UnPoolingTopK')
class UnPoolingTopK(GraphBaseLayer):
"""Layer for un-pooling of nodes from PoolingTopK.
    The edge index information is not reverted, since the tensor from before pooling can be reused.
    The same holds for the batch assignment of the node and edge counts.
"""
def __init__(self, **kwargs):
"""Initialize Layer."""
super(UnPoolingTopK, self).__init__(**kwargs)
def build(self, input_shape):
"""Build Layer."""
super(UnPoolingTopK, self).build(input_shape)
def call(self, inputs, **kwargs):
"""Forward pass.
Args:
inputs (list): [node, edge, edge_indices, map_node, map_edge, node_pool, edge_pool, edge_indices_pool]
- node (tf.RaggedTensor): Original node tensor
- edge (tf.RaggedTensor): Original edge feature tensor
- edge_indices (tf.RaggedTensor): Original index tensor
- map_node (tf.RaggedTensor): Index map between original and pooled nodes
- map_edge (tf.RaggedTensor): Index map between original and pooled edges
- node_pool (tf.RaggedTensor): Pooled node tensor
- edge_pool (tf.RaggedTensor): Pooled edge feature tensor
- edge_indices (tf.RaggedTensor): Pooled index tensor
Returns:
List: [nodes, edges, edge_indices]
- nodes (tf.RaggedTensor): Un-pooled node feature tensor
- edges (tf.RaggedTensor): Un-pooled edge feature list
- edge_indices (tf.RaggedTensor): Un-pooled edge index
"""
self.assert_ragged_input_rank(inputs)
dyn_inputs = inputs
# We cast to values here
node_old, nrowlength = dyn_inputs[0].values, dyn_inputs[0].row_lengths()
edge_old, erowlength = dyn_inputs[1].values, dyn_inputs[1].row_lengths()
edgeind_old, _ = dyn_inputs[2].values, dyn_inputs[2].row_lengths()
map_node, _ = dyn_inputs[3].values, dyn_inputs[3].row_lengths()
map_edge, _ = dyn_inputs[4].values, dyn_inputs[4].row_lengths()
node_new, pool_node_len = dyn_inputs[5].values, dyn_inputs[5].row_lengths()
edge_new, pool_edge_id = dyn_inputs[6].values, dyn_inputs[6].value_rowids()
edgeind_new, _ = dyn_inputs[7].values, dyn_inputs[7].row_lengths()
# Correct map index for flatten batch offset
map_node = partition_row_indexing(map_node, nrowlength, pool_node_len,
partition_type_target="row_length", partition_type_index="row_length",
from_indexing=self.node_indexing, to_indexing="batch")
map_edge = partition_row_indexing(map_edge, erowlength, pool_edge_id,
partition_type_target="row_length",
partition_type_index="value_rowids",
from_indexing=self.node_indexing,
to_indexing="batch")
index_dtype = map_node.dtype
node_shape = tf.stack([tf.cast(tf.shape(node_old)[0], dtype=index_dtype),
tf.cast(tf.shape(node_new)[1], dtype=index_dtype)])
out_node = tf.scatter_nd(ks.backend.expand_dims(map_node, axis=-1), node_new, node_shape)
index_dtype = map_edge.dtype
edge_shape = tf.stack([tf.cast(tf.shape(edge_old)[0], dtype=index_dtype),
tf.cast(tf.shape(edge_new)[1], dtype=index_dtype)])
out_edge = tf.scatter_nd(ks.backend.expand_dims(map_edge, axis=-1), edge_new, edge_shape)
outlist = [tf.RaggedTensor.from_row_lengths(out_node, nrowlength, validate=self.ragged_validate),
tf.RaggedTensor.from_row_lengths(out_edge, erowlength, validate=self.ragged_validate),
tf.RaggedTensor.from_row_lengths(edgeind_old, erowlength, validate=self.ragged_validate)]
return outlist
def get_config(self):
"""Update layer config."""
config = super(UnPoolingTopK, self).get_config()
return config
@tf.keras.utils.register_keras_serializable(package='kgcnn', name='AdjacencyPower')
class AdjacencyPower(GraphBaseLayer):
"""Computes powers of the adjacency matrix. This implementation is a temporary solution.
Note: Layer casts to dense until sparse matmul is supported. This can be very inefficient.
Args:
n (int): Power of the adjacency matrix. Default is 2.
"""
def __init__(self, n=2, **kwargs):
"""Initialize layer."""
super(AdjacencyPower, self).__init__(**kwargs)
self.n = n
def build(self, input_shape):
"""Build layer."""
super(AdjacencyPower, self).build(input_shape)
def call(self, inputs, **kwargs):
"""Forward pass.
Args:
inputs (list): [nodes, edges, edge_indices]
- nodes (tf.RaggedTensor): Node embeddings of shape (batch, [N], F)
- edges (tf.RaggedTensor): Adjacency entries of shape (batch, [M], 1)
- edge_indices (tf.RaggedTensor): Edge-index list referring to nodes of shape (batch, [M], 2)
Returns:
list: [edges, edge_indices]
- edges (tf.RaggedTensor): Adjacency entries of shape (batch, [M], 1)
- edge_indices (tf.RaggedTensor): Flatten index list of shape (batch, [M], 2)
"""
self.assert_ragged_input_rank(inputs)
dyn_inputs = inputs
nod, node_len = dyn_inputs[0].values, dyn_inputs[0].row_lengths()
edge = dyn_inputs[1].values
edge_index, edge_len = dyn_inputs[2].values, dyn_inputs[2].row_lengths()
# batch-wise indexing
edge_index = partition_row_indexing(edge_index,
node_len, edge_len,
partition_type_target="row_length",
partition_type_index="row_length",
from_indexing=self.node_indexing,
to_indexing="sample")
ind_batch = tf.cast(tf.expand_dims(tf.repeat(tf.range(tf.shape(edge_len)[0]), edge_len), axis=-1),
dtype=edge_index.dtype)
ind_all = tf.concat([ind_batch, edge_index], axis=-1)
ind_all = tf.cast(ind_all, dtype=tf.int64)
max_index = tf.reduce_max(node_len)
dense_shape = tf.stack([tf.cast(tf.shape(node_len)[0], dtype=max_index.dtype), max_index, max_index])
adj = tf.zeros(dense_shape, dtype=edge.dtype)
ind_flat = tf.range(tf.cast(tf.shape(node_len)[0], dtype=max_index.dtype) * max_index * max_index)
adj = tf.expand_dims(adj, axis=-1)
adj = tf.tensor_scatter_nd_update(adj, ind_all, edge[:, 0:1])
adj = tf.squeeze(adj, axis=-1)
out0 = adj
out = adj
for i in range(self.n - 1):
out = tf.linalg.matmul(out, out0)
# debug_result = out
# Make sparse
mask = out > tf.keras.backend.epsilon()
mask = tf.reshape(mask, (-1,))
out = tf.reshape(out, (-1,))
new_edge = out[mask]
new_edge = tf.expand_dims(new_edge, axis=-1)
new_indices = tf.unravel_index(ind_flat[mask], dims=dense_shape)
new_egde_ids = new_indices[0]
new_edge_index = tf.concat([tf.expand_dims(new_indices[1], axis=-1), tf.expand_dims(new_indices[2], axis=-1)],
axis=-1)
new_edge_len = tf.tensor_scatter_nd_add(tf.zeros_like(node_len), tf.expand_dims(new_egde_ids, axis=-1),
tf.ones_like(new_egde_ids))
# batchwise indexing
new_edge_index = partition_row_indexing(new_edge_index,
node_len, new_edge_len,
partition_type_target="row_length",
partition_type_index="row_length",
from_indexing="sample",
to_indexing=self.node_indexing)
outlist = [tf.RaggedTensor.from_row_lengths(new_edge, new_edge_len, validate=self.ragged_validate),
tf.RaggedTensor.from_row_lengths(new_edge_index, new_edge_len, validate=self.ragged_validate)]
return outlist
def get_config(self):
"""Update layer config."""
config = super(AdjacencyPower, self).get_config()
config.update({"n": self.n})
return config
|
StarcoderdataPython
|
1600288
|
import time
import pytest
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import filestore
import filestore.api
import filestore.handlers
import hxnfly.fly
import hxnfly.log
from hxnfly.callbacks import FlyLiveCrossSection
from hxnfly.fly2d import Fly2D
from hxnfly.bs import FlyPlan2D
from .fly_mock import (MockDetector, MockSignal)
from .sim_detector import TestDetector
from .fixtures import *
def test_scan_points():
from hxnfly.fly2d import _get_scan_points
point_args = (0.0, 1.0, 10,
1.0, 2.0, 10,
)
points = list(_get_scan_points(*point_args, max_points=22))
assert len(points) == 5
assert sum(py for xi, xj, px, yi, yj, py in points) == 10
xis = [xi for xi, xj, px, yi, yj, py in points]
xjs = [xj for xi, xj, px, yi, yj, py in points]
assert min(xis) == 0.0
assert max(xjs) == 1.0
yis = [yi for xi, xj, px, yi, yj, py in points]
yjs = [yj for xi, xj, px, yi, yj, py in points]
assert min(yis) == 1.0
assert max(yjs) == 2.0
def make_2d_data(startx, endx, nptsx,
starty, endy, nptsy,
gathered_points=200,
gate_enable='low_to_high'):
# this is not entirely accurate...
time = np.linspace(0, 10.0, gathered_points)
gather_y = gathered_points // nptsy
px = np.array(np.linspace(startx, endx, gather_y).tolist() * nptsy)
py = np.array(sum(([pt] * gather_y
for pt in np.linspace(starty, endy, nptsy)),
[]))
pz = np.random.rand(gathered_points) / 10.0
enable = []
npts = nptsx * nptsy
exposed_count = (gathered_points // npts) - 1
for i in range(npts):
if gate_enable == 'low_to_high':
enable.extend([0] + [1] * exposed_count)
else:
enable.extend([1] + [0] * exposed_count)
if gate_enable == 'low_to_high':
enable[-1] = 0
else:
enable[-1] = 1
return [np.array(v) for v in (time, enable, px, py, pz)]
@pytest.fixture(scope='function',
params=['with_mock_detector', 'with_sim_detector',
'relative'])
def fly2d(request, monkeypatch, ppmac, gpascii, axes, positioners,
sim_det, ipython, global_state):
import hxntools.scans
hxntools.scans.setup(debug_mode=True)
def run_and_wait(*args, **kwargs):
print('run and wait!')
change_callback = kwargs.pop('change_callback')
time.sleep(0.1)
change_callback('Q1', 0, 1)
time.sleep(1.0)
return 0
monkeypatch.setattr(gpascii, 'run_and_wait', run_and_wait)
if request.param == 'with_sim_detector':
sim_det.count_time.put(0.01)
detectors = [sim_det]
else:
detectors = [MockDetector('', name='det')]
gpascii.set_variable('gather.samples', 100)
gpascii.set_variable('gather.maxlines', 100)
scan = Fly2D(axes=axes, positioners=positioners, detectors=detectors)
startx, endx, nptsx = -1.0, 1.0, 2
starty, endy, nptsy = -1.0, 1.0, 2
exposure_time = 1.0
relative = (request.param == 'relative')
scan.configure(positioners['testx'], startx, endx, nptsx,
positioners['testy'], starty, endy, nptsy,
exposure_time=exposure_time, relative=relative)
npts = nptsx * nptsy
sclr1 = ipython.user_ns['sclr1']
sclr1.mca_by_index[1].spectrum.put(np.array([exposure_time * 50e3] * npts))
gather_client = scan.gather_client
# fake data for parsing
gather_client.gathered = make_2d_data(startx, endx, nptsx,
starty, endy, nptsy,
gathered_points=500,
gate_enable=scan._gate_enable)
def FakeClass(*args, **kwargs):
print('initialized with', args, kwargs)
scan.configure_defaults = dict(return_speed=5.0, dead_time=0.007,
fly_type='soft', max_points=16384)
return scan
testx, testy = positioners['testx'], positioners['testy']
FlyPlan2D.scans = {frozenset({testx, testy}): FakeClass,
}
return scan
def test_fly2d(fly2d):
# kickoff sends in the subscan number normally
data = fly2d.run_subscan(0)
df = pd.DataFrame(list(pd.DataFrame(data)['data']))
print('acquired data:')
print(df)
has_test_det = any(isinstance(det, TestDetector)
for det in fly2d.detectors)
if has_test_det:
assert 'sim_tiff' in df
for img_uid in df['sim_tiff']:
print(img_uid, filestore.api.retrieve(img_uid).shape)
print(fly2d.collect())
print(fly2d.describe())
def test_failed_fly2d(fly2d, gpascii, monkeypatch):
def run_and_wait(*args, **kwargs):
time.sleep(1.0)
return 1
monkeypatch.setattr(gpascii, 'run_and_wait', run_and_wait)
data = fly2d.run_subscan(0)
assert data is None
def test_flyplan2d(monkeypatch, positioners, fly2d, run_engine):
scan_fcn = hxnfly.bs.FlyPlan2D()
gen = scan_fcn(positioners['testx'], -1.0, 1.0, 2,
positioners['testy'], -1.0, 1.0, 2,
1.0, dead_time=0.001,
)
run_engine(gen)
def test_flyplan2d_liveimage(request, monkeypatch, positioners, run_engine,
fly2d, xspress3):
fly2d.detectors.append(xspress3)
scan_fcn = hxnfly.bs.FlyPlan2D()
from hxnfly.callbacks import (FlyDataCallbacks, FlyLiveImage)
monkeypatch.setattr(hxnfly.fly, 'Xspress3Detector', xspress3.__class__)
xspress3.array_counter.put(4)
# TODO is this done in configure_roi?
xspress3.setup_fake_rois([('Fe', [1, 2, 3, 4]),
('Mo', [4, 3, 2, 1])
])
liveimage = FlyLiveImage(['Fe', 'Mo'])
scan_fcn.subs = [FlyDataCallbacks(), liveimage]
gen = scan_fcn(positioners['testx'], -1.0, 1.0, 2,
positioners['testy'], -1.0, 1.0, 2,
1.0, dead_time=0.001,
)
run_engine(gen)
plt.savefig('liveimage-{}.png'.format(request.node.name))
liveimage.disable()
plt.clf()
# TODO cross-section plot needs fixing
# @pytest.mark=
def test_flyplan2d_crossection(request, monkeypatch, positioners, run_engine,
fly2d, xspress3):
fly2d.detectors.append(xspress3)
scan_fcn = hxnfly.bs.FlyPlan2D()
monkeypatch.setattr(hxnfly.fly, 'Xspress3Detector', xspress3.__class__)
xspress3.array_counter.put(4)
# TODO is this done in configure_roi?
xspress3.setup_fake_rois([('Fe', [1, 2, 3, 4]),
('Mo', [4, 3, 2, 1])
])
with pytest.raises(ValueError):
# cross-section only does 1 at a time
FlyLiveCrossSection(['Fe', 'Mo'])
crossection = FlyLiveCrossSection(['Fe'])
scan_fcn.subs = [crossection]
gen = scan_fcn(positioners['testx'], -1.0, 1.0, 2,
positioners['testy'], -1.0, 1.0, 2,
1.0, dead_time=0.001,
)
run_engine(gen)
crossection.disable()
from PyQt4.QtGui import QPixmap
live_fn = 'crossection-live-{}.png'.format(request.node.name)
QPixmap.grabWindow(crossection.live_window.winId()).save(live_fn, 'png')
crossection.live_window.close()
if crossection._final_window is not None:
final_fn = 'crossection-final-{}.png'.format(request.node.name)
QPixmap.grabWindow(crossection.final_window.winId()).save(final_fn,
'png')
crossection.final_window.close()
|
StarcoderdataPython
|
4804444
|
import pytest
from ..checker import check
from tenable.errors import APIError, UnexpectedValueError
from tests.pytenable_log_handler import log_exception
def test_queries_constructor_sort_field_typeerror(sc):
with pytest.raises(TypeError):
sc.queries._constructor(sort_field=1, tool='1', type='1', filters=[('filtername', 'operator', 'value')])
def test_queries_constructor_description_typeerror(sc):
with pytest.raises(TypeError):
sc.queries._constructor(description=1, tool='1', type='1')
def test_queries_constructor_sort_direction_typeerror(sc):
with pytest.raises(TypeError):
sc.queries._constructor(sort_direction=1, tool='1', type='1')
def test_queries_constructor_sort_direction_unexpectedvalueerror(sc):
with pytest.raises(UnexpectedValueError):
sc.queries._constructor(sort_direction='nope', tool='1', type='1')
def test_queries_constructor_offset_typeerror(sc):
with pytest.raises(TypeError):
sc.queries._constructor(offset='one', tool='1', type='1')
def test_queries_constructor_limit_typeerror(sc):
with pytest.raises(TypeError):
sc.queries._constructor(limit='one', tool='1', type='1')
def test_queries_constructor_owner_id_typeerror(sc):
with pytest.raises(TypeError):
sc.queries._constructor(owner_id='one', tool='1', type='1')
def test_queries_constructor_context_typeerror(sc):
with pytest.raises(TypeError):
sc.queries._constructor(context=1, tool='1', type='1')
def test_queries_constructor_browse_cols_typeerror(sc):
with pytest.raises(TypeError):
sc.queries._constructor(browse_cols=1, tool='1', type='1')
def test_queries_constructor_browse_sort_col_typeerror(sc):
with pytest.raises(TypeError):
sc.queries._constructor(browse_sort_col=1, tool='1', type='1')
def test_queries_constructor_browse_sort_dir_typeerror(sc):
with pytest.raises(TypeError):
sc.queries._constructor(browse_sort_direction=1, tool='1', type='1')
def test_queries_constructor_browse_sort_dir_unexpectedvalueerror(sc):
with pytest.raises(UnexpectedValueError):
sc.queries._constructor(browse_sort_direction='nope', tool='1', type='1')
def test_queries_constructor_tags_typeerror(sc):
with pytest.raises(TypeError):
sc.queries._constructor(tags=1, tool='1', type='ticket', filters=[('filtername', 'operator', [1, 2])])
@pytest.mark.vcr()
def test_queries_constructor_success(sc):
query = sc.queries._constructor(
('filtername', 'operator', 'value'),
('asset', 'op', 2),
tool='vulndetails',
type='thistype',
tags='tag',
sort_field='field1',
sort_direction='asc',
offset=0,
limit=1000,
owner_id=1,
context='nothing',
browse_cols=['something'],
browse_sort_col='yes',
browse_sort_direction='asc',
query_id=1
)
assert isinstance(query, dict)
assert query == {
'tool': 'vulndetails',
'type': 'thistype',
'tags': 'tag',
'filters': [{
'filterName': 'filtername',
'operator': 'operator',
'value': 'value'
}, {
'filterName': 'asset',
'operator': 'op',
'value': {'id': '2'}
}],
'sortField': 'field1',
'sortDir': 'ASC',
'startOffset': 0,
'query_id': 1,
'endOffset': 1000,
'ownerID': '1',
'context': 'nothing',
'browseColumns': 'something',
'browseSortColumn': 'yes',
'browseSortDirection': 'ASC'
}
@pytest.fixture
def query(request, sc, vcr):
with vcr.use_cassette('test_queries_create_success'):
query = sc.queries.create('New Query', 'vulndetails', 'vuln',
('pluginID', '=', '19506'))
def teardown():
try:
with vcr.use_cassette('test_queries_delete_success'):
sc.queries.delete(int(query['id']))
except APIError as error:
log_exception(error)
request.addfinalizer(teardown)
return query
@pytest.mark.vcr()
def test_queries_create_success(sc, query):
assert isinstance(query, dict)
check(query, 'id', str)
check(query, 'name', str)
check(query, 'description', str)
check(query, 'tool', str)
check(query, 'type', str)
check(query, 'tags', str)
check(query, 'context', str)
check(query, 'browseColumns', str)
check(query, 'browseSortColumn', str)
check(query, 'browseSortDirection', str)
check(query, 'createdTime', str)
check(query, 'modifiedTime', str)
check(query, 'status', str)
check(query, 'filters', list)
for filter in query['filters']:
check(filter, 'filterName', str)
check(filter, 'operator', str)
check(filter, 'value', str)
check(query, 'canManage', str)
check(query, 'canUse', str)
check(query, 'creator', dict)
check(query['creator'], 'id', str)
check(query['creator'], 'username', str)
check(query['creator'], 'firstname', str)
check(query['creator'], 'lastname', str)
check(query, 'owner', dict)
check(query['owner'], 'id', str)
check(query['owner'], 'username', str)
check(query['owner'], 'firstname', str)
check(query['owner'], 'lastname', str)
check(query, 'ownerGroup', dict)
check(query['ownerGroup'], 'id', str)
check(query['ownerGroup'], 'name', str)
check(query['ownerGroup'], 'description', str)
check(query, 'targetGroup', dict)
check(query['targetGroup'], 'id', int)
check(query['targetGroup'], 'name', str)
check(query['targetGroup'], 'description', str)
@pytest.mark.vcr()
def test_queries_delete_success(sc, query):
sc.queries.delete(int(query['id']))
@pytest.mark.vcr()
def test_queries_details_success(sc, query):
query = sc.queries.details(int(query['id']))
assert isinstance(query, dict)
check(query, 'id', str)
check(query, 'name', str)
check(query, 'description', str)
check(query, 'tool', str)
check(query, 'type', str)
check(query, 'tags', str)
check(query, 'context', str)
check(query, 'browseColumns', str)
check(query, 'browseSortColumn', str)
check(query, 'browseSortDirection', str)
check(query, 'createdTime', str)
check(query, 'modifiedTime', str)
check(query, 'status', str)
check(query, 'filters', list)
for filter in query['filters']:
check(filter, 'filterName', str)
check(filter, 'operator', str)
check(filter, 'value', str)
check(query, 'canManage', str)
check(query, 'canUse', str)
check(query, 'creator', dict)
check(query['creator'], 'id', str)
check(query['creator'], 'username', str)
check(query['creator'], 'firstname', str)
check(query['creator'], 'lastname', str)
check(query, 'owner', dict)
check(query['owner'], 'id', str)
check(query['owner'], 'username', str)
check(query['owner'], 'firstname', str)
check(query['owner'], 'lastname', str)
check(query, 'ownerGroup', dict)
check(query['ownerGroup'], 'id', str)
check(query['ownerGroup'], 'name', str)
check(query['ownerGroup'], 'description', str)
check(query, 'targetGroup', dict)
check(query['targetGroup'], 'id', int)
check(query['targetGroup'], 'name', str)
check(query['targetGroup'], 'description', str)
@pytest.mark.vcr()
def test_queries_details_success_for_fields(sc, query):
q = sc.queries.details(int(query['id']), fields=["id", "name", "description"])
assert isinstance(q, dict)
check(q, 'id', str)
check(q, 'name', str)
check(q, 'description', str)
@pytest.mark.vcr()
def test_queries_edit_success(sc, query):
query = sc.queries.edit(int(query['id']), name='Updated Name')
assert isinstance(query, dict)
check(query, 'id', str)
check(query, 'name', str)
check(query, 'description', str)
check(query, 'tool', str)
check(query, 'type', str)
check(query, 'tags', str)
check(query, 'context', str)
check(query, 'browseColumns', str)
check(query, 'browseSortColumn', str)
check(query, 'browseSortDirection', str)
check(query, 'createdTime', str)
check(query, 'modifiedTime', str)
check(query, 'filters', list)
for filter in query['filters']:
check(filter, 'filterName', str)
check(filter, 'operator', str)
check(filter, 'value', str)
check(query, 'canManage', str)
check(query, 'canUse', str)
check(query, 'creator', dict)
check(query['creator'], 'id', str)
check(query['creator'], 'username', str)
check(query['creator'], 'firstname', str)
check(query['creator'], 'lastname', str)
check(query, 'owner', dict)
check(query['owner'], 'id', str)
check(query['owner'], 'username', str)
check(query['owner'], 'firstname', str)
check(query['owner'], 'lastname', str)
check(query, 'ownerGroup', dict)
check(query['ownerGroup'], 'id', str)
check(query['ownerGroup'], 'name', str)
check(query['ownerGroup'], 'description', str)
check(query, 'targetGroup', dict)
check(query['targetGroup'], 'id', int)
check(query['targetGroup'], 'name', str)
check(query['targetGroup'], 'description', str)
@pytest.mark.vcr()
def test_queries_list_success(sc, query):
queries = sc.queries.list()
assert isinstance(queries, dict)
for ltype in ['manageable', 'usable']:
for query in queries[ltype]:
assert isinstance(query, dict)
check(query, 'id', str)
check(query, 'name', str)
check(query, 'description', str)
@pytest.mark.vcr()
def test_queries_list_success_for_fields(sc):
queries = sc.queries.list(fields=["id", "name"])
assert isinstance(queries, dict)
for type in ['manageable', 'usable']:
for query in queries[type]:
assert isinstance(query, dict)
check(query, 'id', str)
check(query, 'name', str)
@pytest.mark.vcr()
def test_queries_tags_success(sc):
tags = sc.queries.tags()
assert isinstance(tags, list)
for tag in tags:
assert isinstance(tag, str)
@pytest.mark.vcr()
def test_queries_share_id_typeerror(sc):
with pytest.raises(TypeError):
sc.queries.share('one', 1)
@pytest.mark.vcr()
def test_queries_share_group_id_typeerror(sc):
with pytest.raises(TypeError):
sc.queries.share(1, 'one')
@pytest.mark.vcr()
def test_queries_share_success(sc, query, group):
query = sc.queries.share(int(query['id']), int(group['id']))
assert isinstance(query, dict)
check(query, 'id', str)
check(query, 'name', str)
check(query, 'description', str)
check(query, 'tool', str)
check(query, 'type', str)
check(query, 'tags', str)
check(query, 'context', str)
check(query, 'browseColumns', str)
check(query, 'browseSortColumn', str)
check(query, 'browseSortDirection', str)
check(query, 'createdTime', str)
check(query, 'modifiedTime', str)
check(query, 'status', str)
check(query, 'filters', list)
for filter in query['filters']:
check(filter, 'filterName', str)
check(filter, 'operator', str)
check(filter, 'value', str)
check(query, 'creator', dict)
check(query['creator'], 'id', str)
check(query['creator'], 'username', str)
check(query['creator'], 'firstname', str)
check(query['creator'], 'lastname', str)
check(query, 'owner', dict)
check(query['owner'], 'id', str)
check(query['owner'], 'username', str)
check(query['owner'], 'firstname', str)
check(query['owner'], 'lastname', str)
check(query, 'ownerGroup', dict)
check(query['ownerGroup'], 'id', str)
check(query['ownerGroup'], 'name', str)
check(query['ownerGroup'], 'description', str)
check(query, 'targetGroup', dict)
check(query['targetGroup'], 'id', int)
check(query['targetGroup'], 'name', str)
check(query['targetGroup'], 'description', str)
|
StarcoderdataPython
|
4837835
|
# Imports
import torch
from torch import nn
from torch.utils.data import DataLoader, SubsetRandomSampler, TensorDataset
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import random
from sklearn.model_selection import train_test_split, KFold
from sklearn.metrics import classification_report
import matplotlib.pyplot as plt
torch.manual_seed(42)
np.random.seed(42)
random.seed(42)
device = "cuda" if torch.cuda.is_available() else "cpu"
class LSTMClassifier(nn.Module):
def __init__(self, weights_matrix):
super().__init__()
self.num_words, self.emb_dim = weights_matrix.size()
self.embedding = nn.Embedding(self.num_words, self.emb_dim, padding_idx=0)
self.embedding.load_state_dict({'weight': weights_matrix})
self.embedding.weight.requires_grad = False
self.hidden_dim = 64
self.num_layers = 2
self.lstm = nn.LSTM(self.emb_dim,
self.hidden_dim,
num_layers=self.num_layers,
bidirectional=True,
dropout=0.2,
batch_first=True)
self.fc = nn.Linear(self.hidden_dim * self.num_layers, 1)
self.init_weights()
def init_weights(self):
initrange = 0.5
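        # Note: the uniform re-initialization below overwrites the pretrained
        # weights_matrix that was loaded (and frozen) in __init__.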
self.embedding.weight.data.uniform_(-initrange, initrange)
self.fc.weight.data.uniform_(-initrange, initrange)
def forward(self, x):
x = self.embedding(x)
output, (hidden, cell) = self.lstm(x)
        # concatenate the final-layer forward and backward hidden states
hidden = torch.cat((hidden[-2,:,:], hidden[-1,:,:]), dim = 1)
x = self.fc(hidden)
return torch.sigmoid(x).squeeze()
def train_and_test_LSTM(trainset, testset, weights_matrix, batch_size = 32):
trainloader = DataLoader(trainset, batch_size = batch_size, collate_fn=collate_fn, shuffle = True)
testloader = DataLoader(testset, batch_size = batch_size, collate_fn=collate_fn, shuffle = True)
model = LSTMClassifier(weights_matrix).to(device)
trainLSTM(model, trainloader)
pred_list = []
label_list = []
with torch.no_grad():
for i, (X, y) in enumerate(testloader):
predicted_label = model(X)
pred_list.extend((predicted_label >= 0.5).cpu().numpy()) # obtain all predictions
label_list.extend(y.cpu().numpy())
target_names = ['non-hate', 'hate']
print(classification_report(label_list, pred_list, target_names=target_names))
return model
def CV_eval_LSTM(trainset, weights_matrix, k_folds = 10, batch_size = 32):
kfold = KFold(n_splits=k_folds, shuffle=True)
total_accur = 0
for fold, (train_ind, valid_ind) in enumerate(kfold.split(trainset)):
model = LSTMClassifier(weights_matrix).to(device)
train_subsampler = SubsetRandomSampler(train_ind)
valid_subsampler = SubsetRandomSampler(valid_ind)
trainloader = DataLoader(trainset, batch_size = batch_size, collate_fn=collate_fn, sampler=train_subsampler)
validloader = DataLoader(trainset, batch_size = batch_size, collate_fn=collate_fn, sampler=valid_subsampler)
trained_model = trainLSTM(model, trainloader)
accur = evalLSTM(trained_model, validloader)
total_accur += accur
del model
del trained_model
return total_accur / k_folds
def evalLSTM(model, validloader):
model.eval()
total_acc, total_count = 0, 0
with torch.no_grad():
for i, (X, y) in enumerate(validloader):
predicted_label = model(X)
total_acc += ((predicted_label >= 0.5) == y).sum().item()
total_count += len(y)
return total_acc/total_count
def trainLSTM(model, trainloader):
LR = 1e-4
EPOCHS = 6
optimizer = optim.Adam(model.parameters(), lr = LR)
criterion = nn.BCELoss()
model.train()
loss_per_epoch = []
for epoch in range(EPOCHS):
running_loss = 0.0
cnt = 0
for i, (X, y) in enumerate(trainloader):
optimizer.zero_grad()
y = y.type(torch.FloatTensor).to(device)
output = model(X)
loss = criterion(output.to(device), y)
running_loss += loss.item()
cnt += len(X)
loss.backward()
optimizer.step()
loss_per_epoch.append(running_loss / cnt)
#print(running_loss / cnt)
plt.plot(range(1, EPOCHS+1), loss_per_epoch) # plot loss over epochs
return model
def collate_fn(batch):
MAX_SEQLEN = 64 # max allowed number of words per tweet to avoid memory overload
label_list = []
text_list = torch.zeros((len(batch), MAX_SEQLEN), dtype=torch.int64)
for i, (_text, _label) in enumerate(batch):
label_list.append(_label)
if _text.size(0) < MAX_SEQLEN: # if < MAX_SEQLEN, pad with padding_idx = 0
_text = F.pad(input=_text, pad=(1, MAX_SEQLEN - _text.size(0) - 1), mode='constant', value=0)
elif _text.size(0) > MAX_SEQLEN: # truncate if > MAX_SEQLEN
_text = _text[0:MAX_SEQLEN]
text_list[i] = _text
label_list = torch.tensor(label_list, dtype=torch.int64)
return text_list.to(device), label_list.to(device)
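# Minimal smoke test for the pipeline above (an added sketch, not part of the original
# script): it uses a small random embedding matrix and randomly generated "tweets", so
# the reported metrics are meaningless and only exercise the code path.
if __name__ == "__main__":
    vocab_size, emb_dim = 100, 50
    weights_matrix = torch.rand(vocab_size, emb_dim)
    # 64 fake tweets of 20 token ids each, with random binary labels
    texts = torch.randint(1, vocab_size, (64, 20))
    labels = torch.randint(0, 2, (64,))
    dataset = [(texts[i], int(labels[i])) for i in range(len(labels))]
    trainset, testset = dataset[:48], dataset[48:]
    train_and_test_LSTM(trainset, testset, weights_matrix, batch_size=8)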
|
StarcoderdataPython
|
97689
|
<reponame>openedx/openedx-census
#!/usr/bin/env python
"""Automate the process of counting courses on Open edX sites."""
import asyncio
import collections
import csv
import itertools
import json
import logging
import os
import pickle
import pprint
import re
import time
import traceback
import urllib.parse
import attr
import click
import requests
import tqdm
from census.helpers import NotTrying, ScrapeFail
from census.html_report import html_report
from census.keys import username, password
from census.report_helpers import get_known_domains, hash_sites_together, sort_sites
from census.session import SessionFactory
from census.settings import (
STATS_SITE,
UPDATE_JSON,
SITES_CSV,
SITES_PICKLE,
MAX_REQUESTS,
TIMEOUT,
USER_AGENT,
)
from census.sites import Attempt, Site, HashedSite, read_sites_csv, courses_and_orgs, totals, read_sites_flat, overcount
from census.site_patterns import find_site_functions
# We don't use anything from this module, it just registers all the parsers.
from census import parsers
HEADERS = {
'User-Agent': USER_AGENT,
}
GONE_MSGS = [
"Cannot connect to host",
"Bad Gateway",
"TimeoutError",
"500",
"503",
"404",
"530 get http", # Cloudflare DNS failures
]
CERTIFICATE_MSGS = [
"certificate verify failed",
"CertificateError:",
]
FALSE_ALARM_CERTIFICATE_MSGS = [
"unable to get local issuer certificate",
]
log = logging.getLogger(__name__)
def all_have_snippets(errors, snippets):
"""Do all of the errors match one of the snippets?"""
return all(any(snip in err for snip in snippets) for err in errors)
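# For example (illustrative): all_have_snippets(["503 Service Unavailable", "Bad Gateway"], GONE_MSGS)
# is True, but all_have_snippets(["Bad Gateway", "some new error"], GONE_MSGS) is False.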
async def parse_site(site, session_factory):
for verify_ssl in [True, False]:
async with session_factory.new(verify_ssl=verify_ssl, listeners=[site]) as session:
start = time.time()
errs = []
success = False
for parser, args, kwargs, custom_parser in find_site_functions(site.url):
attempt = Attempt(parser.__name__)
err = None
try:
attempt.courses = await parser(site, session, *args, **kwargs)
except NotTrying as exc:
attempt.error = str(exc)
except ScrapeFail as exc:
attempt.error = f"{exc.__class__.__name__}: {exc}"
err = str(exc) or exc.__class__.__name__
except Exception as exc:
#print(f"Exception: {exc!r}, {exc}, {exc.__class__.__name__}")
#print(traceback.format_exc())
attempt.error = traceback.format_exc()
err = str(exc) or exc.__class__.__name__
else:
success = True
site.tried.append(attempt)
if err:
errs.append(err)
if custom_parser:
site.custom_parser_err = True
else:
if custom_parser:
break
if success:
site.current_courses = site.attempt_course_count()
if site.is_gone:
char = 'B'
else:
if site.current_courses == site.latest_courses:
char = '='
elif site.current_courses < site.latest_courses:
char = '-'
else:
char = '+'
else:
if verify_ssl and all_have_snippets(errs, CERTIFICATE_MSGS):
# We had an SSL error. Try again. But only mark it as an error if it wasn't
# a false alarm error.
if not all_have_snippets(errs, FALSE_ALARM_CERTIFICATE_MSGS):
site.ssl_err = True
site.tried = []
site.custom_parser_err = False
log.debug("SSL error: %s", (errs,))
continue
gone_content = site.current_courses is None and not site.is_openedx
gone_http = all_have_snippets(errs, GONE_MSGS)
if gone_content or gone_http:
site.is_gone_now = True
if site.is_gone:
char = 'X'
else:
char = 'G'
else:
char = 'E'
site.time = time.time() - start
return char
async def run(sites, session_kwargs):
kwargs = dict(max_requests=MAX_REQUESTS, headers=HEADERS)
kwargs.update(session_kwargs)
factory = SessionFactory(**kwargs)
tasks = [asyncio.ensure_future(parse_site(site, factory)) for site in sites]
chars = collections.Counter()
progress = tqdm.tqdm(asyncio.as_completed(tasks), total=len(tasks), smoothing=0.0)
for completed in progress:
char = await completed
chars[char] += 1
desc = " ".join(f"{c}{v}" for c, v in sorted(chars.items()))
progress.set_description(desc)
progress.close()
print()
def scrape_sites(sites, session_kwargs):
try:
loop = asyncio.get_event_loop()
future = asyncio.ensure_future(run(sites, session_kwargs))
# Some exceptions go to stderr and then to my except clause? Shut up.
loop.set_exception_handler(lambda loop, context: None)
loop.run_until_complete(future)
except KeyboardInterrupt:
pass
@click.group(help=__doc__)
def cli():
pass
@cli.command()
@click.option('--in', 'in_file', type=click.Path(exists=True), help="File of sites to scrape")
@click.option('--log', 'log_level', type=str, default='info', help="Logging level to use")
@click.option('--gone', is_flag=True, help="Scrape the sites we've recorded as gone")
@click.option('--site', is_flag=True, help="Command-line arguments are URLs to scrape")
@click.option('--summarize', is_flag=True, help="Summarize results instead of saving pickle")
@click.option('--save', is_flag=True, help="Save the scraped pages in the save/ directory")
@click.option('--out', 'out_file', type=click.File('wb'), default=SITES_PICKLE, help="Pickle file to write")
@click.option('--timeout', type=int, help=f"Timeout in seconds for each request [{TIMEOUT}]", default=TIMEOUT)
@click.argument('site_patterns', nargs=-1)
def scrape(in_file, log_level, gone, site, summarize, save, out_file, timeout, site_patterns):
"""Visit sites and count their courses."""
logging.basicConfig(level=log_level.upper())
# aiohttp issues warnings about cookies, silence them (and all other warnings!)
# WARNING:aiohttp.client:Can not load response cookies: Illegal key
# The bad cookies were from http://rechum.sev.gob.mx
logging.getLogger('aiohttp.client').setLevel(logging.ERROR)
if site:
# Exact sites provided on the command line
sites = (Site.from_url(u) for u in site_patterns)
else:
# Make the list of sites we're going to scrape.
in_file = in_file or SITES_CSV
if in_file.endswith('.csv'):
sites = read_sites_csv(in_file)
else:
sites = read_sites_flat(in_file)
if site_patterns:
sites = (s for s in sites if any(re.search(p, s.url) for p in site_patterns))
if not gone:
sites = (s for s in sites if not s.is_gone)
sites = list(sites)
if len(sites) == 1:
print("1 site")
else:
print(f"{len(sites)} sites")
os.makedirs("save", exist_ok=True)
# SCRAPE!
session_kwargs = {
'save': save,
'timeout': timeout,
}
scrape_sites(sites, session_kwargs)
if summarize:
show_text_report(sites)
else:
with out_file:
pickle.dump(sites, out_file)
@cli.command()
@click.option('--in', 'in_file', type=click.File('rb'), default=SITES_PICKLE,
help='The sites.pickle file to read')
def summary(in_file):
with in_file:
sites = pickle.load(in_file)
summarize(sites)
def summarize(sites):
old, new = totals(sites)
changed = sum(1 for s in sites if s.should_update())
gone = sum(1 for s in sites if s.is_gone_now and not s.is_gone)
back = sum(1 for s in sites if not s.is_gone_now and s.is_gone and s.current_courses)
print(f"{len(sites)} sites")
print(f"Courses: {old} --> {new} ({new-old:+d}); Sites: {changed} changed, {gone} gone, {back} back")
hashed_sites = collections.defaultdict(HashedSite)
nohash_sites = []
for site in sites:
if site.is_gone_now:
continue
if not site.current_courses:
continue
if site.fingerprint is None:
hashed_site = HashedSite()
hashed_site.sites.append(site)
nohash_sites.append(hashed_site)
else:
hashed_site = hashed_sites[site.fingerprint]
hashed_site.fingerprint = site.fingerprint
hashed_site.sites.append(site)
print(f"{len(nohash_sites)} with no hash, {len(hashed_sites)} with hash")
if nohash_sites:
print("No hash:")
for site in nohash_sites:
print(f" {site.best_url()}: {site.current_courses()}")
chaff_sites = []
not_chaff_sites = []
for hashed_site in itertools.chain(hashed_sites.values(), nohash_sites):
if hashed_site.all_chaff():
chaff_sites.append(hashed_site)
else:
not_chaff_sites.append(hashed_site)
print(f"Total sites: {len(not_chaff_sites)} not chaff, {len(chaff_sites)} chaff")
@cli.command()
@click.option('--in', 'in_file', type=click.File('rb'), default=SITES_PICKLE,
help='The sites.pickle file to read')
@click.option('--out', 'out_file', type=click.File('w'), default="html/sites.html",
help='The HTML file to write')
@click.option('--skip-none', is_flag=True, help="Don't include sites with no count")
@click.option('--only-new', is_flag=True, help="Only include sites we think are new")
@click.option('--full', is_flag=True, help="Include courses, orgs, etc")
def html(in_file, out_file, skip_none, only_new, full):
"""Write an HTML report."""
with in_file:
sites = pickle.load(in_file)
if skip_none:
sites = [site for site in sites if site.current_courses is not None]
# Prep data for reporting.
old, new = totals(sites)
if full:
all_courses, all_orgs, all_course_ids = courses_and_orgs(sites)
with open("course-ids.txt", "w") as f:
f.write("".join(i + "\n" for i in sorted(all_course_ids)))
else:
all_courses = all_orgs = None
html_report(out_file, sites, old, new, all_courses, all_orgs, only_new=only_new)
@cli.command()
@click.option('--in', 'in_file', type=click.File('rb'), default=SITES_PICKLE,
help='The sites.pickle file to read')
@click.option('--out', 'out_file', type=click.File('w'), default="html/sites.csv",
help='The CSV file to write')
def sheet(in_file, out_file):
"""Write a CSV file for importing into a spreadsheet.
Always skips no-course sites. Only includes new sites.
"""
with in_file:
sites = pickle.load(in_file)
sites = [site for site in sites if site.current_courses is not None]
known_domains = get_known_domains()
hashed_sites = hash_sites_together(sites, known_domains, only_new=True)
writer = csv.DictWriter(out_file, ["disposition", "language", "geography", "url", "courses", "sites", "tags", "aliases"])
writer.writeheader()
for hashed_site in hashed_sites:
url = hashed_site.best_url()
other_urls = [site.url for site in hashed_site.sites if site.url != url]
tags = {t for site in hashed_site.sites for t, _ in site.styled_tags()}
writer.writerow({
"url": url,
"courses": hashed_site.current_courses(),
"sites": len(hashed_site.sites),
"tags": ", ".join(sorted(tags)),
"aliases": ", ".join(other_urls),
})
print(f"Wrote {len(hashed_sites)} sites to {out_file.name}")
@cli.command()
@click.option('--in', 'in_file', type=click.File('rb'), default=SITES_PICKLE)
def emails(in_file):
"""Write the emails found."""
with in_file:
sites = pickle.load(in_file)
emails = set()
for site in sites:
emails.update(site.emails)
print("\n".join(sorted(emails)))
@cli.command('json')
@click.option('--in', 'in_file', type=click.File('rb'), default=SITES_PICKLE)
def write_json(in_file):
"""Write the update.json file."""
with in_file:
sites = pickle.load(in_file)
# Prep data for reporting.
sites_descending = sorted(sites, key=lambda s: s.latest_courses, reverse=True)
all_courses, _, _ = courses_and_orgs(sites)
json_update(sites_descending, all_courses, include_overcount=True)
@cli.command('text')
@click.option('--in', 'in_file', type=click.File('rb'), default=SITES_PICKLE,
help='The sites.pickle file to read')
def text_report(in_file):
"""Write a text report about site scraping."""
with in_file:
sites = pickle.load(in_file)
show_text_report(sites)
def show_text_report(sites):
old, new = totals(sites)
sites = sorted(sites, key=lambda s: s.latest_courses, reverse=True)
print(f"Found courses went from {old} to {new}")
for site in sites:
print(f"{site.url}: {site.latest_courses} --> {site.current_courses} ({site.fingerprint})")
for attempt in site.tried:
if attempt.error is not None:
line = attempt.error.splitlines()[-1]
else:
line = f"Counted {attempt.courses} courses"
print(f" {attempt.strategy}: {line}")
tags = ", ".join(t for t, s in site.styled_tags())
if tags:
print(f" [{tags}]")
other = site.other_info + site.emails
if other:
print(f" Info: {'; '.join(set(other))}")
def json_update(sites, all_courses, include_overcount=False):
"""Write a JSON file for uploading to the stats site.
`all_courses` is a dict mapping course_ids to a set of sites running that
course.
"""
data = {}
site_updates = {
s.url: {
'old_course_count': s.latest_courses,
'course_count': s.current_courses if s.current_courses is not None else s.latest_courses,
'is_gone': s.is_gone_now,
}
for s in sites if s.should_update()
}
data['sites'] = site_updates
if include_overcount:
data['overcount'] = overcount(all_courses)
with open(UPDATE_JSON, "w") as update_json:
json.dump(data, update_json, indent=4)
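# For illustration only (hypothetical URL and counts), the update.json produced
# by json_update has this shape:
#
#   {
#       "sites": {
#           "https://example.openedx.site/": {
#               "old_course_count": 10,
#               "course_count": 12,
#               "is_gone": false
#           }
#       },
#       "overcount": 3
#   }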
def login(site, session):
login_url = urllib.parse.urljoin(site, "/login/")
resp = session.get(login_url)
resp.raise_for_status()
m = re.search(r'name="csrfmiddlewaretoken" value="([^"]+)"', resp.text)
if m:
csrftoken = m.group(1)
else:
raise Exception(f"No CSRF token found from {login_url}")
resp = session.post(login_url, data={'username': username, 'password': password, 'csrfmiddlewaretoken': csrftoken})
if resp.status_code not in [200, 404]:
resp.raise_for_status()
@cli.command()
@click.argument('site', default=STATS_SITE)
def getcsv(site):
"""Get the sites.csv file from the app."""
with requests.Session() as s:
login(site, s)
csv_url = urllib.parse.urljoin(site, "/sites/csv/?include_gone=1&skip_lang_geo=1")
resp = s.get(csv_url)
if resp.status_code != 200:
resp.raise_for_status()
content = resp.content
with open(SITES_CSV, "wb") as csv_file:
csv_file.write(content)
lines = content.splitlines()
print(f"Wrote {len(lines)-1} sites to {SITES_CSV}")
@cli.command()
@click.argument('site', default=STATS_SITE)
def post(site):
"""Post updated numbers to the stats-collecting site."""
with open(UPDATE_JSON) as f:
data = f.read()
with requests.Session() as s:
login(site, s)
bulk_url = urllib.parse.urljoin(site, "/sites/bulk_update/")
resp = s.post(bulk_url, data=data)
print(resp.text)
@cli.command()
@click.argument('site', default=STATS_SITE)
def bulkcreate(site):
"""Upload a YAML file to create a number of sites.
Create a bulk.yaml file like this:
\b
- url: https://loveandpowerinshakespearesplays.edunext.io/
course_count: 6
notes: edunext
- url: https://lms.cursate.org
course_count: 4
language: Spanish
geography: Colombia
notes: eduNEXT
"""
with open("bulk.yaml") as f:
data = f.read()
with requests.Session() as s:
login(site, s)
bulk_url = urllib.parse.urljoin(site, "/sites/bulk_create/")
resp = s.post(bulk_url, data=data)
print(resp.text)
if __name__ == '__main__':
cli()
|
StarcoderdataPython
|
24183
|
import os
import sys
import json
import argparse
import progressbar
from pathlib import Path
from random import shuffle
from time import time
import torch
from cpc.dataset import findAllSeqs
from cpc.feature_loader import buildFeature, FeatureModule, loadModel, buildFeature_batch
from cpc.criterion.clustering import kMeanCluster
#from cpc.criterion.research.clustering import kMeanCluster
def readArgs(pathArgs):
print(f"Loading args from {pathArgs}")
with open(pathArgs, 'r') as file:
args = argparse.Namespace(**json.load(file))
return args
def loadClusterModule(pathCheckpoint, norm_vec_len=False):
print(f"Loading ClusterModule at {pathCheckpoint}")
state_dict = torch.load(pathCheckpoint)
if "state_dict" in state_dict: #kmeans
clusterModule = kMeanCluster(torch.zeros(1, state_dict["n_clusters"], state_dict["dim"]), norm_vec_len)
clusterModule.load_state_dict(state_dict["state_dict"])
else: #dpmeans
clusterModule = kMeanCluster(state_dict["mu"])
clusterModule = clusterModule.cuda()
return clusterModule
def parseArgs(argv):
# Run parameters
parser = argparse.ArgumentParser(description='Quantize audio files using CPC Clustering Module.')
parser.add_argument('pathCheckpoint', type=str,
help='Path to the clustering checkpoint.')
parser.add_argument('pathDB', type=str,
help='Path to the dataset that we want to quantize.')
parser.add_argument('pathOutput', type=str,
help='Path to the output directory.')
parser.add_argument('--pathSeq', type=str,
                        help='Path to the sequences (file names) to be included.')
parser.add_argument('--split', type=str, default=None,
help="If you want to divide the dataset in small splits, specify it "
"with idxSplit-numSplits (idxSplit > 0), eg. --split 1-20.")
parser.add_argument('--file_extension', type=str, default=".flac",
help="Extension of the audio files in the dataset (default: .flac).")
parser.add_argument('--max_size_seq', type=int, default=10240,
help='Maximal number of frames to consider '
                        'when computing a batch of features (default: 10240).')
parser.add_argument('--batch_size', type=int, default=8,
help='Batch size used to compute features '
                        'when computing each file (default: 8).')
parser.add_argument('--strict', type=bool, default=True,
help='If activated, each batch of feature '
                        'will contain exactly max_size_seq frames (default: True).')
parser.add_argument('--debug', action='store_true',
help="Load only a very small amount of files for "
"debugging purposes.")
parser.add_argument('--nobatch', action='store_true',
help="Don't use batch implementation of when building features."
"NOTE: This can have better quantized units as we can set "
"model.gAR.keepHidden = True (line 162), but the quantization"
"will be a bit longer.")
parser.add_argument('--recursionLevel', type=int, default=1,
                        help='Speaker level in pathDB (default: 1). This is only helpful '
                        'when --separate-speaker is activated.')
parser.add_argument('--separate-speaker', action='store_true',
help="Separate each speaker with a different output file.")
parser.add_argument('--norm_vec_len', action='store_true',
help="Normalize vector lengths.")
return parser.parse_args(argv)
def main(argv):
# Args parser
args = parseArgs(argv)
print("=============================================================")
print(f"Quantizing data from {args.pathDB}")
print("=============================================================")
# Check if directory exists
if not os.path.exists(args.pathOutput):
print("")
print(f"Creating the output directory at {args.pathOutput}")
Path(args.pathOutput).mkdir(parents=True, exist_ok=True)
# Get splits
if args.split:
assert len(args.split.split("-"))==2 and int(args.split.split("-")[1]) >= int(args.split.split("-")[0]) >= 1, \
"SPLIT must be under the form idxSplit-numSplits (numSplits >= idxSplit >= 1), eg. --split 1-20"
idx_split, num_splits = args.split.split("-")
idx_split = int(idx_split)
num_splits = int(num_splits)
# Find all sequences
print("")
print(f"Looking for all {args.file_extension} files in {args.pathDB} with speakerLevel {args.recursionLevel}")
seqNames, speakers = findAllSeqs(args.pathDB,
speaker_level=args.recursionLevel,
extension=args.file_extension,
loadCache=True)
if args.pathSeq:
with open(args.pathSeq, 'r') as f:
seqs = set([x.strip() for x in f])
filtered = []
for s in seqNames:
if s[1].split('/')[-1].split('.')[0] in seqs:
filtered.append(s)
seqNames = filtered
print(f"Done! Found {len(seqNames)} files and {len(speakers)} speakers!")
if args.separate_speaker:
seqNames_by_speaker = {}
for seq in seqNames:
speaker = seq[1].split("/")[args.recursionLevel-1]
if speaker not in seqNames_by_speaker:
seqNames_by_speaker[speaker] = []
seqNames_by_speaker[speaker].append(seq)
# Check if output file exists
if not args.split:
nameOutput = "quantized_outputs.txt"
else:
nameOutput = f"quantized_outputs_split_{idx_split}-{num_splits}.txt"
if args.separate_speaker is False:
outputFile = os.path.join(args.pathOutput, nameOutput)
assert not os.path.exists(outputFile), \
f"Output file {outputFile} already exists !!!"
# Get splits
if args.split:
startIdx = len(seqNames) // num_splits * (idx_split-1)
if idx_split == num_splits:
endIdx = len(seqNames)
else:
endIdx = min(len(seqNames) // num_splits * idx_split, len(seqNames))
seqNames = seqNames[startIdx:endIdx]
print("")
print(f"Quantizing split {idx_split} out of {num_splits} splits, with {len(seqNames)} files (idx in range({startIdx}, {endIdx})).")
# Debug mode
if args.debug:
nsamples=20
print("")
print(f"Debug mode activated, only load {nsamples} samples!")
# shuffle(seqNames)
seqNames = seqNames[:nsamples]
# Load Clustering args
assert args.pathCheckpoint[-3:] == ".pt"
if os.path.exists(args.pathCheckpoint[:-3] + "_args.json"):
pathConfig = args.pathCheckpoint[:-3] + "_args.json"
elif os.path.exists(os.path.join(os.path.dirname(args.pathCheckpoint), "checkpoint_args.json")):
pathConfig = os.path.join(os.path.dirname(args.pathCheckpoint), "checkpoint_args.json")
else:
assert False, \
f"Args file not found in the directory {os.path.dirname(args.pathCheckpoint)}"
clustering_args = readArgs(pathConfig)
print("")
print(f"Clutering args:\n{json.dumps(vars(clustering_args), indent=4, sort_keys=True)}")
print('-' * 50)
    # Load ClusterModule
clusterModule = loadClusterModule(args.pathCheckpoint, norm_vec_len=args.norm_vec_len)
clusterModule.cuda()
# Load FeatureMaker
print("")
print("Loading CPC FeatureMaker")
if 'level_gru' in vars(clustering_args) and clustering_args.level_gru is not None:
updateConfig = argparse.Namespace(nLevelsGRU=clustering_args.level_gru)
else:
updateConfig = None
model = loadModel([clustering_args.pathCheckpoint], updateConfig=updateConfig)[0]
## If we don't apply batch implementation, we can set LSTM model to keep hidden units
## making the quality of the quantized units better
if args.nobatch:
model.gAR.keepHidden = True
featureMaker = FeatureModule(model, clustering_args.encoder_layer)
if clustering_args.dimReduction is not None:
dimRed = loadDimReduction(clustering_args.dimReduction, clustering_args.centroidLimits)
featureMaker = torch.nn.Sequential(featureMaker, dimRed)
if not clustering_args.train_mode:
featureMaker.eval()
featureMaker.cuda()
def feature_function(x):
if args.nobatch is False:
res0 = buildFeature_batch(featureMaker, x,
seqNorm=False,
strict=args.strict,
maxSizeSeq=args.max_size_seq,
batch_size=args.batch_size)
if args.norm_vec_len:
# [!] we actually used CPC_audio/scripts/quantize_audio.py for that in the end
res0Lengths = torch.sqrt((res0*res0).sum(2))
res0 = res0 / res0Lengths.view(*(res0Lengths.shape), 1)
return res0
else:
res0 = buildFeature(featureMaker, x,
seqNorm=False,
strict=args.strict)
if args.norm_vec_len:
# [!] we actually used CPC_audio/scripts/quantize_audio.py for that in the end
res0Lengths = torch.sqrt((res0*res0).sum(2))
res0 = res0 / res0Lengths.view(*(res0Lengths.shape), 1)
return res0
print("CPC FeatureMaker loaded!")
# Quantization of files
print("")
print(f"Quantizing audio files...")
seqQuantLines = []
bar = progressbar.ProgressBar(maxval=len(seqNames))
bar.start()
start_time = time()
for index, vals in enumerate(seqNames):
bar.update(index)
file_path = vals[1]
file_path = os.path.join(args.pathDB, file_path)
# Get features & quantizing
cFeatures = feature_function(file_path).cuda()
nGroups = cFeatures.size(-1)//clusterModule.Ck.size(-1)
cFeatures = cFeatures.view(1, -1, clusterModule.Ck.size(-1))
if len(vals) > 2 and int(vals[-1]) > 9400000: # Librilight, to avoid OOM
clusterModule = clusterModule.cpu()
cFeatures = cFeatures.cpu()
qFeatures = torch.argmin(clusterModule(cFeatures), dim=-1)
clusterModule = clusterModule.cuda()
else:
qFeatures = torch.argmin(clusterModule(cFeatures), dim=-1)
qFeatures = qFeatures[0].detach().cpu().numpy()
# Transform to quantized line
quantLine = ",".join(["-".join([str(i) for i in item]) for item in qFeatures.reshape(-1, nGroups)])
seqQuantLines.append(quantLine)
bar.finish()
print(f"...done {len(seqQuantLines)} files in {time()-start_time} seconds.")
# Saving outputs
print("")
print(f"Saving outputs to {outputFile}")
outLines = []
for vals, quantln in zip(seqNames, seqQuantLines):
file_path = vals[1]
file_name = os.path.splitext(os.path.basename(file_path))[0]
outLines.append("\t".join([file_name, quantln]))
with open(outputFile, "w") as f:
f.write("\n".join(outLines))
if __name__ == "__main__":
args = sys.argv[1:]
main(args)
|
StarcoderdataPython
|
3224494
|
<filename>term2048/keypress.py<gh_stars>10-100
# -*- coding: UTF-8 -*-
try:
import termios
except ImportError:
# Assume windows
import msvcrt
UP, DOWN, RIGHT, LEFT = 72, 80, 77, 75
def getKey():
while True:
if msvcrt.kbhit():
a = ord(msvcrt.getch())
return a
else:
# refs:
# http://bytes.com/topic/python/answers/630206-check-keypress-linux-xterm
# http://stackoverflow.com/a/2521032/735926
import sys
import tty
__fd = sys.stdin.fileno()
__old = termios.tcgetattr(__fd)
# Arrow keys
# they are preceded by 27 and 91, hence the double 'if' in getKey.
UP, DOWN, RIGHT, LEFT = 65, 66, 67, 68
# Vim keys
K, J, L, H = 107, 106, 108, 104
__key_aliases = {
K: UP,
J: DOWN,
L: RIGHT,
H: LEFT,
}
def __getKey():
"""Return a key pressed by the user"""
try:
tty.setcbreak(sys.stdin.fileno())
termios.tcflush(sys.stdin, termios.TCIOFLUSH)
ch = sys.stdin.read(1)
return ord(ch) if ch else None
finally:
termios.tcsetattr(__fd, termios.TCSADRAIN, __old)
def getKey():
"""
same as __getKey, but handle arrow keys
"""
k = __getKey()
if k == 27:
k = __getKey()
if k == 91:
k = __getKey()
return __key_aliases.get(k, k)
# legacy support
getArrowKey = getKey
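# For illustration: pressing the up arrow on a terminal sends the escape
# sequence ESC [ A (bytes 27, 91, 65), so getKey() consumes the 27 and 91 and
# returns 65 (UP); pressing 'k' (107) is mapped by __key_aliases to UP as well.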
|
StarcoderdataPython
|
3259379
|
<reponame>zealoussnow/chromium<gh_stars>1000+
#!/usr/bin/env python
# Copyright (c) 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Creates a dummy RTS filter file if a real one doesn't exist yes.
Real filter files are generated by the RTS binary for suites with any
skippable tests. The rest of the suites need to have dummy files because gn
will expect the file to be present.
Implementation uses try / except because the filter files are written
relatively close to when this code creates the dummy files.
The following type of implementation would have a race condition:
  if not os.path.isfile(filter_file):
    with open(filter_file, 'w') as fp:
      fp.write('*')
"""
import errno
import os
import sys
def main():
filter_file = sys.argv[1]
directory = os.path.dirname(filter_file)
try:
os.makedirs(directory)
except OSError as err:
if err.errno == errno.EEXIST:
pass
else:
raise
try:
fp = os.open(filter_file, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
except OSError as err:
if err.errno == errno.EEXIST:
pass
else:
raise
else:
with os.fdopen(fp, 'w') as file_obj:
file_obj.write('*') # '*' is a dummy that means run everything
if __name__ == '__main__':
sys.exit(main())
|
StarcoderdataPython
|
1636088
|
<gh_stars>1-10
# load the data into a two-dimensional array
with open('../dane/dane.txt') as f:
data = []
for line in f.readlines():
data.append(line[:-1].split(' '))
# find the minimum and maximum in the array
brightest = 0
darkest = 256
# iterate over each row
for ln in data:
    # iterate over each number in the row
for number in ln:
        # update min and max
number = int(number)
if number > brightest:
brightest = number
if number < darkest:
darkest = number
# print the answer
answer = f'6.1. Darkest: {darkest}; brightest: {brightest}'
print(answer)
|
StarcoderdataPython
|
1642435
|
"""
Copyright MIT and Harvey Mudd College
MIT License
Summer 2020
Defines the interface of the Display module of the racecar_core library.
"""
import abc
import numpy as np
import math
from typing import List, Tuple, Any
from nptyping import NDArray
import racecar_utils as rc_utils
class Display(abc.ABC):
"""
Allows the user to print images to the screen.
"""
    # The radii of the dots used to indicate points
__BIG_DOT_RADIUS = 8
__SMALL_DOT_RADIUS = 4
__LIDAR_CAR_RADIUS = 2
@abc.abstractmethod
def create_window(self) -> None:
"""
Creates an empty window into which images will be displayed.
Note:
It is not necessary to call create_window before any of the other display
methods (show_color_image, show_depth_image, etc.). These methods will
automatically create a new window if one was not already created.
Example::
# Creates a window
            rc.display.create_window()
# Display an image in this window
image = rc.camera.get_color_image()
rc.display.show_color_image(image)
"""
pass
@abc.abstractmethod
def show_color_image(self, image: NDArray) -> None:
"""
Displays a color image in a window.
Args:
            image: The color image to display to the screen.
Example::
image = rc.camera.get_color_image()
# Show the image captured by the camera
rc.display.show_color_image(image)
"""
pass
def show_depth_image(
self,
image: NDArray[(Any, Any), np.float32],
max_depth: int = 1000,
points: List[Tuple[int, int]] = [],
) -> None:
"""
Displays a depth image in grayscale in a window.
Args:
image: The depth image to display to the screen.
max_depth: The farthest depth to show in the image in cm. Anything past
this depth is shown as black.
points: A list of points in (pixel row, pixel column) format to show on
the image as colored dots.
Example::
depth_image = rc.camera.get_depth_image()
# Show the depth_image captured by the camera.
rc.display.show_depth_image(depth_image)
            # Show anything that is at most 500 cm away, and show a colored dot
            # at row 3, column 5
rc.display.show_depth_image(depth_image, 500, [(3, 5)])
"""
assert max_depth > 0, "max_depth must be positive."
for point in points:
assert (
0 <= point[0] < image.shape[0] and 0 <= point[1] < image.shape[1]
), "The point {} is not a valid pixel row and column within image.".format(
point
)
color_image = rc_utils.colormap_depth_image(image, max_depth)
# Draw a dot at each point in points
for point in points:
rc_utils.draw_circle(
color_image,
point,
rc_utils.ColorBGR.green.value,
radius=self.__BIG_DOT_RADIUS,
)
rc_utils.draw_circle(
color_image,
point,
rc_utils.ColorBGR.blue.value,
radius=self.__SMALL_DOT_RADIUS,
)
self.show_color_image(color_image)
def show_lidar(
self,
samples: NDArray[Any, np.float32],
radius: int = 128,
max_range: int = 1000,
highlighted_samples: List[Tuple[float, float]] = [],
) -> None:
"""
Displays a set of LIDAR samples.
Args:
samples: A complete LIDAR scan.
radius: Half of the width or height (in pixels) of the generated image.
max_range: The farthest depth to show in the image in cm. Anything past
this depth is shown as black.
highlighted_samples: A list of samples in (angle, distance) format to show
as light blue dots. Angle must be in degrees from straight ahead
(clockwise), and distance must be in cm.
Note:
Each sample in samples is shown as a red pixel. Each sample in
highlighted_samples is shown as a blue pixel. The car is shown as a green
dot at the center of the visualization.
Warning:
samples must be a complete LIDAR scan. This function assumes that each
            sample is equally spaced in angle, and that the samples span the entire 360 degrees.
If this is not the case, the visualization will be inaccurate.
        Example::
            # Assuming scan is a complete 360 degree LIDAR scan (one distance
            # per sample), show the scan around the car
            rc.display.show_lidar(scan)
            # Show the same scan, highlighting the sample directly ahead of the
            # car at a distance of 100 cm
            rc.display.show_lidar(scan, highlighted_samples=[(0, 100)])
"""
assert radius > 0, "radius must be positive."
assert max_range > 0, "max_range must be positive."
# Create a square black image with the requested radius
image = np.zeros((2 * radius, 2 * radius, 3), np.uint8, "C")
num_samples: int = len(samples)
# Draw a red pixel for each non-zero sample less than max_range
for i in range(num_samples):
if 0 < samples[i] < max_range:
angle: float = 2 * math.pi * i / num_samples
length: float = radius * samples[i] / max_range
r: int = int(radius - length * math.cos(angle))
c: int = int(radius + length * math.sin(angle))
image[r][c][2] = 255
# Draw a green dot to denote the car
rc_utils.draw_circle(
image,
(radius, radius),
rc_utils.ColorBGR.green.value,
self.__LIDAR_CAR_RADIUS,
)
# Draw a light blue pixel for each point in highlighted_samples
for (angle, distance) in highlighted_samples:
if 0 < distance < max_range:
angle_rad = angle * math.pi / 180
length: float = radius * distance / max_range
r: int = int(radius - length * math.cos(angle_rad))
c: int = int(radius + length * math.sin(angle_rad))
image[r][c][0] = 255
image[r][c][1] = 255
image[r][c][2] = 0
self.show_color_image(image)
|
StarcoderdataPython
|
3222918
|
from __future__ import print_function, division, absolute_import
import pytest
import subprocess32 as subprocess
from runner import main as runner, _check_tool
def test_bad_tx_cmd():
# Trigger a fatal error from a command line tool, to make sure
# it is handled correctly.
with pytest.raises(subprocess.CalledProcessError):
runner(['-t', 'tx', '-o', 'bad_opt'])
@pytest.mark.parametrize('tool_name', ['not_a_tool'])
def test_check_tool_error(tool_name):
assert isinstance(_check_tool(tool_name), tuple)
@pytest.mark.parametrize('tool_name', [
'detype1', 'type1', 'sfntedit', 'sfntdiff', 'makeotfexe'])
def test_check_tool_unhacked(tool_name):
assert _check_tool(tool_name) == tool_name
def test_capture_error_message():
output_path = runner(['-t', 'makeotfexe', '-s', '-e'])
with open(output_path, 'rb') as f:
output = f.read()
assert b"[FATAL] Source font file not found: font.ps" in output
|
StarcoderdataPython
|
1676832
|
import logging
from contextlib import contextmanager
from django.db import models
from django_directed.models.abstract_base_models import BaseGraph
try:
from asgiref.local import Local as local
except ImportError:
from threading import local
logger = logging.getLogger("django_directed")
_threadlocals = local()
def _set_current_graph_instance(graph_fullname, current_graph_instance):
"""Sets the graph in the local thread"""
setattr(_threadlocals, graph_fullname, current_graph_instance)
def get_current_graph_instance(graph_fullname):
"""Returns the graph if it exists in the local thread"""
current_graph_instance = getattr(_threadlocals, graph_fullname, None)
return current_graph_instance
@contextmanager
def graph_scope(graph):
"""
Context manager for graphs. Used to set and cleanup Graph instance.
If nested, saves outer context and resets it at conclusion of scope.
    `graph` should be a Graph instance.
    graph = MyGraphModel.objects.get(pk=1)
    Using the context manager:
```python
with graph_scope(graph):
Profile.objects.get(pk=1)
```
Using it as a decorator
```python
@graph_scope(graph)
def foo():
Profile.object.get(pk=1)
```
Using it in a task
```python
def graph_do_some_task(graph_instance=None):
with graph_scope(graph_instance):
work()
```
"""
if not isinstance(graph, BaseGraph):
raise Exception("Wrong Graph type provided to graph_scope")
graph_fullname = f"{graph._meta.app_label}.{graph._meta.label}"
previous = getattr(_threadlocals, graph_fullname, None)
_set_current_graph_instance(graph_fullname=graph_fullname, current_graph_instance=graph)
try:
yield
finally:
if previous is not None:
_set_current_graph_instance(graph_fullname=graph_fullname, current_graph_instance=previous)
else:
delattr(_threadlocals, graph_fullname)
|
StarcoderdataPython
|
50620
|
<filename>TDMA_solver.py
# Matrix solver (TDMA)
# parameters required:
# n: number of unknowns (length of x)
# a: coefficient matrix
# b: RHS/constant array
# return output:
# b: solution array
def solve_TDMA(n, a, b):
# forward substitution
a[0][2] = -a[0][2] / a[0][1]
b[0] = b[0] / a[0][1]
for i in range(1, n):
a[i][2] = -a[i][2] / (a[i][1] + a[i][0] * a[i - 1][2])
b[i] = (b[i] - a[i][0] * b[i - 1]) / (a[i][1] + a[i][0] * a[i - 1][2])
# backward elimination
for i in range(n - 2, -1, -1):
b[i] = a[i][2] * b[i + 1] + b[i]
return b
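# A minimal usage sketch added for illustration (the example system below is not
# part of the original file). It solves a 3x3 tridiagonal system with known
# solution x = [1, 1, 1]. Note that solve_TDMA modifies a and b in place.
if __name__ == "__main__":
    # each row of a holds [sub-diagonal, diagonal, super-diagonal]; the unused
    # corner coefficients are set to 0
    a = [[0.0, 2.0, -1.0],
         [-1.0, 2.0, -1.0],
         [-1.0, 2.0, 0.0]]
    b = [1.0, 0.0, 1.0]
    print(solve_TDMA(3, a, b))  # expected output: [1.0, 1.0, 1.0]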
|
StarcoderdataPython
|
4838191
|
<gh_stars>0
# <Copyright 2022, Argo AI, LLC. Released under the MIT license.>
"""Implements a pinhole camera interface."""
from __future__ import annotations
from dataclasses import dataclass
from functools import cached_property
from pathlib import Path
from typing import Tuple, Union
import numpy as np
import av2.geometry.geometry as geometry_utils
import av2.utils.io as io_utils
from av2.geometry.se3 import SE3
from av2.utils.typing import NDArrayBool, NDArrayFloat, NDArrayInt
@dataclass(frozen=True)
class Intrinsics:
"""Models a camera intrinsic matrix.
Args:
fx_px: Horizontal focal length in pixels.
fy_px: Vertical focal length in pixels.
cx_px: Horizontal focal center in pixels.
cy_px: Vertical focal center in pixels.
width_px: Width of image in pixels.
height_px: Height of image in pixels.
"""
fx_px: float
fy_px: float
cx_px: float
cy_px: float
width_px: int
height_px: int
@cached_property
def K(self) -> NDArrayFloat:
"""Camera intrinsic matrix."""
K: NDArrayFloat = np.eye(3, dtype=float)
K[0, 0] = self.fx_px
K[1, 1] = self.fy_px
K[0, 2] = self.cx_px
K[1, 2] = self.cy_px
return K
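# A minimal usage sketch (illustrative values, not taken from the source): build
# an Intrinsics instance and read back the 3x3 camera matrix K it models.
#
#   intrinsics = Intrinsics(fx_px=1000.0, fy_px=1000.0, cx_px=960.0, cy_px=600.0,
#                           width_px=1920, height_px=1200)
#   intrinsics.K  # -> [[1000., 0., 960.], [0., 1000., 600.], [0., 0., 1.]]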
@dataclass(frozen=True)
class PinholeCamera:
"""Parameterizes a pinhole camera with zero skew.
Args:
ego_SE3_cam: pose of camera in the egovehicle frame (inverse of extrinsics matrix).
intrinsics: `Intrinsics` object containing intrinsic parameters and image dimensions.
cam_name: sensor name that camera parameters correspond to.
"""
ego_SE3_cam: SE3
intrinsics: Intrinsics
cam_name: str
@property
def width_px(self) -> int:
"""Return the width of the image in pixels."""
return self.intrinsics.width_px
@property
def height_px(self) -> int:
"""Return the height of the image in pixels."""
return self.intrinsics.height_px
@cached_property
def extrinsics(self) -> NDArrayFloat:
"""Return the camera extrinsics."""
return self.ego_SE3_cam.inverse().transform_matrix
@classmethod
def from_feather(cls, log_dir: Path, cam_name: str) -> PinholeCamera:
"""Create a pinhole camera model from a feather file.
Note: Data is laid out with sensor names along row dimension, and columns are sensor attribute data.
Args:
log_dir: path to a log directory containing feather files w/ calibration info.
cam_name: name of the camera.
Returns:
A new PinholeCamera object, containing camera extrinsics and intrinsics.
"""
intrinsics_path = log_dir / "calibration" / "intrinsics.feather"
intrinsics_df = io_utils.read_feather(intrinsics_path).set_index("sensor_name")
params = intrinsics_df.loc[cam_name]
intrinsics = Intrinsics(
fx_px=params["fx_px"],
fy_px=params["fy_px"],
cx_px=params["cx_px"],
cy_px=params["cy_px"],
width_px=int(params["width_px"]),
height_px=int(params["height_px"]),
)
sensor_name_to_pose = io_utils.read_ego_SE3_sensor(log_dir)
return cls(
ego_SE3_cam=sensor_name_to_pose[cam_name],
intrinsics=intrinsics,
cam_name=cam_name,
)
def cull_to_view_frustum(self, uv: NDArrayFloat, points_cam: NDArrayFloat) -> NDArrayBool:
"""Cull 3d points to camera view frustum.
Given a set of coordinates in the image plane and corresponding points
in the camera coordinate reference frame, determine those points
that have a valid projection into the image. 3d points with valid
projections have x coordinates in the range [0,width_px-1], y-coordinates
in the range [0,height_px-1], and a positive z-coordinate (lying in
front of the camera frustum).
Ref: https://en.wikipedia.org/wiki/Hidden-surface_determination#Viewing-frustum_culling
Args:
uv: Numpy array of shape (N,2) representing image plane coordinates in [0,W-1] x [0,H-1]
where (H,W) are the image height and width.
points_cam: Numpy array of shape (N,3) representing corresponding 3d points in the camera coordinate frame.
Returns:
Numpy boolean array of shape (N,) indicating which points fall within the camera view frustum.
"""
is_valid_x = np.logical_and(0 <= uv[:, 0], uv[:, 0] < self.width_px - 1)
is_valid_y = np.logical_and(0 <= uv[:, 1], uv[:, 1] < self.height_px - 1)
is_valid_z = points_cam[:, 2] > 0
is_valid_points: NDArrayBool = np.logical_and.reduce([is_valid_x, is_valid_y, is_valid_z])
return is_valid_points
def project_ego_to_img(
self, points_ego: NDArrayFloat, remove_nan: bool = False
) -> Tuple[NDArrayFloat, NDArrayFloat, NDArrayBool]:
"""Project a collection of 3d points (provided in the egovehicle frame) to the image plane.
Args:
points_ego: numpy array of shape (N,3) representing points in the egovehicle frame.
remove_nan: whether to remove coordinates that project to invalid (NaN) values.
Returns:
uv: image plane coordinates, as Numpy array of shape (N,2).
            points_cam: camera frame coordinates, as Numpy array of shape (N,3).
is_valid_points: boolean indicator of valid cheirality and within image boundary, as
boolean Numpy array of shape (N,).
"""
# convert cartesian to homogeneous coordinates.
points_ego_hom = geometry_utils.cart_to_hom(points_ego)
points_cam: NDArrayFloat = self.extrinsics @ points_ego_hom.T
# remove bottom row of all 1s.
uv = self.intrinsics.K @ points_cam[:3, :]
uv = uv.T
points_cam = points_cam.T
if remove_nan:
uv, points_cam = remove_nan_values(uv, points_cam)
uv = uv[:, :2] / uv[:, 2].reshape(-1, 1)
is_valid_points = self.cull_to_view_frustum(uv, points_cam)
return uv, points_cam, is_valid_points
def project_cam_to_img(
self, points_cam: NDArrayFloat, remove_nan: bool = False
) -> Tuple[NDArrayFloat, NDArrayFloat, NDArrayBool]:
"""Project a collection of 3d points in the camera reference frame to the image plane.
Args:
            points_cam: numpy array of shape (N,3) representing points in the camera frame.
remove_nan: whether to remove coordinates that project to invalid (NaN) values.
Returns:
uv: image plane coordinates, as Numpy array of shape (N,2).
            points_cam: camera frame coordinates, as Numpy array of shape (N,3).
is_valid_points: boolean indicator of valid cheirality and within image boundary, as
boolean Numpy array of shape (N,).
"""
uv = self.intrinsics.K @ points_cam[:3, :]
uv = uv.T
points_cam = points_cam.T
if remove_nan:
uv, points_cam = remove_nan_values(uv, points_cam)
uv = uv[:, :2] / uv[:, 2].reshape(-1, 1)
is_valid_points = self.cull_to_view_frustum(uv, points_cam)
return uv, points_cam, is_valid_points
def project_ego_to_img_motion_compensated(
self,
points_lidar_time: NDArrayFloat,
city_SE3_ego_cam_t: SE3,
city_SE3_ego_lidar_t: SE3,
) -> Tuple[NDArrayFloat, NDArrayFloat, NDArrayBool]:
"""Project points in the ego frame to the image with motion compensation.
Because of the high frame rate, motion compensation's role between the
sensors is not very significant, moving points only by millimeters
to centimeters. If the vehicle is moving at 25 miles per hour, equivalent
to 11 meters/sec, then in 17 milliseconds (the max time between a lidar sweep
and camera image capture) we should be able to move up to 187 millimeters.
This can be verified in practice as the mean_change:
mean_change = np.amax(points_h_cam_time.T[:,:3] - points_h_lidar_time ,axis=0)
Adjust LiDAR points for egovehicle motion. This function accepts the
egovehicle's pose in the city map both at camera time and also at
the LiDAR time.
We perform the following transformation:
pt_egovehicle_cam_t = egovehicle_cam_t_SE3_city * city_SE3_egovehicle_lidar_t * pt_egovehicle_lidar_t
Note that both "cam_time_points_h" and "lidar_time_points_h" are 3D points in the
vehicle coordinate frame, but captured at different times. These LiDAR points
always live in the vehicle frame, but just in different timestamps. If we take
a lidar point in the egovehicle frame, captured at lidar time, and bring it into
the map at this lidar timestamp, then we know the transformation from map to
egovehicle reference frame at the time when the camera image was captured.
Thus, we move from egovehicle @ lidar time, to the map (which is time agnostic),
then we move from map to egovehicle @ camera time. Now we suddenly have lidar points
living in the egovehicle frame @ camera time.
Args:
points_lidar_time: Numpy array of shape (N,3)
city_SE3_ego_cam_t: egovehicle pose when camera image was recorded.
city_SE3_ego_lidar_t: egovehicle pose when LiDAR sweep was recorded.
Returns:
uv: image plane coordinates, as Numpy array of shape (N,2).
points_cam: Numpy array of shape (N,3) representing coordinates of points within the camera frame.
is_valid_points_cam: boolean indicator of valid cheirality and within image boundary, as
boolean Numpy array of shape (N,).
Raises:
ValueError: If `city_SE3_ego_cam_t` or `city_SE3_ego_lidar_t` is `None`.
"""
if city_SE3_ego_cam_t is None:
raise ValueError("city_SE3_ego_cam_t cannot be `None`!")
if city_SE3_ego_lidar_t is None:
raise ValueError("city_SE3_ego_lidar_t cannot be `None`!")
ego_cam_t_SE3_ego_lidar_t = city_SE3_ego_cam_t.inverse().compose(city_SE3_ego_lidar_t)
points_cam_time = ego_cam_t_SE3_ego_lidar_t.transform_point_cloud(points_lidar_time)
return self.project_ego_to_img(points_cam_time)
@cached_property
def right_clipping_plane(self) -> NDArrayFloat:
"""Form the right clipping plane for a camera view frustum.
Returns:
(4,) tuple of Hessian normal coefficients.
"""
a, b, c, d = -self.intrinsics.fx_px, 0.0, self.width_px / 2.0, 0.0
coeffs: NDArrayFloat = np.array([a, b, c, d]) / np.linalg.norm([a, b, c]) # type: ignore
return coeffs
@cached_property
def left_clipping_plane(self) -> NDArrayFloat:
"""Form the left clipping plane for a camera view frustum.
Returns:
(4,) tuple of Hessian normal coefficients.
"""
a, b, c, d = self.intrinsics.fx_px, 0.0, self.width_px / 2.0, 0.0
coeffs: NDArrayFloat = np.array([a, b, c, d]) / np.linalg.norm([a, b, c]) # type: ignore
return coeffs
@cached_property
def top_clipping_plane(self) -> NDArrayFloat:
"""Top clipping plane for a camera view frustum.
Returns:
(4,) tuple of Hessian normal coefficients.
"""
a, b, c, d = 0.0, self.intrinsics.fx_px, self.height_px / 2.0, 0.0
coeffs: NDArrayFloat = np.array([a, b, c, d]) / np.linalg.norm([a, b, c]) # type: ignore
return coeffs
@cached_property
def bottom_clipping_plane(self) -> NDArrayFloat:
"""Bottom clipping plane for a camera view frustum.
Returns:
(4,) tuple of Hessian normal coefficients.
"""
a, b, c, d = 0.0, -self.intrinsics.fx_px, self.height_px / 2.0, 0.0
coeffs: NDArrayFloat = np.array([a, b, c, d]) / np.linalg.norm([a, b, c]) # type: ignore
return coeffs
def near_clipping_plane(self, near_clip_m: float) -> NDArrayFloat:
"""Near clipping plane for a camera view frustum.
Args:
near_clip_m: Near clipping plane distance in meters.
Returns:
(4,) tuple of Hessian normal coefficients.
"""
a, b, c, d = 0.0, 0.0, 1.0, -near_clip_m
coeffs: NDArrayFloat = np.array([a, b, c, d])
return coeffs
def frustum_planes(self, near_clip_dist: float = 0.5) -> NDArrayFloat:
"""Compute the planes enclosing the field of view (view frustum).
Reference (1): https://en.wikipedia.org/wiki/Viewing_frustum
Reference (2): https://en.wikipedia.org/wiki/Plane_(geometry)
Solve for the coefficients of all frustum planes:
ax + by + cz = d
Args:
near_clip_dist: Distance of the near clipping plane from the origin.
Returns:
(5, 4) matrix where each row corresponds to the coeffients of a plane.
"""
left_plane = self.left_clipping_plane
right_plane = self.right_clipping_plane
top_plane = self.top_clipping_plane
bottom_plane = self.bottom_clipping_plane
near_plane = self.near_clipping_plane(near_clip_dist)
planes: NDArrayFloat = np.stack([left_plane, right_plane, near_plane, bottom_plane, top_plane])
return planes
@cached_property
def egovehicle_yaw_cam_rad(self) -> float:
"""Compute the camera's yaw, in the egovehicle frame.
        The rotation R maps the x axis to the first column of R.
        Similarly, the y and z axes are mapped to the second and third columns.
Returns:
Counter-clockwise angle from x=0 (in radians) of camera center ray, in the egovehicle frame.
"""
egovehicle_SE3_camera = self.ego_SE3_cam
# the third column of this rotation matrix, is the new basis vector for the z axis (pointing out of camera)
# take its x and y components (the z component is near zero, as close to horizontal)
new_z_axis = egovehicle_SE3_camera.rotation[:, 2]
dx, dy, dz = new_z_axis
egovehicle_yaw_cam_rad = np.arctan2(dy, dx)
return float(egovehicle_yaw_cam_rad)
@cached_property
def fov_theta_rad(self) -> float:
"""Compute the field of view of a camera frustum to use for view frustum culling during rendering.
Returns:
Angular extent of camera's field of view (measured in radians).
"""
fov_theta_rad = 2 * np.arctan(0.5 * self.width_px / self.intrinsics.fx_px)
return float(fov_theta_rad)
def compute_pixel_ray_directions(self, uv: Union[NDArrayFloat, NDArrayInt]) -> NDArrayFloat:
"""Given (u,v) coordinates and intrinsics, generate pixel rays in the camera coordinate frame.
Assume +z points out of the camera, +y is downwards, and +x is across the imager.
Args:
uv: Numpy array of shape (N,2) with (u,v) coordinates
Returns:
Array of shape (N,3) with ray directions to each pixel, provided in the camera frame.
Raises:
ValueError: If input (u,v) coordinates are not (N,2) in shape.
RuntimeError: If generated ray directions are not (N,3) in shape.
"""
fx, fy = self.intrinsics.fx_px, self.intrinsics.fy_px
img_h, img_w = self.height_px, self.width_px
if not np.isclose(fx, fy, atol=1e-3):
raise ValueError(f"Focal lengths in the x and y directions must match: {fx} != {fy}")
if uv.shape[1] != 2:
raise ValueError("Input (u,v) coordinates must be (N,2) in shape.")
# Approximation for principal point
px = img_w / 2
py = img_h / 2
u = uv[:, 0]
v = uv[:, 1]
num_rays = uv.shape[0]
ray_dirs = np.zeros((num_rays, 3))
# x center offset from center
ray_dirs[:, 0] = u - px
# y center offset from center
ray_dirs[:, 1] = v - py
ray_dirs[:, 2] = fx
# elementwise multiplication of scalars requires last dim to match
ray_dirs = ray_dirs / np.linalg.norm(ray_dirs, axis=1, keepdims=True) # type: ignore
if ray_dirs.shape[1] != 3:
raise RuntimeError("Ray directions must be (N,3)")
return ray_dirs
def remove_nan_values(uv: NDArrayFloat, points_cam: NDArrayFloat) -> Tuple[NDArrayFloat, NDArrayFloat]:
"""Remove NaN values from camera coordinates and image plane coordinates (accepts corrupt array).
Args:
uv: image plane coordinates, as Numpy array of shape (N,2).
points_cam: Numpy array of shape (N,3) representing coordinates of points within the camera frame.
Returns:
uv_valid: subset of image plane coordinates, which contain no NaN coordinates.
is_valid_points_cam: subset of 3d points within the camera frame, which contain no NaN coordinates.
"""
is_u_valid = np.logical_not(np.isnan(uv[:, 0]))
is_v_valid = np.logical_not(np.isnan(uv[:, 1]))
is_uv_valid = np.logical_and(is_u_valid, is_v_valid)
uv_valid = uv[is_uv_valid]
is_valid_points_cam = points_cam[is_uv_valid]
return uv_valid, is_valid_points_cam
|
StarcoderdataPython
|
51743
|
# Copyright 2017 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reads and parses lines from a serial device.
Typically from an Arduino. Lines are expected to follow InfluxDB's line
protocol format (with the difference that the timestamp is allowed to be
missing).
"""
import logging
import time
import serial
class Sample(object):
"""Represents a single sample in the InfluxDB format."""
def __init__(self, line):
"""Parses a given line and stores in a new Sample.
If timestamp is missing, the current time is used.
Args:
line: String to be parsed.
Raises:
ValueError if the line can't be parsed.
"""
words = line.strip().split(" ")
if len(words) == 2:
(self.tags_line, self.values_line) = words
self.timestamp = time.time()
elif len(words) == 3:
(self.tags_line, self.values_line, timestamp) = words
self.timestamp = float(timestamp) / 1000000000.0
else:
raise ValueError("Unable to parse line {0!r}".format(line))
def AddTags(self, tag_line):
"""Adds tags from 'tag_line' into 'self.tags_line'."""
if tag_line:
self.tags_line += ","
self.tags_line += tag_line
return self
def FormatInfluxLine(self):
"""Formats the accumulated tags and values into an InfluxDB line."""
return "{0} {1} {2:d}".format(
            self.tags_line, self.values_line, int(self.timestamp * 1000000000))
def __str__(self):
return '{0}(tags_line={1},values_line={2},timestamp={3})'.format(
self.__class__.__name__, self.tags_line, self.values_line,
self.timestamp)
def __repr__(self):
return "{0}({1!r})".format(self.__class__.__name__,
self.FormatInfluxLine())
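# A minimal usage sketch (the line below is hypothetical, added for illustration):
#
#   sample = Sample("weather,station=roof temperature=21.5 1500000000000000000")
#   sample.tags_line    # -> "weather,station=roof"
#   sample.values_line  # -> "temperature=21.5"
#   sample.timestamp    # -> 1500000000.0 (seconds since the epoch)
#   sample.AddTags("source=arduino").tags_line
#                       # -> "weather,station=roof,source=arduino"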
def SkipUntilNewLine(handle):
"""Skips data until a new-line character is received.
This is needed so that the first sample is read from a complete line.
"""
logging.debug("Skipping until the end of a new line.")
while not handle.readline(4096).endswith('\n'):
pass
class LineOverflowError(IOError):
"""Thrown when a line longer than a given limit is received."""
def __init__(self, line):
super(LineOverflowError, self).__init__(
"Received incomplete line {0!r}".format(line))
def SerialLines(device_url, baud_rate, read_timeout, max_line_length):
"""A generator that yields lines from a configured serial line.
Will never exit normally, only with an exception when there is an error
in the serial communication.
"""
with serial.serial_for_url(device_url, baudrate=baud_rate,
timeout=read_timeout) as handle:
SkipUntilNewLine(handle)
while True:
line = handle.readline(max_line_length)
logging.debug("Received line %r", line)
if not line.endswith('\n'):
raise LineOverflowError(line)
try:
yield Sample(line.rstrip())
except ValueError:
logging.exception("Failed to parse Sample from '%s'", line)
|
StarcoderdataPython
|
3265838
|
from JumpScale import j
from JumpScale.grid.osis.OSISStoreMongo import OSISStoreMongo
class mainclass(OSISStoreMongo):
"""
Defeault object implementation
"""
def __init__(self, *args, **kwargs):
super(mainclass, self).__init__(*args, **kwargs)
def set(self, key, value, waitIndex=False, session=None):
db, counter = self._getMongoDB(session)
dbval = {"_id": key, "guid": key, "value": value}
db.save(dbval)
return key
def get(self, key, full=False, session=None):
try:
val = OSISStoreMongo.get(self, key, full, session=session)
return val['value']
except:
return None
def delete(self, key, session=None):
self.runTasklet('delete', key, session)
db, counter = self._getMongoDB(session)
try:
res = OSISStoreMongo.get(self, key, True, session=session)
db.remove(res["_id"])
except KeyError:
pass
|
StarcoderdataPython
|
3212598
|
<reponame>Deltares/HYDROLIB-core
import logging
from typing import List, Literal, Optional
from pydantic import Field
from pydantic.class_validators import validator
from pydantic.types import NonNegativeInt
from hydrolib.core.io.ini.models import INIBasedModel, INIGeneral, INIModel
from hydrolib.core.io.ini.util import (
get_split_string_on_delimiter_validator,
make_list_length_root_validator,
make_list_validator,
)
logger = logging.getLogger(__name__)
class OneDFieldGeneral(INIGeneral):
"""The 1D field file's `[General]` section with file meta data."""
class Comments(INIBasedModel.Comments):
fileversion: Optional[str] = Field(
"File version. Do not edit this.", alias="fileVersion"
)
filetype: Optional[str] = Field(
"File type. Should be '1dField'. Do not edit this.",
alias="fileType",
)
comments: Comments = Comments()
_header: Literal["General"] = "General"
fileversion: str = Field("2.00", alias="fileVersion")
filetype: Literal["1dField"] = Field("1dField", alias="fileType")
class OneDFieldGlobal(INIBasedModel):
"""The `[Global]` block with a uniform value for use inside a 1D field file."""
class Comments(INIBasedModel.Comments):
quantity: Optional[str] = Field("The name of the quantity", alias="quantity")
unit: Optional[str] = Field("The unit of the quantity", alias="unit")
value: Optional[str] = Field(
"The global default value for this quantity", alias="value"
)
comments: Comments = Comments()
_header: Literal["Global"] = "Global"
quantity: str = Field(alias="quantity")
unit: str = Field(alias="unit")
value: float = Field(alias="value")
class OneDFieldBranch(INIBasedModel):
"""
A `[Branch]` block for use inside a 1D field file.
Each block can define value(s) on a particular branch.
"""
class Comments(INIBasedModel.Comments):
branchid: Optional[str] = Field("The name of the branch", alias="branchId")
numlocations: Optional[str] = Field(
"Number of locations on branch. The default 0 value implies branch uniform values.",
alias="numLocations",
)
chainage: Optional[str] = Field(
"Space separated list of locations on the branch (m). Locations sorted by increasing chainage. The keyword must be specified if numLocations >0.",
alias="chainage",
)
values: Optional[str] = Field(
"Space separated list of numLocations values; one for each chainage specified. One value required if numLocations =0",
alias="values",
)
comments: Comments = Comments()
_header: Literal["Branch"] = "Branch"
branchid: str = Field(alias="branchId")
numlocations: Optional[NonNegativeInt] = Field(0, alias="numLocations")
chainage: Optional[List[float]] = Field(alias="chainage")
values: List[float] = Field(alias="values")
_split_to_list = get_split_string_on_delimiter_validator(
"chainage",
"values",
delimiter=" ",
)
_check_list_length_values = make_list_length_root_validator(
"chainage",
length_name="numlocations",
list_required_with_length=True,
)
_check_list_length_chainage = make_list_length_root_validator(
"values",
length_name="numlocations",
list_required_with_length=True,
min_length=1,
)
def _get_identifier(self, data: dict) -> Optional[str]:
return data.get("branchid")
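# A hedged illustration (values are hypothetical, not from the source) of the
# kind of [Branch] block in a 1D field file that this model describes:
#
#   [Branch]
#   branchId     = Channel1
#   numLocations = 2
#   chainage     = 0.0 100.0
#   values       = 1.5 2.5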
class OneDFieldModel(INIModel):
"""
The overall 1D field model that contains the contents of a 1D field file.
This model is typically used when a [FMModel][hydrolib.core.io.mdu.models.FMModel]`.geometry.inifieldfile[..].initial[..].datafiletype==DataFileType.onedfield`.
Attributes:
general (OneDFieldGeneral): `[General]` block with file metadata.
global_ (Optional[OneDFieldGlobal]): Optional `[Global]` block with uniform value.
branch (List[OneDFieldBranch]): Definitions of `[Branch]` field values.
"""
general: OneDFieldGeneral = OneDFieldGeneral()
global_: Optional[OneDFieldGlobal] = Field(
alias="global"
) # to circumvent built-in kw
branch: List[OneDFieldBranch] = []
_split_to_list = make_list_validator(
"branch",
)
@classmethod
def _ext(cls) -> str:
return ".ini"
@classmethod
def _filename(cls) -> str:
return "1dfield"
|
StarcoderdataPython
|
3304892
|
<filename>libgoods/scripts/examples/COOPS_FVCOM_multifile_example.py
#!/usr/bin/env python
from __future__ import print_function
from libgoods import tri_grid, noaa_coops, nctools
import datetime as dt
import os
from netCDF4 import num2date
'''
Sample script to retrieve data from NOAA CO-OPS FVCOM netcdf "file" (can be
OPeNDAP url), generate necessary grid topology (boundary info), and write
GNOME compatible output.
This script illustrates how to access data from multiple files (urls) by looping
through a filelist. It also extracts only a small subset from the unstructured grid file.
Alternatively, the list of filenames/urls can be passed directly when instantiating
the ugrid object -- this creates a netCDF4 MFDataset and is a good option when there
are not too many files (all output is written to one nc file for GNOME in this case)
Since multiple files are created, also create a text file that can be loaded
into GNOME pointing to the individual files
'''
out_dir = 'ngofs'
# start = dt.date(2018,10,2)
# end = dt.date(2018,10,4)
# date = start
# dates = []
# while date <= end:
# dates.append(date)
# date += datetime.timedelta(days=1)
#flist = ['fvcom_maine' + str(d.year) + str(d.month).zfill(2) + str(d.day).zfill(2) for d in dates]
flist = noaa_coops.make_server_filelist('ngofs',3,dt.date(2019,5,30),end=None,test_exist=False)
# the utools class requires a mapping of specific model variable names (values)
# to common names (keys) so that the class methods can work with FVCOM, SELFE,
# and ADCIRC which have different variable names
# (This seemed easier than finding them by CF long_names etc)
var_map = { 'longitude':'lon', \
'latitude':'lat', \
'time':'time', \
'u_velocity':'u', \
'v_velocity':'v', \
'nodes_surrounding_ele':'nv',\
'eles_surrounding_ele':'nbe',\
}
# class instantiation creates a netCDF Dataset object as an attribute --
# use the first file in the list only
ngofs = tri_grid.ugrid(flist[0])
#get longitude, latitude
print('Downloading data dimensions')
ngofs.get_dimensions(var_map,get_time=False)
# get grid topo variables (nbe, nv)
print('Downloading grid topo variables')
ngofs.get_grid_topo(var_map)
# subset bounding box
nl = 30.7; sl = 28.1
wl = -90.4; el = -87.4
ngofs.find_nodes_eles_in_ss(nl,sl,wl,el)
# find and order the boundary
print('Finding boundary')
bnd = ngofs.find_bndry_segs(subset=True)
print('Ordering boundary')
#In this case entire subset boundary will be set to land -- see COOPS_FVCOM_subset_example
#for how to use entire domain boundary to correctly determine type of subset boundary
ngofs.order_boundary(bnd)
# GNOME needs to know whether the elements are ordered clockwise (FVCOM) or counter-clockwise (SELFE)
ngofs.atts['nbe']['order'] = 'cw'
try:
os.mkdir(out_dir)
except:
pass
for f in flist:
print(f)
ngofs.update(f)
print('Downloading data dimensions')
ngofs.get_dimensions(var_map,get_xy=False)
of_dt = nctools.round_time(num2date(ngofs.data['time'][0],ngofs.atts['time']['units']),roundto=3600)
ofn = of_dt.strftime('%Y%m%d_%H') + '.nc'
if not os.path.exists(ofn):
#get the data
print('Downloading data')
#ngofs.get_data(var_map) #First time step only
ngofs.get_data(var_map,nindex=ngofs.nodes_in_ss) #All time steps in file
print('Writing to GNOME file')
ngofs.write_unstruc_grid(os.path.join(out_dir,ofn))
else:
        print(ofn + ' already exists')
nctools.make_filelist_for_GNOME(out_dir,'*.nc')
|
StarcoderdataPython
|
3329535
|
<reponame>shmuelamar/phonelocator<gh_stars>1-10
import pytest
from phonelocator import locator
@pytest.fixture(scope='function')
def countries():
return {
u'US': u'1', u'UG': u'256', u'IL': u'972', u'TZ': u'255',
u'TW': u'886', u'AU': u'61',
}
@pytest.fixture(scope='function')
def states():
return {
u'US': {
u'WY': [u'307'],
u'WV': [u'304', u'681'],
u'WI': [u'262', u'274', u'414', u'534', u'608', u'715', u'920'],
u'WA': [u'206', u'253', u'360', u'425', u'509', u'564'],
u'NY': [u'212'],
},
u'AU': {
u'WA': [u'85', u'86', u'89'],
u'VIC': [u'33', u'34', u'35'],
u'TAS': [u'36'],
}
}
def test_get_phone_prefixes_from_default_files_smoke():
phone_prefixes = locator.get_phone_prefixes()
assert isinstance(phone_prefixes.prefixes, dict)
assert isinstance(phone_prefixes.prefix_lengths, list)
def test_get_phone_prefixes(countries, states):
phone_prefixes = locator.get_phone_prefixes(countries, states)
assert len(phone_prefixes.prefixes) == 28
assert phone_prefixes.prefixes[u'972'] == [u'IL']
assert phone_prefixes.prefixes[u'1212'] == [u'US', u'NY']
assert u'1' not in phone_prefixes.prefixes
assert u'61' not in phone_prefixes.prefixes
def test_get_phone_prefixes_bad_file(states):
with pytest.raises(ValueError):
locator.get_phone_prefixes(countries={}, states=states)
@pytest.mark.parametrize('phone_no,expected', [
(u'9721234567890', [u'IL']),
(u'1212345678902', [u'US', u'NY']),
(u'9999999999999', None),
])
def test_locate_phone(phone_no, expected, countries, states):
phone_prefixes = locator.get_phone_prefixes(countries, states)
assert locator.locate_phone(phone_no, phone_prefixes) == expected
|
StarcoderdataPython
|
1651014
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 TH<NAME>, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.conf import settings
import env
from api.client import BKComponentClient
from api.utils.request import batch_request
# TODO: update this after GSE KIT is connected to ESB
GSE_KIT_API_ENTRY = env.BK_GSE_KIT_API_ENTRY or "{}/{}".format(
settings.BK_PAAS_INNER_HOST, "api/c/compapi/v2/gse_kit"
)
def _get_gse_kit_api(api_name):
return "{}/{}/".format(GSE_KIT_API_ENTRY, api_name)
class BKGseKitClient(BKComponentClient):
    def _pre_process_data(self, data):
        """
        Remove optional fields whose value is None.
        :param data:
        :return:
        """
        data = super()._pre_process_data(data)
        # iterate over a copy of the keys, since the dict is mutated inside the loop
        for param in list(data.keys()):
            if data[param] is None:
                data.pop(param)
        return data
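    # Illustrative example of the pre-processing above (my own sketch, not from the source):
    # an input of {"scope": {...}, "bk_cloud_ids": None, "is_auto": None}
    # would be reduced to {"scope": {...}}, so optional None fields never reach the API.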
def list_process(
self,
page_param,
scope=None,
expression_scope=None,
bk_cloud_ids=None,
process_status=None,
is_auto=None,
):
params = {
"scope": scope,
"expression_scope": expression_scope,
"bk_cloud_ids": bk_cloud_ids,
"process_status": process_status,
"is_auto": is_auto
}
return batch_request(func=self._list_processes, params=params, page_param=page_param)
def _list_processes(
self,
pagesize,
page,
scope=None,
expression_scope=None,
bk_cloud_ids=None,
process_status=None,
is_auto=None,
):
return self._request(
method="post",
url=_get_gse_kit_api("list_process"),
data={
"pagesize": pagesize,
"page": page,
"scope": scope,
"expression_scope": expression_scope,
"bk_cloud_ids": bk_cloud_ids,
"process_status": process_status,
"is_auto": is_auto
}
)
def create_job(self,
bk_biz_id,
job_object,
job_action,
scope=None,
expression_scope=None
):
"""
创建 gsekit 任务命令
"""
param = {
"job_object": job_object,
"job_action": job_action,
"scope": scope,
"expression_scope": expression_scope
}
return self._request(
method="post",
url=_get_gse_kit_api("{bk_biz_id}/job".format(bk_biz_id=bk_biz_id)),
data=param
)
def job_status(self, bk_biz_id, job_task_id):
"""
查询gsekit 任务状态
:param job_task_id: string
"""
param = {
"job_task_id_list": [job_task_id]
}
return self._request(
method="post",
url=_get_gse_kit_api("{bk_biz_id}/job".format(bk_biz_id=bk_biz_id)),
data=param
)
|
StarcoderdataPython
|
167048
|
<reponame>DLR-RM/python-jsonconversion
# Copyright (C) 2016-2017 DLR
#
# All rights reserved. This program and the accompanying materials are made
# available under the terms of the 2-Clause BSD License ("Simplified BSD
# License") which accompanies this distribution, and is available at
# https://opensource.org/licenses/BSD-2-Clause
#
# Contributors:
# <NAME> <<EMAIL>>
from jsonconversion.jsonobject import JSONObject
from testing_utils import convert_with_assertion
class JO(JSONObject):
def __init__(self, a, b, c):
self.a = a
self.b = b
self.c = c
@classmethod
def from_dict(cls, _dict):
return cls(_dict['a'], _dict['b'], _dict['c'])
def to_dict(self):
return {'a': self.a, 'b': self.b, 'c': self.c}
def __eq__(self, other):
return self.a == other.a and self.b == other.b and self.c == other.c
def __str__(self):
return "{0}: a = {1} {4}, b = {2} {5}, c = {3} {6}".format(self.__class__.__name__, self.a, self.b, self.c,
type(self.a), type(self.b), type(self.c))
def test_simple_objects():
j1 = JO(1, 2, 3)
j2 = JO(1.1, 2.2, 3.3)
j3 = JO('a', "bcd", None)
j4 = JO('1.1', "3", "0")
j5 = JO(-2, "preempted", float)
convert_with_assertion(j1)
convert_with_assertion(j2)
convert_with_assertion(j3)
convert_with_assertion(j4)
convert_with_assertion(j5)
def test_nested_objects():
l1 = [JO(1, 2, 3), JO('1.1', "3", "0")]
t1 = tuple(l1[:])
d1 = {0: l1[:], 1: tuple(t1)}
convert_with_assertion(l1)
convert_with_assertion(t1)
convert_with_assertion(d1)
def test_complex_objects():
j1 = JO({0: 'a', 1: int, 2: -5}, dict(enumerate(list("abc"))), (1, 2, 3))
j2 = JO([1, 2, {3: 'a'}, (4, 5, set([6, 7]))], {0: [1, 2], 1: (3, 4), 'abc': [{'x': 1, 'y': 2}]}, j1)
j3 = JO({0: j1, 1: {0: j1}}, {0: list, 1: {0: float}}, {0: None, 1: {0: None}})
j4 = JO({0: JO(1, 2, 3), 1: {0: JO(1, 2, 3)}},
{0: JO(-2, "preempted", float), 1: {0: JO(-2, "preempted", float)}},
JO(-2, "preempted", float))
convert_with_assertion(j1)
convert_with_assertion(j2)
convert_with_assertion(j3)
convert_with_assertion(j4)
|
StarcoderdataPython
|
4842521
|
<filename>tests/test_comm.py
import unittest
from qupy.framing.slip import Slip
from qupy.comm.client import CommClient
from qupy.comm.server import CommServer
from qupy.comm.errors import CommError
from qupy.interface.tcp import TcpSocketClient, TcpSocketServer
from qupy.interface.errors import InterfaceTimeoutError
class TestCommClient(unittest.TestCase):
def setUp(self):
self.framing_server = Slip()
self.tcp_server = TcpSocketServer()
self.framing_client = Slip()
self.tcp_client = TcpSocketClient()
self.comm_client = CommClient(self.tcp_client, self.framing_client)
self.comm_server = CommServer(self.tcp_server, self.framing_server)
self.tcp_server.bind()
self.tcp_client.connect()
self.tcp_server.listen()
self.comm_server.start()
self.comm_client.start()
super().setUp()
def tearDown(self):
self.comm_client.stop()
self.tcp_client.close()
self.comm_server.stop()
self.tcp_server.close()
self.tcp_server.unbind()
super().tearDown()
def test_send_recv_client(self):
tx_msg = b'abc'
self.comm_client.send(tx_msg)
rx_msg = self.comm_server.recv()
self.assertEqual(rx_msg, tx_msg)
tx_msg = b'def'
self.comm_server.confirm(tx_msg)
rx_msg = self.comm_client.recv()
self.assertEqual(rx_msg, tx_msg)
def test_send_no_recv_client(self):
tx_msg = b'abc'
self.comm_client.send(tx_msg)
rx_msg = self.comm_server.recv()
self.assertEqual(rx_msg, tx_msg)
self.comm_server.confirm(None)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
1725565
|
<filename>2018/Day21.py
from Day19 import Device, make_command
script = """\
#ip 3
seti 123 0 1
bani 1 456 1
eqri 1 72 1
addr 1 3 3
seti 0 0 3
seti 0 0 1
bori 1 65536 2
seti 10605201 9 1
bani 2 255 5
addr 1 5 1
bani 1 16777215 1
muli 1 65899 1
bani 1 16777215 1
gtir 256 2 5
addr 5 3 3
addi 3 1 3
seti 27 3 3
seti 0 3 5
addi 5 1 4
muli 4 256 4
gtrr 4 2 4
addr 4 3 3
addi 3 1 3
seti 25 3 3
addi 5 1 5
seti 17 5 3
setr 5 5 2
seti 7 6 3
eqrr 1 0 5
addr 5 3 3
seti 5 8 3"""
ip_command, *program = list(map(make_command, script.splitlines()))
if __name__ == "__main__":
A_start = 0
while True:
print(A_start)
seen_states = []
dev = Device(A=A_start)
dev.opsd[ip_command[0]](*ip_command[1])
while dev.ip_pos in range(0, len(program)) and len(seen_states)<=10000000:
if not len(seen_states)%1000000: print("-> "+str(len(seen_states)//1000000)+"M")
seen_states.append(list(dev))
com, args = program[dev.ip_pos]
dev.opsd[com](dev, *args)
dev.loop()
if dev.ip_pos not in range(0, len(program)):
break
A_start += 1
|
StarcoderdataPython
|
3309154
|
from typing import List
from geom2d import Circle, Rect, Segment, Point, Polygon, Vector
from graphic.svg.attributes import attrs_to_str
from graphic.svg.read import read_template
__segment_template = read_template('line')
__rect_template = read_template('rect')
__circle_template = read_template('circle')
__polygon_template = read_template('polygon')
__polyline_template = read_template('polyline')
__text_template = read_template('text')
__group_template = read_template('group')
def segment(seg: Segment, attributes=()):
"""
Returns an SVG line segment element as string.
:param seg: `Segment` geometric primitive
:param attributes: list of SVG attributes
:return: <line x1="" y1="" x2="" y2="" ... />
"""
return __segment_template \
.replace('{{x1}}', str(seg.start.x)) \
.replace('{{y1}}', str(seg.start.y)) \
.replace('{{x2}}', str(seg.end.x)) \
.replace('{{y2}}', str(seg.end.y)) \
.replace('{{attrs}}', attrs_to_str(attributes))
def rectangle(rect: Rect, attributes=()):
"""
Returns an SVG rectangle element as string.
:param rect: `Rect` geometric primitive
:param attributes: list of SVG attributes
:return: <rect x="" y="" width="" height="" ... />
"""
return __rect_template \
.replace('{{x}}', str(rect.origin.x)) \
.replace('{{y}}', str(rect.origin.y)) \
.replace('{{width}}', str(rect.size.width)) \
.replace('{{height}}', str(rect.size.height)) \
.replace('{{attrs}}', attrs_to_str(attributes))
def circle(circ: Circle, attributes=()):
"""
Returns an SVG circle element as string.
:param circ: `Circle` geometric primitive
:param attributes: list of SVG attributes
:return: <circle cx="" cy="" r="" ... />
"""
return __circle_template \
.replace('{{cx}}', str(circ.center.x)) \
.replace('{{cy}}', str(circ.center.y)) \
.replace('{{r}}', str(circ.radius)) \
.replace('{{attrs}}', attrs_to_str(attributes))
def polygon(pol: Polygon, attributes=()):
"""
Returns an SVG polygon element as string.
:param pol: `Polygon` geometric primitive
:param attributes: list of SVG attributes
:return: <polygon points="" ... />
"""
return __polygon_template \
.replace('{{points}}', __format_points(pol.vertices)) \
.replace('{{attrs}}', attrs_to_str(attributes))
def polyline(points: List[Point], attributes=()):
"""
Returns an SVG polyline element as string.
:param points: sequence of `Point`s, the vertices of the polyline
:param attributes: list of SVG attributes
:return: <polyline points="" .../>
"""
return __polyline_template \
.replace('{{points}}', __format_points(points)) \
.replace('{{attrs}}', attrs_to_str(attributes))
def text(txt: str, pos: Point, disp: Vector, attrs_list=()):
"""
Returns an SVG text element as string.
:param txt: the text
:param pos: origin position for the text
:param disp: displacement applied to the text
:param attrs_list: list of SVG attributes
:return: <text x="" y="" dx="" dy="" ...>Hello</text>
"""
return __text_template \
.replace('{{x}}', str(pos.x)) \
.replace('{{y}}', str(pos.y)) \
.replace('{{dx}}', str(disp.u)) \
.replace('{{dy}}', str(disp.v)) \
.replace('{{text}}', txt) \
.replace('{{attrs}}', attrs_to_str(attrs_list))
def group(primitives: List[str], attributes=()):
"""
Returns an SVG group element with the primitives list inside of
it.
:param primitives: list of SVG primitives as string.
:param attributes: list of SVG attributes
:return: <g ...>...</g>
"""
return __group_template \
.replace('{{content}}', '\n\t'.join(primitives)) \
.replace('{{attrs}}', attrs_to_str(attributes))
def arrow(
_segment: Segment,
length: float,
height: float,
attributes=()
):
"""
Returns an SVG group of primitives representing an arrow: a
segment with an arrow head in its end.
:param _segment: arrow's line segment
:param length: arrow's head length
:param height: arrow's head height
:param attributes: list of SVG attributes
:return: <g>...</g>
"""
director = _segment.direction_vector
v_l = director.opposite().with_length(length)
v_h1 = director.perpendicular().with_length(height / 2.0)
v_h2 = v_h1.opposite()
return group(
[
segment(_segment),
polyline([
_segment.end.displaced(v_l + v_h1),
_segment.end,
_segment.end.displaced(v_l + v_h2)
])
],
attributes
)
def __format_points(points: List[Point]):
return ' '.join([f'{p.x},{p.y}' for p in points])
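# A minimal usage sketch (my assumption: geom2d exposes Point(x, y), Segment(start, end)
# and Circle(center, radius) constructors; adjust if the real signatures differ).
if __name__ == '__main__':
    demo_circle = circle(Circle(Point(10, 10), 5))
    demo_segment = segment(Segment(Point(0, 0), Point(20, 20)))
    # group() simply wraps the already-rendered primitives in a <g> element
    print(group([demo_circle, demo_segment]))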
|
StarcoderdataPython
|
4801114
|
<filename>CodeWars/7 Kyu/21 Sticks.py
def makeMove(sticks):
return max(sticks % 4, 1)
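# A quick sanity check of the strategy (my own sketch, not part of the kata solution):
# taking (sticks % 4) sticks leaves the opponent a multiple of 4, a losing position;
# when sticks % 4 == 0 there is no winning move, so max(0, 1) falls back to taking 1.
if __name__ == "__main__":
    assert makeMove(21) == 1  # 21 % 4 == 1 -> take 1, leaving 20
    assert makeMove(18) == 2  # 18 % 4 == 2 -> take 2, leaving 16
    assert makeMove(16) == 1  # losing position, fall back to taking 1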
|
StarcoderdataPython
|
4841070
|
"""
Network optimization
"""
import pandas as pd
__author__ = "<NAME>"
__copyright__ = "Copyright 2015, Architecture and Building Systems - ETH Zurich"
__credits__ = ["<NAME>", "<NAME>", "<NAME>", "<NAME>"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
class NetworkOptimizationFeatures(object):
"""
    This class just sets up constants of the linear model of the distribution.
    These results are extracted from the work of Florian at the chair.
    Unfortunately his work only worked for this case study and could not be used elsewhere.
See the paper of Fonseca et al 2015 of the city energy analyst for more info on how that procedure used to work.
"""
def __init__(self, district_heating_network, district_cooling_network, locator):
self.network_names = ['']
for network_name in self.network_names:
if district_heating_network:
self.E_pump_DHN_W = pd.read_csv(locator.get_network_energy_pumping_requirements_file("DH", network_name))[
'pressure_loss_total_kW'].values * 1000
self.mass_flow_rate_DHN = self.mass_flow_rate_plant(locator, network_name, "DH")
self.thermallosses_DHN = pd.read_csv(locator.get_network_total_thermal_loss_file("DH", network_name))[
'thermal_loss_total_kW'].values
self.pipesCosts_DHN_USD = self.pipe_costs(locator, network_name, "DH")
if district_cooling_network:
self.E_pump_DCN_W = pd.read_csv(locator.get_network_energy_pumping_requirements_file("DC", network_name))[
'pressure_loss_total_kW'].values * 1000
self.mass_flow_rate_DCN = self.mass_flow_rate_plant(locator, network_name, "DC")
self.thermallosses_DCN = pd.read_csv(locator.get_network_total_thermal_loss_file("DC", network_name))[
'thermal_loss_total_kW'].values
self.pipesCosts_DCN_USD = self.pipe_costs(locator, network_name, "DC")
def mass_flow_rate_plant(self, locator, network_name, network_type):
mass_flow_df = pd.read_csv((locator.get_thermal_network_layout_massflow_nodes_file(network_type, network_name)))
mass_flow_nodes_df = pd.read_csv((locator.get_thermal_network_node_types_csv_file(network_type, network_name)))
# identify the node with the plant
node_id = mass_flow_nodes_df.loc[mass_flow_nodes_df['Type'] == "PLANT", 'Name'].item()
return mass_flow_df[node_id].values
def pipe_costs(self, locator, network_name, network_type):
edges_file = pd.read_csv(locator.get_thermal_network_edge_list_file(network_type, network_name))
piping_cost_data = pd.read_excel(locator.get_database_distribution_systems(), sheet_name="THERMAL_GRID")
merge_df = edges_file.merge(piping_cost_data, left_on='Pipe_DN', right_on='Pipe_DN')
merge_df['Inv_USD2015'] = merge_df['Inv_USD2015perm'] * merge_df['length_m']
pipe_costs = merge_df['Inv_USD2015'].sum()
return pipe_costs
|
StarcoderdataPython
|
30140
|
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 5 14:01:56 2019
@author: <NAME>
This implementation uses ST-ResNet for inflow / outflow bike prediction in the city of NY
Article: https://arxiv.org/pdf/1610.00081.pdf
References and credits:
<NAME>, <NAME>, <NAME>. Deep Spatio-Temporal Residual Networks for Citywide Crowd Flows Prediction. In AAAI 2017.
The dataset can be downloaded by following the information on the link below:
https://github.com/lucktroy/DeepST/tree/master/data/BikeNYC
"""
##################################################################################
# Libraries
##################################################################################
import os
import math
from datetime import datetime
from datetime import timedelta
import numpy as np
import h5py
import matplotlib.pyplot as plt
import matplotlib.cm
import seaborn as sns
sns.set()
import keras.backend as K
from keras.models import Model
from keras.layers import Input, Dense, Reshape, Activation, Add, LeakyReLU
from keras.layers import Conv2D , BatchNormalization, Lambda, concatenate
from keras.callbacks import ModelCheckpoint, CSVLogger, EarlyStopping
from keras.optimizers import Adam
from keras.engine.topology import Layer
np.random.seed(42) # My nickname Recruta42
############################################################################################
# Load Dataset
############################################################################################
dataset_folder = 'dataset'
dataset_file = os.path.join(dataset_folder,'NYC14_M16x8_T60_NewEnd.h5')
images_folder = 'images'
nyc_map = plt.imread(os.path.join(images_folder,'nyc.jpg'))
# Plot New York Map
f, ax = plt.subplots(figsize=(8,8))
ax.imshow(nyc_map)
# Load dataset file
f = h5py.File(dataset_file, 'r')
data = f['data'][()]
timestamps = f['date'][()]
# Convert data from [batch x flow matrices x map height x map width] to [batch x map height x map width x flow matrices]
data = np.transpose(data, (0, 2, 3, 1))
# Plot some samples from dataset
n_samples = 5
for i in range(n_samples):
# define the size of images
f, (ax1, ax2) = plt.subplots(1, 2)
f.set_figwidth(12)
f.set_figheight(8)
# randomly select a sample
idx = np.random.randint(0, len(data))
inflow = data[idx][:,:,0] #input flow is the first matrix
outflow = data[idx][:,:,1] #output flow is the second matrix
date = datetime.strptime(timestamps[idx].decode("utf-8"), '%Y%m%d%H')
hmax1 = sns.heatmap(inflow, cmap = matplotlib.cm.winter, alpha = 0.3, annot = False,zorder = 2, ax=ax1)
hmax1.imshow(nyc_map,aspect = hmax1.get_aspect(),extent = hmax1.get_xlim() + hmax1.get_ylim(), zorder = 1)
ax1.set_title('In Flow: {0}'.format(date))
hmax2 = sns.heatmap(outflow, cmap = matplotlib.cm.winter, alpha = 0.3, annot = False,zorder = 2, ax=ax2)
hmax2.imshow(nyc_map,aspect = hmax2.get_aspect(),extent = hmax2.get_xlim() + hmax2.get_ylim(), zorder = 1)
ax2.set_title('Out Flow: {0}'.format(date))
############################################################################################
# Pre-Process Dataset
############################################################################################
# Convert timestamps from ASCII format to string
formated_timestamps = []
for ts in timestamps:
formated_timestamps.append(ts.decode("utf-8"))
# Scale in flow and out flow values on the map matrices to a range between [-1,1]
min_value = data.min()
max_value = data.max()
print("Minimum values: {0} , Maximum value: {1}".format(min_value,max_value))
data_scaled = 1. * (data - min_value) / (max_value - min_value)
data_scaled = 2. * data_scaled - 1.
print("Minimum scaled values: {0} , Maximum scaled value: {1}".format(data_scaled.min(),data_scaled.max()))
############################################################################################
# Create Train / Target data
############################################################################################
'''
Minimum granularity will be 1 hour
To create the input for our model we need to aggregate the inflow and outflow matrices according
to three time intervals defined in the article as: closeness, period and trend.
For this project:
* Closeness is a difference in 1 hour period between two matrices
* Period is a difference is 24 hours period between two matrices
* Trend is a difference is 7 days period between two matrices
This means that, for example, for the (16 x 8 x 2) inflow/outflow matrices collected
at time stamp 2014 08 07 01:00:00 we will have to do the following transformations:
Input closeness = a stack of closeness_len consecutive matrices separated by the closeness interval.
Ex: len = 3 and interval = 1 hour - stack [2014 08 07 01:00:00, 2014 08 07 02:00:00, 2014 08 07 03:00:00] matrices
Input period = a stack of period_len consecutive matrices separated by the period interval.
Ex: len = 4 and interval = 24 hours - stack [2014 08 07 01:00:00, 2014 08 08 01:00:00, 2014 08 09 01:00:00, 2014 08 10 01:00:00] matrices
Input trend = a stack of trend_len consecutive matrices separated by the trend interval.
Ex: len = 4 and interval = 168 hours - stack [2014 08 07 01:00:00, 2014 08 14 01:00:00, 2014 08 21 01:00:00, 2014 08 28 01:00:00] matrices
This is an important information and the dataset should have little or almost NO disconnected interval between two
inflow / outflow matrices meaning that we should avoid missing hours.
'''
# Simple function that receives a string in format YmdH and convert to a datetime object
def str_to_date(timestamp):
    # We can't directly parse the string with datetime.strptime(ts, '%Y%m%d%H')
    # because the hours are in 01 to 24 format instead of 00 to 23
year, month, day, hour = int(timestamp[:4]), int(timestamp[4:6]), int(timestamp[6:8]), int(timestamp[8:])-1
converted_time = datetime(year, month, day, hour)
return converted_time
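# Worked example for str_to_date (my own illustration): the string '2014080724' is split into
# year=2014, month=8, day=7, hour=24-1=23, giving datetime(2014, 8, 7, 23) -- i.e. the 24th hour
# of the day is mapped back onto the 0-23 range that datetime expects.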
# Convert timestamp to a one hot encoded vector taking into account the week day and whether it is a weekend or not
def one_hot_day_week(timestamp):
converted_time = str_to_date(timestamp)
i = converted_time.weekday()
one_hot_encoded = np.zeros((8))
# Day week (sunday, monday...) encoder
one_hot_encoded[i] = 1
    # Weekday flag: 1 on weekdays (Mon-Fri), 0 on weekends
if i >= 5:
one_hot_encoded[7] = 0
else:
one_hot_encoded[7] = 1
return one_hot_encoded
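# Worked example for one_hot_day_week (my own illustration): '2014080701' falls on a Thursday,
# so weekday() == 3 and the vector becomes [0, 0, 0, 1, 0, 0, 0, 1] -- index 3 marks Thursday
# and index 7 is the weekday flag (1 because Thursday is not a weekend day).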
closeness_interval = 1 # distance between hours
period_interval = 24 * closeness_interval # number of time intervals in one day
trend_interval = 7 * period_interval
closeness_len = 3 # recent time (closeness)
period_len = 4 # near history (period)
trend_len = 4 # distant history (trend)
closeness_range = [x * closeness_interval for x in range(1,closeness_len+1)]
period_range = [x * period_interval for x in range(1,period_len + 1)]
trend_range = [x * trend_interval for x in range(1,trend_len+1)]
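# With the settings above these ranges evaluate to (my own worked example):
# closeness_range = [1, 2, 3] hours, period_range = [24, 48, 72, 96] hours,
# trend_range = [168, 336, 504, 672] hours (i.e. 1 to 4 weeks back).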
# Build a dictionary of time stamps. This will ease our work to convert between timestamps to indices to get
# the in/out flow matrices.
ts_dict = {}
ts_list = []
for i, ts in enumerate(formated_timestamps):
converted_time = str_to_date(ts)
# Add converted time from string to a list for iteration and for a dictionary for search purposes
ts_list.append(str_to_date(ts))
ts_dict[converted_time] = i
# Create X, y data
X_Closeness, X_Period, X_Trend, X_External, Y , Y_timestamp = [],[],[],[],[],[]
# Create the datasets for closeness, period and trend
# Since we have future predictions as output we need to build the dataset based on the latest trend period as starting point
starting_period = trend_interval * trend_len
# We construct the X, y datasets based on a reversed time interval, from the latest trend to starting closeness
for i in range(starting_period, len(formated_timestamps)):
# Starting period
date = str_to_date(formated_timestamps[i])
check_dates = []
    # Get all dates in the closeness, period and trend ranges needed to build this sample
for c in closeness_range:
check_dates.append(date - timedelta(hours=c))
for p in period_range:
check_dates.append(date - timedelta(hours=p))
for t in trend_range:
check_dates.append(date - timedelta(hours=t))
# Check if all those selected dates exists in our timestamp dictionary and if not go to the next iteration
break_flag = False
for check_date in check_dates:
if check_date not in ts_dict:
print("Date frame missing!: {0} ".format(formated_timestamps[i]))
break_flag = True
if break_flag:
continue
    # Parse again to create the dataset, stacking the time ranges for closeness, period and trend
# X Closeness
xc = []
for c in closeness_range:
xc.append(data_scaled[ts_dict[date - timedelta(hours=c)]])
xc = np.concatenate(xc,axis=-1)
# X Period
xp = []
for p in period_range:
xp.append(data_scaled[ts_dict[date - timedelta(hours=p)]])
xp = np.concatenate(xp,axis=-1)
# X Trend
xt = []
for t in trend_range:
xt.append(data_scaled[ts_dict[date - timedelta(hours=t)]])
xt = np.concatenate(xt,axis=-1)
# Target
y = data_scaled[ts_dict[date]]
# Add each created set to the final datasets
X_Closeness.append(xc)
X_Period.append(xp)
X_Trend.append(xt)
X_External.append(one_hot_day_week(formated_timestamps[i]))
Y.append(y)
Y_timestamp.append(formated_timestamps[i])
X_Closeness = np.asarray(X_Closeness)
X_Period = np.asarray(X_Period)
X_Trend = np.asarray(X_Trend)
X_External = np.asarray(X_External)
Y = np.asarray(Y)
print("X_Closeness shape: ", X_Closeness.shape)
print("X_Period shape: ", X_Period.shape)
print("X_Trend shape: ", X_Trend.shape)
print("X_External shape: ", X_External.shape)
print( "Y shape:", Y.shape)
############################################################################################
# Split dataset into Train / Test
############################################################################################
days_test = 10
n_test = 24 * days_test
# Split dataset into training / test sets
XC_train, XP_train, XT_train,XE_train, Y_train = X_Closeness[:-n_test], X_Period[:-n_test], X_Trend[:-n_test],X_External[:-n_test], Y[:-n_test]
XC_test, XP_test, XT_test, XE_test, Y_test = X_Closeness[-n_test:], X_Period[-n_test:], X_Trend[-n_test:],X_External[-n_test:], Y[-n_test:]
# Time stamp split so we can track the period
timestamp_train, timestamp_test = Y_timestamp[:-n_test], Y_timestamp[-n_test:]
# Concatenate closeness , period and trend
X_train = [XC_train,XP_train,XT_train,XE_train]
X_test = [XC_test,XP_test,XT_test,XE_test]
print("X Train size: ", len(X_train))
print("X Test size: ", len(X_test))
############################################################################################
# Spatial Temporal Residual Network
############################################################################################
############################################################################################
# ResNet Identity Block
############################################################################################
def identity_block(inputs, filters, block_id):
x = BatchNormalization(name='block_' + block_id + '_identity_batch_1')(inputs)
x = Activation('relu', name='block_' + block_id + '_identity_relu_1')(x)
x = Conv2D(filters, kernel_size=(3,3), strides=(1,1), padding='same', kernel_initializer='he_normal', name='block_' + block_id + '_identity_conv2d_1')(x)
x = BatchNormalization(name='block_' + block_id + '_identity_batch_2')(x)
x = Activation('relu',name='block_' + block_id + '_identity_relu_2')(x)
x = Conv2D(filters, kernel_size=(3,3), strides=(1,1), padding='same', kernel_initializer='he_normal', name='block_' + block_id + '_identity_conv2d_2')(x)
x = Add(name='block_' + block_id + '_add')([inputs,x])
return x
############################################################################################
# ResNet bottleNeck block
############################################################################################
def bottleneck_block(inputs,kernel_size, filters, block_id):
f1, f2, f3 = filters
x = Conv2D(f1, kernel_size=(1,1), use_bias=False, kernel_initializer='he_normal', name='block_' + block_id + '_identity_conv2d_1')(inputs)
x = BatchNormalization(name='block_' + block_id + '_identity_batch_1')(x)
x = Activation('relu', name='block_' + block_id + '_identity_relu_1')(x)
x = Conv2D(f2, kernel_size = kernel_size, padding='same', use_bias=False, kernel_initializer='he_normal', name='block_' + block_id + '_identity_conv2d_2')(x)
x = BatchNormalization(name='block_' + block_id + '_identity_batch_2')(x)
x = Activation('relu',name='block_' + block_id + '_identity_relu_2')(x)
x = Conv2D(f3, kernel_size=(1,1), use_bias=False, kernel_initializer='he_normal', name='block_' + block_id + '_identity_conv2d_3')(x)
x = BatchNormalization(name='block_' + block_id + '_identity_batch_3')(x)
x = Add(name='block_' + block_id + '_add')([x, inputs])
x = Activation('relu', name='block_' + block_id + '_identity_relu_3')(x)
return x
############################################################################################
# ResNetXt group block
############################################################################################
def grouped_block(inputs, filters, cardinality, block_id):
assert not filters % cardinality
convolution_groups = []
n_convs = filters // cardinality
for j in range(cardinality):
        # bind j as a default argument so each group slices its own channel range
        group = Lambda(lambda z, j=j: z[:, :, :, j * n_convs:j * n_convs + n_convs])(inputs)
convolution_groups.append(Conv2D(n_convs, kernel_size=(3, 3), strides=(1,1) , padding='same')(group))
x = concatenate(convolution_groups, name='block_Xt' + block_id + '_concatenate')
return x
############################################################################################
# ResNet bottleNeck block
############################################################################################
def resnetXt_block(inputs, filters, cardinality, block_id):
f1, f2, f3 = filters
x = Conv2D(f1, kernel_size=(1,1), use_bias=False, kernel_initializer='he_normal', name='block_' + block_id + '_xt_conv2d_1')(inputs)
x = BatchNormalization(name='block_' + block_id + '_xt_batch_1')(x)
x = LeakyReLU(name='block_' + block_id + '_identity_leakyrelu_1')(x)
x = grouped_block(x, f2, cardinality, block_id)
x = BatchNormalization(name='block_' + block_id + '_identity_batch_2')(x)
x = Activation('relu',name='block_' + block_id + '_identity_relu_2')(x)
x = Conv2D(f3, kernel_size=(1,1), use_bias=False, kernel_initializer='he_normal', name='block_' + block_id + '_identity_conv2d_3')(x)
x = BatchNormalization(name='block_' + block_id + '_identity_batch_3')(x)
x = Add(name='block_' + block_id + '_add')([x, inputs])
x = LeakyReLU(name='block_' + block_id + '_identity_leakyrelu_relu_3')(x)
return x
############################################################################################
# Fusion Block
############################################################################################
class FusionLayer(Layer):
def __init__(self, **kwargs):
super(FusionLayer, self).__init__(**kwargs)
def build(self, input_shape):
# Create a trainable weight variable for this layer.
self.kernel = self.add_weight(name='kernel',
shape=(input_shape[1:]),
initializer='uniform',
trainable=True)
super(FusionLayer, self).build(input_shape) # Be sure to call this at the end
def call(self, x, mask=None):
return x * self.kernel
def get_output_shape_for(self, input_shape):
return input_shape
############################################################################################
# ST-ResNet version 1
############################################################################################
def STResNet_v1(c_conf=(32, 32, 2, 3),
p_conf=(32, 32, 2, 3),
t_conf=(32, 32, 2, 3),
output_shape = (32, 32, 2),
res_units=3,
external_dim = None):
height, width, n_flows = output_shape
main_inputs = []
Input_c = Input(shape=(c_conf[0], c_conf[1], c_conf[2] * c_conf[3]), name='input_c')
Input_p = Input(shape=(p_conf[0], p_conf[1], p_conf[2] * p_conf[3]), name='input_p')
Input_t = Input(shape=(t_conf[0], t_conf[1], t_conf[2] * t_conf[3]), name='input_t')
main_inputs.append(Input_c)
main_inputs.append(Input_p)
main_inputs.append(Input_t)
# Input
x_c = Conv2D(64, kernel_size=(3,3),strides=(1,1), padding="same", name= 'conv_input_c')(Input_c)
x_p = Conv2D(64, kernel_size=(3,3),strides=(1,1), padding="same", name= 'conv_input_p')(Input_p)
x_t = Conv2D(64, kernel_size=(3,3),strides=(1,1), padding="same", name= 'conv_input_t')(Input_t)
for i in range(res_units):
x_c = identity_block(x_c, 64, block_id= str(i) +'_c')
x_p = identity_block(x_p, 64, block_id= str(i) +'_p')
x_t = identity_block(x_t, 64, block_id= str(i) +'_t')
x_c = Conv2D(1, kernel_size=(3,3),strides=(1,1), padding="same", name= 'conv_output_c')(x_c)
x_p = Conv2D(1, kernel_size=(3,3),strides=(1,1), padding="same", name= 'conv_output__p')(x_p)
x_t = Conv2D(1, kernel_size=(3,3),strides=(1,1), padding="same", name= 'conv_output__t')(x_t)
# Fusion Layers
x_c = FusionLayer()(x_c)
x_p = FusionLayer()(x_p)
x_t = FusionLayer()(x_t)
fusion = Add(name='temporal_fusion')([x_c,x_p,x_t])
#########################################################################
# External Block
#########################################################################
if external_dim != None and external_dim > 0:
# Concatenate external inputs with temporal inputs
external_input = Input(shape=(external_dim,), name='external_input')
main_inputs.append(external_input)
embedding = Dense(10, name='external_dense_1')(external_input)
embedding = Activation('relu')(embedding)
        embedding = Dense(height * width * n_flows)(embedding)
embedding = Activation('relu')(embedding)
external_output = Reshape((height, width, n_flows ) ,name='external_output')(embedding)
# Fuse with external output
fusion = Add(name='external_fusion')([fusion,external_output])
final_output = Activation('tanh', name='Tanh')(fusion)
model = Model(inputs=main_inputs,outputs=final_output)
return model
############################################################################################
# Training pipeline
############################################################################################
# Metric for our model
def rmse(y_true, y_pred):
return K.mean(K.square(y_pred - y_true)) ** 0.5
# Hyperparameters
epochs = 500
batch_size = 32
learning_rate = 0.0002
# callbacks
model_path = 'saved_models'
# File were the best model will be saved during checkpoint
model_file = os.path.join(model_path,'nyc_bike_flow.h5')
# Early stop to avoid overfitting our model
early_stopping = EarlyStopping(monitor='val_rmse', patience=5, mode='min')
# Check point for saving the best model
check_pointer = ModelCheckpoint(model_file, monitor='val_rmse', mode='min',verbose=1, save_best_only=True)
# Heatmap parameters
map_height = 16
map_width = 8
n_flows = 2
c_conf=(map_height, map_width, n_flows, closeness_len) # closeness
p_conf=(map_height, map_width, n_flows, period_len) # period
t_conf=(map_height, map_width, n_flows, trend_len) # trend
output_shape=(map_height, map_width, n_flows)
external_dim = 8
# Create ST-ResNet Model
model = STResNet_v1(c_conf, p_conf, t_conf, output_shape, res_units=3, external_dim=external_dim)
# Create Optimizer
optimizer = Adam(lr=learning_rate)
model.compile(optimizer, loss='mse' , metrics=[rmse])
model.summary()
# Train the model
history = model.fit(X_train, Y_train,
epochs=epochs,
batch_size=batch_size,
validation_split=0.1,
callbacks=[check_pointer,early_stopping],
verbose=1)
############################################################################################
# Predict
############################################################################################
# If we want to test on a pre trained model use the following line
model.load_weights(os.path.join(model_path,'bikenyc-0.0020.h5'), by_name=False)
n_samples = 3
for i in range(n_samples):
f, (ax1, ax2, ax3,ax4) = plt.subplots(1, 4)
f.set_figwidth(14)
f.set_figheight(6)
# randomly select a sample
idx = np.random.randint(0, len(X_test[0]))
# Add single dimension to each input to simulate batch
X = [X_test[0][idx][np.newaxis,...],X_test[1][idx][np.newaxis,...],X_test[2][idx][np.newaxis,...],X_test[3][idx][np.newaxis,...]]
y_true = Y_test[idx]
# Predict values using our trained model
y_pred = model.predict(X)
y_pred = np.squeeze(y_pred)
    # timestamp_test aligns with the X_test / Y_test indices used above
    date = timestamp_test[idx]
    hmax1 = sns.heatmap(y_true[:,:,0], cmap = matplotlib.cm.winter, alpha = 0.3, annot = False,zorder = 2, ax=ax1)
    hmax1.imshow(nyc_map,aspect = hmax1.get_aspect(),extent = hmax1.get_xlim() + hmax1.get_ylim(), zorder = 1)
    ax1.set_title('True In Flow: {0}'.format(date))
    hmax2 = sns.heatmap(y_pred[:,:,0], cmap = matplotlib.cm.winter, alpha = 0.3, annot = False,zorder = 2, ax=ax2)
    hmax2.imshow(nyc_map,aspect = hmax2.get_aspect(),extent = hmax2.get_xlim() + hmax2.get_ylim(), zorder = 1)
    ax2.set_title('Pred In Flow: {0}'.format(date))
    hmax3 = sns.heatmap(y_true[:,:,1], cmap = matplotlib.cm.winter, alpha = 0.3, annot = False,zorder = 2, ax=ax3)
    hmax3.imshow(nyc_map,aspect = hmax3.get_aspect(),extent = hmax3.get_xlim() + hmax3.get_ylim(), zorder = 1)
    ax3.set_title('True Out Flow: {0}'.format(date))
    hmax4 = sns.heatmap(y_pred[:,:,1], cmap = matplotlib.cm.winter, alpha = 0.3, annot = False,zorder = 2, ax=ax4)
    hmax4.imshow(nyc_map,aspect = hmax4.get_aspect(),extent = hmax4.get_xlim() + hmax4.get_ylim(), zorder = 1)
    ax4.set_title('Pred Out Flow: {0}'.format(date))
############################################################################################
# Evaluate
############################################################################################
# This information was provided in the original article an file !
'''
For NYC Bike data, there are 81 available grid-based areas, each of
which includes at least ONE bike station. Therefore, we modify the final
RMSE by multiplying the following factor (i.e., factor).
'''
nb_area = 81
m_factor = math.sqrt(1. * map_height * map_width / nb_area)
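# Worked out: m_factor = sqrt(16 * 8 / 81) = sqrt(1.58) ~= 1.257, so the reported "real" RMSE
# is scaled up to account for grid cells that contain no bike station.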
score = model.evaluate(X_train, Y_train, batch_size=Y_train.shape[0] // 48, verbose=0)
print('Train score: %.6f rmse (norm): %.6f rmse (real): %.6f' %
(score[0], score[1], score[1] * (max_value - min_value) / 2. * m_factor))
score = model.evaluate(X_test, Y_test, batch_size=Y_test.shape[0], verbose=0)
print('Test score: %.6f rmse (norm): %.6f rmse (real): %.6f' %
(score[0], score[1], score[1] * (max_value - min_value) / 2. * m_factor))
|
StarcoderdataPython
|
1686218
|
# imports
import numpy as np
import tensorflow as tf
from numpy import random
import math
import time
import matplotlib.pyplot as plt
"""Part 1 - Forward Propagation"""
def initialize_parameters(layer_dims):
"""
Description: This function initializes weights and biases
    :param layer_dims: an array of the dimensions of each layer in the
    network (layer 0 is the size of the flattened input, layer L is the output softmax)
:return: a dictionary containing the initialized W and b parameters of each layer (W1…WL, b1…bL).
"""
parameters = {}
for l in range(1, len(layer_dims)):
parameters[f'W{l}'] = np.random.randn(layer_dims[l], layer_dims[l - 1]) * np.sqrt(2 / layer_dims[l - 1])
parameters[f'b{l}'] = np.zeros(shape=(layer_dims[l], 1))
return parameters
def linear_forward(A, W, b):
"""
Description: Implement the linear part of a layer's forward propagation.
:param A: the activations of the previous layer
:param W: the weight matrix of the current layer (of shape [size of current layer, size of previous layer])
:param b: the bias vector of the current layer (of shape [size of current layer, 1])
:return: Z: the linear component of the activation function (i.e., the value before applying the non-linear function)
:return: linear_cache: a dictionary containing A, W, b (stored for making the backpropagation easier to compute)
"""
Z = np.dot(W, A) + b
linear_cache = dict({'A': A, 'W': W, 'b': b})
return Z, linear_cache
def softmax(Z):
"""
Description: Implementation of softmax function
:param Z: the linear component of the activation function
:return: A: the activations of the layer
:return: activation_cache: returns Z, which will be useful for the backpropagation
"""
numerator = np.exp(Z)
denominator = np.sum(numerator, axis=0, keepdims=True)
A = numerator / denominator
activation_cache = Z
return A, activation_cache
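# Worked example for softmax (my own illustration): a single column Z = [1, 2] gives
# exp(Z) = [2.718, 7.389], the column sum is 10.107, so A = [0.269, 0.731] and the
# activations of each column always sum to 1.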
def relu(Z):
"""
Description: Implementation of relu function
:param Z: the linear component of the activation function
:return: A: the activations of the layer
:return: activation_cache: returns Z, which will be useful for the backpropagation
"""
A = np.maximum(0, Z)
activation_cache = Z
return A, activation_cache
def linear_activation_forward(A_prev, W, B, activation):
"""
Description: Implement the forward propagation for the LINEAR->ACTIVATION layer
:param A_prev: activations of the previous layer
:param W: the weights matrix of the current layer
:param B: the bias vector of the current layer
:param activation: the activation function to be used (a string, either “softmax” or “relu”)
:return: A: the activations of the current layer
:return: cache: a joint dictionary containing both linear_cache and activation_cache
"""
if activation in ['relu', 'softmax']:
Z, linear_cache = linear_forward(A_prev, W, B)
A, activation_cache = globals()[activation](Z)
cache = dict({'linear_cache': linear_cache, 'activation_cache': activation_cache})
return A, cache
else:
raise NotImplementedError(
"The given actiavtion function was not implemented. please choose one between {relu} and {softmax}")
def l_model_forward(X, parameters, use_batchnorm):
"""
Description: Implement forward propagation for the [LINEAR->RELU]*(L-1)->LINEAR->SOFTMAX computation
:param X: the data, numpy array of shape (input size, number of examples)
:param parameters: the initialized W and b parameters of each layer
    :param use_batchnorm: a boolean flag used to determine whether to apply batchnorm after the activation
:return:AL: the last post-activation value
:return: caches: a list of all the cache objects generated by the linear_forward function
"""
caches = list()
A_prev = X
num_layers = int(len(parameters.keys()) / 2)
for l in range(1, num_layers):
W = parameters[f'W{l}']
B = parameters[f'b{l}']
A_prev, cache = linear_activation_forward(A_prev, W, B, 'relu')
if use_batchnorm:
A_prev = apply_batchnorm(A_prev)
caches.append(cache)
W = parameters[f'W{num_layers}']
B = parameters[f'b{num_layers}']
AL, cache = linear_activation_forward(A_prev, W, B, 'softmax')
caches.append(cache)
return AL, caches
def compute_cost(AL, Y):
"""
    Description: Implement the cost function. The requested cost function is categorical cross-entropy loss.
:param AL: probability vector corresponding to your label predictions, shape (num_of_classes, number of examples)
:param Y: the labels vector (i.e. the ground truth)
:return: cost: the cross-entropy cost
"""
inner_sum_classes = np.sum(Y * np.log(AL), axis=0, keepdims=True)
outer_sum_samples = np.sum(inner_sum_classes, axis=1)
m = AL.shape[1]
cost = -1 / m * outer_sum_samples
return cost
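# Worked example for compute_cost (my own illustration): for one sample with Y = [0, 1, 0]
# and AL = [0.1, 0.7, 0.2], the inner sum is 1 * log(0.7) = -0.357, so the cost is
# -1/1 * (-0.357) = 0.357; a perfect prediction (AL -> 1 on the true class) drives the cost to 0.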
def apply_batchnorm(A):
"""
Description: performs batchnorm on the received activation values of a given layer.
:param A: the activation values of a given layer
:return: NA: the normalized activation values, based on the formula learned in class
"""
mu = np.mean(A, axis=0, keepdims=True)
variance = np.var(A, axis=0, keepdims=True)
epsilon = 0.01
NA = (A - mu) / np.sqrt(variance + epsilon)
return NA
"""Part 2 - Backward Propagation"""
def linear_backward(dZ, cache):
"""
Description: Implements the linear part of the backward propagation process for a single layer
:param dZ: the gradient of the cost with respect to the linear output of the current layer (layer l)
:param cache: tuple of values (A_prev, W, b) coming from the forward propagation in the current layer
:return:dA_prev: Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev
:return: dW: Gradient of the cost with respect to W (current layer l), same shape as W
:return: db: Gradient of the cost with respect to b (current layer l), same shape as b
"""
m = dZ.shape[1]
dA_prev = np.dot(cache['W'].T, dZ)
dW = (1 / m) * np.dot(dZ, cache['A'].T)
db = (1 / m) * np.sum(dZ, axis=1, keepdims=True)
return dA_prev, dW, db
def linear_activation_backward(dA, cache, activation):
"""
Description: Implements the backward propagation for the LINEAR->ACTIVATION layer.
The function first computes dZ and then applies the linear_backward function.
:param dA: post activation gradient of the current layer
:param cache: contains both the linear cache and the activations cache
:param activation: the activation function name = ['relu' or 'softmax']
:return: dA_prev: Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev
:return: dW: Gradient of the cost with respect to W (current layer l), same shape as W
:return: db: Gradient of the cost with respect to b (current layer l), same shape as b
"""
if activation == 'softmax':
dZ = softmax_backward(dA, cache['activation_cache'])
elif activation == 'relu':
dZ = relu_backward(dA, cache['activation_cache'])
dA_prev, dW, db = linear_backward(dZ, cache['linear_cache'])
return dA_prev, dW, db
def relu_backward(dA, activation_cache):
"""
Description: Implements backward propagation for a ReLU unit
:param dA: the post-activation gradient
:param activation_cache: contains Z (stored during the forward propagation)
:return: dZ: gradient of the cost with respect to Z
"""
derivative = activation_cache
derivative[derivative >= 0] = 1
derivative[derivative < 0] = 0
dZ = dA * derivative
return dZ
def softmax_backward(dA, activation_cache):
"""
Description: Implements backward propagation for a softmax unit
:param dA: the post-activation gradient
:param activation_cache: contains Z (stored during the forward propagation)
:return: dZ: gradient of the cost with respect to Z
"""
AL = activation_cache['AL']
Y = activation_cache['Y']
dZ = AL - Y
return dZ
def l_model_backward(AL, Y, caches):
"""
Description: Implement the backward propagation process for the entire network.
:param AL: the probabilities vector, the output of the forward propagation (L_model_forward)
:param Y: the true labels vector (the "ground truth" - true classifications)
:param caches: list of caches containing for each layer: a) the linear cache; b) the activation cache
:return: Grads: a dictionary with the gradients
grads["dA" + str(l)] = ...
grads["dW" + str(l)] = ...
grads["db" + str(l)] = ...
"""
grads = dict()
num_of_layers = len(caches)
last_cache = caches[-1]
last_cache['activation_cache'] = dict({'AL': AL, 'Y': Y, 'Z': last_cache['activation_cache']})
dA_prev, dW, db = linear_activation_backward(None, last_cache, "softmax") # dA = None cause not necessary
grads["dA" + str(num_of_layers)] = dA_prev
grads["dW" + str(num_of_layers)] = dW
grads["db" + str(num_of_layers)] = db
for l in range(num_of_layers - 1, 0, -1):
dA_prev, dW, db = linear_activation_backward(dA_prev, caches[l - 1], "relu")
grads["dA" + str(l)] = dA_prev
grads["dW" + str(l)] = dW
grads["db" + str(l)] = db
return grads
def update_parameters(parameters, grads, learning_rate):
"""
Description: Updates parameters using gradient descent
:param parameters: a python dictionary containing the DNN architecture’s parameters
:param grads: a python dictionary containing the gradients (generated by L_model_backward)
:param learning_rate: the learning rate used to update the parameters (the “alpha”)
:return: parameters: the updated values of the parameters object provided as input
"""
num_of_layers = int(len(parameters.keys()) / 2)
for l in range(1, num_of_layers + 1):
parameters[f"W{l}"] = parameters[f"W{l}"] - learning_rate * grads[f"dW{l}"]
parameters[f"b{l}"] = parameters[f"b{l}"] - learning_rate * grads[f"db{l}"]
return parameters
"""Part 3 - Train and Predict"""
def train_validation_split(X, Y, train_size):
"""
Description: (auxiliary function) split the train set into train and validation sets
:param X: train set samples
:param Y: train set labels
:param train_size: percentage of the train set
:return: tuples of (x_train, y_train), (x_val, y_val)
"""
# create indices for train and validation sets
indices = list(range(0, X.shape[1]))
random.shuffle(indices)
num_of_x_train_samples = math.ceil(X.shape[1] * train_size)
# split train & validation
x_train, y_train = X[:, indices[0:num_of_x_train_samples]], Y[:, indices[0:num_of_x_train_samples]]
x_val, y_val = X[:, indices[num_of_x_train_samples:X.shape[1]]], Y[:, indices[num_of_x_train_samples:X.shape[1]]]
return (x_train, y_train), (x_val, y_val)
def l_layer_model(X, Y, layers_dims, learning_rate, num_iterations, batch_size):
"""
Description: Implements a L-layer neural network. All layers but the last should have the ReLU activation function,
and the final layer will apply the softmax activation function. The size of the output layer should be equal to
the number of labels in the data. Please select a batch size that enables your code to run well
(i.e. no memory overflows while still running relatively fast).
the function should use the earlier functions in the following order:
initialize -> L_model_forward -> compute_cost -> L_model_backward -> update parameters
:param X: the input data, a numpy array of shape (height*width , number_of_examples)
:param Y: the “real” labels of the data, a vector of shape (num_of_classes, number of examples)
:param layers_dims: a list containing the dimensions of each layer, including the input
:param learning_rate: alpa parameter
:param num_iterations: number of iterations - each iteration equals to one batch
:param batch_size: the number of examples in a single training batch
:return: parameters: the parameters learnt by the system during the training (the same parameters that were updated in the update_parameters function).
:return: costs: the values of the cost function (calculated by the compute_cost function). One value is to be saved after each 100 training iterations (e.g. 3000 iterations -> 30 values).
"""
costs_val = []
costs_train = []
parameters = initialize_parameters(layers_dims)
    is_batchnorm = True  # set to False to disable batchnorm
epoch = 0
val_accuracy = -1
train_accuracy = -1
log = []
(x_train, y_train), (x_val, y_val) = train_validation_split(X, Y, 0.8)
# split x and y train sets to batches
num_of_batches = math.ceil(x_train.shape[1] / batch_size)
batches_x = np.array_split(x_train, num_of_batches, axis=1)
batches_y = np.array_split(y_train, num_of_batches, axis=1)
for num_of_iteration in range(0, num_iterations):
batch_num = num_of_iteration % num_of_batches # current batch number
current_batch_x, current_batch_y = batches_x[batch_num], batches_y[batch_num] # get current batches
AL, caches = l_model_forward(current_batch_x, parameters, is_batchnorm)
grads = l_model_backward(AL, current_batch_y, caches)
parameters = update_parameters(parameters, grads, learning_rate)
# each hundred iterations compute costs for current batch and validation set, and accuracy for validation and
# train sets
if num_of_iteration % 100 == 0:
AL_val, caches_val = l_model_forward(x_val, parameters, is_batchnorm)
costs_val.append(compute_cost(AL_val, y_val))
val_accuracy = predict(x_val, y_val, parameters)
AL_batch, caches_train = l_model_forward(current_batch_x, parameters, is_batchnorm)
costs_train.append(compute_cost(AL_batch, current_batch_y))
log.append(f"Iteration: {num_of_iteration}, Cost: {costs_train[-1]}")
train_accuracy = predict(x_train, y_train, parameters)
print(
f"Epoch: {epoch}, Iteration: {num_of_iteration}, batch_loss: {costs_train[-1]}, train_accuracy: {train_accuracy}, val_loss: {costs_val[-1]}, validation_accuracy: {val_accuracy}")
        # stopping criterion - validation cost increased two checks in a row - threshold = 0.005
if len(costs_val) > 2 and (costs_val[-1] - costs_val[-2] >= 0.005) and (costs_val[-2] - costs_val[-3] >= 0.005):
print("Early stopping reached.")
break
# count epochs
if num_of_iteration % num_of_batches == 0 and num_of_iteration > 0:
epoch += 1
print(f"val_accuracy {val_accuracy}")
plot_model_history(costs_val, "Validation")
print(f"train_accuracy {train_accuracy}")
plot_model_history(costs_train, "Training")
print(*log, sep="\n") # for report
return parameters, costs_train
def predict(X, Y, parameters):
"""
Description: The function receives an input data and the true labels and calculates the accuracy of the trained neural network on the data.
:param X: the input data, a numpy array of shape (height*width, number_of_examples)
:param Y: the “real” labels of the data, a vector of shape (num_of_classes, number of examples)
:param parameters: a python dictionary containing the DNN architecture’s parameters
:return: accuracy – the accuracy measure of the neural net on the provided data
(i.e. the percentage of the samples for which the correct label receives the highest confidence score).
Use the softmax function to normalize the output values.
"""
    is_batchnorm = True  # set to False to disable batchnorm
AL, caches = l_model_forward(X, parameters, is_batchnorm)
y_predict = (AL == np.amax(AL, axis=0)).astype(int) # the class with the maximum prob is the predicted class
accuracy = np.sum(y_predict * Y) / AL.shape[1] # sum number of correct predictions
return accuracy
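# Worked example for predict (my own illustration): a column AL = [0.1, 0.7, 0.2] becomes
# y_predict = [0, 1, 0]; multiplying element-wise with Y = [0, 1, 0] contributes 1 to the
# numerator, so accuracy is (number of correctly classified samples) / (number of samples).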
def plot_model_history(costs_list, type):
"""
Description: (auxiliary function) Plot graph of cost per 100 iterations
:param costs_list:
:param type: str - validation or training
"""
x_index = range(0, len(costs_list)*100, 100)
plt.plot(x_index, costs_list)
plt.title(f'{type} Model Costs')
plt.ylabel('Costs')
plt.xlabel('Iterations')
plt.show()
"""Part 4 - MNIST classification - W/out batchnorm"""
# load MNIST data
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data(path='mnist.npz')
# normalize train and test sets
x_train = x_train / 255
x_test = x_test / 255
# flatten matrices
x_train = x_train.reshape(x_train.shape[0], 784).T
x_test = x_test.reshape(x_test.shape[0], 784).T
# encode y vectors to one hot vector
num_of_classes = len(np.unique(y_train))
y_train_one_hot = np.zeros((num_of_classes, y_train.shape[0]))
y_train_one_hot[y_train, np.arange(y_train.shape[0])] = 1
y_test_one_hot = np.zeros((num_of_classes, y_test.shape[0]))
y_test_one_hot[y_test, np.arange(y_test.shape[0])] = 1
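# Worked example (my own illustration): a label y_train[i] == 5 becomes the column
# [0, 0, 0, 0, 0, 1, 0, 0, 0, 0] of y_train_one_hot, with the 1 in row 5.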
# run network
layers_dims = [x_train.shape[0], 20, 7, 5, 10]
learning_rate = 0.009
num_iterations = 50000
batch_size = 32
start = time.time()
parameters, costs = l_layer_model(x_train, y_train_one_hot, layers_dims, learning_rate, num_iterations, batch_size)
accuracy = predict(x_test, y_test_one_hot, parameters)
print(f"Total time: {(time.time() - start)} seconds.")
print(f"Test Accuracy: {accuracy}")
|
StarcoderdataPython
|
4833300
|
import django_tables2 as tables
from django_tables2.utils import Accessor
from utilities.tables import BaseTable, ToggleColumn
from .models import SecretRole, Secret
SECRETROLE_EDIT_LINK = """
{% if perms.secrets.change_secretrole %}
<a href="{% url 'secrets:secretrole_edit' slug=record.slug %}">Edit</a>
{% endif %}
"""
#
# Secret roles
#
class SecretRoleTable(BaseTable):
pk = ToggleColumn()
name = tables.LinkColumn(verbose_name='Name')
secret_count = tables.Column(verbose_name='Secrets')
slug = tables.Column(verbose_name='Slug')
edit = tables.TemplateColumn(template_code=SECRETROLE_EDIT_LINK, verbose_name='')
class Meta(BaseTable.Meta):
model = SecretRole
fields = ('pk', 'name', 'secret_count', 'slug', 'edit')
#
# Secrets
#
class SecretTable(BaseTable):
pk = ToggleColumn()
device = tables.LinkColumn('secrets:secret', args=[Accessor('pk')], verbose_name='Device')
role = tables.Column(verbose_name='Role')
name = tables.Column(verbose_name='Name')
last_updated = tables.DateTimeColumn(verbose_name='Last updated')
class Meta(BaseTable.Meta):
model = Secret
fields = ('pk', 'device', 'role', 'name', 'last_updated')
|
StarcoderdataPython
|
3274396
|
<gh_stars>0
from nehushtan.httpd.exceptions.NehushtanHTTPError import NehushtanHTTPError
class NehushtanRequestDeniedByFilterError(NehushtanHTTPError):
"""
Since 0.4.0
When a Filter denies a request
"""
def __init__(self, filter_name: str, error_message: str, http_code: int):
super(NehushtanRequestDeniedByFilterError, self).__init__(
f'Filter [{filter_name}] Denied Request: {error_message}',
http_code
)
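# Minimal usage sketch (the filter name, message and status code below are hypothetical):
if __name__ == '__main__':
    try:
        raise NehushtanRequestDeniedByFilterError('TokenFilter', 'token expired', 403)
    except NehushtanHTTPError as error:
        print(error)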
|
StarcoderdataPython
|
1732389
|
<gh_stars>0
from .parser import Parser
__version__ = "0.1.0"
def convert(html: str) -> str:
parser = Parser()
parser.feed(html)
result = []
for elem in parser.parse_result:
result.append(elem.to_str())
return "\n".join(result)
|
StarcoderdataPython
|
123260
|
# -*- coding: utf-8 -*-
# best for i/o sql interface for history stats
#TODO:
#sql for pin_id_4_goods
#shop for pin_ids
#chart_generator
#statistical view
import logging
import datetime
import threading
import time
import mysql.connector
from mysql.connector import errorcode
import redis
# method example code
# sqldb = mysql.connector.connect(host='localhost',database='iotbi',user='root',password='<PASSWORD>')
'''
try:
cnx = mysql.connector.connect(user='scott',
database='employ')
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
else:
cnx.close()
'''
rpool = redis.ConnectionPool(host='localhost', port=6379, db=0)
rd = redis.Redis(connection_pool=rpool)
### set up logging
logger = logging.getLogger('datamodel')
logger.setLevel(logging.DEBUG)
# fh
fh = logging.FileHandler('datamodel.log')
fh.setLevel(logging.DEBUG)
# ch
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s -%(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
# sql tables
DB_NAME = 'iotbi'
TABLES = {}
TABLES['devicepinstats'] = (
"CREATE TABLE `devicepinstats` ("
" `rec_no` int(11) NOT NULL AUTO_INCREMENT,"
" `date` date NOT NULL,"
" `devicepin` bigint NOT NULL,"
" `count` smallint NOT NULL,"
" PRIMARY KEY (`rec_no`)"
") ) ENGINE=InnoDB")
#
class DBPinstats:
def __init__(self):
        # add sql conn
        # self.db = sqldb
        self.cnx = None  # set by sql_init() on success; guarded against elsewhere
        self.sql_init()
self.sql_init_table()
self.sql_close()
# recon before use cnx/cursor
def save_devicecount(self, devicepin, count):
#
# date : date
# devicepin: hexstr
# count: int
script = (
"INSERT INTO devicepinstats "
"(date, devicepin, count) "
"VALUES (%s, %s, %s)")
d = datetime.datetime.now().date()
args = (d, int(devicepin, 16), count)
        # sql_execute already handles mysql.connector errors and returns a status
        o = self.sql_execute(script, args)
        return o
def get_history(self, devicepin):
# one device pin/4 a tuple
# return days with counts
query = ("SELECT date, devicepin, count FROM devicepinstats "
"WHERE devicepin = %s AND date BETWEEN %s AND %s")
d_end = datetime.datetime.now().date()
d_start = datetime.datetime.now().date() - datetime.timedelta(days=7)
args = (int(devicepin, 16), d_start, d_end)
        # note: sql_execute only reports success/failure; fetching result rows is still TODO
        o = self.sql_execute(query, args)
        return o
def sql_init(self):
def create_database(cursor):
try:
cursor.execute(
"CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'".format(DB_NAME)
)
except mysql.connector.Error as err:
print("failed create db")
exit(1)
try:
cnx = mysql.connector.connect(user='root',
# database='iotbi',
password='<PASSWORD>',
host='localhost'
)
cursor = cnx.cursor()
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
logger.error('db access denied')
# return
logger.error('db conn error')
return
# execute command
try:
cursor.execute("USE {}".format(DB_NAME))
except mysql.connector.Error as err:
print("Database {} does not exists.".format(DB_NAME))
if err.errno == errorcode.ER_BAD_DB_ERROR:
create_database(cursor)
print("Database {} created successfully.".format(DB_NAME))
cnx.database = DB_NAME
else:
print(err)
exit(1)
#
else:
self.cnx = cnx
# self.sql_init_table()
def sql_init_table(self):
if self.cnx is None:
return
cursor = self.cnx.cursor()
for table in TABLES:
tablescript = TABLES[table]
try:
cursor.execute(tablescript)
except mysql.connector.Error as err:
if err.errno == errorcode.ER_TABLE_EXISTS_ERROR:
logger.info('already exist table')
else:
logger.error(err.msg)
else:
logger.info('table OK')
cursor.close()
def sql_conn(self):
if self.cnx is None:
cnx = mysql.connector.connect(user='root',
database='iotbi',
password='<PASSWORD>',
host='localhost'
)
self.cnx = cnx
def sql_close(self):
if self.cnx is not None:
self.cnx.close()
def sql_execute(self, command, values):
#
if self.cnx is None:
return
else:
cursor = self.cnx.cursor()
try:
cursor.execute(command, values)
except mysql.connector.Error as err:
logger.error(err.msg)
cursor.close()
return False
else:
cursor.close()
return True
sdb = DBPinstats()
# sdb.sql_init()
def dayswap():
# retrieve data from redis
# then save it to sql
sdb.sql_conn()
while rd.llen('actionlist') > 0:
el = rd.lpop('actionlist').decode('ascii')
# loop 4 pins
for i in range(4):
k = '%s%s' % (el, i)
            counts = rd.get(k)
            rd.delete(k)
            # save it to sql; redis returns bytes (or None), so coerce to int first
            count = int(counts) if counts is not None else 0
            o = sdb.save_devicecount(k, count)
sdb.sql_close()
class WorkerThread(threading.Thread):
"""
worker thread
"""
    def __init__(self, date):
        super(WorkerThread, self).__init__()
        self.date = date
        self.stoprequest = threading.Event()  # allows a clean shutdown via stoprequest.set()
    def run(self):
        while not self.stoprequest.is_set():
            if datetime.datetime.now().date() - self.date >= datetime.timedelta(days=1):
                dayswap()
                self.date = datetime.datetime.now().date()
            else:
                time.sleep(3600)
if __name__ == "__main__":
    if rd.get('date') is None:
        d = datetime.datetime.now().date()
        rd.set('date', d.isoformat())
    else:
        ds = rd.get('date').decode('ascii')
        dsl = ds.split('-')
        d = datetime.date(int(dsl[0]), int(dsl[1]), int(dsl[2]))
t = WorkerThread(date=d)
t.start()
|
StarcoderdataPython
|
161061
|
<reponame>eas342/dust_mie
from dust_mie import calc_mie
import matplotlib.pyplot as plt
median_r = 1.0
s = 0.5
r, dr = calc_mie.get_r_to_evaluate(r=median_r,s=s)
n = calc_mie.lognorm(r,s=s,med=median_r)
plt.plot(r,n)
plt.xlabel(r'Particle Radius ($\mu$m)')
plt.ylabel('Number')
plt.savefig('radius_distribution.png')
|
StarcoderdataPython
|
170833
|
class AdvancedBoxScore:
def __init__(self, seconds_played, offensive_rating, defensive_rating,
teammate_assist_percentage, assist_to_turnover_ratio, assists_per_100_possessions,
offensive_rebound_percentage, defensive_rebound_percentage, turnovers_per_100_possessions,
effective_field_goal_percentage, true_shooting_percentage):
self.seconds_played = seconds_played
self.offensive_rating = offensive_rating
self.defensive_rating = defensive_rating
self.teammate_assist_percentage = teammate_assist_percentage
self.assist_to_turnover_ratio = assist_to_turnover_ratio
self.assists_per_100_possessions = assists_per_100_possessions
self.offensive_rebound_percentage = offensive_rebound_percentage
self.defensive_rebound_percentage = defensive_rebound_percentage
self.turnovers_per_100_possessions = turnovers_per_100_possessions
self.effective_field_goal_percentage = effective_field_goal_percentage
self.true_shooting_percentage = true_shooting_percentage
def __unicode__(self):
        return '{0} | {1}'.format(self.get_additional_unicode(), self.get_base_unicode())
def get_base_unicode(self):
return 'seconds played: {seconds_played} | offensive rating: {offensive_rating} | ' \
'defensive rating: {defensive_rating} | teammate assist percentage: {teammate_assist_percentage} |' \
'assist to turnover ratio: {assist_to_turnover_ratio} | ' \
'assists per 100 possessions: {assists_per_100_possessions} | ' \
'offensive rebound percentage: {offensive_rebound_percentage} |' \
'defensive rebound percentage: {defensive_rebound_percentage} |' \
'turnovers per 100 possessions: {turnovers_per_100_possessions} |' \
'effective field goal percentage: {effective_field_goal_percentage} |' \
'true shooting percentage: {true_shooting_percentage}'\
.format(seconds_played=self.seconds_played, offensive_rating=self.offensive_rating,
defensive_rating=self.defensive_rating, teammate_assist_percentage=self.teammate_assist_percentage,
assist_to_turnover_ratio=self.assist_to_turnover_ratio,
assists_per_100_possessions=self.assists_per_100_possessions,
offensive_rebound_percentage=self.offensive_rebound_percentage,
defensive_rebound_percentage=self.defensive_rebound_percentage,
turnovers_per_100_possessions=self.turnovers_per_100_possessions,
effective_field_goal_percentage=self.effective_field_goal_percentage,
true_shooting_percentage=self.true_shooting_percentage)
def get_additional_unicode(self):
        raise NotImplementedError('Should be implemented in concrete class')
class AdvancedPlayerBoxScore(AdvancedBoxScore):
def __init__(self, player, seconds_played, offensive_rating, defensive_rating,
teammate_assist_percentage, assist_to_turnover_ratio, assists_per_100_possessions,
offensive_rebound_percentage, defensive_rebound_percentage, turnovers_per_100_possessions,
effective_field_goal_percentage, true_shooting_percentage, usage_percentage):
self.player = player
self.usage_percentage = usage_percentage
AdvancedBoxScore.__init__(self, seconds_played=seconds_played,
offensive_rating=offensive_rating, defensive_rating=defensive_rating,
teammate_assist_percentage=teammate_assist_percentage,
assist_to_turnover_ratio=assist_to_turnover_ratio,
assists_per_100_possessions=assists_per_100_possessions,
offensive_rebound_percentage=offensive_rebound_percentage,
defensive_rebound_percentage=defensive_rebound_percentage,
turnovers_per_100_possessions=turnovers_per_100_possessions,
effective_field_goal_percentage=effective_field_goal_percentage,
true_shooting_percentage=true_shooting_percentage)
def get_additional_unicode(self):
return 'player: {player} | usage percentage: {usage_percentage}'.format(player=self.player,
usage_percentage=self.usage_percentage)
class AdvancedTeamBoxScore(AdvancedBoxScore):
def __init__(self, team, seconds_played, offensive_rating, defensive_rating,
teammate_assist_percentage, assist_to_turnover_ratio, assists_per_100_possessions,
offensive_rebound_percentage, defensive_rebound_percentage, turnovers_per_100_possessions,
effective_field_goal_percentage, true_shooting_percentage):
self.team = team
AdvancedBoxScore.__init__(self, seconds_played=seconds_played,
offensive_rating=offensive_rating, defensive_rating=defensive_rating,
teammate_assist_percentage=teammate_assist_percentage,
assist_to_turnover_ratio=assist_to_turnover_ratio,
assists_per_100_possessions=assists_per_100_possessions,
offensive_rebound_percentage=offensive_rebound_percentage,
defensive_rebound_percentage=defensive_rebound_percentage,
turnovers_per_100_possessions=turnovers_per_100_possessions,
effective_field_goal_percentage=effective_field_goal_percentage,
true_shooting_percentage=true_shooting_percentage)
def get_additional_unicode(self):
return 'team: {team}'.format(team=self.team)
class TraditionalBoxScore:
def __init__(self, seconds_played, field_goals_made, field_goals_attempted,
three_point_field_goals_made, three_point_field_goals_attempted,
free_throws_made, free_throws_attempted, offensive_rebounds, defensive_rebounds, assists,
steals, blocks, turnovers, personal_fouls):
self.seconds_played = seconds_played
self.field_goals_made = field_goals_made
self.field_goals_attempted = field_goals_attempted
self.three_point_field_goals_made = three_point_field_goals_made
self.three_point_field_goals_attempted = three_point_field_goals_attempted
self.free_throws_made = free_throws_made
self.free_throws_attempted = free_throws_attempted
self.offensive_rebounds = offensive_rebounds
self.defensive_rebounds = defensive_rebounds
self.assists = assists
self.steals = steals
self.blocks = blocks
self.turnovers = turnovers
self.personal_fouls = personal_fouls
def __unicode__(self):
return '{0} | {1}'.format(self.get_additional_unicode(), self.get_base_unicode())
def get_base_unicode(self):
return 'seconds played: {seconds_played} | field goals made: {field_goals_made} |' \
'field goals attempted: {field_goals_attempted} | ' \
'three point field goals made: {three_point_field_goals_made} | ' \
'three point field goals attempted: {three_point_field_goals_attempted} | ' \
'free throws made: {free_throws_made} |' 'free throws attempted: {free_throws_attempted} | ' \
               'offensive rebounds: {offensive_rebounds} | ' 'defensive rebounds: {defensive_rebounds} | ' \
'assists: {assists} | steals: {steals} | blocks: {blocks} | turnovers: {turnovers} | ' \
'personal fouls: {personal_fouls}'.format(seconds_played=self.seconds_played,
field_goals_made=self.field_goals_made,
field_goals_attempted=self.field_goals_attempted,
three_point_field_goals_made=self.three_point_field_goals_made,
three_point_field_goals_attempted=self.three_point_field_goals_attempted,
free_throws_made=self.free_throws_made,
free_throws_attempted=self.free_throws_attempted,
offensive_rebounds=self.offensive_rebounds,
defensive_rebounds=self.defensive_rebounds,
assists=self.assists, steals=self.steals, blocks=self.blocks,
turnovers=self.turnovers, personal_fouls=self.personal_fouls)
def get_additional_unicode(self):
raise NotImplementedError('Implement in concrete classes')
class TraditionalPlayerBoxScore(TraditionalBoxScore):
def __init__(self, player, seconds_played, field_goals_made, field_goals_attempted,
three_point_field_goals_made, three_point_field_goals_attempted,
free_throws_made, free_throws_attempted, offensive_rebounds, defensive_rebounds, assists,
steals, blocks, turnovers, personal_fouls, plus_minus):
self.player = player
self.plus_minus = plus_minus
TraditionalBoxScore.__init__(self, seconds_played=seconds_played, field_goals_made=field_goals_made,
field_goals_attempted=field_goals_attempted,
three_point_field_goals_made=three_point_field_goals_made,
three_point_field_goals_attempted=three_point_field_goals_attempted,
free_throws_made=free_throws_made, free_throws_attempted=free_throws_attempted,
offensive_rebounds=offensive_rebounds, defensive_rebounds=defensive_rebounds,
assists=assists, steals=steals, blocks=blocks, turnovers=turnovers,
personal_fouls=personal_fouls)
def get_additional_unicode(self):
return 'player: {player} | plus minus: {plus_minus}'.format(player=self.player, plus_minus=self.plus_minus)
class TraditionalTeamBoxScore(TraditionalBoxScore):
def __init__(self, team, seconds_played, field_goals_made, field_goals_attempted, free_throws_attempted,
three_point_field_goals_made, three_point_field_goals_attempted,
free_throws_made, offensive_rebounds, defensive_rebounds, assists,
steals, blocks, turnovers, personal_fouls):
self.team = team
TraditionalBoxScore.__init__(self, seconds_played=seconds_played, field_goals_made=field_goals_made,
field_goals_attempted=field_goals_attempted,
three_point_field_goals_made=three_point_field_goals_made,
three_point_field_goals_attempted=three_point_field_goals_attempted,
free_throws_made=free_throws_made, free_throws_attempted=free_throws_attempted,
offensive_rebounds=offensive_rebounds, defensive_rebounds=defensive_rebounds,
assists=assists, steals=steals, blocks=blocks, turnovers=turnovers,
personal_fouls=personal_fouls)
def get_additional_unicode(self):
        return 'team: {team}'.format(team=self.team)
class GameBoxScore:
def __init__(self, game_id, player_box_scores, team_box_scores):
self.game_id = game_id
self.player_box_scores = player_box_scores
self.team_box_scores = team_box_scores
|
StarcoderdataPython
|
139867
|
<reponame>IonaBrenac/BiblioAnalysis
__all__ = ['ALIAS_UK',
'CHANGE',
'COUNTRIES',
'COUNTRIES_GPS',
'DIC_CHANGE_CHAR',
'IN_TO_MM',
'USA_STATES',]
# Countries normalized names and GPS coordinates
COUNTRY = '''
United States,Afghanistan,Albania,Algeria,American Samoa,Andorra,Angola,
Anguilla,Antarctica,Antigua And Barbuda,Argentina,Armenia,Aruba,Australia,
Austria,Azerbaijan,Bahamas,Bahrain,Bangladesh,Barbados,Belarus,Belgium,
Belize,Benin,Bermuda,Bhutan,Bolivia,Bosnia And Herzegowina,Botswana,Bouvet Island,
Brazil,Brunei Darussalam,Bulgaria,Burkina Faso,Burundi,Cambodia,Cameroon,Canada,
Cape Verde,Cayman Islands,Central African Rep,Chad,Chile,China,Christmas Island,
Cocos Islands,Colombia,Comoros,Congo,Cook Islands,Costa Rica,Cote D`ivoire,Croatia,
Cuba,Cyprus,Czech Republic,Denmark,Djibouti,Dominica,Dominican Republic,East Timor,
Ecuador,Egypt,El Salvador,Equatorial Guinea,Eritrea,Estonia,Ethiopia,Falkland Islands (Malvinas),
Faroe Islands,Fiji,Finland,France,French Guiana,French Polynesia,French S. Territories,
Gabon,Gambia,Georgia,Germany,Ghana,Gibraltar,Greece,Greenland,Grenada,Guadeloupe,Guam,
Guatemala,Guinea,Guinea-bissau,Guyana,Haiti,Honduras,Hong Kong,Hungary,Iceland,India,
Indonesia,Iran,Iraq,Ireland,Israel,Italy,Jamaica,Japan,Jordan,Kazakhstan,Kenya,Kiribati,
North Korea,South Korea,Kuwait,Kyrgyzstan,Laos,Latvia,Lebanon,Lesotho,Liberia,Libya,
Liechtenstein,Lithuania,Luxembourg,Macau,Macedonia,Madagascar,Malawi,Malaysia,Maldives,
Mali,Malta,Marshall Islands,Martinique,Mauritania,Mauritius,Mayotte,Mexico,Micronesia,
Moldova,Monaco,Mongolia,Montserrat,Morocco,Mozambique,Myanmar,Namibia,Nauru,Nepal,Netherlands,
Netherlands Antilles,New Caledonia,New Zealand,Nicaragua,Niger,Nigeria,Niue,Norfolk Island,
Northern Mariana Islands,Norway,Oman,Pakistan,Palau,Panama,Papua New Guinea,Paraguay,Peru,
Philippines,Pitcairn,Poland,Portugal,Puerto Rico,Qatar,Reunion,Romania,Russian Federation,
Rwanda,Saint Kitts And Nevis,Saint Lucia,St Vincent/Grenadines,Samoa,San Marino,Sao Tome,
Saudi Arabia,Senegal,Seychelles,Sierra Leone,Singapore,Slovakia,Slovenia,Solomon Islands,
Somalia,South Africa,Spain,Sri Lanka,St. Helena,St.Pierre,Sudan,Suriname,Swaziland,Sweden,
Switzerland,Syrian Arab Republic,Taiwan,Tajikistan,Tanzania,Thailand,Togo,Tokelau,Tonga,
Trinidad And Tobago,Tunisia,Turkey,Turkmenistan,Tuvalu,Uganda,Ukraine,United Arab Emirates,
United Kingdom,Uruguay,Uzbekistan,Vanuatu,Vatican City State,Venezuela,Viet Nam,Virgin Islands (British),
Virgin Islands (U.S.),Western Sahara,Yemen,Yugoslavia,Zaire,Zambia,Zimbabwe
'''
COUNTRIES = [x.strip() for x in COUNTRY.split(',')]
USA_STATES = '''AL,AK,AZ,AR,CA,CO,CT,DE,FL,GA,HI,ID,IL,IN,IA,KS,KY,LA,ME,MD,MA,MI,MN,MS,MO,MT,
NE,NV,NH,NJ,NM,NY,NC,ND,OH,OK,OR,PA,RI,SC,SD,TN,TX,UT,VT,VA,WA,WV,WI,WY'''
USA_STATES = [x.strip() for x in USA_STATES.split(',')]
ALIAS_UK = '''England,Wales,North Ireland,Scotland'''
ALIAS_UK = [x.strip() for x in ALIAS_UK.split(',')]
COUNTRIES_GPS_STRING = '''Aruba:12.5,-69.97;Afghanistan:33,65;Angola:-12.5,18.5;Anguilla:18.25,-63.17;
Albania:41,20;Andorra:42.5,1.5;United Arab Emirates:24,54;Argentina:-34,-64;Armenia:40,45;
American Samoa:-14.33,-170;Antarctica:-90,0;French Southern and Antarctic Lands:-49.25,69.167;
Antigua And Barbuda:17.05,-61.8;Australia:-27,133;Austria:47.3,13.3;Azerbaijan:40.5,47.5;
Burundi:-3.5,30;Belgium:50.83,4;Benin:9.5,2.25;Burkina Faso:13,-2;Bangladesh:24,90;Bulgaria:43,25;
Bahrain:26,50.55;Bahamas:24.25,-76;Bosnia And Herzegowina:44,18;Saint Barthélemy:18.5,-63.417;
Belarus:53,28;Belize:17.25,-88.75;Bermuda:32.3,-64.75;Bolivia:-17,-65;Brazil:-10,-55;
Barbados:13.16,-59.53;Brunei Darussalam:4.5,114.67;Bhutan:27.5,90.5;Bouvet Island:-54.43,3.4;
Botswana:-22,24;Central African Rep:7,21;Canada:60,-95;Switzerland:47,8;Chile:-30,-71;China:35,105;
IvoryCoast:8,-5;Cameroon:6,12;Congo:0,25;Republic of theCongo:-1,15;Cook Islands:-21.23,-159.77;
Colombia:4,-72;Comoros:-12.17,44.25;Cape Verde:16,-24;Costa Rica:10,-84;Cuba:21.5,-80;
Curacao:12.116667,-68.933333;Christmas Island:-10.5,105.66;Cayman Islands:19.5,-80.5;Cyprus:35,33;
Czech Republic:49.75,15.5;Germany:51,9;Djibouti:11.5,43;Dominica:15.416,-61.33;Denmark:56,10;
Dominican Republic:19,-70.7;Algeria:28,3;Ecuador:-2,-77.5;Egypt:27,30;Eritrea:15,39;Western Sahara:24.5,-13;
Spain:40,-4;Estonia:59,26;Ethiopia:8,38;Finland:64,26;Fiji:-18,175;Falkland Islands:-51.75,-59;
France:46,2;Faroe Islands:62,-7;Micronesia:6.917,158.25;Gabon:-1,11.75;United Kingdom:54,-2;
Georgia:42,43.5;Guernsey:49.46,-2.583;Ghana:8,-2;Gibraltar:36.13,-5.35;Guinea:11,-10;
Guadeloupe:16.25,-61.583;Gambia:13.47,-16.57;Guinea-bissau:12,-15;Equatorial Guinea:2,10;Greece:39,22;
Grenada:12.117,-61.67;Greenland:72,-40;Guatemala:15.5,-90.25;French Guiana:4,-53;Guam:13.47,144.783;
Guyana:5,-59;Hong Kong:22.267,114.188;Honduras:15,-86.5;Croatia:45.17,15.5;Haiti:19,-72.417;Hungary:47,20;
Indonesia:-5,120;Isle of Man:54.25,-4.5;India:20,77;British Indian Ocean Territory:-6,71.5;Ireland:53,-8;
Iran:32,53;Iraq:33,44;Iceland:65,-18;Israel:31.47,35.13;Italy:42.83,12.83;Jamaica:18.25,-77.5;
Jersey:49.25,-2.17;Jordan:31,36;Japan:36,138;Kazakhstan:48,68;Kenya:1,38;Kyrgyzstan:41,75;Cambodia:13,105;
Kiribati:1.417,173;Saint Kitts And Nevis:17.33,-62.75;South Korea:37,127.5;Kosovo:42.67,21.17;Kuwait:29.5,45.75;
Laos:18,105;Lebanon:33.83,35.83;Liberia:6.5,-9.5;Libya:25,17;Saint Lucia:13.883,-60.97;Liechtenstein:47.27,9.53;
Sri Lanka:7,81;Lesotho:-29.5,28.5;Lithuania:56,24;Luxembourg:49.75,6.16;Latvia:57,25;Macau:22.17,113.55;
Saint Martin:18.083,-63.95;Morocco:32,-5;Monaco:43.73,7.4;Moldova:47,29;Madagascar:-20,47;Maldives:3.25,73;
Mexico:23,-102;Marshall Islands:9,168;Macedonia:41.83,22;Mali:17,-4;Malta:35.83,14.583;Myanmar:22,98;
Montenegro:42.5,19.3;Mongolia:46,105;Northern Mariana Islands:15.2,145.75;Mozambique:-18.25,35;Mauritania:20,-12;
Montserrat:16.75,-62.2;Martinique:14.67,-61;Mauritius:-20.283,57.55;Malawi:-13.5,34;Malaysia:2.5,112.5;
Mayotte:-12.83,45.17;Namibia:-22,17;New Caledonia:-21.5,165.5;Niger:16,8;Norfolk Island:-29.03,167.95;
Nigeria:10,8;Nicaragua:13,-85;Niue:-19.03,-169.87;Netherlands:52.5,5.75;Norway:62,10;Nepal:28,84;
Nauru:-0.53,166.917;New Zealand:-41,174;Oman:21,57;Pakistan:30,70;Panama:9,-80;Pitcairn:-25.07,-130.1;
Peru:-10,-76;Philippines:13,122;Palau:7.5,134.5;Papua New Guinea:-6,147;Poland:52,20;Puerto Rico:18.25,-66.5;
North Korea:40,127;Portugal:39.5,-8;Paraguay:-23,-58;Palestine:31.9,35.2;French Polynesia:-15,-140;Qatar:25.5,51.25;
Reunion:-21.15,55.5;Romania:46,25;Russian Federation:60,100;Rwanda:-2,30;Saudi Arabia:25,45;Sudan:15,30;Senegal:14,-14;
Singapore:1.36,103.8;SouthGeorgia:-54.5,-37;Svalbard and Jan Mayen:78,20;Solomon Islands:-8,159;
Sierra Leone:8.5,-11.5;El Salvador:13.83,-88.916;San Marino:43.76,12.416;Somalia:10,49;
SaintPierreandMiquelon:46.83,-56.33;Serbia:44,21;SouthSudan:7,30;Sao Tome:1,7;
Suriname:4,-56;Slovakia:48.66,19.5;Slovenia:46.116,14.816;Sweden:62,15;Swaziland:-26.5,31.5;
Sint Maarten:18.03,-63.05;Seychelles:-4.583,55.66;Syrian Arab Republic:35,38;TurksandCaicosIslands:21.75,-71.583;
Chad:15,19;Togo:8,1.16;Thailand:15,100;Tajikistan:39,71;Tokelau:-9,-172;Turkmenistan:40,60;East Timor:-8.83,125.916;
Tonga:-20,-175;Trinidad And Tobago:11,-61;Tunisia:34,9;Turkey:39,35;Tuvalu:-8,178;Taiwan:23.5,121;Tanzania:-6,35;
Uganda:1,32;Ukraine:49,32;United States Minor Outlying Islands:19.2911437,166.618332;Uruguay:-33,-56;United States:38,-97;
Uzbekistan:41,64;Vatican City State:41.9,12.45;St Vincent/Grenadines:13.25,-61.2;Venezuela:8,-66;
Virgin Islands (British):18.431383,-64.62305;Virgin Islands (U.S.):18.35,-64.933333;Viet Nam:16.16,107.83;
Vanuatu:-16,167;Wallis and Futuna:-13.3,-176.2;Samoa:-13.583,-172.33;Yemen:15,48;South Africa:-29,24;
Zambia:-15,30;Zimbabwe:-20,30'''
import re
pattern = re.compile(r"^[\n]{0,1}(?P<country>[\w\s\-\(\)\./]+):(?P<long>[\d\.\-]+),(?P<lat>[\d\.\-]+)$",re.M)
COUNTRIES_GPS = {}
for country in COUNTRIES_GPS_STRING.split(';'):
match = pattern.search(country)
COUNTRIES_GPS[match.group("country")] = (float(match.group("long")),float(match.group("lat")))
# Character replacements
DIC_CHANGE_CHAR = {"Ł":"L", # polish capital to L
"ł":"l", # polish l
"ı":"i",
"‐":"-", # Non-Breaking Hyphen to hyphen-minus
"Đ":"D", # D with stroke (Vietamese,South Slavic) to D
".":"",
",":""}
CHANGE = str.maketrans(DIC_CHANGE_CHAR)
IN_TO_MM = 25.4
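# Quick sanity-check sketch of how these constants are typically consumed:
# CHANGE strips the punctuation listed in DIC_CHANGE_CHAR from raw strings,
# and COUNTRIES_GPS maps normalized country names to the coordinate tuples
# parsed above (note that the regex group names 'long' and 'lat' are swapped
# relative to the actual values).
if __name__ == '__main__':
    assert 'France' in COUNTRIES
    assert COUNTRIES_GPS['France'] == (46.0, 2.0)
    assert 'St. Helena'.translate(CHANGE) == 'St Helena'
    assert 'Scotland' in ALIAS_UK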
|
StarcoderdataPython
|
26963
|
<filename>compiler/extensions/python/runtime/src/zserio/bitfield.py
"""
The module provides help methods for bit fields calculation.
"""
from zserio.exception import PythonRuntimeException
def getBitFieldLowerBound(length):
"""
Gets the lower bound of a unsigned bitfield type with given length.
:param length: Length of the unsigned bitfield in bits.
:returns: The lowest value the unsigned bitfield can hold.
:raises PythonRuntimeException: If unsigned bitfield with wrong length has been specified.
"""
_checkBitFieldLength(length, MAX_UNSIGNED_BITFIELD_BITS)
return 0
def getBitFieldUpperBound(length):
"""
Gets the upper bound of a unsigned bitfield type with given length.
:param length: Length of the unsigned bitfield in bits.
:returns: The largest value the unsigned bitfield can hold.
:raises PythonRuntimeException: If unsigned bitfield with wrong length has been specified.
"""
_checkBitFieldLength(length, MAX_UNSIGNED_BITFIELD_BITS)
return (1 << length) - 1
def getSignedBitFieldLowerBound(length):
"""
Gets the lower bound of a signed bitfield type with given length.
:param length: Length of the signed bitfield in bits.
:returns: The lowest value the signed bitfield can hold.
:raises PythonRuntimeException: If signed bitfield with wrong length has been specified.
"""
_checkBitFieldLength(length, MAX_SIGNED_BITFIELD_BITS)
return -(1 << (length - 1))
def getSignedBitFieldUpperBound(length):
"""
Gets the upper bound of a signed bitfield type with given length.
:param length: Length of the signed bitfield in bits.
:returns: The largest value the signed bitfield can hold.
:raises PythonRuntimeException: If signed bitfield with wrong length has been specified.
"""
_checkBitFieldLength(length, MAX_SIGNED_BITFIELD_BITS)
return (1 << (length - 1)) - 1
def _checkBitFieldLength(length, maxBitFieldLength):
if length <= 0 or length > maxBitFieldLength:
raise PythonRuntimeException("Asking for bound of bitfield with invalid length %d!" % length)
MAX_SIGNED_BITFIELD_BITS = 64
MAX_UNSIGNED_BITFIELD_BITS = 63
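# Usage sketch illustrating the bounds above; the values follow directly from
# the formulas (an n-bit unsigned field spans [0, 2**n - 1], a signed one
# spans [-2**(n-1), 2**(n-1) - 1]).
if __name__ == "__main__":
    assert getBitFieldLowerBound(8) == 0
    assert getBitFieldUpperBound(8) == 255
    assert getSignedBitFieldLowerBound(8) == -128
    assert getSignedBitFieldUpperBound(8) == 127
    try:
        getBitFieldUpperBound(64)  # unsigned bitfields are capped at 63 bits here
    except PythonRuntimeException as error:
        print(error)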
|
StarcoderdataPython
|
98863
|
<filename>alg1/batch_tests/BatchTest-batched.py
import subprocess
if __name__ == '__main__':
commbatch = "python ../StressTest.py --bw_factor=1.0 --lat_factor=1.0 --res_factor=1.0 --vnf_sharing=0.0 --vnf_sharing_same_sg=0.0 --shareable_sg_count=4 --batch_length=4 --request_seed="
commnonbatch = "python ../StressTest.py --bw_factor=1.0 --lat_factor=1.0 --res_factor=1.0 --vnf_sharing=0.0 --vnf_sharing_same_sg=0.0 --shareable_sg_count=4 --batch_length=1 --request_seed="
for i in xrange(0,1500):
batched = commbatch + str(i) + " 2>> batched.out"
with open("batched.out", "a") as batch:
batch.write("\nCommand seed: %s\n"%i)
subprocess.call(batched, shell=True)
with open("batched.out", "a") as batch:
batch.write("\n============================================\n")
|
StarcoderdataPython
|
3216701
|
#!/usr/bin/env python3
#****************************************************************************************************************************************************
# Copyright (c) 2016 Freescale Semiconductor, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the Freescale Semiconductor, Inc. nor the names of
# its contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#****************************************************************************************************************************************************
from typing import List
import os
import os.path
import xml.etree.ElementTree as ET
from FslBuildGen import IOUtil
from FslBuildGen.BasicConfig import BasicConfig
from FslBuildGen.DataTypes import PackageLanguage
from FslBuildGen.Exceptions import FileNotFoundException
from FslBuildGen.Xml.Exceptions import XmlException
from FslBuildGen.Xml.Exceptions import XmlInvalidRootElement
from FslBuildGen.Xml.XmlBase import XmlBase
class XmlNewVSProjectTemplate(XmlBase):
def __init__(self, basicConfig: BasicConfig, xmlElement: ET.Element) -> None:
super(XmlNewVSProjectTemplate, self).__init__(basicConfig, xmlElement)
self.Name = XmlBase._ReadAttrib(self, xmlElement, 'Name')
self.Description = XmlBase._ReadAttrib(self, xmlElement, 'Description')
packageLanguage = XmlBase._ReadAttrib(self, xmlElement, 'PackageLanguage')
self.PackageLanguage = PackageLanguage.FromString(packageLanguage)
self.ProjectExtension = XmlBase._ReadAttrib(self, xmlElement, 'ProjectExtension', 'vcxproj')
class XmlNewVSProjectTemplateFile(XmlBase):
def __init__(self, basicConfig: BasicConfig, filename: str) -> None:
if not os.path.isfile(filename):
raise FileNotFoundException("Could not locate config file %s", filename)
tree = ET.parse(filename)
elem = tree.getroot()
if elem.tag != 'FslBuildNewVSProjectTemplate':
raise XmlInvalidRootElement("The file did not contain the expected root tag 'FslBuildNewVSProjectTemplate'")
super(XmlNewVSProjectTemplateFile, self).__init__(basicConfig, elem)
strVersion = XmlBase._ReadAttrib(self, elem, 'Version')
if strVersion != "1":
raise Exception("Unsupported version")
xmlTemplate = self.__LoadTemplateConfiguration(basicConfig, elem)
if len(xmlTemplate) != 1:
raise XmlException("The file did not contain exactly one Template element")
directoryName = IOUtil.GetDirectoryName(filename)
self.Name = IOUtil.GetFileName(directoryName)
self.Id = self.Name.lower()
self.Version = 1
self.Template = xmlTemplate[0]
self.Path = IOUtil.GetDirectoryName(filename)
self.Prefix = ("%s_" % (self.Name)).upper()
if self.Name != self.Template.Name:
raise Exception("The parent template directory name '{0}' does not match the template name '{1}' {2}".format(self.Name, self.Template.Name, self.Path))
def __LoadTemplateConfiguration(self, basicConfig: BasicConfig, element: ET.Element) -> List[XmlNewVSProjectTemplate]:
res = []
foundElements = element.findall("Template")
for foundElement in foundElements:
res.append(XmlNewVSProjectTemplate(basicConfig, foundElement))
return res
|
StarcoderdataPython
|
1785499
|
<filename>cache_purge_hooks/backends/varnishbackend.py
import logging
import subprocess
from django.conf import settings
logger = logging.getLogger('django.cache_purge_hooks')
VARNISHADM_HOST = getattr(settings, 'VARNISHADM_HOST', 'localhost')
VARNISHADM_PORT = getattr(settings, 'VARNISHADM_PORT', 6082)
VARNISHADM_SECRET = getattr(settings, 'VARNISHADM_SECRET', '/etc/varnish/secret')
VARNISHADM_SITE_DOMAIN = getattr(settings, 'VARNISHADM_SITE_DOMAIN', '.*')
VARNISHADM_BIN = getattr(settings, 'VARNISHADM_ADM_BIN', '/usr/bin/varnishadm')
class VarnishManager(object):
def purge(self, url):
command = 'ban req.http.host ~ "{host}" && req.url ~ "{url}"'.format(
            host=VARNISHADM_SITE_DOMAIN,  # keep as str; bytes would leak a b'...' repr into the command under Python 3
            url=url,
)
self.send_command(command)
def purge_all(self):
self.purge('.*')
def send_command(self, command):
args = [VARNISHADM_BIN, '-S', VARNISHADM_SECRET, '-T', VARNISHADM_HOST+':'+str(VARNISHADM_PORT), command]
try:
subprocess.check_call(args)
except subprocess.CalledProcessError as error:
logger.error('Command "{0}" returned {1}'.format(' '.join(args), error.returncode))
return False
else:
logger.debug('Command "{0}" executed successfully'.format(' '.join(args)))
return True
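# Usage sketch (assumes the varnishadm binary configured via VARNISHADM_ADM_BIN
# is installed and the admin interface is reachable with the configured secret):
#
#   manager = VarnishManager()
#   manager.purge(r'^/articles/.*')   # ban every cached object under /articles/
#   manager.purge_all()               # ban everything cached for this site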
|
StarcoderdataPython
|
165392
|
<reponame>matthaeusheer/uncertify
import logging
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import matplotlib
from uncertify.visualization.plotting import set_matplotlib_rc
from uncertify.common import DATA_DIR_PATH
from typing import List
LABEL_MAP = {
    'rec_err': r'$\ell_{1}$',
    'kl_div': r'$D_{KL}$',
    'elbo': r'$\mathcal{L}$',
    'entropy': r'$H_{\ell_{1}}$',
    'entropy_rec_err_kl_div_elbo': r'$H_{\ell_{1}}, \ell_{1}, D_{KL}, \mathcal{L}$'
}
LOG = logging.getLogger(__name__)
def do_pair_plot_statistics(statistics_dict: dict, dose_statistics: List[str],
dataset_name: str, hue: str = 'is_lesional') -> sns.PairGrid:
"""
Arguments
---------
statistics_dict: dictionary as returned by aggregate_slice_wise_statistics
dose_statistics: statistics to use in the plot
dataset_name: name of the dataset used for file naming
hue: which column in the dataframe to use as hue
"""
sns.set_style({"xtick.direction": "in", "ytick.direction": "in",
"xtick.major.size": 10, "ytick.major.size": 10,
"xtick.minor.size": 7, "ytick.minor.size": 7})
stat_df = pd.DataFrame(statistics_dict)
set_matplotlib_rc()
matplotlib.rcParams.update({'font.size': 18, 'legend.fontsize': 16})
grid = sns.pairplot(stat_df, vars=dose_statistics, corner=True, plot_kws={"s": 10}, palette='viridis',
hue=hue, diag_kws={'shade': False}, diag_kind='kde')
grid.fig.set_size_inches(7, 7)
grid.map_lower(sns.kdeplot, shade=True, thresh=0.05, alpha=0.7)
if hue is not None:
grid._legend.set_title('')
new_labels = ['healthy', 'lesional']
for t, l in zip(grid._legend.texts, new_labels):
t.set_text(l)
# Set nice x and y labels
for ax in grid.axes.flatten():
if ax is not None:
ax.xaxis.set_major_locator(plt.MaxNLocator(3))
old_xlabel = ax.get_xlabel()
old_ylabel = ax.get_ylabel()
if old_xlabel in LABEL_MAP:
ax.set_xlabel(LABEL_MAP[old_xlabel])
if old_ylabel in LABEL_MAP:
ax.set_ylabel(LABEL_MAP[old_ylabel])
grid.tight_layout()
grid.savefig(DATA_DIR_PATH / 'plots' / f'dose_pairplot_{dataset_name}.png')
return grid
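# Hypothetical usage sketch: aggregate_slice_wise_statistics (defined elsewhere
# in uncertify) is assumed to return a dict of equal-length per-slice columns,
# including the 'is_lesional' flag used as hue; the statistic names below match
# the keys of LABEL_MAP.
#
#   statistics_dict = aggregate_slice_wise_statistics(model, data_loader)
#   grid = do_pair_plot_statistics(statistics_dict,
#                                  dose_statistics=['rec_err', 'kl_div', 'elbo', 'entropy'],
#                                  dataset_name='brats17',
#                                  hue='is_lesional')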
|
StarcoderdataPython
|
4813416
|
<filename>backend/test/test_unit/test_communication_utils.py
from unittest.mock import patch
from django.test import TestCase
# Import module
from backend.communication_utils import *
class OSAware(TestCase):
def setUp(self) -> None:
self.data_linux = {'id': 42, 'file_path': '/home/user/test', 'folder': {'path': '/home/user/test_folder'},
'clips': ['/home/user/test_folder/test_clip1.tvf', '/home/user/test_folder/test_clip2.tvf'],
'folders': [{'file_path': '/home/user/test/1'}, {'file_path': '/home/user/test/2'}]}
self.data_windows = {'id': 42, 'file_path': '\\home\\user\\test',
'folder': {'path': '\\home\\user\\test_folder'},
'clips': ['\\home\\user\\test_folder\\test_clip1.tvf',
'\\home\\user\\test_folder\\test_clip2.tvf'],
'folders': [{'file_path': '\\home\\user\\test\\1'},
{'file_path': '\\home\\user\\test\\2'}]}
@patch('backend.communication_utils.os')
def test_linux(self, mock_os):
mock_os.name = 'posix'
mock_os.path.sep = '/'
self.assertEqual(os_aware(self.data_linux), self.data_linux)
self.assertEqual(os_aware(self.data_windows), self.data_linux)
@patch('backend.communication_utils.os')
def test_windows(self, mock_os):
mock_os.name = 'nt'
mock_os.path.sep = '\\'
self.assertEqual(os_aware(self.data_linux), self.data_windows)
self.assertEqual(os_aware(self.data_windows), self.data_windows)
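# Minimal sketch of what the imported os_aware helper appears to do, inferred
# purely from the expectations above (this is NOT the project's actual
# implementation): recursively walk dicts and lists and rewrite both '/' and
# '\\' in string values to the current os.path.sep, leaving other values as-is.
#
# def os_aware(data):
#     if isinstance(data, dict):
#         return {key: os_aware(value) for key, value in data.items()}
#     if isinstance(data, list):
#         return [os_aware(value) for value in data]
#     if isinstance(data, str):
#         return data.replace('/', os.path.sep).replace('\\', os.path.sep)
#     return data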
|
StarcoderdataPython
|
115935
|
<gh_stars>0
N = int(input())
ans = 1e12
# Find the divisors of N; checking up to N ** 0.5 is sufficient
for n in range(1, int(N ** 0.5) + 1):
if N % n == 0:
ans = min(ans, n + N // n - 2)
print(ans)
|
StarcoderdataPython
|