Dataset schema (column, type, value range):

  repo_name         stringlengths   5 to 92
  path              stringlengths   4 to 221
  copies            stringclasses   19 values
  size              stringlengths   4 to 6
  content           stringlengths   766 to 896k
  license           stringclasses   15 values
  hash              int64           -9,223,277,421,539,062,000 to 9,223,102,107B
  line_mean         float64         6.51 to 99.9
  line_max          int64           32 to 997
  alpha_frac        float64         0.25 to 0.96
  autogenerated     bool            1 class
  ratio             float64         1.5 to 13.6
  config_test       bool            2 classes
  has_no_keywords   bool            2 classes
  few_assignments   bool            1 class
barak/autograd
examples/fluidsim/wing.py
1
6136
from __future__ import absolute_import
from __future__ import print_function
import autograd.numpy as np
from autograd import value_and_grad
from scipy.optimize import minimize
import matplotlib.pyplot as plt
import os
from builtins import range

rows, cols = 40, 60

# Fluid simulation code based on
# "Real-Time Fluid Dynamics for Games" by Jos Stam
# http://www.intpowertechcorp.com/GDC03.pdf

def occlude(f, occlusion):
    return f * (1 - occlusion)

def project(vx, vy, occlusion):
    """Project the velocity field to be approximately mass-conserving,
    using a few iterations of Gauss-Seidel."""
    p = np.zeros(vx.shape)
    div = -0.5 * (np.roll(vx, -1, axis=1) - np.roll(vx, 1, axis=1)
                + np.roll(vy, -1, axis=0) - np.roll(vy, 1, axis=0))
    div = make_continuous(div, occlusion)

    for k in range(50):
        p = (div + np.roll(p, 1, axis=1) + np.roll(p, -1, axis=1)
                 + np.roll(p, 1, axis=0) + np.roll(p, -1, axis=0))/4.0
        p = make_continuous(p, occlusion)

    vx = vx - 0.5*(np.roll(p, -1, axis=1) - np.roll(p, 1, axis=1))
    vy = vy - 0.5*(np.roll(p, -1, axis=0) - np.roll(p, 1, axis=0))

    vx = occlude(vx, occlusion)
    vy = occlude(vy, occlusion)
    return vx, vy

def advect(f, vx, vy):
    """Move field f according to x and y velocities (u and v)
    using an implicit Euler integrator."""
    rows, cols = f.shape
    cell_xs, cell_ys = np.meshgrid(np.arange(cols), np.arange(rows))
    center_xs = (cell_xs - vx).ravel()
    center_ys = (cell_ys - vy).ravel()

    # Compute indices of source cells.
    left_ix = np.floor(center_ys).astype(np.int)
    top_ix  = np.floor(center_xs).astype(np.int)
    rw = center_ys - left_ix              # Relative weight of right-hand cells.
    bw = center_xs - top_ix               # Relative weight of bottom cells.
    left_ix  = np.mod(left_ix,     rows)  # Wrap around edges of simulation.
    right_ix = np.mod(left_ix + 1, rows)
    top_ix   = np.mod(top_ix,      cols)
    bot_ix   = np.mod(top_ix  + 1, cols)

    # A linearly-weighted sum of the 4 surrounding cells.
    flat_f = (1 - rw) * ((1 - bw)*f[left_ix,  top_ix] + bw*f[left_ix,  bot_ix]) \
                 + rw * ((1 - bw)*f[right_ix, top_ix] + bw*f[right_ix, bot_ix])
    return np.reshape(flat_f, (rows, cols))

def make_continuous(f, occlusion):
    non_occluded = 1 - occlusion
    num = np.roll(f, 1, axis=0) * np.roll(non_occluded, 1, axis=0)\
        + np.roll(f, -1, axis=0) * np.roll(non_occluded, -1, axis=0)\
        + np.roll(f, 1, axis=1) * np.roll(non_occluded, 1, axis=1)\
        + np.roll(f, -1, axis=1) * np.roll(non_occluded, -1, axis=1)
    den = np.roll(non_occluded, 1, axis=0)\
        + np.roll(non_occluded, -1, axis=0)\
        + np.roll(non_occluded, 1, axis=1)\
        + np.roll(non_occluded, -1, axis=1)
    return f * non_occluded + (1 - non_occluded) * num / (den + 0.001)

def sigmoid(x):
    return 0.5*(np.tanh(x) + 1.0)   # Output ranges from 0 to 1.

def simulate(vx, vy, num_time_steps, occlusion, ax=None, render=False):
    occlusion = sigmoid(occlusion)

    # Disallow occlusion outside a certain area.
    mask = np.zeros((rows, cols))
    mask[10:30, 10:30] = 1.0
    occlusion = occlusion * mask

    # Initialize smoke bands.
    red_smoke = np.zeros((rows, cols))
    red_smoke[rows/4:rows/2] = 1
    blue_smoke = np.zeros((rows, cols))
    blue_smoke[rows/2:3*rows/4] = 1

    print("Running simulation...")
    vx, vy = project(vx, vy, occlusion)
    for t in range(num_time_steps):
        plot_matrix(ax, red_smoke, occlusion, blue_smoke, t, render)
        vx_updated = advect(vx, vx, vy)
        vy_updated = advect(vy, vx, vy)
        vx, vy = project(vx_updated, vy_updated, occlusion)
        red_smoke = advect(red_smoke, vx, vy)
        red_smoke = occlude(red_smoke, occlusion)
        blue_smoke = advect(blue_smoke, vx, vy)
        blue_smoke = occlude(blue_smoke, occlusion)
    plot_matrix(ax, red_smoke, occlusion, blue_smoke, num_time_steps, render)
    return vx, vy

def plot_matrix(ax, r, g, b, t, render=False):
    if ax:
        plt.cla()
        ax.imshow(np.concatenate((r[...,np.newaxis], g[...,np.newaxis], b[...,np.newaxis]), axis=2))
        ax.set_xticks([])
        ax.set_yticks([])
        plt.draw()
        if render:
            plt.savefig('step{0:03d}.png'.format(t), bbox_inches='tight')
        plt.pause(0.001)

if __name__ == '__main__':
    simulation_timesteps = 20

    print("Loading initial and target states...")
    init_vx = np.ones((rows, cols))
    init_vy = np.zeros((rows, cols))

    # Initialize the occlusion to be a block.
    init_occlusion = -np.ones((rows, cols))
    init_occlusion[15:25, 15:25] = 0.0
    init_occlusion = init_occlusion.ravel()

    def drag(vx): return np.mean(init_vx - vx)
    def lift(vy): return np.mean(vy - init_vy)

    def objective(params):
        cur_occlusion = np.reshape(params, (rows, cols))
        final_vx, final_vy = simulate(init_vx, init_vy, simulation_timesteps, cur_occlusion)
        return -lift(final_vy) / drag(final_vx)

    # Specify gradient of objective function using autograd.
    objective_with_grad = value_and_grad(objective)

    fig = plt.figure(figsize=(8,8))
    ax = fig.add_subplot(111, frameon=False)

    def callback(weights):
        cur_occlusion = np.reshape(weights, (rows, cols))
        simulate(init_vx, init_vy, simulation_timesteps, cur_occlusion, ax)

    print("Rendering initial flow...")
    callback(init_occlusion)

    print("Optimizing initial conditions...")
    result = minimize(objective_with_grad, init_occlusion, jac=True, method='CG',
                      options={'maxiter':50, 'disp':True}, callback=callback)

    print("Rendering optimized flow...")
    final_occlusion = np.reshape(result.x, (rows, cols))
    simulate(init_vx, init_vy, simulation_timesteps, final_occlusion, ax, render=True)

    print("Converting frames to an animated GIF...")  # Using imagemagick.
    os.system("convert -delay 5 -loop 0 step*.png "
              "-delay 250 step{0:03d}.png wing.gif".format(simulation_timesteps))
    os.system("rm step*.png")
mit
7,440,294,014,902,484,000
36.414634
100
0.608703
false
2.987342
false
false
false
MarvinTeichmann/KittiBox
tests/test_anno_load.py
1
14271
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import itertools
import json
import logging
import os
import sys
import random
from random import shuffle

import numpy as np

import scipy as scp
import scipy.misc

sys.path.insert(1, '../incl')

from scipy.misc import imread, imresize
from utils.data_utils import (annotation_jitter, annotation_to_h5)
from utils.annolist import AnnotationLib as AnnoLib

import threading

from collections import namedtuple

import tensorflow as tf

flags = tf.app.flags
FLAGS = flags.FLAGS

tf.app.flags.DEFINE_boolean(
    'save', False, ('Whether to save the run. In case --nosave (default) '
                    'output will be saved to the folder TV_DIR_RUNS/debug, '
                    'hence it will get overwritten by further runs.'))

flags.DEFINE_string('name', None, 'Append a name Tag to run.')

fake_anno = namedtuple('fake_anno_object', ['rects'])

from PIL import Image, ImageDraw

rect = namedtuple('Rectangel', ['left', 'top', 'right', 'bottom'])


def _get_ignore_rect(x, y, cell_size):
    left = x*cell_size
    right = (x+1)*cell_size
    top = y*cell_size
    bottom = (y+1)*cell_size

    return rect(left, top, right, bottom)


def _rescale_boxes(current_shape, anno, target_height, target_width):
    x_scale = target_width / float(current_shape[1])
    y_scale = target_height / float(current_shape[0])
    for r in anno.rects:
        assert r.x1 < r.x2
        r.x1 *= x_scale
        r.x2 *= x_scale
        assert r.y1 < r.y2
        r.y1 *= y_scale
        r.y2 *= y_scale
    return anno


def read_kitti_anno(label_file):
    """ Reads a kitti annotation file.

    Args:
      label_file: Path to file

    Returns:
      Lists of rectangels: Cars and don't care area.
    """
    labels = [line.rstrip().split(' ') for line in open(label_file)]
    rect_list = []
    for label in labels:
        if not (label[0] == 'Car' or label[0] == 'Van' or
                label[0] == 'DontCare'):
            continue
        if label[0] == 'DontCare':
            class_id = -1
        else:
            class_id = 1
        object_rect = AnnoLib.AnnoRect(
            x1=float(label[4]), y1=float(label[5]),
            x2=float(label[6]), y2=float(label[7]))
        assert object_rect.x1 < object_rect.x2
        assert object_rect.y1 < object_rect.y2
        object_rect.classID = class_id
        rect_list.append(object_rect)
    return rect_list


def _load_idl_tf(idlfile, hypes, jitter=False, random_shuffel=True):
    """Take the idlfile and net configuration and create a generator
    that outputs a jittered version of a random image from the annolist
    that is mean corrected."""

    annolist = AnnoLib.parse(idlfile)
    annos = []
    for anno in annolist:
        anno.imageName = os.path.join(
            os.path.dirname(os.path.realpath(idlfile)), anno.imageName)
        annos.append(anno)
    random.seed(0)
    if hypes['data']['truncate_data']:
        annos = annos[:10]
    for epoch in itertools.count():
        if random_shuffel:
            random.shuffle(annos)
        for anno in annos:
            im = imread(anno.imageName)
            if im.shape[2] == 4:
                im = im[:, :, :3]
            if im.shape[0] != hypes["image_height"] or \
               im.shape[1] != hypes["image_width"]:
                if epoch == 0:
                    anno = _rescale_boxes(im.shape, anno,
                                          hypes["image_height"],
                                          hypes["image_width"])
                im = imresize(
                    im, (hypes["image_height"], hypes["image_width"]),
                    interp='cubic')
            if jitter:
                jitter_scale_min = 0.9
                jitter_scale_max = 1.1
                jitter_offset = 16
                im, anno = annotation_jitter(
                    im, anno, target_width=hypes["image_width"],
                    target_height=hypes["image_height"],
                    jitter_scale_min=jitter_scale_min,
                    jitter_scale_max=jitter_scale_max,
                    jitter_offset=jitter_offset)

            boxes, flags = annotation_to_h5(hypes,
                                            anno,
                                            hypes["grid_width"],
                                            hypes["grid_height"],
                                            hypes["rnn_len"])

            boxes = boxes.reshape([hypes["grid_height"],
                                   hypes["grid_width"], 4])
            flags = flags.reshape(hypes["grid_height"], hypes["grid_width"])

            yield {"image": im, "boxes": boxes, "flags": flags,
                   "rects": anno.rects, "anno": anno}


def _generate_mask(hypes, ignore_rects):

    width = hypes["image_width"]
    height = hypes["image_height"]
    grid_width = hypes["grid_width"]
    grid_height = hypes["grid_height"]

    mask = np.ones([grid_height, grid_width])

    for rect in ignore_rects:
        left = int(rect.x1/width*grid_width)
        right = int(rect.x2/width*grid_width)
        top = int(rect.y1/height*grid_height)
        bottom = int(rect.y2/height*grid_height)
        for x in range(left, right+1):
            for y in range(top, bottom+1):
                mask[y, x] = 0

    return mask


def _load_kitti_txt(kitti_txt, hypes, jitter=False, random_shuffel=True):
    """Take the txt file and net configuration and create a generator
    that outputs a jittered version of a random image from the annolist
    that is mean corrected."""

    base_path = os.path.realpath(os.path.dirname(kitti_txt))
    files = [line.rstrip() for line in open(kitti_txt)]
    if hypes['data']['truncate_data']:
        files = files[:10]
    random.seed(0)
    for epoch in itertools.count():
        if random_shuffel:
            random.shuffle(files)
        for file in files:
            image_file, gt_image_file = file.split(" ")
            image_file = os.path.join(base_path, image_file)
            assert os.path.exists(image_file), \
                "File does not exist: %s" % image_file
            gt_image_file = os.path.join(base_path, gt_image_file)
            assert os.path.exists(gt_image_file), \
                "File does not exist: %s" % gt_image_file

            rect_list = read_kitti_anno(gt_image_file)

            anno = fake_anno(rect_list)

            im = scp.misc.imread(image_file)
            if im.shape[2] == 4:
                im = im[:, :, :3]

            if im.shape[0] != hypes["image_height"] or \
               im.shape[1] != hypes["image_width"]:
                if epoch == 0:
                    anno = _rescale_boxes(im.shape, anno,
                                          hypes["image_height"],
                                          hypes["image_width"])
                im = imresize(
                    im, (hypes["image_height"], hypes["image_width"]),
                    interp='cubic')
            if jitter:
                jitter_scale_min = 0.9
                jitter_scale_max = 1.1
                jitter_offset = 16
                im, anno = annotation_jitter(
                    im, anno, target_width=hypes["image_width"],
                    target_height=hypes["image_height"],
                    jitter_scale_min=jitter_scale_min,
                    jitter_scale_max=jitter_scale_max,
                    jitter_offset=jitter_offset)

            pos_list = [rect for rect in anno.rects if rect.classID == 1]
            pos_anno = fake_anno(pos_list)

            boxes, confs = annotation_to_h5(hypes,
                                            pos_anno,
                                            hypes["grid_width"],
                                            hypes["grid_height"],
                                            hypes["rnn_len"])

            mask_list = [rect for rect in anno.rects if rect.classID == -1]
            mask = _generate_mask(hypes, mask_list)

            boxes = boxes.reshape([hypes["grid_height"],
                                   hypes["grid_width"], 4])
            confs = confs.reshape(hypes["grid_height"], hypes["grid_width"])

            yield {"image": im, "boxes": boxes, "confs": confs,
                   "rects": pos_list, "mask": mask}


def _make_sparse(n, d):
    v = np.zeros((d,), dtype=np.float32)
    v[n] = 1.
    return v


def _load_data_gen(hypes, phase, jitter):
    grid_size = hypes['grid_width'] * hypes['grid_height']

    data_file = hypes["data"]['%s_idl' % phase]
    data_dir = hypes['dirs']['data_dir']
    data_file = os.path.join(data_dir, data_file)

    data = _load_idl_tf(data_file, hypes,
                        jitter={'train': jitter, 'val': False}[phase])

    for d in data:
        output = {}

        rnn_len = hypes["rnn_len"]
        flags = d['flags'][0, :, 0, 0:rnn_len, 0]
        boxes = np.transpose(d['boxes'][0, :, :, 0:rnn_len, 0], (0, 2, 1))
        assert(flags.shape == (grid_size, rnn_len))
        assert(boxes.shape == (grid_size, rnn_len, 4))

        output['image'] = d['image']
        confs = [[_make_sparse(int(detection), d=hypes['num_classes'])
                  for detection in cell] for cell in flags]
        output['confs'] = np.array(confs)
        output['boxes'] = boxes
        output['flags'] = flags

        yield output


def test_new_kitti():
    idlfile = "/home/mifs/mttt2/cvfs/DATA/KittiBox/train_3.idl"
    kitti_txt = "/home/mifs/mttt2/cvfs/DATA/KittiBox/train.txt"

    with open('../hypes/kittiBox.json', 'r') as f:
        logging.info("f: %s", f)
        hypes = json.load(f)

    hypes["rnn_len"] = 1
    hypes["image_height"] = 200
    hypes["image_width"] = 800

    gen1 = _load_kitti_txt(kitti_txt, hypes, random_shuffel=False)
    gen2 = _load_idl_tf(idlfile, hypes, random_shuffel=False)

    print('testing generators')

    for i in range(20):
        data1 = gen1.next()
        data2 = gen2.next()
        rects1 = data1['rects']
        rects2 = data2['rects']

        assert len(rects1) <= len(rects2)

        if not len(rects1) == len(rects2):
            print('ignoring flags')
            continue
        else:
            print('comparing flags')

        assert(np.all(data1['image'] == data2['image']))
        # assert(np.all(data1['boxes'] == data2['boxes']))
        if np.all(data1['flags'] == data2['flags']):
            print('same')
        else:
            print('diff')


def draw_rect(draw, rect, color):
    rect_cords = ((rect.left, rect.top), (rect.left, rect.bottom),
                  (rect.right, rect.bottom), (rect.right, rect.top),
                  (rect.left, rect.top))
    draw.line(rect_cords, fill=color, width=2)


def draw_encoded(image, confs, mask=None, rects=None, cell_size=32):
    image = image.astype('uint8')
    im = Image.fromarray(image)

    shape = confs.shape
    if mask is None:
        mask = np.ones(shape)

    # overimage = mycm(confs_pred, bytes=True)

    poly = Image.new('RGBA', im.size)
    pdraw = ImageDraw.Draw(poly)

    for y in range(shape[0]):
        for x in range(shape[1]):
            outline = (0, 0, 0, 255)
            if confs[y, x]:
                fill = (0, 255, 0, 100)
            else:
                fill = (0, 0, 0, 0)
            rect = _get_ignore_rect(x, y, cell_size)
            pdraw.rectangle(rect, fill=fill, outline=fill)
            if not mask[y, x]:
                pdraw.line(((rect.left, rect.bottom), (rect.right, rect.top)),
                           fill=(0, 0, 0, 255), width=2)
                pdraw.line(((rect.left, rect.top), (rect.right, rect.bottom)),
                           fill=(0, 0, 0, 255), width=2)

    color = (0, 0, 255)

    for rect in rects:
        rect_cords = ((rect.x1, rect.y1), (rect.x1, rect.y2),
                      (rect.x2, rect.y2), (rect.x2, rect.y1),
                      (rect.x1, rect.y1))
        pdraw.line(rect_cords, fill=color, width=2)

    im.paste(poly, mask=poly)

    return np.array(im)


def draw_kitti_jitter():
    idlfile = "/home/mifs/mttt2/cvfs/DATA/KittiBox/train_3.idl"
    kitti_txt = "/home/mifs/mttt2/cvfs/DATA/KittiBox/train.txt"

    with open('../hypes/kittiBox.json', 'r') as f:
        logging.info("f: %s", f)
        hypes = json.load(f)

    hypes["rnn_len"] = 1
    gen = _load_kitti_txt(kitti_txt, hypes, random_shuffel=False)

    data = gen.next()

    for i in range(20):
        data = gen.next()
        image = draw_encoded(image=data['image'], confs=data['confs'],
                             rects=data['rects'], mask=data['mask'])
        scp.misc.imshow(image)
        scp.misc.imshow(data['mask'])


def draw_idl():
    idlfile = "/home/mifs/mttt2/cvfs/DATA/KittiBox/train_3.idl"
    kitti_txt = "/home/mifs/mttt2/cvfs/DATA/KittiBox/train.txt"

    with open('../hypes/kittiBox.json', 'r') as f:
        logging.info("f: %s", f)
        hypes = json.load(f)

    hypes["rnn_len"] = 1
    gen = _load_idl_tf(idlfile, hypes, random_shuffel=False)

    data = gen.next()

    for i in range(20):
        data = gen.next()
        image = draw_encoded(image=data['image'], confs=data['flags'],
                             rects=data['rects'])
        scp.misc.imshow(image)


def draw_both():
    idlfile = "/home/mifs/mttt2/cvfs/DATA/KittiBox/train_3.idl"
    kitti_txt = "/home/mifs/mttt2/cvfs/DATA/KittiBox/train.txt"

    with open('../hypes/kittiBox.json', 'r') as f:
        logging.info("f: %s", f)
        hypes = json.load(f)

    hypes["rnn_len"] = 1

    gen1 = _load_idl_tf(idlfile, hypes, random_shuffel=False)
    gen2 = _load_kitti_txt(kitti_txt, hypes, random_shuffel=False)

    data1 = gen1.next()
    data2 = gen2.next()

    for i in range(20):
        data1 = gen1.next()
        data2 = gen2.next()
        image1 = draw_encoded(image=data1['image'], confs=data1['flags'],
                              rects=data1['rects'])
        image2 = draw_encoded(image=data2['image'], confs=data2['confs'],
                              rects=data2['rects'], mask=data2['mask'])
        scp.misc.imshow(image1)
        scp.misc.imshow(image2)


if __name__ == '__main__':
    draw_both()
mit
-198,509,635,553,043,260
31.434091
78
0.529676
false
3.366596
false
false
false
baayso/learn-python3
basic/the_list.py
1
1216
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# A list is an ordered collection; elements can be added and removed at any time.
classmates = ['Michael', 'Bob', 'Tracy']
print(classmates)
print(len(classmates))
print(classmates[0])
print(classmates[1])
print(classmates[2])
print(classmates[-1])
print(classmates[-2])
print(classmates[-3])
print()

# Append an element to the end.
classmates.append('Adam')
print(classmates)
print()

# Insert an element at a given position.
classmates.insert(1, 'Jack')
print(classmates)
print()

# Remove the element at the end of the list.
classmates.pop()
print(classmates)
print()

# Remove the element at a given position.
classmates.pop(1)
print(classmates)
print()

# Replace an element by assigning directly to its index.
classmates[1] = 'Sarah'
print(classmates)
print()

# Elements of a list may have different data types.
L = ['Apple', 123, True]
print(L)
print()

# A list element can itself be another list.
s = ['python', 'java', ['asp', 'php'], 'scheme']
print(len(s))
print(s)
print(len(s[2]))
print(s[2][1])
print()

# Note that s has only 4 elements; s[2] is itself a list.
p = ['asp', 'php']
s = ['python', 'java', p, 'scheme']
print(len(s))
print(s)
print()

# An empty list.
L = []
print(len(L))
apache-2.0
-2,483,030,746,383,875,600
12.71831
48
0.676591
false
1.723894
false
true
false
studenteternal/SoftLayer
get_list.py
1
1502
#!/usr/bin/python

import yaml
import SoftLayer
from pprint import pprint

credsFile = open("softcreds.yaml", 'r')
creds = yaml.load(credsFile)
#print creds['username']
#print creds['api_key']

client = SoftLayer.Client(username=(creds['username']), api_key=(creds['api_key']))

n = 1
count = 1
kill_file = open("kill-file", 'a')
while n < 2:
    server_name = 'jbsampsobuntutemp' + str(count)
    n = n + 1
    server_return = client['Virtual_Guest'].createObject({
        'datacenter': {'name': 'mex01'},
        'hostname': server_name,
        'domain': 'test.com',
        'startCpus': 1,
        'maxMemory': 4096,
        'hourlyBillingFlag': 'true',
        'localDiskFlag': 'false',
        'networkComponents': [{'maxSpeed': 1000}],
        'privateNetworkOnlyFlag': 'false',
        'blockDevices': [{'device': '0', 'diskImage': {'capacity': 100}}],
        'operatingSystemReferenceCode': 'UBUNTU_latest',
        'primaryBackendNetworkComponent': {'networkVlan': {'id': 773482}},
#        'tags': 'jbsampso,temp',
#        'postInstallScriptUri': 'https://mex01.objectstorage.softlayer.net/v1/AUTH_3d7f3c03-9b34-418d-96f1-09a45712c21c/Jbsampso_startup_scripts/post_test.sh',
        })
    count = count + 1
    kill_file.write(str(server_return['id']) + '\n')
#    print server_return
#    server_return = server_return.split(',')
#    print server_return[15]
#    client['Virtual_Guest'].setUserMetadata(id=server_return['id']{
#        'metadata': {'jbsampso, temp'}}

kill_file.close()
credsFile.close()
#pprint( server_return )
#print server_return['id']
mit
-2,538,125,823,673,107,500
28.45098
154
0.661784
false
2.839319
false
false
false
beia/beialand
projects/CitisimWebApp/app/src/users.py
1
2358
#!flask/bin/python

from flask_bcrypt import Bcrypt
from flask_login import UserMixin
from main import login_manager
from main import mydb
import MySQLdb


@login_manager.user_loader
def load_user(user_id):
    user = User()
    return user.getUserByID(int(user_id))


class User(UserMixin):
    id = None
    username = None
    email = None
    password = None

    def __init__(self):
        print "Empty constructor"

    def getUserByID(self, id):
        mycursor = mydb.connection.cursor(MySQLdb.cursors.DictCursor)
        mycursor.execute("select * from Users u where u.userID = " + str(id))
        row = mycursor.fetchone()
        if(row is None):
            return None
        self.id = row['userID']
        self.username = row['userName']
        self.email = row['userEmail']
        self.password = row['userPass']
        return self

    def getUserByEmail(self, email):
        mycursor = mydb.connection.cursor(MySQLdb.cursors.DictCursor)
        mycursor.execute("select * from Users u where u.userEmail = '" + str(email) + "'")
        row = mycursor.fetchone()
        if(row is None):
            return None
        self.id = row['userID']
        self.username = row['userName']
        self.email = row['userEmail']
        self.password = row['userPass']
        return self

    def userAuthentication(self, email, password):
        mycursor = mydb.connection.cursor(MySQLdb.cursors.DictCursor)
        mycursor.execute("select * from Users u where u.userEmail = '" + str(email) + "'")
        row = mycursor.fetchone()
        if(row is None):
            return False
        bcrypt = Bcrypt()
        return bcrypt.check_password_hash(row['userPass'], password)

    def checkIfEmailExists(self, email):
        mycursor = mydb.connection.cursor(MySQLdb.cursors.DictCursor)
        mycursor.execute("select * from Users u where u.userEmail = '" + str(email) + "'")
        row = mycursor.fetchone()
        if(row is None):
            return False
        return True

    def addUser(self, name, email, password):
        bcrypt = Bcrypt()
        mycursor = mydb.connection.cursor(MySQLdb.cursors.DictCursor)
        mycursor.execute("insert into Users (userName, userEmail, userPass) values ('"+str(name)+"','"+str(email)+"','"+bcrypt.generate_password_hash(password)+"')")
        mydb.connection.commit()
gpl-3.0
5,566,151,524,797,660,000
29.623377
165
0.621713
false
3.778846
false
false
false
gitterHQ/ansible
v2/ansible/playbook/attribute.py
1
1053
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

#from ansible.common.errors import AnsibleError

class Attribute(object):

    def __init__(self, isa=None, private=False, default=None):
        self.isa = isa
        self.private = private
        self.value = None
        self.default = default

    def __call__(self):
        return self.value

class FieldAttribute(Attribute):
    pass
gpl-3.0
8,832,590,789,999,572,000
30.909091
70
0.720798
false
4.003802
false
false
false
abusesa/idiokit
idiokit/xmpp/jid.py
1
6351
# Module for XMPP JID processing as defined in RFC 3920
# (http://www.ietf.org/rfc/rfc3920.txt) and RFC 3454
# (http://www.ietf.org/rfc/rfc3454.txt).
#
# This module was originally written using both the above RFCs and the
# xmppstringprep module of the pyxmpp package
# (http://pyxmpp.jajcus.net/) as well as the
# twisted.words.protocols.jabber.xmpp_stringprep module of Twisted
# (http://twistedmatrix.com/) as a reference.

import re
import threading
import stringprep
from encodings import idna
from unicodedata import ucd_3_2_0 as unicodedata


class JIDError(Exception):
    pass


def check_prohibited_and_unassigned(chars, prohibited_tables):
    in_table_a1 = stringprep.in_table_a1

    for pos, ch in enumerate(chars):
        if any(table(ch) for table in prohibited_tables):
            raise JIDError("prohibited character {0!r} at index {1}".format(ch, pos))
        if in_table_a1(ch):
            raise JIDError("unassigned character {0!r} at index {1}".format(ch, pos))


def check_bidirectional(chars):
    in_table_d1 = stringprep.in_table_d1
    in_table_d2 = stringprep.in_table_d2

    # RFC 3454: If a string contains any RandALCat character, the
    # string MUST NOT contain any LCat character.
    if not any(in_table_d1(ch) for ch in chars):
        return

    if any(in_table_d2(ch) for ch in chars):
        raise JIDError("string contains RandALCat and LCat characters")

    # RFC 3454: If a string contains any RandALCat character, a
    # RandALCat character MUST be the first character of the string,
    # and a RandALCat character MUST be the last character of the
    # string.
    if not (in_table_d1(chars[0]) and in_table_d1(chars[-1])):
        raise JIDError("string must start and end with RandALCat characters")


NODEPREP_PROHIBITED = (
    stringprep.in_table_c11,
    stringprep.in_table_c12,
    stringprep.in_table_c21,
    stringprep.in_table_c22,
    stringprep.in_table_c3,
    stringprep.in_table_c4,
    stringprep.in_table_c5,
    stringprep.in_table_c6,
    stringprep.in_table_c7,
    stringprep.in_table_c8,
    stringprep.in_table_c9,
    frozenset(u"\"&'/:<>@").__contains__
)


def nodeprep(string):
    in_table_b1 = stringprep.in_table_b1
    map_table_b2 = stringprep.map_table_b2

    string = u"".join(map_table_b2(ch) for ch in string if not in_table_b1(ch))
    string = unicodedata.normalize("NFKC", string)

    check_prohibited_and_unassigned(string, NODEPREP_PROHIBITED)
    check_bidirectional(string)

    return string


RESOURCEPREP_PROHIBITED = (
    stringprep.in_table_c12,
    stringprep.in_table_c21,
    stringprep.in_table_c22,
    stringprep.in_table_c3,
    stringprep.in_table_c4,
    stringprep.in_table_c5,
    stringprep.in_table_c6,
    stringprep.in_table_c7,
    stringprep.in_table_c8,
    stringprep.in_table_c9
)


def resourceprep(string):
    in_table_b1 = stringprep.in_table_b1

    string = u"".join(ch for ch in string if not in_table_b1(ch))
    string = unicodedata.normalize("NFKC", string)

    check_prohibited_and_unassigned(string, RESOURCEPREP_PROHIBITED)
    check_bidirectional(string)

    return string


JID_REX = re.compile(r"^(?:(.*?)@)?([^\.\/]+(?:\.[^\.\/]+)*)(?:/(.*))?$", re.U)


def split_jid(jid):
    match = JID_REX.match(jid)
    if not match:
        raise JIDError("not a valid JID")
    return match.groups()


def check_length(identifier, value):
    if len(value) > 1023:
        raise JIDError("{0} identifier too long".format(identifier))
    return value


def prep_node(node):
    if not node:
        return None
    node = nodeprep(node)
    return check_length("node", node)


def prep_resource(resource):
    if not resource:
        return None
    resource = resourceprep(resource)
    return check_length("resource", resource)


def prep_domain(domain):
    labels = domain.split(".")
    try:
        labels = map(idna.nameprep, labels)
        labels = map(idna.ToASCII, labels)
    except UnicodeError as ue:
        raise JIDError("not an internationalized label: {0}".format(ue))
    labels = map(idna.ToUnicode, labels)
    domain = ".".join(labels)

    return check_length("domain", domain)


def unicodify(item):
    if item is None:
        return None
    return unicode(item)


class JID(object):
    cache = dict()
    cache_size = 2 ** 14
    cache_lock = threading.Lock()

    __slots__ = "_node", "_domain", "_resource"

    node = property(lambda x: x._node)
    domain = property(lambda x: x._domain)
    resource = property(lambda x: x._resource)

    def __new__(cls, node=None, domain=None, resource=None):
        node = unicodify(node)
        domain = unicodify(domain)
        resource = unicodify(resource)

        with cls.cache_lock:
            cache_key = node, domain, resource
            if cache_key in cls.cache:
                return cls.cache[cache_key]

        if node is None and domain is None:
            raise JIDError("either a full JID or at least a domain expected")
        elif domain is None:
            if resource is not None:
                raise JIDError("resource not expected with a full JID")
            node, domain, resource = split_jid(node)

        obj = super(JID, cls).__new__(cls)
        obj._node = prep_node(node)
        obj._domain = prep_domain(domain)
        obj._resource = prep_resource(resource)

        with cls.cache_lock:
            if len(cls.cache) >= cls.cache_size:
                cls.cache.clear()
            cls.cache[cache_key] = obj

        return obj

    def bare(self):
        return JID(self.node, self.domain)

    def __reduce__(self):
        return JID, (self.node, self.domain, self.resource)

    def __eq__(self, other):
        if not isinstance(other, JID):
            return NotImplemented
        return self is other or unicode(self) == unicode(other)

    def __ne__(self, other):
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __hash__(self):
        return hash(unicode(self))

    def __repr__(self):
        return "{0}({1!r})".format(type(self).__name__, unicode(self))

    def __unicode__(self):
        jid = self.domain

        if self.node is not None:
            jid = self.node + "@" + jid
        if self.resource is not None:
            jid = jid + "/" + self.resource

        return jid
mit
-8,818,032,369,330,515,000
27.608108
85
0.634861
false
3.374601
false
false
false
moustakas/impy
teaching/siena_class_roster.py
1
3362
import requests
#import BeautifulSoup
from bs4 import BeautifulSoup
import HTMLParser
from HTMLParser import HTMLParser
import sys
import os

# Open the file.
r = open(sys.argv[1])

if not os.path.isdir('Detail_Class_List_files'):
    os.rename('Detail Class List_files', 'Detail_Class_List_files')

# Try to parse the webpage by looking for the tables.
soup = BeautifulSoup(r)

print "\documentclass{article}"
print "\usepackage{graphicx}"
print "\usepackage{subfig}"
print "\hoffset=-1.50in"
print "\setlength{\\textwidth}{7.5in}"
print "\setlength{\\textheight}{9in}"
print "\setlength{\\voffset}{0pt}"
print "\setlength{\\topmargin}{0pt}"
print "\setlength{\headheight}{0pt}"
print "\setlength{\headsep}{0pt}"

h2s = soup.find_all('h2')
caption = 'Default'
for h in h2s:
    if h.string.find('Class Roster For')>=0:
        caption = h.string

tables = soup.find_all('table')
icount = 0
closed_figure = False
for table in tables:
    if table['class'][0]=='datadisplaytable':
        rows = table.findAll('tr')
        image = None
        name = None
        for row in rows:
            cols = row.findAll('td')
            for col in cols:
                img = col.findAll('img')
                a = col.findAll('a')
                #if len(a)>0:
                #    import pdb ; pdb.set_trace()
                if len(img)>0 and img[0]['src'].find('jpg')>=0:
                    image = img[0]['src']
                    image = image.replace(' ','_').replace('%20', '_')
                    if not os.path.isfile(image):
                        import pdb ; pdb.set_trace()
                    #if os.path.isfile(image):
                    #    import pdb ; pdb.set_trace()
                if len(a)>0 and 'mailto' in a[0]['href']:
                    name = a[0]['target']
                #if len(a)>0 and a[0]['class']==['leftaligntext']:
                #    name = a[0].string
                #    print(image, a[0].string)
                #import pdb ; pdb.set_trace()
            if name is not None and image is not None:
                if icount%25==0:
                    if icount > 0:
                        print "\\clearpage"
                    else:
                        print "\\begin{document}"
                    print "\\begin{figure}"
                    print "\centering"
                    closed_figure = False
                if os.stat(image).st_size < 250:
                    #image = './file_not_found.jpg'
                    image = './smiley.png'
                if icount%5==4:
                    print "\subfloat[%s]{\includegraphics[height=0.19\\textwidth]{%s}}\\\\" % (name,image)
                else:
                    print "\subfloat[%s]{\includegraphics[height=0.19\\textwidth]{%s}}\\hfill" % (name,image)
                image = None
                name = None
                if icount%25==24:
                    print "\caption{%s}" % (caption)
                    print "\end{figure}"
                    closed_figure = True
                icount += 1

if not closed_figure:
    print "\caption{%s}" % (caption)
    print "\end{figure}"
print "\end{document}"
gpl-2.0
-457,119,040,262,636,200
29.844037
113
0.467281
false
4.1
false
false
false
facebookexperimental/eden
eden/integration/snapshot/verify.py
1
9902
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.

# pyre-strict

import abc
import os
import stat as stat_mod
import typing
from pathlib import Path
from typing import Dict, Iterator, List, Mapping, Optional, TypeVar, Union

from eden.integration.lib import hgrepo


_AnyPath = Union[Path, str]


class _DefaultObject:
    pass


_DEFAULT_OBJECT: _DefaultObject = _DefaultObject()


class ExpectedFileBase(metaclass=abc.ABCMeta):
    def __init__(
        self, path: _AnyPath, contents: bytes, perms: int, file_type: int
    ) -> None:
        self.path: Path = Path(path)
        self.contents: bytes = contents
        self.permissions: int = perms
        self.file_type: int = file_type

    def verify(
        self, verifier: "SnapshotVerifier", path: Path, stat_info: os.stat_result
    ) -> None:
        found_perms = stat_mod.S_IMODE(stat_info.st_mode)
        if found_perms != self.permissions:
            verifier.error(
                f"{self.path}: expected permissions to be {self.permissions:#o}, "
                f"found {found_perms:#o}"
            )
        found_file_type = stat_mod.S_IFMT(stat_info.st_mode)
        if found_file_type != self.file_type:
            verifier.error(
                f"{self.path}: expected file type to be {self.file_type:#o}, "
                f"found {found_file_type:#o}"
            )
        else:
            self.verify_contents(verifier, path)

    @abc.abstractmethod
    def verify_contents(self, verifier: "SnapshotVerifier", path: Path) -> None:
        pass

    def _error(self, msg: str) -> None:
        raise ValueError(msg)


class ExpectedFile(ExpectedFileBase):
    def __init__(self, path: _AnyPath, contents: bytes, perms: int = 0o644) -> None:
        super().__init__(path, contents, perms, stat_mod.S_IFREG)

    def verify_contents(self, verifier: "SnapshotVerifier", path: Path) -> None:
        with path.open("rb") as f:
            actual_contents = f.read()
        if actual_contents != self.contents:
            verifier.error(
                f"file contents mismatch for {self.path}:\n"
                f"expected: {self.contents!r}\n"
                f"actual: {actual_contents!r}"
            )


class ExpectedSymlink(ExpectedFileBase):
    def __init__(self, path: _AnyPath, contents: bytes, perms: int = 0o777) -> None:
        super().__init__(path, contents, perms, stat_mod.S_IFLNK)

    def verify_contents(self, verifier: "SnapshotVerifier", path: Path) -> None:
        actual_contents = os.readlink(bytes(path))
        if actual_contents != self.contents:
            verifier.error(
                f"symlink contents mismatch for {self.path}:\n"
                f"expected: {self.contents!r}\n"
                f"actual: {actual_contents!r}"
            )


class ExpectedSocket(ExpectedFileBase):
    def __init__(self, path: _AnyPath, perms: int = 0o755) -> None:
        super().__init__(path, b"", perms, stat_mod.S_IFSOCK)

    def verify_contents(self, verifier: "SnapshotVerifier", path: Path) -> None:
        pass


_ExpectedFile = TypeVar("_ExpectedFile", bound=ExpectedFileBase)


class ExpectedFileSet(Mapping[Path, ExpectedFileBase]):
    """
    ExpectedFileSet is basically a container of ExpectedFileBase objects,
    but also provides some helper methods for accessing and updating entries
    by path.
    """

    def __init__(self) -> None:
        self._entries: Dict[Path, ExpectedFileBase] = {}

    def __len__(self) -> int:
        return len(self._entries)

    def __iter__(self) -> Iterator[Path]:
        return iter(self._entries.keys())

    def __getitem__(self, path: _AnyPath) -> ExpectedFileBase:
        key = Path(path)
        return self._entries[key]

    def __delitem__(self, path: _AnyPath) -> None:
        key = Path(path)
        del self._entries[key]

    # pyre-fixme[14]: `__contains__` overrides method defined in `Mapping`
    #  inconsistently.
    def __contains__(self, path: object) -> bool:
        if isinstance(path, str):
            key = Path(path)
        elif isinstance(path, Path):
            key = path
        else:
            return False
        return key in self._entries

    @typing.overload
    def pop(self, path: _AnyPath) -> ExpectedFileBase:
        ...

    @typing.overload  # noqa: F811
    def pop(self, path: _AnyPath, default: ExpectedFileBase) -> ExpectedFileBase:
        ...

    @typing.overload  # noqa: F811
    def pop(self, path: _AnyPath, default: None) -> Optional[ExpectedFileBase]:
        ...

    def pop(  # noqa: F811
        self,
        path: _AnyPath,
        default: Union[ExpectedFileBase, None, _DefaultObject] = _DEFAULT_OBJECT,
    ) -> Optional[ExpectedFileBase]:
        key = Path(path)
        if default is _DEFAULT_OBJECT:
            return self._entries.pop(key)
        else:
            tmp = typing.cast(Optional[ExpectedFileBase], default)
            return self._entries.pop(key, tmp)

    def add_file(
        self, path: _AnyPath, contents: bytes, perms: int = 0o644
    ) -> ExpectedFile:
        return self.add(ExpectedFile(path=path, contents=contents, perms=perms))

    def add_symlink(
        self, path: _AnyPath, contents: bytes, perms: int = 0o777
    ) -> ExpectedSymlink:
        return self.add(ExpectedSymlink(path=path, contents=contents, perms=perms))

    def add_socket(self, path: _AnyPath, perms: int = 0o755) -> ExpectedSocket:
        return self.add(ExpectedSocket(path=path, perms=perms))

    def add(self, entry: _ExpectedFile) -> _ExpectedFile:
        assert entry.path not in self
        self._entries[entry.path] = entry
        return entry

    def set_file(
        self, path: _AnyPath, contents: bytes, perms: int = 0o644
    ) -> ExpectedFile:
        return self.set(ExpectedFile(path=path, contents=contents, perms=perms))

    def set_symlink(
        self, path: _AnyPath, contents: bytes, perms: int = 0o777
    ) -> ExpectedSymlink:
        return self.set(ExpectedSymlink(path=path, contents=contents, perms=perms))

    def set_socket(self, path: _AnyPath, perms: int = 0o755) -> ExpectedSocket:
        return self.set(ExpectedSocket(path=path, perms=perms))

    def set(self, entry: _ExpectedFile) -> _ExpectedFile:
        self._entries[entry.path] = entry
        return entry


class SnapshotVerifier:
    def __init__(self) -> None:
        self.errors: List[str] = []
        self.quiet: bool = False

    def error(self, message: str) -> None:
        self.errors.append(message)
        if not self.quiet:
            print(f"==ERROR== {message}")

    def verify_directory(self, path: Path, expected: ExpectedFileSet) -> None:
        """Confirm that the contents of a directory match the expected file state."""
        found_files = enumerate_directory(path)
        for expected_entry in expected.values():
            file_stat = found_files.pop(expected_entry.path, None)
            if file_stat is None:
                self.error(f"{expected_entry.path}: file not present in snapshot")
                continue

            full_path = path / expected_entry.path
            try:
                expected_entry.verify(self, full_path, file_stat)
            except AssertionError as ex:
                self.error(f"{expected_entry.path}: {ex}")
                continue

        for path, stat_info in found_files.items():
            if stat_mod.S_ISDIR(stat_info.st_mode):
                # Don't require directories to be listed explicitly in the input files
                continue
            if str(path.parents[0]) == ".hg":
                # Don't complain about files inside the .hg directory that the caller
                # did not explicitly specify. Mercurial can create a variety of files
                # here, and we don't care about checking the exact list of files it
                # happened to create when the snapshot was generated.
                continue
            self.error(f"{path}: unexpected file present in snapshot")

    def verify_hg_status(
        self,
        repo: hgrepo.HgRepository,
        expected: Dict[str, str],
        check_ignored: bool = True,
    ) -> None:
        actual_status = repo.status(include_ignored=check_ignored)

        for path, expected_char in expected.items():
            actual_char = actual_status.pop(path, None)
            if expected_char != actual_char:
                self.error(
                    f"{path}: unexpected hg status difference: "
                    f"reported as {actual_char}, expected {expected_char}"
                )

        for path, actual_char in actual_status.items():
            self.error(
                f"{path}: unexpected hg status difference: "
                f"reported as {actual_char}, expected None"
            )


def enumerate_directory(path: Path) -> Dict[Path, os.stat_result]:
    """
    Recursively walk a directory and return a dictionary of all of the files
    and directories it contains.

    Returns a dictionary of [path -> os.stat_result]
    The returned paths are relative to the input directory.
    """
    entries: Dict[Path, os.stat_result] = {}
    _enumerate_directory_helper(path, Path(), entries)
    return entries


def _enumerate_directory_helper(
    root_path: Path, rel_path: Path, results: Dict[Path, os.stat_result]
) -> None:
    for entry in os.scandir(root_path / rel_path):
        # Current versions of typeshed don't know about the follow_symlinks
        # argument, so ignore type errors on the next line.
        stat_info: os.stat_result = entry.stat(follow_symlinks=False)
        entry_path: Path = rel_path / entry.name
        results[entry_path] = stat_info
        if stat_mod.S_ISDIR(stat_info.st_mode):
            _enumerate_directory_helper(root_path, entry_path, results)
gpl-2.0
-8,863,614,257,235,671,000
34.113475
86
0.608261
false
3.893826
false
false
false
codelv/enaml-native
src/enamlnative/widgets/popup_window.py
1
4574
""" Copyright (c) 2017, Jairus Martin. Distributed under the terms of the MIT License. The full license is in the file LICENSE, distributed with this software. Created on Mar 17, 2018 @author: jrm """ from atom.api import ( Typed, ForwardTyped, Str, Float, Coerced, Bool, Enum, observe, ) from enaml.core.declarative import d_ from enaml.widgets.toolkit_object import ToolkitObject, ProxyToolkitObject from .view import coerce_size, coerce_gravity class ProxyPopupWindow(ProxyToolkitObject): """ The abstract definition of a proxy dialgo object. """ #: A reference to the Label declaration. declaration = ForwardTyped(lambda: PopupWindow) def set_height(self, height): raise NotImplementedError def set_width(self, width): raise NotImplementedError def set_x(self, x): raise NotImplementedError def set_y(self, y): raise NotImplementedError def set_position(self, position): raise NotImplementedError def set_focusable(self, enabled): raise NotImplementedError def set_touchable(self, enabled): raise NotImplementedError def set_outside_touchable(self, enabled): raise NotImplementedError def set_background_color(self, color): raise NotImplementedError def set_show(self, show): raise NotImplementedError def set_style(self, style): raise NotImplementedError def set_animation(self, style): raise NotImplementedError class PopupWindow(ToolkitObject): """ A popup window that may contain a view. """ #: Width and height or a string "match_parent" or "fill_parent" width = d_(Coerced(int, coercer=coerce_size)) height = d_(Coerced(int, coercer=coerce_size)) #: Layout gravity gravity = d_(Coerced(int, coercer=coerce_gravity)) #: Position x = d_(Float(strict=False)) y = d_(Float(strict=False)) #: Set whether the popup window can be focused focusable = d_(Bool()) #: Set whether the popup is touchable touchable = d_(Bool(True)) #: Controls whether the pop-up will be informed of touch events outside #: of its window. outside_touchable = d_(Bool(True)) #: Start the popup and display it on screen (or hide if False) show = d_(Bool()) #: Background color of the window (white by default) background_color = d_(Str()) #: If relative, show as a dropdown on the parent view, otherwise #: show at the position given by `x` and `y`. position = d_(Enum('relative', 'absolute')) #: Animation style for the PopupWindow using the @style format #: (ex. @style/MyAnimation animation = d_(Str()) #: PopupWindow style using the @style format #: (ex. @style/Theme_Light_NoTitleBar_Fullscreen style = d_(Str()) #: A reference to the proxy object. proxy = Typed(ProxyPopupWindow) # ------------------------------------------------------------------------- # Observers # ------------------------------------------------------------------------- @observe('width', 'height', 'x', 'y', 'position', 'focusable', 'touchable', 'outside_touchable', 'show', 'animation', 'style', 'background_color') def _update_proxy(self, change): """ An observer which sends the state change to the proxy. """ # The superclass implementation is sufficient. super(PopupWindow, self)._update_proxy(change) def popup(self): """ Show the window from code. This will initialize and activate if needed. Examples -------- >>> enamldef ContextMenu(PopupWindow): popup: attr result: lambda text: None Button: text = "One" clicked :: dialog.show = False dialog.result(self.text) Button: text = "Two" clicked :: dialog.show = False dialog.result(self.text) def on_result(value): print("User clicked: {}".format(value)) ContextMenu(result=on_result).popup() Notes ------ This does NOT block. 
Callbacks should be used to handle click events or the `show` state should be observed to know when it is closed. """ if not self.is_initialized: self.initialize() if not self.proxy_is_active: self.activate_proxy() self.show = True
mit
-8,627,291,073,902,098,000
27.234568
79
0.593791
false
4.40231
false
false
false
gst/alignak
alignak/macroresolver.py
2
23875
# -*- coding: utf-8 -*- # # Copyright (C) 2015-2015: Alignak team, see AUTHORS.txt file for contributors # # This file is part of Alignak. # # Alignak is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Alignak is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with Alignak. If not, see <http://www.gnu.org/licenses/>. # # # This file incorporates work covered by the following copyright and # permission notice: # # Copyright (C) 2009-2014: # Hartmut Goebel, [email protected] # Nicolas Dupeux, [email protected] # Gerhard Lausser, [email protected] # Grégory Starck, [email protected] # Frédéric Pégé, [email protected] # Sebastien Coavoux, [email protected] # Olivier Hanesse, [email protected] # Jean Gabes, [email protected] # Zoran Zaric, [email protected] # David Gil, [email protected] # This file is part of Shinken. # # Shinken is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Shinken is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with Shinken. If not, see <http://www.gnu.org/licenses/>. """ This class resolve Macro in commands by looking at the macros list in Class of elements. It give a property that call be callable or not. It not callable, it's a simple property and replace the macro with the value If callable, it's a method that is called to get the value. for example, to get the number of service in a host, you call a method to get the len(host.services) """ import re import time from alignak.borg import Borg class MacroResolver(Borg): """MacroResolver class is used to resolve macros (in command call). 
See above for details""" my_type = 'macroresolver' # Global macros macros = { 'TOTALHOSTSUP': '_get_total_hosts_up', 'TOTALHOSTSDOWN': '_get_total_hosts_down', 'TOTALHOSTSUNREACHABLE': '_get_total_hosts_unreachable', 'TOTALHOSTSDOWNUNHANDLED': '_get_total_hosts_unhandled', 'TOTALHOSTSUNREACHABLEUNHANDLED': '_get_total_hosts_unreachable_unhandled', 'TOTALHOSTPROBLEMS': '_get_total_host_problems', 'TOTALHOSTPROBLEMSUNHANDLED': '_get_total_host_problems_unhandled', 'TOTALSERVICESOK': '_get_total_service_ok', 'TOTALSERVICESWARNING': '_get_total_services_warning', 'TOTALSERVICESCRITICAL': '_get_total_services_critical', 'TOTALSERVICESUNKNOWN': '_get_total_services_unknown', 'TOTALSERVICESWARNINGUNHANDLED': '_get_total_services_warning_unhandled', 'TOTALSERVICESCRITICALUNHANDLED': '_get_total_services_critical_unhandled', 'TOTALSERVICESUNKNOWNUNHANDLED': '_get_total_services_unknown_unhandled', 'TOTALSERVICEPROBLEMS': '_get_total_service_problems', 'TOTALSERVICEPROBLEMSUNHANDLED': '_get_total_service_problems_unhandled', 'LONGDATETIME': '_get_long_date_time', 'SHORTDATETIME': '_get_short_date_time', 'DATE': '_get_date', 'TIME': '_get_time', 'TIMET': '_get_timet', 'PROCESSSTARTTIME': '_get_process_start_time', 'EVENTSTARTTIME': '_get_events_start_time', } output_macros = [ 'HOSTOUTPUT', 'HOSTPERFDATA', 'HOSTACKAUTHOR', 'HOSTACKCOMMENT', 'SERVICEOUTPUT', 'SERVICEPERFDATA', 'SERVICEACKAUTHOR', 'SERVICEACKCOMMENT' ] def init(self, conf): """Init macroresolver instance with conf. Must be called once. :param conf: conf to load :type conf: :return: None """ # For searching class and elements for ondemand # we need link to types self.conf = conf self.lists_on_demand = [] self.hosts = conf.hosts # For special void host_name handling... self.host_class = self.hosts.inner_class self.lists_on_demand.append(self.hosts) self.services = conf.services self.contacts = conf.contacts self.lists_on_demand.append(self.contacts) self.hostgroups = conf.hostgroups self.lists_on_demand.append(self.hostgroups) self.commands = conf.commands self.servicegroups = conf.servicegroups self.lists_on_demand.append(self.servicegroups) self.contactgroups = conf.contactgroups self.lists_on_demand.append(self.contactgroups) self.illegal_macro_output_chars = conf.illegal_macro_output_chars # Try cache :) # self.cache = {} def _get_macros(self, chain): """Get all macros of a chain Cut '$' char and create a dict with the following structure:: { 'MacroSTR1' : {'val': '', 'type': 'unknown'} 'MacroSTR2' : {'val': '', 'type': 'unknown'} } :param chain: chain to parse :type chain: str :return: dict with macro parsed as key :rtype: dict """ # if chain in self.cache: # return self.cache[chain] regex = re.compile(r'(\$)') elts = regex.split(chain) macros = {} in_macro = False for elt in elts: if elt == '$': in_macro = not in_macro elif in_macro: macros[elt] = {'val': '', 'type': 'unknown'} # self.cache[chain] = macros if '' in macros: del macros[''] return macros def _get_value_from_element(self, elt, prop): """Get value from a element's property the property may be a function to call. 
:param elt: element :type elt: object :param prop: element property :type prop: str :return: getattr(elt, prop) or getattr(elt, prop)() (call) :rtype: str """ try: value = getattr(elt, prop) if callable(value): return unicode(value()) else: return unicode(value) except AttributeError, exp: # Return no value return '' except UnicodeError, exp: if isinstance(value, str): return unicode(value, 'utf8', errors='ignore') else: return '' def _delete_unwanted_caracters(self, chain): """Remove not wanted char from chain unwanted char are illegal_macro_output_chars attribute :param chain: chain to remove char from :type chain: str :return: chain cleaned :rtype: str """ for char in self.illegal_macro_output_chars: chain = chain.replace(char, '') return chain def get_env_macros(self, data): """Get all environment macros from data For each object in data :: * Fetch all macros in object.__class__.macros * Fetch all customs macros in o.custom :param data: data to get macro :type data: :return: dict with macro name as key and macro value as value :rtype: dict """ env = {} for obj in data: cls = obj.__class__ macros = cls.macros for macro in macros: if macro.startswith("USER"): break prop = macros[macro] value = self._get_value_from_element(obj, prop) env['NAGIOS_%s' % macro] = value if hasattr(obj, 'customs'): # make NAGIOS__HOSTMACADDR from _MACADDR for cmacro in obj.customs: new_env_name = 'NAGIOS__' + obj.__class__.__name__.upper() + cmacro[1:].upper() env[new_env_name] = obj.customs[cmacro] return env def resolve_simple_macros_in_string(self, c_line, data, args=None): """Replace macro in the command line with the real value :param c_line: command line to modify :type c_line: str :param data: objects list, use to look for a specific macro :type data: :param args: args given to the command line, used to get "ARGN" macros. :type args: :return: command line with '$MACRO$' replaced with values :rtype: str """ # Now we prepare the classes for looking at the class.macros data.append(self) # For getting global MACROS if hasattr(self, 'conf'): data.append(self.conf) # For USERN macros clss = [d.__class__ for d in data] # we should do some loops for nested macros # like $USER1$ hiding like a ninja in a $ARG2$ Macro. And if # $USER1$ is pointing to $USER34$ etc etc, we should loop # until we reach the bottom. So the last loop is when we do # not still have macros :) still_got_macros = True nb_loop = 0 while still_got_macros: nb_loop += 1 # Ok, we want the macros in the command line macros = self._get_macros(c_line) # We can get out if we do not have macros this loop still_got_macros = (len(macros) != 0) # print "Still go macros:", still_got_macros # Put in the macros the type of macro for all macros self._get_type_of_macro(macros, clss) # Now we get values from elements for macro in macros: # If type ARGN, look at ARGN cutting if macros[macro]['type'] == 'ARGN' and args is not None: macros[macro]['val'] = self._resolve_argn(macro, args) macros[macro]['type'] = 'resolved' # If class, get value from properties if macros[macro]['type'] == 'class': cls = macros[macro]['class'] for elt in data: if elt is not None and elt.__class__ == cls: prop = cls.macros[macro] macros[macro]['val'] = self._get_value_from_element(elt, prop) # Now check if we do not have a 'output' macro. 
If so, we must # delete all special characters that can be dangerous if macro in self.output_macros: macros[macro]['val'] = \ self._delete_unwanted_caracters(macros[macro]['val']) if macros[macro]['type'] == 'CUSTOM': cls_type = macros[macro]['class'] # Beware : only cut the first _HOST value, so the macro name can have it on it.. macro_name = re.split('_' + cls_type, macro, 1)[1].upper() # Ok, we've got the macro like MAC_ADDRESS for _HOSTMAC_ADDRESS # Now we get the element in data that have the type HOST # and we check if it got the custom value for elt in data: if elt is not None and elt.__class__.my_type.upper() == cls_type: if '_' + macro_name in elt.customs: macros[macro]['val'] = elt.customs['_' + macro_name] # Then look on the macromodulations, in reserver order, so # the last to set, will be the firt to have. (yes, don't want to play # with break and such things sorry...) mms = getattr(elt, 'macromodulations', []) for macromod in mms[::-1]: # Look if the modulation got the value, # but also if it's currently active if '_' + macro_name in macromod.customs and macromod.is_active(): macros[macro]['val'] = macromod.customs['_' + macro_name] if macros[macro]['type'] == 'ONDEMAND': macros[macro]['val'] = self._resolve_ondemand(macro, data) # We resolved all we can, now replace the macro in the command call for macro in macros: c_line = c_line.replace('$' + macro + '$', macros[macro]['val']) # A $$ means we want a $, it's not a macro! # We replace $$ by a big dirty thing to be sure to not misinterpret it c_line = c_line.replace("$$", "DOUBLEDOLLAR") if nb_loop > 32: # too much loop, we exit still_got_macros = False # We now replace the big dirty token we made by only a simple $ c_line = c_line.replace("DOUBLEDOLLAR", "$") # print "Retuning c_line", c_line.strip() return c_line.strip() def resolve_command(self, com, data): """Resolve command macros with data :param com: check / event handler or command call object :type com: object :param data: objects list, use to look for a specific macro :type data: :return: command line with '$MACRO$' replaced with values :rtype: str """ c_line = com.command.command_line return self.resolve_simple_macros_in_string(c_line, data, args=com.args) def _get_type_of_macro(self, macros, clss): r"""Set macros types Example:: ARG\d -> ARGN, HOSTBLABLA -> class one and set Host in class) _HOSTTOTO -> HOST CUSTOM MACRO TOTO SERVICESTATEID:srv-1:Load$ -> MACRO SERVICESTATEID of the service Load of host srv-1 :param macros: macros list :type macros: list[str] :param clss: classes list, used to tag class macros :type clss: :return: None """ for macro in macros: # ARGN Macros if re.match(r'ARG\d', macro): macros[macro]['type'] = 'ARGN' continue # USERN macros # are managed in the Config class, so no # need to look that here elif re.match(r'_HOST\w', macro): macros[macro]['type'] = 'CUSTOM' macros[macro]['class'] = 'HOST' continue elif re.match(r'_SERVICE\w', macro): macros[macro]['type'] = 'CUSTOM' macros[macro]['class'] = 'SERVICE' # value of macro: re.split('_HOST', '_HOSTMAC_ADDRESS')[1] continue elif re.match(r'_CONTACT\w', macro): macros[macro]['type'] = 'CUSTOM' macros[macro]['class'] = 'CONTACT' continue # On demand macro elif len(macro.split(':')) > 1: macros[macro]['type'] = 'ONDEMAND' continue # OK, classical macro... 
for cls in clss: if macro in cls.macros: macros[macro]['type'] = 'class' macros[macro]['class'] = cls continue def _resolve_argn(self, macro, args): """Get argument from macro name ie : $ARG3$ -> args[2] :param macro: macro to parse :type macro: :param args: args given to command line :type args: :return: argument at position N-1 in args table (where N is the int parsed) :rtype: None | str """ # first, get the number of args _id = None matches = re.search(r'ARG(?P<id>\d+)', macro) if matches is not None: _id = int(matches.group('id')) - 1 try: return args[_id] except IndexError: return '' def _resolve_ondemand(self, macro, data): """Get on demand macro value :param macro: macro to parse :type macro: :param data: data to get value from :type data: :return: macro value :rtype: str """ # print "\nResolving macro", macro elts = macro.split(':') nb_parts = len(elts) macro_name = elts[0] # Len 3 == service, 2 = all others types... if nb_parts == 3: val = '' # print "Got a Service on demand asking...", elts (host_name, service_description) = (elts[1], elts[2]) # host_name can be void, so it's the host in data # that is important. We use our self.host_class to # find the host in the data :) if host_name == '': for elt in data: if elt is not None and elt.__class__ == self.host_class: host_name = elt.host_name # Ok now we get service serv = self.services.find_srv_by_name_and_hostname(host_name, service_description) if serv is not None: cls = serv.__class__ prop = cls.macros[macro_name] val = self._get_value_from_element(serv, prop) # print "Got val:", val return val # Ok, service was easy, now hard part else: val = '' elt_name = elts[1] # Special case: elt_name can be void # so it's the host where it apply if elt_name == '': for elt in data: if elt is not None and elt.__class__ == self.host_class: elt_name = elt.host_name for od_list in self.lists_on_demand: cls = od_list.inner_class # We search our type by looking at the macro if macro_name in cls.macros: prop = cls.macros[macro_name] i = od_list.find_by_name(elt_name) if i is not None: val = self._get_value_from_element(i, prop) # Ok we got our value :) break return val return '' def _get_long_date_time(self): """Get long date time Example : Fri 15 May 11:42:39 CEST 2009 :return: long date local time :rtype: str TODO: Should be moved to util TODO: Should consider timezone """ return time.strftime("%a %d %b %H:%M:%S %Z %Y").decode('UTF-8', 'ignore') def _get_short_date_time(self): """Get short date time Example : 10-13-2000 00:30:28 :return: short date local time :rtype: str TODO: Should be moved to util TODO: Should consider timezone """ return time.strftime("%d-%m-%Y %H:%M:%S") def _get_date(self): """Get date Example : 10-13-2000 :return: local date :rtype: str TODO: Should be moved to util TODO: Should consider timezone """ return time.strftime("%d-%m-%Y") def _get_time(self): """Get date time Example : 00:30:28 :return: date local time :rtype: str TODO: Should be moved to util TODO: Should consider timezone """ return time.strftime("%H:%M:%S") def _get_timet(self): """Get epoch time Example : 1437143291 :return: timestamp :rtype: str TODO: Should be moved to util TODO: Should consider timezone """ return str(int(time.time())) def _tot_hosts_by_state(self, state): """Generic function to get the number of host in the specified state :param state: state to filter on :type state: :return: number of host in state *state* :rtype: int TODO: Should be moved """ return sum(1 for h in self.hosts if h.state == state) _get_total_hosts_up = lambda s: 
s._tot_hosts_by_state('UP')
    _get_total_hosts_down = lambda s: s._tot_hosts_by_state('DOWN')
    _get_total_hosts_unreachable = lambda s: s._tot_hosts_by_state('UNREACHABLE')

    def _get_total_hosts_unreachable_unhandled(self):
        """DOES NOTHING (should get the number of unreachable hosts not handled)

        :return: 0 always
        :rtype: int
        TODO: Implement this
        """
        return 0

    def _get_total_hosts_problems(self):
        """Get the number of hosts that are a problem

        :return: number of hosts with is_problem attribute True
        :rtype: int
        """
        return sum(1 for h in self.hosts if h.is_problem)

    def _get_total_hosts_problems_unhandled(self):
        """DOES NOTHING (should get the number of host problems not handled)

        :return: 0 always
        :rtype: int
        TODO: Implement this
        """
        return 0

    def _tot_services_by_state(self, state):
        """Generic function to get the number of services in the specified state

        :param state: state to filter on
        :type state:
        :return: number of services in state *state*
        :rtype: int
        TODO: Should be moved
        """
        return sum(1 for s in self.services if s.state == state)

    _get_total_service_ok = lambda s: s._tot_services_by_state('OK')
    _get_total_service_warning = lambda s: s._tot_services_by_state('WARNING')
    _get_total_service_critical = lambda s: s._tot_services_by_state('CRITICAL')
    _get_total_service_unknown = lambda s: s._tot_services_by_state('UNKNOWN')

    def _get_total_services_warning_unhandled(self):
        """DOES NOTHING (should get the number of warning services not handled)

        :return: 0 always
        :rtype: int
        TODO: Implement this
        """
        return 0

    def _get_total_services_critical_unhandled(self):
        """DOES NOTHING (should get the number of critical services not handled)

        :return: 0 always
        :rtype: int
        TODO: Implement this
        """
        return 0

    def _get_total_services_unknown_unhandled(self):
        """DOES NOTHING (should get the number of unknown services not handled)

        :return: 0 always
        :rtype: int
        TODO: Implement this
        """
        return 0

    def _get_total_service_problems(self):
        """Get the number of services that are a problem

        :return: number of services with is_problem attribute True
        :rtype: int
        """
        return sum(1 for s in self.services if s.is_problem)

    def _get_total_service_problems_unhandled(self):
        """DOES NOTHING (should get the number of service problems not handled)

        :return: 0 always
        :rtype: int
        TODO: Implement this
        """
        return 0

    def _get_process_start_time(self):
        """DOES NOTHING (should get the process start time)

        :return: 0 always
        :rtype: int
        TODO: Implement this
        """
        return 0

    def _get_events_start_time(self):
        """DOES NOTHING (should get the events start time)

        :return: 0 always
        :rtype: int
        TODO: Implement this
        """
        return 0
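A minimal, self-contained sketch of the $ARGn$ resolution implemented by _resolve_argn above; the sample macro names and argument list are invented for illustration.

import re

def resolve_argn(macro, args):
    # Pull the 1-based index out of an ARGN macro name and map it onto args;
    # $ARG3$ -> args[2]; out-of-range indices yield '' as in the original.
    match = re.search(r'ARG(?P<id>\d+)', macro)
    if match is None:
        return None
    try:
        return args[int(match.group('id')) - 1]
    except IndexError:
        return ''

print(resolve_argn('ARG2', ['-w', '90', '-c', '95']))  # prints: 90
print(resolve_argn('ARG9', ['-w', '90']))              # prints an empty string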
agpl-3.0
-6,827,662,738,583,955,000
35.442748
100
0.556054
false
4.076161
false
false
false
vitormazzi/django-jython
doj/backends/zxjdbc/oracle/query.py
1
6130
""" Custom Query class for Oracle. Derived from: django.db.models.sql.query.Query """ import datetime from django.db.backends import util # Cache. Maps default query class to new Oracle query class. _classes = {} def query_class(QueryClass, Database): """ Returns a custom django.db.models.sql.query.Query subclass that is appropriate for Oracle. The 'Database' module (cx_Oracle) is passed in here so that all the setup required to import it only needs to be done by the calling module. """ global _classes try: return _classes[QueryClass] except KeyError: pass class OracleQuery(QueryClass): def resolve_columns(self, row, fields=()): index_start = len(self.extra_select.keys()) values = [self.convert_values(v, type(v)) for v in row[:index_start]] for value, field in map(None, row[index_start:], fields): values.append(self.convert_values(value, field)) return values def convert_values(self, value, field): from django.db.models.fields import DateField, DateTimeField, \ TimeField, BooleanField, NullBooleanField, DecimalField, FloatField, Field # Oracle stores empty strings as null. We need to undo this in # order to adhere to the Django convention of using the empty # string instead of null, but only if the field accepts the # empty string. if value is None: pass elif value is None and isinstance(field, Field) and field.empty_strings_allowed: value = u'' # Convert 1 or 0 to True or False elif isinstance(value, float): value = float(value) # Added 04-26-2009 to repair "Invalid literal for int() base 10" error elif isinstance(value,int): value = int(value) elif field is not None and field.get_internal_type() == 'AutoField': value = int(float(value)) elif value in (1, 0) and field is not None and field.get_internal_type() in ('BooleanField', 'NullBooleanField'): value = bool(value) # Force floats to the correct type elif field is not None and field.get_internal_type() == 'FloatField': value = float(value) # Convert floats to decimals elif field is not None and field.get_internal_type() == 'DecimalField': value = util.typecast_decimal(field.format_number(value)) elif field is not None and field.get_internal_type() == 'SmallIntegerField': value = util.typecast_decimal(field.format_number(value)) return value def as_sql(self, with_limits=True, with_col_aliases=False): """ Creates the SQL for this query. Returns the SQL string and list of parameters. This is overriden from the original Query class to handle the additional SQL Oracle requires to emulate LIMIT and OFFSET. If 'with_limits' is False, any limit/offset information is not included in the query. """ # The `do_offset` flag indicates whether we need to construct # the SQL needed to use limit/offset with Oracle. do_offset = with_limits and (self.high_mark is not None or self.low_mark) if not do_offset: sql, params = super(OracleQuery, self).as_sql(with_limits=False, with_col_aliases=with_col_aliases) else: # `get_columns` needs to be called before `get_ordering` to # populate `_select_alias`. self.pre_sql_setup() self.get_columns() #ordering = self.get_ordering() # # Removed Ordering on 03/27/2009 as it caused error: # TypeError: sequence item 0: expected string, list found # ordering = False # Oracle's ROW_NUMBER() function requires an ORDER BY clause. if ordering: rn_orderby = ', '.join(ordering) else: # Create a default ORDER BY since none was specified. 
qn = self.quote_name_unless_alias opts = self.model._meta rn_orderby = '%s.%s' % (qn(opts.db_table), qn(opts.fields[0].db_column or opts.fields[0].column)) # Ensure the base query SELECTs our special "_RN" column self.extra_select['_RN'] = ('ROW_NUMBER() OVER (ORDER BY %s)' % rn_orderby, '') sql, params = super(OracleQuery, self).as_sql(with_limits=False, with_col_aliases=True) # Wrap the base query in an outer SELECT * with boundaries on # the "_RN" column. This is the canonical way to emulate LIMIT # and OFFSET on Oracle. sql = 'SELECT * FROM (%s) WHERE "_RN" > %d' % (sql, self.low_mark) if self.high_mark is not None: sql = '%s AND "_RN" <= %d' % (sql, self.high_mark) return sql, params def set_limits(self, low=None, high=None): super(OracleQuery, self).set_limits(low, high) # We need to select the row number for the LIMIT/OFFSET sql. # A placeholder is added to extra_select now, because as_sql is # too late to be modifying extra_select. However, the actual sql # depends on the ordering, so that is generated in as_sql. self.extra_select['_RN'] = ('1', '') def clear_limits(self): super(OracleQuery, self).clear_limits() if '_RN' in self.extra_select: del self.extra_select['_RN'] _classes[QueryClass] = OracleQuery return OracleQuery
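To make the emulated LIMIT/OFFSET above concrete, here is a hedged sketch of the SQL shape that OracleQuery.as_sql builds; the inner query, table, and column names are hypothetical and only stand in for what the ORM would generate.

def wrap_with_row_number(inner_sql, low_mark, high_mark):
    # Mirrors the wrapping step: bound the ROW_NUMBER() column aliased "_RN".
    sql = 'SELECT * FROM (%s) WHERE "_RN" > %d' % (inner_sql, low_mark)
    if high_mark is not None:
        sql = '%s AND "_RN" <= %d' % (sql, high_mark)
    return sql

inner = ('SELECT t.id, t.name, ROW_NUMBER() OVER (ORDER BY t.id) AS "_RN" '
         'FROM books t')
print(wrap_with_row_number(inner, 10, 20))  # selects rows 11 through 20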
bsd-3-clause
7,167,331,611,330,312,000
44.073529
125
0.558564
false
4.490842
false
false
false
simplegeo/sqlalchemy
lib/sqlalchemy/dialects/mssql/base.py
1
48621
# mssql.py

"""Support for the Microsoft SQL Server database.

Connecting
----------

See the individual driver sections below for details on connecting.

Auto Increment Behavior
-----------------------

``IDENTITY`` columns are supported by using SQLAlchemy
``schema.Sequence()`` objects. In other words::

    Table('test', mss_engine,
           Column('id', Integer,
                  Sequence('blah',100,10), primary_key=True),
           Column('name', String(20))
         ).create()

would yield::

   CREATE TABLE test (
     id INTEGER NOT NULL IDENTITY(100,10) PRIMARY KEY,
     name VARCHAR(20) NULL,
     )

Note that the ``start`` and ``increment`` values for sequences are
optional and will default to 1,1.

Implicit ``autoincrement`` behavior works the same in MSSQL as it
does in other dialects and results in an ``IDENTITY`` column.

* Support for ``SET IDENTITY_INSERT ON`` mode (automagic on / off for
  ``INSERT`` s)

* Support for auto-fetching of ``@@IDENTITY/@@SCOPE_IDENTITY()`` on
  ``INSERT``

Collation Support
-----------------

MSSQL specific string types support a collation parameter that
creates a column-level specific collation for the column. The
collation parameter accepts a Windows Collation Name or a SQL
Collation Name. Supported types are MSChar, MSNChar, MSString,
MSNVarchar, MSText, and MSNText. For example::

    Column('login', String(32, collation='Latin1_General_CI_AS'))

will yield::

    login VARCHAR(32) COLLATE Latin1_General_CI_AS NULL

LIMIT/OFFSET Support
--------------------

MSSQL has no direct support for the LIMIT or OFFSET keywords. LIMIT is
supported instead through the ``TOP`` Transact SQL keyword::

    select.limit

will yield::

    SELECT TOP n

If using SQL Server 2005 or above, LIMIT with OFFSET
support is available through the ``ROW_NUMBER OVER`` construct.
For versions below 2005, LIMIT with OFFSET usage will fail.

Nullability
-----------

MSSQL has support for three levels of column nullability. The default
nullability allows nulls and is explicit in the CREATE TABLE
construct::

    name VARCHAR(20) NULL

If ``nullable=None`` is specified then no specification is made. In
other words the database's configured default is used. This will
render::

    name VARCHAR(20)

If ``nullable`` is ``True`` or ``False`` then the column will be
``NULL`` or ``NOT NULL`` respectively.

Date / Time Handling
--------------------

DATE and TIME are supported. Bind parameters are converted
to datetime.datetime() objects as required by most MSSQL drivers,
and results are processed from strings if needed.
The DATE and TIME types are not available for MSSQL 2005 and
previous - if a server version below 2008 is detected, DDL
for these types will be issued as DATETIME.

Compatibility Levels
--------------------

MSSQL supports the notion of setting compatibility levels at the
database level. This allows, for instance, running a database that
is compatible with SQL2000 while running on a SQL2005 database
server. ``server_version_info`` will always return the database
server version information (in this case SQL2005) and not the
compatibility level information. Because of this, if running under
a backwards compatibility mode SQLAlchemy may attempt to use T-SQL
statements that are unable to be parsed by the database server.
Known Issues ------------ * No support for more than one ``IDENTITY`` column per table """ import datetime, decimal, inspect, operator, sys, re import itertools from sqlalchemy import sql, schema as sa_schema, exc, util from sqlalchemy.sql import select, compiler, expression, \ operators as sql_operators, \ functions as sql_functions, util as sql_util from sqlalchemy.engine import default, base, reflection from sqlalchemy import types as sqltypes from sqlalchemy import processors from sqlalchemy.types import INTEGER, BIGINT, SMALLINT, DECIMAL, NUMERIC, \ FLOAT, TIMESTAMP, DATETIME, DATE, BINARY,\ VARBINARY, BLOB from sqlalchemy.dialects.mssql import information_schema as ischema MS_2008_VERSION = (10,) MS_2005_VERSION = (9,) MS_2000_VERSION = (8,) RESERVED_WORDS = set( ['add', 'all', 'alter', 'and', 'any', 'as', 'asc', 'authorization', 'backup', 'begin', 'between', 'break', 'browse', 'bulk', 'by', 'cascade', 'case', 'check', 'checkpoint', 'close', 'clustered', 'coalesce', 'collate', 'column', 'commit', 'compute', 'constraint', 'contains', 'containstable', 'continue', 'convert', 'create', 'cross', 'current', 'current_date', 'current_time', 'current_timestamp', 'current_user', 'cursor', 'database', 'dbcc', 'deallocate', 'declare', 'default', 'delete', 'deny', 'desc', 'disk', 'distinct', 'distributed', 'double', 'drop', 'dump', 'else', 'end', 'errlvl', 'escape', 'except', 'exec', 'execute', 'exists', 'exit', 'external', 'fetch', 'file', 'fillfactor', 'for', 'foreign', 'freetext', 'freetexttable', 'from', 'full', 'function', 'goto', 'grant', 'group', 'having', 'holdlock', 'identity', 'identity_insert', 'identitycol', 'if', 'in', 'index', 'inner', 'insert', 'intersect', 'into', 'is', 'join', 'key', 'kill', 'left', 'like', 'lineno', 'load', 'merge', 'national', 'nocheck', 'nonclustered', 'not', 'null', 'nullif', 'of', 'off', 'offsets', 'on', 'open', 'opendatasource', 'openquery', 'openrowset', 'openxml', 'option', 'or', 'order', 'outer', 'over', 'percent', 'pivot', 'plan', 'precision', 'primary', 'print', 'proc', 'procedure', 'public', 'raiserror', 'read', 'readtext', 'reconfigure', 'references', 'replication', 'restore', 'restrict', 'return', 'revert', 'revoke', 'right', 'rollback', 'rowcount', 'rowguidcol', 'rule', 'save', 'schema', 'securityaudit', 'select', 'session_user', 'set', 'setuser', 'shutdown', 'some', 'statistics', 'system_user', 'table', 'tablesample', 'textsize', 'then', 'to', 'top', 'tran', 'transaction', 'trigger', 'truncate', 'tsequal', 'union', 'unique', 'unpivot', 'update', 'updatetext', 'use', 'user', 'values', 'varying', 'view', 'waitfor', 'when', 'where', 'while', 'with', 'writetext', ]) class REAL(sqltypes.Float): """A type for ``real`` numbers.""" __visit_name__ = 'REAL' def __init__(self): super(REAL, self).__init__(precision=24) class TINYINT(sqltypes.Integer): __visit_name__ = 'TINYINT' # MSSQL DATE/TIME types have varied behavior, sometimes returning # strings. MSDate/TIME check for everything, and always # filter bind parameters into datetime objects (required by pyodbc, # not sure about other dialects). 
class _MSDate(sqltypes.Date): def bind_processor(self, dialect): def process(value): if type(value) == datetime.date: return datetime.datetime(value.year, value.month, value.day) else: return value return process _reg = re.compile(r"(\d+)-(\d+)-(\d+)") def result_processor(self, dialect, coltype): def process(value): if isinstance(value, datetime.datetime): return value.date() elif isinstance(value, basestring): return datetime.date(*[ int(x or 0) for x in self._reg.match(value).groups() ]) else: return value return process class TIME(sqltypes.TIME): def __init__(self, precision=None, **kwargs): self.precision = precision super(TIME, self).__init__() __zero_date = datetime.date(1900, 1, 1) def bind_processor(self, dialect): def process(value): if isinstance(value, datetime.datetime): value = datetime.datetime.combine( self.__zero_date, value.time()) elif isinstance(value, datetime.time): value = datetime.datetime.combine(self.__zero_date, value) return value return process _reg = re.compile(r"(\d+):(\d+):(\d+)(?:\.(\d+))?") def result_processor(self, dialect, coltype): def process(value): if isinstance(value, datetime.datetime): return value.time() elif isinstance(value, basestring): return datetime.time(*[ int(x or 0) for x in self._reg.match(value).groups()]) else: return value return process class _DateTimeBase(object): def bind_processor(self, dialect): def process(value): if type(value) == datetime.date: return datetime.datetime(value.year, value.month, value.day) else: return value return process class _MSDateTime(_DateTimeBase, sqltypes.DateTime): pass class SMALLDATETIME(_DateTimeBase, sqltypes.DateTime): __visit_name__ = 'SMALLDATETIME' class DATETIME2(_DateTimeBase, sqltypes.DateTime): __visit_name__ = 'DATETIME2' def __init__(self, precision=None, **kwargs): self.precision = precision # TODO: is this not an Interval ? class DATETIMEOFFSET(sqltypes.TypeEngine): __visit_name__ = 'DATETIMEOFFSET' def __init__(self, precision=None, **kwargs): self.precision = precision class _StringType(object): """Base for MSSQL string types.""" def __init__(self, collation=None): self.collation = collation class TEXT(_StringType, sqltypes.TEXT): """MSSQL TEXT type, for variable-length text up to 2^31 characters.""" def __init__(self, *args, **kw): """Construct a TEXT. :param collation: Optional, a column-level collation for this string value. Accepts a Windows Collation Name or a SQL Collation Name. """ collation = kw.pop('collation', None) _StringType.__init__(self, collation) sqltypes.Text.__init__(self, *args, **kw) class NTEXT(_StringType, sqltypes.UnicodeText): """MSSQL NTEXT type, for variable-length unicode text up to 2^30 characters.""" __visit_name__ = 'NTEXT' def __init__(self, *args, **kwargs): """Construct a NTEXT. :param collation: Optional, a column-level collation for this string value. Accepts a Windows Collation Name or a SQL Collation Name. """ collation = kwargs.pop('collation', None) _StringType.__init__(self, collation) length = kwargs.pop('length', None) sqltypes.UnicodeText.__init__(self, length, **kwargs) class VARCHAR(_StringType, sqltypes.VARCHAR): """MSSQL VARCHAR type, for variable-length non-Unicode data with a maximum of 8,000 characters.""" def __init__(self, *args, **kw): """Construct a VARCHAR. :param length: Optinal, maximum data length, in characters. :param convert_unicode: defaults to False. If True, convert ``unicode`` data sent to the database to a ``str`` bytestring, and convert bytestrings coming back from the database into ``unicode``. 
Bytestrings are encoded using the dialect's :attr:`~sqlalchemy.engine.base.Dialect.encoding`, which defaults to `utf-8`. If False, may be overridden by :attr:`sqlalchemy.engine.base.Dialect.convert_unicode`. :param collation: Optional, a column-level collation for this string value. Accepts a Windows Collation Name or a SQL Collation Name. """ collation = kw.pop('collation', None) _StringType.__init__(self, collation) sqltypes.VARCHAR.__init__(self, *args, **kw) class NVARCHAR(_StringType, sqltypes.NVARCHAR): """MSSQL NVARCHAR type. For variable-length unicode character data up to 4,000 characters.""" def __init__(self, *args, **kw): """Construct a NVARCHAR. :param length: Optional, Maximum data length, in characters. :param collation: Optional, a column-level collation for this string value. Accepts a Windows Collation Name or a SQL Collation Name. """ collation = kw.pop('collation', None) _StringType.__init__(self, collation) sqltypes.NVARCHAR.__init__(self, *args, **kw) class CHAR(_StringType, sqltypes.CHAR): """MSSQL CHAR type, for fixed-length non-Unicode data with a maximum of 8,000 characters.""" def __init__(self, *args, **kw): """Construct a CHAR. :param length: Optinal, maximum data length, in characters. :param convert_unicode: defaults to False. If True, convert ``unicode`` data sent to the database to a ``str`` bytestring, and convert bytestrings coming back from the database into ``unicode``. Bytestrings are encoded using the dialect's :attr:`~sqlalchemy.engine.base.Dialect.encoding`, which defaults to `utf-8`. If False, may be overridden by :attr:`sqlalchemy.engine.base.Dialect.convert_unicode`. :param collation: Optional, a column-level collation for this string value. Accepts a Windows Collation Name or a SQL Collation Name. """ collation = kw.pop('collation', None) _StringType.__init__(self, collation) sqltypes.CHAR.__init__(self, *args, **kw) class NCHAR(_StringType, sqltypes.NCHAR): """MSSQL NCHAR type. For fixed-length unicode character data up to 4,000 characters.""" def __init__(self, *args, **kw): """Construct an NCHAR. :param length: Optional, Maximum data length, in characters. :param collation: Optional, a column-level collation for this string value. Accepts a Windows Collation Name or a SQL Collation Name. """ collation = kw.pop('collation', None) _StringType.__init__(self, collation) sqltypes.NCHAR.__init__(self, *args, **kw) class IMAGE(sqltypes.LargeBinary): __visit_name__ = 'IMAGE' class BIT(sqltypes.TypeEngine): __visit_name__ = 'BIT' class MONEY(sqltypes.TypeEngine): __visit_name__ = 'MONEY' class SMALLMONEY(sqltypes.TypeEngine): __visit_name__ = 'SMALLMONEY' class UNIQUEIDENTIFIER(sqltypes.TypeEngine): __visit_name__ = "UNIQUEIDENTIFIER" class SQL_VARIANT(sqltypes.TypeEngine): __visit_name__ = 'SQL_VARIANT' # old names. 
MSDateTime = _MSDateTime MSDate = _MSDate MSReal = REAL MSTinyInteger = TINYINT MSTime = TIME MSSmallDateTime = SMALLDATETIME MSDateTime2 = DATETIME2 MSDateTimeOffset = DATETIMEOFFSET MSText = TEXT MSNText = NTEXT MSString = VARCHAR MSNVarchar = NVARCHAR MSChar = CHAR MSNChar = NCHAR MSBinary = BINARY MSVarBinary = VARBINARY MSImage = IMAGE MSBit = BIT MSMoney = MONEY MSSmallMoney = SMALLMONEY MSUniqueIdentifier = UNIQUEIDENTIFIER MSVariant = SQL_VARIANT ischema_names = { 'int' : INTEGER, 'bigint': BIGINT, 'smallint' : SMALLINT, 'tinyint' : TINYINT, 'varchar' : VARCHAR, 'nvarchar' : NVARCHAR, 'char' : CHAR, 'nchar' : NCHAR, 'text' : TEXT, 'ntext' : NTEXT, 'decimal' : DECIMAL, 'numeric' : NUMERIC, 'float' : FLOAT, 'datetime' : DATETIME, 'datetime2' : DATETIME2, 'datetimeoffset' : DATETIMEOFFSET, 'date': DATE, 'time': TIME, 'smalldatetime' : SMALLDATETIME, 'binary' : BINARY, 'varbinary' : VARBINARY, 'bit': BIT, 'real' : REAL, 'image' : IMAGE, 'timestamp': TIMESTAMP, 'money': MONEY, 'smallmoney': SMALLMONEY, 'uniqueidentifier': UNIQUEIDENTIFIER, 'sql_variant': SQL_VARIANT, } class MSTypeCompiler(compiler.GenericTypeCompiler): def _extend(self, spec, type_): """Extend a string-type declaration with standard SQL COLLATE annotations. """ if getattr(type_, 'collation', None): collation = 'COLLATE %s' % type_.collation else: collation = None if type_.length: spec = spec + "(%d)" % type_.length return ' '.join([c for c in (spec, collation) if c is not None]) def visit_FLOAT(self, type_): precision = getattr(type_, 'precision', None) if precision is None: return "FLOAT" else: return "FLOAT(%(precision)s)" % {'precision': precision} def visit_REAL(self, type_): return "REAL" def visit_TINYINT(self, type_): return "TINYINT" def visit_DATETIMEOFFSET(self, type_): if type_.precision: return "DATETIMEOFFSET(%s)" % type_.precision else: return "DATETIMEOFFSET" def visit_TIME(self, type_): precision = getattr(type_, 'precision', None) if precision: return "TIME(%s)" % precision else: return "TIME" def visit_DATETIME2(self, type_): precision = getattr(type_, 'precision', None) if precision: return "DATETIME2(%s)" % precision else: return "DATETIME2" def visit_SMALLDATETIME(self, type_): return "SMALLDATETIME" def visit_unicode(self, type_): return self.visit_NVARCHAR(type_) def visit_unicode_text(self, type_): return self.visit_NTEXT(type_) def visit_NTEXT(self, type_): return self._extend("NTEXT", type_) def visit_TEXT(self, type_): return self._extend("TEXT", type_) def visit_VARCHAR(self, type_): return self._extend("VARCHAR", type_) def visit_CHAR(self, type_): return self._extend("CHAR", type_) def visit_NCHAR(self, type_): return self._extend("NCHAR", type_) def visit_NVARCHAR(self, type_): return self._extend("NVARCHAR", type_) def visit_date(self, type_): if self.dialect.server_version_info < MS_2008_VERSION: return self.visit_DATETIME(type_) else: return self.visit_DATE(type_) def visit_time(self, type_): if self.dialect.server_version_info < MS_2008_VERSION: return self.visit_DATETIME(type_) else: return self.visit_TIME(type_) def visit_large_binary(self, type_): return self.visit_IMAGE(type_) def visit_IMAGE(self, type_): return "IMAGE" def visit_boolean(self, type_): return self.visit_BIT(type_) def visit_BIT(self, type_): return "BIT" def visit_MONEY(self, type_): return "MONEY" def visit_SMALLMONEY(self, type_): return 'SMALLMONEY' def visit_UNIQUEIDENTIFIER(self, type_): return "UNIQUEIDENTIFIER" def visit_SQL_VARIANT(self, type_): return 'SQL_VARIANT' class 
MSExecutionContext(default.DefaultExecutionContext): _enable_identity_insert = False _select_lastrowid = False _result_proxy = None _lastrowid = None def pre_exec(self): """Activate IDENTITY_INSERT if needed.""" if self.isinsert: tbl = self.compiled.statement.table seq_column = tbl._autoincrement_column insert_has_sequence = seq_column is not None if insert_has_sequence: self._enable_identity_insert = \ seq_column.key in self.compiled_parameters[0] else: self._enable_identity_insert = False self._select_lastrowid = insert_has_sequence and \ not self.compiled.returning and \ not self._enable_identity_insert and \ not self.executemany if self._enable_identity_insert: self.cursor.execute("SET IDENTITY_INSERT %s ON" % self.dialect.identifier_preparer.format_table(tbl)) def post_exec(self): """Disable IDENTITY_INSERT if enabled.""" if self._select_lastrowid: if self.dialect.use_scope_identity: self.cursor.execute( "SELECT scope_identity() AS lastrowid", ()) else: self.cursor.execute("SELECT @@identity AS lastrowid", ()) # fetchall() ensures the cursor is consumed without closing it row = self.cursor.fetchall()[0] self._lastrowid = int(row[0]) if (self.isinsert or self.isupdate or self.isdelete) and \ self.compiled.returning: self._result_proxy = base.FullyBufferedResultProxy(self) if self._enable_identity_insert: self.cursor.execute( "SET IDENTITY_INSERT %s OFF" % self.dialect.identifier_preparer. format_table(self.compiled.statement.table) ) def get_lastrowid(self): return self._lastrowid def handle_dbapi_exception(self, e): if self._enable_identity_insert: try: self.cursor.execute( "SET IDENTITY_INSERT %s OFF" % self.dialect.identifier_preparer.\ format_table(self.compiled.statement.table) ) except: pass def get_result_proxy(self): if self._result_proxy: return self._result_proxy else: return base.ResultProxy(self) class MSSQLCompiler(compiler.SQLCompiler): returning_precedes_values = True extract_map = util.update_copy( compiler.SQLCompiler.extract_map, { 'doy': 'dayofyear', 'dow': 'weekday', 'milliseconds': 'millisecond', 'microseconds': 'microsecond' }) def __init__(self, *args, **kwargs): super(MSSQLCompiler, self).__init__(*args, **kwargs) self.tablealiases = {} def visit_now_func(self, fn, **kw): return "CURRENT_TIMESTAMP" def visit_current_date_func(self, fn, **kw): return "GETDATE()" def visit_length_func(self, fn, **kw): return "LEN%s" % self.function_argspec(fn, **kw) def visit_char_length_func(self, fn, **kw): return "LEN%s" % self.function_argspec(fn, **kw) def visit_concat_op(self, binary, **kw): return "%s + %s" % \ (self.process(binary.left, **kw), self.process(binary.right, **kw)) def visit_match_op(self, binary, **kw): return "CONTAINS (%s, %s)" % ( self.process(binary.left, **kw), self.process(binary.right, **kw)) def get_select_precolumns(self, select): """ MS-SQL puts TOP, it's version of LIMIT here """ if select._distinct or select._limit: s = select._distinct and "DISTINCT " or "" if select._limit: if not select._offset: s += "TOP %s " % (select._limit,) return s return compiler.SQLCompiler.get_select_precolumns(self, select) def limit_clause(self, select): # Limit in mssql is after the select keyword return "" def visit_select(self, select, **kwargs): """Look for ``LIMIT`` and OFFSET in a select statement, and if so tries to wrap it in a subquery with ``row_number()`` criterion. """ if not getattr(select, '_mssql_visit', None) and select._offset: # to use ROW_NUMBER(), an ORDER BY is required. 
orderby = self.process(select._order_by_clause) if not orderby: raise exc.InvalidRequestError('MSSQL requires an order_by when ' 'using an offset.') _offset = select._offset _limit = select._limit select._mssql_visit = True select = select.column( sql.literal_column("ROW_NUMBER() OVER (ORDER BY %s)" \ % orderby).label("mssql_rn") ).order_by(None).alias() limitselect = sql.select([c for c in select.c if c.key!='mssql_rn']) limitselect.append_whereclause("mssql_rn>%d" % _offset) if _limit is not None: limitselect.append_whereclause("mssql_rn<=%d" % (_limit + _offset)) return self.process(limitselect, iswrapper=True, **kwargs) else: return compiler.SQLCompiler.visit_select(self, select, **kwargs) def _schema_aliased_table(self, table): if getattr(table, 'schema', None) is not None: if table not in self.tablealiases: self.tablealiases[table] = table.alias() return self.tablealiases[table] else: return None def visit_table(self, table, mssql_aliased=False, **kwargs): if mssql_aliased: return super(MSSQLCompiler, self).visit_table(table, **kwargs) # alias schema-qualified tables alias = self._schema_aliased_table(table) if alias is not None: return self.process(alias, mssql_aliased=True, **kwargs) else: return super(MSSQLCompiler, self).visit_table(table, **kwargs) def visit_alias(self, alias, **kwargs): # translate for schema-qualified table aliases self.tablealiases[alias.original] = alias kwargs['mssql_aliased'] = True return super(MSSQLCompiler, self).visit_alias(alias, **kwargs) def visit_extract(self, extract, **kw): field = self.extract_map.get(extract.field, extract.field) return 'DATEPART("%s", %s)' % \ (field, self.process(extract.expr, **kw)) def visit_rollback_to_savepoint(self, savepoint_stmt): return ("ROLLBACK TRANSACTION %s" % self.preparer.format_savepoint(savepoint_stmt)) def visit_column(self, column, result_map=None, **kwargs): if column.table is not None and \ (not self.isupdate and not self.isdelete) or self.is_subquery(): # translate for schema-qualified table aliases t = self._schema_aliased_table(column.table) if t is not None: converted = expression._corresponding_column_or_error( t, column) if result_map is not None: result_map[column.name.lower()] = \ (column.name, (column, ), column.type) return super(MSSQLCompiler, self).\ visit_column(converted, result_map=None, **kwargs) return super(MSSQLCompiler, self).visit_column(column, result_map=result_map, **kwargs) def visit_binary(self, binary, **kwargs): """Move bind parameters to the right-hand side of an operator, where possible. 
""" if ( isinstance(binary.left, expression._BindParamClause) and binary.operator == operator.eq and not isinstance(binary.right, expression._BindParamClause) ): return self.process( expression._BinaryExpression(binary.right, binary.left, binary.operator), **kwargs) else: if ( (binary.operator is operator.eq or binary.operator is operator.ne) and ( (isinstance(binary.left, expression._FromGrouping) and isinstance(binary.left.element, expression._ScalarSelect)) or (isinstance(binary.right, expression._FromGrouping) and isinstance(binary.right.element, expression._ScalarSelect)) or isinstance(binary.left, expression._ScalarSelect) or isinstance(binary.right, expression._ScalarSelect) ) ): op = binary.operator == operator.eq and "IN" or "NOT IN" return self.process( expression._BinaryExpression(binary.left, binary.right, op), **kwargs) return super(MSSQLCompiler, self).visit_binary(binary, **kwargs) def returning_clause(self, stmt, returning_cols): if self.isinsert or self.isupdate: target = stmt.table.alias("inserted") else: target = stmt.table.alias("deleted") adapter = sql_util.ClauseAdapter(target) def col_label(col): adapted = adapter.traverse(col) if isinstance(col, expression._Label): return adapted.label(c.key) else: return self.label_select_column(None, adapted, asfrom=False) columns = [ self.process( col_label(c), within_columns_clause=True, result_map=self.result_map ) for c in expression._select_iterables(returning_cols) ] return 'OUTPUT ' + ', '.join(columns) def label_select_column(self, select, column, asfrom): if isinstance(column, expression.Function): return column.label(None) else: return super(MSSQLCompiler, self).\ label_select_column(select, column, asfrom) def for_update_clause(self, select): # "FOR UPDATE" is only allowed on "DECLARE CURSOR" which # SQLAlchemy doesn't use return '' def order_by_clause(self, select, **kw): order_by = self.process(select._order_by_clause, **kw) # MSSQL only allows ORDER BY in subqueries if there is a LIMIT if order_by and (not self.is_subquery() or select._limit): return " ORDER BY " + order_by else: return "" class MSSQLStrictCompiler(MSSQLCompiler): """A subclass of MSSQLCompiler which disables the usage of bind parameters where not allowed natively by MS-SQL. A dialect may use this compiler on a platform where native binds are used. """ ansi_bind_rules = True def visit_in_op(self, binary, **kw): kw['literal_binds'] = True return "%s IN %s" % ( self.process(binary.left, **kw), self.process(binary.right, **kw) ) def visit_notin_op(self, binary, **kw): kw['literal_binds'] = True return "%s NOT IN %s" % ( self.process(binary.left, **kw), self.process(binary.right, **kw) ) def visit_function(self, func, **kw): kw['literal_binds'] = True return super(MSSQLStrictCompiler, self).visit_function(func, **kw) def render_literal_value(self, value, type_): """ For date and datetime values, convert to a string format acceptable to MSSQL. That seems to be the so-called ODBC canonical date format which looks like this: yyyy-mm-dd hh:mi:ss.mmm(24h) For other data types, call the base class implementation. """ # datetime and date are both subclasses of datetime.date if issubclass(type(value), datetime.date): # SQL Server wants single quotes around the date string. 
return "'" + str(value) + "'" else: return super(MSSQLStrictCompiler, self).\ render_literal_value(value, type_) class MSDDLCompiler(compiler.DDLCompiler): def get_column_specification(self, column, **kwargs): colspec = (self.preparer.format_column(column) + " " + self.dialect.type_compiler.process(column.type)) if column.nullable is not None: if not column.nullable or column.primary_key: colspec += " NOT NULL" else: colspec += " NULL" if column.table is None: raise exc.InvalidRequestError( "mssql requires Table-bound columns " "in order to generate DDL") seq_col = column.table._autoincrement_column # install a IDENTITY Sequence if we have an implicit IDENTITY column if seq_col is column: sequence = isinstance(column.default, sa_schema.Sequence) and \ column.default if sequence: start, increment = sequence.start or 1, \ sequence.increment or 1 else: start, increment = 1, 1 colspec += " IDENTITY(%s,%s)" % (start, increment) else: default = self.get_column_default_string(column) if default is not None: colspec += " DEFAULT " + default return colspec def visit_drop_index(self, drop): return "\nDROP INDEX %s.%s" % ( self.preparer.quote_identifier(drop.element.table.name), self.preparer.quote( self._validate_identifier(drop.element.name, False), drop.element.quote) ) class MSIdentifierPreparer(compiler.IdentifierPreparer): reserved_words = RESERVED_WORDS def __init__(self, dialect): super(MSIdentifierPreparer, self).__init__(dialect, initial_quote='[', final_quote=']') def _escape_identifier(self, value): return value def quote_schema(self, schema, force=True): """Prepare a quoted table and schema name.""" result = '.'.join([self.quote(x, force) for x in schema.split('.')]) return result class MSDialect(default.DefaultDialect): name = 'mssql' supports_default_values = True supports_empty_insert = False execution_ctx_cls = MSExecutionContext use_scope_identity = True max_identifier_length = 128 schema_name = "dbo" colspecs = { sqltypes.DateTime : _MSDateTime, sqltypes.Date : _MSDate, sqltypes.Time : TIME, } ischema_names = ischema_names supports_native_boolean = False supports_unicode_binds = True postfetch_lastrowid = True server_version_info = () statement_compiler = MSSQLCompiler ddl_compiler = MSDDLCompiler type_compiler = MSTypeCompiler preparer = MSIdentifierPreparer def __init__(self, query_timeout=None, use_scope_identity=True, max_identifier_length=None, schema_name=u"dbo", **opts): self.query_timeout = int(query_timeout or 0) self.schema_name = schema_name self.use_scope_identity = use_scope_identity self.max_identifier_length = int(max_identifier_length or 0) or \ self.max_identifier_length super(MSDialect, self).__init__(**opts) def do_savepoint(self, connection, name): util.warn("Savepoint support in mssql is experimental and " "may lead to data loss.") connection.execute("IF @@TRANCOUNT = 0 BEGIN TRANSACTION") connection.execute("SAVE TRANSACTION %s" % name) def do_release_savepoint(self, connection, name): pass def initialize(self, connection): super(MSDialect, self).initialize(connection) if self.server_version_info[0] not in range(8, 17): # FreeTDS with version 4.2 seems to report here # a number like "95.10.255". Don't know what # that is. So emit warning. util.warn( "Unrecognized server version info '%s'. Version specific " "behaviors may not function properly. If using ODBC " "with FreeTDS, ensure server version 7.0 or 8.0, not 4.2, " "is configured in the FreeTDS configuration." 
% ".".join(str(x) for x in self.server_version_info) ) if self.server_version_info >= MS_2005_VERSION and \ 'implicit_returning' not in self.__dict__: self.implicit_returning = True def _get_default_schema_name(self, connection): user_name = connection.scalar("SELECT user_name() as user_name;") if user_name is not None: # now, get the default schema query = """ SELECT default_schema_name FROM sys.database_principals WHERE name = ? AND type = 'S' """ try: default_schema_name = connection.scalar(query, [user_name]) if default_schema_name is not None: return unicode(default_schema_name) except: pass return self.schema_name def has_table(self, connection, tablename, schema=None): current_schema = schema or self.default_schema_name columns = ischema.columns if current_schema: whereclause = sql.and_(columns.c.table_name==tablename, columns.c.table_schema==current_schema) else: whereclause = columns.c.table_name==tablename s = sql.select([columns], whereclause) c = connection.execute(s) return c.first() is not None @reflection.cache def get_schema_names(self, connection, **kw): s = sql.select([ischema.schemata.c.schema_name], order_by=[ischema.schemata.c.schema_name] ) schema_names = [r[0] for r in connection.execute(s)] return schema_names @reflection.cache def get_table_names(self, connection, schema=None, **kw): current_schema = schema or self.default_schema_name tables = ischema.tables s = sql.select([tables.c.table_name], sql.and_( tables.c.table_schema == current_schema, tables.c.table_type == u'BASE TABLE' ), order_by=[tables.c.table_name] ) table_names = [r[0] for r in connection.execute(s)] return table_names @reflection.cache def get_view_names(self, connection, schema=None, **kw): current_schema = schema or self.default_schema_name tables = ischema.tables s = sql.select([tables.c.table_name], sql.and_( tables.c.table_schema == current_schema, tables.c.table_type == u'VIEW' ), order_by=[tables.c.table_name] ) view_names = [r[0] for r in connection.execute(s)] return view_names # The cursor reports it is closed after executing the sp. @reflection.cache def get_indexes(self, connection, tablename, schema=None, **kw): current_schema = schema or self.default_schema_name col_finder = re.compile("(\w+)") full_tname = "%s.%s" % (current_schema, tablename) indexes = [] s = sql.text("exec sp_helpindex '%s'" % full_tname) rp = connection.execute(s) if rp.closed: # did not work for this setup. 
return [] for row in rp: if 'primary key' not in row['index_description']: indexes.append({ 'name' : row['index_name'], 'column_names' : col_finder.findall(row['index_keys']), 'unique': 'unique' in row['index_description'] }) return indexes @reflection.cache def get_view_definition(self, connection, viewname, schema=None, **kw): current_schema = schema or self.default_schema_name views = ischema.views s = sql.select([views.c.view_definition], sql.and_( views.c.table_schema == current_schema, views.c.table_name == viewname ), ) rp = connection.execute(s) if rp: view_def = rp.scalar() return view_def @reflection.cache def get_columns(self, connection, tablename, schema=None, **kw): # Get base columns current_schema = schema or self.default_schema_name columns = ischema.columns if current_schema: whereclause = sql.and_(columns.c.table_name==tablename, columns.c.table_schema==current_schema) else: whereclause = columns.c.table_name==tablename s = sql.select([columns], whereclause, order_by=[columns.c.ordinal_position]) c = connection.execute(s) cols = [] while True: row = c.fetchone() if row is None: break (name, type, nullable, charlen, numericprec, numericscale, default, collation) = ( row[columns.c.column_name], row[columns.c.data_type], row[columns.c.is_nullable] == 'YES', row[columns.c.character_maximum_length], row[columns.c.numeric_precision], row[columns.c.numeric_scale], row[columns.c.column_default], row[columns.c.collation_name] ) coltype = self.ischema_names.get(type, None) kwargs = {} if coltype in (MSString, MSChar, MSNVarchar, MSNChar, MSText, MSNText, MSBinary, MSVarBinary, sqltypes.LargeBinary): kwargs['length'] = charlen if collation: kwargs['collation'] = collation if coltype == MSText or \ (coltype in (MSString, MSNVarchar) and charlen == -1): kwargs.pop('length') if coltype is None: util.warn( "Did not recognize type '%s' of column '%s'" % (type, name)) coltype = sqltypes.NULLTYPE if issubclass(coltype, sqltypes.Numeric) and \ coltype is not MSReal: kwargs['scale'] = numericscale kwargs['precision'] = numericprec coltype = coltype(**kwargs) cdict = { 'name' : name, 'type' : coltype, 'nullable' : nullable, 'default' : default, 'autoincrement':False, } cols.append(cdict) # autoincrement and identity colmap = {} for col in cols: colmap[col['name']] = col # We also run an sp_columns to check for identity columns: cursor = connection.execute("sp_columns @table_name = '%s', " "@table_owner = '%s'" % (tablename, current_schema)) ic = None while True: row = cursor.fetchone() if row is None: break (col_name, type_name) = row[3], row[5] if type_name.endswith("identity") and col_name in colmap: ic = col_name colmap[col_name]['autoincrement'] = True colmap[col_name]['sequence'] = dict( name='%s_identity' % col_name) break cursor.close() if ic is not None and self.server_version_info >= MS_2005_VERSION: table_fullname = "%s.%s" % (current_schema, tablename) cursor = connection.execute( "select ident_seed('%s'), ident_incr('%s')" % (table_fullname, table_fullname) ) row = cursor.first() if row is not None and row[0] is not None: colmap[ic]['sequence'].update({ 'start' : int(row[0]), 'increment' : int(row[1]) }) return cols @reflection.cache def get_primary_keys(self, connection, tablename, schema=None, **kw): current_schema = schema or self.default_schema_name pkeys = [] # information_schema.referential_constraints RR = ischema.ref_constraints # information_schema.table_constraints TC = ischema.constraints # information_schema.constraint_column_usage: # the constrained column C = 
ischema.key_constraints.alias('C') # information_schema.constraint_column_usage: # the referenced column R = ischema.key_constraints.alias('R') # Primary key constraints s = sql.select([C.c.column_name, TC.c.constraint_type], sql.and_(TC.c.constraint_name == C.c.constraint_name, C.c.table_name == tablename, C.c.table_schema == current_schema) ) c = connection.execute(s) for row in c: if 'PRIMARY' in row[TC.c.constraint_type.name]: pkeys.append(row[0]) return pkeys @reflection.cache def get_foreign_keys(self, connection, tablename, schema=None, **kw): current_schema = schema or self.default_schema_name # Add constraints #information_schema.referential_constraints RR = ischema.ref_constraints # information_schema.table_constraints TC = ischema.constraints # information_schema.constraint_column_usage: # the constrained column C = ischema.key_constraints.alias('C') # information_schema.constraint_column_usage: # the referenced column R = ischema.key_constraints.alias('R') # Foreign key constraints s = sql.select([C.c.column_name, R.c.table_schema, R.c.table_name, R.c.column_name, RR.c.constraint_name, RR.c.match_option, RR.c.update_rule, RR.c.delete_rule], sql.and_(C.c.table_name == tablename, C.c.table_schema == current_schema, C.c.constraint_name == RR.c.constraint_name, R.c.constraint_name == RR.c.unique_constraint_name, C.c.ordinal_position == R.c.ordinal_position ), order_by = [ RR.c.constraint_name, R.c.ordinal_position]) # group rows by constraint ID, to handle multi-column FKs fkeys = [] fknm, scols, rcols = (None, [], []) def fkey_rec(): return { 'name' : None, 'constrained_columns' : [], 'referred_schema' : None, 'referred_table' : None, 'referred_columns' : [] } fkeys = util.defaultdict(fkey_rec) for r in connection.execute(s).fetchall(): scol, rschema, rtbl, rcol, rfknm, fkmatch, fkuprule, fkdelrule = r rec = fkeys[rfknm] rec['name'] = rfknm if not rec['referred_table']: rec['referred_table'] = rtbl if schema is not None or current_schema != rschema: rec['referred_schema'] = rschema local_cols, remote_cols = \ rec['constrained_columns'],\ rec['referred_columns'] local_cols.append(scol) remote_cols.append(rcol) return fkeys.values()
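As a standalone illustration of the string fallback in _MSDate.result_processor above, here is a hedged sketch; it uses Python 3's str where the original checks the Python 2 basestring, and the sample values are invented.

import datetime
import re

_reg = re.compile(r"(\d+)-(\d+)-(\d+)")

def process_date(value):
    # Mirrors the result processor: datetimes become dates, "YYYY-MM-DD..."
    # strings are parsed with the regex, anything else passes through.
    if isinstance(value, datetime.datetime):
        return value.date()
    elif isinstance(value, str):
        return datetime.date(*[int(x or 0) for x in _reg.match(value).groups()])
    return value

print(process_date("2010-07-04 00:00:00"))           # 2010-07-04
print(process_date(datetime.datetime(2010, 7, 4)))   # 2010-07-04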
mit
-1,975,082,025,182,092,500
34.962278
80
0.55947
false
4.218376
false
false
false
pmuller/ipkg
ipkg/versions.py
1
1177
import __builtin__  # because we override sorted in this module

import pkg_resources


def compare(a, b):
    if a < b:
        return -1
    elif a == b:
        return 0
    else:  # a > b
        return 1


def extract(item):
    if isinstance(item, dict):
        version = item['version']
        revision = item['revision']
    else:
        version = item.version
        revision = item.revision
    return parse(version), parse(str(revision))


def parse(version):
    """Parses a ``version`` string.

    Currently a simple wrapper around ``pkg_resources.parse_version()``,
    for API purposes. Parsing could change later.
    """
    return pkg_resources.parse_version(version)


def sorted(versions, parser=parse, reverse=False):
    """Return the sorted ``versions``.
    """
    return __builtin__.sorted(versions, key=parser, cmp=compare,
                              reverse=reverse)


def most_recent(versions, parser=parse):
    """Returns the most recent version among ``versions``.

    * ``versions`` must be an iterable of versions.
    * ``parser`` defaults to ``parse`` which parses version strings.
    """
    return sorted(versions, reverse=True)[0]
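A quick usage sketch for the helpers above (Python 2 only, since this sorted relies on the cmp keyword); the import path assumes the module is importable as ipkg.versions, matching the path field of this row.

from ipkg.versions import most_recent, sorted as version_sorted

versions = ['1.0', '1.10', '1.2']
# pkg_resources orders '1.10' after '1.2', which plain string sorting gets wrong
print version_sorted(versions)  # ['1.0', '1.2', '1.10']
print most_recent(versions)     # '1.10'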
mit
-3,061,816,019,314,012,000
24.042553
72
0.620221
false
4.188612
false
false
false
pmdp/GIW
mongodb-1/consultas.py
1
10427
# -*- coding: utf-8 -*-

from bottle import run, get, request, template
from pymongo import MongoClient
from os import linesep

mongoclient = MongoClient()
db = mongoclient.giw

# Columns for the tables of exercises 2, 3, 4, 5 and 7
all_table_data = ['Nombre de usuario', 'e-mail', 'Página web', 'Tarjeta de crédito',
                  'Hash de contraseña', 'Nombre', 'Apellido', 'Dirección', 'Aficiones',
                  'Fecha de nacimiento']
# Columns for exercise 6
mid_table_data = ['id', 'e-mail', 'Fecha de nacimiento']

# Function that receives a mongo cursor and builds a list to later render it as html
def get_results_data(c):
    data = []
    # For each element in the cursor returned by the query
    for r in c:
        userData = []
        userData.append(r['_id'])
        userData.append(r['email'])
        userData.append(r['webpage'])
        creditCardData = u"Número: " + r['credit_card']['number'] + linesep
        creditCardData += u"Fecha de expiración: " + r['credit_card']['expire']['month'] + '/' + r['credit_card']['expire']['year']
        userData.append(creditCardData)
        userData.append(r['password'])
        userData.append(r['name'])
        userData.append(r['surname'])
        addressData = "Pais: " + r['address']['country'] + linesep
        addressData += "Zip: " + r['address']['zip'] + linesep
        addressData += "Calle: " + r['address']['street'] + linesep
        addressData += "Num: " + r['address']['num']
        userData.append(addressData)
        likesData = ''
        for like in r['likes']:
            likesData += str(like) + linesep
        userData.append(likesData)
        userData.append(r['birthdate'])
        data.append(userData)
    return data

# Function that receives a list with the arguments that should have reached the server;
# it also receives a flag that says whether all the arguments are mandatory or not
def validate_arguments(args_list, all_needed=False):
    args = request.query
    invalid_args = []
    valid_args = []
    # Check that all the arguments passed are valid
    for a in args:
        # If it is not valid, add it to the list of invalid arguments
        if a not in args_list:
            invalid_args.append(a)
        # Otherwise put it in the list of valid arguments
        else:
            valid_args.append(a)
    if len(invalid_args) != 0:
        return False, show_args_error(invalid_args)
    elif not all_needed and len(valid_args) > 0:
        return True, ''
    elif all_needed and len(valid_args) == len(args) and len(args) > 0:
        return True, ''
    else:
        return False, "<p style='color:red'>No se han recibido los argumentos necesarios</p>"

# Function that shows an error message with the invalid arguments
def show_args_error(invalid_args):
    out = "<p style='color:red'>Argumentos inválidos:</p>\n"
    out += "<ul>"
    for i in invalid_args:
        out += "<li>" + i + "</li>"
    out += "</ul>"
    return out

@get('/find_user')
def find_user():
    # http://localhost:8080/find_user?username=burgoscarla
    valid, msg = validate_arguments(['username'], all_needed=True)
    if valid:
        # Take the username from the GET request
        username = request.query.username
        c = db.usuarios
        # Look up the single user with that id
        res = c.find_one({"_id": username})
        # If that user exists, fill the lists with the data from the DB
        if res:
            # List for simple data
            simple_data = list()
            # List for all the address data
            address = list()
            # List for all the credit card data
            credit_card = list()
            # List of everything the user likes
            likes = list()
            for key, value in res.items():
                if key == 'credit_card':
                    credit_card.append('month : ' + value['expire']['month'])
                    credit_card.append('year : ' + value['expire']['year'])
                    credit_card.append('number : ' + value['number'])
                elif key == 'address':
                    for k, v in value.items():
                        address.append(k + ' : ' +
v)
                elif key == 'likes':
                    for l in value:
                        likes.append(l)
                else:
                    simple_data.append(key + ' : ' + value)
            return template('datos', title=username, simple_data=simple_data,
                            address=address, credit_card=credit_card, likes=likes)
        # If it does not exist, return an error
        else:
            return "<p style='color:red'>El usuario <strong>" + username + " </strong> no existe en la BD.</p>"
    else:
        return msg

@get('/find_users')
def find_users():
    # http://localhost:8080/find_users?name=Luz
    # http://localhost:8080/find_users?name=Luz&surname=Romero
    # http://localhost:8080/find_users?name=Luz&food=hotdog
    valid, msg = validate_arguments(['name', 'surname', 'birthday'])
    if valid:
        # If there is no invalid element, proceed with the query
        name = request.query.name
        surname = request.query.surname
        birth = request.query.birthday
        # Dictionary where the data to search for will go
        data = dict()
        if name:
            data['name'] = name
        if surname:
            data['surname'] = surname
        if birth:
            data['birthdate'] = birth
        c = db.usuarios
        res = c.find(data)
        data = get_results_data(res)
        return template('table', num_results=str(res.count()),
                        table_titles=all_table_data, rows=data)
    else:
        return msg

@get('/find_users_or')
def find_users_or():
    # http://localhost:8080/find_users_or?name=Luz&surname=Corral
    valid, msg = validate_arguments(['name', 'surname', 'birthday'])
    # If there is no invalid element, proceed with the query
    if valid:
        name = request.query.name
        surname = request.query.surname
        birth = request.query.birthday
        # List where the data to search for will go
        data = []
        if name:
            data.append({'name': name})
        if surname:
            data.append({'surname': surname})
        if birth:
            data.append({'birthdate': birth})
        c = db.usuarios
        res = c.find({'$or': data})
        data = get_results_data(res)
        return template('table', num_results=str(res.count()),
                        table_titles=all_table_data, rows=data)
    else:
        return msg

@get('/find_like')
def find_like():
    # http://localhost:8080/find_like?like=football
    valid, msg = validate_arguments(['like'], all_needed=True)
    # If there is no invalid element, proceed with the query
    if valid:
        like = request.query.like
        c = db.usuarios
        res = c.find({'likes': like})
        data = get_results_data(res)
        return template('table', num_results=str(res.count()),
                        table_titles=all_table_data, rows=data)
    else:
        return msg

@get('/find_country')
def find_country():
    # http://localhost:8080/find_country?country=Irlanda
    valid, msg = validate_arguments(['country'], all_needed=True)
    # If there is no invalid element, proceed with the query
    if valid:
        country = request.query.country
        c = db.usuarios
        res = c.find({'address.country': country})
        data = get_results_data(res)
        return template('table', num_results=str(res.count()),
                        table_titles=all_table_data, rows=data)
    else:
        return msg

@get('/find_email_birthdate')
def email_birthdate():
    # http://localhost:8080/find_email_birthdate?from=1973-01-01&to=1990-12-31
    valid, msg = validate_arguments(['from', 'to'], all_needed=True)
    # If there is no invalid element, proceed with the query
    if valid:
        from_date = request.query['from']
        to_date = request.query.to
        c = db.usuarios
        # Birthdate greater than fromDate and less than toDate
        query = {'birthdate': {'$gt': from_date, '$lt': to_date}}
        # query that looks up the birthdates sorted by birthdate and by _id,
        # and only projects the necessary fields
        res = c.find(query, {'_id': 1, 'email': 1, 'birthdate': 1}).sort([('birthdate', 1), ('_id', 1)])
        data = []
        for r in res:
            user_data = []
            user_data.append(r['_id'])
            user_data.append(r['email'])
            user_data.append(r['birthdate'])
            data.append(user_data)
        return template('table', num_results=str(res.count()),
                        table_titles=mid_table_data, rows=data)
    else:
        return msg

@get('/find_country_likes_limit_sorted')
def find_country_likes_limit_sorted():
    # http://localhost:8080/find_country_likes_limit_sorted?country=Irlanda&likes=movies,animals&limit=4&ord=asc
    valid, msg = validate_arguments(['country', 'likes', 'limit', 'ord'], all_needed=True)
    # If there is no invalid element, proceed with the query
    if valid:
        country = request.query.country
        likes = request.query.likes
        limit = request.query.limit
        order = request.query.ord
        # Store in a list all the likes passed as a parameter.
        # We build a list so that $all can read it properly.
        gustos = []
        cadena = ""
        for i in likes:
            if i != ',':
                cadena += i
            else:
                gustos.append(cadena)
                cadena = ""
        gustos.append(cadena)
        # Depending on the sort type, give the order variable an integer value
        if order == 'asc':
            order = 1
        elif order == 'desc':
            order = -1
        c = db.usuarios
        query = {'$and': [{'address.country': country}, {'likes': {'$all': gustos}}]}
        # query that searches by country and likes, sorting by birthdate and with limit = limit
        res = c.find(query).sort('birthdate', int(order)).limit(int(limit))
        data = get_results_data(res)
        return template('table', num_results=str(res.count()),
                        table_titles=all_table_data, rows=data)
    else:
        return msg

if __name__ == "__main__":
    # Do not change host, port or debug
    run(host='localhost', port=8080, debug=True)
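A self-contained sketch of the birthdate range query used by find_email_birthdate above; it assumes a local MongoDB with the same giw.usuarios collection, and the date bounds are arbitrary examples.

from pymongo import MongoClient

db = MongoClient().giw
# Birthdates are stored as ISO "YYYY-MM-DD" strings, so string comparison
# with $gt/$lt matches chronological order.
query = {'birthdate': {'$gt': '1973-01-01', '$lt': '1990-12-31'}}
projection = {'_id': 1, 'email': 1, 'birthdate': 1}
for doc in db.usuarios.find(query, projection).sort([('birthdate', 1), ('_id', 1)]):
    print(doc['_id'], doc['email'], doc['birthdate'])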
gpl-3.0
4,137,989,952,177,386,000
37.779851
177
0.592322
false
3.424382
false
false
false
tuxfux-hlp-notes/python-batches
archieves/Batch-63/12-Logging/seventh.py
1
2105
#!/usr/bin/python
# logging.basicConfig?
# logging.Formatter?
# man date or time.strftime().
# https://docs.python.org/2/library/subprocess.html
# cronjob or scheduler
# import logging.handlers for all the remaining handlers.

from subprocess import Popen, PIPE
from logging.handlers import SysLogHandler
import logging

# logging.basicConfig(filename='my_logs.txt',filemode='a',level=logging.DEBUG,
#                     format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',datefmt='%c')

# Loggers expose the interface that application code directly uses.
# ex: logger - root
# Handlers send the log records (created by loggers) to the appropriate destination.
# https://docs.python.org/2/howto/logging.html#useful-handlers
# ex: filename='my_logs.txt',filemode='a'
# Filters provide a finer grained facility for determining which log records to output.
# ex: level=logging.DEBUG
# Formatters specify the layout of log records in the final output.
# ex: format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',datefmt='%c'

# create logger
logger = logging.getLogger('disk_monitor')  # logger name
logger.setLevel(logging.DEBUG)  # filter for logger

# create syslog handler and set level to debug
ch = SysLogHandler(address="/dev/log")  # handler
ch.setLevel(logging.DEBUG)  # filter for handler

# create formatter
formatter = logging.Formatter('- %(name)s - %(levelname)s - %(message)s')

# add formatter to ch
ch.setFormatter(formatter)  # handler and formatter

# add ch to logger
logger.addHandler(ch)  # logger and handler

# Main
p1 = Popen(['df', '-h', '/'], stdout=PIPE)
p2 = Popen(['tail', '-n', '1'], stdin=p1.stdout, stdout=PIPE)
disk_size = int(p2.communicate()[0].split()[4].split('%')[0])

if disk_size < 50:
    logger.info("The disk looks healthy at {}".format(disk_size))
elif disk_size < 70:
    logger.warning("The disk is getting filled up {}".format(disk_size))
elif disk_size < 80:
    logger.error("your application is sleeping now {}".format(disk_size))
elif disk_size < 100:
    logger.critical("your application is not working {}".format(disk_size))
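For comparison, a hedged sketch of the same disk-usage parsing with subprocess.check_output instead of the chained Popen pair; it assumes, like the original, that `df -h /` reports the Use% value in field index 4 of its last line.

import subprocess

out = subprocess.check_output(['df', '-h', '/']).decode()
last_line = out.strip().splitlines()[-1]           # same role as `tail -n 1`
disk_size = int(last_line.split()[4].rstrip('%'))  # strip the trailing '%'
print(disk_size)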
gpl-3.0
4,377,696,708,954,028,500
32.951613
87
0.705463
false
3.496678
false
false
false
KristianJensen/cameo
cameo/network_analysis/networkx_based.py
1
3295
# Copyright 2015 Novo Nordisk Foundation Center for Biosustainability, DTU. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import, print_function __all__ = ['model_to_network', 'reactions_to_network', 'remove_highly_connected_nodes'] import networkx as nx from cameo.network_analysis.util import distance_based_on_molecular_formula def model_to_network(model, *args, **kwargs): """Convert a model into a networkx graph. Calls reactions_to_network with model.reactions. Parameters ---------- model : SolverBasedModel The model. Returns ------- networkx.MultiDiGraph See Also -------- reactions_to_network """ return reactions_to_network(model.reactions, *args, **kwargs) def reactions_to_network(reactions, max_distance=0.3): """Convert a list of reactions into a networkx graph. Parameters ---------- reactions : list The list of reactions. max_distance : float, optional A threshold on the normalized distance between two compounds. If distance is above this threshold, no edge between those compounds will be created. Returns ------- networkx.MultiDiGraph See Also -------- distance_based_on_molecular_formula """ edges = list() for reaction in reactions: substrates = list(reaction.reactants) for substrate in substrates: products = list(reaction.products) for product in products: try: distance = distance_based_on_molecular_formula(substrate, product, normalize=True) except ValueError: distance = 0. if distance <= max_distance: if reaction.reversibility: edges.append((product, substrate, dict(reaction=reaction))) edges.append((substrate, product, dict(reaction=reaction))) elif reaction.lower_bound > 0: edges.append((substrate, product, dict(reaction=reaction))) else: edges.append((product, substrate, dict(reaction=reaction))) multi_graph = nx.MultiDiGraph(edges) return multi_graph def remove_highly_connected_nodes(network, max_degree=10, ignore=[]): """Remove highly connected nodes (changes network in place). Parameters ---------- network : networkx graph max_degree : int (default 10) Remove nodes with degree > max_degree ignore : list List of nodes to ignore. Returns ------- None """ to_remove = [node for node, degree in network.degree_iter() if degree > max_degree and node not in ignore] network.remove_nodes_from(to_remove)
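A toy demonstration of remove_highly_connected_nodes above on a synthetic graph; degree_iter is a networkx 1.x API, so this sketch assumes that version, and the node names are invented.

import networkx as nx
from cameo.network_analysis.networkx_based import remove_highly_connected_nodes

graph = nx.MultiDiGraph()
for i in range(15):                  # 'hub' ends up with degree 15
    graph.add_edge('hub', 'n%d' % i)
graph.add_edge('n0', 'n1')

remove_highly_connected_nodes(graph, max_degree=10)
print('hub' in graph)                # False: degree 15 > max_degree
print(graph.number_of_nodes())       # 15, the leaf nodes remain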
apache-2.0
-9,001,907,852,848,431,000
31.303922
122
0.640061
false
4.364238
false
false
false
chand3040/cloud_that
lms/djangoapps/shoppingcart/processors/PayPal.py
1
21754
""" Implementation the PayPal processor. To enable this implementation, add the following to lms.auth.json: CC_PROCESSOR_NAME = "PayPal" CC_PROCESSOR = { "PayPal": { "PURCHASE_ENDPOINT": "sandbox or live url of paypal", "CLIENT_ID": "<paypal client_id>", "CLIENT_SECRET": "<paypal client secret>", "MODE": "sandbox | live", "RETURN_URL": 'host/dashboard', "NOTIFY_URL": 'host/paypal', "CANCEL_URL": 'where to redirect if user cancels order' } } """ import time import hmac import binascii import re import json import urlparse import logging from collections import OrderedDict, defaultdict from decimal import Decimal, InvalidOperation from hashlib import sha1 from textwrap import dedent from django.conf import settings from django.utils.translation import ugettext as _ from edxmako.shortcuts import render_to_string from shoppingcart.models import Order from shoppingcart.processors.exceptions import * from shoppingcart.processors.helpers import get_processor_config from microsite_configuration import microsite from django.core.urlresolvers import reverse from paypal.standard.models import ST_PP_COMPLETED, ST_PP_CANCELLED, ST_PP_DENIED from paypal.standard.ipn.signals import valid_ipn_received from paypal.standard.pdt.views import process_pdt logger = logging.getLogger(__name__) def process_postpay_callback(request): """ The top level call to this module, basically This function is handed the callback request after the customer has entered the CC info and clicked "buy" on the external Hosted Order Page. It is expected to verify the callback and determine if the payment was successful. It returns {'success':bool, 'order':Order, 'error_html':str} If successful this function must have the side effect of marking the order purchased and calling the purchased_callbacks of the cart items. If unsuccessful this function should not have those side effects but should try to figure out why and return a helpful-enough error message in error_html. Author: Naresh Makwana created on: 07-Apr-2015 """ logger.info('Handling GET request %s', request.GET) logger.info('Handling POST request %s', request.POST) pdt_obj, failed = process_pdt(request) logger.info('invoice %s', getattr(pdt_obj, 'invoice')) logger.info('mc_currency %s', getattr(pdt_obj, 'mc_currency')) logger.info('payment_status %s', getattr(pdt_obj, 'payment_status')) try: result = payment_accepted(pdt_obj) if result['accepted']: # SUCCESS CASE first, rest are some sort of oddity record_purchase(pdt_obj, result['order']) return {'success': True, 'order': result['order'], 'error_html': ''} else: return {'success': False, 'order': result['order'], 'error_html': get_processor_decline_html(pdt_obj)} except CCProcessorException as error: return {'success': False, 'order': None, # due to exception we may not have the order 'error_html': get_processor_exception_html(error)} def processor_hash(value): """ Performs the base64(HMAC_SHA1(key, value)) used by CyberSource Hosted Order Page """ shared_secret = get_processor_config().get('SHARED_SECRET', '') hash_obj = hmac.new(shared_secret.encode('utf-8'), value.encode('utf-8'), sha1) return binascii.b2a_base64(hash_obj.digest())[:-1] # last character is a '\n', which we don't want def sign(params, signed_fields_key='orderPage_signedFields', full_sig_key='orderPage_signaturePublic'): """ params needs to be an ordered dict, b/c cybersource documentation states that order is important. 
Reverse engineered from PHP version provided by cybersource """ merchant_id = get_processor_config().get('MERCHANT_ID', '') order_page_version = get_processor_config().get('ORDERPAGE_VERSION', '7') serial_number = get_processor_config().get('SERIAL_NUMBER', '') params['merchantID'] = merchant_id params['orderPage_timestamp'] = int(time.time() * 1000) params['orderPage_version'] = order_page_version params['orderPage_serialNumber'] = serial_number fields = u",".join(params.keys()) values = u",".join([u"{0}={1}".format(i, params[i]) for i in params.keys()]) fields_sig = processor_hash(fields) values += u",signedFieldsPublicSignature=" + fields_sig params[full_sig_key] = processor_hash(values) params[signed_fields_key] = fields return params def verify_signatures(ipn_obj): """ Use the signature we receive in the POST back from PayPal to verify the identity of the sender (PayPal) and that the contents of the message have not been tampered with. Args: params (dictionary): The POST parameters we received from PayPal. Returns: dict: Contains the parameters we will use elsewhere, converted to the appropriate types Raises: CCProcessorSignatureException: The calculated signature does not match the signature we received. CCProcessorDataException: The parameters we received from CyberSource were not valid (missing keys, wrong types) """ # First see if the user cancelled the transaction # if so, then not all parameters will be passed back so we can't yet verify signatures if getattr(ipn_obj, 'payment_status') == ST_PP_CANCELLED: raise CCProcessorUserCancelled() # if the user decline the transaction # if so, then amount will not be passed back so we can't yet verify signatures if getattr(ipn_obj, 'payment_status') == ST_PP_DENIED: raise CCProcessorUserDeclined() return ipn_obj def render_purchase_form_html(cart, **kwargs): """ Renders the HTML of the hidden POST form that must be used to initiate a purchase with CyberSource """ return render_to_string('shoppingcart/paypal_form.html', { 'action': get_purchase_endpoint(), 'params': get_signed_purchase_params(cart), }) def get_signed_purchase_params(cart, **kwargs): return sign(get_purchase_params(cart)) def get_purchase_params(cart): cart_items = cart.orderitem_set.all() total_cost = cart.total_cost amount = "{0:0.2f}".format(total_cost) cart_items = cart.orderitem_set.all() params = OrderedDict() params['business'] = settings.PAYPAL_RECEIVER_EMAIL params['invoice'] = "{0:d}".format(cart.id) params['item_number'] = "{0:d}".format(cart.id) params['notify_url'] = get_processor_config().get('NOTIFY_URL', '') params['return'] = get_processor_config().get('RETURN_URL', '') params['cancel_return'] = get_processor_config().get('CANCEL_URL', '') params['currency_code'] = cart.currency.upper() params['orderPage_transactionType'] = 'sale' params['orderNumber'] = "{0:d}".format(cart.id) params['no_shipping'] = 1 params['charset'] = 'utf-8' params['upload'] = 1 for counter, cart_item in enumerate(cart_items): params['item_name_'+str(counter+1)] = cart_item.line_desc params['amount_'+str(counter+1)] = cart_item.list_price params['quantity_'+str(counter+1)] = cart_item.qty params['cmd'] = '_cart' return params def get_purchase_endpoint(): return get_processor_config().get('PURCHASE_ENDPOINT', '') def payment_accepted(ipn_obj): """ Check that paypal has accepted the payment params: a dictionary of POST parameters returned by paypal in their post-payment callback returns: true if the payment was correctly accepted, for the right amount false if the payment was not 
accepted raises: CCProcessorDataException if the returned message did not provide required parameters CCProcessorWrongAmountException if the amount charged is different than the order amount """ #make sure required keys are present and convert their values to the right type valid_params = {} for key, key_type in [('invoice', int), ('mc_currency', str), ('payment_status', str)]: if not hasattr(ipn_obj, key): raise CCProcessorDataException( _("The payment processor did not return a required parameter: {0}").format(key) ) try: valid_params[key] = key_type(getattr(ipn_obj, key)) except ValueError: raise CCProcessorDataException( _("The payment processor returned a badly-typed value {0} for param {1}.").format(getattr(ipn_obj, key), key) ) try: order = Order.objects.get(id=valid_params['invoice']) except Order.DoesNotExist: raise CCProcessorDataException(_("The payment processor accepted an order whose number is not in our system.")) if valid_params['payment_status'] == ST_PP_COMPLETED: try: # Moved reading of charged_amount here from the valid_params loop above because # only 'ACCEPT' messages have a 'mc_gross' parameter charged_amt = Decimal(getattr(ipn_obj, 'mc_gross')) except InvalidOperation: raise CCProcessorDataException( _("The payment processor returned a badly-typed value {0} for param {1}.").format( getattr(ipn_obj, 'mc_gross'), 'mc_gross' ) ) if charged_amt == order.total_cost and valid_params['mc_currency'] == order.currency.upper(): return {'accepted': True, 'amt_charged': charged_amt, 'currency': valid_params['mc_currency'].lower(), 'order': order} else: raise CCProcessorWrongAmountException( _("The amount charged by the processor {0} {1} is different than the total cost of the order {2} {3}.") .format( charged_amt, valid_params['mc_currency'], order.total_cost, order.currency ) ) else: return {'accepted': False, 'amt_charged': 0, 'currency': 'usd', 'order': order} def record_purchase(ipn_obj, order): """ Record the purchase and run purchased_callbacks """ ccnum_str = getattr(ipn_obj, 'card_accountNumber', '') m = re.search("\d", ccnum_str) if m: ccnum = ccnum_str[m.start():] else: ccnum = "####" order.purchase( first=getattr(ipn_obj, 'first_name', ''), last=getattr(ipn_obj, 'last_name', ''), street1=getattr(ipn_obj, 'billTo_street1', ''), street2=getattr(ipn_obj, 'address_street', ''), city=getattr(ipn_obj, 'address_city', ''), state=getattr(ipn_obj, 'address_state', ''), country=getattr(ipn_obj, 'address_country', ''), postalcode=getattr(ipn_obj, 'billTo_postalCode', ''), ccnum=ccnum, cardtype=CARDTYPE_MAP[getattr(ipn_obj, 'card_cardType', 'UNKNOWN')], processor_reply_dump=dict(urlparse.parse_qsl(str(getattr(ipn_obj, 'query', 'UNKNOWN=UNKNOWN')))) ) def get_processor_decline_html(ipn_obj): """Have to parse through the error codes to return a helpful message""" # see if we have an override in the microsites payment_support_email = microsite.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL) msg = _( "Sorry! Our payment processor did not accept your payment. " "The decision they returned was {decision_text}, " "and the reason was {reason_text}. " "You were not charged. " "Please try a different form of payment. " "Contact us with payment-related questions at {email}." 
) formatted = msg.format( decision_text='<span class="decision">{}</span>'.format(getattr(ipn_obj, 'payment_status')), reason_text='<span class="reason">{code}:{msg}</span>'.format( code=params['reasonCode'], msg=REASONCODE_MAP[getattr(ipn_obj,'reason_code')], ), email=payment_support_email, ) return '<p class="error_msg">{}</p>'.format(formatted) def get_processor_exception_html(exception): """Return error HTML associated with exception""" # see if we have an override in the microsites payment_support_email = microsite.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL) if isinstance(exception, CCProcessorDataException): msg = _( "Sorry! Our payment processor sent us back a payment confirmation " "that had inconsistent data!" "We apologize that we cannot verify whether the charge went through " "and take further action on your order." "The specific error message is: {error_message}. " "Your credit card may possibly have been charged. " "Contact us with payment-specific questions at {email}." ) formatted = msg.format( error_message='<span class="exception_msg">{msg}</span>'.format( msg=exception.message, ), email=payment_support_email, ) return '<p class="error_msg">{}</p>'.format(formatted) elif isinstance(exception, CCProcessorWrongAmountException): msg = _( "Sorry! Due to an error your purchase was charged for " "a different amount than the order total! " "The specific error message is: {error_message}. " "Your credit card has probably been charged. " "Contact us with payment-specific questions at {email}." ) formatted = msg.format( error_message='<span class="exception_msg">{msg}</span>'.format( msg=exception.message, ), email=payment_support_email, ) return '<p class="error_msg">{}</p>'.format(formatted) elif isinstance(exception, CCProcessorSignatureException): msg = _( "Sorry! Our payment processor sent us back a corrupted message " "regarding your charge, so we are unable to validate that " "the message actually came from the payment processor. " "The specific error message is: {error_message}. " "We apologize that we cannot verify whether the charge went through " "and take further action on your order. " "Your credit card may possibly have been charged. " "Contact us with payment-specific questions at {email}." ) formatted = msg.format( error_message='<span class="exception_msg">{msg}</span>'.format( msg=exception.message, ), email=payment_support_email, ) return '<p class="error_msg">{}</p>'.format(formatted) # fallthrough case, which basically never happens return '<p class="error_msg">EXCEPTION!</p>' CARDTYPE_MAP = defaultdict(lambda: "UNKNOWN") CARDTYPE_MAP.update( { '001': 'Visa', '002': 'MasterCard', '003': 'American Express', '004': 'Discover', '005': 'Diners Club', '006': 'Carte Blanche', '007': 'JCB', '014': 'EnRoute', '021': 'JAL', '024': 'Maestro', '031': 'Delta', '033': 'Visa Electron', '034': 'Dankort', '035': 'Laser', '036': 'Carte Bleue', '037': 'Carta Si', '042': 'Maestro', '043': 'GE Money UK card' } ) REASONCODE_MAP = defaultdict(lambda: "UNKNOWN REASON") REASONCODE_MAP.update( { '100': _('Successful transaction.'), '101': _('The request is missing one or more required fields.'), '102': _('One or more fields in the request contains invalid data.'), '104': dedent(_( """ The merchantReferenceCode sent with this authorization request matches the merchantReferenceCode of another authorization request that you sent in the last 15 minutes. Possible fix: retry the payment after 15 minutes. """)), '150': _('Error: General system failure. 
Possible fix: retry the payment after a few minutes.'), '151': dedent(_( """ Error: The request was received but there was a server timeout. This error does not include timeouts between the client and the server. Possible fix: retry the payment after some time. """)), '152': dedent(_( """ Error: The request was received, but a service did not finish running in time Possible fix: retry the payment after some time. """)), '201': _('The issuing bank has questions about the request. Possible fix: retry with another form of payment'), '202': dedent(_( """ Expired card. You might also receive this if the expiration date you provided does not match the date the issuing bank has on file. Possible fix: retry with another form of payment """)), '203': dedent(_( """ General decline of the card. No other information provided by the issuing bank. Possible fix: retry with another form of payment """)), '204': _('Insufficient funds in the account. Possible fix: retry with another form of payment'), # 205 was Stolen or lost card. Might as well not show this message to the person using such a card. '205': _('Unknown reason'), '207': _('Issuing bank unavailable. Possible fix: retry again after a few minutes'), '208': dedent(_( """ Inactive card or card not authorized for card-not-present transactions. Possible fix: retry with another form of payment """)), '210': _('The card has reached the credit limit. Possible fix: retry with another form of payment'), '211': _('Invalid card verification number. Possible fix: retry with another form of payment'), # 221 was The customer matched an entry on the processor's negative file. # Might as well not show this message to the person using such a card. '221': _('Unknown reason'), '231': _('Invalid account number. Possible fix: retry with another form of payment'), '232': dedent(_( """ The card type is not accepted by the payment processor. Possible fix: retry with another form of payment """)), '233': _('General decline by the processor. Possible fix: retry with another form of payment'), '234': _( "There is a problem with our CyberSource merchant configuration. Please let us know at {0}" ).format(settings.PAYMENT_SUPPORT_EMAIL), # reason code 235 only applies if we are processing a capture through the API. so we should never see it '235': _('The requested amount exceeds the originally authorized amount.'), '236': _('Processor Failure. Possible fix: retry the payment'), # reason code 238 only applies if we are processing a capture through the API. so we should never see it '238': _('The authorization has already been captured'), # reason code 239 only applies if we are processing a capture or credit through the API, # so we should never see it '239': _('The requested transaction amount must match the previous transaction amount.'), '240': dedent(_( """ The card type sent is invalid or does not correlate with the credit card number. Possible fix: retry with the same card or another form of payment """)), # reason code 241 only applies when we are processing a capture or credit through the API, # so we should never see it '241': _('The request ID is invalid.'), # reason code 242 occurs if there was not a previously successful authorization request or # if the previously successful authorization has already been used by another capture request. # This reason code only applies when we are processing a capture through the API # so we should never see it '242': dedent(_( """ You requested a capture through the API, but there is no corresponding, unused authorization record. 
""")), # we should never see 243 '243': _('The transaction has already been settled or reversed.'), # reason code 246 applies only if we are processing a void through the API. so we should never see it '246': dedent(_( """ The capture or credit is not voidable because the capture or credit information has already been submitted to your processor. Or, you requested a void for a type of transaction that cannot be voided. """)), # reason code 247 applies only if we are processing a void through the API. so we should never see it '247': _('You requested a credit for a capture that was previously voided'), '250': dedent(_( """ Error: The request was received, but there was a timeout at the payment processor. Possible fix: retry the payment. """)), '520': dedent(_( """ The authorization request was approved by the issuing bank but declined by CyberSource.' Possible fix: retry with a different form of payment. """)), } )
agpl-3.0
322,017,873,674,394,940
42.077228
125
0.623472
false
4.254645
true
false
false
gimler/techism2
techism2/ical/views.py
1
3450
#!/usr/local/bin/python
# -*- coding: utf-8 -*-

from django.http import HttpResponse
from django.core.urlresolvers import reverse

from techism2 import service
from datetime import datetime, timedelta
import icalendar
import time


def ical(request):
    ninety_days = datetime.utcnow() + timedelta(days=90)
    event_list = service.get_event_query_set().filter(date_time_begin__lte=ninety_days).order_by('date_time_begin')

    cal = icalendar.Calendar()
    cal['prodid'] = icalendar.vText(u'-//Techism//Techism//DE')
    cal['version'] = icalendar.vText(u'2.0')
    cal['x-wr-calname'] = icalendar.vText(u'Techism')
    cal['x-wr-caldesc'] = icalendar.vText(u'Techism - IT-Events in München')

    for e in event_list:
        event = icalendar.Event()

        # TODO should we generate an UUID when creating the event?
        uid = u'%[email protected]' % (str(e.id))
        event['uid'] = icalendar.vText(uid)
        event['dtstamp'] = icalendar.vDatetime(datetime.utcnow())

        # The sequence field must be incremented each time the event is modified.
        # The trick here is to subtract the create TS from the modify TS and
        # use the difference as sequence.
        sequence = 0
        if e.date_time_created and e.date_time_modified:
            createTimestamp = time.mktime(e.get_date_time_created_utc().timetuple())
            modifyTimestamp = time.mktime(e.get_date_time_modified_utc().timetuple())
            sequence = modifyTimestamp - createTimestamp
        event['sequence'] = icalendar.vInt(sequence)

        # created and last-modified
        if e.date_time_created:
            event['created'] = icalendar.vDatetime(e.get_date_time_created_utc())
        if e.date_time_modified:
            event['last-modified'] = icalendar.vDatetime(e.get_date_time_modified_utc())

        # TENTATIVE, CONFIRMED, CANCELLED
        event['status'] = icalendar.vText(u'CONFIRMED')

        if e.title:
            event['summary'] = icalendar.vText(e.title)
        if e.description:
            event['description'] = icalendar.vText(e.description)
        if e.date_time_begin:
            event['dtstart'] = icalendar.vDatetime(e.get_date_time_begin_utc())
        if e.date_time_end:
            event['dtend'] = icalendar.vDatetime(e.get_date_time_end_utc())
        if e.url:
            relative_url = reverse('event-show', args=[e.id])
            absolute_url = request.build_absolute_uri(relative_url)
            event['url'] = icalendar.vUri(absolute_url)

        # geo value isn't used by iCal readers :-(
        # maybe a trick is to add the geo coordinates to the location field using the following format:
        # $latitude, $longitude ($name, $street, $city)
        if e.location:
            location = u'%s, %s, %s' % (e.location.name, e.location.street, e.location.city)
            event['location'] = icalendar.vText(location)
        if e.location and e.location.latitude and e.location.longitude:
            event['geo'] = icalendar.vGeo((e.location.latitude, e.location.longitude))

        cal.add_component(event)

    response = HttpResponse(cal.as_string())
    response['Content-Type'] = 'text/calendar; charset=UTF-8'
    response['Cache-Control'] = 'no-cache, no-store, max-age=0, must-revalidate'
    response['Pragma'] = 'no-cache'
    response['Expires'] = 'Fri, 01 Jan 1990 00:00:00 GMT'
    return response
apache-2.0
7,792,382,559,504,474,000
43.217949
115
0.627428
false
3.577801
false
false
false
infobip/infobip-api-python-client
infobip/api/model/nc/notify/NumberContextResponse.py
1
1285
# -*- coding: utf-8 -*-
"""This is a generated class and is not intended for modification!
"""
from datetime import datetime
from infobip.util.models import DefaultObject, serializable
from infobip.api.model.nc.notify.NumberContextResponseDetails import NumberContextResponseDetails


class NumberContextResponse(DefaultObject):

    @property
    @serializable(name="results", type=NumberContextResponseDetails)
    def results(self):
        """
        Property is a list of: NumberContextResponseDetails
        """
        return self.get_field_value("results")

    @results.setter
    def results(self, results):
        """
        Property is a list of: NumberContextResponseDetails
        """
        self.set_field_value("results", results)

    def set_results(self, results):
        self.results = results
        return self

    @property
    @serializable(name="bulkId", type=unicode)
    def bulk_id(self):
        """
        Property is of type: unicode
        """
        return self.get_field_value("bulk_id")

    @bulk_id.setter
    def bulk_id(self, bulk_id):
        """
        Property is of type: unicode
        """
        self.set_field_value("bulk_id", bulk_id)

    def set_bulk_id(self, bulk_id):
        self.bulk_id = bulk_id
        return self
apache-2.0
2,975,082,414,965,617,700
25.791667
97
0.637354
false
4.131833
false
false
false
glenc/sp.py
src/sp/utils.py
1
3239
# Set up References
import clr
clr.AddReference("System")
clr.AddReference("Microsoft.SharePoint")

from System import Uri
from Microsoft.SharePoint import *
from Microsoft.SharePoint.Administration import SPWebApplication


# Enumeration
# These are simple enumeration methods for walking over various SharePoint
# objects and collections.

def enum(col, fn):
    """Enumerate a collection and call function fn for each item."""
    for x in col:
        fn(x)


def enum_sites(webapp, fn):
    """
    Enumerate all site collections in the specified web application
    and call the specified function with each site collection.
    """
    # just in case we were passed a URL, get the web app
    webapp = get_webapplication(webapp)
    enum(webapp.Sites, fn)


def enum_webs(site, fn):
    """
    Enumerate all webs beneath the site or web specified and call
    the specified function with each web.
    """
    # do different things based on the type of object provided
    if type(site) is SPWeb:
        enum(site.Webs, fn)
    else:
        site = get_site(site)
        enum(site.RootWeb.Webs, fn)


def enum_all_webs(site, fn):
    """Enumerate all webs in a site collection"""
    site = get_site(site)
    enum(site.AllWebs, fn)


def enum_lists(web, fn):
    """Enumerate all lists in the web specified"""
    web = get_web(web)
    enum(web.Lists, fn)


# Get Object Helper Methods
# These methods take in some sort of object identifier (usually a URL)
# and return the appropriate object instance

def get_webapplication(url):
    """Gets a web application by its URL"""
    if type(url) is SPWebApplication:
        return url
    return SPWebApplication.Lookup(Uri(url))


def get_site(url):
    """Gets a site collection by its URL"""
    if type(url) is SPSite:
        return url
    return SPSite(url)


def get_web(url):
    """Gets a web by its URL"""
    if type(url) is SPWeb:
        return url
    if type(url) is SPSite:
        return url.RootWeb

    site = get_site(url)
    relative_url = url.replace(site.Url, "")
    return site.OpenWeb(relative_url)


def get_list(web, list_name):
    """Gets a list within a web"""
    web = get_web(web)
    return first(web.Lists, lambda l: l.Title == list_name)


def try_get_site(url):
    """Tries to get a site collection but returns false if no site was found"""
    try:
        site = get_site(url)
        return True, site
    except:
        return False, None


def try_get_web(url):
    """Tries to get a web but returns false if no web was found"""
    web = get_web(url)
    if web.Exists:
        return True, web
    else:
        return False, None


def try_get_list(web, list_name):
    """Tries to get a list but returns false if no list was found"""
    l = get_list(web, list_name)
    return l != None, l


# Find Object Helper Methods
# These methods are used to find objects in collections

def list_exists(web, list_name):
    """Checks if a list exists"""
    web = get_web(web)
    match = first(web.Lists, lambda l: l.Title == list_name)
    return match != None


# List/Collection helper methods

def collect(collection, fn):
    """Collects items where the function evaluates as true"""
    results = []
    for item in collection:
        if fn(item):
            # bug fix: the original used Ruby-style 'results << item',
            # which is not valid list appending in Python
            results.append(item)
    return results


def first(collection, fn):
    """Finds the first item in the collection where the function evaluates as true"""
    for item in collection:
        if fn(item):
            return item
    return None
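# --- Illustrative usage sketch (not part of the original file) ---
# In an IronPython session on a SharePoint server, the lookup and enumeration
# helpers compose like this (the site URL is a hypothetical example):
#
#   def show(x): print x.Title
#   site = get_site("http://intranet.example.com")
#   enum_webs(site, show)
#   found, tasks = try_get_list(site.RootWeb, "Tasks")
#   if found:
#       enum(tasks.Items, show)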
bsd-3-clause
7,495,485,383,914,679,000
20.885135
82
0.710096
false
3.15692
false
false
false
dukhlov/oslo.messaging
oslo_messaging/_drivers/zmq_driver/server/consumers/zmq_router_consumer.py
1
3834
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import logging

from oslo_messaging._drivers import base
from oslo_messaging._drivers.zmq_driver.server.consumers\
    import zmq_consumer_base
from oslo_messaging._drivers.zmq_driver.server import zmq_incoming_message
from oslo_messaging._drivers.zmq_driver import zmq_address
from oslo_messaging._drivers.zmq_driver import zmq_async
from oslo_messaging._drivers.zmq_driver import zmq_names
from oslo_messaging._i18n import _LE, _LI

LOG = logging.getLogger(__name__)

zmq = zmq_async.import_zmq()


class RouterIncomingMessage(base.RpcIncomingMessage):

    def __init__(self, context, message, socket, reply_id, msg_id, poller):
        super(RouterIncomingMessage, self).__init__(context, message)
        self.socket = socket
        self.reply_id = reply_id
        self.msg_id = msg_id
        self.message = message
        poller.resume_polling(socket)

    def reply(self, reply=None, failure=None, log_failure=True):
        """Reply is not needed for non-call messages"""

    def acknowledge(self):
        LOG.debug("Not sending acknowledge for %s", self.msg_id)

    def requeue(self):
        """Requeue is not supported"""


class RouterConsumer(zmq_consumer_base.SingleSocketConsumer):

    def __init__(self, conf, poller, server):
        super(RouterConsumer, self).__init__(conf, poller, server, zmq.ROUTER)
        self.matchmaker = server.matchmaker
        self.host = zmq_address.combine_address(self.conf.rpc_zmq_host,
                                                self.port)
        self.targets = zmq_consumer_base.TargetsManager(
            conf, self.matchmaker, self.host, zmq.ROUTER)
        LOG.info(_LI("[%s] Run ROUTER consumer"), self.host)

    def listen(self, target):
        LOG.info(_LI("[%(host)s] Listen to target %(target)s"),
                 {'host': self.host, 'target': target})
        self.targets.listen(target)

    def cleanup(self):
        super(RouterConsumer, self).cleanup()
        self.targets.cleanup()

    def _receive_request(self, socket):
        reply_id = socket.recv()
        empty = socket.recv()
        assert empty == b'', 'Bad format: empty delimiter expected'
        request = socket.recv_pyobj()
        return request, reply_id

    def receive_message(self, socket):
        try:
            request, reply_id = self._receive_request(socket)
            LOG.debug("[%(host)s] Received %(type)s, %(id)s, %(target)s",
                      {"host": self.host,
                       "type": request.msg_type,
                       "id": request.message_id,
                       "target": request.target})

            if request.msg_type == zmq_names.CALL_TYPE:
                return zmq_incoming_message.ZmqIncomingRequest(
                    socket, reply_id, request, self.poller)
            elif request.msg_type in zmq_names.NON_BLOCKING_TYPES:
                return RouterIncomingMessage(
                    request.context, request.message, socket, reply_id,
                    request.message_id, self.poller)
            else:
                LOG.error(_LE("Unknown message type: %s"), request.msg_type)
        except zmq.ZMQError as e:
            LOG.error(_LE("Receiving message failed: %s"), str(e))
apache-2.0
3,290,392,908,728,405,500
37.727273
78
0.6265
false
3.968944
false
false
false
stormi/tsunami
src/primaires/objet/commandes/remplir/__init__.py
1
4652
# -*-coding:Utf-8 -*

# Copyright (c) 2010 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
#   list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
#   may be used to endorse or promote products derived from this software
#   without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

"""Package containing the 'remplir' (fill) command."""

from primaires.interpreteur.commande.commande import Commande


class CmdRemplir(Commande):

    """The 'remplir' command."""

    def __init__(self):
        """Command constructor."""
        Commande.__init__(self, "remplir", "fill")
        self.nom_categorie = "objets"
        self.schema = "<plat:nom_objet> avec/with (<nombre>) <nom_objet>"
        self.aide_courte = "remplit un plat de nourriture"
        self.aide_longue = \
            "Cette commande permet de manipuler des plats (assiette, " \
            "bol voire poêlon, marmite) en y mettant des objets de type " \
            "nourriture. Un repas pris de cette manière sera meilleur " \
            "et plus nourrissant."

    def ajouter(self):
        """Method called when the command is added to the interpreter."""
        nom_objet = self.noeud.get_masque("nom_objet")
        nom_objet.proprietes["conteneurs"] = \
            "(personnage.equipement.inventaire_simple.iter_objets_qtt(" \
            "True), )"
        nom_objet.proprietes["quantite"] = "True"
        nom_objet.proprietes["conteneur"] = "True"
        plat = self.noeud.get_masque("plat")
        plat.prioritaire = True
        plat.proprietes["conteneurs"] = \
            "(personnage.equipement.inventaire, " \
            "personnage.salle.objets_sol)"
        plat.proprietes["types"] = "('conteneur de nourriture', )"

    def interpreter(self, personnage, dic_masques):
        """Command interpretation method."""
        personnage.agir("poser")
        nombre = 1
        if dic_masques["nombre"]:
            nombre = dic_masques["nombre"].nombre
        objets = list(dic_masques["nom_objet"].objets_qtt_conteneurs)[:nombre]
        dans = dic_masques["plat"].objet

        pose = 0
        poids_total = dans.poids
        for objet, qtt, conteneur in objets:
            if not objet.peut_prendre:
                personnage << "Vous ne pouvez pas prendre {} avec vos " \
                        "mains...".format(objet.get_nom())
                return
            if not objet.est_de_type("nourriture"):
                personnage << "|err|Ceci n'est pas de la nourriture.|ff|"
                return
            poids_total += objet.poids
            if poids_total > dans.poids_max:
                if pose == 0:
                    personnage << "Vous ne pouvez rien y poser de plus."
                    return
                else:
                    break
            pose += 1
            if qtt > nombre:
                qtt = nombre
            conteneur.retirer(objet, qtt)
            dans.nourriture.append(objet)

        if pose < qtt:
            pose = qtt
        personnage << "Vous déposez {} dans {}.".format(
                objet.get_nom(pose), dans.nom_singulier)
        personnage.salle.envoyer("{{}} dépose {} dans {}.".format(
                objet.get_nom(pose), dans.nom_singulier), personnage)
bsd-3-clause
-5,609,864,221,507,276,000
41.981481
79
0.615252
false
3.595662
false
false
false
SVilgelm/CloudFerry
cloudferry/lib/base/action/is_end_iter.py
1
1174
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from cloudferry.lib.base.action import action
from cloudferry.lib.utils import utils as utl


class IsEndIter(action.Action):

    def __init__(self, init,
                 iter_info_name='info_iter',
                 resource_name=utl.INSTANCES_TYPE):
        self.iter_info_name = iter_info_name
        self.resource_name = resource_name
        super(IsEndIter, self).__init__(init)

    def run(self, **kwargs):
        info = kwargs[self.iter_info_name]
        objs = info[self.resource_name]
        if objs:
            self.num_element = 1
        else:
            self.num_element = 0
        return {}
apache-2.0
8,738,482,862,329,376,000
32.542857
70
0.682283
false
3.799353
false
false
false
punchagan/zulip
zerver/webhooks/pagerduty/view.py
1
7008
# Webhooks for external integrations.
from typing import Any, Dict, Sequence

from django.http import HttpRequest, HttpResponse

from zerver.decorator import webhook_view
from zerver.lib.exceptions import UnsupportedWebhookEventType
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import check_send_webhook_message
from zerver.models import UserProfile

PAGER_DUTY_EVENT_NAMES = {
    "incident.trigger": "triggered",
    "incident.acknowledge": "acknowledged",
    "incident.unacknowledge": "unacknowledged",
    "incident.resolve": "resolved",
    "incident.assign": "assigned",
    "incident.escalate": "escalated",
    "incident.delegate": "delineated",
}

PAGER_DUTY_EVENT_NAMES_V2 = {
    "incident.trigger": "triggered",
    "incident.acknowledge": "acknowledged",
    "incident.resolve": "resolved",
    "incident.assign": "assigned",
}

ASSIGNEE_TEMPLATE = "[{username}]({url})"

INCIDENT_WITH_SERVICE_AND_ASSIGNEE = (
    "Incident [{incident_num}]({incident_url}) {action} by [{service_name}]"
    "({service_url}) (assigned to {assignee_info}):\n\n``` quote\n{trigger_message}\n```"
)

INCIDENT_WITH_ASSIGNEE = """
Incident [{incident_num}]({incident_url}) {action} by {assignee_info}:

``` quote
{trigger_message}
```
""".strip()

INCIDENT_ASSIGNED = """
Incident [{incident_num}]({incident_url}) {action} to {assignee_info}:

``` quote
{trigger_message}
```
""".strip()

INCIDENT_RESOLVED_WITH_AGENT = """
Incident [{incident_num}]({incident_url}) resolved by {resolving_agent_info}:

``` quote
{trigger_message}
```
""".strip()

INCIDENT_RESOLVED = """
Incident [{incident_num}]({incident_url}) resolved:

``` quote
{trigger_message}
```
""".strip()


def build_pagerduty_formatdict(message: Dict[str, Any]) -> Dict[str, Any]:
    format_dict: Dict[str, Any] = {}
    format_dict["action"] = PAGER_DUTY_EVENT_NAMES[message["type"]]

    format_dict["incident_id"] = message["data"]["incident"]["id"]
    format_dict["incident_num"] = message["data"]["incident"]["incident_number"]
    format_dict["incident_url"] = message["data"]["incident"]["html_url"]

    format_dict["service_name"] = message["data"]["incident"]["service"]["name"]
    format_dict["service_url"] = message["data"]["incident"]["service"]["html_url"]

    if message["data"]["incident"].get("assigned_to_user", None):
        assigned_to_user = message["data"]["incident"]["assigned_to_user"]
        format_dict["assignee_info"] = ASSIGNEE_TEMPLATE.format(
            username=assigned_to_user["email"].split("@")[0],
            url=assigned_to_user["html_url"],
        )
    else:
        format_dict["assignee_info"] = "nobody"

    if message["data"]["incident"].get("resolved_by_user", None):
        resolved_by_user = message["data"]["incident"]["resolved_by_user"]
        format_dict["resolving_agent_info"] = ASSIGNEE_TEMPLATE.format(
            username=resolved_by_user["email"].split("@")[0],
            url=resolved_by_user["html_url"],
        )

    trigger_message = []
    trigger_summary_data = message["data"]["incident"]["trigger_summary_data"]
    if trigger_summary_data is not None:
        trigger_subject = trigger_summary_data.get("subject", "")
        if trigger_subject:
            trigger_message.append(trigger_subject)

        trigger_description = trigger_summary_data.get("description", "")
        if trigger_description:
            trigger_message.append(trigger_description)
    format_dict["trigger_message"] = "\n".join(trigger_message)
    return format_dict


def build_pagerduty_formatdict_v2(message: Dict[str, Any]) -> Dict[str, Any]:
    format_dict = {}
    format_dict["action"] = PAGER_DUTY_EVENT_NAMES_V2[message["event"]]

    format_dict["incident_id"] = message["incident"]["id"]
    format_dict["incident_num"] = message["incident"]["incident_number"]
    format_dict["incident_url"] = message["incident"]["html_url"]

    format_dict["service_name"] = message["incident"]["service"]["name"]
    format_dict["service_url"] = message["incident"]["service"]["html_url"]

    assignments = message["incident"]["assignments"]
    if assignments:
        assignee = assignments[0]["assignee"]
        format_dict["assignee_info"] = ASSIGNEE_TEMPLATE.format(
            username=assignee["summary"], url=assignee["html_url"]
        )
    else:
        format_dict["assignee_info"] = "nobody"

    last_status_change_by = message["incident"].get("last_status_change_by")
    if last_status_change_by is not None:
        format_dict["resolving_agent_info"] = ASSIGNEE_TEMPLATE.format(
            username=last_status_change_by["summary"],
            url=last_status_change_by["html_url"],
        )

    trigger_description = message["incident"].get("description")
    if trigger_description is not None:
        format_dict["trigger_message"] = trigger_description
    return format_dict


def send_formated_pagerduty(
    request: HttpRequest, user_profile: UserProfile, message_type: str, format_dict: Dict[str, Any]
) -> None:
    if message_type in ("incident.trigger", "incident.unacknowledge"):
        template = INCIDENT_WITH_SERVICE_AND_ASSIGNEE
    elif message_type == "incident.resolve" and format_dict.get("resolving_agent_info") is not None:
        template = INCIDENT_RESOLVED_WITH_AGENT
    elif message_type == "incident.resolve" and format_dict.get("resolving_agent_info") is None:
        template = INCIDENT_RESOLVED
    elif message_type == "incident.assign":
        template = INCIDENT_ASSIGNED
    else:
        template = INCIDENT_WITH_ASSIGNEE

    subject = "Incident {incident_num}".format(**format_dict)
    body = template.format(**format_dict)
    check_send_webhook_message(request, user_profile, subject, body)


@webhook_view("PagerDuty")
@has_request_variables
def api_pagerduty_webhook(
    request: HttpRequest,
    user_profile: UserProfile,
    payload: Dict[str, Sequence[Dict[str, Any]]] = REQ(argument_type="body"),
) -> HttpResponse:
    for message in payload["messages"]:
        message_type = message.get("type")

        # If the message has no "type" key, then this payload came from a
        # Pagerduty Webhook V2.
        if message_type is None:
            break

        if message_type not in PAGER_DUTY_EVENT_NAMES:
            raise UnsupportedWebhookEventType(message_type)

        format_dict = build_pagerduty_formatdict(message)
        send_formated_pagerduty(request, user_profile, message_type, format_dict)

    for message in payload["messages"]:
        event = message.get("event")

        # If the message has no "event" key, then this payload came from a
        # Pagerduty Webhook V1.
        if event is None:
            break

        if event not in PAGER_DUTY_EVENT_NAMES_V2:
            raise UnsupportedWebhookEventType(event)

        format_dict = build_pagerduty_formatdict_v2(message)
        send_formated_pagerduty(request, user_profile, event, format_dict)

    return json_success()
apache-2.0
2,138,208,620,822,185,200
34.21608
100
0.661244
false
3.604938
false
false
false
peterkuma/tjrapid
ob/views.py
1
1560
# -*- coding: utf-8 -*- from django.shortcuts import render from django.template import RequestContext from django.utils import translation from django.shortcuts import get_object_or_404 from django.http import Http404, HttpResponseRedirect from main.models import * from ob.models import * def events(request, category_name): category = get_object_or_404(Category, name=category_name) events = Event.objects.filter(category=category) return render(request, 'ob/events.html', { 'events': events, 'category': category, }, RequestContext(request) ) def event(request, name, category_name): category = get_object_or_404(Category, name=category_name) event = get_object_or_404(Event, category=category, name=name) return render(request, 'ob/event.html', { 'event': event, 'category': category, }, RequestContext(request) ) def attachment(request, category_name, event_name, name): category = get_object_or_404(Category, name=category_name) event = get_object_or_404(Event, category=category, name=event_name) for a in event.attachments.all(): if os.path.basename(a.file.name) == name: return HttpResponseRedirect(a.file.url) raise Http404 def members(request, category_name): members_m = Member.objects.filter(category__startswith='M') members_w = Member.objects.filter(category__startswith='W') category = Category.objects.get(name=category_name) return render(request, 'ob/members.html', { 'members_m': members_m, 'members_w': members_w, 'category': category, }, RequestContext(request) )
mit
-8,542,457,093,815,724,000
25.896552
69
0.730128
false
3.203285
false
false
false
google-research/google-research
enas_lm/src/tpu/data_utils.py
1
3091
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Data input pipeline for TPUs."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import pickle
import numpy as np
import tensorflow.compat.v1 as tf

gfile = tf.gfile


def _pad_to_batch(batch_size, data):
  """Pad `Tensor`s in data so that `N == batch_size` and return `mask`."""
  x = data['x']
  curr_batch_size = tf.shape(x)[0]
  if curr_batch_size == batch_size:
    masks = tf.ones([batch_size], dtype=tf.float32)
    return data, masks

  batch_diff = batch_size - curr_batch_size
  padded_data = {}
  for key, val in data.items():
    val = tf.pad(val, [[0, batch_diff]] + [[0, 0]] * (val.shape.ndims - 1))
    val.set_shape([batch_size] + val.shape.as_list()[1:])
    padded_data[key] = val
  masks = tf.pad(tf.ones([curr_batch_size], dtype=tf.float32),
                 [[0, batch_diff]])
  masks.set_shape([batch_size])
  return padded_data, masks


def input_fn(params):
  """For `TPUEstimator`."""
  with gfile.GFile(params.data_path, 'rb') as finp:
    x_train, x_valid, x_test, _, _ = pickle.load(finp)
  tf.logging.info('-' * 80)
  tf.logging.info('train_size: {0}'.format(np.size(x_train)))
  tf.logging.info('valid_size: {0}'.format(np.size(x_valid)))
  tf.logging.info(' test_size: {0}'.format(np.size(x_test)))

  def _build_dataset(data, batch_size, bptt_steps):
    """Create LM dataset from a `data` tensor."""
    num_batches = np.size(data) // batch_size
    data = np.reshape(data[:batch_size*num_batches], [batch_size, num_batches])
    data = np.transpose(data)
    dataset = tf.data.Dataset.from_tensor_slices({'x': data[:-1],
                                                  'y': data[1:]})
    dataset = dataset.repeat()
    dataset = dataset.batch(batch_size=bptt_steps, drop_remainder=True)

    def pad_to_batch(data):
      padded_data, masks = _pad_to_batch(bptt_steps, data)
      return padded_data, masks

    dataset = dataset.map(map_func=pad_to_batch)
    dataset = dataset.prefetch(2)  # Prefetch overlaps in-feed with training
    return dataset

  if params.task_mode == 'train':
    return _build_dataset(x_train, params.train_batch_size, params.bptt_steps)
  elif params.task_mode == 'valid':
    return _build_dataset(x_valid, params.eval_batch_size, params.bptt_steps)
  elif params.task_mode == 'test':
    return _build_dataset(x_test, params.eval_batch_size, params.bptt_steps)
  else:
    raise ValueError('Unknown task_mode {0}'.format(params.task_mode))
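# --- Illustrative sketch (not part of the original file) ---
# What _pad_to_batch does, on a toy example: a final partial batch of 3
# examples padded up to batch_size=4, with a float mask marking real rows.
#
#   data = {'x': tf.constant([[1, 2], [3, 4], [5, 6]])}
#   padded, masks = _pad_to_batch(4, data)
#   # padded['x'] has shape [4, 2] (one all-zero row appended)
#   # masks == [1., 1., 1., 0.]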
apache-2.0
5,911,740,309,309,060,000
36.240964
79
0.664833
false
3.250263
false
false
false
PVirie/aknowthai
src/test.py
1
1728
import gen
import network as ann
import numpy as np
import util


def eval(neural_net, data, labels):
    classes, alphas = neural_net.scan(data, gen.get_default_total_code())

    data3ch = util.cvtColorGrey2RGB(data)
    red = np.array([1.0, 0.0, 0.0], dtype=np.float32)
    for b in xrange(alphas.shape[0]):
        for c in xrange(alphas.shape[1]):
            data3ch[b, c, int(np.floor((1.0 - alphas[b, c]) * (data3ch.shape[2] - 1))), :] = red
    tile = util.make_tile(data3ch, rows=600, cols=800, flip=True)
    util.numpy_to_image(tile).show()

    # now get only classes corresponding to high alphas
    index_output = np.argmax(classes, axis=2)
    util.save_txt(index_output, "../artifacts/" + "data.out")

    count = 0
    correct = 0
    for b in xrange(labels.shape[0]):
        for c in xrange(labels.shape[1]):
            if labels[b, c] > 0:
                correct += 1 if labels[b, c] == index_output[b, c] else 0
                count += 1
    print "Percent correct = ", correct * 100.0 / count

    collector = []
    for b in xrange(alphas.shape[0]):
        read_index = 0
        converted = gen.indices_to_unicode(index_output[b])
        read_word = u""
        for c in xrange(alphas.shape[1]):
            if alphas[b, c] > 0.5:
                read_word = read_word + converted[read_index]
                read_index = read_index + 1
        print read_word
        collector.append(read_word)

    return collector


words, imgs = gen.get_tuples(range(100))
word_mat, img_mat = gen.prepare_input_tensors(words, imgs)

nn = ann.Network(img_mat.shape, word_mat.shape, gen.get_default_total_code(), 100)
nn.load_session("../artifacts/" + "test_weight")

eval(nn, img_mat, word_mat)

# raw_input()
mit
-4,338,133,510,740,657,000
31.603774
96
0.601852
false
3.141818
false
false
false
xingnix/learning
machinelearning/python/computationallearningtheory/weightedmajority.py
1
2790
import re
import numpy as np

input_string = """
Example Sky AirTemp Humidity Wind Water Forecast EnjoySport
1 Sunny Warm Normal Strong Warm Same Yes
2 Sunny Warm High Strong Warm Same Yes
3 Rainy Cold High Strong Warm Change No
4 Sunny Warm High Strong Cool Change Yes"""

lines = map(lambda x: filter(lambda y: y != '', re.split(' +', x))[1:],  # drop first item "Example"
            filter(lambda x: x != '', re.split('\n', input_string)))
names, data = lines[0], lines[1:]
data_lines = map(lambda x: dict(zip(names, x)), data)

values = dict(zip(names,
                  reduce(lambda x, y: map(lambda z: z[0] | set([z[1]]) if type(z[0]) is set else set(z),
                                          zip(x, y)),
                         lines[1:])))
names, target = names[:-1], names[-1]  # the last item is target
p = 'Yes'
n = 'No'


def listconcept(names, values):
    # rules=dict.fromkeys(names,'?')
    rules = [{}]
    newrules = []
    for name in names:
        for rule in rules:
            for value in values[name]:
                r = rule.copy()
                r[name] = value
                newrules.append(r)
            rule[name] = '?'
            newrules.append(rule)
        rules = newrules
        newrules = []
    return rules


# Note: this second definition shadows the one above (as in the original file).
def listconcept(names, values):
    # rules=dict.fromkeys(names,'?')
    rules = [{}]
    newrules = []
    for name in names:
        for rule in rules:
            for value in values[name] | set(['?']):
                r = rule.copy()
                r[name] = value
                newrules.append(r)
        rules = newrules
        newrules = []
    return rules


def predict(h, sample):
    names = h.keys()
    p = 1 if (reduce(lambda x, y: x and y,
                     map(lambda name: sample[name] == h[name] or h[name] == '?', names))) else 0
    return {"Yes": p, "No": 1 - p}


def weight(samples, names, values):
    beta = 0.1
    w = np.ones(len(H))
    for sample in samples:
        p = {"Yes": 0, "No": 0}
        for i in range(len(H)):
            a = predict(H[i], sample)
            if a["Yes"] > a["No"]:
                c = "Yes"
            else:
                c = "No"
            if sample[target] != c:
                w[i] *= beta
    return w


def weightedmajorityclassifier(H, w, sample):
    p = {"Yes": 0, "No": 0}
    for i in range(len(H)):
        prediction = predict(H[i], sample)
        for t in ["Yes", "No"]:
            p[t] += prediction[t] * w[i]
    return p


H = listconcept(names, values)
w = weight(data_lines, names, values)
weightedmajorityclass = weightedmajorityclassifier(H, w, data_lines[0])
print "Weighted Majority Classification : \n data: ", data_lines[0], "\n class ", weightedmajorityclass
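# --- Illustrative note (not part of the original file) ---
# Weighted Majority in one line of arithmetic: with beta = 0.1, a hypothesis
# that misclassifies k of the training samples ends up with weight 0.1**k,
# so after e.g. 2 mistakes its vote counts 0.01 versus 1.0 for a hypothesis
# with no mistakes; the final class is the argmax over the per-label weight
# sums computed in weightedmajorityclassifier().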
gpl-3.0
3,463,591,842,420,936,000
29.659341
104
0.513262
false
3.353365
false
false
false
cpbl/cpblUtilities
matplotlib_utils.py
1
7242
#!/usr/bin/python
import matplotlib.pyplot as plt


def prepare_figure_for_publication(ax=None,
                                   width_cm=None,
                                   width_inches=None,
                                   height_cm=None,
                                   height_inches=None,
                                   fontsize=None,
                                   fontsize_labels=None,
                                   fontsize_ticklabels=None,
                                   fontsize_legend=None,
                                   fontsize_annotations=None,
                                   TeX=True,  # Used for ax=None case (setup)
                                   ):
    """
    Two ways to use this:
    (1) Before creating a figure, with ax=None
    (2) To fine-tune a figure, using ax

    One reasonable option for making compact figures like for Science/Nature is to create everything at double scale.
    This works a little more naturally with Matplotlib's default line/axis/etc sizes.

    Also, if you change sizes of, e.g. xticklabels and x-axis labels after they've been created,
    they will not necessarily be relocated appropriately. So you can call prepare_figure_for_publication
    with no ax/fig argument to set up figure defaults prior to creating the figure in the first place.

    Some wisdom on graphics:
    - 2015: How to produce PDFs of a given width, with chosen font size, etc:
      (1) Fix width to journal specifications from the beginning / early. Adjust height as you go,
          according to preferences for aspect ratio: figure(figsize=(11.4/2.54, chosen height))
      (2) Do not use 'bbox_inches="tight"' in savefig('fn.pdf'). Instead, use the subplot_adjust
          options to manually adjust edges to get the figure content to fit in the PDF output
      (3) Be satisfied with that. If you must get something exactly tight and exactly the right
          size, you do this in Inkscape. But you cannot scale the content and bbox in the same
          step. Load PDF, select all, choose the units in the box at the top of the main menu bar,
          click on the lock there, set the width. Then, in File Properties dialog, resize file to
          content. Save.
    """
    if ax is None:  # Set up plot settings, prior to creation of a figure
        params = {
            'axes.labelsize': fontsize_labels if fontsize_labels is not None else fontsize,
            'font.size': fontsize,
            'legend.fontsize': fontsize_legend if fontsize_legend is not None else fontsize,
            'xtick.labelsize': fontsize_ticklabels if fontsize_ticklabels is not None else fontsize_labels if fontsize_labels is not None else fontsize,
            'ytick.labelsize': fontsize_ticklabels if fontsize_ticklabels is not None else fontsize_labels if fontsize_labels is not None else fontsize,
            'figure.figsize': (width_inches, height_inches),
        }
        if TeX:
            params.update({
                'text.usetex': TeX,
                'text.latex.preamble': r'\usepackage{amsmath} \usepackage{amssymb}',
                'text.latex.unicode': True,
            })
        if not TeX:
            params.update({'text.latex.preamble': ''})

        plt.rcParams.update(params)
        return

    fig = ax.get_figure()
    if width_inches:
        fig.set_figwidth(width_inches)
        assert width_cm is None
    if height_inches:
        fig.set_figheight(height_inches)
        assert height_cm is None
    if width_cm:
        fig.set_figwidth(width_cm / 2.54)
        assert width_inches is None
    if height_cm:
        fig.set_figheight(height_cm / 2.54)
        assert height_inches is None

    # ax = plt.subplot(111, xlabel='x', ylabel='y', title='title')
    for item in fig.findobj(plt.Text) + [ax.title, ax.xaxis.label, ax.yaxis.label] + ax.get_xticklabels() + ax.get_yticklabels():
        if fontsize:
            item.set_fontsize(fontsize)


def plot_diagonal(xdata=None, ydata=None, ax=None, **args):
    """ Plot a 45-degree line
    """
    import pandas as pd
    if ax is None:
        ax = plt.gca()
    # LL = min(min(df[xv]), min(df[yv])), max(max(df[xv]), max(df[yv]))
    if xdata is None and ydata is None:
        xl, yl = ax.get_xlim(), ax.get_ylim()
        LL = max(min(xl), min(yl)), min(max(xl), max(yl)),
    elif xdata is not None and ydata is None:
        assert isinstance(xdata, pd.DataFrame)
        dd = xdata.dropna()
        LL = dd.min().max(), dd.max().min()
    else:
        assert xdata is not None
        assert ydata is not None
        # if isinstance(xdata, pd.Series): xdata = xdata.vlu
        xl, yl = xdata, ydata
        LL = max(min(xl), min(yl)), min(max(xl), max(yl)),
    ax.plot(LL, LL, **args)


def figureFontSetup(uniform=12, figsize='paper', amsmath=True):
    """
    This is deprecated. Use prepare_figure_for_publication

    Set font size settings for matplotlib figures so that they are reasonable
    for exporting to PDF to use in publications / presentations..... [different!]
    If not for paper, this is not yet useful.

    Here are some good sizes for paper:
        figure(468, figsize=(4.6, 2))  # in inches
        figureFontSetup(uniform=12)  # 12 pt font
    for a subplot(211) or for a single plot (?)
        figure(127, figsize=(4.6, 4))  # in inches. Only works if figure is not open from last run!

    why does the following not work to deal with the bad bounding-box size problem?!
    inkscape -f GSSseries-happyLife-QC-bw.pdf --verb=FitCanvasToDrawing -A tmp.pdf
    .: Due to inkscape cli sucks! bug.
    --> See savefigall for an inkscape implementation.

    2012 May: new matplotlib has tight_layout(). But it rejigs all subplots etc.
    My inkscape solution is much better, since it doesn't change the layout.
    However, it does mean that the original size is not respected!
    ... Still, my favourite way from now on to make figures is to append the font size
    setting to the name, ie to make one for a given intended final size, and to do no
    rescaling in LaTeX. Use tight_layout() if it looks okay, but the inkscape solution
    in general.

    n.b. a clf() erases size settings on a figure!
    """
    figsizelookup = {'paper': (4.6, 4), 'quarter': (1.25, 1), None: None}
    try:
        figsize = figsizelookup[figsize]
    # bug fix: the original 'except KeyError,TypeError:' (py2 syntax) caught only
    # KeyError and bound it to the name TypeError; the intent was to catch both.
    except (KeyError, TypeError):
        pass
    params = {  # 'backend': 'ps',
        'axes.labelsize': 16,
        # 'text.fontsize': 14,
        'font.size': 14,
        'legend.fontsize': 10,
        'xtick.labelsize': 16,
        'ytick.labelsize': 16,
        'text.usetex': True,
        'figure.figsize': figsize
    }
    # 'figure.figsize': fig_size}
    if uniform is not None:
        assert isinstance(uniform, int)
        params = {  # 'backend': 'ps',
            'axes.labelsize': uniform,
            # 'text.fontsize': uniform,
            'font.size': uniform,
            'legend.fontsize': uniform,
            'xtick.labelsize': uniform,
            'ytick.labelsize': uniform,
            'text.usetex': True,
            'text.latex.unicode': True,
            'text.latex.preamble': r'\usepackage{amsmath},\usepackage{amssymb}',
            'figure.figsize': figsize
        }
    if not amsmath:
        params.update({'text.latex.preamble': ''})

    plt.rcParams.update(params)
    plt.rcParams['text.latex.unicode'] = True
    # if figsize:
    #     plt.rcParams[figure.figsize] = {'paper': (4.6, 4)}[figsize]

    return(params)
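# --- Illustrative usage sketch (not part of the original file) ---
# Setting up an 11.4 cm (single-column) figure at 8 pt before plotting, then
# fine-tuning the axes afterwards; values here are example choices:
#
#   prepare_figure_for_publication(width_inches=11.4 / 2.54, height_inches=2.5,
#                                  fontsize=8, TeX=False)
#   fig, ax = plt.subplots()
#   ax.plot([0, 1], [0, 1])
#   plot_diagonal(ax=ax, color='k', linestyle=':')
#   prepare_figure_for_publication(ax=ax, fontsize=8)
#   fig.savefig('figure.pdf')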
gpl-3.0
-5,737,185,367,860,241,000
43.158537
474
0.627175
false
3.725309
false
false
false
sarrionandia/taber
data/views.py
1
4616
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponse, Http404
from django.utils.decorators import method_decorator
from django.views.generic import View
from django.template import RequestContext, loader
from models import Institution, Judge, Team, Venue
from django.views.decorators.csrf import csrf_exempt

import json


def index(request):
    template = loader.get_template('data/index.html')
    context = RequestContext(request, {
        'institutions': Institution.objects.all().order_by('name'),
        'judges': Judge.objects.all(),
        'teams': Team.objects.all(),
        'venues': Venue.objects.all(),
    })
    return HttpResponse(template.render(context))


class DeleteInstitutionView(View):
    def post(self, request, institutionid):
        try:
            institution = Institution.objects.get(id=institutionid)
            institution.delete()
        except ObjectDoesNotExist:
            raise Http404("Institution does not exist")
        return HttpResponse(institutionid)

    @method_decorator(csrf_exempt)
    def dispatch(self, *args, **kwargs):
        return super(DeleteInstitutionView, self).dispatch(*args, **kwargs)


class CreateInstitutionView(View):
    def post(self, request):
        name = request.POST.get('name')
        institution = Institution(name=name)
        institution.save()
        response = {"id": institution.id, "name": institution.name}
        return HttpResponse(json.dumps(response))

    @method_decorator(csrf_exempt)
    def dispatch(self, *args, **kwargs):
        return super(CreateInstitutionView, self).dispatch(*args, **kwargs)


class UpdateInstitutionView(View):
    def post(self, request, institutionid):
        try:
            institution = Institution.objects.get(id=institutionid)
            institution.name = request.POST.get('name')
            institution.save()
            response = {
                'name': institution.name,
                'id': institution.id
            }
            return HttpResponse(json.dumps(response))
        except ObjectDoesNotExist:
            raise Http404("Institution does not exist")

    @method_decorator(csrf_exempt)
    def dispatch(self, *args, **kwargs):
        return super(UpdateInstitutionView, self).dispatch(*args, **kwargs)


class DeleteTeamView(View):
    def post(self, request, teamid):
        try:
            team = Team.objects.get(id=teamid)
            team.delete()
        except ObjectDoesNotExist:
            raise Http404("Team does not exist")
        return HttpResponse("OK")

    @method_decorator(csrf_exempt)
    def dispatch(self, *args, **kwargs):
        return super(DeleteTeamView, self).dispatch(*args, **kwargs)


class CreateTeamView(View):
    def post(self, request):
        institution = Institution.objects.get(id=int(request.POST.get('institution')))
        team = Team(name=request.POST.get('name'), institution=institution)
        team.speaker1 = request.POST.get('speaker1')
        team.speaker2 = request.POST.get('speaker2')
        team.save()

        response = {
            'id': team.id,
            'name': team.name,
            'speaker1': team.speaker1,
            'speaker2': team.speaker2
        }
        return HttpResponse(json.dumps(response))

    @method_decorator(csrf_exempt)
    def dispatch(self, *args, **kwargs):
        return super(CreateTeamView, self).dispatch(*args, **kwargs)


class UpdateTeamView(View):
    def post(self, request, teamid):
        try:
            team = Team.objects.get(id=teamid)
            team.name = request.POST.get('name')
            team.speaker1 = request.POST.get('speaker1')
            team.speaker2 = request.POST.get('speaker2')
            team.save()
        except ObjectDoesNotExist:
            raise Http404("Team does not exist")
        return HttpResponse("OK")


class DeleteJudgeView(View):
    def post(self, request, judgeid):
        try:
            judge = Judge.objects.get(id=judgeid)
            judge.delete()
        except ObjectDoesNotExist:
            raise Http404("Judge does not exist")
        return HttpResponse("OK")


class CreateJudgeView(View):
    def post(self, request):
        name = request.POST.get('name')
        try:
            institution = Institution.objects.get(id=int(request.POST.get('institution')))
            judge = Judge(name=name, institution=institution)
            judge.save()
        except ObjectDoesNotExist:
            raise Http404("Institution does not exist")
        return HttpResponse(judge.id)
gpl-2.0
3,279,643,001,639,974,000
29.569536
90
0.630416
false
4.084956
false
false
false
xzhang2016/tfagent
tfta/test/test2_json.py
1
40826
from kqml import KQMLList, KQMLString
from tfta.tfta import TFTA
from tfta.tfta_module import TFTA_Module
from bioagents.tests.util import ekb_from_text, ekb_kstring_from_text, \
    get_request, agent_clj_from_text
from bioagents.tests.integration import _IntegrationTest, _FailureTest
from indra.sources.trips.processor import TripsProcessor
from indra.statements import Agent
from bioagents import Bioagent
from indra.sources import trips

#####################################
# Testing the following TFTA capabilities
# IS-GENE-ONTO
# FIND-GENE-ONTO
# FIND-KINASE-REGULATION
# IS-GENE-TISSUE
# FIND-GENE-TISSUE
# FIND-TISSUE
# FIND-COMMON-TF-GENES
# FIND-EVIDENCE
######################################


def _get_targets(target_arg):
    proteins = None
    family = None
    agents = Bioagent.get_agent(target_arg)
    if isinstance(agents, list):
        proteins = [a.name for a in agents if a is not None and ('UP' in a.db_refs or 'HGNC' in a.db_refs)]
        family = [a.name for a in agents if a is not None and 'FPLX' in a.db_refs and a.name not in proteins]
    elif isinstance(agents, Agent):
        if 'UP' in agents.db_refs or 'HGNC' in agents.db_refs:
            proteins = [agents.name]
        if not proteins and 'FPLX' in agents.db_refs:
            family = [agents.name]
    if proteins:
        print('genes=', ','.join(proteins))
    else:
        print('Genes = None\n')
    if family:
        print('family=', ','.join(family))
    else:
        print('family = None\n')
    return proteins, family


def agents_clj_from_text(text):
    ekb_xml = ekb_from_text(text)
    tp = trips.process_xml(ekb_xml)
    agents = tp.get_agents()
    clj = Bioagent.make_cljson(agents)
    return clj


#############################################################################
#IS-GENE-ONTO
#Is stat3 a kinase?
class TestIsGeneOnto1(_IntegrationTest):
    def __init__(self, *args):
        super(TestIsGeneOnto1, self).__init__(TFTA_Module)

    def create_message(self):
        # Here we create a KQML request that the TFTA needs to respond to
        gene = agent_clj_from_text('STAT3')
        _get_targets(gene)
        print('target=', str(gene))
        keyword = 'kinase'
        content = KQMLList('is-gene-onto')
        content.set('keyword', keyword)
        content.set('gene', gene)
        return get_request(content), content

    def check_response_to_message(self, output):
        assert output.head() == 'SUCCESS', output
        assert output.get('result') == 'FALSE', output


#Is stat3 a transcription factor?
class TestIsGeneOnto2(_IntegrationTest):
    def __init__(self, *args):
        super(TestIsGeneOnto2, self).__init__(TFTA_Module)

    def create_message(self):
        # Here we create a KQML request that the TFTA needs to respond to
        gene = agent_clj_from_text('STAT3')
        _get_targets(gene)
        print('target=', str(gene))
        keyword = 'transcription factor'
        content = KQMLList('is-gene-onto')
        content.set('keyword', keyword)
        content.set('gene', gene)
        return get_request(content), content

    def check_response_to_message(self, output):
        assert output.head() == 'SUCCESS', output
        assert output.get('result') == 'TRUE', output


#Is stat3 a protein kinase?
class TestIsGeneOnto3(_IntegrationTest):
    def __init__(self, *args):
        super(TestIsGeneOnto3, self).__init__(TFTA_Module)

    def create_message(self):
        # Here we create a KQML request that the TFTA needs to respond to
        gene = agent_clj_from_text('STAT3')
        _get_targets(gene)
        print('target=', str(gene))
        keyword = 'protein kinase'
        content = KQMLList('is-gene-onto')
        content.set('keyword', keyword)
        content.set('gene', gene)
        return get_request(content), content

    def check_response_to_message(self, output):
        assert output.head() == 'SUCCESS', output
        assert output.get('result') == 'FALSE', output


#Is jak1 a protein kinase?
class TestIsGeneOnto4(_IntegrationTest):
    def __init__(self, *args):
        super(TestIsGeneOnto4, self).__init__(TFTA_Module)

    def create_message(self):
        # Here we create a KQML request that the TFTA needs to respond to
        gene = agent_clj_from_text('JAK1')
        _get_targets(gene)
        print('target=', str(gene))
        keyword = 'protein kinase'
        content = KQMLList('is-gene-onto')
        content.set('keyword', keyword)
        content.set('gene', gene)
        return get_request(content), content

    def check_response_to_message(self, output):
        assert output.head() == 'SUCCESS', output
        assert output.get('result') == 'TRUE', output


#Is PBRM1 a transcription factor?
class TestIsGeneOnto5(_IntegrationTest):
    def __init__(self, *args):
        super(TestIsGeneOnto5, self).__init__(TFTA_Module)

    def create_message(self):
        # Here we create a KQML request that the TFTA needs to respond to
        gene = agent_clj_from_text('PBRM1')
        _get_targets(gene)
        print('target=', str(gene))
        keyword = 'transcription factor'
        content = KQMLList('is-gene-onto')
        content.set('keyword', keyword)
        content.set('gene', gene)
        return get_request(content), content

    def check_response_to_message(self, output):
        assert output.head() == 'SUCCESS', output
        assert output.get('result') == 'TRUE', output


#TEST FAMILY NAME
#Is SMURF a transcription factor?
class TestIsGeneOnto6(_IntegrationTest):
    def __init__(self, *args):
        super(TestIsGeneOnto6, self).__init__(TFTA_Module)

    def create_message(self):
        # Here we create a KQML request that the TFTA needs to respond to
        gene = agent_clj_from_text('SMURF')
        _get_targets(gene)
        print('target=', str(gene))
        keyword = 'transcription factor'
        content = KQMLList('is-gene-onto')
        content.set('keyword', keyword)
        content.set('gene', gene)
        return get_request(content), content

    def check_response_to_message(self, output):
        assert output.head() == 'FAILURE', output
        assert output.get('reason') == 'FAMILY_NAME', output.get('reason')
        print("len(output.get('clarification'))=", len(output.get('clarification')))
        assert len(output.get('clarification')) == 5, output


#is stat a kinase? (STAT is grounded as a gene, not a family)
class TestIsGeneOnto7(_IntegrationTest):
    def __init__(self, *args):
        super(TestIsGeneOnto7, self).__init__(TFTA_Module)

    def create_message(self):
        # Here we create a KQML request that the TFTA needs to respond to
        gene = agent_clj_from_text('STAT')
        _get_targets(gene)
        print('target=', str(gene))
        keyword = 'KINASE'
        content = KQMLList('is-gene-onto')
        content.set('keyword', keyword)
        content.set('gene', gene)
        return get_request(content), content

    def check_response_to_message(self, output):
        assert output.head() == 'FAILURE', output
        assert output.get('reason') == 'FAMILY_NAME', output
        print("len(output.get('clarification'))=", len(output.get('clarification')))
        assert len(output.get('clarification')) == 5, output


#test protein and gene category
#is map3k7 a protein?
class TestIsGeneOnto8(_IntegrationTest):
    def __init__(self, *args):
        super(TestIsGeneOnto8, self).__init__(TFTA_Module)

    def create_message(self):
        # Here we create a KQML request that the TFTA needs to respond to
        gene = agent_clj_from_text('map3k7')
        _get_targets(gene)
        print('target=', str(gene))
        keyword = 'protein'
        content = KQMLList('is-gene-onto')
        content.set('keyword', keyword)
        content.set('gene', gene)
        return get_request(content), content

    def check_response_to_message(self, output):
        assert output.head() == 'SUCCESS', output
        assert output.get('result') == 'TRUE', output


#is stat3 a gene?
class TestIsGeneOnto9(_IntegrationTest):
    def __init__(self, *args):
        super(TestIsGeneOnto9, self).__init__(TFTA_Module)

    def create_message(self):
        # Here we create a KQML request that the TFTA needs to respond to
        gene = agent_clj_from_text('stat3')
        _get_targets(gene)
        print('target=', str(gene))
        keyword = 'gene'
        content = KQMLList('is-gene-onto')
        content.set('keyword', keyword)
        content.set('gene', gene)
        return get_request(content), content

    def check_response_to_message(self, output):
        assert output.head() == 'SUCCESS', output
        assert output.get('result') == 'TRUE', output


##################################################################################
##TEST FIND-GENE-ONTO
#Among STAT3, JAK1, JAK2, ELK1, ELK2, HRAS, and FOS, which are protein kinases?
class TestFindGeneOnto1(_IntegrationTest):
    def __init__(self, *args):
        super(TestFindGeneOnto1, self).__init__(TFTA_Module)

    def create_message(self):
        # Here we create a KQML request that the TFTA needs to respond to
        gene = agents_clj_from_text("STAT3, JAK1, JAK2, ELK1, FOS, HRAS, ELK2")
        _get_targets(gene)
        print('target=', str(gene))
        keyword = 'protein kinase'
        content = KQMLList('find-gene-onto')
        content.set('keyword', keyword)
        content.set('gene', gene)
        return get_request(content), content

    def check_response_to_message(self, output):
        print("len(output.get('genes'))=", str(len(output.get('genes'))))
        assert output.head() == 'SUCCESS', output
        assert len(output.get('genes')) == 2, output


#Among STAT3, JAK1, JAK2, ELK1, and FOS, which are histone demethylase?
class TestFindGeneOnto2(_IntegrationTest):
    def __init__(self, *args):
        super(TestFindGeneOnto2, self).__init__(TFTA_Module)

    def create_message(self):
        # Here we create a KQML request that the TFTA needs to respond to
        gene = agents_clj_from_text("STAT3, JAK1, JAK2, ELK1, FOS, SMAD2, KDM4B")
        _get_targets(gene)
        print('target=', str(gene))
        keyword = 'histone demethylase'
        content = KQMLList('find-gene-onto')
        content.set('keyword', keyword)
        content.set('gene', gene)
        return get_request(content), content

    def check_response_to_message(self, output):
        print('len(output)=' + str(len(output.get('genes'))))
        assert output.head() == 'SUCCESS', output
        assert len(output.get('genes')) == 1, output


#Among PBRM1, SMAD2, TBL1XR1, AKT1, CDK19, CDK8, CDK9, DDR1, GSK3A, GSK3B, MET,TRIM28,COL2A1,
# JAK1, PRMT1, RB1, SMURF2, TRAF4, and USP15, which are transcription factors?
class TestFindGeneOnto3(_IntegrationTest):
    def __init__(self, *args):
        super(TestFindGeneOnto3, self).__init__(TFTA_Module)

    def create_message(self):
        # Here we create a KQML request that the TFTA needs to respond to
        gene = agents_clj_from_text("PBRM1, SMAD2, TBL1XR1, AKT1, CDK19, CDK8, CDK9, DDR1, \
            GSK3A, GSK3B, MET,TRIM28,COL2A1,JAK1, PRMT1, RB1, SMURF2, TRAF4, USP15")
        _get_targets(gene)
        print('target=', str(gene))
        keyword = 'transcription factor'
        content = KQMLList('find-gene-onto')
        content.set('keyword', keyword)
        content.set('gene', gene)
        return get_request(content), content

    def check_response_to_message(self, output):
        print("len(output.get('genes'))=" + str(len(output.get('genes'))))
        assert output.head() == 'SUCCESS', output
        assert len(output.get('genes')) == 3, output


#Among STAT3, JAK1, JAK2, ELK1, and FOS, which are demethylase?
class TestFindGeneOnto4(_IntegrationTest):
    def __init__(self, *args):
        super(TestFindGeneOnto4, self).__init__(TFTA_Module)

    def create_message(self):
        # Here we create a KQML request that the TFTA needs to respond to
        gene = agents_clj_from_text("STAT3, JAK1, JAK2, ELK1, FOS, SMAD2, KDM4B")
        _get_targets(gene)
        print('target=', str(gene))
        keyword = 'demethylase'
        content = KQMLList('find-gene-onto')
        content.set('keyword', keyword)
        content.set('gene', gene)
        return get_request(content), content

    def check_response_to_message(self, output):
        print('len(output)=' + str(len(output.get('genes'))))
        assert output.head() == 'SUCCESS', output
        assert len(output.get('genes')) == 1, output


#complex query: find-target and find-gene-onto
#What genes regulated by FOS are kinases?
class TestFindGeneOnto5(_IntegrationTest):
    def __init__(self, *args):
        super(TestFindGeneOnto5, self).__init__(TFTA_Module)

    def create_message(self):
        # Here we create a KQML request that the TFTA needs to respond to
        regulator = agent_clj_from_text('cfos')
        _get_targets(regulator)
        print('target=', str(regulator))
        keyword = 'kinase'
        content = KQMLList('find-gene-onto')
        content.set('keyword', keyword)
        content.set('regulator', regulator)
        return get_request(content), content

    def check_response_to_message(self, output):
        print('len(output)=' + str(len(output.get('genes'))))
        assert output.head() == 'SUCCESS', output
        assert len(output.get('genes')) == 5, output


###############################################################################
# FIND-KINASE-REGULATION
#Which kinases regulate the cfos gene?
class TestFindKinaseReg1(_IntegrationTest):
    def __init__(self, *args):
        super(TestFindKinaseReg1, self).__init__(TFTA_Module)

    def create_message(self):
        # Here we create a KQML request that the TFTA needs to respond to
        target = agent_clj_from_text('cfos')
        _get_targets(target)
        print('target=', str(target))
        content = KQMLList('FIND-KINASE-REGULATION')
        content.set('target', target)
        return get_request(content), content

    def check_response_to_message(self, output):
        assert output.head() == 'SUCCESS', output
        assert len(output.get('kinase')) == 5, output


#test gene family
#Which kinases regulate the MEK gene?
class TestFindKinaseReg2(_IntegrationTest):
    def __init__(self, *args):
        super(TestFindKinaseReg2, self).__init__(TFTA_Module)

    def create_message(self):
        # Here we create a KQML request that the TFTA needs to respond to
        target = agent_clj_from_text('MEK')
        _get_targets(target)
        print('target=', str(target))
        content = KQMLList('FIND-KINASE-REGULATION')
        content.set('target', target)
        return get_request(content), content

    def check_response_to_message(self, output):
        assert output.head() == 'FAILURE', output
        assert output.get('reason') == 'FAMILY_NAME', output
        print("len(output.get('clarification'))=", len(output.get('clarification')))
        assert len(output.get('clarification')) == 5, output


#Which kinases negatively regulate the cfos gene?
class TestFindKinaseReg3(_IntegrationTest):
    def __init__(self, *args):
        super(TestFindKinaseReg3, self).__init__(TFTA_Module)

    def create_message(self):
        # Here we create a KQML request that the TFTA needs to respond to
        target = agent_clj_from_text('cfos')
        _get_targets(target)
        print('target=', str(target))
        keyword = 'decrease'
        content = KQMLList('FIND-KINASE-REGULATION')
        content.set('target', target)
        content.set('keyword', keyword)
        return get_request(content), content

    def check_response_to_message(self, output):
        assert output.head() == 'SUCCESS', output
        assert len(output.get('kinase')) == 3, output


#Which kinases positively regulate the cfos gene?
class TestFindKinaseReg4(_IntegrationTest):
    def __init__(self, *args):
        super(TestFindKinaseReg4, self).__init__(TFTA_Module)

    def create_message(self):
        # Here we create a KQML request that the TFTA needs to respond to
        target = agent_clj_from_text('cfos')
        _get_targets(target)
        print('target=', str(target))
        keyword = 'increase'
        content = KQMLList('FIND-KINASE-REGULATION')
        content.set('target', target)
        content.set('keyword', keyword)
        return get_request(content), content

    def check_response_to_message(self, output):
        assert output.head() == 'SUCCESS', output
        assert len(output.get('kinase')) == 2, output


#Which kinases positively regulate the AKT gene?
class TestFindKinaseReg5(_IntegrationTest):
    def __init__(self, *args):
        super(TestFindKinaseReg5, self).__init__(TFTA_Module)

    def create_message(self):
        # Here we create a KQML request that the TFTA needs to respond to
        target = agent_clj_from_text('AKT')
        _get_targets(target)
        print('target=', str(target))
        keyword = 'increase'
        content = KQMLList('FIND-KINASE-REGULATION')
        content.set('target', target)
        content.set('keyword', keyword)
        return get_request(content), content

    def check_response_to_message(self, output):
        assert output.head() == 'FAILURE', output
        assert output.get('reason') == 'FAMILY_NAME', output
        print("len(output.get('clarification'))=", len(output.get('clarification')))
        assert len(output.get('clarification')) == 5, output


#######################################################################################
#IS-GENE-TISSUE
###Is stat3 expressed in liver?
class TestIsTissueGene1(_IntegrationTest):
    def __init__(self, *args):
        super(TestIsTissueGene1, self).__init__(TFTA_Module)

    def create_message(self):
        # Here we create a KQML request that the TFTA needs to respond to
        target = agent_clj_from_text('stat3')
        _get_targets(target)
        print('target=', str(target))
        content = KQMLList('IS-GENE-TISSUE')
        content.set('gene', target)
        content.set('tissue', 'liver')
        return get_request(content), content

    def check_response_to_message(self, output):
        assert output.head() == 'SUCCESS', output
        assert output.get('result') == 'TRUE', output


###Is kras expressed in brain?
class TestIsTissueGene2(_IntegrationTest):
    def __init__(self, *args):
        super(TestIsTissueGene2, self).__init__(TFTA_Module)

    def create_message(self):
        # Here we create a KQML request that the TFTA needs to respond to
        target = agent_clj_from_text('kras')
        _get_targets(target)
        print('target=', str(target))
        content = KQMLList('IS-GENE-TISSUE')
        content.set('gene', target)
        content.set('tissue', 'brain')
        return get_request(content), content

    def check_response_to_message(self, output):
        assert output.head() == 'SUCCESS', output
        assert output.get('result') == 'FALSE', output


###Is stat3 exclusively expressed in liver?
class TestIsTissueGene3(_IntegrationTest):
    def __init__(self, *args):
        super(TestIsTissueGene3, self).__init__(TFTA_Module)

    def create_message(self):
        # Here we create a KQML request that the TFTA needs to respond to
        target = agent_clj_from_text('stat3')
        _get_targets(target)
        print('target=', str(target))
        content = KQMLList('IS-GENE-TISSUE')
        content.set('gene', target)
        content.set('tissue', 'liver')
        content.set('keyword', 'exclusive')
        return get_request(content), content

    def check_response_to_message(self, output):
        assert output.head() == 'SUCCESS', output
        assert output.get('result') == 'FALSE', output


###Is GYS2 exclusively expressed in liver?
class TestIsTissueGene4(_IntegrationTest):
    def __init__(self, *args):
        super(TestIsTissueGene4, self).__init__(TFTA_Module)

    def create_message(self):
        # Here we create a KQML request that the TFTA needs to respond to
        target = agent_clj_from_text('GYS2')
        _get_targets(target)
        print('target=', str(target))
        content = KQMLList('IS-GENE-TISSUE')
        content.set('gene', target)
        content.set('tissue', 'liver')
        content.set('keyword', 'exclusive')
        return get_request(content), content

    def check_response_to_message(self, output):
        assert output.head() == 'SUCCESS', output
        assert output.get('result') == 'TRUE', output


###Is NEUROD2 exclusively expressed in brain?
class TestIsTissueGene5(_IntegrationTest):
    def __init__(self, *args):
        super(TestIsTissueGene5, self).__init__(TFTA_Module)

    def create_message(self):
        # Here we create a KQML request that the TFTA needs to respond to
        target = agent_clj_from_text('NEUROD2')
        _get_targets(target)
        print('target=', str(target))
        content = KQMLList('IS-GENE-TISSUE')
        content.set('gene', target)
        content.set('tissue', 'brain')
        content.set('keyword', 'exclusive')
        return get_request(content), content

    def check_response_to_message(self, output):
        assert output.head() == 'SUCCESS', output
        assert output.get('result') == 'TRUE', output


###Is GAST expressed in stomach?
class TestIsTissueGene6(_IntegrationTest):
    def __init__(self, *args):
        super(TestIsTissueGene6, self).__init__(TFTA_Module)

    def create_message(self):
        # Here we create a KQML request that the TFTA needs to respond to
        target = agent_clj_from_text('GAST')
        _get_targets(target)
        print('target=', str(target))
        content = KQMLList('IS-GENE-TISSUE')
        content.set('gene', target)
        content.set('tissue', 'stomach')
        return get_request(content), content

    def check_response_to_message(self, output):
        assert output.head() == 'SUCCESS', output
        assert output.get('result') == 'TRUE', output


######################################################################################
#FIND-GENE-TISSUE
#what genes are expressed in liver?
class TestFindGeneTissue1(_IntegrationTest):
    def __init__(self, *args):
        super(TestFindGeneTissue1, self).__init__(TFTA_Module)

    def create_message(self):
        # Here we create a KQML request that the TFTA needs to respond to
        #gene = ekb_kstring_from_text('AKT')
        tissue = 'liver'
        content = KQMLList('FIND-GENE-TISSUE')
        content.set('tissue', tissue)
        return get_request(content), content

    def check_response_to_message(self, output):
        assert output.head() == 'SUCCESS', output
        print("len(output.get('genes'))=", len(output.get('genes')))
        assert len(output.get('genes')) == 1929, output


#among stat3,srf, kras, and hras, what genes are expressed in liver?
class TestFindGeneTissue2(_IntegrationTest):
    def __init__(self, *args):
        super(TestFindGeneTissue2, self).__init__(TFTA_Module)

    def create_message(self):
        # Here we create a KQML request that the TFTA needs to respond to
        gene = agents_clj_from_text('stat3, srf, kras, hras')
        tissue = 'liver'
        content = KQMLList('FIND-GENE-TISSUE')
        content.set('tissue', tissue)
        content.set('gene', gene)
        return get_request(content), content

    def check_response_to_message(self, output):
        assert output.head() == 'SUCCESS', output
        print("len(output.get('genes'))=", len(output.get('genes')))
        assert len(output.get('genes')) == 1, output


#what genes are exclusively expressed in liver?
class TestFindGeneTissue3(_IntegrationTest):
    def __init__(self, *args):
        super(TestFindGeneTissue3, self).__init__(TFTA_Module)

    def create_message(self):
        # Here we create a KQML request that the TFTA needs to respond to
        #gene = 'stat3, srf, kras, hras'
        tissue = 'liver'
        content = KQMLList('FIND-GENE-TISSUE')
        content.set('tissue', tissue)
        content.set('keyword', 'exclusive')
        return get_request(content), content

    def check_response_to_message(self, output):
        assert output.head() == 'SUCCESS', output
        print("len(output.get('genes'))=", len(output.get('genes')))
        assert len(output.get('genes')) == 31, output


#what genes are exclusively expressed in brain?
class TestFindGeneTissue4(_IntegrationTest):
    def __init__(self, *args):
        super(TestFindGeneTissue4, self).__init__(TFTA_Module)

    def create_message(self):
        # Here we create a KQML request that the TFTA needs to respond to
        tissue = 'brain'
        content = KQMLList('FIND-GENE-TISSUE')
        content.set('tissue', tissue)
        content.set('keyword', 'exclusive')
        return get_request(content), content

    def check_response_to_message(self, output):
        assert output.head() == 'SUCCESS', output
        print("len(output.get('genes'))=", len(output.get('genes')))
        assert len(output.get('genes')) == 44, output


###############################################################################
# FIND-TISSUE
#What tissues is STAT3 expressed in?
class TestFindTissue1(_IntegrationTest):
    def __init__(self, *args):
        super(TestFindTissue1, self).__init__(TFTA_Module)

    def create_message(self):
        # Here we create a KQML request that the TFTA needs to respond to
        gene = agent_clj_from_text('STAT3')
        _get_targets(gene)
        print('target=', str(gene))
        content = KQMLList('FIND-TISSUE')
        content.set('gene', gene)
        return get_request(content), content

    def check_response_to_message(self, output):
        assert output.head() == 'SUCCESS', output
        print("len(output.get('tissue'))=", str(len(output.get('tissue'))))
        assert len(output.get('tissue')) == 8, output


#What tissues is MEK expressed in?
class TestFindTissue2(_IntegrationTest):
    def __init__(self, *args):
        super(TestFindTissue2, self).__init__(TFTA_Module)

    def create_message(self):
        # Here we create a KQML request that the TFTA needs to respond to
        gene = agent_clj_from_text('MEK')
        _get_targets(gene)
        print('target=', str(gene))
        content = KQMLList('FIND-TISSUE')
        content.set('gene', gene)
        return get_request(content), content

    def check_response_to_message(self, output):
        assert output.head() == 'FAILURE', output
        assert output.get('reason') == 'FAMILY_NAME', output
        print("len(output.get('clarification'))=", len(output.get('clarification')))
        assert len(output.get('clarification')) == 5, output


#what tissues can I ask
class TestFindTissue3(_IntegrationTest):
    def __init__(self, *args):
        super(TestFindTissue3, self).__init__(TFTA_Module)

    def create_message(self):
        # Here we create a KQML request that the TFTA needs to respond to
        content = KQMLList('FIND-TISSUE')
        return get_request(content), content

    def check_response_to_message(self, output):
        assert output.head() == 'SUCCESS', output
        print("len(output.get('tissue'))=", len(output.get('tissue')))
        assert len(output.get('tissue')) == 30, output


#What tissues is frizzled8 expressed in?
class TestFindTissue4(_IntegrationTest):
    def __init__(self, *args):
        super(TestFindTissue4, self).__init__(TFTA_Module)

    def create_message(self):
        # Here we create a KQML request that the TFTA needs to respond to
        gene = agent_clj_from_text('frizzled8')
        _get_targets(gene)
        print('target=', str(gene))
        content = KQMLList('FIND-TISSUE')
        content.set('gene', gene)
        return get_request(content), content

    def check_response_to_message(self, output):
        assert output.head() == 'SUCCESS', output
        print("len(output.get('tissue'))=", str(len(output.get('tissue'))))
        assert len(output.get('tissue')) == 7, output


####################################################################################
#FIND-COMMON-TF-GENES
#What transcription factors are shared by the SRF, HRAS, and elk1 genes? (subtask: find-common-tf-genes)
class TestFindCommonTfGenes1(_IntegrationTest):
    def __init__(self, *args):
        super(TestFindCommonTfGenes1, self).__init__(TFTA_Module)

    def create_message(self):
        # Here we create a KQML request that the TFTA needs to respond to
        target = agents_clj_from_text('SRF, HRAS, elk1')
        _get_targets(target)
        print('target=', str(target))
        content = KQMLList('FIND-COMMON-TF-GENES')
        content.set('target', target)
        return get_request(content), content

    def check_response_to_message(self, output):
        assert output.head() == 'SUCCESS', output
        assert len(output.get('tfs')) == 3, output


#What transcription factors are in common to the STAT3, SOCS3, IFNG, FOXO3, and CREB5 genes?
class TestFindCommonTfGenes2(_IntegrationTest):
    def __init__(self, *args):
        super(TestFindCommonTfGenes2, self).__init__(TFTA_Module)

    def create_message(self):
        # Here we create a KQML request that the TFTA needs to respond to
        target = agents_clj_from_text('STAT3, IFNG, FOXO3, SOCS3, CREB5')
        _get_targets(target)
        print('target=', str(target))
        content = KQMLList('FIND-COMMON-TF-GENES')
        content.set('target', target)
        return get_request(content), content

    def check_response_to_message(self, output):
        print('len(output)=' + str(len(output.get('tfs'))))
        assert output.head() == 'SUCCESS', output
        assert len(output.get('tfs')) == 8, output


#test gene family
#What transcription factors are in common to the STAT3, SOCS3, and MEK genes?
#MEK will be ignored in this case
class TestFindCommonTfGenes3(_IntegrationTest):
    def __init__(self, *args):
        super(TestFindCommonTfGenes3, self).__init__(TFTA_Module)

    def create_message(self):
        # Here we create a KQML request that the TFTA needs to respond to
        target = agents_clj_from_text('STAT3, SOCS3, MEK')
        _get_targets(target)
        print('target=', str(target))
        content = KQMLList('FIND-COMMON-TF-GENES')
        content.set('target', target)
        return get_request(content), content

    def check_response_to_message(self, output):
        assert output.head() == 'SUCCESS', output
        assert len(output.get('tfs')) == 1, output


#What transcription factors are in common to the STAT3, SOCS3, and AKT genes?
class TestFindCommonTfGenes4(_IntegrationTest):
    def __init__(self, *args):
        super(TestFindCommonTfGenes4, self).__init__(TFTA_Module)

    def create_message(self):
        # Here we create a KQML request that the TFTA needs to respond to
        target = agents_clj_from_text('STAT3, AKT, MEK')
        _get_targets(target)
        print('target=', str(target))
        content = KQMLList('FIND-COMMON-TF-GENES')
        content.set('target', target)
        return get_request(content), content

    def check_response_to_message(self, output):
        assert output.head() == 'FAILURE', output
        assert output.get('reason') == 'FAMILY_NAME', output
        assert len(output.get('clarification').get('as')) == 2, output


#Which of these transcription factors are shared by the SRF, HRAS, FOS, and elk1 genes? (subtask: find-common-tf-genes)
class TestFindCommonTfGenes5(_IntegrationTest):
    def __init__(self, *args):
        super(TestFindCommonTfGenes5, self).__init__(TFTA_Module)

    def create_message(self):
        # Here we create a KQML request that the TFTA needs to respond to
        target = agents_clj_from_text('SRF, HRAS, cfos, elk1')
        _get_targets(target)
        print('target=', str(target))
        of_those = agents_clj_from_text('stat3,ELK1,TFAP2A,CREB1,TP53')
        _get_targets(of_those)
        print('target=', str(of_those))
        content = KQMLList('FIND-COMMON-TF-GENES')
        content.set('target', target)
        content.set('of-those', of_those)
        return get_request(content), content

    def check_response_to_message(self, output):
        assert output.head() == 'SUCCESS', output
        print("len(output.get('tfs'))=", len(output.get('tfs')))
        assert len(output.get('tfs')) == 3, output


######################################################################################
# FIND-EVIDENCE
##Show me evidence that kras regulate frizzled8?
class TestFindEvidence1(_IntegrationTest):
    def __init__(self, *args):
        super(TestFindEvidence1, self).__init__(TFTA_Module)

    def create_message(self):
        # Here we create a KQML request that the TFTA needs to respond to
        regulator = agent_clj_from_text('kras')
        target = agent_clj_from_text('fzd8')
        _get_targets(target)
        print('target=', str(target))
        content = KQMLList('FIND-EVIDENCE')
        content.set('regulator', regulator)
        content.set('target', target)
        content.set('keyword', 'regulate')
        return get_request(content), content

    def check_response_to_message(self, output):
        assert output.head() == 'SUCCESS', output
        print("len(output.get('evidence'))=", str(len(output.get('evidence'))))
        print("len(output.get('evidence').get('literature'))=", str(len(output.get('evidence').get('literature'))))
        assert len(output.get('evidence')) == 2, output
        assert len(output.get('evidence').get('literature')) == 1, output


##show me evidence that kras increase frizzled8?
class TestFindEvidence2(_IntegrationTest):
    def __init__(self, *args):
        super(TestFindEvidence2, self).__init__(TFTA_Module)

    def create_message(self):
        # Here we create a KQML request that the TFTA needs to respond to
        regulator = agent_clj_from_text('kras')
        target = agent_clj_from_text('fzd8')
        content = KQMLList('FIND-EVIDENCE')
        content.set('regulator', regulator)
        content.set('target', target)
        content.set('keyword', 'increase')
        return get_request(content), content

    def check_response_to_message(self, output):
        assert output.head() == 'SUCCESS', output
        print("len(output.get('evidence'))=", str(len(output.get('evidence'))))
        print("len(output.get('evidence').get('literature'))=", str(len(output.get('evidence').get('literature'))))
        assert len(output.get('evidence')) == 2, output
        assert len(output.get('evidence').get('literature')) == 1, output


##show me evidence that kras decrease frizzled8?
class TestFindEvidence3(_IntegrationTest):
    def __init__(self, *args):
        super(TestFindEvidence3, self).__init__(TFTA_Module)

    def create_message(self):
        # Here we create a KQML request that the TFTA needs to respond to
        regulator = agent_clj_from_text('kras')
        target = agent_clj_from_text('fzd8')
        content = KQMLList('FIND-EVIDENCE')
        content.set('regulator', regulator)
        content.set('target', target)
        content.set('keyword', 'decrease')
        return get_request(content), content

    def check_response_to_message(self, output):
        assert output.head() == 'SUCCESS', output
        print("len(output.get('evidence'))=", str(len(output.get('evidence'))))
        print("type(output.get('evidence'))=", type(output.get('evidence')))
        print("len(output.get('evidence').get('literature'))=", str(len(output.get('evidence').get('literature'))))
        assert len(output.get('evidence')) == 2, output
        assert len(output.get('evidence').get('literature')) == 1, output


##Show me the evidence that IL6 increase the amount of SOCS1.
class TestFindEvidence4(_IntegrationTest):
    def __init__(self, *args):
        super(TestFindEvidence4, self).__init__(TFTA_Module)

    def create_message(self):
        # Here we create a KQML request that the TFTA needs to respond to
        regulator = agent_clj_from_text('il6')
        target = agent_clj_from_text('socs1')
        content = KQMLList('FIND-EVIDENCE')
        content.set('regulator', regulator)
        content.set('target', target)
        content.set('keyword', 'increase')
        return get_request(content), content

    def check_response_to_message(self, output):
        assert output.head() == 'SUCCESS', output
        print("len(output.get('evidence'))=", str(len(output.get('evidence'))))
        print("len(output.get('evidence').get('literature'))=", str(len(output.get('evidence').get('literature'))))
        assert len(output.get('evidence')) == 2, output
        assert len(output.get('evidence').get('literature')) == 9, output


##Show me the evidence that SRF binds to the FOS gene.
class TestFindEvidence5(_IntegrationTest):
    def __init__(self, *args):
        super(TestFindEvidence5, self).__init__(TFTA_Module)

    def create_message(self):
        # Here we create a KQML request that the TFTA needs to respond to
        regulator = agent_clj_from_text('SRF')
        target = agent_clj_from_text('cfos')
        content = KQMLList('FIND-EVIDENCE')
        content.set('regulator', regulator)
        content.set('target', target)
        content.set('keyword', 'bind')
        return get_request(content), content

    def check_response_to_message(self, output):
        assert output.head() == 'SUCCESS', output
        print("len(output.get('evidence'))=", str(len(output.get('evidence'))))
        print("len(output.get('evidence').get('tf-db'))=", str(len(output.get('evidence').get('tf-db'))))
        assert len(output.get('evidence')) == 2, output
        assert len(output.get('evidence').get('tf-db')) == 2, output


##Show me the evidence that SRF regulate FOS gene.
class TestFindEvidence6(_IntegrationTest):
    def __init__(self, *args):
        super(TestFindEvidence6, self).__init__(TFTA_Module)

    def create_message(self):
        # Here we create a KQML request that the TFTA needs to respond to
        regulator = agent_clj_from_text('SRF')
        target = agent_clj_from_text('cfos')
        content = KQMLList('FIND-EVIDENCE')
        content.set('regulator', regulator)
        content.set('target', target)
        content.set('keyword', 'regulate')
        return get_request(content), content

    def check_response_to_message(self, output):
        assert output.head() == 'SUCCESS', output
        print("len(output.get('evidence'))=", str(len(output.get('evidence'))))
        print("len(output.get('evidence').get('literature'))=", str(len(output.get('evidence').get('literature'))))
        assert len(output.get('evidence')) == 4, output
        assert len(output.get('evidence').get('literature')) == 2, output


#IncreaseAmount(miR_491(), GFAP())
class TestFindEvidence7(_IntegrationTest):
    def __init__(self, *args):
        super(TestFindEvidence7, self).__init__(TFTA_Module)

    def create_message(self):
        # Here we create a KQML request that the TFTA needs to respond to
        agents = Bioagent.get_agent(agent_clj_from_text('miR-491'))
        print(agents)
        print('name=', agents.name)
        print('db_refs=', agents.db_refs)
        regulator = agent_clj_from_text('miR-491')
        target = agent_clj_from_text('GFAP')
        content = KQMLList('FIND-EVIDENCE')
        content.set('regulator', regulator)
        content.set('target', target)
        content.set('keyword', 'increase')
        print(content, '\n')
        return get_request(content), content

    def check_response_to_message(self, output):
        assert output.head() == 'FAILURE', output
        assert output.get('reason') == 'NO_REGULATOR_NAME', output
bsd-2-clause
-776,647,102,837,806,700
38.445411
119
0.606452
false
3.559062
true
false
false
cajone/pychess
lib/pychess/widgets/ChatWindow.py
1
3808
import re

from gi.repository import Gtk

from pychess.widgets.ChatView import ChatView
from pychess.widgets.ViewsPanel import ViewsPanel
from pychess.widgets.InfoPanel import InfoPanel
from pychess.widgets.ChannelsPanel import ChannelsPanel


TYPE_PERSONAL, TYPE_CHANNEL, TYPE_GUEST, \
    TYPE_ADMIN, TYPE_COMP, TYPE_BLINDFOLD = range(6)


def get_playername(playername):
    re_m = re.match("(\w+)\W*", playername)
    return re_m.groups()[0]


class ChatWindow(object):
    def __init__(self, widgets, connection):
        self.connection = connection

        self.viewspanel = ViewsPanel(self.connection)
        self.channelspanel = ChannelsPanel(self.connection)
        self.adj = self.channelspanel.get_vadjustment()
        self.infopanel = InfoPanel(self.connection)
        self.chatbox = Gtk.Box()
        self.chatbox.pack_start(self.channelspanel, True, True, 0)
        notebook = Gtk.Notebook()
        notebook.append_page(self.viewspanel, Gtk.Label(_("Chat")))
        notebook.append_page(self.infopanel, Gtk.Label(_("Info")))
        self.chatbox.pack_start(notebook, False, False, 0)

        self.panels = [self.viewspanel, self.channelspanel, self.infopanel]

        self.viewspanel.connect('channel_content_Changed',
                                self.channelspanel.channel_Highlight, id)
        self.channelspanel.connect('conversationAdded', self.onConversationAdded)
        self.channelspanel.connect('conversationRemoved', self.onConversationRemoved)
        self.channelspanel.connect('conversationSelected', self.onConversationSelected)
        self.channelspanel.connect('focus_in_event', self.focus_in, self.adj)

        for panel in self.panels:
            panel.show_all()
            panel.start()

    def onConversationAdded(self, panel, grp_id, text, grp_type):
        chatView = ChatView()
        plus_channel = '+channel ' + str(grp_id)
        self.connection.cm.connection.client.run_command(plus_channel)
        for panel in self.panels:
            panel.addItem(grp_id, text, grp_type, chatView)

    def onConversationRemoved(self, panel, grp_id):
        minus_channel = '-channel ' + str(grp_id)
        self.connection.cm.connection.client.run_command(minus_channel)
        for panel in self.panels:
            panel.removeItem(grp_id)

    def onConversationSelected(self, panel, grp_id):
        for panel in self.panels:
            panel.selectItem(grp_id)

    def openChatWithPlayer(self, name):
        cm = self.connection.cm
        self.channelspanel.onPersonMessage(cm, name, "", False, "")

    def focus_in(widget, event, adj):
        alloc = widget.get_allocation()
        if alloc.y < adj.value or alloc.y > adj.value + adj.page_size:
            adj.set_value(min(alloc.y, adj.upper - adj.page_size))


if __name__ == "__main__":
    import random

    class LM:
        def getPlayerlist(self):
            for i in range(10):
                chrs = map(chr, range(ord("a"), ord("z") + 1))
                yield "".join(random.sample(chrs, random.randrange(20)))

        def getChannels(self):
            return [(str(i), n) for i, n in enumerate(self.getPlayerlist())]

        def joinChannel(self, channel):
            pass

        def connect(self, *args):
            pass

        def getPeopleInChannel(self, name):
            pass

        def finger(self, name):
            pass

        def getJoinedChannels(self):
            return []

    class Con:
        def __init__(self):
            self.glm = LM()
            self.cm = LM()
            self.fm = LM()

    chatwin = ChatWindow({}, Con())
    globals()["_"] = lambda x: x
    chatwin.window.connect("delete-event", Gtk.main_quit)
    Gtk.main()
gpl-3.0
-2,369,495,915,848,076,000
33
77
0.608193
false
3.736997
false
false
false
ideamonk/Web2Hunter-GAE
web2hunter.py
1
2098
#!/usr/bin/env python
# Web2Hunter -- Abhishek Mishra <ideamonk at gmail.com>
#
# a web 2.0 name generator extension to domainhunter.py
#
# usage -
#   $ python web2hunter.py

import domainhunter as DH
import random

A = ["Anti", "Aero", "Babble", "Buzz", "Blog", "Blue", "Brain", "Bright", "Browse", "Bubble", "Chat", "Chatter", "Dab", "Dazzle", "Dev", "Digi", "Edge", "Feed", "Five", "Flash", "Flip", "Gab", "Giga", "Inno", "Jabber", "Jax", "Jet", "Jump", "Link", "Live", "My", "N", "Photo", "Pod", "Real", "Riff", "Shuffle", "Snap", "Skip", "Tag", "Tek", "Thought", "Top", "Topic", "Twitter", "Word", "You", "Zoom"]

B = ["bean", "beat", "bird", "blab", "box", "bridge", "bug", "buzz", "cast", "cat", "chat", "club", "cube", "dog", "drive", "feed", "fire", "fish", "fly", "ify", "jam", "links", "list", "lounge", "mix", "nation", "opia", "pad", "path", "pedia", "point", "pulse", "set", "space", "span", "share", "shots", "sphere", "spot", "storm", "ster", "tag", "tags", "tube", "tune", "type", "verse", "vine", "ware", "wire", "works", "XS", "Z", "zone", "zoom"]

C = ["Ai", "Aba", "Agi", "Ava", "Awesome", "Cami", "Centi", "Cogi", "Demi", "Diva", "Dyna", "Ea", "Ei", "Fa", "Ge", "Ja", "I", "Ka", "Kay", "Ki", "Kwi", "La", "Lee", "Mee", "Mi", "Mu", "My", "Oo", "O", "Oyo", "Pixo", "Pla", "Qua", "Qui", "Roo", "Rhy", "Ska", "Sky", "Ski", "Ta", "Tri", "Twi", "Tru", "Vi", "Voo", "Wiki", "Ya", "Yaki", "Yo", "Za", "Zoo"]

D = ["ba", "ble", "boo", "box", "cero", "deo", "del", "do", "doo", "gen", "jo", "lane", "lia", "lith", "loo", "lium", "mba", "mbee", "mbo", "mbu", "mia", "mm", "nder", "ndo", "ndu", "noodle", "nix", "nte", "nti", "nu", "nyx", "pe", "re", "ta", "tri", "tz", "va", "vee", "veo", "vu", "xo", "yo", "zz", "zzy", "zio", "zu"]


def genName():
    output = ""
    random.shuffle(A)
    random.shuffle(B)
    random.shuffle(C)
    random.shuffle(D)

    if (random.randint(0,1) == 1):
        awesomename = A[0] + B[0]
    else:
        awesomename = C[0] + D[0]

    random.shuffle(DH.tlds)
    tld = DH.tlds[0]

    if ( DH.domainSearch(awesomename + tld) ):
        output = awesomename + tld

    return output
bsd-3-clause
-202,349,895,963,669
55.702703
448
0.515729
false
2.229543
false
false
false
buzzz321/SerMan
server.py
1
4112
#!/usr/bin/env python3
# found some where on internet and added my own stuff to it.

import logging
import socket
import select
import subprocess

HOSTNAME = 'localhost'
PORT = '4000'
MAXIMUM_QUEUED_CONNECTIONS = 5
RECEIVING_BUFFER_SIZE = 4096

logger = logging.getLogger(__name__)


def start_server(hostname, port):
    # Get all possible binding addresses for given hostname and port.
    possible_addresses = socket.getaddrinfo(
        hostname, port,
        family=socket.AF_UNSPEC,
        type=socket.SOCK_STREAM,
        flags=socket.AI_PASSIVE
    )

    server_socket = None
    # Look for an address that will actually bind.
    for family, socket_type, protocol, name, address in possible_addresses:
        try:
            # Create socket.
            server_socket = socket.socket(family, socket_type, protocol)
            # Make socket port reusable.
            server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            # Bind socket to the address.
            server_socket.bind(address)
        except OSError:
            # Try another address.
            continue
        break

    if server_socket is None:
        logger.error("No suitable address available.")
        return

    # Listen for incoming connections.
    server_socket.listen(MAXIMUM_QUEUED_CONNECTIONS)
    logger.info("Listening on %s port %d." % server_socket.getsockname()[:2])

    monitored_sockets = [server_socket]
    try:
        while True:
            # Wait for any of the monitored sockets to become readable.
            ready_to_read_sockets = select.select(
                monitored_sockets, tuple(), tuple()
            )[0]
            for ready_socket in ready_to_read_sockets:
                if ready_socket == server_socket:
                    # If server socket is readable, accept new client
                    # connection.
                    client_socket, client_address = server_socket.accept()
                    monitored_sockets.append(client_socket)
                    logger.info("New connection #%d on %s:%d." % (
                        client_socket.fileno(),
                        client_address[0],
                        client_address[1]
                    ))
                else:
                    message = ready_socket.recv(RECEIVING_BUFFER_SIZE)
                    if message:
                        # Client send correct message. Echo it.
                        if b'lista' in message:
                            print(message)
                            lista = subprocess.check_output(["ls", "-l"])
                            print(lista)
                            ready_socket.sendall(lista)
                        if b'long' in message:
                            print(message)
                            infile = open('serman/sermanwindow.cc')
                            lista = infile.readlines()
                            lista = ', '.join([str(x) for x in lista])
                            print(lista)
                            ready_socket.sendall(str.encode(lista))
                        else:
                            print(message)
                            ready_socket.sendall(message)
                    else:
                        # Client connection is lost. Handle it.
                        logger.info(
                            "Lost connection #%d." % ready_socket.fileno()
                        )
                        monitored_sockets.remove(ready_socket)
    except KeyboardInterrupt:
        pass

    logger.info("Shutdown initiated.")
    # Close client connections.
    monitored_sockets.remove(server_socket)
    for client_socket in monitored_sockets:
        logger.info("Closing connection #%d." % client_socket.fileno())
        client_socket.close()
    # Close server socket.
    logger.info("Shutting server down...")
    server_socket.close()


if __name__ == '__main__':
    # Configure logging.
    logger.setLevel(logging.INFO)
    logger.addHandler(logging.StreamHandler())
    # Start server.
    start_server(HOSTNAME, PORT)
bsd-2-clause
5,011,016,042,292,578,000
37.074074
79
0.530399
false
4.798133
false
false
false
zibraproject/zika-pipeline
scripts/extract.py
1
1704
from poretools.Fast5File import Fast5FileSet
import sys

# extract with constraints:
#   -- only one group ever
#   -- only one flowcell ID ever
#   -- always unique read ID

def run(parser, args):
    flowcells = set()
    reads = set()

    i = 0
    basecaller_version = None

    for fast5 in Fast5FileSet(args.directory, None, args.basecaller):
        # if not basecaller_version:
        #     basecaller_version = fast5.get_basecaller_version()
        # elif fast5.get_basecaller_version() != basecaller_version:
        #     print >>sys.stderr, "ABORTED: More than one basecaller version found: %s, %s" % (basecaller_version, fast5.get_basecaller_version())
        #     raise SystemExit

        if not fast5.is_open:
            print >>sys.stderr, "Skipping read: %s" % (fast5.filename)
            continue

        read_flowcell_id = fast5.get_flowcell_id()
        flowcells.add(read_flowcell_id)
        if len(flowcells) != 1:
            print >>sys.stderr, "ABORTED: More than one flowcell found in dataset: %s" % (flowcells,)
            raise SystemExit

        #if flowcell_id != read_flowcell_id:
        #    print >>sys.stderr, "Skipping read from flowcell: %s" % (read_flowcell_id)
        #    continue

        read_id = fast5.get_read_id()
        if read_id in reads:
            print >>sys.stderr, "Skipping duplicate read: %s" % (read_id)
            continue
        reads.add(read_id)

        fas = fast5.get_fastas('fwd')
        for read in fas:
            if read:
                print read
        fast5.close()

        i += 1
        if i % 1000 == 0:
            print >>sys.stderr, "Extracted %s reads" % (i,)

# zibra.py
# run
#   --flowcell
#   --type 1d / 2d
#   --check-sample-name
#   --check-flowcell-name
#   --min-support-value
#   --min-depth
#   --min-log-likelihood
#   --normalised-depth
#   --use-indels
#   --trim-reads
#   <scheme> <sample> <directory>
# list-schemes
mit
-3,093,050,626,163,535,000
23.695652
136
0.658451
false
2.752827
false
false
false
GeoMatDigital/django-geomat
geomat/feedback/views.py
1
1251
from django.shortcuts import render
from rest_framework import generics
from rest_framework.response import Response
from rest_framework import views
from geomat.feedback.serializers import FeedBackSerializer
from django.core.mail import send_mail
from rest_framework import status
from drf_yasg.utils import swagger_auto_schema


class FeedBackView(generics.GenericAPIView):
    serializer_class = FeedBackSerializer
    permission_classes = ()

    @swagger_auto_schema(responses={200: "The Views response is 200 if mail is sent"})
    def post(self, request, *args, **kwargs):
        serializer = FeedBackSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        serializer = serializer.data
        message = send_mail(
            subject=serializer["emailTitle"],
            from_email="{0} <{1}>".format(serializer["username"], serializer["userEmail"]),
            message=serializer["emailContent"],
            recipient_list=["[email protected]"],
            fail_silently=False)
        if not message:
            return Response(status=status.HTTP_400_BAD_REQUEST)
        return Response(data=serializer)

# Create your views here.
bsd-3-clause
-621,155,066,860,859,400
39.354839
107
0.681055
false
4.389474
false
false
false
xmendez/wfuzz
tests/server_dir/simple_server.py
1
3589
# slightly modified from
# https://gist.github.com/trungly/5889154
from http.server import HTTPServer
from http.server import SimpleHTTPRequestHandler
import urllib.parse


class GetHandler(SimpleHTTPRequestHandler):
    def do_HEAD(self):
        parsed_path = urllib.parse.urlparse(self.path)
        if parsed_path.path.startswith("/echo"):
            message = "\n".join(
                [
                    "CLIENT VALUES:",
                    "client_address=%s (%s)" % (self.client_address, self.address_string()),
                    "command=%s" % self.command,
                    "path=%s" % self.path,
                    "real path=%s" % parsed_path.path,
                    "query=%s" % parsed_path.query,
                    "request_version=%s" % self.request_version,
                    "",
                    "HEADERS:",
                    "%s" % self.headers,
                ]
            )
            self.send_response(200)
            self.end_headers()
            self.wfile.write(message.encode("utf-8"))
        elif parsed_path.path.startswith("/redirect"):
            self.send_response(301)
            self.send_header("Location", "/echo")
            self.end_headers()
        else:
            SimpleHTTPRequestHandler.do_HEAD(self)
        return

    def do_GET(self):
        parsed_path = urllib.parse.urlparse(self.path)
        if parsed_path.path.startswith("/echo"):
            message = "\n".join(
                [
                    "CLIENT VALUES:",
                    "client_address=%s (%s)" % (self.client_address, self.address_string()),
                    "command=%s" % self.command,
                    "path=%s" % self.path,
                    "real path=%s" % parsed_path.path,
                    "query=%s" % parsed_path.query,
                    "request_version=%s" % self.request_version,
                    "",
                    "HEADERS:",
                    "%s" % self.headers,
                ]
            )
            self.send_response(200)
            self.end_headers()
            self.wfile.write(message.encode("utf-8"))
        elif parsed_path.path.startswith("/redirect"):
            self.send_response(301)
            self.send_header("Location", "/echo")
            self.end_headers()
        else:
            SimpleHTTPRequestHandler.do_GET(self)
        return

    def do_POST(self):
        parsed_path = urllib.parse.urlparse(self.path)
        if parsed_path.path.startswith("/echo"):
            content_len = int(self.headers.get("content-length"))
            post_body = self.rfile.read(content_len).decode("utf-8")
            self.send_response(200)
            self.end_headers()
            message = "\n".join(
                [
                    "CLIENT VALUES:",
                    "client_address=%s (%s)" % (self.client_address, self.address_string()),
                    "command=%s" % self.command,
                    "path=%s" % self.path,
                    "real path=%s" % parsed_path.path,
                    "query=%s" % parsed_path.query,
                    "request_version=%s" % self.request_version,
                    "",
                    "HEADERS:",
                    "%s" % self.headers,
                    "POST_DATA=%s" % post_body,
                    "",
                ]
            )
            self.wfile.write(message.encode("utf-8"))
        return


if __name__ == "__main__":
    server = HTTPServer(("0.0.0.0", 8000), GetHandler)
    server.serve_forever()
gpl-2.0
6,534,772,595,874,089,000
33.84466
68
0.462246
false
4.36618
false
false
false
josefmonje/Django_demo
main/migrations/0001_initial.py
1
1096
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-03-01 10:46
from __future__ import unicode_literals

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('modified', models.DateTimeField(auto_now=True)),
                ('is_verified', models.BooleanField(default=False)),
                ('name', models.CharField(max_length=50)),
                ('update', models.TextField()),
                ('image', models.ImageField(blank=True, null=True, upload_to='')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
mit
313,435,659,963,360,100
34.354839
145
0.609489
false
4.23166
false
false
false
yannrouillard/weboob
modules/banqueaccord/pages.py
1
5605
# -*- coding: utf-8 -*-

# Copyright(C) 2013 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.

from decimal import Decimal
import re

from weboob.capabilities.bank import Account
from weboob.tools.browser import BasePage, BrokenPageError
from weboob.tools.captcha.virtkeyboard import MappedVirtKeyboard, VirtKeyboardError
from weboob.tools.capabilities.bank.transactions import FrenchTransaction


__all__ = ['LoginPage', 'IndexPage', 'AccountsPage', 'OperationsPage']


class VirtKeyboard(MappedVirtKeyboard):
    symbols={'0':('8664b9cdfa66b4c3a1ec99c35a2bf64b','9eb80c6e99410eaac32905b2c77e65e5','37717277dc2471c8a7bf37e2068a8f01'),
             '1':('1f36986f9d27dde54ce5b08e8e285476','9d0aa7a0a2bbab4f2c01ef1e820cb3f1'),
             '2':('b560b0cce2ca74d3d499d73775152ab7',),
             '3':('d16e426e71fc29b1b55d0fbded99a473',),
             '4':('19c68066e414e08d17c86fc5c4acc949','c43354a7f7739508f76c538d5b3bce26'),
             '5':('4b9abf98e30a1475997ec770cbe5e702','2059b4aa95c7b3156b171255fa10bbdd'),
             '6':('804be4171d61f9cc10e9978c43b1d2a0','a41b091d4a11a318406a5a8bd3ed3837'),
             '7':('8adf951f4eea5f446f714214e101d555',),
             '8':('568135f3844213c30f2c7880be867d3d',),
             '9':('a3750995c511ea1492ac244421109e77','eeb3a8ba804f19380dfe94a91a37595b'),
            }

    color=(0,0,0)

    def __init__(self, page):
        img = page.document.find("//img[@usemap='#cv']")
        img_file = page.browser.openurl(img.attrib['src'])
        MappedVirtKeyboard.__init__(self, img_file, page.document, img, self.color, 'href', convert='RGB')
        self.check_symbols(self.symbols, page.browser.responses_dirname)

    def check_color(self, pixel):
        for p in pixel:
            if p >= 0xd5:
                return False
        return True

    def get_symbol_coords(self, (x1, y1, x2, y2)):
        # strip borders
        return MappedVirtKeyboard.get_symbol_coords(self, (x1+10, y1+10, x2-10, y2-10))

    def get_symbol_code(self, md5sum_list):
        for md5sum in md5sum_list:
            try:
                code = MappedVirtKeyboard.get_symbol_code(self, md5sum)
            except VirtKeyboardError:
                continue
            else:
                return ''.join(re.findall("'(\d+)'", code)[-2:])
        raise VirtKeyboardError('Symbol not found')

    def get_string_code(self, string):
        code = ''
        for c in string:
            code += self.get_symbol_code(self.symbols[c])
        return code


class LoginPage(BasePage):
    def login(self, login, password):
        vk = VirtKeyboard(self)
        form = self.document.xpath('//form[@id="formulaire-login"]')[0]
        code = vk.get_string_code(password)
        assert len(code) == 10, BrokenPageError("Wrong number of character.")
        self.browser.location(self.browser.buildurl(form.attrib['action'], identifiant=login, code=code), no_login=True)


class IndexPage(BasePage):
    def get_list(self):
        for line in self.document.xpath('//li[@id="menu-n2-mesproduits"]//li//a'):
            if line.get('onclick') is None:
                continue
            account = Account()
            account.id = line.get('onclick').split("'")[1]
            account.label = self.parser.tocleanstring(line)
            yield account

    def get_card_name(self):
        return self.parser.tocleanstring(self.document.xpath('//h1')[0])


class AccountsPage(BasePage):
    def get_balance(self):
        balance = Decimal('0.0')
        for line in self.document.xpath('//div[@class="detail"]/table//tr'):
            try:
                left = line.xpath('./td[@class="gauche"]')[0]
                right = line.xpath('./td[@class="droite"]')[0]
            except IndexError:
                #useless line
                continue

            if len(left.xpath('./span[@class="precision"]')) == 0 and (left.text is None or not 'total' in left.text.lower()):
                continue

            balance -= Decimal(FrenchTransaction.clean_amount(right.text))
        return balance


class OperationsPage(BasePage):
    def get_history(self):
        for tr in self.document.xpath('//div[contains(@class, "mod-listeoperations")]//table/tbody/tr'):
            cols = tr.findall('td')

            date = self.parser.tocleanstring(cols[0])
            raw = self.parser.tocleanstring(cols[1])
            label = re.sub(u' - traité le \d+/\d+', '', raw)

            debit = self.parser.tocleanstring(cols[3])
            if len(debit) > 0:
                t = FrenchTransaction(0)
                t.parse(date, raw)
                t.label = label
                t.set_amount(debit)
                yield t

            amount = self.parser.tocleanstring(cols[2])
            if len(amount) > 0:
                t = FrenchTransaction(0)
                t.parse(date, raw)
                t.label = label
                t.set_amount(amount)
                t.amount = - t.amount
                yield t
agpl-3.0
2,130,746,300,878,250,000
37.383562
126
0.616881
false
3.410834
false
false
false
franblas/facialrecoChallenge
itml.py
1
3881
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 18 19:04:51 2015

@author: Paco
"""

"""
Information Theoretic Metric Learning, Kulis et al., ICML 2007
"""

import numpy as np
from sklearn.metrics import pairwise_distances
from base_metric import BaseMetricLearner


class ITML(BaseMetricLearner):
    """
    Information Theoretic Metric Learning (ITML)
    """
    def __init__(self, gamma=1., max_iters=1000, convergence_threshold=1e-3):
        """
        gamma: value for slack variables
        """
        self.gamma = gamma
        self.max_iters = max_iters
        self.convergence_threshold = convergence_threshold

    def _process_inputs(self, X, constraints, bounds, A0):
        self.X = X
        # check to make sure that no two constrained vectors are identical
        a, b, c, d = constraints
        ident = _vector_norm(self.X[a] - self.X[b]) > 1e-9
        a, b = a[ident], b[ident]
        ident = _vector_norm(self.X[c] - self.X[d]) > 1e-9
        c, d = c[ident], d[ident]
        # init bounds
        if bounds is None:
            self.bounds = np.percentile(pairwise_distances(X), (5, 95))
        else:
            assert len(bounds) == 2
            self.bounds = bounds
        # init metric
        if A0 is None:
            self.A = np.identity(X.shape[1])
        else:
            self.A = A0
        return a, b, c, d

    def fit(self, X, constraints, bounds=None, A0=None, verbose=False):
        """
        X: (n x d) data matrix - each row corresponds to a single instance
        constraints: tuple of arrays: (a,b,c,d) indices into X, such that:
            d(X[a],X[b]) < d(X[c],X[d])
        bounds: (pos,neg) pair of bounds on similarity, such that:
            d(X[a],X[b]) < pos
            d(X[c],X[d]) > neg
        A0: [optional] (d x d) initial regularization matrix, defaults to identity
        """
        a, b, c, d = self._process_inputs(X, constraints, bounds, A0)
        gamma = self.gamma
        num_pos = len(a)
        num_neg = len(c)
        _lambda = np.zeros(num_pos + num_neg)
        lambdaold = np.zeros_like(_lambda)
        gamma_proj = 1. if gamma is np.inf else gamma/(gamma+1.)
        pos_bhat = np.zeros(num_pos) + self.bounds[0]
        neg_bhat = np.zeros(num_neg) + self.bounds[1]
        A = self.A

        for it in xrange(self.max_iters):
            # update positives
            vv = self.X[a] - self.X[b]
            for i, v in enumerate(vv):
                wtw = v.dot(A).dot(v)  # scalar
                alpha = min(_lambda[i], gamma_proj*(1./wtw - 1./pos_bhat[i]))
                _lambda[i] -= alpha
                beta = alpha/(1 - alpha*wtw)
                pos_bhat[i] = 1./((1 / pos_bhat[i]) + (alpha / gamma))
                A += beta * A.dot(np.outer(v,v)).dot(A)

            # update negatives
            vv = self.X[c] - self.X[d]
            for i, v in enumerate(vv):
                wtw = v.dot(A).dot(v)  # scalar
                alpha = min(_lambda[i+num_pos], gamma_proj*(1./neg_bhat[i] - 1./wtw))
                _lambda[i+num_pos] -= alpha
                beta = -alpha/(1 + alpha*wtw)
                neg_bhat[i] = 1./((1 / neg_bhat[i]) - (alpha / gamma))
                A += beta * A.dot(np.outer(v,v)).dot(A)

            normsum = np.linalg.norm(_lambda) + np.linalg.norm(lambdaold)
            if normsum == 0:
                conv = np.inf
                break
            conv = np.abs(lambdaold - _lambda).sum() / normsum
            if conv < self.convergence_threshold:
                break
            lambdaold = _lambda.copy()
            if verbose:
                print 'itml iter: %d, conv = %f' % (it, conv)

        if verbose:
            print 'itml converged at iter: %d, conv = %f' % (it, conv)
        return self

    def metric(self):
        return self.A

    @classmethod
    def prepare_constraints(self, labels, num_points, num_constraints):
        ac, bd = np.random.randint(num_points, size=(2, num_constraints))
        pos = labels[ac] == labels[bd]
        a, c = ac[pos], ac[~pos]
        b, d = bd[pos], bd[~pos]
        return a, b, c, d


# hack around lack of axis kwarg in older numpy versions
try:
    np.linalg.norm([[4]], axis=1)
except TypeError:
    def _vector_norm(X):
        return np.apply_along_axis(np.linalg.norm, 1, X)
else:
    def _vector_norm(X):
        return np.linalg.norm(X, axis=1)
mit
2,450,078,011,670,945,000
30.056
78
0.592373
false
2.940152
false
false
false
rickysarraf/autoEqualizer
autoEqualizer.py
1
8836
#!/usr/bin/python # autoEqualizer - Script to load equalizer presets ondemand based on what genre of track is playing # Copyright (C) 2007 Ritesh Raj Sarraf <[email protected]> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import os import sys import threading import signal from time import sleep try: from qt import * except ImportError: sys.stderr.write("Err!!! I need the Python Qt modules. Please install the python-qt3 package.\n") DEBUG=1 MODE=1 try: from dcopext import DCOPClient, DCOPApp except ImportError: sys.stderr.write("Err!!! I can't find the dcopext module.\n") os.popen( "kdialog --sorry 'PyKDE3 (KDE3 bindings for Python) is required for this script.'" ) raise if DEBUG: if os.path.isfile(__file__+".log") is True: try: os.remove(__file__+".log") except IOError: sys.stderr.write("Couldn't remove the file. Do you have ownership.\n") f = open(__file__+".log", 'a') #class Notification( QCustomEvent ): class Notification( QCustomEvent ): __super_init = QCustomEvent.__init__ def __init__( self, str ): self.__super_init(QCustomEvent.User + 1) self.string = str class autoEqualizer( QApplication): """ The main application, also sets up the Qt event loop """ def __init__( self, args, mode ): QApplication.__init__( self, args ) # create a new DCOP-Client self.client = DCOPClient() # Select what mode we want to run in # 1 => shortStatusMessage # 2 => popupMessage self.mode = mode # connect the client to the local DCOP-Server if self.client.attach() is not True: os.popen( "kdialog --sorry 'Could not connect to local DCOP server. Something weird happened.'" ) sys.exit(1) # create a DCOP-Application-Object to talk to Amarok self.amarok = DCOPApp('amarok', self.client) debug( "Started.\n" ) # Start separate thread for reading data from stdin self.stdinReader = threading.Thread( target = self.readStdin ) self.stdinReader.start() self.readSettings() def osCommands(self, command): # Execute the command and return the exit value # Once the extraction problem is root-caused, we can fix this easily. os.environ['__autoEqualizer_command'] = command try: old_environ = os.environ['LANG'] except KeyError: old_environ = "C" os.environ['LANG'] = "C" #if os.system(command '> __autoEqualizer_output 2>&1') != 0: # debug("Couldn't execute the command using the dcopy command interface also.") def saveState(self, sessionmanager): # script is started by amarok, not by KDE's session manager debug("We're in saveState. 
We should be avoiding session starts with this in place.\n") sessionmanager.setRestartHint(QSessionManager.RestartNever) def readSettings( self ): """ Reads settings from configuration file """ try: foovar = config.get( "General", "foo" ) except: debug( "No config file found, using defaults.\n" ) ############################################################################ # Stdin-Reader Thread ############################################################################ def readStdin( self ): """ Reads incoming notifications from stdin """ while True: # Read data from stdin. Will block until data arrives. line = sys.stdin.readline() debug ("Line is %s.\n" % (line) ) if line: qApp.postEvent( self, Notification(line) ) else: break ############################################################################ # Notification Handling ############################################################################ def customEvent( self, notification ): """ Handles notifications """ string = QString(notification.string) debug( "Received notification: " + str( string ) + "\n" ) if string.contains( "configure" ): self.configure() if string.contains( "engineStateChange: play" ): debug("Play event triggered.\n") self.engineStatePlay() if string.contains( "engineStateChange: idle" ): self.engineStateIdle() if string.contains( "engineStateChange: pause" ): self.engineStatePause() if string.contains( "engineStateChange: empty" ): self.engineStatePause() if string.contains( "trackChange" ): debug("Track change event occured.\n") self.trackChange() # Notification callbacks. Implement these functions to react to specific notification # events from Amarok: def configure( self ): debug( "configuration" ) self.dia = ConfigDialog() self.dia.show() self.connect( self.dia, SIGNAL( "destroyed()" ), self.readSettings ) def engineStatePlay( self ): """ Called when Engine state changes to Play """ debug ("Enable the equalizer to On") self.equalizerState() def engineStateIdle( self ): """ Called when Engine state changes to Idle """ pass def engineStatePause( self ): """ Called when Engine state changes to Pause """ pass def engineStateEmpty( self ): """ Called when Engine state changes to Empty """ pass def trackChange( self ): """ Called when a new track starts """ debug ("Track Change event called.\n") self.setEqualizer() def getGenre(self): # get the Genre from the current song. retval, genre = self.amarok.player.genre() genre = genre.__str__() if retval is not True: debug("I couldn't get the genre using the library. Is Amarok running?") #TODO: debug("Will try using the dcop commandline interface") else: return genre def setEqualizer(self): # set the equalizer accordingly # TODO: It would be good to have a list of preset equalizers # and match them self.genre = self.getGenre() retval, success = self.amarok.player.setEqualizerPreset(self.genre) if retval is not True: debug("I couldn't get the equalizer preset. 
Is Amarok running?") else: if self.mode == 1: self.amarok.playlist.shortStatusMessage("Activated equalizer preset -> %s" % (self.genre) ) debug ("Activated equalizer preset -> %s\n" % (self.genre) ) elif self.mode == 2: self.amarok.playlist.popupMessage("Activated equalizer preset -> %s" % (self.genre) ) debug ("Activated equalizer preset -> %s\n" % (self.genre) ) else: # Default Mode self.amarok.playlist.popupMessage("Activated equalizer preset -> %s" % (self.genre) ) debug ("Activated equalizer preset -> %s\n" % (self.genre) ) def equalizerState(self): # check if the equalizer is on or not # FIXME: Currently, it looks like dcopext has a bug # even though I try to set the equalizer to on, it doesn't # so for now we will check if the equalizer is on or not and # enable it using the dcop command retval, equalizerState = self.amarok.player.equalizerEnabled() if not equalizerState: os.system( "dcop amarok player setEqualizerEnabled True" ) debug("Enable the Equalizer.") ############################################################################ def debug( message ): """ Prints debug message to stdout """ f.writelines(message) f.flush() #print debug_prefix + " " + message def onStop(signum, stackframe): """ Called when script is stopped by user """ debug("I'm in onStop.\n") debug("We need to kill the process, otherwise it strays around even if amarok exits.\n") os.kill(os.getpid(), 9) def main( ): app = autoEqualizer ( sys.argv, MODE ) app.exec_loop() if __name__ == "__main__": mainapp = threading.Thread(target=main) mainapp.start() signal.signal(signal.SIGTERM, onStop) # necessary for signal catching while 1: sleep(120)
gpl-3.0
-8,767,119,271,233,244,000
32.596958
109
0.603327
false
4.09643
true
false
false
fffy2366/image-processing
api.py
1
10056
#!/usr/bin/env python
# -*-coding:utf8-*-
import base64
import sys
import os
import logging
import datetime
import re
import multiprocessing
# from pylab import *
from PIL import Image
import cv2
from bceocrapi import BceOCRAPI
from bin.python.models.images import Images
from nude import Nude
import imagehash
from bin.python.utils import logger
from bin.python.models.redis_results import RedisResults

Image.LOAD_TRUNCATED_IMAGES = True

reload(sys)
sys.setdefaultencoding('utf-8')

# Logging
# Default log directory; must be set at the program entry point to take effect; optional
logger.log_dir = "./logs"
# Log file name prefix; must be set at the program entry point to take effect; optional
logger.log_name = "api"
conf = logger.Logger()
# conf.debug('debug')
# conf.warn('tr-warn')
# conf.info('ds-info')
# conf.error('ss-error')

# IMAGE_DIR = "/Users/fengxuting/Downloads/testphoto/"
IMAGE_DIR = "public/uploads/api/"

class Api:

    def __init__(self):
        self.IMAGE_HASH = ""

    # Get the hash of an image
    def get_image_hash(self, file):
        img = Image.open(file)
        h = str(imagehash.dhash(img))
        return h

    # Face detection
    def face(self, file):
        # Get user supplied values
        oriImg = IMAGE_DIR + file
        # Image compression
        # disImg = IMAGE_DIR +"ocrdis"+file
        # newImg = resizeImg(ori_img=oriImg,dst_img=disImg,dst_w=2048,dst_h=2048,save_q=100)
        # cascPath = "./data/haarcascades/haarcascade_frontalface_alt.xml"
        cascPath = "./data/lbpcascades/lbpcascade_frontalface.xml"

        # Create the haar cascade
        facecascade = cv2.CascadeClassifier(cascPath)

        # Read the image
        image = cv2.imread(oriImg)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # Histogram equalization: enhances image contrast by stretching the pixel intensity distribution.
        gray = cv2.equalizeHist(gray, gray)
        gray = cv2.medianBlur(gray, 3)  # Noise reduction?
        (height, width, a) = image.shape

        # Detect faces in the image
        faces = facecascade.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=2,
            minSize=(30, 30),
            flags=cv2.cv.CV_HAAR_SCALE_IMAGE
        )

        # 1. A face covering less than 0.5% of the image is not considered a face.
        # 2. With multiple faces, compare each to the largest; if the ratio is below 50%,
        #    it is not considered a face.
        faces_area = []
        face_count = 0
        for (x, y, w, h) in faces:
            face_area = w * h
            # Proportion of the whole image covered by the face
            face_scale = (face_area) / float(height * width) * 100
            # print("name %s,scale %s,x %s,y %s,w %s,h %s,area %s" % (file,face_scale,x,y,w,h,face_area))
            # if face_scale<0.5:
            #     continue
            faces_area.append(face_area)

        faces_new = []
        if(len(faces_area)>1):
            face_max = max(faces_area)
            for index, face in enumerate(faces):
                (x, y, w, h) = face
                # Proportion of this face relative to the largest face
                scale = (w*h)/float(face_max) * 100
                # print("scale %s" % (scale))
                if(scale<50):
                    # delete(faces,index,axis=0)
                    pass
                else:
                    faces_new.append(face)
        else:
            faces_new = faces
        return faces_new

    # Convert to black and white
    def blackWhite(self, filename):
        image_file = Image.open(IMAGE_DIR+filename)  # open colour image
        # exception : Premature end of JPEG file . IOError: image file is truncated (1 bytes not processed)
        try:
            image_file = image_file.convert('L')  # convert image to black and white
        except Exception as e:
            raise
            return IMAGE_DIR+filename
        dst_path = IMAGE_DIR+"wb"+filename
        image_file.save(dst_path)
        return dst_path

    # Digit recognition
    def ocr(self, file):
        ocr = BceOCRAPI("02fbe03acf3042a1b40e067bba1971f7", "bb1d4aafe7924fc0829fc33fa26b3347");
        # Convert to black and white
        # newImg = IMAGE_DIR +file
        newImg = self.blackWhite(file)
        # Image compression
        disImg = IMAGE_DIR +"ocrdis"+file
        newImg = self.resizeImg(ori_img=newImg,dst_img=disImg,dst_w=1600,dst_h=1600,save_q=100)
        with open(newImg, 'rb') as f:
            content = f.read()
        content = base64.b64encode(content)
        try:
            # ocr
            # result = ocr.get_ocr_text(content, language='CHN_ENG')
            result = ocr.get_ocr_text(content, language='ENG')
            # print("file:"+file+"----------result:"+result)
            # conf.info("file:"+file+"----------result:"+result)
            return result
        except Exception as e:
            raise

    # If the image width or height is greater than the target size, scale it down proportionally
    def resizeImg(self, **args):
        args_key = {'ori_img': '', 'dst_img': '', 'dst_w': '', 'dst_h': '', 'save_q': 75}
        arg = {}
        for key in args_key:
            if key in args:
                arg[key] = args[key]
        im = Image.open(arg['ori_img'])
        ori_w, ori_h = im.size
        widthRatio = heightRatio = None
        ratio = 1
        if (ori_w and ori_w > arg['dst_w']) or (ori_h and ori_h > arg['dst_h']):
            if arg['dst_w'] and ori_w > arg['dst_w']:
                widthRatio = float(arg['dst_w']) / ori_w  # the correct way to get a float ratio
            if arg['dst_h'] and ori_h > arg['dst_h']:
                heightRatio = float(arg['dst_h']) / ori_h
            if widthRatio and heightRatio:
                if widthRatio < heightRatio:
                    ratio = widthRatio
                else:
                    ratio = heightRatio
            if widthRatio and not heightRatio:
                ratio = widthRatio
            if heightRatio and not widthRatio:
                ratio = heightRatio
            newWidth = int(ori_w * ratio)
            newHeight = int(ori_h * ratio)
        else:
            newWidth = ori_w
            newHeight = ori_h
        im.resize((newWidth, newHeight), Image.ANTIALIAS).save(arg['dst_img'], quality=arg['save_q'])
        return arg['dst_img']

    # Crop the part of the image below the face
    def cropImg(self, file, faces):
        oriImg = IMAGE_DIR + file
        # Crop at most five face-heights below the face
        # ipl_image = cv.LoadImage(oriImg)
        ipl_image = Image.open(oriImg)
        # print(ipl_image.height)
        if (len(faces) < 1):
            # print("no face")
            return faces
        (x, y, w, h) = faces[0]
        yy = int(y + 1.5*h)
        hh = h * 6
        (width, height) = ipl_image.size
        if (hh > height - y):
            hh = height - y
        if(yy>=height):
            return False
        dst = ipl_image.crop((x, yy, x + w, y + hh))
        dst.save(IMAGE_DIR + file)

    # Detect pornographic images
    def isnude(self, file):
        # Image compression
        imagePath = IMAGE_DIR + file
        nudeImg = IMAGE_DIR +"nude_"+file
        self.resizeImg(ori_img=imagePath,dst_img=nudeImg,dst_w=300,dst_h=300,save_q=100)
        faces = self.face("nude_"+file)
        self.cropImg("nude_"+file,faces)
        n = Nude(nudeImg)
        # n.setFaces(faces)
        # n.resize(1000,1000)
        n.parse()
        # print n.result
        return 1 if n.result else 0

    # Count the digits in a string
    def countdigits(self, s):
        digitpatt = re.compile('\d')
        return len(digitpatt.findall(s))

    # Delete intermediate images
    def delImg(self, file):
        # the black-and-white one
        wbImg = IMAGE_DIR+"wb"+file
        ocrImg300 = IMAGE_DIR +"dis"+file
        # the resized one (max 1600)
        ocrImg1600 = IMAGE_DIR +"ocrdis"+file
        nudeImg = IMAGE_DIR +"nude_"+file
        if os.path.isfile(wbImg):
            os.remove(wbImg)
        if os.path.isfile(ocrImg300):
            os.remove(ocrImg300)
        if os.path.isfile(ocrImg1600):
            os.remove(ocrImg1600)
        # the cropped image used for nudity detection
        if os.path.isfile(nudeImg):
            os.remove(nudeImg)
        # delete the original file
        # os.remove(IMAGE_DIR+file)

    def one(self, file):
        filepath = IMAGE_DIR+file
        if(os.path.isfile(filepath)):
            self.IMAGE_HASH = self.get_image_hash(filepath)
            redis_result = self.get_result_from_redis(self.IMAGE_HASH)
            if(redis_result):
                # delete the intermediate images
                self.delImg(file)
                print redis_result
                sys.exit(0)
            is_pass = 1
            # face detection
            fc = self.face(file)
            # If the face count is not 1, skip OCR and nudity detection
            if(len(fc)!=1):
                l = -1
                is_nude = -1
                is_pass = 0
            else:
                # ocr
                text = ""
                text = self.ocr(file)
                text = text.encode("utf-8")
                l = self.countdigits(text)
                if (l > 6):
                    is_nude = -1
                    is_pass = 0
                else:
                    # nudity detection
                    is_nude = self.isnude(file)
                    if(is_nude==1):
                        is_pass = 0
            # delete the intermediate images
            self.delImg(file)
            # print {"face_count":len(fc),"digital_count":l,"is_nude":is_nude,"pass":is_pass}
            result = str(len(fc))+","+str(l)+","+str(is_nude)+","+str(is_pass)
            # save the result to the redis database
            self.save_redis(self.IMAGE_HASH,result)
            print result
        else:
            print("error:", file, "is not an image file")

    # save to redis
    def save_redis(self, hash, result):
        rr = RedisResults()
        rr.save(hash, result)

    # check whether the redis entry exists and return the detection result
    def get_result_from_redis(self, hash):
        rr = RedisResults()
        return rr.get(hash)

if __name__ == '__main__':
    api = Api()
    api.one(sys.argv[1])
    # api.one("9d27d550-4beb-11e6-aefd-4f827560e966.png")
    # api.one("91787150-4bf1-11e6-aefd-4f827560e966.png")
    pass
mit
-6,490,777,745,784,625,000
28.816456
106
0.525472
false
2.877825
false
false
false
Huyuwei/tvm
tests/python/contrib/test_cublas.py
1
2856
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import numpy as np
from tvm.contrib import cublas

def test_matmul_add():
    n = 1024
    l = 128
    m = 235
    A = tvm.placeholder((n, l), name='A')
    B = tvm.placeholder((l, m), name='B')
    C = cublas.matmul(A, B)
    s = tvm.create_schedule(C.op)

    def verify(target="cuda"):
        if not tvm.module.enabled(target):
            print("skip because %s is not enabled..." % target)
            return
        if not tvm.get_global_func("tvm.contrib.cublas.matmul", True):
            print("skip because extern function is not available")
            return
        ctx = tvm.gpu(0)
        f = tvm.build(s, [A, B, C], target)
        a = tvm.nd.array(np.random.uniform(size=(n, l)).astype(A.dtype), ctx)
        b = tvm.nd.array(np.random.uniform(size=(l, m)).astype(B.dtype), ctx)
        c = tvm.nd.array(np.zeros((n, m), dtype=C.dtype), ctx)
        f(a, b, c)
        tvm.testing.assert_allclose(
            c.asnumpy(), np.dot(a.asnumpy(), b.asnumpy()), rtol=1e-5)
    verify()

def test_batch_matmul():
    j = 16
    n = 1024
    l = 128
    m = 235
    A = tvm.placeholder((j, n, l), name='A')
    B = tvm.placeholder((j, l, m), name='B')
    C = cublas.batch_matmul(A, B)
    s = tvm.create_schedule(C.op)

    def verify(target="cuda"):
        if not tvm.module.enabled(target):
            print("skip because %s is not enabled..." % target)
            return
        if not tvm.get_global_func("tvm.contrib.cublas.matmul", True):
            print("skip because extern function is not available")
            return
        ctx = tvm.gpu(0)
        f = tvm.build(s, [A, B, C], target)
        a = tvm.nd.array(np.random.uniform(size=(j, n, l)).astype(A.dtype), ctx)
        b = tvm.nd.array(np.random.uniform(size=(j, l, m)).astype(B.dtype), ctx)
        c = tvm.nd.array(np.zeros((j, n, m), dtype=C.dtype), ctx)
        f(a, b, c)
        tvm.testing.assert_allclose(
            c.asnumpy(), np.matmul(a.asnumpy(), b.asnumpy()), rtol=1e-5)
    verify()

if __name__ == "__main__":
    test_matmul_add()
    test_batch_matmul()
apache-2.0
2,408,037,564,744,352,000
36.090909
80
0.613796
false
3.395957
true
false
false
prataprc/eazytext
eazytext/extension/.Attic/box.py
1
4735
# This file is subject to the terms and conditions defined in # file 'LICENSE', which is part of this source code package. # Copyright (c) 2010 SKR Farms (P) LTD. # -*- coding: utf-8 -*- # Gotcha : none # Notes : none # Todo : none # 1. Unit test case for this extension. from zope.component import getGlobalSiteManager from eazytext.extension import Extension from eazytext.interfaces import IEazyTextExtensionFactory from eazytext.lib import split_style gsm = getGlobalSiteManager() doc = """ h3. Box : Description :: Generate a box with title and content. Text within the curly braces will be interpreted as the content and can contain EazyText text as well. If title text is provided, then the extension can take parameter ''hide'' which can be used to shrink/expand box content. Property key-value pairs accepts CSS styling attributes and other special attributes like, |= title | optional, title string |= titlestyle | optional, title style string in CSS style format |= contentstyle | optional, content style string in CSS style format ''Example'' > [<PRE{{{ Box hide #{ # 'title' : 'Building Mnesia Database', # 'titlestyle' : 'color: brown;', # 'contentstyle' : 'color: gray;', # 'border' : '1px solid gray', # 'style' : { 'margin' : '10px', 'padding' : '10px' }, #} This chapter details the basic steps involved when designing a Mnesia database and the programming constructs which make different solutions available to the programmer. The chapter includes the following sections, * defining a schema * the datamodel * starting Mnesia * creating new tables. }}} >] {{{ Box hide #{ # 'title' : 'Building Mnesia Database', # 'titlestyle' : 'color: brown;', # 'contentstyle' : 'color: gray;', # 'border' : '1px solid gray', # 'style' : { 'margin' : '10px', 'padding' : '10px' }, #} This chapter details the basic steps involved when designing a Mnesia database and the programming constructs which make different solutions available to the programmer. The chapter includes the following sections: * defining a schema * the datamodel * starting Mnesia * creating new tables. 
}}} """ tmpl = """ <div class="etext-box" style="%s"> <div class="boxtitle" style="%s"> %s %s </div> <div class="boxcont" style="%s">%s</div> </div> """ spantmpl = """ <span class="boxhide"> hide</span> <span class="boxshow"> show</span> """ class Box( Extension ) : _doc = doc def __init__( self, props, nowiki, *args ) : self.nowiki = nowiki self.title = props.pop( 'title', '' ) boxstyle = props.pop( 'style', {} ) titlestyle = props.pop( 'titlestyle', {} ) contentstyle = props.pop( 'contentstyle', '' ) d_style, s_style = split_style( boxstyle ) self.style = s_style self.css = {} self.css.update( props ) self.css.update( d_style ) d_style, s_style = split_style( titlestyle ) self.titlestyle = s_style self.title_css = {} self.title_css.update( d_style ) d_style, s_style = split_style( contentstyle ) self.contentstyle = s_style self.cont_css = {} self.cont_css.update( d_style ) self.hide = 'hide' in args def __call__( self, argtext ): return eval( 'Box( %s )' % argtext ) def html( self, node, igen, *args, **kwargs ) : from eazytext.parser import ETParser fn = lambda (k, v) : '%s : %s' % (k,v) boxstyle = '; '.join(map( fn, self.css.items() )) if self.style : boxstyle += '; %s ;' % self.style titlestyle = '; '.join(map( fn, self.title_css.items() )) if self.titlestyle : titlestyle += '; %s ;' % self.titlestyle contstyle = '; '.join(map( fn, self.cont_css.items() )) if self.contentstyle : contstyle += '; %s ;' % self.contentstyle self.nowiki_h = '' if self.nowiki : etparser = ETParser( skin=None, nested=True, lex_optimize=False, yacc_optimize=False, ) tu = etparser.parse( self.nowiki, debuglevel=0 ) self.nowiki_h = tu.tohtml() if self.title : html = tmpl % ( boxstyle, titlestyle, self.title, spantmpl, contstyle, self.nowiki_h ) else : html = tmpl % ( boxstyle, titlestyle, self.title, '', contstyle, self.nowiki_h ) return html # Register this plugin gsm.registerUtility( Box(), IEazyTextExtensionFactory, 'Box' ) Box._doc = doc
gpl-3.0
856,791,562,702,116,600
28.409938
78
0.594931
false
3.568199
false
false
false
Maskawanian/ude-components
Components/Client/__init__.py
1
1318
# See LICENCE for the source code licence.
# (c) 2010 Dan Saul

from Base import Base

SAVE_STATUS_SAVED = 0               # We can close this without losing data.
SAVE_STATUS_NOT_SAVED = 1           # If we close this we will lose data, but we can save.
SAVE_STATUS_NOT_SAVED_NEED_PATH = 2 # If we close this we will lose data, but we can save, however need the save path.
SAVE_STATUS_SAVING = 3              # In the progress of saving.
SAVE_STATUS_UNSAVABLE = 4           # If we close this we will lose data, we are not able to save however.
SAVE_STATUS_RANGE = range(SAVE_STATUS_SAVED,SAVE_STATUS_UNSAVABLE+1)

BUS_INTERFACE_NAME = "org.ude.components.client"
BUS_INTERFACE_NAME_PID_FORMAT = BUS_INTERFACE_NAME+"_{0}"
BUS_OBJECT_PATH = "/org/ude/components/client"

import logging,os

logger = logging.getLogger('Client')
logger.setLevel(logging.DEBUG)

__fh = logging.FileHandler('runtime.log')
__fh.setLevel(logging.DEBUG)

__ch = logging.StreamHandler()
__ch.setLevel(logging.ERROR)

__formatter = logging.Formatter('%(asctime)s %(name)s[%(process)d:%(levelname)s] %(filename)s::%(funcName)s() - %(message)s')
__fh.setFormatter(__formatter)
__ch.setFormatter(__formatter)
logger.addHandler(__fh)
logger.addHandler(__ch)

#http://docs.python.org/library/logging.html#http-handler eventually...
lgpl-3.0
7,542,065,898,066,238,000
34.621622
125
0.698786
false
3.270471
false
false
false
mardiros/apium
apium/task/__init__.py
1
8390
import sys
import types
import logging
import asyncio
import inspect
import traceback
import importlib
from collections import defaultdict
from uuid import uuid4

import venusian

from .. import registry

log = logging.getLogger(__name__)


class TaskRegistry(object):
    """ Default implementation of the task registry """

    def __init__(self):
        self._registry = {}
        self.default_queue = '#master'
        self.queues = defaultdict(list)

    def register(self, task):
        """ Register a task """
        if task.name in self._registry:
            raise RuntimeError('Task {} is already registered'
                               ''.format(task.name))
        if task.queue:
            self.queues[task.queue].append(task.name)
        else:
            if self.get_queue(task.name) not in self.queues:
                self.queues[self.default_queue].append(task.name)
        self._registry[task.name] = task

    def get(self, task_name):
        """ Get the task from its name.
        The task must have been registered previously.
        """
        try:
            return self._registry[task_name]
        except KeyError:
            raise RuntimeError('Task {} is not registered'.format(task_name))

    def configure_queues(self, default_queue='#master', queues=None):
        self.default_queue = default_queue
        if queues:
            for queue, tasks in queues.items():
                self.queues[queue].extend(tasks)

    def get_queue(self, task_name):
        """ Get the queue for the given task name """
        for name, queue in self.queues.items():
            if task_name in queue:
                return name
        return self.default_queue


class TaskRequest:
    """ Represent a task instance to run """

    def __init__(self, driver, task_name, task_args, task_kwargs,
                 uuid=None, ignore_result=None, result_queue=None):
        self._driver = driver
        self.uuid = uuid or str(uuid4())
        self.task_name = task_name
        self.task_args = task_args
        self.task_kwargs = task_kwargs
        self.result_queue = result_queue or self._driver.get_result_queue()
        self.ignore_result = ignore_result

    @asyncio.coroutine
    def get(self, timeout=None):
        """ Return the result of the task or the result of the chained
        tasks in case some callbacks have been attached.

        :param timeout: timeout for the task. If None, the default
            timeout of the TaskRequest will be used.
            The default timeout is the timeout attribute of the task.
        :type timeout: float
        :return: the result of the task
        """
        result = yield from self._driver.pop_result(self, timeout)
        return result

    def to_dict(self):
        return {'uuid': self.uuid,
                'ignore_result': self.ignore_result,
                'result_queue': self.result_queue,
                'task_name': self.task_name,
                'task_args': self.task_args,
                'task_kwargs': self.task_kwargs,
                }

    def __str__(self):
        return '<TaskRequest {}>'.format(self.uuid)


class TaskResponse:

    def __init__(self, uuid, status, result=None, exception=None,
                 traceback=None):
        self.uuid = uuid
        self.status = status
        self.result = result
        self.exception = exception
        self.traceback = traceback

    def to_dict(self):
        ret = {'uuid': self.uuid,
               'status': self.status,
               }
        if self.status == 'DONE':
            ret['result'] = self.result
        elif self.status == 'ERROR':
            ret['exception'] = {'module': getattr(self.exception,
                                                  '__module__',
                                                  '__builtin__'),
                                'class': self.exception.__class__.__name__,
                                'args': self.exception.args,
                                }
            # Format the traceback captured when the error was raised;
            # fall back to the current exception context if none was stored.
            if self.traceback is not None:
                ret['traceback'] = ''.join(
                    traceback.format_tb(self.traceback)).strip()
            else:
                ret['traceback'] = traceback.format_exc().strip()
        return ret


class Task:

    ignore_result = False
    queue = None
    timeout = None

    def __init__(self, driver, method, **kwargs):
        self._driver = driver
        if 'name' in kwargs:
            self.name = kwargs['name']
        else:
            self.name = '{}.{}'.format(method.__module__, method.__name__)
        self._origin = method
        if inspect.isclass(method):
            method = method()
        if (not asyncio.iscoroutinefunction(method) and
                (isinstance(method, asyncio.Future) or
                 inspect.isgenerator(method))):
            method = asyncio.coroutine(method)
        self.method = method
        if 'ignore_result' in kwargs:
            self.ignore_result = kwargs['ignore_result']
        if 'timeout' in kwargs:
            self.timeout = kwargs['timeout']
        self._name = kwargs.get('name', None)

    @asyncio.coroutine
    def __call__(self, *args, **kwargs):
        ignore_result = self.ignore_result
        timeout = self.timeout
        if 'task_options' in kwargs:
            task_options = kwargs.pop('task_options')
            ignore_result = task_options.get('ignore_result', ignore_result)
            timeout = task_options.get('timeout', timeout)
        request = TaskRequest(self._driver, self.name, args, kwargs,
                              ignore_result=ignore_result)
        yield from self._driver.push_task(request)
        if ignore_result:
            return
        result = yield from request.get(timeout)
        return result

    def execute(self, *args, **kwargs):
        """ Execute the wrapped method. This call must run in a process of
        the apium worker.

        If the wrapped method is a coroutine, it will spawn a new event
        loop in the process executor to wait until the coroutine is done.
        """
        ret = self.method(*args, **kwargs)
        if isinstance(ret, asyncio.Future) or inspect.isgenerator(ret):
            # In that case,
            # run the asyncio coroutine in a dedicated event loop
            # of the process pool executor

            @asyncio.coroutine
            def routine(method, future):
                ret = yield from method
                future.set_result(ret)

            future = asyncio.Future()
            old_loop = asyncio.get_event_loop()
            try:
                loop = asyncio.new_event_loop()
                asyncio.set_event_loop(loop)
                loop.run_until_complete(asyncio.Task(routine(ret, future)))
                ret = future.result()
            finally:
                asyncio.set_event_loop(old_loop)
                loop.close()
        return ret

    def __str__(self):
        return '<task {}>'.format(self.name)


def execute_task(task_name, uuid, args, kwargs):
    """ Glue function that can be pickled.

    Python cannot easily pickle a class method, that's why the
    ITaskRegistry cannot be used directly.
    """
    driver = registry.get_driver()  # Here is the main reason we have a singleton
    task_to_run = driver.get_task(task_name)
    log.info('Executing task {}'.format(task_name))
    log.debug('with param {}, {}'.format(args, kwargs))
    try:
        ret = TaskResponse(uuid, 'DONE', task_to_run.execute(*args, **kwargs))
    except Exception as exc:
        log.error('Error {} while running task {} with param {}, {}'
                  ''.format(exc, task_name, args, kwargs))
        ret = TaskResponse(uuid, 'ERROR', exception=exc,
                           traceback=sys.exc_info()[2])
    ret = ret.to_dict()
    log.info('task {} executed'.format(task_name))
    log.debug('task returns {}'.format(ret))
    return ret


class task:
    """ Transform a class or a function to a coroutine,
    attach it to be used via the apium application.
    """

    def __init__(self, **task_options):
        self.task_options = task_options

    def __call__(self, wrapped):
        def callback(scanner, name, ob):
            task_ = Task(scanner.driver, wrapped, **self.task_options)
            log.info('Register task {}'.format(task_.name))
            scanner.driver.register_task(task_)
        venusian.attach(wrapped, callback, category='apium')
        return wrapped
bsd-3-clause
-306,071,942,864,579,460
31.269231
81
0.556853
false
4.390372
false
false
false
googleapis/googleapis-gen
google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/services/services/billing_setup_service/transports/grpc.py
1
11783
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import warnings from typing import Callable, Dict, Optional, Sequence, Tuple from google.api_core import grpc_helpers # type: ignore from google.api_core import gapic_v1 # type: ignore import google.auth # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore import grpc # type: ignore from google.ads.googleads.v6.resources.types import billing_setup from google.ads.googleads.v6.services.types import billing_setup_service from .base import BillingSetupServiceTransport, DEFAULT_CLIENT_INFO class BillingSetupServiceGrpcTransport(BillingSetupServiceTransport): """gRPC backend transport for BillingSetupService. A service for designating the business entity responsible for accrued costs. A billing setup is associated with a payments account. Billing- related activity for all billing setups associated with a particular payments account will appear on a single invoice generated monthly. Mutates: The REMOVE operation cancels a pending billing setup. The CREATE operation creates a new billing setup. This class defines the same methods as the primary client, so the primary client can load the underlying transport implementation and call it. It sends protocol buffers over the wire using gRPC (which is built on top of HTTP/2); the ``grpcio`` package must be installed. """ def __init__(self, *, host: str = 'googleads.googleapis.com', credentials: ga_credentials.Credentials = None, credentials_file: str = None, scopes: Sequence[str] = None, channel: grpc.Channel = None, api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiate the transport. Args: host (Optional[str]): The hostname to connect to. credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if ``channel`` is provided. credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if ``channel`` is provided. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if ``channel`` is provided. channel (Optional[grpc.Channel]): A ``Channel`` instance through which to make calls. api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. If provided, it overrides the ``host`` argument and tries to create a mutual TLS channel with client SSL credentials from ``client_cert_source`` or applicatin default SSL credentials. 
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): Deprecated. A callback to provide client SSL certificate bytes and private key bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials for grpc channel. It is ignored if ``channel`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. """ self._ssl_channel_credentials = ssl_channel_credentials if channel: # Sanity check: Ensure that channel and credentials are not both # provided. credentials = False # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None elif api_mtls_endpoint: warnings.warn("api_mtls_endpoint and client_cert_source are deprecated", DeprecationWarning) host = api_mtls_endpoint if ":" in api_mtls_endpoint else api_mtls_endpoint + ":443" if credentials is None: credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id) # Create SSL credentials with client_cert_source or application # default SSL credentials. if client_cert_source: cert, key = client_cert_source() ssl_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) else: ssl_credentials = SslCredentials().ssl_credentials # create a new channel. The provided one is ignored. self._grpc_channel = type(self).create_channel( host, credentials=credentials, credentials_file=credentials_file, ssl_credentials=ssl_credentials, scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) self._ssl_channel_credentials = ssl_credentials else: host = host if ":" in host else host + ":443" if credentials is None: credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES) # create a new channel. The provided one is ignored. self._grpc_channel = type(self).create_channel( host, credentials=credentials, ssl_credentials=ssl_channel_credentials, scopes=self.AUTH_SCOPES, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) self._stubs = {} # type: Dict[str, Callable] # Run the base constructor. super().__init__( host=host, credentials=credentials, client_info=client_info, ) @classmethod def create_channel(cls, host: str = 'googleads.googleapis.com', credentials: ga_credentials.Credentials = None, scopes: Optional[Sequence[str]] = None, **kwargs) -> grpc.Channel: """Create and return a gRPC channel object. Args: address (Optionsl[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. kwargs (Optional[dict]): Keyword arguments, which are passed to the channel creation. 
Returns: grpc.Channel: A gRPC channel object. """ return grpc_helpers.create_channel( host, credentials=credentials, scopes=scopes or cls.AUTH_SCOPES, **kwargs ) @property def grpc_channel(self) -> grpc.Channel: """Return the channel designed to connect to this service. """ return self._grpc_channel @property def get_billing_setup(self) -> Callable[ [billing_setup_service.GetBillingSetupRequest], billing_setup.BillingSetup]: r"""Return a callable for the get billing setup method over gRPC. Returns a billing setup. Returns: Callable[[~.GetBillingSetupRequest], ~.BillingSetup]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if 'get_billing_setup' not in self._stubs: self._stubs['get_billing_setup'] = self.grpc_channel.unary_unary( '/google.ads.googleads.v6.services.BillingSetupService/GetBillingSetup', request_serializer=billing_setup_service.GetBillingSetupRequest.serialize, response_deserializer=billing_setup.BillingSetup.deserialize, ) return self._stubs['get_billing_setup'] @property def mutate_billing_setup(self) -> Callable[ [billing_setup_service.MutateBillingSetupRequest], billing_setup_service.MutateBillingSetupResponse]: r"""Return a callable for the mutate billing setup method over gRPC. Creates a billing setup, or cancels an existing billing setup. Returns: Callable[[~.MutateBillingSetupRequest], ~.MutateBillingSetupResponse]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if 'mutate_billing_setup' not in self._stubs: self._stubs['mutate_billing_setup'] = self.grpc_channel.unary_unary( '/google.ads.googleads.v6.services.BillingSetupService/MutateBillingSetup', request_serializer=billing_setup_service.MutateBillingSetupRequest.serialize, response_deserializer=billing_setup_service.MutateBillingSetupResponse.deserialize, ) return self._stubs['mutate_billing_setup'] __all__ = ( 'BillingSetupServiceGrpcTransport', )
apache-2.0
1,060,008,593,816,807,600
43.464151
112
0.616821
false
4.730229
false
false
false
okfse/froide
froide/foirequest/tasks.py
1
3122
import os

from django.conf import settings
from django.utils import translation
from django.db import transaction
from django.core.files import File

from froide.celery import app as celery_app

from .models import FoiRequest, FoiAttachment
from .foi_mail import _process_mail, _fetch_mail
from .file_utils import convert_to_pdf


@celery_app.task(acks_late=True, time_limit=60)
def process_mail(*args, **kwargs):
    translation.activate(settings.LANGUAGE_CODE)

    def run(*args, **kwargs):
        try:
            _process_mail(*args, **kwargs)
        except Exception:
            transaction.rollback()
            raise
        else:
            transaction.commit()
        return None

    run = transaction.commit_manually(run)
    run(*args, **kwargs)


@celery_app.task(expires=60)
def fetch_mail():
    for rfc_data in _fetch_mail():
        process_mail.delay(rfc_data)


@celery_app.task
def detect_overdue():
    translation.activate(settings.LANGUAGE_CODE)
    for foirequest in FoiRequest.objects.get_to_be_overdue():
        foirequest.set_overdue()


@celery_app.task
def detect_asleep():
    translation.activate(settings.LANGUAGE_CODE)
    for foirequest in FoiRequest.objects.get_to_be_asleep():
        foirequest.set_asleep()


@celery_app.task
def classification_reminder():
    translation.activate(settings.LANGUAGE_CODE)
    for foirequest in FoiRequest.objects.get_unclassified():
        foirequest.send_classification_reminder()


@celery_app.task
def count_same_foirequests(instance_id):
    translation.activate(settings.LANGUAGE_CODE)
    try:
        count = FoiRequest.objects.filter(same_as_id=instance_id).count()
        FoiRequest.objects.filter(id=instance_id).update(same_as_count=count)
    except FoiRequest.DoesNotExist:
        pass


@celery_app.task(time_limit=60)
def convert_attachment_task(instance_id):
    try:
        att = FoiAttachment.objects.get(pk=instance_id)
    except FoiAttachment.DoesNotExist:
        return
    return convert_attachment(att)


def convert_attachment(att):
    result_file = convert_to_pdf(
        att.file.path,
        binary_name=settings.FROIDE_CONFIG.get(
            'doc_conversion_binary'
        ),
        construct_call=settings.FROIDE_CONFIG.get(
            'doc_conversion_call_func'
        )
    )
    if result_file is None:
        return

    path, filename = os.path.split(result_file)
    new_file = File(open(result_file, 'rb'))

    if att.converted:
        new_att = att.converted
    else:
        if FoiAttachment.objects.filter(
                belongs_to=att.belongs_to,
                name=filename).exists():
            name, extension = filename.rsplit('.', 1)
            filename = '%s_converted.%s' % (name, extension)

        new_att = FoiAttachment(
            belongs_to=att.belongs_to,
            approved=False,
            filetype='application/pdf',
            is_converted=True
        )

    new_att.name = filename
    new_att.file = new_file
    new_att.size = new_file.size
    new_att.file.save(filename, new_file)
    new_att.save()
    att.converted = new_att
    att.save()
mit
8,346,048,382,823,139,000
25.913793
77
0.652466
false
3.592635
false
false
false
rtbortolin/QandA
QandA/QandA/urls.py
1
1936
""" Definition of urls for QandA. """ from datetime import datetime from django.conf.urls import patterns, url from app.forms import BootstrapAuthenticationForm import app import app.views import django.contrib.auth.views # Uncomment the next lines to enable the admin: # from django.conf.urls import include # from django.contrib import admin # admin.autodiscover() urlpatterns = [ # Examples: url(r'^$', app.views.home, name='home'), url(r'^contact$', app.views.contact, name='contact'), url(r'^about', app.views.about, name='about'), url(r'^login/$', django.contrib.auth.views.login, { 'template_name': 'app/login.html', 'authentication_form': BootstrapAuthenticationForm, 'extra_context': { 'title':'Log in', 'year':datetime.now().year, } }, name='login'), url(r'^logout$', django.contrib.auth.views.logout, { 'next_page': '/', }, name='logout'), url(r'^question/create$', app.views.create_question, name = "create_question"), url(r'^question/(?P<pk>\d+)$', app.views.QuestionDetailView.as_view(template_name='app/questions/details.html'), name = "Question Detail"), url(r'^question/comment$', app.views.make_question_comment, name = "make_question_comment"), url(r'^question/answer$', app.views.answer_a_question, name = "answer_a_question"), url(r'^question/answer/comment$', app.views.make_answer_comment, name = "make_answer_comment"), url(r'^question/vote$', app.views.make_a_vote, name = "make_a_vote"), #url(r'^$', HomePageView.as_view(), name='home'), # Uncomment the admin/doc line below to enable admin documentation: # url(r'^admin/doc/', include('django.contrib.admindocs.urls')), # Uncomment the next line to enable the admin: # url(r'^admin/', include(admin.site.urls)), ]
mit
-6,395,860,316,709,606,000
35.528302
143
0.628099
false
3.63227
false
false
false
bSr43/udis86
scripts/ud_opcode.py
1
26794
# udis86 - scripts/ud_opcode.py # # Copyright (c) 2009, 2013 Vivek Thampi # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON # ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import os import copy class UdInsnDef: """An x86 instruction definition """ def __init__(self, **insnDef): self.mnemonic = insnDef['mnemonic'] self.eflags = insnDef['eflags'] self.firstOpAccess = insnDef['firstOpAccess'] self.secondOpAccess = insnDef['secondOpAccess'] self.implicitRegUse = insnDef['implicitRegUse'] self.implicitRegDef = insnDef['implicitRegDef'] self.prefixes = insnDef['prefixes'] self.opcodes = insnDef['opcodes'] self.operands = insnDef['operands'] self._cpuid = insnDef['cpuid'] self._opcexts = {} for opc in self.opcodes: if opc.startswith('/'): e, v = opc.split('=') self._opcexts[e] = v def lookupPrefix(self, pfx): """Lookup prefix (if any, None otherwise), by name""" return True if pfx in self.prefixes else None @property def vendor(self): return self._opcexts.get('/vendor', None) @property def mode(self): return self._opcexts.get('/m', None) @property def osize(self): return self._opcexts.get('/o', None) def isDef64(self): return 'def64' in self.prefixes def __str__(self): return self.mnemonic + " " + ', '.join(self.operands) + \ " " + ' '.join(self.opcodes) class UdOpcodeTable: """A single table of instruction definitions, indexed by a decode field. """ class CollisionError(Exception): pass class IndexError(Exception): """Invalid Index Error""" pass @classmethod def vendor2idx(cls, v): return (0 if v == 'amd' else (1 if v == 'intel' else 2)) @classmethod def vex2idx(cls, v): if v.startswith("none_"): v = v[5:] vexOpcExtMap = { 'none' : 0x0, '0f' : 0x1, '0f38' : 0x2, '0f3a' : 0x3, '66' : 0x4, '66_0f' : 0x5, '66_0f38' : 0x6, '66_0f3a' : 0x7, 'f3' : 0x8, 'f3_0f' : 0x9, 'f3_0f38' : 0xa, 'f3_0f3a' : 0xb, 'f2' : 0xc, 'f2_0f' : 0xd, 'f2_0f38' : 0xe, 'f2_0f3a' : 0xf, } return vexOpcExtMap[v] # A mapping of opcode extensions to their representational # values used in the opcode map. 
OpcExtMap = { '/rm' : lambda v: int(v, 16), '/x87' : lambda v: int(v, 16), '/3dnow' : lambda v: int(v, 16), '/reg' : lambda v: int(v, 16), # modrm.mod # (!11, 11) => (00b, 01b) '/mod' : lambda v: 0 if v == '!11' else 1, # Mode extensions: # (16, 32, 64) => (00, 01, 02) '/o' : lambda v: (int(v) / 32), '/a' : lambda v: (int(v) / 32), # Disassembly mode # (!64, 64) => (00b, 01b) '/m' : lambda v: 1 if v == '64' else 0, # SSE # none => 0 # f2 => 1 # f3 => 2 # 66 => 3 '/sse' : lambda v: (0 if v == 'none' else (((int(v, 16) & 0xf) + 1) / 2)), # AVX '/vex' : lambda v: UdOpcodeTable.vex2idx(v), '/vexw' : lambda v: 0 if v == '0' else 1, '/vexl' : lambda v: 0 if v == '0' else 1, # Vendor '/vendor': lambda v: UdOpcodeTable.vendor2idx(v) } _TableInfo = { 'opctbl' : { 'label' : 'UD_TAB__OPC_TABLE', 'size' : 256 }, '/sse' : { 'label' : 'UD_TAB__OPC_SSE', 'size' : 4 }, '/reg' : { 'label' : 'UD_TAB__OPC_REG', 'size' : 8 }, '/rm' : { 'label' : 'UD_TAB__OPC_RM', 'size' : 8 }, '/mod' : { 'label' : 'UD_TAB__OPC_MOD', 'size' : 2 }, '/m' : { 'label' : 'UD_TAB__OPC_MODE', 'size' : 2 }, '/x87' : { 'label' : 'UD_TAB__OPC_X87', 'size' : 64 }, '/a' : { 'label' : 'UD_TAB__OPC_ASIZE', 'size' : 3 }, '/o' : { 'label' : 'UD_TAB__OPC_OSIZE', 'size' : 3 }, '/3dnow' : { 'label' : 'UD_TAB__OPC_3DNOW', 'size' : 256 }, '/vendor' : { 'label' : 'UD_TAB__OPC_VENDOR', 'size' : 3 }, '/vex' : { 'label' : 'UD_TAB__OPC_VEX', 'size' : 16 }, '/vexw' : { 'label' : 'UD_TAB__OPC_VEX_W', 'size' : 2 }, '/vexl' : { 'label' : 'UD_TAB__OPC_VEX_L', 'size' : 2 }, } def __init__(self, typ): assert typ in self._TableInfo self._typ = typ self._entries = {} def size(self): return self._TableInfo[self._typ]['size'] def entries(self): return self._entries.iteritems() def numEntries(self): return len(self._entries.keys()) def label(self): return self._TableInfo[self._typ]['label'] def typ(self): return self._typ def meta(self): return self._typ def __str__(self): return "table-%s" % self._typ def add(self, opc, obj): typ = UdOpcodeTable.getOpcodeTyp(opc) idx = UdOpcodeTable.getOpcodeIdx(opc) if self._typ != typ or idx in self._entries: raise CollisionError() self._entries[idx] = obj def lookup(self, opc): typ = UdOpcodeTable.getOpcodeTyp(opc) idx = UdOpcodeTable.getOpcodeIdx(opc) if self._typ != typ: raise UdOpcodeTable.CollisionError("%s <-> %s" % (self._typ, typ)) return self._entries.get(idx, None) def entryAt(self, index): """Returns the entry at a given index of the table, None if there is none. Raises an exception if the index is out of bounds. """ if index < self.size(): return self._entries.get(index, None) raise self.IndexError("index out of bounds: %s" % index) def setEntryAt(self, index, obj): if index < self.size(): self._entries[index] = obj else: raise self.IndexError("index out of bounds: %s" % index) @classmethod def getOpcodeTyp(cls, opc): if opc.startswith('/'): return opc.split('=')[0] else: return 'opctbl' @classmethod def getOpcodeIdx(cls, opc): if opc.startswith('/'): typ, v = opc.split('=') return cls.OpcExtMap[typ](v) else: # plain opctbl opcode return int(opc, 16) @classmethod def getLabels(cls): """Returns a list of all labels""" return [cls._TableInfo[k]['label'] for k in cls._TableInfo.keys()] class UdOpcodeTables(object): """Collection of opcode tables """ class CollisionError(Exception): def __init__(self, obj1, obj2): self.obj1, self.obj2 = obj1, obj2 def newTable(self, typ): """Create a new opcode table of a give type `typ`. 
""" tbl = UdOpcodeTable(typ) self._tables.append(tbl) return tbl def mkTrie(self, opcodes, obj): """Recursively contruct a trie entry mapping a string of opcodes to an object. """ if len(opcodes) == 0: return obj opc = opcodes[0] tbl = self.newTable(UdOpcodeTable.getOpcodeTyp(opc)) tbl.add(opc, self.mkTrie(opcodes[1:], obj)) return tbl def walk(self, tbl, opcodes): """Walk down the opcode trie, starting at a given opcode table, given a string of opcodes. Return None if unable to walk, the object at the leaf otherwise. """ opc = opcodes[0] e = tbl.lookup(opc) if e is None: return None elif isinstance(e, UdOpcodeTable) and len(opcodes[1:]): return self.walk(e, opcodes[1:]) return e def map(self, tbl, opcodes, obj): """Create a mapping from a given string of opcodes to an object in the opcode trie. Constructs trie branches as needed. """ opc = opcodes[0] e = tbl.lookup(opc) if e is None: tbl.add(opc, self.mkTrie(opcodes[1:], obj)) else: if len(opcodes[1:]) == 0: raise self.CollisionError(e, obj) self.map(e, opcodes[1:], obj) def __init__(self, xml): self._tables = [] self._insns = [] self._mnemonics = {} # The root table is always a 256 entry opctbl, indexed # by a plain opcode byte self.root = self.newTable('opctbl') if os.getenv("UD_OPCODE_DEBUG"): self._logFh = open("opcodeTables.log", "w") # add an invalid instruction entry without any mapping # in the opcode tables. self.invalidInsn = UdInsnDef(mnemonic="invalid", eflags="___________", firstOpAccess="", secondOpAccess="", implicitRegUse=[], implicitRegDef=[], opcodes=[], cpuid=[], operands=[], prefixes=[]) self._insns.append(self.invalidInsn) # Construct UdOpcodeTables object from the given # udis86 optable.xml for insn in self.__class__.parseOptableXML(xml): self.addInsnDef(insn) self.patchAvx2byte() self.mergeSSENONE() self.printStats() def log(self, s): if os.getenv("UD_OPCODE_DEBUG"): self._logFh.write(s + "\n") def mergeSSENONE(self): """Merge sse tables with only one entry for /sse=none """ for table in self._tables: for k, e in table.entries(): if isinstance(e, UdOpcodeTable) and e.typ() == '/sse': if e.numEntries() == 1: sse = e.lookup("/sse=none") if sse: table.setEntryAt(k, sse) uniqTables = {} def genTableList(tbl): if tbl not in uniqTables: self._tables.append(tbl) uniqTables[tbl] = 1 for k, e in tbl.entries(): if isinstance(e, UdOpcodeTable): genTableList(e) self._tables = [] genTableList(self.root) def patchAvx2byte(self): # create avx tables for pp in (None, 'f2', 'f3', '66'): for m in (None, '0f', '0f38', '0f3a'): if pp is None and m is None: continue if pp is None: vex = m elif m is None: vex = pp else: vex = pp + '_' + m table = self.walk(self.root, ('c4', '/vex=' + vex)) self.map(self.root, ('c5', '/vex=' + vex), table) def addInsn(self, **insnDef): # Canonicalize opcode list opcexts = insnDef['opcexts'] opcodes = list(insnDef['opcodes']) eflags = insnDef['eflags'] firstOpAccess = insnDef['firstOpAccess'] secondOpAccess = insnDef['secondOpAccess'] implicitRegUse = insnDef['implicitRegUse'] implicitRegDef = insnDef['implicitRegDef'] # TODO: REMOVE! # print opcodes, eflags, insnDef['mnemonic'], firstOpAccess, secondOpAccess, implicitRegUse, implicitRegDef # Re-order vex if '/vex' in opcexts: assert opcodes[0] == 'c4' or opcodes[0] == 'c5' opcodes.insert(1, '/vex=' + opcexts['/vex']) # Add extensions. The order is important, and determines how # well the opcode table is packed. Also note, /sse must be # before /o, because /sse may consume operand size prefix # affect the outcome of /o. 
for ext in ('/mod', '/x87', '/reg', '/rm', '/sse', '/o', '/a', '/m', '/vexw', '/vexl', '/3dnow', '/vendor'): if ext in opcexts: opcodes.append(ext + '=' + opcexts[ext]) insn = UdInsnDef(mnemonic = insnDef['mnemonic'], eflags = insnDef['eflags'], firstOpAccess = insnDef['firstOpAccess'], secondOpAccess = insnDef['secondOpAccess'], implicitRegUse = insnDef['implicitRegUse'], implicitRegDef = insnDef['implicitRegDef'], prefixes = insnDef['prefixes'], operands = insnDef['operands'], opcodes = opcodes, cpuid = insnDef['cpuid']) try: self.map(self.root, opcodes, insn) except self.CollisionError as e: self.pprint() print(opcodes, insn, str(e.obj1), str(e.obj2)) raise except Exception as e: self.pprint() raise self._insns.append(insn) # add to lookup by mnemonic structure if insn.mnemonic not in self._mnemonics: self._mnemonics[insn.mnemonic] = [ insn ] else: self._mnemonics[insn.mnemonic].append(insn) def addInsnDef(self, insnDef): opcodes = [] opcexts = {} # pack plain opcodes first, and collect opcode # extensions for opc in insnDef['opcodes']: if not opc.startswith('/'): opcodes.append(opc) else: e, v = opc.split('=') opcexts[e] = v # treat vendor as an opcode extension if len(insnDef['vendor']): opcexts['/vendor'] = insnDef['vendor'][0] if insnDef['mnemonic'] in ('lds', 'les'): # # Massage lds and les, which share the same prefix as AVX # instructions, to work well with the opcode tree. # opcexts['/vex'] = 'none' elif '/vex' in opcexts: # A proper avx instruction definition; make sure there are # no legacy opcode extensions assert '/sse' not in opcodes # make sure the opcode definitions don't already include # the avx prefixes. assert opcodes[0] not in ('c4', 'c5') # An avx only instruction is defined by the /vex= opcode # extension. They do not include the c4 (long form) or # c5 (short form) prefix. As part of opcode table generate, # here we create the long form definition, and then patch # the table for c5 in a later stage. # Construct a long-form definition of the avx instruction opcodes.insert(0, 'c4') elif (opcodes[0] == '0f' and opcodes[1] != '0f' and '/sse' not in opcexts): # Make all 2-byte opcode form isntructions play nice with sse # opcode maps. opcexts['/sse'] = 'none' # legacy sse defs that get promoted to avx fn = self.addInsn if 'avx' in insnDef['cpuid'] and '/sse' in opcexts: fn = self.addSSE2AVXInsn fn(mnemonic = insnDef['mnemonic'], eflags = insnDef['eflags'], firstOpAccess = insnDef['firstOpAccess'], secondOpAccess = insnDef['secondOpAccess'], implicitRegUse = insnDef['implicitRegUse'], implicitRegDef = insnDef['implicitRegDef'], prefixes = insnDef['prefixes'], opcodes = opcodes, opcexts = opcexts, operands = insnDef['operands'], cpuid = insnDef['cpuid']) def addSSE2AVXInsn(self, **insnDef): """Add an instruction definition containing an avx cpuid bit, but declared in its legacy SSE form. The function splits the definition to create two new definitions, one for SSE and one promoted to an AVX form. 
""" # SSE ssemnemonic = insnDef['mnemonic'] sseeflags = insnDef['eflags'] ssefirstOpAccess = insnDef['firstOpAccess'] ssesecondOpAccess = insnDef['secondOpAccess'] sseimplicitRegUse = insnDef['implicitRegUse'] sseimplicitRegDef = insnDef['implicitRegDef'] sseopcodes = insnDef['opcodes'] # remove vex opcode extensions sseopcexts = dict([(e, v) for e, v in insnDef['opcexts'].iteritems() if not e.startswith('/vex')]) # strip out avx operands, preserving relative ordering # of remaining operands sseoperands = [opr for opr in insnDef['operands'] if opr not in ('H', 'L')] # strip out avx prefixes sseprefixes = [pfx for pfx in insnDef['prefixes'] if not pfx.startswith('vex')] # strip out avx bits from cpuid ssecpuid = [flag for flag in insnDef['cpuid'] if not flag.startswith('avx')] self.addInsn(mnemonic = ssemnemonic, eflags = sseeflags, firstOpAccess = ssefirstOpAccess, secondOpAccess = ssesecondOpAccess, implicitRegUse = sseimplicitRegUse, implicitRegDef = sseimplicitRegDef, prefixes = sseprefixes, opcodes = sseopcodes, opcexts = sseopcexts, operands = sseoperands, cpuid = ssecpuid) # AVX vexmnemonic = 'v' + insnDef['mnemonic'] vexeflags = insnDef['eflags'] vexfirstOpAccess = insnDef['firstOpAccess'] vexsecondOpAccess = insnDef['secondOpAccess'] veximplicitRegUse = insnDef['implicitRegUse'] veximplicitRegDef = insnDef['implicitRegDef'] vexprefixes = insnDef['prefixes'] vexopcodes = ['c4'] vexopcexts = dict([(e, insnDef['opcexts'][e]) for e in insnDef['opcexts'] if e != '/sse']) vexopcexts['/vex'] = insnDef['opcexts']['/sse'] + '_' + '0f' if insnDef['opcodes'][1] == '38' or insnDef['opcodes'][1] == '3a': vexopcexts['/vex'] += insnDef['opcodes'][1] vexopcodes.extend(insnDef['opcodes'][2:]) else: vexopcodes.extend(insnDef['opcodes'][1:]) vexoperands = [] for o in insnDef['operands']: # make the operand size explicit: x if o in ('V', 'W', 'H', 'U'): o = o + 'x' vexoperands.append(o) vexcpuid = [flag for flag in insnDef['cpuid'] if not flag.startswith('sse')] self.addInsn(mnemonic = vexmnemonic, eflags = vexeflags, firstOpAccess = vexfirstOpAccess, secondOpAccess = vexsecondOpAccess, implicitRegUse = veximplicitRegUse, implicitRegDef = veximplicitRegDef, prefixes = vexprefixes, opcodes = vexopcodes, opcexts = vexopcexts, operands = vexoperands, cpuid = vexcpuid) def getInsnList(self): """Returns a list of all instructions in the collection""" return self._insns def getTableList(self): """Returns a list of all tables in the collection""" return self._tables def getMnemonicsList(self): """Returns a sorted list of mnemonics""" return sorted(self._mnemonics.keys()) def pprint(self): def printWalk(tbl, indent=""): entries = tbl.entries() for k, e in entries: if isinstance(e, UdOpcodeTable): self.log("%s |-<%02x> %s" % (indent, k, e)) printWalk(e, indent + " |") elif isinstance(e, UdInsnDef): self.log("%s |-<%02x> %s" % (indent, k, e)) printWalk(self.root) def printStats(self): tables = self.getTableList() self.log("stats: ") self.log(" Num tables = %d" % len(tables)) self.log(" Num insnDefs = %d" % len(self.getInsnList())) self.log(" Num insns = %d" % len(self.getMnemonicsList())) totalSize = 0 totalEntries = 0 for table in tables: totalSize += table.size() totalEntries += table.numEntries() self.log(" Packing Ratio = %d%%" % ((totalEntries * 100) / totalSize)) self.log("--------------------") self.pprint() @staticmethod def parseOptableXML(xml): """Parse udis86 optable.xml file and return list of instruction definitions. 
""" from xml.dom import minidom xmlDoc = minidom.parse(xml) tlNode = xmlDoc.firstChild insns = [] while tlNode and tlNode.localName != "x86optable": tlNode = tlNode.nextSibling for insnNode in tlNode.childNodes: if not insnNode.localName: continue if insnNode.localName != "instruction": raise Exception("warning: invalid insn node - %s" % insnNode.localName) mnemonic = insnNode.getElementsByTagName('mnemonic')[0].firstChild.data vendor, cpuid = '', [] global_eflags = "___________" global_firstOpAccess = "R" global_secondOpAccess = "R" global_implicitRegUse = [] global_implicitRegDef = [] for node in insnNode.childNodes: if node.localName == 'vendor': vendor = node.firstChild.data.split() elif node.localName == 'cpuid': cpuid = node.firstChild.data.split() elif node.localName == 'eflags': global_eflags = node.firstChild.data elif node.localName == 'first_operand_access': global_firstOpAccess = node.firstChild.data elif node.localName == 'second_operand_access': global_secondOpAccess = node.firstChild.data elif node.localName == 'implicit_register_use': global_implicitRegUse.append(node.firstChild.data) elif node.localName == 'implicit_register_def': global_implicitRegDef.append(node.firstChild.data) for node in insnNode.childNodes: if node.localName == 'def': eflags = copy.deepcopy(global_eflags) firstOpAccess = copy.deepcopy(global_firstOpAccess) secondOpAccess = copy.deepcopy(global_secondOpAccess) implicitRegUse = copy.deepcopy(global_implicitRegUse) implicitRegDef = copy.deepcopy(global_implicitRegDef) insnDef = { 'pfx' : [] } for node in node.childNodes: if not node.localName: continue if node.localName in ('pfx', 'opc', 'opr', 'vendor', 'cpuid'): insnDef[node.localName] = node.firstChild.data.split() elif node.localName == 'eflags': eflags = node.firstChild.data elif node.localName == 'first_operand_access': firstOpAccess = node.firstChild.data elif node.localName == 'second_operand_access': secondOpAccess = node.firstChild.data elif node.localName == 'implicit_register_use': implicitRegUse.append(node.firstChild.data) elif node.localName == 'implicit_register_def': implicitRegDef.append(node.firstChild.data) elif node.localName == 'mode': insnDef['pfx'].extend(node.firstChild.data.split()) insns.append({'prefixes' : insnDef.get('pfx', []), 'mnemonic' : mnemonic, 'eflags' : eflags, 'firstOpAccess' : firstOpAccess, 'secondOpAccess' : secondOpAccess, 'implicitRegUse' : implicitRegUse, 'implicitRegDef' : implicitRegDef, 'opcodes' : insnDef.get('opc', []), 'operands' : insnDef.get('opr', []), 'vendor' : insnDef.get('vendor', vendor), 'cpuid' : insnDef.get('cpuid', cpuid)}) return insns
bsd-2-clause
-4,087,577,388,770,248,000
37.168091
115
0.506382
false
3.904124
false
false
false
levythu/ThuCloudDisk
ThuCloudDisk/ThuCloudDisk/settings.py
1
6136
# Django settings for ThuCloudDisk project. import os.path dirname = os.path.dirname(__file__).replace("\\", "/") ROOT_PATH = os.path.dirname(dirname) DEBUG = True TEMPLATE_DEBUG = DEBUG ADMINS = ( ('thuclouddisk', '[email protected]'), ) MANAGERS = ADMINS DATABASES = { 'default': { 'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'. 'NAME': 'ThuCloudDisk', # Or path to database file if using sqlite3. # The following settings are not used with sqlite3: 'USER': 'root', 'PASSWORD': 'Zstbj2013', 'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP. 'PORT': '', # Set to empty string for default. } } # Hosts/domain names that are valid for this site; required if DEBUG is False # See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts ALLOWED_HOSTS = [] # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # In a Windows environment this must be set to your system time zone. TIME_ZONE = 'Asia/Shanghai' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # If you set this to False, Django will not format dates, numbers and # calendars according to the current locale. USE_L10N = True # If you set this to False, Django will not use timezone-aware datetimes. USE_TZ = True # Absolute filesystem path to the directory that will hold user-uploaded files. # Example: "/var/www/example.com/media/" MEDIA_ROOT = os.path.join(ROOT_PATH, 'media').replace('\\','/') # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash. # Examples: "http://example.com/media/", "http://media.example.com/" MEDIA_URL = '/media/' # Absolute path to the directory static files should be collected to. # Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. # Example: "/var/www/example.com/static/" STATIC_ROOT = '' # URL prefix for static files. # Example: "http://example.com/static/", "http://static.example.com/" STATIC_URL = '/static/' # Additional locations of static files STATICFILES_DIRS = ( os.path.join(ROOT_PATH, 'static').replace('\\','/'), # Put strings here, like "/home/html/static" or "C:/www/django/static". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. ) # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # 'django.contrib.staticfiles.finders.DefaultStorageFinder', ) # Make this unique, and don't share it with anybody. SECRET_KEY = 'mg1=t7!v5f7*#6p2b+#+o_fiqja0w4w#15m604oo+=(w1glvdk' # List of callables that know how to import templates from various sources. 
TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', # 'django.template.loaders.eggs.Loader', ) MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', #'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', # Uncomment the next line for simple clickjacking protection: 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'ThuCloudDisk.urls' # Python dotted path to the WSGI application used by Django's runserver. WSGI_APPLICATION = 'ThuCloudDisk.wsgi.application' TEMPLATE_DIRS = ( os.path.join(ROOT_PATH, 'templates').replace('\\','/'), # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. ) INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', # Uncomment the next line to enable the admin: 'django.contrib.admin', # Uncomment the next line to enable admin documentation: # 'django.contrib.admindocs', 'web', ) # A sample logging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error when DEBUG=False. # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' } }, 'handlers': { 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' } }, 'loggers': { 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True, }, } } USE_SWIFT = True WEB_RSYNC = False AUTH_USER_MODEL = 'web.MyUser' EMAIL_HOST='smtp.163.com' EMAIL_HOST_USER='[email protected]' EMAIL_HOST_PASSWORD='jzfiiczzaziqhymt' EMAIL_USE_TLS = True SITE_URL='thucloud.com' SITE_NAME='thuclouddisk' SWIFT_HOST='192.168.56.101:5000' SWIFT_URL='192.168.56.101' SWIFT_TENANT='demo' SWIFT_USER = 'demo' SWIFT_SECRET = 'DEMO_PASS' LOCAL_BUFFER_PATH = os.path.join(ROOT_PATH, 'media/buffer').replace('\\','/')
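
# --- Illustrative sketch (not part of the original settings): a common Django
# --- pattern for per-machine overrides of values such as DATABASES or DEBUG ---
#
# try:
#     from local_settings import *
# except ImportError:
#     pass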
apache-2.0
-3,915,417,579,704,217,000
32.71978
127
0.688233
false
3.514318
false
false
false
romanz/electrum
plugins/digitalbitbox/qt.py
1
1522
from ..hw_wallet.qt import QtHandlerBase, QtPluginBase from .digitalbitbox import DigitalBitboxPlugin from electrum.i18n import _ from electrum.plugins import hook from electrum.wallet import Standard_Wallet class Plugin(DigitalBitboxPlugin, QtPluginBase): icon_unpaired = ":icons/digitalbitbox_unpaired.png" icon_paired = ":icons/digitalbitbox.png" def create_handler(self, window): return DigitalBitbox_Handler(window) @hook def receive_menu(self, menu, addrs, wallet): if type(wallet) is not Standard_Wallet: return keystore = wallet.get_keystore() if type(keystore) is not self.keystore_class: return if not self.is_mobile_paired(): return if not keystore.is_p2pkh(): return if len(addrs) == 1: def show_address(): change, index = wallet.get_address_index(addrs[0]) keypath = '%s/%d/%d' % (keystore.derivation, change, index) xpub = self.get_client(keystore)._get_xpub(keypath) verify_request_payload = { "type": 'p2pkh', "echo": xpub['echo'], } self.comserver_post_notification(verify_request_payload) menu.addAction(_("Show on {}").format(self.device), show_address) class DigitalBitbox_Handler(QtHandlerBase): def __init__(self, win): super(DigitalBitbox_Handler, self).__init__(win, 'Digital Bitbox')
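
# Note: Electrum loads this Plugin class through its plugin machinery rather
# than by direct instantiation.  The receive_menu hook above adds the
# "Show on <device>" action only when every guard passes: the wallet is a
# Standard_Wallet, its keystore is the Digital Bitbox keystore class, the
# mobile app is paired, the keystore is p2pkh, and exactly one address is
# selected.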
mit
-7,596,609,756,621,793,000
30.708333
77
0.605782
false
3.882653
false
false
false
chadmv/cmt
scripts/cmt/rig/swingtwist.py
1
14549
"""Creates a node network to extract swing/twist rotation of a transform to drive another transforms offsetParentMatrix. The network calculates the local rotation swing and twist offset around the specified twist axis relative to the local rest orientation. This allows users to specify how much swing and twist they want to propagate to another transform. Uses include driving an upper arm twist joint from the shoulder and driving forearm twist joints from the wrist. .. raw:: html <div style="position: relative; padding-bottom: 56.25%; height: 0; overflow: hidden;"> <iframe src="https://www.youtube.com/embed/12tyQc93Y7A" style="position: absolute; top: 0; left: 0; width: 100%; height: 100%; border:0;" allowfullscreen title="YouTube Video"></iframe> </div> Since the network uses quaternions, partial swing and twist values between 0.0 and 1.0 will see a flip when the driver transform rotates past 180 degrees. The setup can either be made with several standard Maya nodes, or the compiled plug-in can be used to create a single node. Setting cmt.settings.ENABLE_PLUGINS to False will use vanilla Maya nodes. Otherwise, the compiled plug-in will be used. Example Usage ============= The twist decomposition network can be accessed in the cmt menu:: CMT > Rigging > Connect Twist Joint Twist child of shoulder:: shoulder |- twist_joint1 |- twist_joint2 |- elbow create_swing_twist(shoulder, twist_joint1, twist_weight=-1.0, swing_weight=0.0) create_swing_twist(shoulder, twist_joint2, twist_weight=-0.5, swing_weight=0.0) Twist forearm from wrist:: elbow |- twist_joint1 |- twist_joint2 |- wrist create_swing_twist(wrist, twist_joint1, twist_weight=0.5, swing_weight=0.0) create_swing_twist(wrist, twist_joint2, twist_weight=1.0, swing_weight=0.0) Use no plugins:: import cmt.settings as settings settings.ENABLE_PLUGINS = False create_swing_twist(wrist, twist_joint1, twist_weight=0.5, swing_weight=0.0) create_swing_twist(wrist, twist_joint2, twist_weight=1.0, swing_weight=0.0) """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import logging import maya.cmds as cmds import maya.mel as mel import maya.api.OpenMaya as OpenMaya from cmt.ui.optionbox import OptionBox from cmt.settings import DOCUMENTATION_ROOT import cmt.settings as settings from cmt.dge import dge import cmt.shortcuts as shortcuts import math logger = logging.getLogger(__name__) # User defined attribute names used in the network TWIST_WEIGHT = "twist" SWING_WEIGHT = "swing" TWIST_OUTPUT = "twistOutput" INV_TWIST_OUTPUT = "invertedTwistOutput" SWING_OUTPUT = "swingOutput" INV_SWING_OUTPUT = "invertedSwingOutput" HELP_URL = "{}/rig/swingtwist.html".format(DOCUMENTATION_ROOT) def create_swing_twist( driver, driven, twist_weight=1.0, swing_weight=1.0, twist_axis=0 ): """Create a node network to drive a transforms offsetParentMatrix from the decomposed swing/twist of another transform. Setting cmt.settings.ENABLE_PLUGINS to False will use vanilla Maya nodes. Otherwise, the compiled plug-in will be used. 
:param driver: Driver transform :param driven: Driven transform :param twist_weight: -1 to 1 twist scalar :param swing_weight: -1 to 1 swing scalar :param twist_axis: Local twist axis on driver (0: X, 1: Y, 2: Z) """ if settings.ENABLE_PLUGINS: cmds.loadPlugin("cmt", qt=True) cmds.swingTwist( driver, driven, twist=twist_weight, swing=swing_weight, twistAxis=twist_axis ) return for attr in [TWIST_OUTPUT, INV_TWIST_OUTPUT, SWING_OUTPUT, INV_SWING_OUTPUT]: if not cmds.objExists("{}.{}".format(driver, attr)): cmds.addAttr(driver, ln=attr, at="message") if not _twist_network_exists(driver): _create_twist_decomposition_network(driver, twist_axis) for attr in [TWIST_WEIGHT, SWING_WEIGHT]: if not cmds.objExists("{}.{}".format(driven, attr)): cmds.addAttr( driven, ln=attr, keyable=True, minValue=0, maxValue=1, defaultValue=math.fabs(twist_weight), ) twist, inv_twist, swing, inv_swing = _get_swing_twist_attributes(driver) twist_slerp = _create_slerp(driven, twist_weight, twist, inv_twist, TWIST_WEIGHT) swing_slerp = _create_slerp(driven, swing_weight, swing, inv_swing, SWING_WEIGHT) rotation = cmds.createNode("quatProd", name="{}_rotation".format(driver)) cmds.connectAttr( "{}.outputQuat".format(twist_slerp), "{}.input1Quat".format(rotation) ) cmds.connectAttr( "{}.outputQuat".format(swing_slerp), "{}.input2Quat".format(rotation) ) rotation_matrix = cmds.createNode( "composeMatrix", name="{}_rotation_matrix".format(driver) ) cmds.setAttr("{}.useEulerRotation".format(rotation_matrix), 0) cmds.connectAttr( "{}.outputQuat".format(rotation), "{}.inputQuat".format(rotation_matrix) ) mult = cmds.createNode("multMatrix", name="{}_offset_parent_matrix".format(driven)) cmds.connectAttr( "{}.outputMatrix".format(rotation_matrix), "{}.matrixIn[0]".format(mult) ) pinv = OpenMaya.MMatrix(cmds.getAttr("{}.parentInverseMatrix[0]".format(driven))) m = OpenMaya.MMatrix(cmds.getAttr("{}.worldMatrix[0]".format(driven))) local_rest_matrix = m * pinv cmds.setAttr("{}.matrixIn[1]".format(mult), list(local_rest_matrix), type="matrix") cmds.connectAttr( "{}.matrixSum".format(mult), "{}.offsetParentMatrix".format(driven) ) # Zero out local xforms to prevent double xform for attr in ["{}{}".format(x, y) for x in ["t", "r", "jo"] for y in "xyz"]: is_locked = cmds.getAttr("{}.{}".format(driven, attr), lock=True) if is_locked: cmds.setAttr("{}.{}".format(driven, attr), lock=False) cmds.setAttr("{}.{}".format(driven, attr), 0.0) if is_locked: cmds.setAttr("{}.{}".format(driven, attr), lock=True) logger.info( "Created swing twist network to drive {} from {}".format(driven, driver) ) def _twist_network_exists(driver): """Test whether the twist decomposition network already exists on driver. :param driver: Driver transform :return: True or False """ has_twist_attribute = cmds.objExists("{}.{}".format(driver, TWIST_OUTPUT)) if not has_twist_attribute: return False twist_node = cmds.listConnections("{}.{}".format(driver, TWIST_OUTPUT), d=False) return True if twist_node else False def _create_twist_decomposition_network(driver, twist_axis): """Create the twist decomposition network for driver. 
:param driver: Driver transform :param twist_axis: Local twist axis on driver """ # Connect message attributes to the decomposed twist nodes so we can reuse them # if the network is driving multiple nodes mult = cmds.createNode("multMatrix", name="{}_local_matrix".format(driver)) parent_inverse = "{}.parentInverseMatrix[0]".format(driver) world_matrix = "{}.worldMatrix[0]".format(driver) cmds.connectAttr(world_matrix, "{}.matrixIn[0]".format(mult)) cmds.connectAttr(parent_inverse, "{}.matrixIn[1]".format(mult)) pinv = OpenMaya.MMatrix(cmds.getAttr(parent_inverse)) m = OpenMaya.MMatrix(cmds.getAttr(world_matrix)) inv_local_rest_matrix = (m * pinv).inverse() cmds.setAttr( "{}.matrixIn[2]".format(mult), list(inv_local_rest_matrix), type="matrix" ) rotation = cmds.createNode("decomposeMatrix", name="{}_rotation".format(driver)) cmds.connectAttr("{}.matrixSum".format(mult), "{}.inputMatrix".format(rotation)) twist = cmds.createNode("quatNormalize", name="{}_twist".format(driver)) cmds.connectAttr( "{}.outputQuat.outputQuatW".format(rotation), "{}.inputQuat.inputQuatW".format(twist), ) axis = "XYZ"[twist_axis] cmds.connectAttr( "{}.outputQuat.outputQuat{}".format(rotation, axis), "{}.inputQuat.inputQuat{}".format(twist, axis), ) # swing = twist.inverse() * rotation inv_twist = cmds.createNode("quatInvert", name="{}_inverse_twist".format(driver)) cmds.connectAttr("{}.outputQuat".format(twist), "{}.inputQuat".format(inv_twist)) swing = cmds.createNode("quatProd", name="{}_swing".format(driver)) cmds.connectAttr("{}.outputQuat".format(inv_twist), "{}.input1Quat".format(swing)) cmds.connectAttr("{}.outputQuat".format(rotation), "{}.input2Quat".format(swing)) inv_swing = cmds.createNode("quatInvert", name="{}_inverse_swing".format(driver)) cmds.connectAttr("{}.outputQuat".format(swing), "{}.inputQuat".format(inv_swing)) # Connect the nodes to the driver so we can find and reuse them for multiple setups for node, attr in [ (twist, TWIST_OUTPUT), (inv_twist, INV_TWIST_OUTPUT), (swing, SWING_OUTPUT), (inv_swing, INV_SWING_OUTPUT), ]: cmds.connectAttr("{}.message".format(node), "{}.{}".format(driver, attr)) def _get_swing_twist_attributes(driver): """Get the quaternion output attribute of the twist decomposition network. 
    :param driver: Driver transform
    :return: The quaternion output attributes of the network, in the order
        [twist, inverted twist, swing, inverted swing]
    """
    nodes = []
    for attr in [TWIST_OUTPUT, INV_TWIST_OUTPUT, SWING_OUTPUT, INV_SWING_OUTPUT]:
        node = cmds.listConnections("{}.{}".format(driver, attr), d=False)
        if not node:
            # The network isn't connected so create it.  The original twist
            # axis is not recoverable here, so assume the default X axis (0),
            # matching the default of create_swing_twist.
            _create_twist_decomposition_network(driver, 0)
            return _get_swing_twist_attributes(driver)
        nodes.append(node[0])
    return ["{}.outputQuat".format(node) for node in nodes]


def _create_slerp(driven, weight, rotation, inv_rotation, attribute):
    slerp = cmds.createNode("quatSlerp", name="{}_{}_slerp".format(driven, attribute))
    cmds.setAttr("{}.{}".format(driven, attribute), math.fabs(weight))
    cmds.connectAttr("{}.{}".format(driven, attribute), "{}.inputT".format(slerp))
    cmds.setAttr("{}.input1QuatW".format(slerp), 1)
    if weight >= 0.0:
        cmds.connectAttr(rotation, "{}.input2Quat".format(slerp))
    else:
        cmds.connectAttr(inv_rotation, "{}.input2Quat".format(slerp))
    return slerp


def create_from_menu(*args, **kwargs):
    sel = cmds.ls(sl=True)
    if len(sel) != 2:
        raise RuntimeError("Select driver transform then driven transform.")
    driver, driven = sel
    kwargs = Options.get_kwargs()
    create_swing_twist(driver, driven, **kwargs)


def display_menu_options(*args, **kwargs):
    options = Options("Swing Twist Options", HELP_URL)
    options.show()


class Options(OptionBox):
    SWING_WEIGHT_WIDGET = "cmt_swing_weight"
    TWIST_WEIGHT_WIDGET = "cmt_twist_weight"
    TWIST_AXIS_WIDGET = "cmt_twist_axis"

    @classmethod
    def get_kwargs(cls):
        """Gets the function arguments either from the option box widgets or the
        saved option vars.  If the widgets exist, their values will be saved to
        the option vars.

        :return: A dictionary of the arguments to the create_swing_twist
            function."""
        kwargs = {}
        if cmds.floatSliderGrp(Options.TWIST_WEIGHT_WIDGET, exists=True):
            kwargs["twist_weight"] = cmds.floatSliderGrp(
                Options.TWIST_WEIGHT_WIDGET, q=True, value=True
            )
            cmds.optionVar(fv=(Options.TWIST_WEIGHT_WIDGET, kwargs["twist_weight"]))
        else:
            kwargs["twist_weight"] = cmds.optionVar(q=Options.TWIST_WEIGHT_WIDGET)

        if cmds.floatSliderGrp(Options.SWING_WEIGHT_WIDGET, exists=True):
            kwargs["swing_weight"] = cmds.floatSliderGrp(
                Options.SWING_WEIGHT_WIDGET, q=True, value=True
            )
            cmds.optionVar(fv=(Options.SWING_WEIGHT_WIDGET, kwargs["swing_weight"]))
        else:
            kwargs["swing_weight"] = cmds.optionVar(q=Options.SWING_WEIGHT_WIDGET)

        if cmds.optionMenuGrp(Options.TWIST_AXIS_WIDGET, exists=True):
            value = cmds.optionMenuGrp(Options.TWIST_AXIS_WIDGET, q=True, sl=True)
            kwargs["twist_axis"] = value - 1
            cmds.optionVar(iv=(Options.TWIST_AXIS_WIDGET, kwargs["twist_axis"]))
        else:
            kwargs["twist_axis"] = cmds.optionVar(q=Options.TWIST_AXIS_WIDGET)

        return kwargs

    def create_ui(self):
        cmds.columnLayout(adj=True)
        for widget in [
            Options.SWING_WEIGHT_WIDGET,
            Options.TWIST_WEIGHT_WIDGET,
            Options.TWIST_AXIS_WIDGET,
        ]:
            # Delete the widgets so we don't create multiple controls with the same name
            try:
                cmds.deleteUI(widget, control=True)
            except RuntimeError:
                pass

        swing_weight = cmds.optionVar(q=Options.SWING_WEIGHT_WIDGET)
        cmds.floatSliderGrp(
            Options.SWING_WEIGHT_WIDGET,
            label="Swing weight",
            field=True,
            minValue=-1.0,
            maxValue=1.0,
            fieldMinValue=-1.0,
            fieldMaxValue=1.0,
            value=swing_weight,
            step=0.1,
            precision=2,
        )
        twist_weight = cmds.optionVar(q=Options.TWIST_WEIGHT_WIDGET)
        cmds.floatSliderGrp(
            Options.TWIST_WEIGHT_WIDGET,
            label="Twist weight",
            field=True,
            minValue=-1.0,
            maxValue=1.0,
fieldMinValue=-1.0, fieldMaxValue=1.0, value=twist_weight, step=0.1, precision=2, ) twist_axis = cmds.optionVar(q=Options.TWIST_AXIS_WIDGET) twist_axis = 1 if not twist_axis else twist_axis + 1 cmds.optionMenuGrp(Options.TWIST_AXIS_WIDGET, l="Twist Axis") cmds.menuItem(label="X") cmds.menuItem(label="Y") cmds.menuItem(label="Z") cmds.optionMenuGrp(Options.TWIST_AXIS_WIDGET, e=True, sl=twist_axis) def on_apply(self): create_from_menu() def on_reset(self): cmds.floatSliderGrp(Options.SWING_WEIGHT_WIDGET, e=True, value=1) cmds.floatSliderGrp(Options.TWIST_WEIGHT_WIDGET, e=True, value=1) cmds.optionMenuGrp(Options.TWIST_AXIS_WIDGET, e=True, sl=1) def on_save(self): Options.get_kwargs()
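
# --- Illustrative usage sketch (not part of the original module; assumes a
# --- Maya session with cmt on the Python path and transforms named
# --- "shoulder" and "shoulder_twist1" in the scene) ---
#
#   from cmt.rig.swingtwist import create_swing_twist
#   # Drive the twist joint with half of the shoulder's twist about local X:
#   create_swing_twist("shoulder", "shoulder_twist1",
#                      twist_weight=0.5, swing_weight=0.0, twist_axis=0)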
mit
-4,035,239,516,782,056,000
36.594315
191
0.652279
false
3.364709
false
false
false
TurkuNLP/SRNNMT
old_/big_run.py
1
12851
from keras.models import Sequential, Graph, Model, model_from_json from keras.layers import Dense, Dropout, Activation, Merge, Input, merge, Flatten from keras.layers.recurrent import GRU from keras.callbacks import Callback,ModelCheckpoint from keras.layers.embeddings import Embedding import numpy as np import sys import math import json import gzip import conllutil3 as cu import data_dense from test import load_model from dictionary_baseline import build_dictionary min_len=5 max_len=30 def read_fin_parsebank(fname,max_sent=10000): # sentences=[] counter=0 for comm, sent in cu.read_conllu(gzip.open(fname,"rt",encoding="utf-8")): if min_len<=len(sent)<=max_len: txt=" ".join(line[cu.FORM] for line in sent) yield txt # sentences.append(txt) counter+=1 if counter==max_sent: break # return sentences def sent_reader(f): words=[] for line in f: line=line.strip() if line=="</s>": # end of sentence if words: yield words words=[] cols=line.split("\t") if len(cols)==1: continue words.append(cols[0]) def read_eng_parsebank(fname,max_sent=10000): counter=0 for sent in sent_reader(gzip.open(fname,"rt",encoding="utf-8")): if min_len<=len(sent)<=max_len: txt=" ".join(sent) yield txt counter+=1 if counter==max_sent: break def fill_batch(minibatch_size,max_sent_len,vs,data_iterator,ngrams): """ Iterates over the data_iterator and fills the index matrices with fresh data ms = matrices, vs = vocabularies """ # custom fill_batch to return also sentences... ms=data_dense.Matrices(minibatch_size,max_sent_len,ngrams) batchsize,max_sentence_len=ms.source_ngrams[ngrams[0]].shape #just pick any one of these really row=0 src_sents=[] trg_sents=[] for (sent_src,sent_target),target in data_iterator: src_sents.append(sent_src) trg_sents.append(sent_target) for N in ngrams: for j,ngram in enumerate(data_dense.ngram_iterator(sent_src,N,max_sent_len)): ms.source_ngrams[N][row,j]=vs.get_id(ngram,vs.source_ngrams[N]) for j,ngram in enumerate(data_dense.ngram_iterator(sent_target,N,max_sent_len)): ms.target_ngrams[N][row,j]=vs.get_id(ngram,vs.target_ngrams[N]) ms.src_len[row]=len(sent_src.strip().split()) ms.trg_len[row]=len(sent_target.strip().split()) ms.targets[row]=target row+=1 if row==batchsize: # print(ms.matrix_dict, ms.targets) yield ms.matrix_dict, ms.targets, src_sents, trg_sents src_sents=[] trg_sents=[] row=0 ms=data_dense.Matrices(minibatch_size,max_sent_len,ngrams) def iter_wrapper(src_fname,trg_fname,max_sent=10000): for fin_sent,eng_sent in zip(read_fin_parsebank(src_fname,max_sent=max_sent),read_eng_parsebank(trg_fname,max_sent=max_sent)): yield (fin_sent,eng_sent),1.0 #def iter_wrapper(src_fname,trg_fname,max_sent=1000): # count=0 # for fin_sent,eng_sent in zip(open(src_fname),open(trg_fname)): # fin_sent=fin_sent.strip() # eng_sent=eng_sent.strip() # yield (fin_sent,eng_sent),1.0 # count+=1 # if count==max_sent: # break def vectorize(voc_name,mname,src_fname,trg_fname,max_pairs): # create files file_dict={} for i in range(min_len,max_len+1): file_dict["fi_sent_len{N}".format(N=i)]=gzip.open("vdata/fi_sent_len{N}.txt.gz".format(N=i),"wt",encoding="utf-8") file_dict["fi_vec_len{N}".format(N=i)]=open("vdata/fi_vec_len{N}.npy".format(N=i),"wb") file_dict["en_sent_len{N}".format(N=i)]=gzip.open("vdata/en_sent_len{N}.txt.gz".format(N=i),"wt",encoding="utf-8") file_dict["en_vec_len{N}".format(N=i)]=open("vdata/en_vec_len{N}.npy".format(N=i),"wb") minibatch_size=100 ngrams=(4,) # TODO: read this from somewhere #Read vocabularies vs=data_dense.read_vocabularies(voc_name,"xxx","xxx",False,ngrams) vs.trainable=False 
# load model trained_model=load_model(mname) output_size=trained_model.get_layer('source_dense').output_shape[1] max_sent_len=trained_model.get_layer('source_ngrams_{n}'.format(n=ngrams[0])).output_shape[1] print(output_size,max_sent_len) # build matrices ms=data_dense.Matrices(minibatch_size,max_sent_len,ngrams) # get vectors # for loop over minibatches counter=0 for i,(mx,targets,src_data,trg_data) in enumerate(fill_batch(minibatch_size,max_sent_len,vs,iter_wrapper(src_fname,trg_fname,max_sent=max_pairs),ngrams)): src,trg=trained_model.predict(mx) # shape = (minibatch_size,gru_width) # loop over items in minibatch for j,(src_v,trg_v) in enumerate(zip(src,trg)): norm_src=src_v/np.linalg.norm(src_v) norm_trg=trg_v/np.linalg.norm(trg_v) fi_len=len(src_data[j].split()) en_len=len(trg_data[j].split()) norm_src.astype(np.float32).tofile(file_dict["fi_vec_len{N}".format(N=fi_len)]) print(src_data[j],file=file_dict["fi_sent_len{N}".format(N=fi_len)]) norm_trg.astype(np.float32).tofile(file_dict["en_vec_len{N}".format(N=en_len)]) print(trg_data[j],file=file_dict["en_sent_len{N}".format(N=en_len)]) counter+=1 if counter%100000==0: print("Vectorized {c} sentence pairs".format(c=counter)) # print(type(norm_src[0].astype(np.float32))) # counter+=1 # if counter==len(src_data): # break # if counter==len(src_data): # break for key,value in file_dict.items(): value.close() # return src_vectors,trg_vectors def rank_keras(src_vectors,trg_vectors,src_sentences,trg_sentences,verbose=True): ranks=[] all_similarities=[] # list of sorted lists src_data=[s.strip() for s in gzip.open(src_sentences,"rt")] trg_data=[s.strip() for s in gzip.open(trg_sentences,"rt")] src_vectors=np.fromfile(src_vectors,np.float32).reshape(len(src_data),150)[:100000,:] trg_vectors=np.fromfile(trg_vectors,np.float32).reshape(len(trg_data),150)[:100000,:] src_data=src_data[:100000] trg_data=trg_data[:100000] print("#",len(src_data),len(trg_data),file=sys.stderr) to_keep=[] # dot product sim_matrix=np.dot(src_vectors,trg_vectors.T) print("dot product ready",file=sys.stderr) # argpartition partition_matrix=np.argpartition(sim_matrix,-3000)#[-N-1:] print("partition ready",file=sys.stderr) results=[] for i,row in enumerate(partition_matrix): results.append((src_data[i],[(sim_matrix[i,idx],trg_data[idx]) for idx in row[-3000:]])) # for i in range(5): # print(results[i][0],results[i][1][:5]) return results # for i in range(len(src_vectors)): # sims=trg_vectors.dot(src_vectors[i]) # all_similarities.append(sims) # N=10 ## results=sorted(((sims[idx],idx,trg_data[idx]) for idx in np.argpartition(sims,-N-1)), reverse=True)#[-N-1:]), reverse=True) # results=sorted(((sims[idx],idx,trg_data[idx]) for idx,s in enumerate(sims)), reverse=True)#[-N-1:]), reverse=True) # if results[0][0]<0.6: # continue # result_idx=[idx for (sim,idx,txt) in results] # ranks.append(result_idx.index(i)+1) # to_keep.append((src_data[i],[(s,txt) for s,idx,txt in results[:1000]])) # if verbose: # print("source:",i,src_data[i],np.dot(src_vectors[i],trg_vectors[i])) ## print("reference:",trg_data[i]) ## print("rank:",result_idx.index(i)+1) # for s,idx,txt in results[:10]: # print(idx,s,txt) # print("****") # print # print("Keras:") # print("Avg:",sum(ranks)/len(ranks)) # print("#num:",len(ranks)) # ## return all_similarities # return to_keep def rank_dictionary(keras_results,verbose=True): f2e_dictionary=build_dictionary("lex.f2e", "uniq.train.tokens.fi.100K") e2f_dictionary=build_dictionary("lex.e2f", "uniq.train.tokens.en.100K") ranks=[] na=0 all_scores=[] for i, 
(src_sent,pairs) in enumerate(keras_results):
        english_transl=set()
        finnish_words=set(src_sent.lower().split())
        for w in finnish_words:
            if w in f2e_dictionary:
                english_transl.update(f2e_dictionary[w])
        combined=[]
        for j,(s,trg_sent) in enumerate(pairs):
            count=0
            english_words=set(trg_sent.strip().lower().split())
            score=len(english_words&english_transl)/len(english_words)
#            scores.append((j,score/len(english_words)))
            finnish_transl=set()
            for w in english_words:
                if w in e2f_dictionary:
                    finnish_transl.update(e2f_dictionary[w])
            score2=len(finnish_words&finnish_transl)/len(finnish_words)
#            scores2.append((j,score2/len(finnish_words)))
            avg=(s+score+score2)/3
            combined.append((avg,trg_sent))
#        combined=[(x,(f+e)/2) for (x,f),(y,e) in zip(scores,scores2)]
        results=sorted(combined, key=lambda x:x[0], reverse=True)
#        if combined[0][0]<0.4:
#            continue
        all_scores.append((results[0][0],src_sent,results))
#        all_scores.append(combined)
#        if combined[i][1]==0.0: # TODO
#            ranks.append(len(src_data)/2)
#            na+=1
#            continue
#        result_idx=[idx for idx,score in results]
#        ranks.append(result_idx.index(i)+1)
        if verbose:
            print("Source:",i,src_sent)
#            print("Reference:",trg_data[i],combined[i][1])
#            print("Rank:",result_idx.index(i)+1)
            for s,txt in results[:10]:
                print(txt,s)
            print("*"*20)
            print()

    for (best_sim,src_sent,translations) in sorted(all_scores, key=lambda x:x[0], reverse=True):
        print("source:",src_sent)
        for (s,trg_sent) in translations[:20]:
            print(trg_sent,s)
        print("")

#    print("Dictionary baseline:")
#    print("Avg:",sum(ranks)/len(ranks))
    print("# num:",len(all_scores),file=sys.stderr)
#    print("n/a:",na)
#    return all_scores


def test(src_fname,trg_fname,mname,voc_name,max_pairs):
    # read sentences
    src_data=[]
    trg_data=[]
    for src_line,trg_line in data_dense.iter_data(src_fname,trg_fname,max_pairs=max_pairs):
        src_data.append(src_line.strip())
        trg_data.append(trg_line.strip())
    src_vectors,trg_vectors=vectorize(voc_name,src_data,trg_data,mname)
    similarities=rank(src_vectors,trg_vectors,src_data,trg_data)


if __name__=="__main__":

    import argparse

    parser = argparse.ArgumentParser(description='')
    g=parser.add_argument_group("Required arguments")
    g.add_argument('-m', '--model', type=str, help='Give model name')
    g.add_argument('-v', '--vocabulary', type=str, help='Give vocabulary file')
    g.add_argument('--max_pairs', type=int, default=1000, help='Maximum number of sentence pairs to read, default={n}'.format(n=1000))
    g.add_argument('--fi_len', type=int, help='Finnish matrix len')
    g.add_argument('--en_len', type=int, help='English matrix len')
    args = parser.parse_args()

    if args.model is None or args.vocabulary is None:
        parser.print_help()
        sys.exit(1)

#    vectorize(args.vocabulary,args.model,"pbv4_ud.part-00.gz","encow14ax01.xml.gz",args.max_pairs)
#    vectorize(args.vocabulary,args.model,"data/all.test.fi.tokenized","data/all.test.en.tokenized")

#    to_keep=rank_keras("finnish_vectors.npy","english_vectors.npy","finnish_sentences.txt.gz","english_sentences.txt.gz",verbose=False)
#    results=rank_keras("vdata/fi_vec_len15.npy","vdata/en_vec_len15.npy","vdata/fi_sent_len15.txt.gz","vdata/en_sent_len15.txt.gz",verbose=False)
    keras_results=rank_keras("vdata/fi_vec_len{n}.npy".format(n=args.fi_len),"vdata/en_vec_len{n}.npy".format(n=args.en_len),"vdata/fi_sent_len{n}.txt.gz".format(n=args.fi_len),"vdata/en_sent_len{n}.txt.gz".format(n=args.en_len),verbose=False)
    rank_dictionary(keras_results,verbose=False)

#    test("data/all.test.fi.tokenized","data/all.test.en.tokenized",args.model,args.vocabulary,args.max_pairs)

#for mx,targets in batch_iter: # input is
shuffled!!! # src,trg=model.predict(mx) # print(targets,np.dot(src[0],trg[0]))
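
# --- Illustrative invocation (file names are placeholders; the vdata/ vector
# --- and sentence files must first be produced by the vectorize() step) ---
#
#   python big_run.py -m MODEL_NAME -v VOCABULARY_FILE --fi_len 15 --en_len 15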
apache-2.0
-3,734,053,887,119,361,000
35.717143
243
0.604155
false
3.069995
true
false
false
warrenspe/hconf
hconf/Config.py
1
6230
""" Copyright (C) 2016 Warren Spencer [email protected] This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. Author: Warren Spencer Email: [email protected] """ # Standard imports import re # Project imports from .Exceptions import * from .Subparsers._subparser import Subparser __all__ = [ 'ConfigManager', ] class _Config(object): """ Config object which will be populated and returned as the config object holding all the configuration options. """ def __getitem__(self, name): if hasattr(self, name): return getattr(self, name) raise KeyError(str(name)) class ConfigManager(object): """ Config manager which can have a sequence of subparsers assigned to it in order to delegate configuration parsing. Expected configuration options are set on the object explicitely. """ configNameRE = re.compile("^[a-zA-Z][\w\-_]*$") def __init__(self, *args): """ Initializes a ConfigManager. Inputs: args - ConfigManagers can be optionally initialized with a sequence of dictionaries representing configuration options to add to the ConfigManager. """ self.configs = dict() self.parsers = list() self._config = None for arg in args: self.addConfig(**arg) def registerParser(self, parser): """ Registers a parser to parse configuration inputs. """ if not isinstance(parser, Subparser): raise TypeError("%s is not an instance of a subparser." % parser) self.parsers.append(parser) def addConfig(self, name, default=None, cast=None, required=False, description=None): """ Adds the given configuration option to the ConfigManager. Inputs: name - The configuration name to accept. required - A boolean indicating whether or not the configuration option is required or not. cast - A type (or function accepting 1 argument and returning an object) to cast the input as. If any error occurs during casting an InvalidConfigurationException will be raised. default - The default value to assign to this configuration option. Note that None is not a valid default if required=True. description - A human readable description of this configuration parameter. Will be displayed when the program is run with a -h flag. """ # Validate the name if not self.configNameRE.match(name): raise InvalidConfigurationException("Invalid configuration name: %s" % name) self.configs[self._sanitizeName(name)] = { 'default': default, 'cast': cast, 'required': required, 'description': description } def parse(self): """ Executes the registered parsers to parse input configurations. """ self._config = _Config() self._setDefaults() for parser in self.parsers: for key, value in parser.parse(self, self._config).items(): key = self._sanitizeName(key) if key not in self.configs: raise UnknownConfigurationException(key) if value is not None: self._setConfig(key, value) self._ensureRequired() self._cast() return self._config def _setDefaults(self): """ Sets all the expected configuration options on the config object as either the requested default value, or None. 
""" for configName, configDict in self.configs.items(): self._setConfig(configName, configDict['default']) def _ensureRequired(self): """ Ensures that all configuration options marked as being required have been passed (ie are non-None). Raises a MissingConfigurationException if a required configuration option was not passed. """ for configName, configDict in self.configs.items(): if configDict['required']: if getattr(self._config, configName) is None: raise MissingConfigurationException(configName) def _cast(self): """ Iterates through our parsed configuration options and cast any options with marked cast types. """ for configName, configDict in self.configs.items(): if configDict['cast'] is not None: configValue = getattr(self._config, configName) if configValue is not None: try: self._setConfig(configName, configDict['cast'](configValue)) except: raise InvalidConfigurationException("%s: %r" % (configName, configValue)) def _setConfig(self, name, value): """ Sets the configuration option on the current configuration object being populated. Inputs: name - The name of the configuration option to set. value - The value of the configuration option to set. """ setattr(self._config, name, value) def _sanitizeName(self, name): """ Sanitizes a configuration name so that it can be set onto the Config object safely (ex: replacing -'s with _'s). Inputs: name - The string containing the name to sanitize. Outputs: A string containing the sanitized string. """ return name.replace('-', '_')
gpl-3.0
-5,497,231,904,780,135,000
34.6
120
0.619904
false
4.917127
true
false
false
tgcmteam/tgcmlinux
src/tgcm/contrib/freedesktopnet/networkmanager/activeconnection.py
1
2450
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (C) 2009 Martin Vidner # # # Authors: # Martin Vidner <martin at vidnet.net> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 # USA import dbus from freedesktopnet.dbusclient import DBusClient, object_path from freedesktopnet.dbusclient.func import * from applet import Connection from device import Device from accesspoint import AccessPoint from util import Enum class ActiveConnection(DBusClient): """ Signals: PropertiesChanged ( a{sv}: properties ) Properties: ServiceName - s - (read) Connection - o - (read) SpecificObject - o - (read) Devices - ao - (read) State - u - (read) (NM_ACTIVE_CONNECTION_STATE) Default - b - (read) Enumerated types: NM_ACTIVE_CONNECTION_STATE """ SERVICE = "org.freedesktop.NetworkManager" IFACE = "org.freedesktop.NetworkManager.Connection.Active" def __init__(self, opath): super(ActiveConnection, self).__init__(dbus.SystemBus(), self.SERVICE, opath, default_interface=self.IFACE) class State(Enum): UNKNOWN = 0 ACTIVATING = 1 ACTIVATED = 2 def __getitem__(self, key): "Implement Connection by adding the required ServiceName" v = super(ActiveConnection, self).__getitem__(key) if key == "Connection": sn="org.freedesktop.NetworkManager" v = Connection(sn, v) return v ActiveConnection._add_adaptors( PropertiesChanged = SA(identity), # ServiceName = PA(identity), # Connection = PA(Connection), # implemented in __getitem__ SpecificObject = PA(AccessPoint), #in most cases. figure out. Devices = PA(seq_adaptor(Device._create)), State = PA(ActiveConnection.State), Default = PA(bool), )
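
# --- Illustrative usage sketch (not part of the original module; requires a
# --- system bus with NetworkManager running, and the object path below is a
# --- placeholder) ---
#
#   ac = ActiveConnection("/org/freedesktop/NetworkManager/ActiveConnection/0")
#   print(ac["State"])    # an ActiveConnection.State value
#   print(ac["Default"])  # True if this is the default connection
#   for dev in ac["Devices"]:
#       print(dev)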
gpl-2.0
4,414,699,881,224,779,000
30.818182
115
0.682449
false
3.810264
false
false
false
derekgreene/twitter-jsonl-tools
jsonl-tweet-export.py
1
2962
#!/usr/bin/env python """ A very simple script to export tweets from a JSONL file in CSV format. Sample usage: python jsonl-tweet-export.py sample/sample-tweets-500.jsonl -o sample/sample-tweets.csv """ import sys, fileinput, codecs, re from datetime import datetime from optparse import OptionParser import logging as log try: import ujson as json except: import json from prettytable import PrettyTable # -------------------------------------------------------------- def parse_twitter_date( s, ignore_time_zones = True ): # hack for cases where timezone is not supported by Python strptime if ignore_time_zones: parts = s.split(" ") smodified =" ".join( parts[0:4] + [ parts[-1] ] ) return datetime.strptime(smodified,'%a %b %d %H:%M:%S %Y') return datetime.strptime(s,'%a %b %d %H:%M:%S %z %Y') def fmt_id( x ): return '"%s"' % x def norm( s, sep ): s = s.replace(sep, " ") return re.sub("\s+", " ", s ) # -------------------------------------------------------------- def main(): parser = OptionParser(usage="usage: %prog [options] json_file1 json_file2 ...") parser.add_option("-t", "--top", action="store", type="int", dest="top", help="number of top authors to display", default=10) parser.add_option("-o", action="store", type="string", dest="out_path", help="output path for CSV file", default="tweets.csv") parser.add_option("-s", action="store", type="string", dest="separator", help="separator character for output file (default is comma)", default=",") (options, args) = parser.parse_args() if( len(args) < 1 ): parser.error( "Must specify at least one JSONL file" ) log.basicConfig(level=20, format='%(message)s') sep = options.separator log.info("Tweets will be written to %s ..." % options.out_path ) header = ["Tweet_ID", "Created_At", "Author_Screen_Name", "Author_Id", "Text" ] fout = codecs.open( options.out_path, "w", encoding="utf-8", errors="ignore" ) fout.write("%s\n" % sep.join(header) ) for tweets_path in args: log.info("Loading tweets from %s ..." % tweets_path) # Process every line as JSON data num_tweets, num_failed, line_number = 0, 0, 0 for l in fileinput.input(tweets_path): l = l.strip() if len(l) == 0: continue try: line_number += 1 tweet = json.loads(l) sdate = parse_twitter_date(tweet["created_at"]).strftime("%Y-%m-%d %H:%M:%S") values = [ fmt_id(tweet["id"]), sdate, norm(tweet["user"]["screen_name"], sep).lower(), fmt_id(tweet["user"]["id"]), norm(tweet["text"], sep) ] fout.write("%s\n" % sep.join(values) ) num_tweets += 1 if line_number % 50000 == 0: log.info("Processed %d lines" % line_number) except Exception as e: log.error("Failed to parse tweet on line %d: %s" % ( line_number, e ) ) num_failed += 1 fileinput.close() log.info("Wrote %d tweets" % num_tweets ) fout.flush() fout.close() # -------------------------------------------------------------- if __name__ == "__main__": main()
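
# --- Illustrative invocation with a non-default separator (tab-separated
# --- output via the -s option defined above; $'\t' is bash syntax) ---
#
#   python jsonl-tweet-export.py sample/sample-tweets-500.jsonl -o tweets.tsv -s $'\t'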
apache-2.0
4,448,869,735,812,144,000
34.686747
149
0.605334
false
3.085417
false
false
false
gkadillak/rockstor-core
src/rockstor/fs/btrfs.py
1
31825
""" Copyright (c) 2012-2013 RockStor, Inc. <http://rockstor.com> This file is part of RockStor. RockStor is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. RockStor is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ """ system level helper methods to interact with the btrfs filesystem """ import re import time import os import shutil from system.osi import run_command, create_tmp_dir, is_share_mounted, \ is_mounted, get_dev_byid_name, convert_to_kib from system.exceptions import (CommandException) from pool_scrub import PoolScrub from django_ztask.decorators import task import logging logger = logging.getLogger(__name__) MKFS_BTRFS = '/sbin/mkfs.btrfs' BTRFS = '/sbin/btrfs' MOUNT = '/bin/mount' UMOUNT = '/bin/umount' DEFAULT_MNT_DIR = '/mnt2/' RMDIR = '/bin/rmdir' QID = '2015' def add_pool(pool, disks): """ pool is a btrfs filesystem. """ disks_fp = ['/dev/disk/by-id/' + d for d in disks] cmd = [MKFS_BTRFS, '-f', '-d', pool.raid, '-m', pool.raid, '-L', pool.name, ] cmd.extend(disks_fp) out, err, rc = run_command(cmd) enable_quota(pool) return out, err, rc def get_pool_info(disk): """ Extracts any pool information by running btrfs fi show <disk> and collates the results by 'Label', 'uuid', and current boot disk name. The disk name is then translated to the by-id type found in /dev/disk/by-id so that it's counterparts in the db's Disk.name field can be found. N.B. devices without serial may have no by-id counterpart. Used by CommandView()._refresh_pool_state() and DiskDetailView()._btrfs_disk_import :param disk: by-id disk name without path :return: a dictionary with keys of 'disks', 'label', and 'uuid'; disks keys a list of devices, while label and uuid keys are for strings. """ cmd = [BTRFS, 'fi', 'show', '/dev/disk/by-id/%s' % disk] o, e, rc = run_command(cmd) pool_info = {'disks': [],} for l in o: if (re.match('Label', l) is not None): fields = l.split() pool_info['label'] = fields[1].strip("'") pool_info['uuid'] = fields[3] elif (re.match('\tdevid', l) is not None): # We have a line starting wth <tab>devid, extract the dev name. # Previously this would have been sda and used as is but we need # it's by-id references as that is the new format for Disks.name. # Original sda extraction: # pool_info['disks'].append(l.split()[-1].split('/')[-1]) # Updated '/dev/sda' extraction to save on a split we no longer need # and use this 'now' name to get our by-id name with path removed. # This is required as that is how device names are stored in the # db Disk.name so that we can locate a drive and update it's pool # field reference. 
            dev_byid, is_byid = get_dev_byid_name(l.split()[-1], True)
            pool_info['disks'].append(dev_byid)
    return pool_info


def pool_raid(mnt_pt):
    o, e, rc = run_command([BTRFS, 'fi', 'df', mnt_pt])
    # data, system, metadata, globalreserve
    raid_d = {}
    for l in o:
        fields = l.split()
        if (len(fields) > 1):
            block = fields[0][:-1].lower()
            raid = fields[1][:-1].lower()
            # ignore DUP entries (lower cased above) and keep the first raid
            # level seen for each block group type.
            if (block not in raid_d and raid != 'dup'):
                raid_d[block] = raid
    if (raid_d['metadata'] == 'single'):
        raid_d['data'] = raid_d['metadata']
    return raid_d


def cur_devices(mnt_pt):
    """
    When given a btrfs mount point a list containing the full path of all
    devices is generated by wrapping the btrfs fi show <mnt_pt> command and
    parsing the devid line.
    Used by resize_pool() to ascertain membership status of a device in a pool
    :param mnt_pt: btrfs mount point
    :return: list containing the current reported devices associated with a
    btrfs mount point in by-id (with full path) format.
    """
    dev_list_byid = []
    o, e, rc = run_command([BTRFS, 'fi', 'show', mnt_pt])
    for l in o:
        l = l.strip()
        if (re.match('devid ', l) is not None):
            # The following extracts the devices from the above command
            # output, ie /dev/sda type names, but these are transient and we
            # use their by-id type counterparts in the db and our logging,
            # hence the call to convert the 'now' names to by-id type names.
            # N.B. As opposed to get_pool_info we want to preserve the path as
            # our caller expects this full path format.
            dev_byid, is_byid = get_dev_byid_name(l.split()[-1])
            dev_list_byid.append(dev_byid)
    return dev_list_byid


def resize_pool(pool, dev_list_byid, add=True):
    """
    Acts on a given pool and list of device names by generating and then
    executing the appropriate:-
    "btrfs <device list> add(default)/delete root_mnt_pt(pool)"
    command, or returning None if a disk member sanity check fails ie if all
    the supplied devices are either not already a member of the pool (when
    adding) or are already members of the pool (when deleting). If any device
    in the supplied dev_list fails this test then no command is executed and
    None is returned.
    :param pool: btrfs pool name
    :param dev_list_byid: list of devices to add/delete in by-id (without
    path).
    :param add: when True (default) attempt to add dev_list devices to pool,
    when False attempt to delete dev_list devices from pool.
    :return: Tuple of results from run_command(generated command) or None if
    the device member/pool sanity check fails.
    """
    dev_list_byid = ['/dev/disk/by-id/' + d for d in dev_list_byid]
    root_mnt_pt = mount_root(pool)
    cur_dev = cur_devices(root_mnt_pt)
    resize_flag = 'add'
    if (not add):
        resize_flag = 'delete'
    resize_cmd = [BTRFS, 'device', resize_flag, ]
    # Until we verify that all devices are or are not already members of the
    # given pools depending on if we are adding (default) or removing
    # (add=False) those devices we set our resize flag to false.
    resize = False
    for d in dev_list_byid:
        if (((resize_flag == 'add' and (d not in cur_dev)) or
                (resize_flag == 'delete' and (d in cur_dev)))):
            resize = True  # Basic disk member of pool sanity check passed.
            resize_cmd.append(d)
    if (not resize):
        return None
    resize_cmd.append(root_mnt_pt)
    return run_command(resize_cmd)


# Try mounting by-label first. If that is not possible, mount using every
# device in the set, one by one until success.
def mount_root(pool):
    root_pool_mnt = DEFAULT_MNT_DIR + pool.name
    if (is_share_mounted(pool.name)):
        return root_pool_mnt
    create_tmp_dir(root_pool_mnt)
    mnt_device = '/dev/disk/by-label/%s' % pool.name
    mnt_cmd = [MOUNT, mnt_device, root_pool_mnt, ]
    mnt_options = ''
    if (pool.mnt_options is not None):
        mnt_options = pool.mnt_options
    if (pool.compression is not None):
        if (re.search('compress', mnt_options) is None):
            mnt_options = ('%s,compress=%s' % (mnt_options, pool.compression))
    if (os.path.exists(mnt_device)):
        if (len(mnt_options) > 0):
            mnt_cmd.extend(['-o', mnt_options])
        run_command(mnt_cmd)
        return root_pool_mnt

    # If we cannot mount by-label, let's try mounting by device, one by one,
    # until we get our first success.
    if (pool.disk_set.count() < 1):
        raise Exception('Cannot mount Pool(%s) as it has no disks in it.'
                        % pool.name)
    last_device = pool.disk_set.last()
    for device in pool.disk_set.all():
        mnt_device = ('/dev/disk/by-id/%s' % device.name)
        if (os.path.exists(mnt_device)):
            mnt_cmd = [MOUNT, mnt_device, root_pool_mnt, ]
            if (len(mnt_options) > 0):
                mnt_cmd.extend(['-o', mnt_options])
            try:
                run_command(mnt_cmd)
                return root_pool_mnt
            except Exception, e:
                if (device.name == last_device.name):
                    # exhausted mounting using all devices in the pool
                    raise e
                logger.error('Error mounting: %s. '
                             'Will try using another device.' % mnt_cmd)
                logger.exception(e)
    raise Exception('Failed to mount Pool(%s) due to an unknown reason.'
                    % pool.name)


def umount_root(root_pool_mnt):
    if (not os.path.exists(root_pool_mnt)):
        return
    try:
        o, e, rc = run_command([UMOUNT, '-l', root_pool_mnt])
    except CommandException, ce:
        if (ce.rc == 32):
            for l in ce.err:
                l = l.strip()
                if (re.search('not mounted$', l) is not None):
                    return
        raise ce
    for i in range(20):
        if (not is_mounted(root_pool_mnt)):
            run_command([RMDIR, root_pool_mnt])
            return
        time.sleep(2)
    run_command([UMOUNT, '-f', root_pool_mnt])
    run_command([RMDIR, root_pool_mnt])
    return


def is_subvol(mnt_pt):
    show_cmd = [BTRFS, 'subvolume', 'show', mnt_pt]
    o, e, rc = run_command(show_cmd, throw=False)
    if (rc == 0):
        return True
    return False


def subvol_info(mnt_pt):
    info = {}
    o, e, rc = run_command([BTRFS, 'subvolume', 'show', mnt_pt],
                           throw=False)
    if (rc == 0):
        for i in o:
            fields = i.strip().split(':')
            if (len(fields) > 1):
                info[fields[0].strip()] = fields[1].strip()
    return info


def add_share(pool, share_name, qid):
    """ share is a subvolume in btrfs.
""" root_pool_mnt = mount_root(pool) subvol_mnt_pt = root_pool_mnt + '/' + share_name show_cmd = [BTRFS, 'subvolume', 'show', subvol_mnt_pt] o, e, rc = run_command(show_cmd, throw=False) if (rc == 0): return o, e, rc if (not is_subvol(subvol_mnt_pt)): sub_vol_cmd = [BTRFS, 'subvolume', 'create', '-i', qid, subvol_mnt_pt] return run_command(sub_vol_cmd) return True def mount_share(share, mnt_pt): if (is_mounted(mnt_pt)): return mount_root(share.pool) pool_device = ('/dev/disk/by-id/%s' % share.pool.disk_set.first().name) subvol_str = 'subvol=%s' % share.subvol_name create_tmp_dir(mnt_pt) mnt_cmd = [MOUNT, '-t', 'btrfs', '-o', subvol_str, pool_device, mnt_pt] return run_command(mnt_cmd) def mount_snap(share, snap_name, snap_mnt=None): pool_device = ('/dev/disk/by-id/%s' % share.pool.disk_set.first().name) share_path = ('%s%s' % (DEFAULT_MNT_DIR, share.name)) rel_snap_path = ('.snapshots/%s/%s' % (share.name, snap_name)) snap_path = ('%s%s/%s' % (DEFAULT_MNT_DIR, share.pool.name, rel_snap_path)) if (snap_mnt is None): snap_mnt = ('%s/.%s' % (share_path, snap_name)) if (is_mounted(snap_mnt)): return mount_share(share, share_path) if (is_subvol(snap_path)): create_tmp_dir(snap_mnt) return run_command([MOUNT, '-o', 'subvol=%s' % rel_snap_path, pool_device, snap_mnt]) def subvol_list_helper(mnt_pt): """ temporary solution until btrfs is fixed. wait upto 30 secs :( """ num_tries = 0 while (True): try: return run_command([BTRFS, 'subvolume', 'list', mnt_pt]) except CommandException, ce: if (ce.rc != 19): # rc == 19 is due to the slow kernel cleanup thread. It should # eventually succeed. raise ce time.sleep(1) num_tries = num_tries + 1 if (num_tries > 30): raise ce def snapshot_list(mnt_pt): o, e, rc = run_command([BTRFS, 'subvolume', 'list', '-s', mnt_pt]) snaps = [] for s in o: snaps.append(s.split()[-1]) return snaps def shares_info(pool): # return a list of share names under this mount_point. # useful to gather names of all shares in a pool try: mnt_pt = mount_root(pool) except CommandException, e: if (e.rc == 32): #mount failed, so we just assume that something has gone wrong at a #lower level, like a device failure. Return empty share map. #application state can be removed. If the low level failure is #recovered, state gets reconstructed anyway. return {} raise o, e, rc = run_command([BTRFS, 'subvolume', 'list', '-s', mnt_pt]) snap_idmap = {} for l in o: if (re.match('ID ', l) is not None): fields = l.strip().split() snap_idmap[fields[1]] = fields[-1] o, e, rc = run_command([BTRFS, 'subvolume', 'list', '-p', mnt_pt]) shares_d = {} share_ids = [] for l in o: if (re.match('ID ', l) is None): continue fields = l.split() vol_id = fields[1] if (vol_id in snap_idmap): # snapshot # if the snapshot directory is direct child of a pool and is rw, # then it's a Share. (aka Rockstor Share clone). clone = False if (len(snap_idmap[vol_id].split('/')) == 1): o, e, rc = run_command([BTRFS, 'property', 'get', '%s/%s' % (mnt_pt, snap_idmap[vol_id])]) for l in o: if (l == 'ro=false'): clone = True if (not clone): continue parent_id = fields[5] if (parent_id in share_ids): # subvol of subvol. add it so child subvols can also be ignored. share_ids.append(vol_id) elif (parent_id in snap_idmap): # snapshot/subvol of snapshot. # add it so child subvols can also be ignored. 
snap_idmap[vol_id] = fields[-1] else: shares_d[fields[-1]] = '0/%s' % vol_id share_ids.append(vol_id) return shares_d def parse_snap_details(mnt_pt, fields): writable = True snap_name = None o1, e1, rc1 = run_command([BTRFS, 'property', 'get', '%s/%s' % (mnt_pt, fields[-1])]) for l1 in o1: if (re.match('ro=', l1) is not None): if (l1.split('=')[1] == 'true'): writable = False if (writable is True): if (len(fields[-1].split('/')) == 1): # writable snapshot + direct child of pool. # So we'll treat it as a share and leave snap_name as None. return snap_name, writable snap_name = fields[-1].split('/')[-1] return snap_name, writable def snaps_info(mnt_pt, share_name): o, e, rc = run_command([BTRFS, 'subvolume', 'list', '-u', '-p', '-q', mnt_pt]) share_id = share_uuid = None for l in o: if (re.match('ID ', l) is not None): fields = l.split() if (fields[-1] == share_name): share_id = fields[1] share_uuid = fields[12] if (share_id is None): return {} o, e, rc = run_command([BTRFS, 'subvolume', 'list', '-s', '-p', '-q', '-u', mnt_pt]) snaps_d = {} snap_uuids = [] for l in o: if (re.match('ID ', l) is not None): fields = l.split() # parent uuid must be share_uuid or another snapshot's uuid if (fields[7] != share_id and fields[15] != share_uuid and fields[15] not in snap_uuids): continue snap_name, writable = parse_snap_details(mnt_pt, fields) if (snap_name is not None): snaps_d[snap_name] = ('0/%s' % fields[1], writable, ) # we rely on the observation that child snaps are listed after their # parents, so no need to iterate through results separately. # Instead, we add the uuid of a snap to the list and look up if # it's a parent of subsequent entries. snap_uuids.append(fields[17]) return snaps_d def share_id(pool, share_name): """ returns the subvolume id; it becomes the share's uuid. @todo: this should be part of add_share -- btrfs create should atomically return the id """ root_pool_mnt = mount_root(pool) out, err, rc = subvol_list_helper(root_pool_mnt) subvol_id = None for line in out: if (re.search(share_name + '$', line) is not None): subvol_id = line.split()[1] break if (subvol_id is not None): return subvol_id raise Exception('subvolume id for share: %s not found.' % share_name) def remove_share(pool, share_name, pqgroup, force=False): """ umount share if it's mounted. mount root pool btrfs subvolume delete root_mnt/vol_name umount root pool """ if (is_share_mounted(share_name)): mnt_pt = ('%s%s' % (DEFAULT_MNT_DIR, share_name)) umount_root(mnt_pt) root_pool_mnt = mount_root(pool) subvol_mnt_pt = root_pool_mnt + '/' + share_name if (not is_subvol(subvol_mnt_pt)): return if (force): o, e, rc = run_command([BTRFS, 'subvolume', 'list', '-o', subvol_mnt_pt]) for l in o: if (re.match('ID ', l) is not None): subvol = root_pool_mnt + '/' + l.split()[-1] run_command([BTRFS, 'subvolume', 'delete', subvol], log=True) qgroup = ('0/%s' % share_id(pool, share_name)) delete_cmd = [BTRFS, 'subvolume', 'delete', subvol_mnt_pt] run_command(delete_cmd, log=True) qgroup_destroy(qgroup, root_pool_mnt) return qgroup_destroy(pqgroup, root_pool_mnt) def remove_snap(pool, share_name, snap_name): root_mnt = mount_root(pool) snap_path = ('%s/.snapshots/%s/%s' % (root_mnt, share_name, snap_name)) if (is_mounted(snap_path)): umount_root(snap_path) if (is_subvol(snap_path)): qgroup = ('0/%s' % share_id(pool, snap_name)) run_command([BTRFS, 'subvolume', 'delete', snap_path], log=True) return qgroup_destroy(qgroup, root_mnt) else: o, e, rc = run_command([BTRFS, 'subvolume', 'list', '-s', root_mnt]) for l in o: #just give the first match.
if (re.match('ID.*%s$' % snap_name, l) is not None): snap = '%s/%s' % (root_mnt, l.split()[-1]) return run_command([BTRFS, 'subvolume', 'delete', snap], log=True) def add_snap_helper(orig, snap, readonly=False): cmd = [BTRFS, 'subvolume', 'snapshot', orig, snap] if (readonly): cmd.insert(3, '-r') try: return run_command(cmd) except CommandException, ce: if (ce.rc != 19): # rc == 19 is due to the slow kernel cleanup thread. snapshot gets # created just fine. lookup is delayed arbitrarily. raise ce def add_clone(pool, share, clone, snapshot=None): """ clones either a share or a snapshot """ pool_mnt = mount_root(pool) orig_path = pool_mnt if (snapshot is not None): orig_path = ('%s/.snapshots/%s/%s' % (orig_path, share, snapshot)) else: orig_path = ('%s/%s' % (orig_path, share)) clone_path = ('%s/%s' % (pool_mnt, clone)) return add_snap_helper(orig_path, clone_path) def add_snap(pool, share_name, snap_name, readonly=False): """ create a snapshot """ root_pool_mnt = mount_root(pool) share_full_path = ('%s/%s' % (root_pool_mnt, share_name)) snap_dir = ('%s/.snapshots/%s' % (root_pool_mnt, share_name)) create_tmp_dir(snap_dir) snap_full_path = ('%s/%s' % (snap_dir, snap_name)) return add_snap_helper(share_full_path, snap_full_path, readonly) def rollback_snap(snap_name, sname, subvol_name, pool): """ 1. validate destination snapshot and umount the share 2. remove the share 3. move the snapshot to share location and mount it. """ mnt_pt = ('%s%s' % (DEFAULT_MNT_DIR, sname)) snap_fp = ('%s/%s/.snapshots/%s/%s' % (DEFAULT_MNT_DIR, pool.name, sname, snap_name)) if (not is_subvol(snap_fp)): raise Exception('Snapshot(%s) does not exist. Rollback is not ' 'possible' % snap_fp) mount_root(pool) if (is_share_mounted(sname)): umount_root(mnt_pt) remove_share(pool, subvol_name, '-1/-1') shutil.move(snap_fp, '%s/%s/%s' % (DEFAULT_MNT_DIR, pool.name, sname)) create_tmp_dir(mnt_pt) subvol_str = 'subvol=%s' % sname dpath = '/dev/disk/by-id/%s' % pool.disk_set.first().name mnt_cmd = [MOUNT, '-t', 'btrfs', '-o', subvol_str, dpath, mnt_pt] run_command(mnt_cmd) def switch_quota(pool, flag='enable'): root_mnt_pt = mount_root(pool) cmd = [BTRFS, 'quota', flag, root_mnt_pt] return run_command(cmd) def enable_quota(pool): return switch_quota(pool) def disable_quota(pool_name): return switch_quota(pool_name, flag='disable') def qgroup_id(pool, share_name): sid = share_id(pool, share_name) return '0/' + sid def qgroup_max(mnt_pt): o, e, rc = run_command([BTRFS, 'qgroup', 'show', mnt_pt], log=True) res = 0 for l in o: if (re.match('%s/' % QID, l) is not None): cid = int(l.split()[0].split('/')[1]) if (cid > res): res = cid return res def qgroup_create(pool): # mount pool mnt_pt = mount_root(pool) qid = ('%s/%d' % (QID, qgroup_max(mnt_pt) + 1)) o, e, rc = run_command([BTRFS, 'qgroup', 'create', qid, mnt_pt], log=True) return qid def qgroup_destroy(qid, mnt_pt): o, e, rc = run_command([BTRFS, 'qgroup', 'show', mnt_pt]) for l in o: if (re.match(qid, l) is not None and l.split()[0] == qid): return run_command([BTRFS, 'qgroup', 'destroy', qid, mnt_pt], log=True) return False def qgroup_is_assigned(qid, pqid, mnt_pt): # Returns true if the given qgroup qid is already assigned to pqid for the # path(mnt_pt) o, e, rc = run_command([BTRFS, 'qgroup', 'show', '-pc', mnt_pt]) for l in o: fields = l.split() if (len(fields) > 3 and fields[0] == qid and fields[3] == pqid): return True return False def qgroup_assign(qid, pqid, mnt_pt): if (qgroup_is_assigned(qid, pqid, mnt_pt)): return True # since btrfs-progs 4.2, qgroup assign 
succeeds but throws a warning: # "WARNING: # quotas may be inconsistent, rescan needed" and returns with # exit code 1. try: run_command([BTRFS, 'qgroup', 'assign', qid, pqid, mnt_pt]) except CommandException, e: wmsg = 'WARNING: quotas may be inconsistent, rescan needed' if (e.rc == 1 and e.err[0] == wmsg): #schedule a rescan if one is not currently running. dmsg = ('Quota inconsistency while assigning %s. Rescan scheduled.' % qid) try: run_command([BTRFS, 'quota', 'rescan', mnt_pt]) return logger.debug(dmsg) except CommandException, e2: emsg = 'ERROR: quota rescan failed: Operation now in progress' if (e2.rc == 1 and e2.err[0] == emsg): return logger.debug('%s.. Another rescan already in progress.' % dmsg) logger.exception(e2) raise e2 logger.exception(e) raise e def update_quota(pool, qgroup, size_bytes): root_pool_mnt = mount_root(pool) # Until btrfs adds better support for qgroup limits. We'll not set limits. # It looks like we'll see the fixes in 4.2 and final ones by 4.3. # cmd = [BTRFS, 'qgroup', 'limit', str(size_bytes), qgroup, root_pool_mnt] cmd = [BTRFS, 'qgroup', 'limit', 'none', qgroup, root_pool_mnt] return run_command(cmd, log=True) def share_usage(pool, share_id): """ Return the sum of the qgroup sizes of this share and any child subvolumes """ # Obtain path to share in pool root_pool_mnt = mount_root(pool) cmd = [BTRFS, 'subvolume', 'list', root_pool_mnt] out, err, rc = run_command(cmd, log=True) short_id = share_id.split('/')[1] share_dir = '' for line in out: fields = line.split() if (len(fields) > 0 and short_id in fields[1]): share_dir = root_pool_mnt + '/' + fields[8] break # Obtain list of child subvolume qgroups cmd = [BTRFS, 'subvolume', 'list', '-o', share_dir] out, err, rc = run_command(cmd, log=True) qgroups = [short_id] for line in out: fields = line.split() if (len(fields) > 0): qgroups.append(fields[1]) # Sum qgroup sizes cmd = [BTRFS, 'qgroup', 'show', share_dir] out, err, rc = run_command(cmd, log=True) rusage = eusage = 0 for line in out: fields = line.split() qgroup = [] if (len(fields) > 0 and '/' in fields[0]): qgroup = fields[0].split('/') if (len(qgroup) > 0 and qgroup[1] in qgroups): rusage += convert_to_kib(fields[1]) eusage += convert_to_kib(fields[2]) return (rusage, eusage) def shares_usage(pool, share_map, snap_map): # don't mount the pool if at least one share in the map is mounted. usage_map = {} mnt_pt = None for s in share_map.keys(): if (is_share_mounted(share_map[s])): mnt_pt = ('%s%s' % (DEFAULT_MNT_DIR, share_map[s])) break if (mnt_pt is None): mnt_pt = mount_root(pool) cmd = [BTRFS, 'qgroup', 'show', mnt_pt] out, err, rc = run_command(cmd, log=True) combined_map = dict(share_map, **snap_map) for line in out: fields = line.split() if (len(fields) > 0 and fields[0] in combined_map): r_usage = convert_to_kib(fields[-2]) e_usage = convert_to_kib(fields[-1]) usage_map[combined_map[fields[0]]] = (r_usage, e_usage) return usage_map def pool_usage(mnt_pt): # @todo: remove temporary raid5/6 custom logic once fi usage # supports raid5/6. 
cmd = [BTRFS, 'fi', 'usage', '-b', mnt_pt] total = 0 inuse = 0 free = 0 data_ratio = 1 raid56 = False parity = 1 disks = set() out, err, rc = run_command(cmd) for e in err: e = e.strip() if (re.match('WARNING: RAID56', e) is not None): raid56 = True for o in out: o = o.strip() if (raid56 is True and re.match('/dev/', o) is not None): disks.add(o.split()[0]) elif (raid56 is True and re.match('Data,RAID', o) is not None): if (o[5:10] == 'RAID6'): parity = 2 elif (re.match('Device size:', o) is not None): total = int(o.split()[2]) / 1024 elif (re.match('Used:', o) is not None): inuse = int(o.split()[1]) / 1024 elif (re.match('Free ', o) is not None): free = int(o.split()[2]) / 1024 elif (re.match('Data ratio:', o) is not None): data_ratio = float(o.split()[2]) if (data_ratio < 0.01): data_ratio = 0.01 if (raid56 is True): num_disks = len(disks) if (num_disks > 0): per_disk = total / num_disks total = (num_disks - parity) * per_disk else: total = total / data_ratio inuse = inuse / data_ratio free = total - inuse return (total, inuse, free) def scrub_start(pool, force=False): mnt_pt = mount_root(pool) p = PoolScrub(mnt_pt) p.start() return p.pid def scrub_status(pool): stats = {'status': 'unknown', } mnt_pt = mount_root(pool) out, err, rc = run_command([BTRFS, 'scrub', 'status', '-R', mnt_pt]) if (len(out) > 1): if (re.search('running', out[1]) is not None): stats['status'] = 'running' elif (re.search('finished', out[1]) is not None): stats['status'] = 'finished' dfields = out[1].split()[-1].split(':') stats['duration'] = ((int(dfields[0]) * 60 * 60) + (int(dfields[1]) * 60) + int(dfields[2])) else: return stats else: return stats for l in out[2:-1]: fields = l.strip().split(': ') if (fields[0] == 'data_bytes_scrubbed'): stats['kb_scrubbed'] = int(fields[1]) / 1024 else: stats[fields[0]] = int(fields[1]) return stats @task() def start_balance(mnt_pt, force=False, convert=None): cmd = ['btrfs', 'balance', 'start', mnt_pt] if (force): cmd.insert(3, '-f') if (convert is not None): cmd.insert(3, '-dconvert=%s' % convert) cmd.insert(3, '-mconvert=%s' % convert) run_command(cmd) def balance_status(pool): stats = {'status': 'unknown', } mnt_pt = mount_root(pool) out, err, rc = run_command([BTRFS, 'balance', 'status', mnt_pt], throw=False) if (len(out) > 0): if (re.match('Balance', out[0]) is not None): stats['status'] = 'running' if ((len(out) > 1 and re.search('chunks balanced', out[1]) is not None)): percent_left = out[1].split()[-2][:-1] try: percent_left = int(percent_left) stats['percent_done'] = 100 - percent_left except: pass elif (re.match('No balance', out[0]) is not None): stats['status'] = 'finished' stats['percent_done'] = 100 return stats def device_scan(): return run_command([BTRFS, 'device', 'scan']) def btrfs_uuid(disk): """return uuid of a btrfs filesystem""" o, e, rc = run_command( [BTRFS, 'filesystem', 'show', '/dev/disk/by-id/%s' % disk]) return o[0].split()[3] def set_property(mnt_pt, name, val, mount=True): if (mount is not True or is_mounted(mnt_pt)): cmd = [BTRFS, 'property', 'set', mnt_pt, name, val] return run_command(cmd) def get_snap(subvol_path, oldest=False, num_retain=None, regex=None): if (not os.path.isdir(subvol_path)): return None share_name = subvol_path.split('/')[-1] cmd = [BTRFS, 'subvol', 'list', '-o', subvol_path] o, e, rc = run_command(cmd) snaps = {} for l in o: fields = l.split() if (len(fields) > 0): snap_fields = fields[-1].split('/') if (len(snap_fields) != 3 or snap_fields[1] != share_name): #not the Share we are interested in. 
continue if (regex is not None and re.search(regex, snap_fields[2]) is None): #regex not in the name continue snaps[int(fields[1])] = snap_fields[2] snap_ids = sorted(snaps.keys()) if (oldest): if(len(snap_ids) > num_retain): return snaps[snap_ids[0]] elif (len(snap_ids) > 0): return snaps[snap_ids[-1]] return None def get_oldest_snap(subvol_path, num_retain, regex=None): return get_snap(subvol_path, oldest=True, num_retain=num_retain, regex=regex) def get_lastest_snap(subvol_path, regex=None): return get_snap(subvol_path, regex=regex)
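# --- Hypothetical usage sketch (not from the original module). The helpers
# above expect Rockstor's Django Pool/Disk models; the stand-ins below are
# illustrative and only model the attributes the code actually reads
# (name, mnt_options, compression, disk_set). Running this for real needs
# root privileges and an existing btrfs pool labeled 'pool0'.
if __name__ == '__main__':
    class FakeDisk(object):
        def __init__(self, name):
            self.name = name

    class FakeDiskSet(object):
        def __init__(self, disks):
            self.disks = disks
        def count(self):
            return len(self.disks)
        def first(self):
            return self.disks[0]
        def last(self):
            return self.disks[-1]
        def all(self):
            return list(self.disks)

    class FakePool(object):
        def __init__(self, name, disk_names):
            self.name = name
            self.mnt_options = None
            self.compression = None
            self.disk_set = FakeDiskSet([FakeDisk(n) for n in disk_names])

    pool = FakePool('pool0', ['ata-EXAMPLE-DISK'])
    mnt = mount_root(pool)  # tries by-label first, then each device by-id
    print('mounted at %s, is_subvol=%s' % (mnt, is_subvol(mnt)))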
gpl-3.0
-7,107,197,275,919,733,000
35.123723
91
0.564996
false
3.31614
false
false
false
nevins-b/lemur
lemur/plugins/lemur_kubernetes/plugin.py
1
5423
""" .. module: lemur.plugins.lemur_kubernetes.plugin :platform: Unix :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more :license: Apache, see LICENSE for more details. The plugin inserts certificates and the private key as Kubernetes secret that can later be used to secure service endpoints running in Kubernetes pods .. moduleauthor:: Mikhail Khodorovskiy <[email protected]> """ import base64 import urllib import requests import itertools from lemur.certificates.models import Certificate from lemur.plugins.bases import DestinationPlugin DEFAULT_API_VERSION = 'v1' def ensure_resource(k8s_api, k8s_base_uri, namespace, kind, name, data): # _resolve_uri(k8s_base_uri, namespace, kind, name, api_ver=DEFAULT_API_VERSION) url = _resolve_uri(k8s_base_uri, namespace, kind) create_resp = k8s_api.post(url, json=data) if 200 <= create_resp.status_code <= 299: return None elif create_resp.json()['reason'] != 'AlreadyExists': return create_resp.content update_resp = k8s_api.put(_resolve_uri(k8s_base_uri, namespace, kind, name), json=data) if not 200 <= update_resp.status_code <= 299: return update_resp.content return None def _resolve_ns(k8s_base_uri, namespace, api_ver=DEFAULT_API_VERSION,): api_group = 'api' if '/' in api_ver: api_group = 'apis' return '{base}/{api_group}/{api_ver}/namespaces'.format(base=k8s_base_uri, api_group=api_group, api_ver=api_ver) + ('/' + namespace if namespace else '') def _resolve_uri(k8s_base_uri, namespace, kind, name=None, api_ver=DEFAULT_API_VERSION): if not namespace: namespace = 'default' return "/".join(itertools.chain.from_iterable([ (_resolve_ns(k8s_base_uri, namespace, api_ver=api_ver),), ((kind + 's').lower(),), (name,) if name else (), ])) class KubernetesDestinationPlugin(DestinationPlugin): title = 'Kubernetes' slug = 'kubernetes-destination' description = 'Allow the uploading of certificates to Kubernetes as secret' author = 'Mikhail Khodorovskiy' author_url = 'https://github.com/mik373/lemur' options = [ { 'name': 'kubernetesURL', 'type': 'str', 'required': True, 'validation': '@(https?|http)://(-\.)?([^\s/?\.#-]+\.?)+(/[^\s]*)?$@iS', 'helpMessage': 'Must be a valid Kubernetes server URL!', }, { 'name': 'kubernetesAuthToken', 'type': 'str', 'required': True, 'validation': '/^$|\s+/', 'helpMessage': 'Must be a valid Kubernetes server Token!', }, { 'name': 'kubernetesServerCertificate', 'type': 'str', 'required': True, 'validation': '/^$|\s+/', 'helpMessage': 'Must be a valid Kubernetes server Certificate!', }, { 'name': 'kubernetesNamespace', 'type': 'str', 'required': True, 'validation': '/^$|\s+/', 'helpMessage': 'Must be a valid Kubernetes Namespace!', }, ] def __init__(self, *args, **kwargs): super(KubernetesDestinationPlugin, self).__init__(*args, **kwargs) def upload(self, name, body, private_key, cert_chain, options, **kwargs): k8_bearer = self.get_option('kubernetesAuthToken', options) k8_cert = self.get_option('kubernetesServerCertificate', options) k8_namespace = self.get_option('kubernetesNamespace', options) k8_base_uri = self.get_option('kubernetesURL', options) k8s_api = K8sSession(k8_bearer, k8_cert) cert = Certificate(body=body) # in the future once runtime properties can be passed-in - use passed-in secret name secret_name = 'certs-' + urllib.quote_plus(cert.name) err = ensure_resource(k8s_api, k8s_base_uri=k8_base_uri, namespace=k8_namespace, kind="secret", name=secret_name, data={ 'apiVersion': 'v1', 'kind': 'Secret', 'metadata': { 'name': secret_name, }, 'data': { 'combined.pem': base64.b64encode(body + private_key), 'ca.crt': 
base64.b64encode(cert_chain), 'service.key': base64.b64encode(private_key), 'service.crt': base64.b64encode(body), } }) if err is not None: raise Exception("Error uploading secret: " + err) class K8sSession(requests.Session): def __init__(self, bearer, cert): super(K8sSession, self).__init__() self.headers.update({ 'Authorization': 'Bearer %s' % bearer }) k8_ca = '/tmp/k8.cert' with open(k8_ca, "w") as text_file: text_file.write(cert) self.verify = k8_ca def request(self, method, url, params=None, data=None, headers=None, cookies=None, files=None, auth=None, timeout=30, allow_redirects=True, proxies=None, hooks=None, stream=None, verify=None, cert=None, json=None): """ This method overrides the default timeout to be 30s. """ return super(K8sSession, self).request(method, url, params, data, headers, cookies, files, auth, timeout, allow_redirects, proxies, hooks, stream, verify, cert, json)
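# --- Hypothetical invocation sketch (not part of the plugin). In lemur the
# destination plugin is driven by the framework; the option payload shape
# below ({'name': ..., 'value': ...}) is an assumption about what the
# DestinationPlugin base's get_option() consumes, and every value is a
# placeholder, not a real endpoint or credential.
if __name__ == '__main__':
    plugin = KubernetesDestinationPlugin()
    options = [
        {'name': 'kubernetesURL', 'value': 'https://k8s.example.com:6443'},
        {'name': 'kubernetesAuthToken', 'value': 'PLACEHOLDER-TOKEN'},
        {'name': 'kubernetesServerCertificate', 'value': 'PLACEHOLDER-CA-PEM'},
        {'name': 'kubernetesNamespace', 'value': 'default'},
    ]
    # body/private_key/cert_chain would be PEM-encoded strings issued by lemur
    plugin.upload('example-com', 'CERT-PEM', 'KEY-PEM', 'CHAIN-PEM', options)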
apache-2.0
-2,376,839,072,637,838,300
33.987097
157
0.592476
false
3.649394
false
false
false
jfinkels/turingmachine
test_turingmachine.py
1
11279
# test_turingmachine.py - tests for turingmachine.py # # Copyright 2014 Jeffrey Finkelstein. # # This file is part of turingmachine. # # turingmachine is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # turingmachine is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the GNU General Public License along with # turingmachine. If not, see <http://www.gnu.org/licenses/>. """Provides tests for :mod:`turingmachine`.""" from collections import defaultdict import logging import unittest from turingmachine import BadSymbol from turingmachine import L from turingmachine import logger from turingmachine import R from turingmachine import TuringMachine from turingmachine import UnknownSymbol from turingmachine import UnknownState class TestTuringMachine(unittest.TestCase): """Unit tests for the :class:`turingmachine.TuringMachine` class.""" def setUp(self): """Disable verbose logging for tests.""" self.level = logger.getEffectiveLevel() logger.setLevel(logging.INFO) def tearDown(self): """Restore the original logging level for the :mod:`turingmachine` module. """ logger.setLevel(self.level) def test_unknown_symbol(self): """Tests that an error is raised when an unknown symbol (that is, a symbol for which there is no entry in the transition function) is encountered in the string. """ states = set(range(4)) initial_state = 0 accept_state = 2 reject_state = 3 transitions = { # repeatedly move right, writing a bogus character as it goes 0: { '0': (0, '0', R), '1': (0, '?', R), '_': (1, '_', L) }, # accept on the last symbol 1: { '0': (accept_state, '0', R), '1': (accept_state, '1', R), '_': (accept_state, '_', R) }, #2: {}, # this is the accept state #3: {} # this is the reject state } bogus_symbol = TuringMachine(states, initial_state, accept_state, reject_state, transitions) try: bogus_symbol('_0101_') assert False, 'Should have raised an exception' except UnknownSymbol: pass def test_bad_symbol(self): """Tests that an error is raised when the user specifies a bad symbol in the transition table. """ states = set(range(3)) initial_state = 0 accept_state = 1 reject_state = 2 transitions = { 0: { '0': (0, '', R), '1': (0, '', R), '_': (1, '', R) } } bad_symbol = TuringMachine(states, initial_state, accept_state, reject_state, transitions) try: bad_symbol('_0_') assert False, 'Should have raised an exception' except BadSymbol: pass def test_bad_state(self): """Tests that an error is raised when the user specifies a bad state in the transition table. """ bad_state = TuringMachine(set(range(3)), 0, 1, 2, {}) try: bad_state('__') assert False, 'Should have raised an exception' except UnknownState: pass def test_move_left_and_right(self): """Tests the execution of a Turing machine that simply moves left and right. """ states = set(range(17)) initial_state = 0 accept_state = 15 reject_state = 16 # Move left five cells then move right ten cells. Always accept. 
transition = defaultdict(dict) for state in range(5): for symbol in '0', '1', '_': transition[state][symbol] = (state + 1, symbol, L) for state in range(5, 15): for symbol in '0', '1', '_': transition[state][symbol] = (state + 1, symbol, R) move_left_right = TuringMachine(states, initial_state, accept_state, reject_state, transition) for s in '', '010', '000000': assert move_left_right('_' + s + '_') def test_is_even(self): """Tests the execution of a Turing machine that computes whether a binary string represents an even number. This Turing machine simply moves right repeatedly until it finds the end of the input string, then checks if the rightmost (that is, least significant) bit is a 0. """ states = set(range(4)) initial_state = 0 accept_state = 2 reject_state = 3 transition = { # this state represents moving right all the way to the end 0: { '0': (0, '0', R), '1': (0, '1', R), '_': (1, '_', L), }, # this state represents looking at the rightmost symbol 1: { '0': (accept_state, '0', L), '1': (reject_state, '1', L), '_': (reject_state, '_', R), } #2: {} # this is the accept state #3: {} # this is the reject state } is_even = TuringMachine(states, initial_state, accept_state, reject_state, transition) for s in '011010', '0', '1100010': assert is_even('_' + s + '_') for s in '1101', '1', '', '01001': assert not is_even('_' + s + '_') def test_parity(self): """Tests the execution of a Turing machine that computes the parity of a binary string, that is, whether the number of ones in the binary strings is odd. This Turing machine oscillates between two states, one of which represents having seen an even number of 1s, the other an odd number. Every time it sees a 1, it switches which of those two states it is in. """ states = set(range(4)) initial_state = 0 accept_state = 2 reject_state = 3 # begin in pre-reject state # repeat: # if reading a 1: # if in pre-accept state, move to pre-reject state # if in pre-reject state, move to pre-accept state # move right # if in pre-accept, accept # if in pre-reject, reject transition = { # this state represents having read an even number of ones 0: { '0': (0, '0', R), '1': (1, '1', R), '_': (reject_state, '_', L), }, # this state represents having read an odd number of ones 1: { '0': (1, '0', R), '1': (0, '1', R), '_': (accept_state, '_', R), } } parity = TuringMachine(states, initial_state, accept_state, reject_state, transition) for s in '011010', '1', '1101011': assert parity('_' + s + '_') for s in '1001', '0', '', '001001': assert not parity('_' + s + '_') def test_is_palindrome(self): """Tests the execution of a Turing machine that computes whether a binary string is a palindrome. This Turing machine operates recursively. If the input string is an empty string or a single bit, it accepts. If the input string has length two or more, it determines if the first and last bits of the input string are the same, then turns each of them into a blank. It then recurses and runs the same algorithm on the new, smaller string. """ states = set(range(10)) initial_state = 0 accept_state = 8 reject_state = 9 # This is a description of the implementation of the Turing machine # that decides whether a binary string is a palindrome. 
# # repeat the following steps: # read a symbol # if _: accept # if 0: # write blank # move right # if _: accept (because it is a single 0) # otherwise repeatedly move right to end # at terminal blank move left # if 1: reject # else: write blank # if 1: # write blank # move right # if _: accept (because it is a single 1) # repeatedly move right to end # at terminal blank move left # if 0: reject # else: write blank # repeatedly move left to end transition = { # read the first symbol 0: { '0': (6, '_', R), '1': (7, '_', R), '_': (accept_state, '_', R) }, # string starts with 0; move repeatedly right 1: { '0': (1, '0', R), '1': (1, '1', R), '_': (3, '_', L) }, # string starts with 1; move repeatedly right 2: { '0': (2, '0', R), '1': (2, '1', R), '_': (4, '_', L) }, # rightmost symbol should be a 0 3: { '0': (5, '_', L), '1': (reject_state, '1', L), '_': (reject_state, '_', L) # this situation is unreachable }, # rightmost symbol should be a 1 4: { '0': (reject_state, '0', L), '1': (5, '_', L), '_': (reject_state, '_', L) # this situation is unreachable }, # repeatedly move left to the beginning of the string 5: { '0': (5, '0', L), '1': (5, '1', L), '_': (0, '_', R) }, # check if there is only one symbol left 6: { '0': (1, '0', R), '1': (1, '1', R), '_': (accept_state, '_', L) }, # check if there is only one symbol left 7: { '0': (2, '0', R), '1': (2, '1', R), '_': (accept_state, '_', L) } #8: {} # this is the accept state #9: {} # this is the reject state } is_palindrome = TuringMachine(states, initial_state, accept_state, reject_state, transition) for s in '', '0', '010', '111010111': assert is_palindrome('_' + s + '_') for s in '01', '110', '111100001': assert not is_palindrome('_' + s + '_')
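# The module defines the test cases but no entry point; the standard
# unittest guard below lets it run directly (python test_turingmachine.py)
# in addition to `python -m unittest test_turingmachine`.
if __name__ == '__main__':
    unittest.main()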
gpl-3.0
6,797,504,506,111,407,000
34.806349
79
0.494193
false
4.262661
true
false
false
tensorflow/datasets
tensorflow_datasets/structured/dart/dart.py
1
4869
# coding=utf-8 # Copyright 2021 The TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """dart dataset.""" import json import os import tensorflow.compat.v2 as tf import tensorflow_datasets.public_api as tfds _CITATION = """ @article{radev2020dart, title={DART: Open-Domain Structured Data Record to Text Generation}, author={Dragomir Radev and Rui Zhang and Amrit Rau and Abhinand Sivaprasad and Chiachun Hsieh and Nazneen Fatema Rajani and Xiangru Tang and Aadit Vyas and Neha Verma and Pranav Krishna and Yangxiaokang Liu and Nadia Irwanto and Jessica Pan and Faiaz Rahman and Ahmad Zaidi and Murori Mutuma and Yasin Tarabar and Ankit Gupta and Tao Yu and Yi Chern Tan and Xi Victoria Lin and Caiming Xiong and Richard Socher}, journal={arXiv preprint arXiv:2007.02871}, year={2020} } """ _DESCRIPTION = """ DART (DAta Record to Text generation) contains RDF entity-relation annotated with sentence descriptions that cover all facts in the triple set. DART was constructed using existing datasets such as: WikiTableQuestions, WikiSQL, WebNLG and Cleaned E2E. The tables from WikiTableQuestions and WikiSQL were transformed to subject-predicate-object triples, and their text annotations were mainly collected from MTurk. The meaningful representations in E2E were also transformed to triples and their descriptions were used; those that couldn't be transformed were dropped. The dataset splits of E2E and WebNLG are kept, and for the WikiTableQuestions and WikiSQL the Jaccard similarity is used to keep similar tables in the same set (train/dev/test). This dataset is constructed following a standardized table format. """ _URL = 'https://github.com/Yale-LILY/dart/archive/master.zip' class Dart(tfds.core.GeneratorBasedBuilder): """DAta Record to Text Generation.""" VERSION = tfds.core.Version('0.1.0') def _info(self): return tfds.core.DatasetInfo( builder=self, # This is the description that will appear on the datasets page. description=_DESCRIPTION, # tfds.features.FeatureConnectors features=tfds.features.FeaturesDict({ 'input_text': { 'table': # Each row will be one triple fact.
tfds.features.Sequence({ # we'll only have subject/predicate/object headers 'column_header': tf.string, 'row_number': tf.int16, 'content': tf.string, }), }, 'target_text': tf.string, }), supervised_keys=('input_text', 'target_text'), # Homepage of the dataset for documentation homepage='https://github.com/Yale-LILY/dart', citation=_CITATION, ) def _split_generators(self, dl_manager): """Returns SplitGenerators.""" extracted_path = os.path.join( dl_manager.download_and_extract(_URL), 'dart-master', 'data', 'v1.1.1') return { tfds.Split.TRAIN: self._generate_examples( json_file=os.path.join(extracted_path, 'dart-v1.1.1-full-train.json')), tfds.Split.VALIDATION: self._generate_examples( json_file=os.path.join(extracted_path, 'dart-v1.1.1-full-dev.json')), tfds.Split.TEST: self._generate_examples( json_file=os.path.join(extracted_path, 'dart-v1.1.1-full-test.json')), } def _generate_examples(self, json_file): """Yields examples.""" with tf.io.gfile.GFile(json_file) as f: data = json.load(f) for entry_count, entry in enumerate(data): table = [] for i, triple_set in enumerate(entry['tripleset']): for header, content in zip(['subject', 'predicate', 'object'], triple_set): table.append({ 'column_header': header, 'row_number': i, 'content': content, }) for annotation_count, annotation in enumerate(entry['annotations']): yield '{}_{}'.format(entry_count, annotation_count), { 'input_text': { 'table': table, }, 'target_text': annotation['text'] }
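# --- Hypothetical loading sketch (not part of the original builder): shows
# how the features defined above come back from tfds.load(). Assumes the
# 'dart' builder is registered in this tensorflow_datasets installation and
# that the source archive can be downloaded.
if __name__ == '__main__':
    ds = tfds.load('dart', split='train')
    for ex in ds.take(1):
        # 'table' is a Sequence feature: parallel tensors per triple component
        print(ex['input_text']['table']['column_header'])
        print(ex['input_text']['table']['content'])
        print(ex['target_text'])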
apache-2.0
2,792,275,343,321,724,000
39.575
414
0.636681
false
3.824823
false
false
false
Mitali-Sodhi/CodeLingo
Dataset/python/multi_interface.py
1
6077
import re import time import sys import os import copy PARAMS = {} NAME_PREFIX = 'vm_' METRICS = { 'time' : 0, 'data' : {} } LAST_METRICS = copy.deepcopy(METRICS) METRICS_CACHE_MAX = 5 INTERFACES = [] descriptors = [] stats_tab = { "rx_bytes" : 0, "rx_pkts" : 1, "rx_errs" : 2, "rx_drops" : 3, "tx_bytes" : 8, "tx_pkts" : 9, "tx_errs" : 10, "tx_drops" : 11, } # Where to get the stats from net_stats_file = "/proc/net/dev" def create_desc(skel, prop): d = skel.copy() for k,v in prop.iteritems(): d[k] = v return d def metric_init(params): global descriptors global INTERFACES # INTERFACES = params.get('interfaces') watch_interfaces = params.get('interfaces') excluded_interfaces = params.get('excluded_interfaces') get_interfaces(watch_interfaces,excluded_interfaces) # print INTERFACES time_max = 60 Desc_Skel = { 'name' : 'XXX', 'call_back' : get_delta, 'time_max' : 60, 'value_type' : 'float', 'format' : '%.4f', 'units' : '/s', 'slope' : 'both', # zero|positive|negative|both 'description' : 'XXX', 'groups' : 'network', } for dev in INTERFACES: descriptors.append(create_desc(Desc_Skel, { "name" : "rx_bytes_" + dev, "units" : "bytes/sec", "description" : "received bytes per sec", })) descriptors.append(create_desc(Desc_Skel, { "name" : "rx_pkts_" + dev, "units" : "pkts/sec", "description" : "received packets per sec", })) descriptors.append(create_desc(Desc_Skel, { "name" : "rx_errs_" + dev, "units" : "pkts/sec", "description" : "received error packets per sec", })) descriptors.append(create_desc(Desc_Skel, { "name" : "rx_drops_" + dev, "units" : "pkts/sec", "description" : "receive packets dropped per sec", })) descriptors.append(create_desc(Desc_Skel, { "name" : "tx_bytes_" + dev, "units" : "bytes/sec", "description" : "transmitted bytes per sec", })) descriptors.append(create_desc(Desc_Skel, { "name" : "tx_pkts_" + dev, "units" : "pkts/sec", "description" : "transmitted packets per sec", })) descriptors.append(create_desc(Desc_Skel, { "name" : "tx_errs_" + dev, "units" : "pkts/sec", "description" : "transmitted error packets per sec", })) descriptors.append(create_desc(Desc_Skel, { "name" : "tx_drops_" + dev, "units" : "pkts/sec", "description" : "transmitted dropped packets per sec", })) return descriptors def metric_cleanup(): '''Clean up the metric module.''' pass def get_interfaces(watch_interfaces, excluded_interfaces): global INTERFACES if_excluded = 0 # check if particular interfaces have been specified.
Watch only those if watch_interfaces != "": INTERFACES = watch_interfaces.split(" ") else: if excluded_interfaces != "": excluded_if_list = excluded_interfaces.split(" ") f = open(net_stats_file, "r") for line in f: # Find only lines with : if re.search(":", line): a = line.split(":") dev_name = a[0].lstrip() # Determine if interface is excluded by name or regex for ex in excluded_if_list: if re.match(ex,dev_name): if_excluded = 1 if not if_excluded: INTERFACES.append(dev_name) if_excluded = 0 return 0 def get_metrics(): """Return all metrics""" global METRICS, LAST_METRICS if (time.time() - METRICS['time']) > METRICS_CACHE_MAX: try: file = open(net_stats_file, 'r') except IOError: return 0 # convert to dict metrics = {} for line in file: if re.search(":", line): a = line.split(":") dev_name = a[0].lstrip() metrics[dev_name] = re.split("\s+", a[1].lstrip()) # update cache LAST_METRICS = copy.deepcopy(METRICS) METRICS = { 'time': time.time(), 'data': metrics } return [METRICS, LAST_METRICS] def get_delta(name): """Return change over time for the requested metric""" # get metrics [curr_metrics, last_metrics] = get_metrics() # Names will be in the format of tx/rx underscore metric_name underscore interface # e.g. tx_bytes_eth0 parts = name.split("_") iface = parts[2] name = parts[0] + "_" + parts[1] index = stats_tab[name] try: delta = (float(curr_metrics['data'][iface][index]) - float(last_metrics['data'][iface][index])) /(curr_metrics['time'] - last_metrics['time']) if delta < 0: print name + " is less than 0" delta = 0 except KeyError: delta = 0.0 return delta if __name__ == '__main__': try: params = { "interfaces": "", "excluded_interfaces": "dummy", "debug" : True, } metric_init(params) while True: for d in descriptors: v = d['call_back'](d['name']) print ('value for %s is '+d['format']) % (d['name'], v) time.sleep(5) except StandardError: print sys.exc_info()[0] os._exit(1)
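# --- Hypothetical gmond configuration (not from the original module). A
# Ganglia python module like this is typically wired up via a .pyconf file;
# the snippet below illustrates how the 'interfaces'/'excluded_interfaces'
# params read by metric_init() might be passed. All values are examples.
#
# modules {
#   module {
#     name = "multi_interface"
#     language = "python"
#     param interfaces          { value = "" }
#     param excluded_interfaces { value = "lo dummy" }
#   }
# }
#
# collection_group {
#   collect_every  = 20
#   time_threshold = 60
#   metric { name_match = "(rx|tx)_(.+)" }
# }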
mit
4,746,327,573,861,715,000
27.530516
148
0.481981
false
3.817211
false
false
false
Beyond-Imagination/BlubBlub
RaspberryPI/django-env/bin/miniterm.py
1
35109
#!/home/pi/Django/bin/python3 # # Very simple serial terminal # # This file is part of pySerial. https://github.com/pyserial/pyserial # (C)2002-2015 Chris Liechti <[email protected]> # # SPDX-License-Identifier: BSD-3-Clause import codecs import os import sys import threading import serial from serial.tools.list_ports import comports from serial.tools import hexlify_codec # pylint: disable=wrong-import-order,wrong-import-position codecs.register(lambda c: hexlify_codec.getregentry() if c == 'hexlify' else None) try: raw_input except NameError: # pylint: disable=redefined-builtin,invalid-name raw_input = input # in python3 it's "raw" unichr = chr def key_description(character): """generate a readable description for a key""" ascii_code = ord(character) if ascii_code < 32: return 'Ctrl+{:c}'.format(ord('@') + ascii_code) else: return repr(character) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - class ConsoleBase(object): """OS abstraction for console (input/output codec, no echo)""" def __init__(self): if sys.version_info >= (3, 0): self.byte_output = sys.stdout.buffer else: self.byte_output = sys.stdout self.output = sys.stdout def setup(self): """Set console to read single characters, no echo""" def cleanup(self): """Restore default console settings""" def getkey(self): """Read a single key from the console""" return None def write_bytes(self, byte_string): """Write bytes (already encoded)""" self.byte_output.write(byte_string) self.byte_output.flush() def write(self, text): """Write string""" self.output.write(text) self.output.flush() def cancel(self): """Cancel getkey operation""" # - - - - - - - - - - - - - - - - - - - - - - - - # context manager: # switch terminal temporary to normal mode (e.g. to get user input) def __enter__(self): self.cleanup() return self def __exit__(self, *args, **kwargs): self.setup() if os.name == 'nt': # noqa import msvcrt import ctypes class Out(object): """file-like wrapper that uses os.write""" def __init__(self, fd): self.fd = fd def flush(self): pass def write(self, s): os.write(self.fd, s) class Console(ConsoleBase): def __init__(self): super(Console, self).__init__() self._saved_ocp = ctypes.windll.kernel32.GetConsoleOutputCP() self._saved_icp = ctypes.windll.kernel32.GetConsoleCP() ctypes.windll.kernel32.SetConsoleOutputCP(65001) ctypes.windll.kernel32.SetConsoleCP(65001) self.output = codecs.getwriter('UTF-8')(Out(sys.stdout.fileno()), 'replace') # the change of the code page is not propagated to Python, manually fix it sys.stderr = codecs.getwriter('UTF-8')(Out(sys.stderr.fileno()), 'replace') sys.stdout = self.output self.output.encoding = 'UTF-8' # needed for input def __del__(self): ctypes.windll.kernel32.SetConsoleOutputCP(self._saved_ocp) ctypes.windll.kernel32.SetConsoleCP(self._saved_icp) def getkey(self): while True: z = msvcrt.getwch() if z == unichr(13): return unichr(10) elif z in (unichr(0), unichr(0x0e)): # functions keys, ignore msvcrt.getwch() else: return z def cancel(self): # CancelIo, CancelSynchronousIo do not seem to work when using # getwch, so instead, send a key to the window with the console hwnd = ctypes.windll.kernel32.GetConsoleWindow() ctypes.windll.user32.PostMessageA(hwnd, 0x100, 0x0d, 0) elif os.name == 'posix': import atexit import termios import fcntl class Console(ConsoleBase): def __init__(self): super(Console, self).__init__() self.fd = sys.stdin.fileno() self.old = termios.tcgetattr(self.fd) atexit.register(self.cleanup) if sys.version_info < (3, 0): self.enc_stdin = 
codecs.getreader(sys.stdin.encoding)(sys.stdin) else: self.enc_stdin = sys.stdin def setup(self): new = termios.tcgetattr(self.fd) new[3] = new[3] & ~termios.ICANON & ~termios.ECHO & ~termios.ISIG new[6][termios.VMIN] = 1 new[6][termios.VTIME] = 0 termios.tcsetattr(self.fd, termios.TCSANOW, new) def getkey(self): c = self.enc_stdin.read(1) if c == unichr(0x7f): c = unichr(8) # map the BS key (which yields DEL) to backspace return c def cancel(self): fcntl.ioctl(self.fd, termios.TIOCSTI, b'\0') def cleanup(self): termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.old) else: raise NotImplementedError( 'Sorry no implementation for your platform ({}) available.'.format(sys.platform)) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - class Transform(object): """do-nothing: forward all data unchanged""" def rx(self, text): """text received from serial port""" return text def tx(self, text): """text to be sent to serial port""" return text def echo(self, text): """text to be sent but displayed on console""" return text class CRLF(Transform): """ENTER sends CR+LF""" def tx(self, text): return text.replace('\n', '\r\n') class CR(Transform): """ENTER sends CR""" def rx(self, text): return text.replace('\r', '\n') def tx(self, text): return text.replace('\n', '\r') class LF(Transform): """ENTER sends LF""" class NoTerminal(Transform): """remove typical terminal control codes from input""" REPLACEMENT_MAP = dict((x, 0x2400 + x) for x in range(32) if unichr(x) not in '\r\n\b\t') REPLACEMENT_MAP.update( { 0x7F: 0x2421, # DEL 0x9B: 0x2425, # CSI }) def rx(self, text): return text.translate(self.REPLACEMENT_MAP) echo = rx class NoControls(NoTerminal): """Remove all control codes, incl. CR+LF""" REPLACEMENT_MAP = dict((x, 0x2400 + x) for x in range(32)) REPLACEMENT_MAP.update( { 0x20: 0x2423, # visual space 0x7F: 0x2421, # DEL 0x9B: 0x2425, # CSI }) class Printable(Transform): """Show decimal code for all non-ASCII characters and replace most control codes""" def rx(self, text): r = [] for c in text: if ' ' <= c < '\x7f' or c in '\r\n\b\t': r.append(c) elif c < ' ': r.append(unichr(0x2400 + ord(c))) else: r.extend(unichr(0x2080 + ord(d) - 48) for d in '{:d}'.format(ord(c))) r.append(' ') return ''.join(r) echo = rx class Colorize(Transform): """Apply different colors for received and echo""" def __init__(self): # XXX make it configurable, use colorama? self.input_color = '\x1b[37m' self.echo_color = '\x1b[31m' def rx(self, text): return self.input_color + text def echo(self, text): return self.echo_color + text class DebugIO(Transform): """Print what is sent and received""" def rx(self, text): sys.stderr.write(' [RX:{}] '.format(repr(text))) sys.stderr.flush() return text def tx(self, text): sys.stderr.write(' [TX:{}] '.format(repr(text))) sys.stderr.flush() return text # other ideas: # - add date/time for each newline # - insert newline after: a) timeout b) packet end character EOL_TRANSFORMATIONS = { 'crlf': CRLF, 'cr': CR, 'lf': LF, } TRANSFORMATIONS = { 'direct': Transform, # no transformation 'default': NoTerminal, 'nocontrol': NoControls, 'printable': Printable, 'colorize': Colorize, 'debug': DebugIO, } # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - def ask_for_port(): """\ Show a list of ports and ask the user for a choice. To make selection easier on systems with long device names, also allow the input of an index. 
""" sys.stderr.write('\n--- Available ports:\n') ports = [] for n, (port, desc, hwid) in enumerate(sorted(comports()), 1): sys.stderr.write('--- {:2}: {:20} {!r}\n'.format(n, port, desc)) ports.append(port) while True: port = raw_input('--- Enter port index or full name: ') try: index = int(port) - 1 if not 0 <= index < len(ports): sys.stderr.write('--- Invalid index!\n') continue except ValueError: pass else: port = ports[index] return port class Miniterm(object): """\ Terminal application. Copy data from serial port to console and vice versa. Handle special keys from the console to show menu etc. """ def __init__(self, serial_instance, echo=False, eol='crlf', filters=()): self.console = Console() self.serial = serial_instance self.echo = echo self.raw = False self.input_encoding = 'UTF-8' self.output_encoding = 'UTF-8' self.eol = eol self.filters = filters self.update_transformations() self.exit_character = 0x1d # GS/CTRL+] self.menu_character = 0x14 # Menu: CTRL+T self.alive = None self._reader_alive = None self.receiver_thread = None self.rx_decoder = None self.tx_decoder = None def _start_reader(self): """Start reader thread""" self._reader_alive = True # start serial->console thread self.receiver_thread = threading.Thread(target=self.reader, name='rx') self.receiver_thread.daemon = True self.receiver_thread.start() def _stop_reader(self): """Stop reader thread only, wait for clean exit of thread""" self._reader_alive = False if hasattr(self.serial, 'cancel_read'): self.serial.cancel_read() self.receiver_thread.join() def start(self): """start worker threads""" self.alive = True self._start_reader() # enter console->serial loop self.transmitter_thread = threading.Thread(target=self.writer, name='tx') self.transmitter_thread.daemon = True self.transmitter_thread.start() self.console.setup() def stop(self): """set flag to stop worker threads""" self.alive = False def join(self, transmit_only=False): """wait for worker threads to terminate""" self.transmitter_thread.join() if not transmit_only: if hasattr(self.serial, 'cancel_read'): self.serial.cancel_read() self.receiver_thread.join() def close(self): self.serial.close() def update_transformations(self): """take list of transformation classes and instantiate them for rx and tx""" transformations = [EOL_TRANSFORMATIONS[self.eol]] + [TRANSFORMATIONS[f] for f in self.filters] self.tx_transformations = [t() for t in transformations] self.rx_transformations = list(reversed(self.tx_transformations)) def set_rx_encoding(self, encoding, errors='replace'): """set encoding for received data""" self.input_encoding = encoding self.rx_decoder = codecs.getincrementaldecoder(encoding)(errors) def set_tx_encoding(self, encoding, errors='replace'): """set encoding for transmitted data""" self.output_encoding = encoding self.tx_encoder = codecs.getincrementalencoder(encoding)(errors) def dump_port_settings(self): """Write current settings to sys.stderr""" sys.stderr.write("\n--- Settings: {p.name} {p.baudrate},{p.bytesize},{p.parity},{p.stopbits}\n".format( p=self.serial)) sys.stderr.write('--- RTS: {:8} DTR: {:8} BREAK: {:8}\n'.format( ('active' if self.serial.rts else 'inactive'), ('active' if self.serial.dtr else 'inactive'), ('active' if self.serial.break_condition else 'inactive'))) try: sys.stderr.write('--- CTS: {:8} DSR: {:8} RI: {:8} CD: {:8}\n'.format( ('active' if self.serial.cts else 'inactive'), ('active' if self.serial.dsr else 'inactive'), ('active' if self.serial.ri else 'inactive'), ('active' if self.serial.cd else 'inactive'))) except 
serial.SerialException: # on RFC 2217 ports, it can happen if no modem state notification was # yet received. ignore this error. pass sys.stderr.write('--- software flow control: {}\n'.format('active' if self.serial.xonxoff else 'inactive')) sys.stderr.write('--- hardware flow control: {}\n'.format('active' if self.serial.rtscts else 'inactive')) sys.stderr.write('--- serial input encoding: {}\n'.format(self.input_encoding)) sys.stderr.write('--- serial output encoding: {}\n'.format(self.output_encoding)) sys.stderr.write('--- EOL: {}\n'.format(self.eol.upper())) sys.stderr.write('--- filters: {}\n'.format(' '.join(self.filters))) def reader(self): """loop and copy serial->console""" try: while self.alive and self._reader_alive: # read all that is there or wait for one byte data = self.serial.read(self.serial.in_waiting or 1) if data: if self.raw: self.console.write_bytes(data) else: text = self.rx_decoder.decode(data) for transformation in self.rx_transformations: text = transformation.rx(text) self.console.write(text) except serial.SerialException: self.alive = False self.console.cancel() raise # XXX handle instead of re-raise? def writer(self): """\ Loop and copy console->serial until self.exit_character character is found. When self.menu_character is found, interpret the next key locally. """ menu_active = False try: while self.alive: try: c = self.console.getkey() except KeyboardInterrupt: c = '\x03' if not self.alive: break if menu_active: self.handle_menu_key(c) menu_active = False elif c == self.menu_character: menu_active = True # next char will be for menu elif c == self.exit_character: self.stop() # exit app break else: #~ if self.raw: text = c for transformation in self.tx_transformations: text = transformation.tx(text) self.serial.write(self.tx_encoder.encode(text)) if self.echo: echo_text = c for transformation in self.tx_transformations: echo_text = transformation.echo(echo_text) self.console.write(echo_text) except: self.alive = False raise def handle_menu_key(self, c): """Implement a simple menu / settings""" if c == self.menu_character or c == self.exit_character: # Menu/exit character again -> send itself self.serial.write(self.tx_encoder.encode(c)) if self.echo: self.console.write(c) elif c == '\x15': # CTRL+U -> upload file self.upload_file() elif c in '\x08hH?': # CTRL+H, h, H, ? 
-> Show help sys.stderr.write(self.get_help_text()) elif c == '\x12': # CTRL+R -> Toggle RTS self.serial.rts = not self.serial.rts sys.stderr.write('--- RTS {} ---\n'.format('active' if self.serial.rts else 'inactive')) elif c == '\x04': # CTRL+D -> Toggle DTR self.serial.dtr = not self.serial.dtr sys.stderr.write('--- DTR {} ---\n'.format('active' if self.serial.dtr else 'inactive')) elif c == '\x02': # CTRL+B -> toggle BREAK condition self.serial.break_condition = not self.serial.break_condition sys.stderr.write('--- BREAK {} ---\n'.format('active' if self.serial.break_condition else 'inactive')) elif c == '\x05': # CTRL+E -> toggle local echo self.echo = not self.echo sys.stderr.write('--- local echo {} ---\n'.format('active' if self.echo else 'inactive')) elif c == '\x06': # CTRL+F -> edit filters self.change_filter() elif c == '\x0c': # CTRL+L -> EOL mode modes = list(EOL_TRANSFORMATIONS) # keys eol = modes.index(self.eol) + 1 if eol >= len(modes): eol = 0 self.eol = modes[eol] sys.stderr.write('--- EOL: {} ---\n'.format(self.eol.upper())) self.update_transformations() elif c == '\x01': # CTRL+A -> set encoding self.change_encoding() elif c == '\x09': # CTRL+I -> info self.dump_port_settings() #~ elif c == '\x01': # CTRL+A -> cycle escape mode #~ elif c == '\x0c': # CTRL+L -> cycle linefeed mode elif c in 'pP': # P -> change port self.change_port() elif c in 'sS': # S -> suspend / open port temporarily self.suspend_port() elif c in 'bB': # B -> change baudrate self.change_baudrate() elif c == '8': # 8 -> change to 8 bits self.serial.bytesize = serial.EIGHTBITS self.dump_port_settings() elif c == '7': # 7 -> change to 7 bits self.serial.bytesize = serial.SEVENBITS self.dump_port_settings() elif c in 'eE': # E -> change to even parity self.serial.parity = serial.PARITY_EVEN self.dump_port_settings() elif c in 'oO': # O -> change to odd parity self.serial.parity = serial.PARITY_ODD self.dump_port_settings() elif c in 'mM': # M -> change to mark parity self.serial.parity = serial.PARITY_MARK self.dump_port_settings() elif c in 'sS': # S -> change to space parity self.serial.parity = serial.PARITY_SPACE self.dump_port_settings() elif c in 'nN': # N -> change to no parity self.serial.parity = serial.PARITY_NONE self.dump_port_settings() elif c == '1': # 1 -> change to 1 stop bits self.serial.stopbits = serial.STOPBITS_ONE self.dump_port_settings() elif c == '2': # 2 -> change to 2 stop bits self.serial.stopbits = serial.STOPBITS_TWO self.dump_port_settings() elif c == '3': # 3 -> change to 1.5 stop bits self.serial.stopbits = serial.STOPBITS_ONE_POINT_FIVE self.dump_port_settings() elif c in 'xX': # X -> change software flow control self.serial.xonxoff = (c == 'X') self.dump_port_settings() elif c in 'rR': # R -> change hardware flow control self.serial.rtscts = (c == 'R') self.dump_port_settings() else: sys.stderr.write('--- unknown menu character {} --\n'.format(key_description(c))) def upload_file(self): """Ask user for filename and send its contents""" sys.stderr.write('\n--- File to upload: ') sys.stderr.flush() with self.console: filename = sys.stdin.readline().rstrip('\r\n') if filename: try: with open(filename, 'rb') as f: sys.stderr.write('--- Sending file {} ---\n'.format(filename)) while True: block = f.read(1024) if not block: break self.serial.write(block) # Wait for output buffer to drain. self.serial.flush() sys.stderr.write('.') # Progress indicator.
sys.stderr.write('\n--- File {} sent ---\n'.format(filename)) except IOError as e: sys.stderr.write('--- ERROR opening file {}: {} ---\n'.format(filename, e)) def change_filter(self): """change the i/o transformations""" sys.stderr.write('\n--- Available Filters:\n') sys.stderr.write('\n'.join( '--- {:<10} = {.__doc__}'.format(k, v) for k, v in sorted(TRANSFORMATIONS.items()))) sys.stderr.write('\n--- Enter new filter name(s) [{}]: '.format(' '.join(self.filters))) with self.console: new_filters = sys.stdin.readline().lower().split() if new_filters: for f in new_filters: if f not in TRANSFORMATIONS: sys.stderr.write('--- unknown filter: {}\n'.format(repr(f))) break else: self.filters = new_filters self.update_transformations() sys.stderr.write('--- filters: {}\n'.format(' '.join(self.filters))) def change_encoding(self): """change encoding on the serial port""" sys.stderr.write('\n--- Enter new encoding name [{}]: '.format(self.input_encoding)) with self.console: new_encoding = sys.stdin.readline().strip() if new_encoding: try: codecs.lookup(new_encoding) except LookupError: sys.stderr.write('--- invalid encoding name: {}\n'.format(new_encoding)) else: self.set_rx_encoding(new_encoding) self.set_tx_encoding(new_encoding) sys.stderr.write('--- serial input encoding: {}\n'.format(self.input_encoding)) sys.stderr.write('--- serial output encoding: {}\n'.format(self.output_encoding)) def change_baudrate(self): """change the baudrate""" sys.stderr.write('\n--- Baudrate: ') sys.stderr.flush() with self.console: backup = self.serial.baudrate try: self.serial.baudrate = int(sys.stdin.readline().strip()) except ValueError as e: sys.stderr.write('--- ERROR setting baudrate: {} ---\n'.format(e)) self.serial.baudrate = backup else: self.dump_port_settings() def change_port(self): """Have a conversation with the user to change the serial port""" with self.console: try: port = ask_for_port() except KeyboardInterrupt: port = None if port and port != self.serial.port: # reader thread needs to be shut down self._stop_reader() # save settings settings = self.serial.getSettingsDict() try: new_serial = serial.serial_for_url(port, do_not_open=True) # restore settings and open new_serial.applySettingsDict(settings) new_serial.rts = self.serial.rts new_serial.dtr = self.serial.dtr new_serial.open() new_serial.break_condition = self.serial.break_condition except Exception as e: sys.stderr.write('--- ERROR opening new port: {} ---\n'.format(e)) new_serial.close() else: self.serial.close() self.serial = new_serial sys.stderr.write('--- Port changed to: {} ---\n'.format(self.serial.port)) # and restart the reader thread self._start_reader() def suspend_port(self): """\ open port temporarily, allow reconnect, exit and port change to get out of the loop """ # reader thread needs to be shut down self._stop_reader() self.serial.close() sys.stderr.write('\n--- Port closed: {} ---\n'.format(self.serial.port)) do_change_port = False while not self.serial.is_open: sys.stderr.write('--- Quit: {exit} | p: port change | any other key to reconnect ---\n'.format( exit=key_description(self.exit_character))) k = self.console.getkey() if k == self.exit_character: self.stop() # exit app break elif k in 'pP': do_change_port = True break try: self.serial.open() except Exception as e: sys.stderr.write('--- ERROR opening port: {} ---\n'.format(e)) if do_change_port: self.change_port() else: # and restart the reader thread self._start_reader() sys.stderr.write('--- Port opened: {} ---\n'.format(self.serial.port)) def get_help_text(self): 
"""return the help text""" # help text, starts with blank line! return """ --- pySerial ({version}) - miniterm - help --- --- {exit:8} Exit program --- {menu:8} Menu escape key, followed by: --- Menu keys: --- {menu:7} Send the menu character itself to remote --- {exit:7} Send the exit character itself to remote --- {info:7} Show info --- {upload:7} Upload file (prompt will be shown) --- {repr:7} encoding --- {filter:7} edit filters --- Toggles: --- {rts:7} RTS {dtr:7} DTR {brk:7} BREAK --- {echo:7} echo {eol:7} EOL --- --- Port settings ({menu} followed by the following): --- p change port --- 7 8 set data bits --- N E O S M change parity (None, Even, Odd, Space, Mark) --- 1 2 3 set stop bits (1, 2, 1.5) --- b change baud rate --- x X disable/enable software flow control --- r R disable/enable hardware flow control """.format(version=getattr(serial, 'VERSION', 'unknown version'), exit=key_description(self.exit_character), menu=key_description(self.menu_character), rts=key_description('\x12'), dtr=key_description('\x04'), brk=key_description('\x02'), echo=key_description('\x05'), info=key_description('\x09'), upload=key_description('\x15'), repr=key_description('\x01'), filter=key_description('\x06'), eol=key_description('\x0c')) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # default args can be used to override when calling main() from an other script # e.g to create a miniterm-my-device.py def main(default_port=None, default_baudrate=9600, default_rts=None, default_dtr=None): """Command line tool, entry point""" import argparse parser = argparse.ArgumentParser( description="Miniterm - A simple terminal program for the serial port.") parser.add_argument( "port", nargs='?', help="serial port name ('-' to show port list)", default=default_port) parser.add_argument( "baudrate", nargs='?', type=int, help="set baud rate, default: %(default)s", default=default_baudrate) group = parser.add_argument_group("port settings") group.add_argument( "--parity", choices=['N', 'E', 'O', 'S', 'M'], type=lambda c: c.upper(), help="set parity, one of {N E O S M}, default: N", default='N') group.add_argument( "--rtscts", action="store_true", help="enable RTS/CTS flow control (default off)", default=False) group.add_argument( "--xonxoff", action="store_true", help="enable software flow control (default off)", default=False) group.add_argument( "--rts", type=int, help="set initial RTS line state (possible values: 0, 1)", default=default_rts) group.add_argument( "--dtr", type=int, help="set initial DTR line state (possible values: 0, 1)", default=default_dtr) group.add_argument( "--ask", action="store_true", help="ask again for port when open fails", default=False) group = parser.add_argument_group("data handling") group.add_argument( "-e", "--echo", action="store_true", help="enable local echo (default off)", default=False) group.add_argument( "--encoding", dest="serial_port_encoding", metavar="CODEC", help="set the encoding for the serial port (e.g. 
hexlify, Latin1, UTF-8), default: %(default)s", default='UTF-8') group.add_argument( "-f", "--filter", action="append", metavar="NAME", help="add text transformation", default=[]) group.add_argument( "--eol", choices=['CR', 'LF', 'CRLF'], type=lambda c: c.upper(), help="end of line mode", default='CRLF') group.add_argument( "--raw", action="store_true", help="Do no apply any encodings/transformations", default=False) group = parser.add_argument_group("hotkeys") group.add_argument( "--exit-char", type=int, metavar='NUM', help="Unicode of special character that is used to exit the application, default: %(default)s", default=0x1d) # GS/CTRL+] group.add_argument( "--menu-char", type=int, metavar='NUM', help="Unicode code of special character that is used to control miniterm (menu), default: %(default)s", default=0x14) # Menu: CTRL+T group = parser.add_argument_group("diagnostics") group.add_argument( "-q", "--quiet", action="store_true", help="suppress non-error messages", default=False) group.add_argument( "--develop", action="store_true", help="show Python traceback on error", default=False) args = parser.parse_args() if args.menu_char == args.exit_char: parser.error('--exit-char can not be the same as --menu-char') if args.filter: if 'help' in args.filter: sys.stderr.write('Available filters:\n') sys.stderr.write('\n'.join( '{:<10} = {.__doc__}'.format(k, v) for k, v in sorted(TRANSFORMATIONS.items()))) sys.stderr.write('\n') sys.exit(1) filters = args.filter else: filters = ['default'] while True: # no port given on command line -> ask user now if args.port is None or args.port == '-': try: args.port = ask_for_port() except KeyboardInterrupt: sys.stderr.write('\n') parser.error('user aborted and port is not given') else: if not args.port: parser.error('port is not given') try: serial_instance = serial.serial_for_url( args.port, args.baudrate, parity=args.parity, rtscts=args.rtscts, xonxoff=args.xonxoff, do_not_open=True) if not hasattr(serial_instance, 'cancel_read'): # enable timeout for alive flag polling if cancel_read is not available serial_instance.timeout = 1 if args.dtr is not None: if not args.quiet: sys.stderr.write('--- forcing DTR {}\n'.format('active' if args.dtr else 'inactive')) serial_instance.dtr = args.dtr if args.rts is not None: if not args.quiet: sys.stderr.write('--- forcing RTS {}\n'.format('active' if args.rts else 'inactive')) serial_instance.rts = args.rts serial_instance.open() except serial.SerialException as e: sys.stderr.write('could not open port {}: {}\n'.format(repr(args.port), e)) if args.develop: raise if not args.ask: sys.exit(1) else: args.port = '-' else: break miniterm = Miniterm( serial_instance, echo=args.echo, eol=args.eol.lower(), filters=filters) miniterm.exit_character = unichr(args.exit_char) miniterm.menu_character = unichr(args.menu_char) miniterm.raw = args.raw miniterm.set_rx_encoding(args.serial_port_encoding) miniterm.set_tx_encoding(args.serial_port_encoding) if not args.quiet: sys.stderr.write('--- Miniterm on {p.name} {p.baudrate},{p.bytesize},{p.parity},{p.stopbits} ---\n'.format( p=miniterm.serial)) sys.stderr.write('--- Quit: {} | Menu: {} | Help: {} followed by {} ---\n'.format( key_description(miniterm.exit_character), key_description(miniterm.menu_character), key_description(miniterm.menu_character), key_description('\x08'))) miniterm.start() try: miniterm.join(True) except KeyboardInterrupt: pass if not args.quiet: sys.stderr.write("\n--- exit ---\n") miniterm.join() miniterm.close() # - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - if __name__ == '__main__': main()
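The comment block above suggests reusing main() from another script to build a device-specific terminal. A minimal sketch of that pattern, assuming the file above is importable as miniterm.py; 'loop://' is pySerial's built-in loopback port, so the sketch needs no hardware:

# miniterm-my-device.py -- hypothetical wrapper script
import miniterm  # assumed module name for the file above

if __name__ == '__main__':
    # Positional port/baudrate arguments on the command line still win;
    # these only replace the built-in defaults.
    miniterm.main(default_port='loop://', default_baudrate=115200)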
gpl-3.0
9,167,541,231,525,012,000
34.972336
116
0.527415
false
3.935987
false
false
false
openmotics/gateway
src/gateway/group_action_controller.py
1
2954
# Copyright (C) 2020 OpenMotics BV # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """ GroupAction BLL """ from __future__ import absolute_import import logging from ioc import Injectable, Inject, INJECTED, Singleton from gateway.base_controller import BaseController, SyncStructure from gateway.dto import GroupActionDTO from gateway.models import GroupAction if False: # MYPY from typing import List, Tuple logger = logging.getLogger(__name__) @Injectable.named('group_action_controller') @Singleton class GroupActionController(BaseController): SYNC_STRUCTURES = [SyncStructure(GroupAction, 'group_action')] @Inject def __init__(self, master_controller=INJECTED): super(GroupActionController, self).__init__(master_controller) def do_basic_action(self, action_type, action_number): # type: (int, int) -> None self._master_controller.do_basic_action(action_type, action_number) def do_group_action(self, group_action_id): # type: (int) -> None self._master_controller.do_group_action(group_action_id) def load_group_action(self, group_action_id): # type: (int) -> GroupActionDTO group_action = GroupAction.get(number=group_action_id) # type: GroupAction # TODO: Use exists group_action_dto = self._master_controller.load_group_action(group_action_id=group_action.number) return group_action_dto def load_group_actions(self): # type: () -> List[GroupActionDTO] group_action_dtos = [] for group_action in list(GroupAction.select()): # TODO: Only fetch the numbers group_action_dto = self._master_controller.load_group_action(group_action_id=group_action.number) group_action_dtos.append(group_action_dto) return group_action_dtos def save_group_actions(self, group_actions): # type: (List[GroupActionDTO]) -> None group_actions_to_save = [] for group_action_dto in group_actions: group_action = GroupAction.get_or_none(number=group_action_dto.id) # type: GroupAction if group_action is None: logger.info('Ignored saving non-existing GroupAction {0}'.format(group_action_dto.id)) continue group_actions_to_save.append(group_action_dto) self._master_controller.save_group_actions(group_actions_to_save)
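A minimal interaction sketch. FakeMaster is an illustrative test double, not part of the gateway code base; the sketch assumes the Inject decorator accepts an explicitly passed master_controller in place of the INJECTED sentinel, and it sticks to the two methods that only delegate (the load/save methods additionally need the peewee-backed GroupAction model):

class FakeMaster(object):  # hypothetical stand-in for the master controller
    def do_basic_action(self, action_type, action_number):
        print('basic action {0}/{1}'.format(action_type, action_number))

    def do_group_action(self, group_action_id):
        print('group action {0}'.format(group_action_id))


controller = GroupActionController(master_controller=FakeMaster())
controller.do_basic_action(action_type=0, action_number=5)
controller.do_group_action(3)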
agpl-3.0
3,239,037,115,919,670,000
43.089552
109
0.708869
false
3.674129
false
false
false
SymbiFlow/edalize
tests/test_vunit/run.py
1
1483
# Auto generated by Edalize def load_module_from_file(name, python_file): import importlib.util spec = importlib.util.spec_from_file_location(name, python_file) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) return module def load_runner_hooks(python_file=r''): if len(python_file) > 0: return load_module_from_file('vunit_runner_hooks', python_file) else: return __import__('edalize.vunit_hooks', fromlist=['vunit_hooks']) runner = load_runner_hooks().VUnitRunner() # Override this hook to allow custom creation and configuration of the VUnit instance: vu = runner.create() lib = vu.add_library("vunit_test_runner_lib") lib.add_source_files("sv_file.sv") lib.add_source_files("vlog_file.v") lib.add_source_files("vlog05_file.v") lib.add_source_files("vhdl_file.vhd") lib.add_source_files("vhdl2008_file", vhdl_standard="2008") lib.add_source_files("another_sv_file.sv") # Override this hook to customize the library, e.g. compile flags etc. # This allows full access to the vunit.ui.Library interface: runner.handle_library("vunit_test_runner_lib", lib) lib = vu.add_library("libx") lib.add_source_files("vhdl_lfile") # Override this hook to customize the library, e.g. compile flags etc. # This allows full access to the vunit.ui.Library interface: runner.handle_library("libx", lib) # Override this hook to perform final customization and parametrization of VUnit, custom invocation, etc. runner.main(vu)
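For reference, a sketch of what a custom hooks file loaded by load_runner_hooks() could look like. The only contract assumed here is the one exercised above: a VUnitRunner class exposing create(), handle_library() and main(); the file name is hypothetical.

# vunit_runner_hooks.py -- hypothetical custom hooks module
import sys
from vunit import VUnit

class VUnitRunner(object):
    def create(self):
        # Build the VUnit instance from the command line, as the default hooks do.
        return VUnit.from_argv(argv=sys.argv[1:])

    def handle_library(self, name, lib):
        # Per-library customization point, e.g. compile flags.
        print('configuring library', name)

    def main(self, vu):
        vu.main()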
bsd-2-clause
-3,198,943,249,158,944,300
35.170732
105
0.732974
false
3.135307
false
false
false
endere/Data-structures-2nd-half
src/hash.py
1
2101
"""Hash tables.""" import bst class HashTable(object): """Class object for our Hash Table.""" def __init__(self, bucket_number, function='fnv'): """Init for our hash. Accepts a string to determine which hash the table uses. """ self.bucket_list = [] self.function = function self.bucket_number = bucket_number for i in range(bucket_number): self.bucket_list.append(bst.BinarySearchTree()) def _hash(self, key): """Use fnv hash if function is fnv, or uses additive hash if function is add.""" if self.function == 'fnv': h = 2166136261 for i in range(len(key)): h = (h * 16777619) ^ ord(key[i]) return h elif self.function == 'add': h = 0 for i in range(len(key)): h += ord(key[i]) return h def set(self, key, value): """Place an item in the hash table.""" number = self._hash(key) stored_key = number if self.function == 'fnv' else key if self.get(key) is None: self.bucket_list[number % self.bucket_number].insert(stored_key, value) def get(self, key): """Use a key to retrieve a stored value from the table.""" if type(key) != str: raise TypeError("This is not the string you're looking for!") number = self._hash(key) stored_key = number if self.function == 'fnv' else key try: return self.bucket_list[number % self.bucket_number].search(stored_key).stored_value except AttributeError: return None if __name__ == '__main__': test_table = HashTable(1021) with open('/usr/share/dict/words') as dictionary: data = dictionary.read() data = data.split('\n') for i in range(len(data)): print(len(data) - i) test_table.set(data[i], data[i]) # print(type(test_table.dict_bst)) for i in test_table.bucket_list: print("key: {} , len: {}".format(test_table.bucket_list.index(i), i.size()))
mit
8,588,302,588,909,000,000
34.016667
96
0.557354
false
3.778777
false
false
false
juancarlospaco/microraptor
setup.py
1
4696
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # # To generate DEB package from Python Package: # sudo pip3 install stdeb # python3 setup.py --verbose --command-packages=stdeb.command bdist_deb # # # To generate RPM package from Python Package: # sudo apt-get install rpm # python3 setup.py bdist_rpm --verbose --fix-python --binary-only # # # To generate EXE MS Windows from Python Package (from MS Windows only): # python3 setup.py bdist_wininst --verbose # # # To generate PKGBUILD ArchLinux from Python Package (from PyPI only): # sudo pip3 install git+https://github.com/bluepeppers/pip2arch.git # pip2arch.py PackageNameHere # # # To Upload to PyPI by executing: # sudo pip install --upgrade pip setuptools wheel virtualenv # python3 setup.py bdist_egg bdist_wheel --universal sdist --formats=zip upload --sign """Setup.py for Python, as Generic as possible.""" import os import re import sys from setuptools import setup, Command from zipapp import create_archive ############################################################################## # EDIT HERE MODULE_PATH = os.path.join(os.path.dirname(__file__), "microraptor.py") DESCRIPTION = """Microraptor builds cool presentations using Angler, Impress and Markdown. Presentations using a simple MarkDown file. Convert a GitHub README.md to Presentations with one command.""" ############################################################################## # Dont touch below try: with open(str(MODULE_PATH), "r", encoding="utf-8-sig") as source_code_file: SOURCE = source_code_file.read() except: with open(str(MODULE_PATH), "r") as source_code_file: SOURCE = source_code_file.read() def find_this(search, source=SOURCE): """Take a string and a filename path string and return the found value.""" print("Searching for {what}.".format(what=search)) if not search or not source: print("Not found on source: {what}.".format(what=search)) return "" return str(re.compile(r".*__{what}__ = '(.*?)'".format( what=search), re.S).match(source).group(1)).strip().replace("'", "") class ZipApp(Command): description, user_options = "Creates a zipapp.", [] def initialize_options(self): pass # Dont needed, but required. def finalize_options(self): pass # Dont needed, but required. 
def run(self): return create_archive("microraptor.py", "microraptor.pyz", "/usr/bin/env python3") print("Starting build of setuptools.setup().") ############################################################################## # EDIT HERE setup( name="microraptor", version=find_this("version"), description="Presentation builder using Markdown and ImpressJS.", long_description=DESCRIPTION, url=find_this("url"), license=find_this("license"), author=find_this("author"), author_email=find_this("email"), maintainer=find_this("author"), maintainer_email=find_this("email"), include_package_data=True, zip_safe=True, install_requires=['anglerfish', 'mistune', 'pygments'], setup_requires=['anglerfish', 'mistune', 'pygments'], tests_require=['anglerfish', 'mistune', 'pygments'], requires=['anglerfish', 'mistune', 'pygments'], scripts=["microraptor.py"], cmdclass={"zipapp": ZipApp}, keywords=['ImpressJS', 'presentation', 'HTML5', 'Markdown', 'impress', 'CSS', 'HTML', 'Web', 'GFM', 'KISS', 'Builder', 'HTML'], classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Intended Audience :: Developers', 'Intended Audience :: System Administrators', 'Intended Audience :: Other Audience', 'Natural Language :: English', 'License :: OSI Approved :: GNU General Public License (GPL)', 'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)', 'License :: OSI Approved :: GNU Lesser General Public License v3 or later (LGPLv3+)', 'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)', 'Operating System :: OS Independent', 'Operating System :: POSIX :: Linux', 'Operating System :: Microsoft :: Windows', 'Operating System :: MacOS :: MacOS X', 'Programming Language :: Python', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: Implementation :: CPython', 'Programming Language :: Python :: Implementation :: PyPy', 'Topic :: Software Development', ], ) print("Finished build of setuptools.setup().")
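find_this() drives most of the metadata above by scanning the module source for dunder assignments. Since importing this setup.py would execute setup() itself, here is a standalone sketch of the same regex on an inline sample:

import re

sample = "__version__ = '1.2.3'\n__url__ = 'https://example.org'"
match = re.compile(r".*__version__ = '(.*?)'", re.S).match(sample)
print(match.group(1))  # -> 1.2.3

The ZipApp command wired in through cmdclass above runs as "python3 setup.py zipapp" and emits microraptor.pyz via zipapp.create_archive.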
gpl-3.0
-6,046,680,466,054,644,000
28.721519
93
0.622445
false
3.989805
false
false
false
COCS4950G7/COSC4950
Source/GUI/frameGUI_V1.4.py
1
4896
__author__ = 'Jon' import Tkconstants import tkFileDialog from Tkinter import * import Tkinter as Tk ######################################################################## ## adding a hashing button, used to test basic functionality of enter text. ## Test commit for pycharm rebuild class MyApp(object): """""" #---------------------------------------------------------------------- def __init__(self, parent): """Constructor""" self.root = parent self.root.title("Main frame") self.frame = Tk.Frame(parent) self.frame.pack() title1 = Tk.Label(self.frame, text="Single User Mode") title1.grid(row=1, column=0) btn_dictionary = Tk.Button(self.frame, text="Dictionary", command=self.open_dictionary) btn_dictionary.grid(row=2, column=0) btn_brute_force = Tk.Button(self.frame, text="Brute Force", command=self.open_brute_force) btn_brute_force.grid(row=2, column=1) btn_node = Tk.Button(self.frame, text="Node", command=self.open_node) btn_node.grid(row=4, column=0) btn_server = Tk.Button(self.frame, text="Server", command=self.open_server) btn_server.grid(row=4, column=2) #---------------------------------------------------------------------- def hide(self): """""" self.root.withdraw() #---------------------------------------------------------------------- def openFrame(self): """""" self.hide() otherFrame = Tk.Toplevel() otherFrame.geometry("400x300") otherFrame.title("otherFrame") handler = lambda: self.onCloseOtherFrame(otherFrame) btn = Tk.Button(otherFrame, text="Close", command=handler) btn.pack() #---------------------------------------------------------------------- def open_node(self): """""" self.hide() node_frame = Tk.Toplevel() node_frame.geometry("400x300") node_frame.title("Node") handler = lambda: self.onCloseOtherFrame(node_frame) btn = Tk.Button(node_frame, text="Back", command=handler) btn.pack() #---------------------------------------------------------------------- def open_server(self): """""" self.hide() server_frame = Tk.Toplevel() server_frame.geometry("400x300") server_frame.title("Server") handler = lambda: self.onCloseOtherFrame(server_frame) btn = Tk.Button(server_frame, text="Back", command=handler) btn.pack() #---------------------------------------------------------------------- def open_sum(self): """""" self.hide() sum_frame = Tk.Toplevel() sum_frame.geometry("400x300") sum_frame.title("Single User Mode") handler = lambda: self.onCloseOtherFrame(sum_frame) btn = Tk.Button(sum_frame, text="Back", command=handler) btn.pack() #---------------------------------------------------------------------- def open_brute_force(self): """""" self.hide() brute_force_frame = Tk.Toplevel() brute_force_frame.geometry("400x300") brute_force_frame.title("Brute Force") handler = lambda: self.onCloseOtherFrame(brute_force_frame) btn = Tk.Button(brute_force_frame, text="Back", command=handler) btn.pack() #---------------------------------------------------------------------- def open_dictionary(self): """""" self.hide() dictionary_frame = Tk.Toplevel() dictionary_frame.geometry("400x300") dictionary_frame.title("Dictionary") handler = lambda: self.onCloseOtherFrame(dictionary_frame) btn = Tk.Button(dictionary_frame, text="Back", command=handler) btn.pack() #---------------------------------------------------------------------- def onCloseOtherFrame(self, otherFrame): """""" otherFrame.destroy() self.show() #---------------------------------------------------------------------- def show(self): """""" self.root.update() self.root.deiconify()
#---------------------------------------------------------------------- def askopenfile(self): """Returns an opened file in read mode.""" self.file_opt = options = {} options['defaultextension'] = '.txt' options['filetypes'] = [('all files', '.*'), ('text files', '.txt')] options['initialdir'] = 'C:\\' options['initialfile'] = 'myfile.txt' options['parent'] = root options['title'] = 'This is a title' return tkFileDialog.askopenfile(mode='r', **self.file_opt) #---------------------------------------------------------------------- if __name__ == "__main__": root = Tk.Tk() root.geometry("800x600") app = MyApp(root) root.mainloop()
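A sketch of embedding MyApp with a different window size; the module name frame_gui is assumed, and importing the file is safe because the mainloop sits behind the __name__ guard (this is Python 2 code, hence the Tkinter spelling):

import Tkinter as Tk
from frame_gui import MyApp  # assumed module name for the file above

root = Tk.Tk()
root.geometry("640x480")
app = MyApp(root)
root.mainloop()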
gpl-3.0
-3,765,599,085,140,937,000
34.744526
99
0.469771
false
4.442831
false
false
false
jdavidrcamacho/Tests_GP
02 - Programs being tested/RV_function.py
1
3615
# -*- coding: utf-8 -*- """ Created on Fri Feb 3 11:36:58 2017 @author: camacho """ import numpy as np import matplotlib.pyplot as pl pl.close("all") ##### RV FUNCTION 1 - circular orbit def RV_circular(P=365,K=0.1,T=0,gamma=0,time=100,space=20): #parameters #P = period in days #K = semi-amplitude of the signal #T = velocity at zero phase #gamma = average velocity of the star #time = time of the simulation #space => I want an observation every time/space days t=np.linspace(0,time,space) RV=[K*np.sin(2*np.pi*x/P - T) + gamma for x in t] RV=[x for x in RV] #m/s return [t,RV] ##### RV FUNCTION 2 - keplerian orbit def RV_kepler(P=365,e=0,K=0.1,T=0,gamma=0,w=np.pi,time=100,space=1000): #parameters #P = period in days #e = eccentricity #K = RV amplitude #gamma = constant system RV #T = zero phase #w = longitude of the periastron #time = time of the simulation #space => I want an observation every time/space days t=np.linspace(0,time,space) #mean anomaly Mean_anom=[2*np.pi*(x1-T)/P for x1 in t] #eccentric anomaly -> E0=M + e*sin(M) + 0.5*(e**2)*sin(2*M) E0=[x + e*np.sin(x) + 0.5*(e**2)*np.sin(2*x) for x in Mean_anom] #mean anomaly -> M0=E0 - e*sin(E0) M0=[x - e*np.sin(x) for x in E0] i=0 while i<100: #[x + y for x, y in zip(first, second)] calc_aux=[x2-y for x2,y in zip(Mean_anom,M0)] E1=[x3 + y/(1-e*np.cos(x3)) for x3,y in zip(E0,calc_aux)] M1=[x4 - e*np.sin(x4) for x4 in E0] i+=1 E0=E1 M0=M1 nu=[2*np.arctan(np.sqrt((1+e)/(1-e))*np.tan(x5/2)) for x5 in E0] RV=[ gamma + K*(e*np.cos(w)+np.cos(w+x6)) for x6 in nu] RV=[x for x in RV] #m/s return t,RV #Examples #a=RV_circular() #pl.figure('RV_circular with P=365') #pl.plot(a[0],a[1],':',) #pl.title('planet of 365 days orbit') #pl.xlabel('time') #pl.ylabel('RV (Km/s)') #b=RV_circular(P=100) #pl.figure('RV_circular with P=100') #pl.title('planet of 100 days orbit') #pl.plot(b[0],b[1],':',) #pl.xlabel('time') #pl.ylabel('RV (Km/s)') #c=RV_kepler(P=100,e=0,w=np.pi,time=100) #pl.figure() #pl.plot(c[0],c[1],':',) #pl.title('P=100, e=0, w=pi, time=100') #pl.xlabel('time') #pl.ylabel('RV (Km/s)') #d1=RV_kepler(P=100,e=0, w=0,time=500) #pl.figure() #pl.title('P=100, e=0, w=pi, time=25') #pl.plot(d[0],d[1],'-',) #pl.xlabel('time') #pl.ylabel('RV (Km/s)') #d2=RV_kepler(P=100,e=0, w=np.pi,time=500) #pl.figure() #pl.title('P=100, e=0, w=pi, time=25') #pl.plot(d[0],d[1],'-',) #pl.xlabel('time') #pl.ylabel('RV (Km/s)') #d3=RV_kepler(P=100,e=0.5, w=np.pi,time=500) #pl.figure() #pl.title('P=100, e=0, w=pi, time=25') #pl.plot(d[0],d[1],'-',) #pl.xlabel('time') #pl.ylabel('RV (Km/s)') #d4=RV_kepler(P=100,e=0.5, w=np.pi/2,time=500) #pl.figure() #pl.title('P=100, e=0, w=pi, time=25') #pl.plot(d[0],d[1],'-',) #pl.xlabel('time') #pl.ylabel('RV (Km/s)') d1=RV_kepler(P=100,e=0, w=0,time=500) d2=RV_kepler(P=100,e=0.5, w=0,time=500) d3=RV_kepler(P=100,e=0.5, w=np.pi,time=500) d4=RV_kepler(P=100,e=0.5, w=np.pi/2,time=500) # Four axes, returned as a 2-d array f, axarr = pl.subplots(2, 2) axarr[0, 0].plot(d1[0],d1[1]) axarr[0, 0].set_title('e=0 and w=0') axarr[0, 1].plot(d2[0],d2[1]) axarr[0, 1].set_title('e=0.5, w=0') axarr[1, 0].plot(d3[0],d3[1]) axarr[1, 0].set_title('e=0.5, w=pi') axarr[1, 1].plot(d4[0],d4[1]) axarr[1, 1].set_title('e=0.5, w=pi/2') #pl.setp(pl.xticks(fontsize = 18) for a in axarr[0,:])#pl.yticks(fontsize=18)) pl.setp([a.get_xticklabels() for a in axarr[0, :]], visible=False)
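A quick numeric check of RV_circular(); it assumes the definitions above are in scope (note that importing the file also executes the subplot demo at the bottom, since that part is not guarded by __name__ == '__main__'):

t, rv = RV_circular(P=365, K=0.1, time=365, space=50)
print(len(t))             # 50 samples
print(max(rv), min(rv))   # ~ +0.1 / -0.1, i.e. the semi-amplitude K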
mit
1,223,838,283,404,268,000
25.202899
78
0.582849
false
2.094438
false
false
false
IamJeffG/geopandas
geopandas/plotting.py
1
13216
from __future__ import print_function import warnings import numpy as np from six import next from six.moves import xrange from shapely.geometry import Polygon def plot_polygon(ax, poly, facecolor='red', edgecolor='black', alpha=0.5, linewidth=1.0, **kwargs): """ Plot a single Polygon geometry """ from descartes.patch import PolygonPatch a = np.asarray(poly.exterior) if poly.has_z: poly = Polygon(zip(*poly.exterior.xy)) # without Descartes, we could make a Patch of exterior ax.add_patch(PolygonPatch(poly, facecolor=facecolor, linewidth=0, alpha=alpha)) # linewidth=0 because boundaries are drawn separately ax.plot(a[:, 0], a[:, 1], color=edgecolor, linewidth=linewidth, **kwargs) for p in poly.interiors: x, y = zip(*p.coords) ax.plot(x, y, color=edgecolor, linewidth=linewidth) def plot_multipolygon(ax, geom, facecolor='red', edgecolor='black', alpha=0.5, linewidth=1.0, **kwargs): """ Can safely call with either Polygon or Multipolygon geometry """ if geom.type == 'Polygon': plot_polygon(ax, geom, facecolor=facecolor, edgecolor=edgecolor, alpha=alpha, linewidth=linewidth, **kwargs) elif geom.type == 'MultiPolygon': for poly in geom.geoms: plot_polygon(ax, poly, facecolor=facecolor, edgecolor=edgecolor, alpha=alpha, linewidth=linewidth, **kwargs) def plot_linestring(ax, geom, color='black', linewidth=1.0, **kwargs): """ Plot a single LineString geometry """ a = np.array(geom) ax.plot(a[:, 0], a[:, 1], color=color, linewidth=linewidth, **kwargs) def plot_multilinestring(ax, geom, color='red', linewidth=1.0, **kwargs): """ Can safely call with either LineString or MultiLineString geometry """ if geom.type == 'LineString': plot_linestring(ax, geom, color=color, linewidth=linewidth, **kwargs) elif geom.type == 'MultiLineString': for line in geom.geoms: plot_linestring(ax, line, color=color, linewidth=linewidth, **kwargs) def plot_point(ax, pt, marker='o', markersize=2, color='black', **kwargs): """ Plot a single Point geometry """ ax.plot(pt.x, pt.y, marker=marker, markersize=markersize, color=color, **kwargs) def gencolor(N, colormap='Set1'): """ Color generator intended to work with one of the ColorBrewer qualitative color scales. Suggested values of colormap are the following: Accent, Dark2, Paired, Pastel1, Pastel2, Set1, Set2, Set3 (although any matplotlib colormap will work). """ from matplotlib import cm # don't use more than 9 discrete colors n_colors = min(N, 9) cmap = cm.get_cmap(colormap, n_colors) colors = cmap(range(n_colors)) for i in xrange(N): yield colors[i % n_colors] def plot_series(s, cmap='Set1', color=None, ax=None, linewidth=1.0, figsize=None, **color_kwds): """ Plot a GeoSeries Generate a plot of a GeoSeries geometry with matplotlib. Parameters ---------- Series The GeoSeries to be plotted. Currently Polygon, MultiPolygon, LineString, MultiLineString and Point geometries can be plotted. cmap : str (default 'Set1') The name of a colormap recognized by matplotlib. Any colormap will work, but categorical colormaps are generally recommended. Examples of useful discrete colormaps include: Accent, Dark2, Paired, Pastel1, Pastel2, Set1, Set2, Set3 color : str (default None) If specified, all objects will be colored uniformly. ax : matplotlib.pyplot.Artist (default None) axes on which to draw the plot linewidth : float (default 1.0) Line width for geometries. figsize : pair of floats (default None) Size of the resulting matplotlib.figure.Figure. If the argument ax is given explicitly, figsize is ignored. 
**color_kwds : dict Color options to be passed on to the actual plot function Returns ------- matplotlib axes instance """ if 'colormap' in color_kwds: warnings.warn("'colormap' is deprecated, please use 'cmap' instead " "(for consistency with matplotlib)", FutureWarning) cmap = color_kwds.pop('colormap') if 'axes' in color_kwds: warnings.warn("'axes' is deprecated, please use 'ax' instead " "(for consistency with pandas)", FutureWarning) ax = color_kwds.pop('axes') import matplotlib.pyplot as plt if ax is None: fig, ax = plt.subplots(figsize=figsize) ax.set_aspect('equal') color_generator = gencolor(len(s), colormap=cmap) for geom in s: if color is None: col = next(color_generator) else: col = color if geom.type == 'Polygon' or geom.type == 'MultiPolygon': if 'facecolor' in color_kwds: plot_multipolygon(ax, geom, linewidth=linewidth, **color_kwds) else: plot_multipolygon(ax, geom, facecolor=col, linewidth=linewidth, **color_kwds) elif geom.type == 'LineString' or geom.type == 'MultiLineString': plot_multilinestring(ax, geom, color=col, linewidth=linewidth, **color_kwds) elif geom.type == 'Point': plot_point(ax, geom, color=col, **color_kwds) plt.draw() return ax def plot_dataframe(s, column=None, cmap=None, color=None, linewidth=1.0, categorical=False, legend=False, ax=None, scheme=None, k=5, vmin=None, vmax=None, figsize=None, **color_kwds): """ Plot a GeoDataFrame Generate a plot of a GeoDataFrame with matplotlib. If a column is specified, the plot coloring will be based on values in that column. Otherwise, a categorical plot of the geometries in the `geometry` column will be generated. Parameters ---------- GeoDataFrame The GeoDataFrame to be plotted. Currently Polygon, MultiPolygon, LineString, MultiLineString and Point geometries can be plotted. column : str (default None) The name of the column to be plotted. categorical : bool (default False) If False, cmap will reflect numerical values of the column being plotted. For non-numerical columns (or if column=None), this will be set to True. cmap : str (default 'Set1') The name of a colormap recognized by matplotlib. color : str (default None) If specified, all objects will be colored uniformly. linewidth : float (default 1.0) Line width for geometries. legend : bool (default False) Plot a legend (Experimental; currently for categorical plots only) ax : matplotlib.pyplot.Artist (default None) axes on which to draw the plot scheme : pysal.esda.mapclassify.Map_Classifier Choropleth classification schemes (requires PySAL) k : int (default 5) Number of classes (ignored if scheme is None) vmin : None or float (default None) Minimum value of cmap. If None, the minimum data value in the column to be plotted is used. vmax : None or float (default None) Maximum value of cmap. If None, the maximum data value in the column to be plotted is used. figsize Size of the resulting matplotlib.figure.Figure. If the argument axes is given explicitly, figsize is ignored. 
**color_kwds : dict Color options to be passed on to the actual plot function Returns ------- matplotlib axes instance """ if 'colormap' in color_kwds: warnings.warn("'colormap' is deprecated, please use 'cmap' instead " "(for consistency with matplotlib)", FutureWarning) cmap = color_kwds.pop('colormap') if 'axes' in color_kwds: warnings.warn("'axes' is deprecated, please use 'ax' instead " "(for consistency with pandas)", FutureWarning) ax = color_kwds.pop('axes') import matplotlib.pyplot as plt from matplotlib.lines import Line2D from matplotlib.colors import Normalize from matplotlib import cm if column is None: return plot_series(s.geometry, cmap=cmap, color=color, ax=ax, linewidth=linewidth, figsize=figsize, **color_kwds) else: if s[column].dtype is np.dtype('O'): categorical = True if categorical: if cmap is None: cmap = 'Set1' categories = list(set(s[column].values)) categories.sort() valuemap = dict([(k, v) for (v, k) in enumerate(categories)]) values = [valuemap[k] for k in s[column]] else: values = s[column] if scheme is not None: binning = __pysal_choro(values, scheme, k=k) values = binning.yb # set categorical to True for creating the legend categorical = True binedges = [binning.yb.min()] + binning.bins.tolist() categories = ['{0:.2f} - {1:.2f}'.format(binedges[i], binedges[i+1]) for i in range(len(binedges)-1)] cmap = norm_cmap(values, cmap, Normalize, cm, vmin=vmin, vmax=vmax) if ax is None: fig, ax = plt.subplots(figsize=figsize) ax.set_aspect('equal') for geom, value in zip(s.geometry, values): if color is None: col = cmap.to_rgba(value) else: col = color if geom.type == 'Polygon' or geom.type == 'MultiPolygon': plot_multipolygon(ax, geom, facecolor=col, linewidth=linewidth, **color_kwds) elif geom.type == 'LineString' or geom.type == 'MultiLineString': plot_multilinestring(ax, geom, color=col, linewidth=linewidth, **color_kwds) elif geom.type == 'Point': plot_point(ax, geom, color=col, **color_kwds) if legend: if categorical: patches = [] for value, cat in enumerate(categories): patches.append(Line2D([0], [0], linestyle="none", marker="o", alpha=color_kwds.get('alpha', 0.5), markersize=10, markerfacecolor=cmap.to_rgba(value))) ax.legend(patches, categories, numpoints=1, loc='best') else: # TODO: show a colorbar raise NotImplementedError plt.draw() return ax def __pysal_choro(values, scheme, k=5): """ Wrapper for choropleth schemes from PySAL for use with plot_dataframe Parameters ---------- values Series to be plotted scheme pysal.esda.mapclassify classificatin scheme ['Equal_interval'|'Quantiles'|'Fisher_Jenks'] k number of classes (2 <= k <=9) Returns ------- binning Binning objects that holds the Series with values replaced with class identifier and the bins. """ try: from pysal.esda.mapclassify import Quantiles, Equal_Interval, Fisher_Jenks schemes = {} schemes['equal_interval'] = Equal_Interval schemes['quantiles'] = Quantiles schemes['fisher_jenks'] = Fisher_Jenks s0 = scheme scheme = scheme.lower() if scheme not in schemes: scheme = 'quantiles' warnings.warn('Unrecognized scheme "{0}". 
Using "Quantiles" ' 'instead'.format(s0), UserWarning, stacklevel=3) if k < 2 or k > 9: warnings.warn('Invalid k: {0} (2 <= k <= 9), setting k=5 ' '(default)'.format(k), UserWarning, stacklevel=3) k = 5 binning = schemes[scheme](values, k) return binning except ImportError: raise ImportError("PySAL is required to use the 'scheme' keyword") def norm_cmap(values, cmap, normalize, cm, vmin=None, vmax=None): """ Normalize and set colormap Parameters ---------- values Series or array to be normalized cmap matplotlib Colormap normalize matplotlib.colors.Normalize cm matplotlib.cm vmin Minimum value of colormap. If None, uses min(values). vmax Maximum value of colormap. If None, uses max(values). Returns ------- n_cmap mapping of normalized values to colormap (cmap) """ mn = min(values) if vmin is None else vmin mx = max(values) if vmax is None else vmax norm = normalize(vmin=mn, vmax=mx) n_cmap = cm.ScalarMappable(norm=norm, cmap=cmap) return n_cmap
bsd-3-clause
-1,790,349,565,041,138,400
34.431635
138
0.590875
false
4.208917
false
false
false
spillai/crisp
crisp/rotations.py
1
9499
# -*- coding: utf-8 -*- from __future__ import division, print_function, absolute_import """ Rotation handling module """ __author__ = "Hannes Ovrén" __copyright__ = "Copyright 2013, Hannes Ovrén" __license__ = "GPL" __email__ = "[email protected]" import numpy as np from numpy.testing import assert_almost_equal from . import ransac #------------------------------------------------------------------------------ def procrustes(X, Y, remove_mean=False): """Orthogonal procrustes problem solver The procrustes problem finds the best rotation R, and translation t where X = R*Y + t The number of points in X and Y must be at least 2. For the minimal case of two points, a third point is temporarily created and used for the estimation. Parameters ----------------- X : (3, N) ndarray First set of points Y : (3, N) ndarray Second set of points remove_mean : bool If true, the mean is removed from X and Y before solving the procrustes problem. Can yield better results in some applications. Returns ----------------- R : (3,3) ndarray Rotation component t : (3,) ndarray Translation component (None if remove_mean is False) """ assert X.shape == Y.shape assert X.shape[0] > 1 # Minimal case, create third point using cross product if X.shape[0] == 2: X3 = np.cross(X[:,0], X[:,1], axis=0) X = np.hstack((X, X3 / np.linalg.norm(X3))) Y3 = np.cross(Y[:,0], Y[:,1], axis=0) Y = np.hstack((Y, Y3 / np.linalg.norm(Y3))) D, N = X.shape[:2] if remove_mean: mx = np.mean(X, axis=1).reshape(D, 1) my = np.mean(Y, axis=1).reshape(D, 1) Xhat = X - mx Yhat = Y - my else: Xhat = X Yhat = Y (U, S, V) = np.linalg.svd((Xhat).dot(Yhat.T)) Dtmp = np.eye(Xhat.shape[0]) Dtmp[-1,-1] = np.linalg.det(U.dot(V)) R_est = U.dot(Dtmp).dot(V) # Now X=R_est*(Y-my)+mx=R_est*Y+t_est if remove_mean: t_est= mx - R_est.dot(my) else: t_est = None return (R_est, t_est) #-------------------------------------------------------------------------- def rotation_matrix_to_axis_angle(R): """Convert a 3D rotation matrix to a 3D axis angle representation Parameters --------------- R : (3,3) array Rotation matrix Returns ---------------- v : (3,) array (Unit-) rotation angle theta : float Angle of rotations, in radians Note -------------- This uses the algorithm as described in Multiple View Geometry, p. 584 """ assert R.shape == (3,3) assert_almost_equal(np.linalg.det(R), 1.0, err_msg="Not a rotation matrix: determinant was not 1") S, V = np.linalg.eig(R) k = np.argmin(np.abs(S - 1.)) s = S[k] assert_almost_equal(s, 1.0, err_msg="Not a rotation matrix: No eigen value s=1") v = np.real(V[:, k]) # Result is generally complex vhat = np.array([R[2,1] - R[1,2], R[0,2] - R[2,0], R[1,0] - R[0,1]]) sintheta = 0.5 * np.dot(v, vhat) costheta = 0.5 * (np.trace(R) - 1) theta = np.arctan2(sintheta, costheta) return (v, theta) #-------------------------------------------------------------------------- def axis_angle_to_rotation_matrix(v, theta): """Convert rotation from axis-angle to rotation matrix Parameters --------------- v : (3,) ndarray Rotation axis (normalized) theta : float Rotation angle (radians) Returns ---------------- R : (3,3) ndarray Rotation matrix """ if np.abs(theta) < np.spacing(1): return np.eye(3) else: v = v.reshape(3,1) np.testing.assert_almost_equal(np.linalg.norm(v), 1.) 
vx = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]]) vvt = np.dot(v, v.T) R = np.eye(3)*np.cos(theta) + (1 - np.cos(theta))*vvt + vx * np.sin(theta) return R #-------------------------------------------------------------------------- def quat_to_rotation_matrix(q): """Convert unit quaternion to rotation matrix Parameters ------------- q : (4,) ndarray Unit quaternion, scalar as first element Returns ---------------- R : (3,3) ndarray Rotation matrix """ q = q.flatten() assert q.size == 4 assert_almost_equal(np.linalg.norm(q), 1.0, err_msg="Not a unit quaternion!") qq = q ** 2 R = np.array([[qq[0] + qq[1] - qq[2] - qq[3], 2*q[1]*q[2] - 2*q[0]*q[3], 2*q[1]*q[3] + 2*q[0]*q[2]], [2*q[1]*q[2] + 2*q[0]*q[3], qq[0] - qq[1] + qq[2] - qq[3], 2*q[2]*q[3] - 2*q[0]*q[1]], [2*q[1]*q[3] - 2*q[0]*q[2], 2*q[2]*q[3] + 2*q[0]*q[1], qq[0] - qq[1] - qq[2] + qq[3]]]) return R #-------------------------------------------------------------------------- def integrate_gyro_quaternion(gyro_ts, gyro_data): """Integrate angular velocities to rotations Parameters --------------- gyro_ts : ndarray Timestamps gyro_data : (3, N) ndarray Angular velocity measurements Returns --------------- rotations : (4, N) ndarray Rotation sequence as unit quaternions (first element scalar) """ #NB: Quaternion q = [a, n1, n2, n3], scalar first q_list = np.zeros((gyro_ts.shape[0], 4)) # Nx4 quaternion list q_list[0,:] = np.array([1, 0, 0, 0]) # Initial rotation (no rotation) # Iterate over all (except first) for i in range(1, gyro_ts.size): w = gyro_data[i] dt = gyro_ts[i] - gyro_ts[i - 1] qprev = q_list[i - 1] A = np.array([[0, -w[0], -w[1], -w[2]], [w[0], 0, w[2], -w[1]], [w[1], -w[2], 0, w[0]], [w[2], w[1], -w[0], 0]]) qnew = (np.eye(4) + (dt/2.0) * A).dot(qprev) qnorm = np.sqrt(np.sum(qnew ** 2)) qnew /= qnorm q_list[i] = qnew return q_list #-------------------------------------------------------------------------- def slerp(q1, q2, u): """SLERP: Spherical linear interpolation between two unit quaternions. Parameters ------------ q1 : (4, ) ndarray Unit quaternion (first element scalar) q2 : (4, ) ndarray Unit quaternion (first element scalar) u : float Interpolation factor in range [0,1] where 0 is first quaternion and 1 is second quaternion. Returns ----------- q : (4,) ndarray The interpolated unit quaternion """ q1 = q1.flatten() q2 = q2.flatten() assert q1.shape == q2.shape assert q1.size == 4 costheta = np.sqrt(np.sum(q1 * q2)) theta = np.arccos(costheta) f1 = np.sin((1.0 - u)*theta) / np.sin(theta) f2 = np.sin(u*theta) / np.sin(theta) # Shortest path is wanted, so conjugate if necessary if costheta < 0: f1 = -f1 q = f1*q1 + f2*q2 q = q / np.sqrt(np.sum(q**2)) # Normalize else: q = f1*q1 + f2*q2 q = q / np.sqrt(np.sum(q**2)) # Normalize return q #-------------------------------------------------------------------------- def estimate_rotation_procrustes_ransac(x, y, camera, threshold, inlier_ratio=0.75, do_translation=False): """Calculate rotation between two sets of image coordinates using ransac. Inlier criteria is the reprojection error of y into image 1. 
Parameters ------------------------- x : array 2xN image coordinates in image 1 y : array 2xN image coordinates in image 2 camera : Camera model threshold : float pixel distance threshold to accept as inlier do_translation : bool Try to estimate the translation as well Returns ------------------------ R : array 3x3 The rotation that best fulfills X = RY t : array 3x1 translation if do_translation is False residual : array pixel distances ||x - xhat|| where xhat ~ KRY (and lens distorsion) inliers : array Indices of the points (in X and Y) that are RANSAC inliers """ assert x.shape == y.shape assert x.shape[0] == 2 X = camera.unproject(x) Y = camera.unproject(y) data = np.vstack((X, Y, x)) assert data.shape[0] == 8 model_func = lambda data: procrustes(data[:3], data[3:6], remove_mean=do_translation) def eval_func(model, data): Y = data[3:6].reshape(3,-1) x = data[6:].reshape(2,-1) R, t = model Xhat = np.dot(R, Y) if t is None else np.dot(R, Y) + t xhat = camera.project(Xhat) dist = np.sqrt(np.sum((x-xhat)**2, axis=0)) return dist inlier_selection_prob = 0.99999 model_points = 2 ransac_iterations = int(np.log(1 - inlier_selection_prob) / np.log(1-inlier_ratio**model_points)) model_est, ransac_consensus_idx = ransac.RANSAC(model_func, eval_func, data, model_points, ransac_iterations, threshold, recalculate=True) if model_est is not None: (R, t) = model_est dist = eval_func((R, t), data) else: dist = None R, t = None, None ransac_consensus_idx = [] return R, t, dist, ransac_consensus_idx
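A sanity sketch for procrustes() on a noise-free rotation, assuming the module is importable as crisp.rotations per the file path above; three or more points are used so the special two-point branch is not exercised:

import numpy as np
from crisp.rotations import procrustes

theta = 0.3
R_true = np.array([[np.cos(theta), -np.sin(theta), 0.],
                   [np.sin(theta),  np.cos(theta), 0.],
                   [0., 0., 1.]])
Y = np.random.randn(3, 5)          # five random 3D points
R_est, t = procrustes(R_true.dot(Y), Y)
print(np.allclose(R_est, R_true))  # True; t is None since remove_mean=False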
gpl-3.0
-1,031,934,485,086,081,700
29.149206
146
0.502896
false
3.286159
false
false
false
prim/ocempgui
doc/examples/table.py
1
2042
# Table examples. from ocempgui.widgets import Renderer, Table, Label, Button from ocempgui.widgets.Constants import * def create_table_view (): # Create and display a Table. table = Table (9, 2) table.spacing = 5 table.topleft = 5, 5 label = Label ("Nonaligned wide Label") table.add_child (0, 0, label) table.add_child (0, 1, Button ("Simple Button")) label = Label ("Top align") table.add_child (1, 0, label) table.set_align (1, 0, ALIGN_TOP) table.add_child (1, 1, Button ("Simple Button")) label = Label ("Bottom align") table.add_child (2, 0, label) table.set_align (2, 0, ALIGN_BOTTOM) table.add_child (2, 1, Button ("Simple Button")) label = Label ("Left align") table.add_child (3, 0, label) table.set_align (3, 0, ALIGN_LEFT) table.add_child (3, 1, Button ("Simple Button")) label = Label ("Right align") table.add_child (4, 0, label) table.set_align (4, 0, ALIGN_RIGHT) table.add_child (4, 1, Button ("Simple Button")) label = Label ("Topleft align") table.add_child (5, 0, label) table.set_align (5, 0, ALIGN_TOP | ALIGN_LEFT) table.add_child (5, 1, Button ("Simple Button")) label = Label ("Topright align") table.add_child (6, 0, label) table.set_align (6, 0, ALIGN_TOP | ALIGN_RIGHT) table.add_child (6, 1, Button ("Simple Button")) label = Label ("Bottomleft align") table.add_child (7, 0, label) table.set_align (7, 0, ALIGN_BOTTOM | ALIGN_LEFT) table.add_child (7, 1, Button ("Simple Button")) label = Label ("Bottomright align") table.add_child (8, 0, label) table.set_align (8, 0, ALIGN_BOTTOM | ALIGN_RIGHT) table.add_child (8, 1, Button ("Simple Button")) return table if __name__ == "__main__": # Initialize the drawing window. re = Renderer () re.create_screen (250, 350) re.title = "Table examples" re.color = (234, 228, 223) re.add_widget (create_table_view ()) # Start the main rendering loop. re.start ()
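The same pattern in miniature, assuming ocempgui is installed - a 1x2 Table with one explicitly aligned cell:

from ocempgui.widgets import Renderer, Table, Label
from ocempgui.widgets.Constants import ALIGN_TOP, ALIGN_RIGHT

re = Renderer()
re.create_screen(200, 80)
table = Table(1, 2)
table.add_child(0, 0, Label("aligned"))
table.set_align(0, 0, ALIGN_TOP | ALIGN_RIGHT)
table.add_child(0, 1, Label("default"))
re.add_widget(table)
re.start()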
bsd-2-clause
528,070,260,686,461,950
30.415385
59
0.616552
false
3.156105
false
false
false
MERegistro/meregistro
meregistro/apps/reportes/views/normativa_jurisdiccional.py
1
1076
# -*- coding: UTF-8 -*- from django.http import HttpResponse, HttpResponseRedirect from datetime import datetime, date from apps.seguridad.decorators import login_required, credential_required from apps.seguridad.models import Usuario, Perfil from apps.titulos.models.NormativaJurisdiccional import NormativaJurisdiccional import csv from apps.reportes.models import Reporte @login_required @credential_required('tit_nor_jur_consulta') def normativas_jurisdiccionales(request, q): filename = 'normativas_jurisdiccionales_' + str(date.today()) + '.xls' reporte = Reporte(headers=['NUMERO/AÑO', 'TIPO', 'JURISDICCION', 'OTORGADA POR', 'OBSERVACIONES', 'ESTADO'], filename=filename) for nj in q: if nj.estado is None: estado_nombre = '' else: estado_nombre = nj.estado.nombre.encode('utf8') reporte.rows.append([nj.numero_anio.encode('utf8'), unicode(nj.tipo_normativa_jurisdiccional), unicode(nj.jurisdiccion), unicode(nj.otorgada_por), nj.observaciones.encode('utf8'), estado_nombre]) return reporte.as_csv()
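For context, a sketch of how a caller might hand this view its queryset q; the wrapper function name and its placement are illustrative, not taken from the project's URLconf:

from apps.titulos.models.NormativaJurisdiccional import NormativaJurisdiccional
from apps.reportes.views.normativa_jurisdiccional import normativas_jurisdiccionales

def exportar_normativas(request):  # hypothetical wrapper view
    q = NormativaJurisdiccional.objects.all()
    return normativas_jurisdiccionales(request, q)  # responds with the report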
bsd-3-clause
-264,908,265,114,107,870
45.73913
203
0.733023
false
2.929155
false
false
false
steffgrez/aio-jsonrpc-2.0
aio_jsonrpc_20/response.py
1
1779
class ResponseMaker(object): __slots__ = ['error_verbose'] def __init__(self, error_verbose=True): self.error_verbose = error_verbose def get_response(self, result, request_id): return { "jsonrpc": "2.0", "result": result, "id": request_id } def get_error(self, code, message, data=None, request_id=None): result = { "jsonrpc": "2.0", "error": { "code": code, "message": message, }, "id": request_id } if self.error_verbose and data: result["error"]['data'] = data return result def get_parse_error(self, data=None, request_id=None): return self.get_error( -32700, 'Parse error', data=data, request_id=request_id ) def get_invalid_request(self, data=None, request_id=None): return self.get_error( -32600, 'Invalid Request', data=data, request_id=request_id ) def get_method_not_found(self, data=None, request_id=None): return self.get_error( -32601, 'Method not found', data=data, request_id=request_id ) def get_invalid_params(self, data=None, request_id=None): return self.get_error( -32602, 'Invalid params', data=data, request_id=request_id ) def get_internal_error(self, data=None, request_id=None): return self.get_error( -32603, 'Internal error', data=data, request_id=request_id ) def get_server_error(self, code, data, request_id=None): return self.get_error( code=code, message='Server error', data=data, request_id=request_id )
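Usage sketch, assuming the class above is in scope:

maker = ResponseMaker(error_verbose=True)

print(maker.get_response(result=42, request_id=1))
# {'jsonrpc': '2.0', 'result': 42, 'id': 1}

print(maker.get_invalid_params(data='missing field "x"', request_id=2))
# {'jsonrpc': '2.0',
#  'error': {'code': -32602, 'message': 'Invalid params', 'data': 'missing field "x"'},
#  'id': 2}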
mit
4,117,111,114,315,011,600
27.693548
72
0.540753
false
3.737395
false
false
false
markovmodel/molPX
molpx/_linkutils.py
1
18471
import numpy as _np from matplotlib.widgets import AxesWidget as _AxesWidget from matplotlib.colors import is_color_like as _is_color_like from matplotlib.axes import Axes as _mplAxes from matplotlib.figure import Figure as _mplFigure from IPython.display import display as _ipydisplay from pyemma.util.types import is_int as _is_int from scipy.spatial import cKDTree as _cKDTree from ._bmutils import get_ascending_coord_idx from mdtraj import Trajectory as _mdTrajectory from nglview import NGLWidget as _NGLwdg from ipywidgets import HBox as _HBox, VBox as _VBox def pts_per_axis_unit(mplax, pt_per_inch=72): r""" Return how many pt per axis unit of a given maptplotlib axis a figure has Parameters ---------- mplax : :obj:`matplotlib.axes._subplots.AxesSubplot` pt_per_inch : how many points are in an inch (this number should not change) Returns -------- pt_per_xunit, pt_per_yunit """ # matplotlib voodoo # Get bounding box bbox = mplax.get_window_extent().transformed(mplax.get_figure().dpi_scale_trans.inverted()) span_inch = _np.array([bbox.width, bbox.height], ndmin=2).T span_units = [mplax.get_xlim(), mplax.get_ylim()] span_units = _np.diff(span_units, axis=1) inch_per_unit = span_inch / span_units return inch_per_unit * pt_per_inch def update2Dlines(iline, x, y): """ provide a common interface to update objects on the plot to a new position (x,y) depending on whether they are hlines, vlines, dots etc Parameters ---------- iline: :obj:`matplotlib.lines.Line2D` object x : float with new position y : float with new position """ # TODO FIND OUT A CLEANER WAY TO DO THIS (dict or class) if not hasattr(iline,'whatisthis'): raise AttributeError("This method will only work if iline has the attribute 'whatsthis'") else: # TODO find cleaner way of distinguishing these 2Dlines if iline.whatisthis in ['dot']: iline.set_xdata((x)) iline.set_ydata((y)) elif iline.whatisthis in ['lineh']: iline.set_ydata((y,y)) elif iline.whatisthis in ['linev']: iline.set_xdata((x,x)) else: # TODO: FIND OUT WNY EXCEPTIONS ARE NOT BEING RAISED raise TypeError("what is this type of 2Dline?") class ClickOnAxisListener(object): def __init__(self, ngl_wdg, crosshairs, showclick_objs, ax, pos, list_mpl_objects_to_update): self.ngl_wdg = ngl_wdg self.crosshairs = crosshairs self.showclick_objs = showclick_objs self.ax = ax self.pos = pos self.list_mpl_objects_to_update = list_mpl_objects_to_update self.list_of_dots = [None]*self.pos.shape[0] self.fig_size = self.ax.figure.get_size_inches() self.kdtree = None def build_tree(self): # Use ax.transData to compute distance in pixels # regardelss of the axes units (http://matplotlib.org/users/transforms_tutorial.html) # Corresponds to the visual distance between clicked point and target point self.kdtree = _cKDTree(self.ax.transData.transform(self.pos)) @property def figure_changed_size(self): return not _np.allclose(self.fig_size, self.ax.figure.get_size_inches()) def __call__(self, event): # Wait for the first click or a a figsize change # to build the kdtree if self.figure_changed_size or self.kdtree is None: self.build_tree() self.fig_size = self.ax.figure.get_size_inches() # Was the click inside the bounding box? 
if self.ax.get_window_extent().contains(event.x, event.y): if self.crosshairs: for iline in self.showclick_objs: update2Dlines(iline, event.xdata, event.ydata) _, index = self.kdtree.query(x=[event.x, event.y], k=1) for idot in self.list_mpl_objects_to_update: update2Dlines(idot, self.pos[index, 0], self.pos[index, 1]) self.ngl_wdg.isClick = True if hasattr(self.ngl_wdg, '_GeomsInWid'): # We're in a sticky situation if event.button == 1: # Pressed left self.ngl_wdg._GeomsInWid[index].show() if self.list_of_dots[index] is None: # Plot and store the dot in case there wasn't self.list_of_dots[index] = self.ax.plot(self.pos[index, 0], self.pos[index, 1], 'o', c=self.ngl_wdg._GeomsInWid[index].color_dot, ms=7)[0] elif event.button in [2, 3]: # Pressed right or middle self.ngl_wdg._GeomsInWid[index].hide() # Delete dot if the geom is not visible anymore if not self.ngl_wdg._GeomsInWid[index].is_visible() and self.list_of_dots[index] is not None: self.list_of_dots[index].remove() self.list_of_dots[index] = None else: # We're not sticky, just go to the frame self.ngl_wdg.frame = index class MolPXBox(object): r""" Class created to be the parent class of MolPXHBox and MolPXVBox, which inherit from MolPXBox and the ipywidget classes HBox and VBox (*args and **kwargs are for these) The sole purpose of this class is to avoid monkey-patching elsewhere in the code, this class creates them as empty lists on instantiation. It also implements two methods: * self.display (=IPython.display(self) * append_if_existing """ def __init__(self, *args, **kwargs): self.linked_axes = [] self.linked_mdgeoms = [] self.linked_ngl_wdgs = [] self.linked_data_arrays = [] self.linked_ax_wdgs = [] self.linked_figs = [] def display(self): _ipydisplay(self) def append_if_existing(self, args0, startswith_arg="linked_"): r""" args0 is the tuple containing all widgets to be included in the MolPXBox this tuple can contain itself other MolPXWidget so we iterate through them and appending linked stuff """ for iarg in args0: for attrname in dir(iarg): if attrname.startswith(startswith_arg) and len(iarg.__dict__[attrname]) != 0: self.__dict__[attrname] += iarg.__dict__[attrname] def auto_append_these_mpx_attrs(iobj, *attrs): r""" The attribute s name is automatically derived from the attribute s type via a type:name dictionary *attrs : any number of unnamed objects of the types in type2attrname. 
If the object type is a list, it will be flattened prior to attempting
    """
    attrs_flat_list = []
    for sublist in attrs:
        if isinstance(sublist, list):
            for item in sublist:
                attrs_flat_list.append(item)
        else:
            attrs_flat_list.append(sublist)

    # Go through the arguments and assign them an attrname according to their types
    for iattr in attrs_flat_list:
        for attrname, itype in type2attrname.items():
            if isinstance(iattr, itype):
                iobj.__dict__[attrname].append(iattr)
                break

class MolPXHBox(_HBox, MolPXBox):
    def __init__(self, *args, **kwargs):
        super(MolPXHBox, self).__init__(*args, **kwargs)
        self.append_if_existing(args[0])

class MolPXVBox(_VBox, MolPXBox):
    def __init__(self, *args, **kwargs):
        super(MolPXVBox, self).__init__(*args, **kwargs)
        self.append_if_existing(args[0])

type2attrname = {"linked_axes": _mplAxes,
                 "linked_mdgeoms": _mdTrajectory,
                 "linked_ngl_wdgs": _NGLwdg,
                 "linked_data_arrays": _np.ndarray,
                 "linked_ax_wdgs": _AxesWidget,
                 "linked_figs": _mplFigure,
                 }

class ChangeInNGLWidgetListener(object):

    def __init__(self, ngl_wdg, list_mpl_objects_to_update, pos):
        self.ngl_wdg = ngl_wdg
        self.list_mpl_objects_to_update = list_mpl_objects_to_update
        self.pos = pos

    def __call__(self, change):
        self.ngl_wdg.isClick = False
        _idx = change["new"]
        try:
            for idot in self.list_mpl_objects_to_update:
                update2Dlines(idot, self.pos[_idx, 0], self.pos[_idx, 1])
            #print("caught index error with index %s (new=%s, old=%s)" % (_idx, change["new"], change["old"]))
        except IndexError as e:
            for idot in self.list_mpl_objects_to_update:
                update2Dlines(idot, self.pos[0, 0], self.pos[0, 1])
            print("caught index error with index %s (new=%s, old=%s)" % (_idx, change["new"], change["old"]))
        #print("set xy = (%s, %s)" % (x[_idx], y[_idx]))

class GeometryInNGLWidget(object):
    r"""
    returns an object that is aware of where its geometries are located in the
    NGLWidget and of their representation status

    The object exposes two methods, show and hide, to automagically know what to do
    """

    def __init__(self, geom, ngl_wdg, list_of_repr_dicts=None,
                 color_molecule_hex='Element', n_small=10):

        self.lives_at_components = []
        self.geom = geom
        self.ngl_wdg = ngl_wdg
        self.have_repr = []

        sticky_rep = 'cartoon'
        if self.geom[0].top.n_residues < n_small:
            sticky_rep = 'ball+stick'
        if list_of_repr_dicts is None:
            list_of_repr_dicts = [{'repr_type': sticky_rep, 'selection': 'all'}]

        self.list_of_repr_dicts = list_of_repr_dicts
        self.color_molecule_hex = color_molecule_hex
        self.color_dot = color_molecule_hex
        if isinstance(self.color_molecule_hex, str) and color_molecule_hex == 'Element':
            self.color_dot = 'red'

    def show(self):
        # Show can mean either
        # - add a whole new component (case 1)
        # - add the representation again to a representation-less component (case 2)

        # CASE 1
        if self.is_empty() or self.all_reps_are_on():
            if len(self.have_repr) == self.geom.n_frames:
                print("arrived at the end")
                component = None
            else:
                idx = len(self.have_repr)
                self.ngl_wdg.add_trajectory(self.geom[idx])
                self.lives_at_components.append(len(self.ngl_wdg._ngl_component_ids) - 1)
                self.ngl_wdg.clear_representations(component=self.lives_at_components[-1])
                self.have_repr.append(True)
                component = self.lives_at_components[-1]
        # CASE 2
        elif self.any_rep_is_off():  # Some are living in the widget already but have no rep
            idx = _np.argwhere(~_np.array(self.have_repr))[0].squeeze()
            component = self.lives_at_components[idx]
            self.have_repr[idx] = True
        else:
            raise Exception("This situation should not arise. This is a bug")

        if component is not None:
            for irepr in self.list_of_repr_dicts:
                self.ngl_wdg.add_representation(irepr['repr_type'],
                                                selection=irepr['selection'],
                                                component=component,
                                                color=self.color_molecule_hex)

    def hide(self):
        if self.is_empty() or self.all_reps_are_off():
            print("nothing to hide")
            pass
        elif self.any_rep_is_on():  # There's represented components already in the widget
            idx = _np.argwhere(self.have_repr)[-1].squeeze()
            self.ngl_wdg.clear_representations(component=self.lives_at_components[idx])
            self.have_repr[idx] = False
        else:
            raise Exception("This situation should not arise. This is a bug")

    # Quickhand methods for knowing what's up
    def is_empty(self):
        if len(self.have_repr) == 0:
            return True
        else:
            return False

    def all_reps_are_off(self):
        if len(self.have_repr) == 0:
            return True
        else:
            return _np.all(~_np.array(self.have_repr))

    def all_reps_are_on(self):
        if len(self.have_repr) == 0:
            return False
        else:
            return _np.all(self.have_repr)

    def any_rep_is_off(self):
        return _np.any(~_np.array(self.have_repr))

    def any_rep_is_on(self):
        return _np.any(self.have_repr)

    def is_visible(self):
        if self.is_empty() or self.all_reps_are_off():
            return False
        else:
            return True

def link_ax_w_pos_2_nglwidget(ax, pos, ngl_wdg,
                              crosshairs=True,
                              dot_color='red',
                              band_width=None,
                              radius=False,
                              directionality=None,
                              exclude_coord=None,
                              ):
    r"""
    Initial idea for this function comes from @arose, the rest is @gph82

    Parameters
    ----------
    ax : matplotlib axis object to be linked
    pos : ndarray of shape (N,2) with the positions of the geoms in the ngl_wdg
    crosshairs : Boolean or str
        If True, a crosshair will show where the mouse-click occurred. If 'h' or 'v', only the horizontal or
        vertical line of the crosshair will be shown, respectively. If False, no crosshair will appear
    dot_color : Anything that yields matplotlib.colors.is_color_like(dot_color)==True
        Default is 'red'. dot_color='None' yields no dot
    band_width : None or iterable of len = 2
        If band_width is not None, the method tries to figure out on its own if
        there is an ascending coordinate and will include a moving band on :obj:ax of this width
        (in units of the axis along which the band is plotted)
        If the method cannot find an ascending coordinate, an exception is thrown
    directionality : str or None, default is None
        If not None, directionality can be either 'a2w' or 'w2a', meaning that connectivity
        between axis and widget will be only established as
        * 'a2w' : action in axis triggers action in widget, but not the other way around
        * 'w2a' : action in widget triggers action in axis, but not the other way around
    exclude_coord : None or int , default is None
        The excluded coordinate will not be considered when computing the nearest-point-to-click.
        Typical use case is for visualize.traj to only compute distances horizontally along the time axis

    Returns
    -------
    axes_widget : :obj:`matplotlib.Axes.Axeswidget` that has been linked to the NGLWidget
    """

    assert directionality in [None, 'a2w', 'w2a'], "The directionality parameter has to be in [None, 'a2w', 'w2a'] " \
                                                   "not %s" % directionality

    assert crosshairs in [True, False, 'h', 'v'], "The crosshairs parameter has to be in [True, False, 'h','v'], " \
                                                  "not %s" % crosshairs
    ipos = _np.copy(pos)
    if _is_int(exclude_coord):
        ipos[:, exclude_coord] = 0

    # Are we in a sticky situation?
    if hasattr(ngl_wdg, '_GeomsInWid'):
        sticky = True
    else:
        assert ngl_wdg.trajectory_0.n_frames == pos.shape[0], \
            ("Mismatching frame numbers %u vs %u" % (ngl_wdg.trajectory_0.n_frames, pos.shape[0]))
        sticky = False

    # Basic interactive objects
    showclick_objs = []
    if crosshairs in [True, 'h']:
        lineh = ax.axhline(ax.get_ybound()[0], c="black", ls='--')
        setattr(lineh, 'whatisthis', 'lineh')
        showclick_objs.append(lineh)
    if crosshairs in [True, 'v']:
        linev = ax.axvline(ax.get_xbound()[0], c="black", ls='--')
        setattr(linev, 'whatisthis', 'linev')
        showclick_objs.append(linev)

    if _is_color_like(dot_color):
        pass
    else:
        raise TypeError('dot_color should be a matplotlib color')

    dot = ax.plot(pos[0, 0], pos[0, 1], 'o', c=dot_color, ms=7, zorder=100)[0]
    setattr(dot, 'whatisthis', 'dot')
    list_mpl_objects_to_update = [dot]

    # Other objects, related to smoothing options
    if band_width is not None:
        if radius:
            band_width_in_pts = int(_np.round(pts_per_axis_unit(ax).mean() * _np.mean(band_width)))
            rad = ax.plot(pos[0, 0], pos[0, 1], 'o',
                          ms=_np.round(band_width_in_pts),
                          c='green', alpha=.25, markeredgecolor='None')[0]
            setattr(rad, 'whatisthis', 'dot')
            if not sticky:
                list_mpl_objects_to_update.append(rad)
        else:
            # print("Band_width(x,y) is %s" % (band_width))
            coord_idx = get_ascending_coord_idx(pos)
            if _np.ndim(coord_idx) > 0 and len(coord_idx) == 0:
                raise ValueError("Must have an ascending coordinate for band_width usage")
            band_width_in_pts = int(_np.round(pts_per_axis_unit(ax)[coord_idx] * band_width[coord_idx]))
            # print("Band_width in %s is %s pts"%('xy'[coord_idx], band_width_in_pts))

            band_call = [ax.axvline, ax.axhline][coord_idx]
            band_init = [ax.get_xbound, ax.get_ybound][coord_idx]
            band_type = ['linev', 'lineh'][coord_idx]
            band = band_call(band_init()[0],
                             lw=band_width_in_pts,
                             c="green", ls='-',
                             alpha=.25)
            setattr(band, 'whatisthis', band_type)
            list_mpl_objects_to_update.append(band)

    ngl_wdg.isClick = False

    CLA_listener = ClickOnAxisListener(ngl_wdg, crosshairs, showclick_objs, ax, pos,
                                       list_mpl_objects_to_update)

    NGL_listener = ChangeInNGLWidgetListener(ngl_wdg, list_mpl_objects_to_update, pos)

    # Connect axes to widget
    axes_widget = _AxesWidget(ax)
    if directionality in [None, 'a2w']:
        axes_widget.connect_event('button_release_event', CLA_listener)

    # Connect widget to axes
    if directionality in [None, 'w2a']:
        ngl_wdg.observe(NGL_listener, "frame", "change")

    ngl_wdg.center()

    return axes_widget
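A minimal usage sketch for the linker above. The trajectory/topology file names and the random projection are illustrative assumptions, not part of this module; it assumes nglview, mdtraj and matplotlib are installed:

import numpy as np
import mdtraj as md
import nglview
import matplotlib.pyplot as plt

geom = md.load('traj.xtc', top='top.pdb')   # hypothetical input files
pos = np.random.rand(geom.n_frames, 2)      # stand-in for a real 2D projection
ngl_wdg = nglview.show_mdtraj(geom)

fig, ax = plt.subplots()
ax.plot(pos[:, 0], pos[:, 1], alpha=.5)
# Clicks on ax now drive the widget frame; frame changes move the dot on ax
axes_wdg = link_ax_w_pos_2_nglwidget(ax, pos, ngl_wdg)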
lgpl-3.0
-6,859,057,174,613,301,000
38.133475
118
0.584592
false
3.618926
false
false
false
brianhouse/wavefarm
granu/braid/pattern.py
1
2625
""" Pattern is just a list (of whatever) that can be specified in compacted form ... with the addition of the Markov expansion of tuples on calling resolve """ import random class Pattern(list): def __init__(self, value=[0]): list.__init__(self, value) self.resolve() def resolve(self): """ Choose a path through the Markov chain """ return self._unroll(self._subresolve(self)) def _subresolve(self, pattern): """ Resolve a subbranch of the pattern """ steps = [] for step in pattern: if type(step) == tuple: step = self._pick(step) if type(step) == tuple or type(step) == list: ## so Im limiting it to one layer of nesting? step = self._subresolve(step) elif type(step) == list: step = self._subresolve(step) steps.append(step) return steps def _pick(self, step): """ Choose between options for a given step """ assert len(step) == 2 or len(step) == 3 if len(step) == 2: if type(step[1]) == float: # (1, 0.5) is a 50% chance of playing a 1, otherwise 0 step = step[0], [0, 0], step[1] ## it's a 0, 0 because 0 patterns dont progress, and this could be the root level: is this a bug? else: step = step[0], step[1], 0.5 # (1, 2) is a 50% chance of playing a 1 vs a 2 step = step[0] if step[2] > random.random() else step[1] # (1, 2, 0.5) is full form ## expand this to accommodate any number of options return step def _unroll(self, pattern, divs=None, r=None): """ Unroll a compacted form to a pattern with lcm steps """ if divs is None: divs = self._get_divs(pattern) r = [] elif r is None: r = [] for step in pattern: if type(step) == list: self._unroll(step, (divs // len(pattern)), r) else: r.append(step) for i in range((divs // len(pattern)) - 1): r.append(0) return r def _get_divs(self, pattern): """ Find lcm for a subpattern """ subs = [(self._get_divs(step) if type(step) == list else 1) * len(pattern) for step in pattern] divs = subs[0] for step in subs[1:]: divs = lcm(divs, step) return divs def lcm(a, b): gcd, tmp = a, b while tmp != 0: gcd, tmp = tmp, gcd % tmp return a * b // gcd
gpl-3.0
-2,916,064,081,948,905,500
34
155
0.509333
false
3.766141
false
false
false
jimstorch/tokp
tokp_lib/xml_store.py
1
4808
#------------------------------------------------------------------------------
#   File:       xml_store.py
#   Purpose:    Store and Retrieve data from XML files
#   Author:     Jim Storch
#   License:    GPLv3 see LICENSE.TXT
#------------------------------------------------------------------------------

import datetime
import re
import glob

from xml.etree import cElementTree as et

from tokp_lib.parse_combat import Raid

#--[ Datetime to String ]------------------------------------------------------

# Seems like a lot of juggling but strftime() and strptime() do not support
# microseconds.

def dt_to_str(dt):
    """Given a datetime object, returns a string in the format
    'YYYY-MM-DD HH:MM:SS:MMMMMM'."""
    return '%d-%.2d-%.2d %.2d:%.2d:%.2d.%.6d' % (dt.year, dt.month, dt.day,
        dt.hour, dt.minute, dt.second, dt.microsecond)

#--[ String to Datetime ]------------------------------------------------------

## Regex for str_to_dt()
rawstr = r"^(?P<year>\d{2,})-(?P<month>\d\d)-(?P<day>\d\d)\s(?P<hour>\d\d)" + \
    ":(?P<minute>\d\d):(?P<second>\d\d)\.(?P<micro>\d*)$"
compile_obj = re.compile(rawstr)

def str_to_dt(string_in):
    """Given a string in the format 'YYYY-MM-DD HH:MM:SS:MMMMMM,' returns a
    datetime object."""
    match_obj = compile_obj.search(string_in)
    if match_obj:
        year = int(match_obj.group('year'))
        month = int(match_obj.group('month'))
        day = int(match_obj.group('day'))
        hour = int(match_obj.group('hour'))
        minute = int(match_obj.group('minute'))
        second = int(match_obj.group('second'))
        micro = int(match_obj.group('micro'))
    else:
        raise ValueError('Could not parse datetime string')
    return datetime.datetime(year, month, day, hour, minute, second, micro)

#--[ Indent ]------------------------------------------------------------------

# From http://effbot.org/zone/element-lib.htm (plus Paul Du Bois's comment)

def indent(elem, level=0):
    """Make an ElementTree all nice and pretty with indents and line breaks."""
    i = "\n" + level * "  "
    if len(elem):
        if not elem.text or not elem.text.strip():
            elem.text = i + "  "
        for child in elem:
            indent(child, level+1)
        if not child.tail or not child.tail.strip():
            child.tail = i
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
    else:
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = i

#--[ Write Raid XML ]----------------------------------------------------------

def write_raid_xml(raid):
    """ Given a Raid object, serializes it to an XML file.
    Returns the filename used."""
    fname = raid.start_time.strftime("%Y%m%d.%H%M.") + raid.zone + '.xml'
    dstr = raid.start_time.strftime("%m/%d/%Y")
    xml = et.Element('raid', date=dstr)
    ## Zone
    zone = et.SubElement(xml, 'zone')
    zone.text = raid.zone
    ## Start Time
    start_time = et.SubElement(xml, 'start_time')
    start_time.text = dt_to_str(raid.start_time)
    ## End Time
    end_time = et.SubElement(xml, 'end_time')
    end_time.text = dt_to_str(raid.end_time)
    ## Members
    members = et.SubElement(xml, 'members')
    raid.raid_members.sort()
    for member in raid.raid_members:
        name = et.SubElement(members, 'name')
        name.text = member
    ## Make pretty and write to a file
    # Mode was 'wU'; 'U' (universal newlines) is read-only and invalid with 'w'
    f = open('data/raids/' + fname, 'w')
    f.write('<?xml version="1.0"?>\n')
    tree = et.ElementTree(xml)
    tree.write(f, 'utf-8')
    #print et.tostring(xml)
    return fname

#--[ Read Raid XML ]-----------------------------------------------------------

def read_raid_xml(fname):
    """Given an XML file name, un-serializes it to a Raid object.
    Returns the Raid object."""
    tree = et.parse(open('data/raids/' + fname, 'rU'))
    zone = tree.findtext('zone')
    start_time_str = tree.findtext('start_time')
    start_time = str_to_dt(start_time_str)
    end_time_str = tree.findtext('end_time')
    end_time = str_to_dt(end_time_str)
    raid = Raid(zone, start_time)
    raid.end_time = end_time
    for elem in tree.getiterator('name'):
        raid.add_member(elem.text)
    return raid

#--[ Raid Files ]--------------------------------------------------------------

## Regex for raid_files()
fname_str = r'.*[/\\](?P<fname>.+)\.xml'
fname_obj = re.compile(fname_str)

def raid_files():
    """Returns a chronologically sorted list of raid XML file names."""
    file_list = []
    xfiles = glob.glob('data/raids/*.xml')
    for xfile in xfiles:
        match_obj = fname_obj.search(xfile)
        file_list.append(match_obj.group('fname'))
    file_list.sort()
    return file_list
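A quick round-trip check of the two datetime helpers above (the values are arbitrary):

import datetime

dt = datetime.datetime(2009, 3, 14, 21, 5, 2, 123456)
s = dt_to_str(dt)            # '2009-03-14 21:05:02.123456'
assert str_to_dt(s) == dt    # the regex parser inverts the formatter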
gpl-3.0
-795,323,932,101,939,700
31.053333
81
0.532238
false
3.436741
false
false
false
tamnm/kodi.mp3.zing.vn
dev/service.vnmusic/service.py
1
5800
import time
import xbmc
import urlparse
import SimpleHTTPServer
import SocketServer
import urllib2
import urllib
import re
import json
import base64
import requests
import httplib
import sys  # used by log() and the request handler below; was missing

songInfoApi = 'http://api.mp3.zing.vn/api/mobile/song/getsonginfo?keycode=fafd463e2131914934b73310aa34a23f&requestdata={"id":"_ID_ENCODED_"}'
videoInfoApi = 'http://api.mp3.zing.vn/api/mobile/video/getvideoinfo?keycode=fafd463e2131914934b73310aa34a23f&requestdata={"id":"_ID_ENCODED_"}'
zingTvApi = 'http://api.tv.zing.vn/2.0/media/info?api_key=d04210a70026ad9323076716781c223f&session_key=91618dfec493ed7dc9d61ac088dff36b&&media_id='

def load(url):
    r = requests.get(url)
    return r.text

def checkUrl(url):
    try:
        ret = urllib2.urlopen(url)
        code = ret.code
        #log('check url:%s - %d'%(url,code))
        return ret.code < 400
    except Exception, e:
        #log('check url:%s - %s'%(url,str(e)))
        return False
        pass

def getZTVSource(source, quality=3):
    result = None
    if quality < 0:
        return result
    ss = []
    if 'Video3GP' in source:
        ss.append('http://' + source['Video3GP'])
    else:
        ss.append(None)
    if 'Video480' in source:
        ss.append('http://' + source['Video480'])
    else:
        ss.append(None)
    if 'Video720' in source:
        ss.append('http://' + source['Video720'])
    else:
        ss.append(None)
    if 'Video1080' in source:
        ss.append('http://' + source['Video1080'])
    else:
        ss.append(None)
    if ss[quality] != None:
        result = ss[quality]
    else:
        for i in range(quality, -1, -1):
            if ss[i] != None:
                result = ss[i]
                break
    if result == None:
        for i in range(quality, len(ss)):
            if ss[i] != None:
                result = ss[i]
                break
    if result != None and checkUrl(result):
        return result
    else:
        return getZTVSource(source, quality - 1)

def getZVideoSource(source, quality=4):
    log('getVideoSource:' + str(quality))
    result = None
    if quality < 0:
        return result
    ss = []
    if '240' in source:
        ss.append(source['240'])
    else:
        ss.append(None)
    if '360' in source:
        ss.append(source['360'])
    else:
        ss.append(None)
    if '480' in source:
        ss.append(source['480'])
    else:
        ss.append(None)
    if '720' in source:
        ss.append(source['720'])
    else:
        ss.append(None)
    if '1080' in source:
        ss.append(source['1080'])
    else:
        ss.append(None)
    #log('Source:%d - %s'%(quality,ss[quality]))
    if ss[quality] != None:
        result = ss[quality]
    else:
        for i in range(quality, -1, -1):
            if ss[i] != None:
                result = ss[i]
                break
    if result == None:
        for i in range(quality, len(ss)):
            if ss[i] != None:
                result = ss[i]
                break
    if result != None and checkUrl(result):
        return result
    else:
        return getZVideoSource(source, quality - 1)

def getZAudioSource(source, audio_quality=2):
    log('getAudioSource:' + str(audio_quality))
    result = None
    if audio_quality < 0:
        return result
    ss = []
    if '128' in source:
        ss.append(source['128'])
    else:
        ss.append(None)
    if '320' in source:
        ss.append(source['320'])
    else:
        ss.append(None)
    if 'lossless' in source:
        ss.append(source['lossless'])
    else:
        ss.append(None)
    if ss[audio_quality] != None:
        result = ss[audio_quality]
    else:
        for i in range(audio_quality, -1, -1):
            if ss[i] != None:
                result = ss[i]
    # Was 'if result != None', which made the upward scan overwrite a hit;
    # '== None' mirrors the video variants above
    if result == None:
        for i in range(audio_quality, len(ss)):
            if ss[i] != None:
                result = ss[i]
    if result != None and checkUrl(result):
        return result
    else:
        return getZAudioSource(source, audio_quality - 1)

def getMp3ZingSong(sid, q):
    url = songInfoApi.replace('_ID_ENCODED_', sid)
    js = json.loads(load(url))
    url = getZAudioSource(js['source'], q)
    return url

def getZingTVVideo(sid, q):
    url = zingTvApi + sid
    js = json.loads(load(url))
    url = getZTVSource(js['response']['other_url'], q)
    return url

def getMp3ZingVideo(sid, q):
    url = videoInfoApi.replace('_ID_ENCODED_', sid)
    js = json.loads(load(url))
    source = getZVideoSource(js['source'], q)
    return source

def getTalkTVVideo(sid):
    #loadPlayer.manifestUrl = "http://live.csmtalk.vcdn.vn/hls/6b1cc68ba8735185ada742e8713567c4/55f10fd0/elorenhat/index.m3u8";
    url = 'http://talktv.vn/' + sid
    html = load(url)
    lines = html.split('\n')
    for line in lines:
        line = line.strip()
        if 'loadPlayer.manifestUrl' in line:
            line = line.replace('loadPlayer.manifestUrl', '').replace('"', '').replace(';', '').replace('=', '').strip()
            return line
    return None

def log(m):
    sys.stdout.write(m)
    pass

class MyRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):

    def redirect(self, link):
        self.send_response(301)
        self.send_header('Content-type', 'text/html')
        self.send_header('Location', link)
        self.end_headers()
        #log('link:'+link)
        pass

    def do_HEAD(self):
        self.do_GET()

    def do_GET(self):
        q = urlparse.urlparse(self.path)
        queries = urlparse.parse_qs(q.query)
        self.log_request()
        if "mp3ZAudio?" in self.path:
            link = getMp3ZingSong(queries['sid'][0], int(queries['q'][0]))
            if link == None:
                log(queries['sid'][0] + ' link not found')
                pass
            self.redirect(link)
        elif "mp3ZVideo?" in self.path:
            link = getMp3ZingVideo(queries['sid'][0], int(queries['q'][0]))
            self.redirect(link)
        elif 'ZingTV?' in self.path:
            link = getZingTVVideo(queries['sid'][0], int(queries['q'][0]))
            self.redirect(link)
        elif 'TalkTV?' in self.path:
            link = getTalkTVVideo(queries['sid'][0])
            self.redirect(link)

    def log_request(self, code='-', size='-'):
        sys.stdout.write("%s %s %s" % (self.requestline, str(code), str(size)))

if __name__ == '__main__':
    PORT = 9998
    handler = MyRequestHandler
    httpd = SocketServer.TCPServer(("", PORT), handler)
    sys.stdout.write("serving at port %d" % PORT)
    httpd.serve_forever()
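A hedged client-side sketch: with the service above running locally, a player can be pointed at the redirecting endpoints. The song id below is a made-up placeholder:

import urllib2

# Hypothetical song id; q=2 asks for the 'lossless' tier with fallback.
url = 'http://localhost:9998/mp3ZAudio?sid=ZW6F0AOB&q=2'
stream = urllib2.urlopen(url)   # urlopen follows the 301 to the resolved source
print stream.geturl()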
gpl-2.0
-8,087,498,911,041,477,000
22.276151
146
0.642241
false
2.724284
false
false
false
her0e1c1/pystock
stock/signals.py
1
2696
import pandas as pd

from . import line, util


def rolling_mean(series, period):
    """Cross between the current price (fast) and the long-term moving average (slow)."""
    slow = series.rolling(window=period, center=False).mean()
    return util.cross(series, slow)


def rolling_mean_ratio(series, period, ratio):
    """Buy/sell signal when the last price deviates from the long-term
    moving average by more than `ratio`."""
    mean = series.rolling(window=period, center=False).mean()
    r = util.increment(util.last(series), util.last(mean))
    return "BUY" if r > ratio else "SELL" if r < -ratio else None


def increment_ratio(series, ratio=25):
    """Buy/sell signal when the price moves more than `ratio` versus the
    previous day (a swing that large is assumed likely to revert)."""
    curr = util.last(series)
    prev = util.last(series, offset_from_last=1)
    r = util.increment(curr, prev)
    return "BUY" if r < -ratio else "SELL" if r > ratio else None


def rsi(series, period, buy, sell):
    """RSI: below 30% is generally considered oversold, above 70% overbought."""
    rsi = line.rsi(series, period)
    if rsi.empty:
        return None
    f = float(rsi[rsi.last_valid_index()])
    return "BUY" if f < buy else "SELL" if f > sell else None


def min_low(series, period, ratio):
    """Buy when the price approaches the lowest low of the period
    (the floor may act as support and trigger a rebound)."""
    m = float(series.tail(period).min())
    if pd.isnull(m):
        return None
    last = series[series.last_valid_index()]
    return "BUY" if util.increment(last, m) < ratio else None


def max_high(series, period, ratio):
    """The inverse of min_low."""
    m = float(series.tail(period).max())
    if pd.isnull(m):
        return None
    last = series[series.last_valid_index()]
    return "SELL" if util.increment(m, last) < ratio else None


def macd_signal(series, fast, slow, signal):
    """Cross between the MACD line (fast) and the signal line (slow)."""
    f = line.macd_line(series, fast, slow, signal)
    s = line.macd_signal(series, fast, slow, signal)
    return util.cross(f, s)


def stochastic(series, k, d, sd):
    """Cross between the fast line (%D) and the slow line (%SD).

    Commonly used values: (k, d, sd) = (14, 3, 3)
    """
    fast = line.stochastic_d(series, k=k, d=d)
    slow = line.stochastic_sd(series, k=k, d=d, sd=sd)
    return util.cross(fast, slow)


def bollinger_band(series, period=20, ratio=3):
    """
    Above +2 sigma: considered overbought, so SELL.
    Below -2 sigma: considered oversold, so BUY.
    """
    s = util.sigma(series, period)
    return "BUY" if s <= -ratio else "SELL" if s >= ratio else None
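A hedged usage sketch of the signal functions above; it assumes the package's own `line`/`util` helpers are importable (they are not shown here) and uses synthetic prices with illustrative thresholds:

import pandas as pd
from stock import signals  # assumed package layout

close = pd.Series([100, 102, 101, 105, 110, 108, 115, 120, 118, 125])
print(signals.rsi(close, period=5, buy=30, sell=70))    # 'BUY'/'SELL'/None
print(signals.bollinger_band(close, period=5, ratio=2))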
gpl-3.0
7,619,161,558,827,822,000
28.868421
67
0.647137
false
2.182692
false
false
false
ted-dunstone/ivs
hub_demo/test_match.py
1
1043
import pika
import sys
import logging

logging.basicConfig()

connection = pika.BlockingConnection(pika.ConnectionParameters(
        host='localhost'))
channel = connection.channel()

channel.exchange_declare(exchange='Australia_NZ_Exchange',
                         type='headers')

result = channel.queue_declare(exclusive=True)
if not result:
    print "Queue didn't declare properly!"
    sys.exit(1)
queue_name = result.method.queue

channel.queue_bind(exchange='Australia_NZ_Exchange',
                   queue=queue_name,
                   routing_key='',
                   arguments={'test': 'test', 'x-match': 'any'})

def callback(ch, method, properties, body):
    print properties.user_id
    print "{headers}:{body}".format(headers=properties.headers, body=body)

channel.basic_consume(callback,
                      queue=queue_name,
                      no_ack=True)
try:
    channel.start_consuming()
except KeyboardInterrupt:
    print 'Bye'
finally:
    connection.close()
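A hedged publisher counterpart for the headers-exchange consumer above; the header values are placeholders:

import pika

conn = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
ch = conn.channel()
# 'x-match': 'any' on the consumer side means one matching header suffices.
ch.basic_publish(exchange='Australia_NZ_Exchange',
                 routing_key='',
                 body='hello hub',
                 properties=pika.BasicProperties(headers={'test': 'test'}))
conn.close()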
mit
-1,545,338,382,569,837,600
26.447368
65
0.61745
false
4.205645
false
false
false
gemelkelabs/timing_system_software
server_py_files/core/xstatus.py
1
1031
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 05 21:30:07 2014

@author: Nate
"""

from xml.sax import saxutils

class xstatus_ready():
    """ Inheritance class to add a status output in xml-format to any object """

    def xstatus(self):
        """ returns status of the listener as xml, and follows-on to child objects """
        status = "<" + self.__class__.__name__ + ">"
        status += self.iterate_dict(vars(self))
        status += "</" + self.__class__.__name__ + ">"
        return status

    def iterate_dict(self, dictelm):
        """ steps through a dictionary """
        status = ''
        for var in dictelm.keys():
            status += "<" + var + ">"
            if type(dictelm[var]) == type({}):
                status += self.iterate_dict(dictelm[var])
            elif hasattr(dictelm[var], 'xstatus'):
                status += dictelm[var].xstatus()
            else:
                status += saxutils.escape(str(dictelm[var]))
            status += "</" + var + ">"
        return status
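A short usage sketch of the mixin above; the class and attribute names are invented for illustration:

class Channel(xstatus_ready):   # hypothetical consumer of the mixin
    def __init__(self):
        self.name = 'ch0'
        self.limits = {'low': 0, 'high': '<5V>'}   # '<' gets XML-escaped

print(Channel().xstatus())
# <Channel><name>ch0</name><limits><low>0</low><high>&lt;5V&gt;</high></limits></Channel>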
mit
3,538,536,760,451,454,000
28.485714
96
0.519884
false
3.804428
false
false
false
cheery/essence
essence/selection.py
1
12706
# This file is part of Essential Editor Research Project (EERP)
#
# EERP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# EERP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EERP. If not, see <http://www.gnu.org/licenses/>.
from document import editlength, star, dot, element, iselement, empty_template, filled_template, replace, copy, copyList, dotmarker, starmarker, holepartial
from random import randint
from util import makelist, pull, push

def clamp(star, base):
    return max(star.first, min(base, star.last))

def mark0(hole):
    if isinstance(hole, dot):
        return dotmarker(hole, False)
    if isinstance(hole, star):
        return starmarker(hole, 0, 0)
    if isinstance(hole, holepartial):
        return starmarker(hole.hole, hole.start, hole.start)

def mark1(hole):
    if isinstance(hole, dot):
        return dotmarker(hole, True)
    if isinstance(hole, star):
        cursor = hole.last
        return starmarker(hole, cursor, cursor)
    if isinstance(hole, holepartial):
        return starmarker(hole.hole, hole.stop, hole.stop)

def mark2(hole):
    if isinstance(hole, dot):
        return dotmarker(hole, True)
    if isinstance(hole, star):
        return starmarker(hole, hole.last, hole.first)
    if isinstance(hole, holepartial):
        return starmarker(hole.hole, hole.stop, hole.start)

def first_marker(obj):
    if iselement(obj) and len(obj.holes) > 0:
        return mark0(obj.holes[0])

def last_marker(obj):
    if iselement(obj) and len(obj.holes) > 0:
        return mark1(obj.holes[-1])

class Selection(object):
    def __init__(self, buffer, path, parent, marker):
        self.buffer = buffer
        self.path = list(path)
        self.parent = parent
        self.marker = marker

    def remove(self):
        if self.marker.dot:
            self.buffer.do(self.marker, replace([None]))
        else:
            self.replace([])

    def replace(self, data, branch_in=False):
        length = sum(editlength(obj) for obj in data)
        self.buffer.do(self.marker, replace(data))
        if self.marker.dot:
            self.marker = dotmarker(self.marker.hole, visited = not branch_in)
        else:
            cursor = self.marker.start + length * (not branch_in)
            self.marker = starmarker(self.marker.hole, cursor, cursor)
        if branch_in:
            self.walk() # works only if there's starmarker

    def move(self, offset, selection=False, relative=True):
        if not self.marker.dot:
            cursor = clamp(self.marker.hole, relative*self.marker.cursor + offset)
            tail = self.marker.tail if selection else cursor
            self.marker = starmarker(self.marker.hole, cursor, tail)

    @property
    def empty(self):
        marker = self.marker
        if isinstance(marker, starmarker):
            return marker.start == marker.stop
        return False

    @property
    def bounds(self):
        marker = self.marker
        start0, stop0 = marker.hole.last, marker.hole.first
        for partial in marker.hole.partials:
            start1, stop1 = partial.start, partial.stop
            if start1 <= marker.cursor <= stop1:
                start0 = min(start0, start1)
                stop0 = max(stop0, stop1)
        return start0, stop0

    def walk(self):
        marker = self.marker
        parent = self.parent
        if marker.dot and marker.at_top:
            new_marker = first_marker(marker.hole.a)
            if new_marker is not None:
                self.path.append((self.parent, marker.hole))
                self.parent = marker.hole.a
                self.marker = new_marker
                return
            marker = mark1(marker.hole)
        if marker.at_bottom:
            index = parent.holes.index(marker.hole)
            if index + 1 < len(self.parent.holes):
                self.marker = mark0(parent.holes[index + 1])
            elif len(self.path) > 0:
                parent, partial = self.path.pop(-1)
                self.parent = parent
                self.marker = mark1(partial)
            return
        if marker.dot:
            return
        for partial in marker.hole.partials:
            if partial.start <= marker.cursor < partial.stop:
                new_marker = first_marker(partial.a)
                if new_marker is None:
                    self.move(partial.stop, relative=False)
                    return
                else:
                    self.path.append((self.parent, partial))
                    self.parent = partial.a
                    self.marker = new_marker
                    return

    def walk_backwards(self):
        marker = self.marker
        parent = self.parent
        if marker.dot and marker.at_bottom:
            new_marker = last_marker(marker.hole.a)
            if new_marker is not None:
                self.path.append((self.parent, marker.hole))
                self.parent = marker.hole.a
                self.marker = new_marker
                return
            marker = mark0(marker.hole)
        if marker.at_top:
            index = parent.holes.index(marker.hole)
            if index > 0:
                self.marker = mark1(parent.holes[index - 1])
            elif len(self.path) > 0:
                parent, partial = self.path.pop(-1)
                self.parent = parent
                self.marker = mark0(partial)
            return
        for partial in marker.hole.partials:
            if partial.start < marker.cursor <= partial.stop:
                new_marker = last_marker(partial.a)
                if new_marker is None:
                    self.move(partial.start, relative=False)
                    return
                else:
                    self.path.append((self.parent, partial))
                    self.parent = partial.a
                    self.marker = new_marker
                    return

    def select_parent(self, mark=mark2):
        if len(self.path) > 0:
            self.parent, hole = self.path.pop(-1)
            self.marker = mark(hole)
            return True
        return False

    def expand(self):
        marker = self.marker
        if marker.dot or (marker.start == marker.hole.first and marker.stop == marker.hole.last):
            self.select_parent()
        else:
            tail_loc = marker.cursor < marker.tail
            tail = (marker.hole.first, marker.hole.last)[tail_loc]
            cursor = (marker.hole.first, marker.hole.last)[not tail_loc]
            self.marker = starmarker(marker.hole, cursor, tail)

    def at_leaf(self):
        if self.marker.dot:
            obj = self.marker.hole.a
            if obj is None or len(obj.holes) == 0:
                return True
        return False

#class Selection(object):
#    def __init__(self, buffer, finger, cursor, tail):
#        self.buffer = buffer
#        self.finger = finger
#        self.cursor = cursor
#        self.tail = tail
#
#    @property
#    def start(self):
#        return min(self.cursor, self.tail)
#
#    @property
#    def stop(self):
#        return max(self.cursor, self.tail)
#
#    @property
#    def top(self):
#        return self.buffer.document.context(self.finger)[-1]
#
#    @property
#    def ascendable(self):
#        return len(self.finger) > 0
#
#    def descendable(self, index):
#        top = self.top
#        return index < len(top) and isinstance(top[index], element)
#
#    @property
#    def yank(self):
#        return copyList(self.top[self.start:self.stop])
#
#    @property
#    @makelist
#    def frame_context(self):
#        current = self.buffer.visual
#        it = iter(self.finger)
#        while current is not None:
#            yield current
#            index = it.next()
#            next = None
#            for frame in current.find():
#                base, _ = frame.range
#                if base == index:
#                    next = frame
#            current = next
#
#    def splice(self, data):
#        length = len(data)
#        operation = splice(self.start, self.stop, data)
#        self.buffer.do(self.finger, operation)
#        return Selection(
#            self.buffer,
#            self.finger,
#            self.start + length,
#            self.start + length,
#        )
#    # SPLICE (text, elements, range)
#
#    @property
#    def ascend(self):
#        finger, cursor = pull(self.finger)
#        return Selection(self.buffer, finger, self.cursor, self.tail).grasp(cursor, cursor+1)
#
#    def descend(self, base):
#        finger = push(self.finger, base)
#        start, stop = 0, len(self.top[base])
#        return Selection(self.buffer, finger, self.cursor, self.tail).grasp(start,stop)
#
#    def build(self, outside=False, kw=None):
#        kw = {'which':'scratch'} if kw is None else kw
#        start = self.start
#        stop = self.stop
#        operation = build(start, stop, kw)
#        self.buffer.do(self.finger, operation)
#        if outside:
#            finger = self.finger
#            stop = start + 1
#        else:
#            finger = push(self.finger, start)
#            start -= start
#            stop -= start
#        return Selection(self.buffer, finger, self.cursor, self.tail).grasp(start, stop)
#    # BUILD (with selection and type)
#
#    def collapse(self):
#        finger, base = pull(self.finger)
#        start = self.start + base
#        stop = self.stop + base
#        operation = collapse(base)
#        self.buffer.do(finger, operation)
#        return Selection(self.buffer, finger, start, stop)
#    # COLLAPSE
#
#    def modify(self, kw):
#        self.buffer.do(self.finger, modify(kw))
#        return self
#    # MODIFY
#
#    @property
#    def bounds(self):
#        top = self.top
#        cursor = self.cursor - 1
#
#        while cursor > 0:
#            if isinstance(top[cursor], element):
#                break
#            if isinstance(top[cursor-1], element):
#                break
#            cursor -= 1
#        start = cursor
#
#        cursor = self.cursor + 1
#        while cursor < len(top):
#            if isinstance(top[cursor], element):
#                break
#            if isinstance(top[cursor-1], element):
#                break
#            cursor += 1
#        stop = cursor
#
#        return start, stop
#
#    @property
#    def textbounds(self):
#        top = self.top
#        cursor = self.cursor
#        while cursor > 0:
#            if isinstance(top[cursor-1], element):
#                break
#            cursor -= 1
#        start = cursor
#
#        while cursor < len(top):
#            if isinstance(top[cursor], element):
#                break
#            cursor += 1
#        stop = cursor
#
#        return start, stop
#
#    def walk_backward(self):
#        top = self.top
#        cursor = self.cursor
#        if cursor == 0:
#            if self.ascendable:
#                finger, cursor = pull(self.finger)
#                return Selection(self.buffer, finger, cursor, cursor)
#            return self
#        elif isinstance(top[cursor-1], element):
#            finger = push(self.finger, cursor-1)
#            cursor = len(top[cursor-1])
#            return Selection(self.buffer, finger, cursor, cursor)
#        else:
#            cursor = self.bounds[0]
#            return Selection(self.buffer, self.finger, cursor, cursor)
#
#    def walk_forward(self):
#        top = self.top
#        cursor = self.cursor
#        if cursor >= len(top):
#            if self.ascendable:
#                finger, cursor = pull(self.finger)
#                return Selection(self.buffer, finger, cursor+1, cursor+1)
#            return self
#        elif isinstance(top[cursor], element):
#            finger = push(self.finger, cursor)
#            cursor = 0
#            return Selection(self.buffer, finger, cursor, cursor)
#        else:
#            cursor = self.bounds[1]
#            return Selection(self.buffer, self.finger, cursor, cursor)
#    # NAVIGATE (UP, DOWN, LEFT+[SH], RIGHT+[SH], LB, RB)
#
#    def grasp(self, start, stop):
#        if self.cursor < self.tail:
#            cursor, tail = start, stop
#        else:
#            tail, cursor = start, stop
#        return Selection(self.buffer, self.finger, cursor, tail)
gpl-3.0
40,778,738,927,370,540
33.064343
156
0.557217
false
3.668014
false
false
false
google/material-design-icons
update/venv/lib/python3.9/site-packages/fontTools/pens/basePen.py
3
13126
"""fontTools.pens.basePen.py -- Tools and base classes to build pen objects. The Pen Protocol A Pen is a kind of object that standardizes the way how to "draw" outlines: it is a middle man between an outline and a drawing. In other words: it is an abstraction for drawing outlines, making sure that outline objects don't need to know the details about how and where they're being drawn, and that drawings don't need to know the details of how outlines are stored. The most basic pattern is this: outline.draw(pen) # 'outline' draws itself onto 'pen' Pens can be used to render outlines to the screen, but also to construct new outlines. Eg. an outline object can be both a drawable object (it has a draw() method) as well as a pen itself: you *build* an outline using pen methods. The AbstractPen class defines the Pen protocol. It implements almost nothing (only no-op closePath() and endPath() methods), but is useful for documentation purposes. Subclassing it basically tells the reader: "this class implements the Pen protocol.". An examples of an AbstractPen subclass is fontTools.pens.transformPen.TransformPen. The BasePen class is a base implementation useful for pens that actually draw (for example a pen renders outlines using a native graphics engine). BasePen contains a lot of base functionality, making it very easy to build a pen that fully conforms to the pen protocol. Note that if you subclass BasePen, you _don't_ override moveTo(), lineTo(), etc., but _moveTo(), _lineTo(), etc. See the BasePen doc string for details. Examples of BasePen subclasses are fontTools.pens.boundsPen.BoundsPen and fontTools.pens.cocoaPen.CocoaPen. Coordinates are usually expressed as (x, y) tuples, but generally any sequence of length 2 will do. """ from typing import Tuple from fontTools.misc.loggingTools import LogMixin __all__ = ["AbstractPen", "NullPen", "BasePen", "decomposeSuperBezierSegment", "decomposeQuadraticSegment"] class AbstractPen: def moveTo(self, pt: Tuple[float, float]) -> None: """Begin a new sub path, set the current point to 'pt'. You must end each sub path with a call to pen.closePath() or pen.endPath(). """ raise NotImplementedError def lineTo(self, pt: Tuple[float, float]) -> None: """Draw a straight line from the current point to 'pt'.""" raise NotImplementedError def curveTo(self, *points: Tuple[float, float]) -> None: """Draw a cubic bezier with an arbitrary number of control points. The last point specified is on-curve, all others are off-curve (control) points. If the number of control points is > 2, the segment is split into multiple bezier segments. This works like this: Let n be the number of control points (which is the number of arguments to this call minus 1). If n==2, a plain vanilla cubic bezier is drawn. If n==1, we fall back to a quadratic segment and if n==0 we draw a straight line. It gets interesting when n>2: n-1 PostScript-style cubic segments will be drawn as if it were one curve. See decomposeSuperBezierSegment(). The conversion algorithm used for n>2 is inspired by NURB splines, and is conceptually equivalent to the TrueType "implied points" principle. See also decomposeQuadraticSegment(). """ raise NotImplementedError def qCurveTo(self, *points: Tuple[float, float]) -> None: """Draw a whole string of quadratic curve segments. The last point specified is on-curve, all others are off-curve points. 
This method implements TrueType-style curves, breaking up curves using 'implied points': between each two consequtive off-curve points, there is one implied point exactly in the middle between them. See also decomposeQuadraticSegment(). The last argument (normally the on-curve point) may be None. This is to support contours that have NO on-curve points (a rarely seen feature of TrueType outlines). """ raise NotImplementedError def closePath(self) -> None: """Close the current sub path. You must call either pen.closePath() or pen.endPath() after each sub path. """ pass def endPath(self) -> None: """End the current sub path, but don't close it. You must call either pen.closePath() or pen.endPath() after each sub path. """ pass def addComponent( self, glyphName: str, transformation: Tuple[float, float, float, float, float, float] ) -> None: """Add a sub glyph. The 'transformation' argument must be a 6-tuple containing an affine transformation, or a Transform object from the fontTools.misc.transform module. More precisely: it should be a sequence containing 6 numbers. """ raise NotImplementedError class NullPen(AbstractPen): """A pen that does nothing. """ def moveTo(self, pt): pass def lineTo(self, pt): pass def curveTo(self, *points): pass def qCurveTo(self, *points): pass def closePath(self): pass def endPath(self): pass def addComponent(self, glyphName, transformation): pass class LoggingPen(LogMixin, AbstractPen): """A pen with a `log` property (see fontTools.misc.loggingTools.LogMixin) """ pass class MissingComponentError(KeyError): """Indicates a component pointing to a non-existent glyph in the glyphset.""" class DecomposingPen(LoggingPen): """ Implements a 'addComponent' method that decomposes components (i.e. draws them onto self as simple contours). It can also be used as a mixin class (e.g. see ContourRecordingPen). You must override moveTo, lineTo, curveTo and qCurveTo. You may additionally override closePath, endPath and addComponent. By default a warning message is logged when a base glyph is missing; set the class variable ``skipMissingComponents`` to False if you want to raise a :class:`MissingComponentError` exception. """ skipMissingComponents = True def __init__(self, glyphSet): """ Takes a single 'glyphSet' argument (dict), in which the glyphs that are referenced as components are looked up by their name. """ super(DecomposingPen, self).__init__() self.glyphSet = glyphSet def addComponent(self, glyphName, transformation): """ Transform the points of the base glyph and draw it onto self. """ from fontTools.pens.transformPen import TransformPen try: glyph = self.glyphSet[glyphName] except KeyError: if not self.skipMissingComponents: raise MissingComponentError(glyphName) self.log.warning( "glyph '%s' is missing from glyphSet; skipped" % glyphName) else: tPen = TransformPen(self, transformation) glyph.draw(tPen) class BasePen(DecomposingPen): """Base class for drawing pens. You must override _moveTo, _lineTo and _curveToOne. You may additionally override _closePath, _endPath, addComponent and/or _qCurveToOne. You should not override any other methods. 
""" def __init__(self, glyphSet=None): super(BasePen, self).__init__(glyphSet) self.__currentPoint = None # must override def _moveTo(self, pt): raise NotImplementedError def _lineTo(self, pt): raise NotImplementedError def _curveToOne(self, pt1, pt2, pt3): raise NotImplementedError # may override def _closePath(self): pass def _endPath(self): pass def _qCurveToOne(self, pt1, pt2): """This method implements the basic quadratic curve type. The default implementation delegates the work to the cubic curve function. Optionally override with a native implementation. """ pt0x, pt0y = self.__currentPoint pt1x, pt1y = pt1 pt2x, pt2y = pt2 mid1x = pt0x + 0.66666666666666667 * (pt1x - pt0x) mid1y = pt0y + 0.66666666666666667 * (pt1y - pt0y) mid2x = pt2x + 0.66666666666666667 * (pt1x - pt2x) mid2y = pt2y + 0.66666666666666667 * (pt1y - pt2y) self._curveToOne((mid1x, mid1y), (mid2x, mid2y), pt2) # don't override def _getCurrentPoint(self): """Return the current point. This is not part of the public interface, yet is useful for subclasses. """ return self.__currentPoint def closePath(self): self._closePath() self.__currentPoint = None def endPath(self): self._endPath() self.__currentPoint = None def moveTo(self, pt): self._moveTo(pt) self.__currentPoint = pt def lineTo(self, pt): self._lineTo(pt) self.__currentPoint = pt def curveTo(self, *points): n = len(points) - 1 # 'n' is the number of control points assert n >= 0 if n == 2: # The common case, we have exactly two BCP's, so this is a standard # cubic bezier. Even though decomposeSuperBezierSegment() handles # this case just fine, we special-case it anyway since it's so # common. self._curveToOne(*points) self.__currentPoint = points[-1] elif n > 2: # n is the number of control points; split curve into n-1 cubic # bezier segments. The algorithm used here is inspired by NURB # splines and the TrueType "implied point" principle, and ensures # the smoothest possible connection between two curve segments, # with no disruption in the curvature. It is practical since it # allows one to construct multiple bezier segments with a much # smaller amount of points. _curveToOne = self._curveToOne for pt1, pt2, pt3 in decomposeSuperBezierSegment(points): _curveToOne(pt1, pt2, pt3) self.__currentPoint = pt3 elif n == 1: self.qCurveTo(*points) elif n == 0: self.lineTo(points[0]) else: raise AssertionError("can't get there from here") def qCurveTo(self, *points): n = len(points) - 1 # 'n' is the number of control points assert n >= 0 if points[-1] is None: # Special case for TrueType quadratics: it is possible to # define a contour with NO on-curve points. BasePen supports # this by allowing the final argument (the expected on-curve # point) to be None. We simulate the feature by making the implied # on-curve point between the last and the first off-curve points # explicit. x, y = points[-2] # last off-curve point nx, ny = points[0] # first off-curve point impliedStartPoint = (0.5 * (x + nx), 0.5 * (y + ny)) self.__currentPoint = impliedStartPoint self._moveTo(impliedStartPoint) points = points[:-1] + (impliedStartPoint,) if n > 0: # Split the string of points into discrete quadratic curve # segments. Between any two consecutive off-curve points # there's an implied on-curve point exactly in the middle. # This is where the segment splits. 
_qCurveToOne = self._qCurveToOne for pt1, pt2 in decomposeQuadraticSegment(points): _qCurveToOne(pt1, pt2) self.__currentPoint = pt2 else: self.lineTo(points[0]) def decomposeSuperBezierSegment(points): """Split the SuperBezier described by 'points' into a list of regular bezier segments. The 'points' argument must be a sequence with length 3 or greater, containing (x, y) coordinates. The last point is the destination on-curve point, the rest of the points are off-curve points. The start point should not be supplied. This function returns a list of (pt1, pt2, pt3) tuples, which each specify a regular curveto-style bezier segment. """ n = len(points) - 1 assert n > 1 bezierSegments = [] pt1, pt2, pt3 = points[0], None, None for i in range(2, n+1): # calculate points in between control points. nDivisions = min(i, 3, n-i+2) for j in range(1, nDivisions): factor = j / nDivisions temp1 = points[i-1] temp2 = points[i-2] temp = (temp2[0] + factor * (temp1[0] - temp2[0]), temp2[1] + factor * (temp1[1] - temp2[1])) if pt2 is None: pt2 = temp else: pt3 = (0.5 * (pt2[0] + temp[0]), 0.5 * (pt2[1] + temp[1])) bezierSegments.append((pt1, pt2, pt3)) pt1, pt2, pt3 = temp, None, None bezierSegments.append((pt1, points[-2], points[-1])) return bezierSegments def decomposeQuadraticSegment(points): """Split the quadratic curve segment described by 'points' into a list of "atomic" quadratic segments. The 'points' argument must be a sequence with length 2 or greater, containing (x, y) coordinates. The last point is the destination on-curve point, the rest of the points are off-curve points. The start point should not be supplied. This function returns a list of (pt1, pt2) tuples, which each specify a plain quadratic bezier segment. """ n = len(points) - 1 assert n > 0 quadSegments = [] for i in range(n - 1): x, y = points[i] nx, ny = points[i+1] impliedPt = (0.5 * (x + nx), 0.5 * (y + ny)) quadSegments.append((points[i], impliedPt)) quadSegments.append((points[-2], points[-1])) return quadSegments class _TestPen(BasePen): """Test class that prints PostScript to stdout.""" def _moveTo(self, pt): print("%s %s moveto" % (pt[0], pt[1])) def _lineTo(self, pt): print("%s %s lineto" % (pt[0], pt[1])) def _curveToOne(self, bcp1, bcp2, pt): print("%s %s %s %s %s %s curveto" % (bcp1[0], bcp1[1], bcp2[0], bcp2[1], pt[0], pt[1])) def _closePath(self): print("closepath") if __name__ == "__main__": pen = _TestPen(None) pen.moveTo((0, 0)) pen.lineTo((0, 100)) pen.curveTo((50, 75), (60, 50), (50, 25), (0, 0)) pen.closePath() pen = _TestPen(None) # testing the "no on-curve point" scenario pen.qCurveTo((0, 0), (0, 100), (100, 100), (100, 0), None) pen.closePath()
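A minimal sketch of a concrete pen built on the base class above. It only overrides the three required methods and emits an SVG-like path string; the class is a made-up illustration, not the real fontTools SVGPathPen:

class SVGishPathPen(BasePen):  # hypothetical example
    def __init__(self, glyphSet=None):
        BasePen.__init__(self, glyphSet)
        self.commands = []

    def _moveTo(self, pt):
        self.commands.append("M %s %s" % pt)

    def _lineTo(self, pt):
        self.commands.append("L %s %s" % pt)

    def _curveToOne(self, bcp1, bcp2, pt):
        self.commands.append("C %s %s %s %s %s %s" % (bcp1 + bcp2 + pt))

pen = SVGishPathPen()
pen.moveTo((0, 0))
pen.curveTo((10, 20), (30, 40), (50, 0))   # exactly two BCPs: one cubic segment
pen.closePath()
print(" ".join(pen.commands))              # M 0 0 C 10 20 30 40 50 0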
apache-2.0
4,968,359,647,964,168,000
31.490099
78
0.710879
false
3.233801
false
false
false
almarklein/bokeh
bokeh/server/views/data.py
1
2797
import json

from flask import jsonify, request
from six import iteritems

from bokeh import protocol
from bokeh.models import Range1d

from .backbone import init_bokeh
from ..app import bokeh_app
from ..crossdomain import crossdomain
from ..serverbb import prune
from ..views import make_json

@bokeh_app.route("/bokeh/data/<username>", methods=['GET', 'OPTIONS'])
@crossdomain(origin="*", headers=['BOKEH-API-KEY', 'Continuum-Clientid'])
def list_sources(username):
    bokehuser = bokeh_app.authentication.current_user()
    request_username = bokehuser.username
    sources = bokeh_app.datamanager.list_data_sources(request_username, username)
    return jsonify(sources=sources)

def _make_range(r):
    """Create a range from the start/end values passed.

    This function is required because some BokehJS Range objects
    have ids but some don't and some have docs but some don't...
    so this is sort of a #Hack....

    This may be removed when a better plot_state mechanism is created.
    """
    return Range1d(start=r['start'], end=r['end'])

@bokeh_app.route("/bokeh/data/<username>/<docid>/<datasourceid>", methods=['GET', 'OPTIONS'])
@crossdomain(origin="*", headers=['BOKEH-API-KEY', 'Continuum-Clientid'])
def get_data(username, docid, datasourceid):
    bokehuser = bokeh_app.authentication.current_user()
    request_username = bokehuser.username
    # handle docid later...
    clientdoc = bokeh_app.backbone_storage.get_document(docid)
    prune(clientdoc)
    init_bokeh(clientdoc)
    serverdatasource = clientdoc._models[datasourceid]
    parameters = json.loads(request.values.get('resample_parameters'))
    plot_state = json.loads(request.values.get('plot_state'))
    render_state = json.loads(request.values.get('render_state')) if 'render_state' in request.values else None

    # TODO: Deserializing directly to ranges....awk-ward.
    # There is probably a better way via the properties system that detects type...probably...
    # Possibly pass the whole plot_view object through instead of just the fragments we get with this mechanism
    plot_state = dict([(k, _make_range(r)) for k, r in iteritems(plot_state)])
    result = bokeh_app.datamanager.get_data(
        request_username,
        serverdatasource,
        parameters,
        plot_state,
        render_state)

    json_result = make_json(protocol.serialize_json(result))
    return json_result

@bokeh_app.route("/bokeh/data/upload/<username>/<name>", methods=['POST'])
def upload(username, name):
    bokehuser = bokeh_app.authentication.current_user()
    request_username = bokehuser.username
    f = request.files['file']
    url = bokeh_app.datamanager.write(request_username, name, f)
    return url
bsd-3-clause
-7,211,432,925,990,131,000
37.847222
111
0.6936
false
3.842033
false
false
false
mricharleon/HatosGanaderos
userena/urls.py
1
4539
from django.conf.urls import *
from django.contrib.auth import views as auth_views

from userena import views as userena_views
from userena import settings as userena_settings
from userena.compat import auth_views_compat_quirks, password_reset_uid_kwarg


def merged_dict(dict_a, dict_b):
    """Merges two dicts and returns output. Its purpose is to ease use of
    ``auth_views_compat_quirks``
    """
    dict_a.update(dict_b)
    return dict_a

urlpatterns = patterns('',
    # Signup, signin and signout
    url(r'^add_profile/$',
       userena_views.add_profile,
       name='add_profile'),
    url(r'^signup/$',
       userena_views.signup,
       name='userena_signup'),
    url(r'^signin/$',
       userena_views.signin,
       name='userena_signin'),
    url(r'^signout/$',
       userena_views.signout,
       name='userena_signout'),

    # Reset password
    url(r'^password/reset/$',
       auth_views.password_reset,
       merged_dict({'template_name': 'userena/password_reset_form.html',
                    'email_template_name': 'userena/emails/password_reset_message.txt',
                    'extra_context': {'without_usernames': userena_settings.USERENA_WITHOUT_USERNAMES}
                   }, auth_views_compat_quirks['userena_password_reset']),
       name='userena_password_reset'),
    url(r'^password/reset/done/$',
       auth_views.password_reset_done,
       {'template_name': 'userena/password_reset_done.html',},
       name='userena_password_reset_done'),
    url(r'^password/reset/confirm/(?P<%s>[0-9A-Za-z]+)-(?P<token>.+)/$' % password_reset_uid_kwarg,
       auth_views.password_reset_confirm,
       merged_dict({'template_name': 'userena/password_reset_confirm_form.html',
                   }, auth_views_compat_quirks['userena_password_reset_confirm']),
       name='userena_password_reset_confirm'),
    url(r'^password/reset/confirm/complete/$',
       auth_views.password_reset_complete,
       {'template_name': 'userena/password_reset_complete.html'},
       name='userena_password_reset_complete'),

    # Signup
    url(r'^(?P<username>[\.\w-]+)/signup/complete/$',
       userena_views.direct_to_user_template,
       {'template_name': 'userena/signup_complete.html',
        'extra_context': {'userena_activation_required': userena_settings.USERENA_ACTIVATION_REQUIRED,
                          'userena_activation_days': userena_settings.USERENA_ACTIVATION_DAYS}},
       name='userena_signup_complete'),

    # Activate
    url(r'^activate/(?P<activation_key>\w+)/$',
       userena_views.activate,
       name='userena_activate'),

    # Retry activation
    url(r'^activate/retry/(?P<activation_key>\w+)/$',
       userena_views.activate_retry,
       name='userena_activate_retry'),

    # Change email and confirm it
    url(r'^(?P<username>[\.\w-]+)/email/$',
       userena_views.email_change,
       name='userena_email_change'),
    url(r'^(?P<username>[\.\w-]+)/email/complete/$',
       userena_views.direct_to_user_template,
       {'template_name': 'userena/email_change_complete.html'},
       name='userena_email_change_complete'),
    url(r'^(?P<username>[\.\w-]+)/confirm-email/complete/$',
       userena_views.direct_to_user_template,
       {'template_name': 'userena/email_confirm_complete.html'},
       name='userena_email_confirm_complete'),
    url(r'^confirm-email/(?P<confirmation_key>\w+)/$',
       userena_views.email_confirm,
       name='userena_email_confirm'),

    # Disabled account
    url(r'^(?P<username>[\.\w-]+)/disabled/$',
       userena_views.disabled_account,
       {'template_name': 'userena/disabled.html'},
       name='userena_disabled'),

    # Change password
    url(r'^(?P<username>[\.\w-]+)/password/$',
       userena_views.password_change,
       name='userena_password_change'),
    url(r'^(?P<username>[\.\w-]+)/password/complete/$',
       userena_views.direct_to_user_template,
       {'template_name': 'userena/password_complete.html'},
       name='userena_password_change_complete'),

    # Edit profile
    url(r'^(?P<username>[\.\w-]+)/edit/$',
       userena_views.profile_edit,
       name='userena_profile_edit'),

    # View profiles
    url(r'^(?P<username>(?!signout|signup|signin)[\.\w-]+)/$',
       userena_views.profile_detail,
       name='userena_profile_detail'),
    url(r'^page/(?P<page>[0-9]+)/$',
       userena_views.ProfileListView.as_view(),
       name='userena_profile_list_paginated'),
    url(r'^$',
       userena_views.ProfileListView.as_view(),
       name='userena_profile_list'),
)
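A hedged sketch of wiring this URLconf into a project's root urls.py; the `accounts/` prefix is a conventional choice, not mandated by the module above:

# Hypothetical project-level urls.py for the same Django era as above.
from django.conf.urls import patterns, include, url

urlpatterns = patterns('',
    url(r'^accounts/', include('userena.urls')),
)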
gpl-2.0
-6,829,509,290,407,092,000
37.142857
102
0.625028
false
3.524068
false
false
false
thica/ORCA-Remote
src/ORCA/settings/setttingtypes/SettingActions.py
1
4810
# -*- coding: utf-8 -*-
"""
    ORCA Open Remote Control Application
    Copyright (C) 2013-2020  Carsten Thielepape
    Please contact me by : http://www.orca-remote.org/

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program. If not, see <http://www.gnu.org/licenses/>.
"""

from typing import List
from typing import Union

from xml.etree.ElementTree import Element

from kivy.uix.popup import Popup
from kivy.uix.widget import Widget

import ORCA.Globals as Globals

from ORCA.settings.setttingtypes.SettingScrollOptionsWithOptions import SettingScrollOptionsWithOptions
from ORCA.settings.setttingtypes.SettingScrollOptions import ScrollOptionsPopUp
from ORCA.utils.XML import LoadXMLFile
from ORCA.utils.XML import Orca_include
from ORCA.utils.XML import orca_et_loader
from ORCA.utils.XML import GetXMLTextAttribute
from ORCA.utils.LogError import LogError
from ORCA.utils.FileName import cFileName
from ORCA.vars.Replace import ReplaceVars

__all__ = ['SettingActions']

class SettingActions(SettingScrollOptionsWithOptions):
    """ A setting class to select actions from the action list or from a codeset """

    def __init__(self, **kwargs):
        self.aCodesetCmds:List[str] = []
        self.oActionPopup:Union[Popup,None] = None
        self.oCodeSetActionsScrollOptionsPopup:Union[Popup,None] = None
        kwargs["options"] = [ReplaceVars("$lvar(742)"), ReplaceVars("$lvar(743)"), ReplaceVars("$lvar(744)")]
        kwargs["suboptions"] = [["$ACTIONLISTSEND"], ["$ACTIONLIST"], ["$FILELIST[%s]" % Globals.oPathCodesets.string]]
        super().__init__(**kwargs)

    def _set_suboption(self, instance:Widget) -> None:
        """ called, when the second option is selected """
        if instance.text.startswith('CODESET_'):
            self.subpopup.dismiss()
            self.popup.dismiss()
            self._ReadCodeset(instance.text)
            self._ShowCodesetCodesPopup(instance.text)
        else:
            self.value = instance.text
            self.subpopup.dismiss()
            self.popup.dismiss()

    def _set_suboptioncodesetaction(self, instance:Widget) -> None:
        """ called, when a codesetcode is selected """
        self.value = "SendCommand " + instance.text
        self.oActionPopup.dismiss()

    def _ReadCodeset(self, uFN:str) -> None:
        oXMLCode:Element
        uCmd:str

        del self.aCodesetCmds[:]
        try:
            oXMLCodeset:Element = LoadXMLFile(oFile=cFileName(Globals.oPathCodesets) + uFN)
            Orca_include(oXMLCodeset, orca_et_loader)
            if oXMLCodeset is not None:
                # First read imported codesets
                oXMLImports:Element = oXMLCodeset.find('imports')
                if oXMLImports is not None:
                    oXMLImportCodesets:Element = oXMLImports.find('codeset')
                    if oXMLImportCodesets is not None:
                        for oXMLCode in oXMLImportCodesets.findall('code'):
                            uCmd = GetXMLTextAttribute(oXMLNode=oXMLCode, uTag='action', bMandatory=False, vDefault='')
                            if uCmd:
                                self.aCodesetCmds.append(uCmd)
                for oXMLCode in oXMLCodeset.findall('code'):
                    uCmd = GetXMLTextAttribute(oXMLNode=oXMLCode, uTag='action', bMandatory=False, vDefault='')
                    if uCmd:
                        self.aCodesetCmds.append(uCmd)
        except Exception as e:
            LogError(uMsg='Error Reading Codeset', oException=e)

    def _ShowCodesetCodesPopup(self, uFN:str) -> None:
        kwargs = {'title': uFN, 'options': sorted(self.aCodesetCmds)}
        self.oCodeSetActionsScrollOptionsPopup = ScrollOptionsPopUp(**kwargs)
        self.oCodeSetActionsScrollOptionsPopup.CreatePopup(self.value, self._set_suboptioncodesetaction, None)
        self.oActionPopup = self.oCodeSetActionsScrollOptionsPopup.popup
gpl-3.0
-6,715,954,626,130,124,000
45.156863
119
0.619543
false
4.208224
false
false
false
whiteclover/Choco
test/test_inheritance.py
1
10179
from choco import lookup, compat
import unittest

from test.util import result_lines


class InheritanceTest(unittest.TestCase):
    def test_basic(self):
        collection = lookup.TemplateLookup()

        collection.put_string('main', """
<%inherit file="base"/>

<%def name="header()">
    main header.
</%def>

this is the content.
""")

        collection.put_string('base', """
This is base.

header: ${self.header()}

body: ${self.body()}

footer: ${self.footer()}

<%def name="footer()">
    this is the footer. header again ${next.header()}
</%def>
""")

        assert result_lines(collection.get_template('main').render()) == [
            'This is base.',
            'header:',
            'main header.',
            'body:',
            'this is the content.',
            'footer:',
            'this is the footer. header again',
            'main header.'
        ]

    def test_multilevel_nesting(self):
        collection = lookup.TemplateLookup()

        collection.put_string('main', """
<%inherit file="layout"/>
<%def name="d()">main_d</%def>
main_body ${parent.d()}
full stack from the top:
    ${self.name} ${parent.name} ${parent.context['parent'].name} ${parent.context['parent'].context['parent'].name}
""")

        collection.put_string('layout', """
<%inherit file="general"/>
<%def name="d()">layout_d</%def>
layout_body
parent name: ${parent.name}
${parent.d()}
${parent.context['parent'].d()}
${next.body()}
""")

        collection.put_string('general', """
<%inherit file="base"/>
<%def name="d()">general_d</%def>
general_body
${next.d()}
${next.context['next'].d()}
${next.body()}
""")
        collection.put_string('base', """
base_body
full stack from the base:
    ${self.name} ${self.context['parent'].name} ${self.context['parent'].context['parent'].name} ${self.context['parent'].context['parent'].context['parent'].name}
${next.body()}
<%def name="d()">base_d</%def>
""")

        assert result_lines(collection.get_template('main').render()) == [
            'base_body',
            'full stack from the base:',
            'self:main self:layout self:general self:base',
            'general_body',
            'layout_d',
            'main_d',
            'layout_body',
            'parent name: self:general',
            'general_d',
            'base_d',
            'main_body layout_d',
            'full stack from the top:',
            'self:main self:layout self:general self:base'
        ]

    def test_includes(self):
        """test that an included template also has its full hierarchy invoked."""
        collection = lookup.TemplateLookup()

        collection.put_string("base", """
<%def name="a()">base_a</%def>
This is the base.
${next.body()}
End base.
""")

        collection.put_string("index","""
<%inherit file="base"/>
this is index.
a is: ${self.a()}
<%include file="secondary"/>
""")

        collection.put_string("secondary","""
<%inherit file="base"/>
this is secondary.
a is: ${self.a()}
""")

        assert result_lines(collection.get_template("index").render()) == [
            'This is the base.',
            'this is index.',
            'a is: base_a',
            'This is the base.',
            'this is secondary.',
            'a is: base_a',
            'End base.',
            'End base.'
        ]

    def test_namespaces(self):
        """test that templates used via <%namespace> have access to an inheriting 'self',
        and that the full 'self' is also exported."""
        collection = lookup.TemplateLookup()

        collection.put_string("base", """
<%def name="a()">base_a</%def>
<%def name="b()">base_b</%def>
This is the base.
${next.body()}
""")

        collection.put_string("layout", """
<%inherit file="base"/>
<%def name="a()">layout_a</%def>
This is the layout..
${next.body()}
""")

        collection.put_string("index","""
<%inherit file="base"/>
<%namespace name="sc" file="secondary"/>
this is index.
a is: ${self.a()}
sc.a is: ${sc.a()}
sc.b is: ${sc.b()}
sc.c is: ${sc.c()}
sc.body is: ${sc.body()}
""")

        collection.put_string("secondary","""
<%inherit file="layout"/>
<%def name="c()">secondary_c.  a is ${self.a()} b is ${self.b()} d is ${self.d()}</%def>
<%def name="d()">secondary_d.</%def>
this is secondary.
a is: ${self.a()}
c is: ${self.c()}
""")

        assert result_lines(collection.get_template('index').render()) == [
            'This is the base.',
            'this is index.',
            'a is: base_a',
            'sc.a is: layout_a',
            'sc.b is: base_b',
            'sc.c is: secondary_c. a is layout_a b is base_b d is secondary_d.',
            'sc.body is:',
            'this is secondary.',
            'a is: layout_a',
            'c is: secondary_c. a is layout_a b is base_b d is secondary_d.'
        ]

    def test_pageargs(self):
        collection = lookup.TemplateLookup()
        collection.put_string("base", """
            this is the base.
            <%
            sorted_ = pageargs.items()
            sorted_ = sorted(sorted_)
            %>
            pageargs: (type: ${type(pageargs)}) ${sorted_}
            <%def name="foo()">
                ${next.body(**context.kwargs)}
            </%def>

            ${foo()}
        """)
        collection.put_string("index", """
            <%inherit file="base"/>
            <%page args="x, y, z=7"/>
            print ${x}, ${y}, ${z}
        """)

        if compat.py3k:
            assert result_lines(collection.get_template('index').render_unicode(x=5, y=10)) == [
                "this is the base.",
                "pageargs: (type: <class 'dict'>) [('x', 5), ('y', 10)]",
                "print 5, 10, 7"
            ]
        else:
            assert result_lines(collection.get_template('index').render_unicode(x=5, y=10)) == [
                "this is the base.",
                "pageargs: (type: <type 'dict'>) [('x', 5), ('y', 10)]",
                "print 5, 10, 7"
            ]

    def test_pageargs_2(self):
        collection = lookup.TemplateLookup()
        collection.put_string("base", """
            this is the base.

            ${next.body(**context.kwargs)}

            <%def name="foo(**kwargs)">
                ${next.body(**kwargs)}
            </%def>

            <%def name="bar(**otherargs)">
                ${next.body(z=16, **context.kwargs)}
            </%def>

            ${foo(x=12, y=15, z=8)}
            ${bar(x=19, y=17)}
        """)
        collection.put_string("index", """
            <%inherit file="base"/>
            <%page args="x, y, z=7"/>
            pageargs: ${x}, ${y}, ${z}
        """)
        assert result_lines(collection.get_template('index').render(x=5, y=10)) == [
            "this is the base.",
            "pageargs: 5, 10, 7",
            "pageargs: 12, 15, 8",
            "pageargs: 5, 10, 16"
        ]

    def test_pageargs_err(self):
        collection = lookup.TemplateLookup()
        collection.put_string("base", """
            this is the base.
            ${next.body()}
        """)
        collection.put_string("index", """
            <%inherit file="base"/>
            <%page args="x, y, z=7"/>
            print ${x}, ${y}, ${z}
        """)
        try:
            print(collection.get_template('index').render(x=5, y=10))
            assert False
        except TypeError:
            assert True

    def test_toplevel(self):
        collection = lookup.TemplateLookup()
        collection.put_string("base", """
            this is the base.
            ${next.body()}
        """)
        collection.put_string("index", """
            <%inherit file="base"/>
            this is the body
        """)
        assert result_lines(collection.get_template('index').render()) == [
            "this is the base.",
            "this is the body"
        ]
        assert result_lines(collection.get_template('index').get_def("body").render()) == [
            "this is the body"
        ]

    def test_dynamic(self):
        collection = lookup.TemplateLookup()
        collection.put_string("base", """
            this is the base.
            ${next.body()}
        """)
        collection.put_string("index", """
            <%!
                def dyn(context):
                    if context.get('base', None) is not None:
                        return 'base'
                    else:
                        return None
            %>
            <%inherit file="${dyn(context)}"/>
            this is index.
        """)
        assert result_lines(collection.get_template('index').render()) == [
            'this is index.'
        ]
        assert result_lines(collection.get_template('index').render(base=True)) == [
            'this is the base.',
            'this is index.'
        ]

    def test_in_call(self):
        collection = lookup.TemplateLookup()
        collection.put_string("/layout.html","""
        Super layout!
        <%call expr="self.grid()">
            ${next.body()}
        </%call>
        Oh yea!

        <%def name="grid()">
            Parent grid
                ${caller.body()}
            End Parent
        </%def>
        """)

        collection.put_string("/subdir/layout.html", """
        ${next.body()}

        <%def name="grid()">
           Subdir grid
               ${caller.body()}
           End subdir
        </%def>
        <%inherit file="/layout.html"/>
        """)

        collection.put_string("/subdir/renderedtemplate.html","""
        Holy smokes!
        <%inherit file="/subdir/layout.html"/>
        """)

        #print collection.get_template("/layout.html").code
        #print collection.get_template("/subdir/renderedtemplate.html").render()
        assert result_lines(collection.get_template("/subdir/renderedtemplate.html").render()) == [
            "Super layout!",
            "Subdir grid",
            "Holy smokes!",
            "End subdir",
            "Oh yea!"
        ]
mit
347,781,610,667,428,540
28.166189
163
0.483938
false
3.95301
true
false
false
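A condensed sketch of the mechanism these tests exercise may help when reading them: a child template names its parent with <%inherit>, and the parent decides where the child's body renders via ${next.body()}. The template names and strings below are illustrative only, reusing the same TemplateLookup API as the tests.

from choco import lookup

collection = lookup.TemplateLookup()
collection.put_string('base', """header ${next.body()} footer""")
collection.put_string('child', """<%inherit file="base"/>child content""")

# Rendering the child runs the base template, which splices the child's
# body in at ${next.body()}: "header child content footer"
print(collection.get_template('child').render())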
census-instrumentation/opencensus-python
contrib/opencensus-ext-zipkin/opencensus/ext/zipkin/trace_exporter/__init__.py
1
6920
# Copyright 2017, OpenCensus Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Export the spans data to Zipkin Collector.""" import json import logging import requests from opencensus.common.transports import sync from opencensus.common.utils import check_str_length, timestamp_to_microseconds from opencensus.trace import base_exporter DEFAULT_ENDPOINT = '/api/v2/spans' DEFAULT_HOST_NAME = 'localhost' DEFAULT_PORT = 9411 DEFAULT_PROTOCOL = 'http' ZIPKIN_HEADERS = {'Content-Type': 'application/json'} SPAN_KIND_MAP = { 0: None, # span kind unspecified 1: "SERVER", 2: "CLIENT", } SUCCESS_STATUS_CODE = (200, 202) class ZipkinExporter(base_exporter.Exporter): """Export the spans to Zipkin. See: http://zipkin.io/zipkin-api/# :type service_name: str :param service_name: Service that logged an annotation in a trace. Classifier when query for spans. :type host_name: str :param host_name: (Optional) The host name of the Zipkin server. :type port: int :param port: (Optional) The port of the Zipkin server. :type end_point: str :param end_point: (Optional) The path for the span exporting endpoint. :type protocol: str :param protocol: (Optional) The protocol used for the request. :type transport: :class:`type` :param transport: Class for creating new transport objects. It should extend from the base_exporter :class:`.Transport` type and implement :meth:`.Transport.export`. Defaults to :class:`.SyncTransport`. The other option is :class:`.AsyncTransport`. """ def __init__( self, service_name='my_service', host_name=DEFAULT_HOST_NAME, port=DEFAULT_PORT, endpoint=DEFAULT_ENDPOINT, protocol=DEFAULT_PROTOCOL, transport=sync.SyncTransport, ipv4=None, ipv6=None): self.service_name = service_name self.host_name = host_name self.port = port self.endpoint = endpoint self.protocol = protocol self.url = self.get_url self.transport = transport(self) self.ipv4 = ipv4 self.ipv6 = ipv6 @property def get_url(self): return '{}://{}:{}{}'.format( self.protocol, self.host_name, self.port, self.endpoint) def emit(self, span_datas): """Send SpanData tuples to Zipkin server, default using the v2 API. :type span_datas: list of :class: `~opencensus.trace.span_data.SpanData` :param list of opencensus.trace.span_data.SpanData span_datas: SpanData tuples to emit """ try: zipkin_spans = self.translate_to_zipkin(span_datas) result = requests.post( url=self.url, data=json.dumps(zipkin_spans), headers=ZIPKIN_HEADERS) if result.status_code not in SUCCESS_STATUS_CODE: logging.error( "Failed to send spans to Zipkin server! Spans are {}" .format(zipkin_spans)) except Exception as e: # pragma: NO COVER logging.error(getattr(e, 'message', e)) def export(self, span_datas): self.transport.export(span_datas) def translate_to_zipkin(self, span_datas): """Translate the opencensus spans to zipkin spans. :type span_datas: list of :class: `~opencensus.trace.span_data.SpanData` :param span_datas: SpanData tuples to emit :rtype: list :returns: List of zipkin format spans. 
""" local_endpoint = { 'serviceName': self.service_name, 'port': self.port, } if self.ipv4 is not None: local_endpoint['ipv4'] = self.ipv4 if self.ipv6 is not None: local_endpoint['ipv6'] = self.ipv6 zipkin_spans = [] for span in span_datas: # Timestamp in zipkin spans is int of microseconds. start_timestamp_mus = timestamp_to_microseconds(span.start_time) end_timestamp_mus = timestamp_to_microseconds(span.end_time) duration_mus = end_timestamp_mus - start_timestamp_mus zipkin_span = { 'traceId': span.context.trace_id, 'id': str(span.span_id), 'name': span.name, 'timestamp': int(round(start_timestamp_mus)), 'duration': int(round(duration_mus)), 'localEndpoint': local_endpoint, 'tags': _extract_tags_from_span(span.attributes), 'annotations': _extract_annotations_from_span(span), } span_kind = span.span_kind parent_span_id = span.parent_span_id if span_kind is not None: kind = SPAN_KIND_MAP.get(span_kind) # Zipkin API for span kind only accept # enum(CLIENT|SERVER|PRODUCER|CONSUMER|Absent) if kind is not None: zipkin_span['kind'] = kind if parent_span_id is not None: zipkin_span['parentId'] = str(parent_span_id) zipkin_spans.append(zipkin_span) return zipkin_spans def _extract_tags_from_span(attr): if attr is None: return {} tags = {} for attribute_key, attribute_value in attr.items(): if isinstance(attribute_value, (int, bool, float)): value = str(attribute_value) elif isinstance(attribute_value, str): res, _ = check_str_length(str_to_check=attribute_value) value = res else: logging.warning('Could not serialize tag %s', attribute_key) continue tags[attribute_key] = value return tags def _extract_annotations_from_span(span): """Extract and convert time event annotations to zipkin annotations""" if span.annotations is None: return [] annotations = [] for annotation in span.annotations: event_timestamp_mus = timestamp_to_microseconds(annotation.timestamp) annotations.append({'timestamp': int(round(event_timestamp_mus)), 'value': annotation.description}) return annotations
apache-2.0
-6,535,133,408,901,106,000
31.641509
79
0.598555
false
4.039696
false
false
false
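A minimal usage sketch for the exporter above; the service name and host are placeholders, and span_datas stands for a list of SpanData tuples produced by a tracer.

exporter = ZipkinExporter(
    service_name='checkout',         # placeholder service name
    host_name='zipkin.example.com',  # placeholder collector host
    port=9411,
)
print(exporter.get_url)  # -> http://zipkin.example.com:9411/api/v2/spans

# export() hands the spans to the configured transport (SyncTransport by
# default), which calls emit() to POST them to the v2 endpoint:
# exporter.export(span_datas)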
pihito/myHomeBox
devAsset/python/main.py
1
1055
from flask import Flask,render_template,request
from flask.ext.script import Manager

# declare the Flask server
app = Flask(__name__)

# declare the flask-script plug-in
manager = Manager(app)

# create the web route for the site root
# and bind it to the index function
@app.route("/")
def index():
    return render_template('index.html')

# create the new route and bind it to the hello function
@app.route('/hello/')
@app.route('/hello/<name>')
def hello(name=None):
    if name == None :
        # If the name is not in the URL, try to extract it from the request
        name = request.args.get('name',None)
    return render_template('hello.html', name=name)

@app.route('/hello2/')
@app.route('/hello2/<name>')
def hello2(name=None):
    if name == None :
        # If the name is not in the URL, try to extract it from the request
        name = request.args.get('name',None)
    return render_template('hello2.html', name=name)

if __name__ == "__main__":
    # launch the Flask server via the flask-script plug-in
    manager.run()
apache-2.0
-385,943,290,818,522,430
27.324324
82
0.671442
false
3.026012
false
false
false
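Assuming the file above is saved as main.py (per the record's path), flask-script's Manager exposes a built-in runserver command; one likely way to exercise the routes, shown here as comments:

# $ python main.py runserver
#
# Then, from another shell:
# $ curl http://127.0.0.1:5000/hello/Alice        # name taken from the URL
# $ curl "http://127.0.0.1:5000/hello/?name=Bob"  # name taken from the query string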
thanatoskira/AndroGuard
build/scripts-2.7/androapkinfo.py
1
3821
#!/usr/bin/python

# This file is part of Androguard.
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Androguard is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Androguard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Androguard. If not, see <http://www.gnu.org/licenses/>.

import sys, os

from optparse import OptionParser

from androguard.core import androconf
from androguard.core.bytecodes import apk
from androguard.core.bytecodes import dvm
from androguard.core.analysis import analysis

option_0 = { 'name' : ('-i', '--input'), 'help' : 'file : use this filename (APK)', 'nargs' : 1 }
option_1 = { 'name' : ('-d', '--directory'), 'help' : 'directory : use this directory', 'nargs' : 1 }
option_2 = { 'name' : ('-t', '--tag'), 'help' : 'display tags', 'action' : 'count' }
option_3 = { 'name' : ('-v', '--version'), 'help' : 'version', 'action' : 'count' }

options = [option_0, option_1, option_2, option_3]

def display_dvm_info(apk) :
    vm = dvm.DalvikVMFormat( apk.get_dex() )
    vmx = analysis.uVMAnalysis( vm )

    print "Native code:", analysis.is_native_code(vmx)
    print "Dynamic code:", analysis.is_dyn_code(vmx)
    print "Reflection code:", analysis.is_reflection_code(vmx)

    for i in vmx.get_methods() :
        i.create_tags()
        if not i.tags.empty() :
            print i.method.get_class_name(), i.method.get_name(), i.tags

def main(options, arguments) :
    if options.input != None :
        ret_type = androconf.is_android( options.input )
        print os.path.basename(options.input), ":"
        if ret_type == "APK" :
            try :
                a = apk.APK( options.input )
                if a.is_valid_APK() :
                    a.show()
                    display_dvm_info( a )
                else :
                    print "INVALID"
            except Exception, e :
                print "ERROR", e

    elif options.directory != None :
        for root, dirs, files in os.walk( options.directory, followlinks=True ) :
            if files != [] :
                for f in files :
                    real_filename = root
                    if real_filename[-1] != "/" :
                        real_filename += "/"
                    real_filename += f

                    ret_type = androconf.is_android( real_filename )
                    if ret_type == "APK" :
                        print os.path.basename( real_filename ), ":"
                        try :
                            a = apk.APK( real_filename )
                            if a.is_valid_APK() :
                                a.show()
                                display_dvm_info( a )
                            else :
                                # Note: a stray debug `raise("ooos")` here (and in the
                                # except branch below) aborted the directory walk on the
                                # first invalid APK; both leftovers have been removed so
                                # the walk continues, matching the single-file branch.
                                print "INVALID APK"
                        except Exception, e :
                            print "ERROR", e

    elif options.version != None :
        print "Androapkinfo version %s" % androconf.ANDROGUARD_VERSION

if __name__ == "__main__" :
    parser = OptionParser()
    for option in options :
        param = option['name']
        del option['name']
        parser.add_option(*param, **option)

    options, arguments = parser.parse_args()
    sys.argv[:] = arguments
    main(options, arguments)
lgpl-3.0
2,697,242,861,988,364,300
36.460784
101
0.548548
false
3.871327
false
false
false
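Grounded in the optparse options defined above, likely invocations of the script, plus the equivalent library calls it performs for a single APK (the .apk path is a placeholder):

# $ androapkinfo.py -i app.apk    # inspect one APK
# $ androapkinfo.py -d samples/   # walk a directory of APKs
# $ androapkinfo.py -v            # print the Androguard version

from androguard.core.bytecodes import apk

a = apk.APK("app.apk")  # placeholder path
if a.is_valid_APK():
    a.show()            # manifest/permissions summary, as in main()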
lhaze/dharma
pca/data/dao/abstract.py
1
7281
import typing as t from abc import abstractmethod from functools import reduce from operator import and_ from pca.data.errors import QueryErrors from pca.data.predicate import Predicate from pca.interfaces.dao import ( BatchOfDto, Dto, Id, IDao, Ids, IQueryChain, Kwargs, ) from pca.utils.dependency_injection import Component class QueryChain(IQueryChain): """ Technical detail of chaining queries. A proxy for a query interface of DAO, gathering lazy evaluated queries (ie. filter, sort, aggregate, etc) to call owning DAO to resolve them when non-lazy (ie. get, exists, count, update, etc) is called. """ # TODO lazy queries: order_by, aggregate, annotate # TODO evaluating queries: slicing _ids: Ids = None _filters: t.List[Predicate] = None def __init__(self, dao: "AbstractDao"): self._dao = dao @classmethod def _construct( cls, dao: "AbstractDao", filters: t.List[Predicate] = None, ids: t.List[Id] = None ) -> "QueryChain": """ Technical detail of creating a new QueryChain with specified argument. """ qc = cls(dao) qc._ids = ids qc._filters = filters return qc def _clone(self, filters: t.List[Predicate] = None, ids: t.List[Id] = None): """ Technical detail of cloning current QueryChain object extended by an additional argument. """ qc = self.__class__(self._dao) qc._ids = self._ids or ids if filters: qc._filters = (self._filters or []) + filters else: qc._filters = self._filters return qc def __repr__(self): return f"<QueryChain ids={self._ids}, filters={self._filters}>" @property def _is_trivial(self) -> bool: """Trivial QueryChain is the one that has no lazy operations defined.""" return not (self._filters or self._ids) @property def _reduced_filter(self) -> t.Optional[Predicate]: """Before evaluation, sum up all filter predicates into a single one""" return None if self._is_trivial else reduce(and_, self._filters) # lazy queries def filter(self, predicate: Predicate) -> "QueryChain": """ Filters out objects by the predicate specifying conditions that they should met. """ return self._clone(filters=[predicate]) def filter_by(self, id_: Id = None, ids: Ids = None) -> "QueryChain": """ Filters objects by a single id or a iterable of ids. :raises: InvalidQueryError if: * both `id_` and `ids` arguments are defined * or the query is already filtered by id """ if self._ids or bool(id_) == bool(ids): raise QueryErrors.CONFLICTING_QUERY_ARGUMENTS.with_params(id=id_, ids=ids) ids = ids or [id_] return self._clone(ids=ids) # evaluating queries def __iter__(self) -> Dto: """Yields values""" yield from self._dao._resolve_filter(self) def __len__(self) -> int: """Proxy for `count`.""" return self.count() def get(self, id_: Id) -> t.Optional[Dto]: """Returns object of given id, or None iff not present.""" qc = self.filter_by(id_=id_) filtered = self._dao._resolve_filter(qc) return self._dao._resolve_get(filtered, id_, nullable=True) def exists(self) -> bool: """Returns whether any object specified by the query exist.""" return self._dao._resolve_exists(self) def count(self) -> int: """ Counts objects filtering them out by the query specifying conditions that they should met. """ return self._dao._resolve_count(self) # evaluating commands def update(self, **update) -> Ids: """ Updates all objects specified by the query with given update. """ return self._dao._resolve_update(self, update) def remove(self) -> Ids: """ Removes all objects specified by the query from the collection. 
""" return self._dao._resolve_remove(self) class AbstractDao(IDao[Id], Component): """Base abstract implementation for Data Access Object.""" # lazy queries def all(self) -> QueryChain: """ Returns a query chain representing all objects. Useful to explicitly denote counting, updating or removing all objects. """ return QueryChain(self) def filter(self, predicate: Predicate) -> QueryChain: """ Filters out objects by the predicate specifying conditions that they should met. Can be chained via `QueryChain` helper class. """ return QueryChain._construct(self, filters=[predicate]) def filter_by(self, id_: Id = None, ids: Ids = None) -> IQueryChain: """ Filters objects by a single id or a iterable of ids. Can be chained with other queries via `IQueryChain` helper. :raises: InvalidQueryError iff both `id_` and `ids` arguments are defined. """ if bool(id_) == bool(ids): raise QueryErrors.CONFLICTING_QUERY_ARGUMENTS.with_params(id=id_, ids=ids) ids = ids or [id_] return QueryChain._construct(self, ids=ids) # evaluating queries def get(self, id_: Id) -> t.Optional[Dto]: """ Returns object of given id, or None iff not present. Shortcut for querying via `QueryChain.all`. """ qc = QueryChain._construct(self, ids=[id_]) filtered = self._resolve_filter(qc) return self._resolve_get(filtered, id_, nullable=True) @abstractmethod def _resolve_filter(self, query_chain: QueryChain) -> BatchOfDto: """Resolves filtering for any other resolving operation to compute.""" @abstractmethod def _resolve_get(self, dtos: BatchOfDto, id_: Id, nullable: bool = False) -> t.Optional[Dto]: """Resolves `get`query described by the ids.""" @abstractmethod def _resolve_exists(self, query_chain: QueryChain) -> bool: """Returns whether any object specified by the query exist.""" @abstractmethod def _resolve_count(self, query_chain: QueryChain) -> int: """ Counts objects filtering them out by the query specifying conditions that they should met. """ # evaluating commands @abstractmethod def _resolve_update(self, query_chain: QueryChain, update: Kwargs) -> Ids: """ Updates all objects specified by the query with given update. """ @abstractmethod def _resolve_remove(self, query_chain: QueryChain) -> Ids: """ Removes all objects specified by the query from the collection. """ # instant commands @abstractmethod def insert(self, dto: Dto) -> Id: """ Inserts the object into the collection. :returns: id of the inserted object """ @abstractmethod def batch_insert(self, dtos: BatchOfDto) -> Ids: """ Inserts multiple objects into the collection. :returns: a iterable of ids """ @abstractmethod def clear(self) -> None: """Clears the collection."""
mit
-3,672,697,594,967,121,400
29.851695
98
0.609944
false
4.132236
false
false
false
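A sketch of the chaining contract defined above. Here `dao` stands for any concrete AbstractDao subclass and `is_active` for some Predicate instance; both are assumptions, not part of the module.

qc = dao.filter(is_active).filter_by(ids=[1, 2, 3])  # lazy: nothing evaluated yet
if qc.exists():                # evaluation delegates to _resolve_exists
    print(qc.count())          # ... and _resolve_count
    qc.update(archived=True)   # command: resolves via _resolve_update

dto = dao.get(42)  # shortcut for a trivial chain filtered by a single id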
avedaee/DIRAC
WorkloadManagementSystem/Agent/JobCleaningAgent.py
1
8926
######################################################################## # $HeadURL$ # File : JobCleaningAgent.py # Author : A.T. ######################################################################## """ The Job Cleaning Agent controls removing jobs from the WMS in the end of their life cycle. """ from DIRAC import S_OK, gLogger from DIRAC.Core.Base.AgentModule import AgentModule from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB from DIRAC.WorkloadManagementSystem.DB.TaskQueueDB import TaskQueueDB from DIRAC.WorkloadManagementSystem.DB.JobLoggingDB import JobLoggingDB from DIRAC.WorkloadManagementSystem.Client.SandboxStoreClient import SandboxStoreClient from DIRAC.RequestManagementSystem.Client.Request import Request from DIRAC.RequestManagementSystem.Client.Operation import Operation from DIRAC.RequestManagementSystem.Client.File import File from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient import DIRAC.Core.Utilities.Time as Time import time import os REMOVE_STATUS_DELAY = { 'Done':7, 'Killed':1, 'Failed':7 } class JobCleaningAgent( AgentModule ): """ The specific agents must provide the following methods: - initialize() for initial settings - beginExecution() - execute() - the main method called in the agent cycle - endExecution() - finalize() - the graceful exit of the method, this one is usually used for the agent restart """ ############################################################################# def initialize( self ): """Sets defaults """ self.am_setOption( "PollingTime", 60 ) self.jobDB = JobDB() self.taskQueueDB = TaskQueueDB() self.jobLoggingDB = JobLoggingDB() # self.sandboxDB = SandboxDB( 'SandboxDB' ) agentTSTypes = self.am_getOption('ProductionTypes', []) if agentTSTypes: self.prod_types = agentTSTypes else: self.prod_types = Operations().getValue( 'Transformations/DataProcessing', ['MCSimulation', 'Merge'] ) gLogger.info('Will exclude the following Production types from cleaning %s' % ( ', '.join(self.prod_types) ) ) self.maxJobsAtOnce = self.am_getOption('MaxJobsAtOnce', 100) self.jobByJob = self.am_getOption('JobByJob', True) self.throttlingPeriod = self.am_getOption('ThrottlingPeriod', 0.) return S_OK() def __getAllowedJobTypes( self ): #Get valid jobTypes result = self.jobDB.getDistinctJobAttributes( 'JobType' ) if not result[ 'OK' ]: return result cleanJobTypes = [] for jobType in result[ 'Value' ]: if jobType not in self.prod_types: cleanJobTypes.append( jobType ) self.log.notice( "JobTypes to clean %s" % cleanJobTypes ) return S_OK( cleanJobTypes ) ############################################################################# def execute( self ): """The PilotAgent execution method. 
""" #Delete jobs in "Deleted" state result = self.removeJobsByStatus( { 'Status' : 'Deleted' } ) if not result[ 'OK' ]: return result #Get all the Job types that can be cleaned result = self.__getAllowedJobTypes() if not result[ 'OK' ]: return result baseCond = { 'JobType' : result[ 'Value' ] } # Remove jobs with final status for status in REMOVE_STATUS_DELAY: delay = REMOVE_STATUS_DELAY[ status ] condDict = dict( baseCond ) condDict[ 'Status' ] = status delTime = str( Time.dateTime() - delay * Time.day ) result = self.removeJobsByStatus( condDict, delTime ) if not result['OK']: gLogger.warn( 'Failed to remove jobs in status %s' % status ) return S_OK() def removeJobsByStatus( self, condDict, delay = False ): """ Remove deleted jobs """ if delay: gLogger.verbose( "Removing jobs with %s and older than %s" % ( condDict, delay ) ) result = self.jobDB.selectJobs( condDict, older = delay, limit = self.maxJobsAtOnce ) else: gLogger.verbose( "Removing jobs with %s " % condDict ) result = self.jobDB.selectJobs( condDict, limit = self.maxJobsAtOnce ) if not result['OK']: return result jobList = result['Value'] if len(jobList) > self.maxJobsAtOnce: jobList = jobList[:self.maxJobsAtOnce] if not jobList: return S_OK() self.log.notice( "Deleting %s jobs for %s" % ( len( jobList ), condDict ) ) count = 0 error_count = 0 result = SandboxStoreClient( useCertificates = True ).unassignJobs( jobList ) if not result[ 'OK' ]: gLogger.warn( "Cannot unassign jobs to sandboxes", result[ 'Message' ] ) result = self.deleteJobOversizedSandbox( jobList ) if not result[ 'OK' ]: gLogger.warn( "Cannot schedle removal of oversized sandboxes", result[ 'Message' ] ) return result failedJobs = result['Value']['Failed'] for job in failedJobs: jobList.pop( jobList.index( job ) ) if self.jobByJob: for jobID in jobList: resultJobDB = self.jobDB.removeJobFromDB( jobID ) resultTQ = self.taskQueueDB.deleteJob( jobID ) resultLogDB = self.jobLoggingDB.deleteJob( jobID ) errorFlag = False if not resultJobDB['OK']: gLogger.warn( 'Failed to remove job %d from JobDB' % jobID, result['Message'] ) errorFlag = True if not resultTQ['OK']: gLogger.warn( 'Failed to remove job %d from TaskQueueDB' % jobID, result['Message'] ) errorFlag = True if not resultLogDB['OK']: gLogger.warn( 'Failed to remove job %d from JobLoggingDB' % jobID, result['Message'] ) errorFlag = True if errorFlag: error_count += 1 else: count += 1 if self.throttlingPeriod: time.sleep(self.throttlingPeriod) else: result = self.jobDB.removeJobFromDB( jobList ) if not result['OK']: gLogger.error('Failed to delete %d jobs from JobDB' % len(jobList) ) else: gLogger.info('Deleted %d jobs from JobDB' % len(jobList) ) for jobID in jobList: resultTQ = self.taskQueueDB.deleteJob( jobID ) if not resultTQ['OK']: gLogger.warn( 'Failed to remove job %d from TaskQueueDB' % jobID, resultTQ['Message'] ) error_count += 1 else: count += 1 result = self.jobLoggingDB.deleteJob( jobList ) if not result['OK']: gLogger.error('Failed to delete %d jobs from JobLoggingDB' % len(jobList) ) else: gLogger.info('Deleted %d jobs from JobLoggingDB' % len(jobList) ) if count > 0 or error_count > 0 : gLogger.info( 'Deleted %d jobs from JobDB, %d errors' % ( count, error_count ) ) return S_OK() def deleteJobOversizedSandbox( self, jobIDList ): """ Delete the job oversized sandbox files from storage elements """ failed = {} successful = {} lfnDict = {} for jobID in jobIDList: result = self.jobDB.getJobParameter( jobID, 'OutputSandboxLFN' ) if result['OK']: lfn = result['Value'] if lfn: lfnDict[lfn] = jobID 
else:
          successful[jobID] = 'No oversized sandbox found'
      else:
        gLogger.warn( 'Error interrogating JobDB: %s' % result['Message'] )

    if not lfnDict:
      return S_OK( {'Successful':successful, 'Failed':failed} )

    # Schedule removal of the LFNs now
    for lfn, jobID in lfnDict.items():
      result = self.jobDB.getJobAttributes( jobID, ['OwnerDN', 'OwnerGroup'] )
      if not result['OK']:
        failed[jobID] = lfn
        continue
      if not result['Value']:
        failed[jobID] = lfn
        continue
      ownerDN = result['Value']['OwnerDN']
      ownerGroup = result['Value']['OwnerGroup']
      result = self.__setRemovalRequest( lfn, ownerDN, ownerGroup )
      if not result['OK']:
        failed[jobID] = lfn
      else:
        successful[jobID] = lfn

    result = {'Successful':successful, 'Failed':failed}
    return S_OK( result )

  def __setRemovalRequest( self, lfn, ownerDN, ownerGroup ):
    """ Set removal request with the given credentials
    """
    oRequest = Request()
    oRequest.OwnerDN = ownerDN
    oRequest.OwnerGroup = ownerGroup
    oRequest.RequestName = os.path.basename( lfn ).strip() + '_removal_request.xml'
    oRequest.SourceComponent = 'JobCleaningAgent'

    removeFile = Operation()
    removeFile.Type = 'RemoveFile'

    removedFile = File()
    removedFile.LFN = lfn
    removeFile.addFile( removedFile )

    oRequest.addOperation( removeFile )

    return ReqClient().putRequest( oRequest )
gpl-3.0
4,453,427,842,608,304,000
35.73251
114
0.608895
false
3.911481
false
false
false
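The cutoff used in execute() may be easier to see in isolation: jobs in a final status are selected once they are older than the configured number of days. This mirrors the agent code above; nothing new is introduced.

import DIRAC.Core.Utilities.Time as Time

delay = REMOVE_STATUS_DELAY['Done']                # 7 days for 'Done' jobs
delTime = str( Time.dateTime() - delay * Time.day )  # oldest timestamp to keep
# removeJobsByStatus( {'Status': 'Done', ...}, delTime ) then selects and
# deletes at most MaxJobsAtOnce matching jobs per agent cycle.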
GoIncremental/gi-ansible
inventory/digital_ocean.py
1
19937
#!/usr/bin/env python ''' DigitalOcean external inventory script ====================================== Generates Ansible inventory of DigitalOcean Droplets. In addition to the --list and --host options used by Ansible, there are options for generating JSON of other DigitalOcean data. This is useful when creating droplets. For example, --regions will return all the DigitalOcean Regions. This information can also be easily found in the cache file, whose default location is /tmp/ansible-digital_ocean.cache). The --pretty (-p) option pretty-prints the output for better human readability. ---- Although the cache stores all the information received from DigitalOcean, the cache is not used for current droplet information (in --list, --host, --all, and --droplets). This is so that accurate droplet information is always found. You can force this script to use the cache with --force-cache. ---- Configuration is read from `digital_ocean.ini`, then from environment variables, then and command-line arguments. Most notably, the DigitalOcean Client ID and API Key must be specified. They can be specified in the INI file or with the following environment variables: export DO_CLIENT_ID='DO123' DO_API_KEY='abc123' Alternatively, they can be passed on the command-line with --client-id and --api-key. If you specify DigitalOcean credentials in the INI file, a handy way to get them into your environment (e.g., to use the digital_ocean module) is to use the output of the --env option with export: export $(digital_ocean.py --env) ---- The following groups are generated from --list: - ID (droplet ID) - NAME (droplet NAME) - image_ID - image_NAME - distro_NAME (distribution NAME from image) - region_ID - region_NAME - size_ID - size_NAME - status_STATUS When run against a specific host, this script returns the following variables: - do_created_at - do_distroy - do_id - do_image - do_image_id - do_ip_address - do_name - do_region - do_region_id - do_size - do_size_id - do_status ----- ``` usage: digital_ocean.py [-h] [--list] [--host HOST] [--all] [--droplets] [--regions] [--images] [--sizes] [--ssh-keys] [--domains] [--pretty] [--cache-path CACHE_PATH] [--cache-max_age CACHE_MAX_AGE] [--refresh-cache] [--client-id CLIENT_ID] [--api-key API_KEY] Produce an Ansible Inventory file based on DigitalOcean credentials optional arguments: -h, --help show this help message and exit --list List all active Droplets as Ansible inventory (default: True) --host HOST Get all Ansible inventory variables about a specific Droplet --all List all DigitalOcean information as JSON --droplets List Droplets as JSON --regions List Regions as JSON --images List Images as JSON --sizes List Sizes as JSON --ssh-keys List SSH keys as JSON --domains List Domains as JSON --pretty, -p Pretty-print results --cache-path CACHE_PATH Path to the cache files (default: .) 
--cache-max_age CACHE_MAX_AGE Maximum age of the cached items (default: 0) --refresh-cache Force refresh of cache by making API requests to DigitalOcean (default: False - use cache files) --client-id CLIENT_ID, -c CLIENT_ID DigitalOcean Client ID --api-key API_KEY, -a API_KEY DigitalOcean API Key ``` ''' # (c) 2013, Evan Wies <[email protected]> # # Inspired by the EC2 inventory plugin: # https://github.com/ansible/ansible/blob/devel/plugins/inventory/ec2.py # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ###################################################################### import os import sys import re import argparse from time import time import ConfigParser try: import json except ImportError: import simplejson as json try: from dopy.manager import DoError, DoManager except ImportError as e: print "failed=True msg='`dopy` library required for this script'" sys.exit(1) class DigitalOceanInventory(object): ########################################################################### # Main execution path ########################################################################### def __init__(self): ''' Main execution path ''' # DigitalOceanInventory data self.data = {} # All DigitalOcean data self.inventory = {} # Ansible Inventory self.index = {} # Varous indices of Droplet metadata # Define defaults self.cache_path = '.' self.cache_max_age = 0 # Read settings, environment variables, and CLI arguments self.read_settings() self.read_environment() self.read_cli_args() # Verify credentials were set if not hasattr(self, 'client_id') or not hasattr(self, 'api_key'): print '''Could not find values for DigitalOcean client_id and api_key. 
They must be specified via either ini file, command line argument (--client-id and --api-key), or environment variables (DO_CLIENT_ID and DO_API_KEY)''' sys.exit(-1) # env command, show DigitalOcean credentials if self.args.env: print "DO_CLIENT_ID=%s DO_API_KEY=%s" % (self.client_id, self.api_key) sys.exit(0) # Manage cache self.cache_filename = self.cache_path + "/ansible-digital_ocean.cache" self.cache_refreshed = False if not self.args.force_cache and self.args.refresh_cache or not self.is_cache_valid(): self.load_all_data_from_digital_ocean() else: self.load_from_cache() if len(self.data) == 0: if self.args.force_cache: print '''Cache is empty and --force-cache was specified''' sys.exit(-1) self.load_all_data_from_digital_ocean() else: # We always get fresh droplets for --list, --host, --all, and --droplets # unless --force-cache is specified if not self.args.force_cache and ( self.args.list or self.args.host or self.args.all or self.args.droplets): self.load_droplets_from_digital_ocean() # Pick the json_data to print based on the CLI command if self.args.droplets: json_data = { 'droplets': self.data['droplets'] } elif self.args.regions: json_data = { 'regions': self.data['regions'] } elif self.args.images: json_data = { 'images': self.data['images'] } elif self.args.sizes: json_data = { 'sizes': self.data['sizes'] } elif self.args.ssh_keys: json_data = { 'ssh_keys': self.data['ssh_keys'] } elif self.args.domains: json_data = { 'domains': self.data['domains'] } elif self.args.all: json_data = self.data elif self.args.host: json_data = self.load_droplet_variables_for_host() else: # '--list' this is last to make it default json_data = self.inventory if self.args.pretty: print json.dumps(json_data, sort_keys=True, indent=2) else: print json.dumps(json_data) # That's all she wrote... 
########################################################################### # Script configuration ########################################################################### def read_settings(self): ''' Reads the settings from the digital_ocean.ini file ''' config = ConfigParser.SafeConfigParser() config.read(os.path.dirname(os.path.realpath(__file__)) + '/digital_ocean.ini') # Credentials if config.has_option('digital_ocean', 'client_id'): self.client_id = config.get('digital_ocean', 'client_id') if config.has_option('digital_ocean', 'api_key'): self.api_key = config.get('digital_ocean', 'api_key') # Cache related if config.has_option('digital_ocean', 'cache_path'): self.cache_path = config.get('digital_ocean', 'cache_path') if config.has_option('digital_ocean', 'cache_max_age'): self.cache_max_age = config.getint('digital_ocean', 'cache_max_age') def read_environment(self): ''' Reads the settings from environment variables ''' # Setup credentials if os.getenv("DO_CLIENT_ID"): self.client_id = os.getenv("DO_CLIENT_ID") if os.getenv("DO_API_KEY"): self.api_key = os.getenv("DO_API_KEY") def read_cli_args(self): ''' Command line argument processing ''' parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on DigitalOcean credentials') parser.add_argument('--list', action='store_true', help='List all active Droplets as Ansible inventory (default: True)') parser.add_argument('--host', action='store', help='Get all Ansible inventory variables about a specific Droplet') parser.add_argument('--all', action='store_true', help='List all DigitalOcean information as JSON') parser.add_argument('--droplets','-d', action='store_true', help='List Droplets as JSON') parser.add_argument('--regions', action='store_true', help='List Regions as JSON') parser.add_argument('--images', action='store_true', help='List Images as JSON') parser.add_argument('--sizes', action='store_true', help='List Sizes as JSON') parser.add_argument('--ssh-keys', action='store_true', help='List SSH keys as JSON') parser.add_argument('--domains', action='store_true',help='List Domains as JSON') parser.add_argument('--pretty','-p', action='store_true', help='Pretty-print results') parser.add_argument('--cache-path', action='store', help='Path to the cache files (default: .)') parser.add_argument('--cache-max_age', action='store', help='Maximum age of the cached items (default: 0)') parser.add_argument('--force-cache', action='store_true', default=False, help='Only use data from the cache') parser.add_argument('--refresh-cache','-r', action='store_true', default=False, help='Force refresh of cache by making API requests to DigitalOcean (default: False - use cache files)') parser.add_argument('--env','-e', action='store_true', help='Display DO_CLIENT_ID and DO_API_KEY') parser.add_argument('--client-id','-c', action='store', help='DigitalOcean Client ID') parser.add_argument('--api-key','-a', action='store', help='DigitalOcean API Key') self.args = parser.parse_args() if self.args.client_id: self.client_id = self.args.client_id if self.args.api_key: self.api_key = self.args.api_key if self.args.cache_path: self.cache_path = self.args.cache_path if self.args.cache_max_age: self.cache_max_age = self.args.cache_max_age # Make --list default if none of the other commands are specified if (not self.args.droplets and not self.args.regions and not self.args.images and not self.args.sizes and not self.args.ssh_keys and not self.args.domains and not self.args.all and not self.args.host): self.args.list = True 
########################################################################### # Data Management ########################################################################### def load_all_data_from_digital_ocean(self): ''' Use dopy to get all the information from DigitalOcean and save data in cache files ''' manager = DoManager(self.client_id, self.api_key) self.data = {} self.data['droplets'] = self.sanitize_list(manager.all_active_droplets()) self.data['regions'] = self.sanitize_list(manager.all_regions()) self.data['images'] = self.sanitize_list(manager.all_images(filter=None)) self.data['sizes'] = self.sanitize_list(manager.sizes()) self.data['ssh_keys'] = self.sanitize_list(manager.all_ssh_keys()) self.data['domains'] = self.sanitize_list(manager.all_domains()) self.index = {} self.index['region_to_name'] = self.build_index(self.data['regions'], 'id', 'name') self.index['size_to_name'] = self.build_index(self.data['sizes'], 'id', 'name') self.index['image_to_name'] = self.build_index(self.data['images'], 'id', 'name') self.index['image_to_distro'] = self.build_index(self.data['images'], 'id', 'distribution') self.index['host_to_droplet'] = self.build_index(self.data['droplets'], 'ip_address', 'id', False) self.build_inventory() self.write_to_cache() def load_droplets_from_digital_ocean(self): ''' Use dopy to get droplet information from DigitalOcean and save data in cache files ''' manager = DoManager(self.client_id, self.api_key) self.data['droplets'] = self.sanitize_list(manager.all_active_droplets()) self.index['host_to_droplet'] = self.build_index(self.data['droplets'], 'ip_address', 'id', False) self.build_inventory() self.write_to_cache() def build_index(self, source_seq, key_from, key_to, use_slug=True): dest_dict = {} for item in source_seq: name = (use_slug and item.has_key('slug')) and item['slug'] or item[key_to] key = item[key_from] dest_dict[key] = name return dest_dict def build_inventory(self): '''Build Ansible inventory of droplets''' self.inventory = {} self.inventory['localhost'] = ['127.0.0.1'] # add all droplets by id and name for droplet in self.data['droplets']: dest = droplet['ip_address'] self.inventory[droplet['id']] = [dest] self.push(self.inventory, droplet['name'], dest) self.push(self.inventory, 'region_'+droplet['region_id'], dest) self.push(self.inventory, 'image_' +droplet['image_id'], dest) self.push(self.inventory, 'size_' +droplet['size_id'], dest) self.push(self.inventory, 'status_'+droplet['status'], dest) region_name = self.index['region_to_name'].get(droplet['region_id']) if region_name: self.push(self.inventory, 'region_'+region_name, dest) size_name = self.index['size_to_name'].get(droplet['size_id']) if size_name: self.push(self.inventory, 'size_'+size_name, dest) image_name = self.index['image_to_name'].get(droplet['image_id']) if image_name: self.push(self.inventory, 'image_'+image_name, dest) distro_name = self.index['image_to_distro'].get(droplet['image_id']) if distro_name: self.push(self.inventory, 'distro_'+distro_name, dest) def load_droplet_variables_for_host(self): '''Generate a JSON reponse to a --host call''' host = self.to_safe(str(self.args.host)) if not host in self.index['host_to_droplet']: # try updating cache if not self.args.force_cache: self.load_all_data_from_digital_ocean() if not host in self.index['host_to_droplet']: # host might not exist anymore return {} droplet = None if self.cache_refreshed: for drop in self.data['droplets']: if drop['ip_address'] == host: droplet = self.sanitize_dict(drop) break else: # Cache wasn't 
refreshed this run, so hit DigitalOcean API manager = DoManager(self.client_id, self.api_key) droplet_id = self.index['host_to_droplet'][host] droplet = self.sanitize_dict(manager.show_droplet(droplet_id)) if not droplet: return {} # Put all the information in a 'do_' namespace info = {} for k, v in droplet.items(): info['do_'+k] = v # Generate user-friendly variables (i.e. not the ID's) if droplet.has_key('region_id'): info['do_region'] = self.index['region_to_name'].get(droplet['region_id']) if droplet.has_key('size_id'): info['do_size'] = self.index['size_to_name'].get(droplet['size_id']) if droplet.has_key('image_id'): info['do_image'] = self.index['image_to_name'].get(droplet['image_id']) info['do_distro'] = self.index['image_to_distro'].get(droplet['image_id']) return info ########################################################################### # Cache Management ########################################################################### def is_cache_valid(self): ''' Determines if the cache files have expired, or if it is still valid ''' if os.path.isfile(self.cache_filename): mod_time = os.path.getmtime(self.cache_filename) current_time = time() if (mod_time + self.cache_max_age) > current_time: return True return False def load_from_cache(self): ''' Reads the data from the cache file and assigns it to member variables as Python Objects''' cache = open(self.cache_filename, 'r') json_data = cache.read() cache.close() data = json.loads(json_data) self.data = data['data'] self.inventory = data['inventory'] self.index = data['index'] def write_to_cache(self): ''' Writes data in JSON format to a file ''' data = { 'data': self.data, 'index': self.index, 'inventory': self.inventory } json_data = json.dumps(data, sort_keys=True, indent=2) cache = open(self.cache_filename, 'w') cache.write(json_data) cache.close() ########################################################################### # Utilities ########################################################################### def push(self, my_dict, key, element): ''' Pushed an element onto an array that may not have been defined in the dict ''' if key in my_dict: my_dict[key].append(element); else: my_dict[key] = [element] def to_safe(self, word): ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups ''' return re.sub("[^A-Za-z0-9\-\.]", "_", word) def sanitize_dict(self, d): new_dict = {} for k, v in d.items(): if v != None: new_dict[self.to_safe(str(k))] = self.to_safe(str(v)) return new_dict def sanitize_list(self, seq): new_seq = [] for d in seq: new_seq.append(self.sanitize_dict(d)) return new_seq ########################################################################### # Run the script DigitalOceanInventory()
apache-2.0
-7,804,996,669,653,076,000
39.940452
192
0.580679
false
3.969142
true
false
false
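The behavior of build_index() is easiest to see with made-up region records; __new__ is used below to skip __init__, which would otherwise read settings and hit the API. The slug and name values are invented for illustration.

inv = DigitalOceanInventory.__new__(DigitalOceanInventory)
regions = [{'id': 4, 'slug': 'nyc2', 'name': 'New York 2'},
           {'id': 3, 'name': 'San Francisco 1'}]

# With use_slug=True (the default) the slug wins when present:
print(inv.build_index(regions, 'id', 'name'))
# -> {4: 'nyc2', 3: 'San Francisco 1'}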
yveskaufmann/Naive-Bayes
NaiveBayes/BagOfWords.py
1
1801
class BagOfWords(object):
    """
    Implements a bag of words: words mapped to their frequency of use
    in a "document", for use by the Document class, DocumentClass class
    and the Pool class.
    """

    def __init__(self):
        self.__number_of_words = 0
        self.__bag_of_words = {}

    def __add__(self, other):
        """ Overloading of the "+" operator to join two BagOfWords """

        erg = BagOfWords()
        sum = erg.__bag_of_words
        for key in self.__bag_of_words:
            sum[key] = self.__bag_of_words[key]
            if key in other.__bag_of_words:
                sum[key] += other.__bag_of_words[key]
        for key in other.__bag_of_words:
            if key not in sum:
                sum[key] = other.__bag_of_words[key]
        return erg

    def add_word(self, word):
        """ A word is added to the dictionary __bag_of_words """
        self.__number_of_words += 1
        if word in self.__bag_of_words:
            self.__bag_of_words[word] += 1
        else:
            self.__bag_of_words[word] = 1

    def len(self):
        """ Returns the number of different words in the object """
        return len(self.__bag_of_words)

    def Words(self):
        """ Returns a list of the words contained in the object """
        return self.__bag_of_words.keys()

    def BagOfWords(self):
        """ Returns the dictionary containing the words (keys) with their frequencies (values) """
        return self.__bag_of_words

    def WordFreq(self, word):
        """ Returns the frequency of a word """
        if word in self.__bag_of_words:
            return self.__bag_of_words[word]
        else:
            return 0
mit
204,471,557,903,905,180
26.707692
99
0.52804
false
3.923747
false
false
false
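A quick usage sketch of the class above; the words are arbitrary.

bag_a = BagOfWords()
for word in ["spam", "ham", "spam"]:
    bag_a.add_word(word)

bag_b = BagOfWords()
bag_b.add_word("spam")

merged = bag_a + bag_b            # __add__ sums the two frequency dicts
print(merged.WordFreq("spam"))    # 3
print(merged.WordFreq("eggs"))    # 0 (unknown words report zero)
print(sorted(merged.Words()))     # ['ham', 'spam']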
xR86/ml-stuff
labs-AI/hw-lab5/pawn_chess.py
1
14528
# /usr/bin/python ''' Based on Representing a chess set in Python Part 2 Brendan Scott https://python4kids.brendanscott.com/2013/04/28/a-different-view-on-our-chess-model/ ''' import Tkinter as tk from Tkinter import PhotoImage import os.path import os # column_reference = "1 2 3 4 5 6 7 8".split(" ") column_reference = "a b c d e f g h".split(" ") EMPTY_SQUARE = " " TILE_WIDTH = 60 '''We have used a tile width of 60 because the images we are used are 60x60 pixels The original svg files were obtained from http://commons.wikimedia.org/wiki/Category:SVG_chess_pieces/Standard_transparent after downloading they were batch converted to png, then gif files. Bash one liners to do this: for i in $(ls *.svg); do inkscape -e ${i%.svg}.png -w 60 -h 60 $i ; done for i in $(ls *.png); do convert $i ${i%.png}.gif ; done white and black tiles were created in inkscape ''' BOARD_WIDTH = 8 * TILE_WIDTH BOARD_HEIGHT = BOARD_WIDTH DATA_DIR = "chess_data" TILES = {"black_tile": "black_tile.gif", "p": "chess_p45.gif", "P": "chess_p451.gif", "white_tile": "white_tile.gif" } class Model(object): def __init__(self): '''create a chess board with pieces positioned for a new game row ordering is reversed from normal chess representations but corresponds to a top left screen coordinate ''' self.board = [] pawn_base = "P " * 8 white_pawns = pawn_base.strip() black_pawns = white_pawns.lower() self.board.append([EMPTY_SQUARE] * 8) self.board.append(black_pawns.split(" ")) for i in range(4): self.board.append([EMPTY_SQUARE] * 8) self.board.append(white_pawns.split(" ")) self.board.append([EMPTY_SQUARE] * 8) def color(self, i, j): ''' checks the color of the piece located at the i, j coordinates ''' color = -1 # 0 - white, 1 - black if self.board[i][j] == 'p': color = 1 elif self.board[i][j] == 'P': color = 0 return color def move(self, start, destination): ''' move a piece located at the start location to destination (each an instance of BoardLocation) Does not check whether the move is valid for the piece ''' # check piece color color = self.color(start.i, start.j) print "Piece color: ", 'black' if color == 1 else ('white' if color == 0 else 'position empty') print "start.j, %d, destination.j %d" % (start.j, destination.j) print "start.i, %d, destination.i %d" % (start.i, destination.i) print "---" print self.board print "---" # ### error checking ### # # check coordinates are valid for c in [start, destination]: if c.i > 7 or c.j > 7 or c.i < 0 or c.j < 0: print 'err - coordinates are not valid (outside of board size)\n---' return # don't move to same location if start.i == destination.i and start.j == destination.j: print 'err - move to same location\n---' return # nothing to move if self.board[start.i][start.j] == EMPTY_SQUARE: print 'err - nothing to move\n---' return # don't move more than one step # if at initial location don't move more than two steps if color == 1 and start.i == 1 or color == 0 and start.i == 6: if abs(destination.i - start.i) > 2 or abs(destination.j - start.j) > 1: print 'err - more than two steps at init location\n---' return # don't move more than one step elif abs(destination.i - start.i) > 1 or abs(destination.j - start.j) > 1: print 'err - more than one step\n---' return # capture move capture_color = self.color(destination.i, destination.j) print 'capture color: ', capture_color print '---' #prevent capture of same color if capture_color == color and (start.j - 1 == destination.j or start.j + 1 == destination.j): print 'err - capture of same color\n---' return # prevent capture on an 
empty square if capture_color != color and (start.j - 1 == destination.j or start.j + 1 == destination.j) \ and self.board[destination.i][destination.j] == EMPTY_SQUARE: print 'err - capture of empty square\n---' return #prevent capture on walk (vertical) if capture_color != color and capture_color != -1 and start.j == destination.j: print 'err - capture on walk\n---' return # no retreat # and start.j == destination.j #if start.i - 1 != destination.i and color == 0: #allows only one step if start.i < destination.i and color == 0: # white goes up print 'err - retreat attempt\n---' return #if start.i + 1 != destination.i and color == 1: #allows only one step if start.i > destination.i and color == 1: # black goes down print 'err - retreat attempt\n---' return f = self.board[start.i][start.j] self.board[destination.i][destination.j] = f self.board[start.i][start.j] = EMPTY_SQUARE print '---\n' class BoardLocation(object): def __init__(self, i, j): self.i = i self.j = j class View(tk.Frame): def __init__(self, parent=None): tk.Frame.__init__(self, parent) # label = tk.Label(self, text="Error rate for a perceptron") # label.pack(pady=10, padx=10) label = tk.Button(self, text="Error ") label.pack(pady=10, padx=10, side = tk.RIGHT) self.canvas = tk.Canvas(self, width=BOARD_WIDTH, height=BOARD_HEIGHT) self.canvas.pack() self.images = {} for image_file_name in TILES: f = os.path.join(DATA_DIR, TILES[image_file_name]) if not os.path.exists(f): print("Error: Cannot find image file: %s at %s - aborting" % (TILES[image_file_name], f)) exit(-1) self.images[image_file_name] = PhotoImage(file=f) '''This opens each of the image files, converts the data into a form that Tkinter can use, then stores that converted form in the attribute self.images self.images is a dictionary, keyed by the letters we used in our model to represent the pieces - ie PRNBKQ for white and prnbkq for black eg self.images['N'] is a PhotoImage of a white knight this means we can directly translate a board entry from the model into a picture ''' self.pack() def clear_canvas(self): ''' delete everything from the canvas''' items = self.canvas.find_all() for i in items: self.canvas.delete(i) def draw_row(self, y, first_tile_white=True, debug_board=False): ''' draw a single row of alternating black and white tiles, the colour of the first tile is determined by first_tile_white if debug_board is set show the coordinates of each of the tile corners ''' if first_tile_white: remainder = 1 else: remainder = 0 for i in range(8): x = i * TILE_WIDTH if i % 2 == remainder: # i %2 is the remainder after dividing i by 2 # so i%2 will always be either 0 (no remainder- even numbers) or # 1 (remainder 1 - odd numbers) # this tests whether the number i is even or odd tile = self.images['black_tile'] else: tile = self.images['white_tile'] self.canvas.create_image(x, y, anchor=tk.NW, image=tile) # NW is a constant in the Tkinter module. It stands for "north west" # that is, the top left corner of the picture is to be located at x,y # if we used another anchor, the grid would not line up properly with # the canvas size if debug_board: # implicitly this means if debug_board == True. ''' If we are drawing a debug board, draw an arrow showing top left and its coordinates. 
''' text_pos = (x + TILE_WIDTH / 2, y + TILE_WIDTH / 2) line_end = (x + TILE_WIDTH / 4, y + TILE_WIDTH / 4) self.canvas.create_line((x, y), line_end, arrow=tk.FIRST) text_content = "(%s,%s)" % (x, y) self.canvas.create_text(text_pos, text=text_content) def draw_empty_board(self, debug_board=False): ''' draw an empty board on the canvas if debug_board is set show the coordinates of each of the tile corners''' y = 0 for i in range(8): # draw 8 rows y = i * TILE_WIDTH # each time, advance the y value at which the row is drawn # by the length of the tile first_tile_white = not (i % 2) self.draw_row(y, first_tile_white, debug_board) def draw_pieces(self, board): for i, row in enumerate(board): # using enumerate we get an integer index # for each row which we can use to calculate y # because rows run down the screen, they correspond to the y axis # and the columns correspond to the x axis for j, piece in enumerate(row): if piece == EMPTY_SQUARE: continue # skip empty tiles tile = self.images[piece] x = j * TILE_WIDTH y = i * TILE_WIDTH self.canvas.create_image(x, y, anchor=tk.NW, image=tile) def display(self, board, debug_board=False): ''' draw an empty board then draw each of the pieces in the board over the top''' self.clear_canvas() self.draw_empty_board(debug_board=debug_board) if not debug_board: self.draw_pieces(board) # first draw the empty board # then draw the pieces # if the order was reversed, the board would be drawn over the pieces # so we couldn't see them def display_debug_board(self): self.clear_canvas() self.draw_empty_board() class Controller(object): def __init__(self, parent=None, model=None): if model is None: self.m = Model() else: self.m = model self.v = View(parent) ''' we have created both a model and a view within the controller the controller doesn't inherit from either model or view ''' self.v.canvas.bind("<Button-1>", self.handle_click) # this binds the handle_click method to the view's canvas for left button down self.clickList = [] # I have kept clickList here, and not in the model, because it is a record of what is happening # in the view (ie click events) rather than something that the model deals with (eg moves). def run(self, debug_mode=False): self.update_display(debug_board=debug_mode) tk.mainloop() def handle_click(self, event): ''' Handle a click received. The x,y location of the click on the canvas is at (event.x, event.y) First, we need to translate the event coordinates (ie the x,y of where the click occurred) into a position on the chess board add this to a list of clicked positions every first click is treated as a "from" and every second click as a"to" so, whenever there are an even number of clicks, use the most recent to two to perform a move then update the display ''' j = event.x / TILE_WIDTH # the / operator is called integer division # it returns the number of times TILE_WIDTH goes into event.x ignoring any remainder # eg: 2/2 = 1, 3/2 = 1, 11/5 = 2 and so on # so, it should return a number between 0 (if x < TILE_WIDTH) though to 7 i = event.y / TILE_WIDTH self.clickList.append(BoardLocation(i, j)) # just maintain a list of all of the moves # this list shouldn't be used to replay a series of moves because that is something # which should be stored in the model - but it wouldn't be much trouble to # keep a record of moves in the model. 
if len(self.clickList) % 2 == 0: # move complete, execute the move self.m.move(self.clickList[-2], self.clickList[-1]) # use the second last entry in the clickList and the last entry in the clickList self.update_display() def update_display(self, debug_board=False): self.v.display(self.m.board, debug_board=debug_board) def parse_move(self, move): ''' Very basic move parsing given a move in the form ab-cd where a and c are in [a,b,c,d,e,f,g,h] and b and d are numbers from 1 to 8 convert into BoardLocation instances for start (ab) and destination (cd) Does not deal with castling (ie 0-0 or 0-0-0) or bare pawn moves (e4) or capture d4xe5 etc No error checking! very fragile ''' s, d = move.split("-") i = 8 - int(s[-1]) # board is "upside down" with reference to the representation j = column_reference.index(s[0]) start = BoardLocation(i, j) i = 8 - int(d[-1]) j = column_reference.index(d[0]) destination = BoardLocation(i, j) return start, destination if __name__ == "__main__": if not os.path.exists(DATA_DIR): ''' basic check - if there are files missing from the data directory, the program will still fail ''' dl = raw_input("Cannot find chess images directory. Download from website? (Y/n)") if dl.lower() == "n": print("No image files found, quitting.") exit(0) print("Creating directory: %s" % os.path.join(os.getcwd(), DATA_DIR)) import urllib os.mkdir(DATA_DIR) url_format = "https://python4kids.files.wordpress.com/2013/04/%s" for k, v in TILES.items(): url = url_format % v target_filename = os.path.join(DATA_DIR, v) print("Downloading file: %s" % v) urllib.urlretrieve(url, target_filename) parent = tk.Tk() c = Controller(parent) c.run(debug_mode=False)
mit
-8,434,570,687,873,066,000
38.373984
105
0.581016
false
3.807128
false
false
false
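parse_move() flips ranks into top-left screen coordinates; the check below instantiates Controller without running __init__ (which needs Tk and the image assets), since parse_move touches no instance state. The move string is arbitrary.

c = Controller.__new__(Controller)  # skip __init__: no Tk, no images needed
start, dest = c.parse_move("e2-e4")
print start.i, start.j  # 6 4 -- rank 2 maps to screen row 8-2=6, file e to column 4
print dest.i, dest.j    # 4 4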
Azure/azure-sdk-for-python
sdk/edgegateway/azure-mgmt-edgegateway/azure/mgmt/edgegateway/models/arm_base_model.py
1
1384
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class ARMBaseModel(Model): """Represents the base class for all object models. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: The path ID that uniquely identifies the object. :vartype id: str :ivar name: The object name. :vartype name: str :ivar type: The hierarchical type of the object. :vartype type: str """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, } def __init__(self, **kwargs): super(ARMBaseModel, self).__init__(**kwargs) self.id = None self.name = None self.type = None
mit
-4,780,398,978,749,978,000
29.755556
76
0.547688
false
4.379747
false
false
false
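A hedged sketch of how generated resource models conventionally extend this base class; the Gateway name and the extra location field are illustrative, not part of the SDK.

class Gateway(ARMBaseModel):
    """Illustrative subclass adding one writable property."""

    _validation = dict(ARMBaseModel._validation)  # id/name/type stay readonly
    _attribute_map = dict(
        ARMBaseModel._attribute_map,
        location={'key': 'location', 'type': 'str'},
    )

    def __init__(self, **kwargs):
        super(Gateway, self).__init__(**kwargs)
        self.location = kwargs.get('location', None)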
lightd22/smartDraft
src/data/database_ops.py
1
15164
import sqlite3
import re
from .champion_info import champion_id_from_name, champion_name_from_id, convert_champion_alias, AliasException

regionsDict = {"NA_LCS":"NA", "EU_LCS":"EU", "LCK":"LCK", "LPL":"LPL",
               "LMS":"LMS", "International":"INTL", "NA_ACA":"NA_ACA",
               "KR_CHAL":"KR_CHAL", "LDL":"LDL"}
internationalEventsDict = {"Mid-Season_Invitational":"MSI", "Rift_Rivals":"RR", "World_Championship":"WRLDS"}

def get_matches_by_id(match_ids, path):
    """
    Returns match data for each match_id in the list match_ids.
    """
    conn = sqlite3.connect(path)
    cur = conn.cursor()
    match_data = []
    for match_id in match_ids:
        match = get_match_data(cur, match_id)
        match_data.append(match)
    conn.close()
    return match_data

def get_game_ids_by_tournament(cursor, tournament, patch=None):
    """
    get_game_ids_by_tournament queries the connected db for game ids which
    match the input tournament string.
    Args:
        cursor (sqlite cursor): cursor used to execute commands
        tournament (string): id string for tournament (ie "2017/EU/Summer_Split")
        patch (string, optional): id string for patch to additionally filter by
    Returns:
        gameIds (list(int)): list of gameIds
    """
    if patch:
        query = "SELECT id FROM game WHERE tournament=? AND patch=? ORDER BY id"
        params = (tournament, patch)
    else:
        query = "SELECT id FROM game WHERE tournament=? ORDER BY id"
        params = (tournament,)
    cursor.execute(query, params)
    response = cursor.fetchall()
    vals = []
    for r in response:
        vals.append(r[0])
    return vals

def get_game_ids(cursor, tournament=None, patch=None):
    """
    get_game_ids queries the connected db for game ids which match the input
    tournament and patch strings.
    Args:
        cursor (sqlite cursor): cursor used to execute commands
        tournament (string, optional): id string for tournament (ie "2017/EU/Summer_Split")
        patch (string, optional): id string for patch to filter for
    Returns:
        gameIds (list(int)): list of gameIds
    """
    if not patch and not tournament:
        return []
    params = ()
    where_clause = []
    if tournament:
        where_clause.append("tournament=?")
        params += (tournament,)
    if patch:
        where_clause.append("patch=?")
        params += (patch,)
    query = "SELECT id FROM game WHERE {where_clause} ORDER BY id".format(where_clause=" AND ".join(where_clause))
    cursor.execute(query, params)
    response = cursor.fetchall()
    vals = []
    for r in response:
        vals.append(r[0])
    return vals

def get_match_data(cursor, gameId):
    """
    get_match_data queries the connected db for draft data and organizes it
    into a more convenient format.
    Args:
        cursor (sqlite cursor): cursor used to execute commands
        gameId (int): primary key of game to process
    Returns:
        match (dict): formatted pick/ban phase data for game
    """
    match = {"id": gameId, "winner": None, "blue": {}, "red": {},
             "blue_team": None, "red_team": None, "header_id": None, "patch": None}
    # Get winning team
    query = "SELECT tournament, tourn_game_id, week, patch, winning_team FROM game WHERE id=?"
    params = (gameId,)
    cursor.execute(query, params)
    match["tournament"], match["tourn_game_id"], match["header_id"], match["patch"], match["winner"] = cursor.fetchone()
    # Get ban data
    query = "SELECT champion_id, selection_order FROM ban WHERE game_id=? AND side_id=? ORDER BY selection_order"
    params = (gameId, 0)
    cursor.execute(query, params)
    match["blue"]["bans"] = list(cursor.fetchall())
    query = "SELECT champion_id, selection_order FROM ban WHERE game_id=? AND side_id=? ORDER BY selection_order"
    params = (gameId, 1)
    cursor.execute(query, params)
    match["red"]["bans"] = list(cursor.fetchall())
    # Get pick data
    query = "SELECT champion_id, position_id, selection_order FROM pick WHERE game_id=? AND side_id=? ORDER BY selection_order"
    params = (gameId, 0)
    cursor.execute(query, params)
    match["blue"]["picks"] = list(cursor.fetchall())
    query = "SELECT champion_id, position_id, selection_order FROM pick WHERE game_id=? AND side_id=? ORDER BY selection_order"
    params = (gameId, 1)
    cursor.execute(query, params)
    match["red"]["picks"] = list(cursor.fetchall())
    # Get team display names
    query = "SELECT display_name FROM team JOIN game ON team.id = blue_teamid WHERE game.id = ?"
    params = (gameId,)
    cursor.execute(query, params)
    match["blue_team"] = cursor.fetchone()[0]
    query = "SELECT display_name FROM team JOIN game ON team.id = red_teamid WHERE game.id = ?"
    params = (gameId,)
    cursor.execute(query, params)
    match["red_team"] = cursor.fetchone()[0]
    return match

def get_tournament_data(gameData):
    """
    get_tournament_data cleans up and combines the region/year/tournament
    fields in gameData for entry into the game table. When combined with the
    tourn_game_id field it uniquely identifies the match played.
    The format of the output is 'year/region_abbrv/tournament'
    (forward slash delimiters).
    Args:
        gameData (dict): dictionary output from query_wiki()
    Returns:
        tournamentData (string): formatted and cleaned region/year/split data
    """
    tournamentData = "/".join([gameData["year"], regionsDict[gameData["region"]], gameData["tournament"]])
    return tournamentData

def get_game_id(cursor, gameData):
    """
    get_game_id looks in the game table for an entry with matching tournament
    and tourn_game_id as the input gameData and returns the id field. If no
    such entry is found, it adds this game to the game table and returns the
    new id field.
    Args:
        cursor (sqlite cursor): cursor used to execute commands
        gameData (dict): dictionary output from query_wiki()
    Returns:
        gameId (int): primary key in game table corresponding to this gameData
    """
    tournament = get_tournament_data(gameData)
    vals = (tournament, gameData["tourn_game_id"])
    gameId = None
    while gameId is None:
        cursor.execute("SELECT id FROM game WHERE tournament=? AND tourn_game_id=?", vals)
        gameId = cursor.fetchone()
        if gameId is None:
            print("Warning: Game not found. Attempting to add game.")
            err = insert_game(cursor, [gameData])  # was insert_game(cursor,[game]); 'game' was undefined here
        else:
            gameId = gameId[0]
    return gameId

def delete_game_from_table(cursor, game_ids, table_name):
    """
    Deletes rows corresponding to game_id from the table table_name.
    Args:
        cursor (sqlite cursor): cursor used to execute commands
        game_ids (list(int)): game_ids to be removed from table
        table_name (string): name of table to remove rows from
    Returns:
        status (int): status = 1 if delete was successful, otherwise status = 0
    """
    status = 0
    assert isinstance(game_ids, list), "game_ids is not a list"
    for game_id in game_ids:
        query = "SELECT count(*) FROM {table_name} WHERE game_id=?".format(table_name=table_name)
        vals = (game_id,)
        cursor.execute(query, vals)
        print("Found {count} rows for game_id={game_id} to delete from table {table}".format(count=cursor.fetchone()[0], game_id=game_id, table=table_name))
        query = "DELETE FROM {table_name} WHERE game_id=?".format(table_name=table_name)
        cursor.execute(query, vals)
    status = 1
    return status

def insert_game(cursor, gameData):
    """
    insert_game attempts to format collected gameData from query_wiki() and
    insert it into the game table in competitiveGameData.db.
    Args:
        cursor (sqlite cursor): cursor used to execute commands
        gameData (list(dict)): list of dictionary output from query_wiki()
    Returns:
        status (int): status = 1 if insert was successful, otherwise status = 0
    """
    status = 0
    assert isinstance(gameData, list), "gameData is not a list"
    for game in gameData:
        tournGameId = game["tourn_game_id"]  # Which game this is within the current tournament
        tournamentData = get_tournament_data(game)
        # Check to see if game data is already in the table
        vals = (tournamentData, tournGameId)
        cursor.execute("SELECT id FROM game WHERE tournament=? AND tourn_game_id=?", vals)
        result = cursor.fetchone()
        if result is not None:
            print("game {} already exists in table.. skipping".format(result[0]))
        else:
            # Get blue and red team_ids
            blueTeamId = None
            redTeamId = None
            while (blueTeamId is None) or (redTeamId is None):
                cursor.execute("SELECT id FROM team WHERE display_name=?", (game["blue_team"],))
                blueTeamId = cursor.fetchone()
                cursor.execute("SELECT id FROM team WHERE display_name=?", (game["red_team"],))
                redTeamId = cursor.fetchone()
                if (blueTeamId is None) or (redTeamId is None):
                    print("*WARNING: When inserting game-- team not found. Attempting to add teams")
                    err = insert_team(cursor, [game])
                else:
                    blueTeamId = blueTeamId[0]
                    redTeamId = redTeamId[0]
            winner = game["winning_team"]
            header_id = game["header_id"]
            patch = game["patch"]
            vals = (tournamentData, tournGameId, header_id, patch, blueTeamId, redTeamId, winner)
            cursor.execute("INSERT INTO game(tournament, tourn_game_id, week, patch, blue_teamid, red_teamid, winning_team) VALUES(?,?,?,?,?,?,?)", vals)
    status = 1
    return status

def insert_team(cursor, gameData):
    """
    insert_team attempts to format collected gameData from query_wiki() and
    insert it into the team table in competitiveGameData.db.
    Args:
        cursor (sqlite cursor): cursor used to execute commands
        gameData (list(dict)): list of dictionary output from query_wiki()
    Returns:
        status (int): status = 1 if insert was successful, otherwise status = 0
    """
    status = 0
    assert isinstance(gameData, list), "gameData is not a list"
    for game in gameData:
        # We don't track all regions (i.e. wildcard regions), but they can still
        # appear at international tournaments. When this happens we track the
        # team, but list their region as NULL.
        if game["region"] == "International":  # was: is "Inernational" (typo plus identity comparison)
            region = None
        else:
            region = regionsDict[game["region"]]
        teams = [game["blue_team"], game["red_team"]]
        for team in teams:
            vals = (region, team)
            # This only looks for matching display names.. what happens if
            # there's an NA TSM and an EU TSM?
            cursor.execute("SELECT * FROM team WHERE display_name=?", (team,))
            result = cursor.fetchone()
            if result is None:
                cursor.execute("INSERT INTO team(region, display_name) VALUES(?,?)", vals)
    status = 1
    return status

def insert_ban(cursor, gameData):
    """
    insert_ban attempts to format collected gameData from query_wiki() and
    insert it into the ban table in competitiveGameData.db.
    Args:
        cursor (sqlite cursor): cursor used to execute commands
        gameData (list(dict)): list of dictionary output from query_wiki()
    Returns:
        status (int): status = 1 if insert was successful, otherwise status = 0
    """
    status = 0
    assert isinstance(gameData, list), "gameData is not a list"
    teams = ["blue", "red"]
    for game in gameData:
        gameId = get_game_id(cursor, game)
        # Check for existing entries in the table. Skip if they already exist.
        cursor.execute("SELECT game_id FROM ban WHERE game_id=?", (gameId,))
        result = cursor.fetchone()
        if result is not None:
            print("Bans for game {} already exist in table.. skipping".format(result[0]))
        else:
            for k in range(len(teams)):
                bans = game["bans"][teams[k]]
                selectionOrder = 0
                side = k
                for ban in bans:
                    if ban in ["lossofban", "none"]:
                        # Special case if no ban was submitted in game
                        banId = None
                    else:
                        banId = champion_id_from_name(ban)
                        # If no such champion name is found, try looking for an alias
                        if banId is None:
                            banId = champion_id_from_name(convert_champion_alias(ban))
                    selectionOrder += 1
                    vals = (gameId, banId, selectionOrder, side)
                    cursor.execute("INSERT INTO ban(game_id, champion_id, selection_order, side_id) VALUES(?,?,?,?)", vals)
    status = 1
    return status

def insert_pick(cursor, gameData):
    """
    insert_pick formats collected gameData from query_wiki() and inserts it
    into the pick table of competitiveGameData.db.
    Args:
        cursor (sqlite cursor): cursor used to execute commands
        gameData (list(dict)): list of formatted game data from query_wiki()
    Returns:
        status (int): status = 1 if insert was successful, otherwise status = 0
    """
    status = 0
    assert isinstance(gameData, list), "gameData is not a list"
    teams = ["blue", "red"]
    for game in gameData:
        gameId = get_game_id(cursor, game)
        # Check for existing entries in the table. Skip if they already exist.
        cursor.execute("SELECT game_id FROM pick WHERE game_id=?", (gameId,))
        result = cursor.fetchone()
        if result is not None:
            print("Picks for game {} already exist in table.. skipping".format(result[0]))
        else:
            for k in range(len(teams)):
                picks = game["picks"][teams[k]]
                selectionOrder = 0
                side = k
                for (pick, position) in picks:
                    if pick in ["lossofpick", "none"]:
                        # Special case if no pick was submitted for the game
                        # (being consistent with insert_ban() above)
                        pickId = None
                    else:
                        pickId = champion_id_from_name(pick)
                        # If no such champion name is found, try looking for an alias
                        if pickId is None:
                            pickId = champion_id_from_name(convert_champion_alias(pick))
                    selectionOrder += 1
                    vals = (gameId, pickId, position, selectionOrder, side)
                    cursor.execute("INSERT INTO pick(game_id, champion_id, position_id, selection_order, side_id) VALUES(?,?,?,?,?)", vals)
    status = 1
    return status
apache-2.0
-8,253,334,377,751,635,000
41.122222
156
0.615339
false
3.942798
false
false
false
mikalstill/nova
nova/conf/cinder.py
1
4043
# Copyright (c) 2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from keystoneauth1 import loading as ks_loading
from oslo_config import cfg


cinder_group = cfg.OptGroup(
    'cinder',
    title='Cinder Options',
    help="Configuration options for the block storage")

cinder_opts = [
    cfg.StrOpt('catalog_info',
               default='volumev3::publicURL',
               regex=r'(\w+):(\w*):(.*?)',  # raw string avoids invalid-escape warnings
               help="""
Info to match when looking for cinder in the service catalog.

The ``<service_name>`` is optional and omitted by default since it should
not be necessary in most deployments.

Possible values:

* Format is separated values of the form:
  <service_type>:<service_name>:<endpoint_type>

Note: Nova does not support the Cinder v2 API since the Nova 17.0.0 Queens
release.

Related options:

* endpoint_template - Setting this option will override catalog_info
"""),
    cfg.StrOpt('endpoint_template',
               help="""
If this option is set then it will override service catalog lookup with
this template for cinder endpoint

Possible values:

* URL for cinder endpoint API
  e.g. http://localhost:8776/v3/%(project_id)s

Note: Nova does not support the Cinder v2 API since the Nova 17.0.0 Queens
release.

Related options:

* catalog_info - If endpoint_template is not set, catalog_info will be used.
"""),
    cfg.StrOpt('os_region_name',
               help="""
Region name of this node. This is used when picking the URL in the service
catalog.

Possible values:

* Any string representing region name
"""),
    cfg.IntOpt('http_retries',
               default=3,
               min=0,
               help="""
Number of times cinderclient should retry on any failed http call.
0 means connection is attempted only once. Setting it to any positive integer
means that on failure connection is retried that many times e.g. setting it
to 3 means total attempts to connect will be 4.

Possible values:

* Any integer value. 0 means connection is attempted only once
"""),
    cfg.BoolOpt('cross_az_attach',
                default=True,
                help="""
Allow attach between instance and volume in different availability zones.

If False, volumes attached to an instance must be in the same availability
zone in Cinder as the instance availability zone in Nova.

This also means care should be taken when booting an instance from a volume
where source is not "volume" because Nova will attempt to create a volume using
the same availability zone as what is assigned to the instance.

If that AZ is not in Cinder (or allow_availability_zone_fallback=False in
cinder.conf), the volume create request will fail and the instance will fail
the build request.

By default there is no availability zone restriction on volume attach.
"""),
]


def register_opts(conf):
    conf.register_group(cinder_group)
    conf.register_opts(cinder_opts, group=cinder_group)

    ks_loading.register_session_conf_options(conf, cinder_group.name)
    ks_loading.register_auth_conf_options(conf, cinder_group.name)


def list_opts():
    return {
        cinder_group.name: (
            cinder_opts +
            ks_loading.get_session_conf_options() +
            ks_loading.get_auth_common_conf_options() +
            ks_loading.get_auth_plugin_conf_options('password') +
            ks_loading.get_auth_plugin_conf_options('v2password') +
            ks_loading.get_auth_plugin_conf_options('v3password'))
    }
apache-2.0
6,942,938,164,454,437,000
32.691667
79
0.697007
false
4.018887
false
false
false
alanaberdeen/coupled-minimum-cost-flow-track
cmcft/tools/params/c_cost.py
1
2405
# c_cost.py
# Cost vector for edges in coupled matrix

import numpy as np

__all__ = ["c_cost"]


def c_cost(g, a_coup, a_vertices):
    # TODO: this function is slow. Check for performance increases.

    # c_cost
    # creates vector of costs for edges
    #
    # Inputs:   g          - graph structure
    #           a_coup     - coupled incidence matrix
    #           a_vertices - order of rows in coupled matrix
    #
    # Outputs:  c - list of costs for each edge in incidence matrix
    #

    # Initialise cost vector
    c = []

    # For all edges in coupled matrix (iterating over transpose)
    for e in a_coup.T:

        # Get vertices connected by edge
        vertex_indices = np.nonzero(e)
        v = [a_vertices[i] for i in vertex_indices[1]]

        # Get weights
        cost = 0

        # For simple edges
        if len(v) == 2:
            try:
                cost = g[v[0]][v[1]]['weight']
            except KeyError:
                cost = g[v[1]][v[0]]['weight']

        # For coupled edges
        elif len(v) == 4:
            # Find merge/split event label
            ms_node = ms_event(v, g)
            for n in v:
                try:
                    # g.edge is the networkx 1.x adjacency API
                    cost = cost + g.edge[n][ms_node]['weight']
                except KeyError:
                    cost = cost + g.edge[ms_node][n]['weight']

        # Append to cost vector
        c.append(cost)

    return c


def ms_event(vertices, graph):
    # ms_event
    # given 4 node labels, find the split or merge vertex they are connected to
    #
    # Inputs:   vertices - list of 4 node labels
    #           graph    - graph structure
    # Outputs:  event_label - label of split/merge node
    #

    # Initialise output
    num = []
    event = None

    # split nodes
    if 'D' in vertices:
        event = 'M'
        for n in vertices:
            if 'L' in n:
                num.append(''.join(i for i in n if i.isdigit()))

    # merge nodes
    elif 'A' in vertices:
        event = 'S'
        for n in vertices:
            if 'R' in n:
                num.append(''.join(i for i in n if i.isdigit()))

    # Combine to give event label
    event_label = (event + '(' + num[0] + ',' + num[1] + ')')

    # Check if the label is the correct way around
    if not graph.has_node(event_label):
        event_label = (event + '(' + num[1] + ',' + num[0] + ')')

    return event_label
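# Editor's note: a toy sketch of the simple-edge path through c_cost above,
# hedged -- the two-node graph and incidence matrix are fabricated for
# illustration (node labels 'L1'/'R1' follow the naming used by ms_event).
import numpy as np
import networkx as nx

g_demo = nx.Graph()
g_demo.add_edge('L1', 'R1', weight=2.5)
a_vertices_demo = ['L1', 'R1']       # row order of the incidence matrix
a_coup_demo = np.matrix([[1],        # one column per edge, one row per vertex
                         [1]])
print(c_cost(g_demo, a_coup_demo, a_vertices_demo))  # [2.5]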
mit
-3,617,845,052,523,256,300
23.540816
77
0.50894
false
3.734472
false
false
false