from numba import jit, njit
@njit(fastmath=True)
def numba_compare_images_2(image_to_warp, target_image, target_image_not_smoothed, affine_matrix, m_per_height_unit, match_threshold_m=0.07, weight_by_height=True):
target_image_not_smoothed_height, target_image_not_smoothed_width = target_image_not_smoothed.shape
target_image_height, target_image_width = target_image.shape
image_to_warp_height, image_to_warp_width = image_to_warp.shape
if (target_image_height != target_image_not_smoothed_height) or (target_image_width != target_image_not_smoothed_width):
print('ERROR: numba_compare_images_2: target_image and target_image_not_smoothed must have the same dimensions, yet')
print(' target_image_not_smoothed_width, target_image_not_smoothed_height =', (target_image_not_smoothed_width, target_image_not_smoothed_height))
print('    target_image_width, target_image_height =', (target_image_width, target_image_height))
return 0.0  # abort on dimension mismatch rather than risk out-of-bounds reads below
threshold_unit = match_threshold_m / m_per_height_unit
dx0 = affine_matrix[0,0]
dx1 = affine_matrix[1,0]
dy0 = affine_matrix[0,1]
dy1 = affine_matrix[1,1]
b0 = affine_matrix[0,2]
b1 = affine_matrix[1,2]
match_score = 0.0
# 0.5 shift to compensate for casting to int instead of rounding
start_0 = (b0 - dy0) + 0.5
start_1 = (b1 - dy1) + 0.5
for to_warp_y in range(image_to_warp_height):
start_0 = start_0 + dy0
start_1 = start_1 + dy1
target_x_f = start_0 - dx0
target_y_f = start_1 - dx1
for to_warp_x in range(image_to_warp_width):
target_x_f = target_x_f + dx0
target_y_f = target_y_f + dx1
to_warp_value = image_to_warp[to_warp_y, to_warp_x]
if to_warp_value != 0:
target_x = int(target_x_f)
target_y = int(target_y_f)
if (target_x >= 0) and (target_x < target_image_width) and (target_y >= 0) and (target_y < target_image_height):
target_not_smoothed_value = target_image_not_smoothed[target_y, target_x]
if target_not_smoothed_value != 0:
target_value = target_image[target_y, target_x]
difference = abs(target_value - to_warp_value)
if difference < threshold_unit:
if weight_by_height:
match_score = match_score + target_value
else:
match_score = match_score + 1.0
return match_score
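# Hypothetical usage sketch (added illustration, not part of the original module).
# The arrays and the 2x3 affine matrix below are made-up placeholders; the affine
# matrix maps (x, y) pixel coordinates of image_to_warp into target_image coordinates.
if __name__ == '__main__':
    import numpy as np
    warp = np.random.rand(100, 120)
    target = np.random.rand(200, 240)
    affine = np.array([[1.0, 0.0, 10.0],
                       [0.0, 1.0, 20.0]])
    score = numba_compare_images_2(warp, target, target, affine, m_per_height_unit=0.001)
    print('match score:', score)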
|
from django.shortcuts import render,redirect,reverse
from django.http import JsonResponse,HttpResponse,HttpResponseNotAllowed
# Create your views here.
def No_ne(req):
return render(req,'lianxi_app/8000.html')
def my_json(req):
data = {
"name":"张定",
'age':23,
'gender':'男',
'content':'我是个逗比'
}
return JsonResponse(data)
def test_res(req):
response = HttpResponse()
response.content = "完美"
response.status_code=404
response.write('我是wrute写的追加')
response.flush()
response.content='我来也'
return response
def index(req):
# u_name = req.COOKIES.get('user','游客')
res = req.session.get('pwd')
u_name = req.session.get('user','游客')
return render(req,'lianxi_app/index.html',{'username':u_name})
def login(req):
if req.method == "GET":
return render(req,'lianxi_app/register.html')
elif req.method == "POST":
parms = req.POST
name = parms.get("username")
pwd = parms.get("pwd")
response = redirect(reverse('lianxi_app:index'))
# response.set_cookie('user',name)
req.session['user'] = name
req.session['pwd'] = pwd
return response
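# A minimal urls.py sketch that these views appear to assume (hypothetical, reconstructed
# from reverse('lianxi_app:index') above; the paths and names are guesses):
#
#   from django.urls import path
#   from . import views
#
#   app_name = 'lianxi_app'
#   urlpatterns = [
#       path('', views.index, name='index'),
#       path('login/', views.login, name='login'),
#       path('logout/', views.logout, name='logout'),
#   ]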
def logout(req):
response = redirect(reverse('lianxi_app:index'))
# response.delete_cookie('user')
del req.session['pwd']
del req.session['user']
return response |
"""
============================
Draw flat objects in 3D plot
============================
Demonstrate using pathpatch_2d_to_3d to 'draw' shapes and text on a 3D plot.
"""
import matplotlib as mpl
import matplotlib.pyplot as pl
from matplotlib.patches import Circle, PathPatch, Rectangle
# register Axes3D class with matplotlib by importing Axes3D
from mpl_toolkits.mplot3d import Axes3D
import mpl_toolkits.mplot3d.art3d as art3d
from matplotlib.text import TextPath
from matplotlib.transforms import Affine2D
import numpy as np
mpl.rcParams['font.family'] = 'monospace'
#mpl.rcParams['text.latex.preamble'] = r'\usepackage{pslatex}'
def text3d(ax, xyz, s, zdir="z", size=None, angle=0, usetex=False, **kwargs):
x, y, z = xyz
if zdir == "y":
xy1, z1 = (x, z), y
elif zdir == "x":
xy1, z1 = (y, z), x
else:
xy1, z1 = (x, y), z
text_path = TextPath((0, 0), s, size=size, usetex=usetex)
trans = Affine2D().rotate(angle).translate(xy1[0], xy1[1])
p1 = PathPatch(trans.transform_path(text_path), **kwargs)
ax.add_patch(p1)
art3d.pathpatch_2d_to_3d(p1, z=z1, zdir=zdir)
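# Added note: pathpatch_2d_to_3d lifts the 2D text patch into 3D; zdir selects the axis
# the text plane is perpendicular to, z1 is the plane's offset along that axis, and xy1
# carries the remaining two coordinates as the in-plane position of the text.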
fig = pl.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_proj_type('ortho')
text = "TACOMA"
x = 2*np.arange(10)
for i in range(len(text)):
text3d(ax, (3+i, 1, x[i]),
text[i],
zdir="x", size=2, usetex=False,
ec="None", fc="k")
#if i == 0 or i == 5:
if True:
text3d(ax, (1.5+i, 0.7, x[i]),
'$t_{%d}$' % (i),
zdir="x", size=1.1, usetex=False,
ec="None", fc="k")
if i != 0 and i != 5:
ax.plot([x[i],x[i]],[2.4+i,4.1+i],[0.7,0.7],':',c=[0.7]*3,lw=1)
p = Rectangle((2.7,0.7),6,8,capstyle='round',fill=False)
ax.add_patch(p)
art3d.pathpatch_2d_to_3d(p, z=0, zdir="x")
p = Rectangle((2.7,0.7),6,8,capstyle='round',fill=False)
ax.add_patch(p)
art3d.pathpatch_2d_to_3d(p, z=i*2, zdir="x")
ax.plot([0,0],[6,7],[6,7],'-k')
ax.plot([10,10], [5.8,6.9],[4.5,3.7],'-k')
ax.plot([10,10],[6,7.3],[6,5.4],'-k')
ax.plot([10,10],[7,7.3],[7,5.4],'-k')
ax.plot([10,10],[6,7],[6,7],'-k')
p = Circle((7,7),0.2,color='w',ec='k')
ax.add_patch(p)
art3d.pathpatch_2d_to_3d(p, z=0, zdir="x")
p = Circle((6,6),0.2,color='w',ec='k')
ax.add_patch(p)
art3d.pathpatch_2d_to_3d(p, z=0, zdir="x")
p = Circle((7.3,5.4),0.2,color='w',ec='k')
ax.add_patch(p)
art3d.pathpatch_2d_to_3d(p, z=0, zdir="x")
p = Circle((5.8, 4.5),0.2,color='w',ec='k')
ax.add_patch(p)
art3d.pathpatch_2d_to_3d(p, z=0, zdir="x")
p = Circle((6.9,3.7),0.2,color='w',ec='k')
ax.add_patch(p)
art3d.pathpatch_2d_to_3d(p, z=0, zdir="x")
" ================= "
p = Circle((7,7),0.2,color='w',ec='k')
ax.add_patch(p)
art3d.pathpatch_2d_to_3d(p, z=10, zdir="x")
p = Circle((6,6),0.2,color='w',ec='k')
ax.add_patch(p)
art3d.pathpatch_2d_to_3d(p, z=10, zdir="x")
p = Circle((7.3,5.4),0.2,color='w',ec='k')
ax.add_patch(p)
art3d.pathpatch_2d_to_3d(p, z=10, zdir="x")
p = Circle((5.8, 4.5),0.2,color='w',ec='k')
ax.add_patch(p)
art3d.pathpatch_2d_to_3d(p, z=10, zdir="x")
p = Circle((6.9,3.7),0.2,color='w',ec='k')
ax.add_patch(p)
art3d.pathpatch_2d_to_3d(p, z=10, zdir="x")
#ax.plot([0,0], [5.8,6.9],[4.5,3.7],'-k')
ax.axis('off')
ax.set_xlim3d(0, 10)
ax.set_ylim3d(0, 10)
ax.set_zlim3d(0, 10)
ax.view_init(35,-47)
fig.tight_layout()
fig.savefig('logo.pdf')
fig.savefig('logo.png')
pl.show()
|
from functools import reduce
from operator import add
import numpy as np
class Patches:
""" Methods for analyzing patch patterns. """
def __init__(self, data):
self.data = data
@property
def keys(self):
return self.data.keys()
def apply(self, func, join):
""" Apply to recombinant patches. """
return reduce(join, [func(self.data[x]) for x in self.keys])
@property
def num_patches(self):
""" Number of patches. """
return self.apply(lambda x: x['number'], join=add)
@property
def sizes(self):
return self.apply(lambda x: x['sizes'], join=add)
@property
def mean_patch_size(self):
return np.mean(self.sizes)
@property
def median_patch_size(self):
return np.median(self.sizes)
@property
def size_variation(self):
sizes = self.sizes
return np.std(sizes) / np.mean(sizes)
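# Hypothetical usage sketch (illustrative only; the expected schema is inferred from the
# lambdas above: each entry needs a 'number' count and a 'sizes' list):
if __name__ == '__main__':
    example = Patches({
        'region_a': {'number': 3, 'sizes': [4, 9, 2]},
        'region_b': {'number': 2, 'sizes': [7, 7]},
    })
    print(example.num_patches)       # 5
    print(example.mean_patch_size)   # mean of [4, 9, 2, 7, 7]
    print(example.size_variation)    # std / mean of the combined sizes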
|
from django.shortcuts import render
from .models import Post
# Create your views here.
def trips_home(request):
post_list = Post.objects.all()
return render(request, 'trips/home.html', {
'post_list': post_list,
})
def trips_post_detail(request, pk):
post = Post.objects.get(pk=pk)
return render(request, 'trips/post.html', {'post': post})
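# Note (illustrative alternative, not part of the original app): Post.objects.get()
# raises Post.DoesNotExist for an unknown pk; a common variant returns a 404 instead:
#
#   from django.shortcuts import get_object_or_404
#
#   def trips_post_detail(request, pk):
#       post = get_object_or_404(Post, pk=pk)
#       return render(request, 'trips/post.html', {'post': post})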
|
import json
import requests
from battleforcastile.constants import BATTLEFORCASTILE_BACKEND_URL
from battleforcastile.exceptions import MatchCouldNotBeStartedException
def start_match(match_id: int):
url = f'{BATTLEFORCASTILE_BACKEND_URL}/matches/{match_id}/'
r = requests.patch(url, data=json.dumps({
'started': True
}))
if r.status_code != 200:
raise MatchCouldNotBeStartedException() |
from policy.policy_interface import PolicyInterface
from os import environ
import numpy as np
import tensorflow as tf
# The keras Controller Class.
class KerasPolicy(PolicyInterface):
def __init__(self, state, config):
super().__init__(state, config)
# Avoid using GPU
environ['CUDA_VISIBLE_DEVICES'] = '-1'
self.model = tf.keras.models.load_model(self.config['path'])
def __grayscale__(self, image):
return tf.image.rgb_to_grayscale(image)
def __transform__(self, image):
return (image / 127.5) - 1
def start(self):
pass
def step(self):
data = self.state['data']
image = tf.convert_to_tensor(np.array([data['image']]), dtype=tf.float32)
speed = tf.convert_to_tensor([np.array([data['speed']])], dtype=tf.float32)
args = { 'image': self.__transform__(self.__grayscale__(image)), 'speed': speed }
result = self.model.call(args)
self.state['action'] = result[0].numpy()
self.state['action'][2] = 0.0
print(self.state['action'])
def stop(self):
pass
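# Hypothetical wiring sketch (illustrative only; 'model.h5', the 66x200x3 image shape
# and the state fields are placeholders inferred from config['path'] and state['data']):
if __name__ == '__main__':
    state = {'data': {'image': np.zeros((66, 200, 3), dtype=np.float32), 'speed': 1.0}}
    policy = KerasPolicy(state, config={'path': 'model.h5'})
    policy.step()  # writes the predicted action vector into state['action']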
|
# Generated by h2py from /usr/include/termios.h
VEOF = 0
VEOL = 1
VEOL2 = 2
VERASE = 3
VWERASE = 4
VKILL = 5
VREPRINT = 6
VINTR = 8
VQUIT = 9
VSUSP = 10
VDSUSP = 11
VSTART = 12
VSTOP = 13
VLNEXT = 14
VDISCARD = 15
VMIN = 16
VTIME = 17
VSTATUS = 18
NCCS = 20
IGNBRK = 0x00000001
BRKINT = 0x00000002
IGNPAR = 0x00000004
PARMRK = 0x00000008
INPCK = 0x00000010
ISTRIP = 0x00000020
INLCR = 0x00000040
IGNCR = 0x00000080
ICRNL = 0x00000100
IXON = 0x00000200
IXOFF = 0x00000400
IXANY = 0x00000800
IMAXBEL = 0x00002000
OPOST = 0x00000001
ONLCR = 0x00000002
OXTABS = 0x00000004
ONOEOT = 0x00000008
CIGNORE = 0x00000001
CSIZE = 0x00000300
CS5 = 0x00000000
CS6 = 0x00000100
CS7 = 0x00000200
CS8 = 0x00000300
CSTOPB = 0x00000400
CREAD = 0x00000800
PARENB = 0x00001000
PARODD = 0x00002000
HUPCL = 0x00004000
CLOCAL = 0x00008000
CRTSCTS = 0x00010000
CRTS_IFLOW = CRTSCTS
CCTS_OFLOW = CRTSCTS
MDMBUF = 0x00100000
CHWFLOW = (MDMBUF|CRTSCTS)
ECHOKE = 0x00000001
ECHOE = 0x00000002
ECHOK = 0x00000004
ECHO = 0x00000008
ECHONL = 0x00000010
ECHOPRT = 0x00000020
ECHOCTL = 0x00000040
ISIG = 0x00000080
ICANON = 0x00000100
ALTWERASE = 0x00000200
IEXTEN = 0x00000400
EXTPROC = 0x00000800
TOSTOP = 0x00400000
FLUSHO = 0x00800000
NOKERNINFO = 0x02000000
PENDIN = 0x20000000
NOFLSH = 0x80000000
TCSANOW = 0
TCSADRAIN = 1
TCSAFLUSH = 2
TCSASOFT = 0x10
B0 = 0
B50 = 50
B75 = 75
B110 = 110
B134 = 134
B150 = 150
B200 = 200
B300 = 300
B600 = 600
B1200 = 1200
B1800 = 1800
B2400 = 2400
B4800 = 4800
B9600 = 9600
B19200 = 19200
B38400 = 38400
B7200 = 7200
B14400 = 14400
B28800 = 28800
B57600 = 57600
B76800 = 76800
B115200 = 115200
B230400 = 230400
EXTA = 19200
EXTB = 38400
TCIFLUSH = 1
TCOFLUSH = 2
TCIOFLUSH = 3
TCOOFF = 1
TCOON = 2
TCIOFF = 3
TCION = 4
# Included from sys/cdefs.h
# Included from machine/cdefs.h
def __P(protos): return protos
def __STRING(x): return #x
def __P(protos): return ()
def __STRING(x): return "x"
def __attribute__(x): return
def __kprintf_attribute__(a): return __attribute__(a)
# Included from sys/ttycom.h
# Included from sys/ioccom.h
IOCPARM_MASK = 0x1fff
def IOCPARM_LEN(x): return (((x) >> 16) & IOCPARM_MASK)
def IOCBASECMD(x): return ((x) & ~(IOCPARM_MASK << 16))
def IOCGROUP(x): return (((x) >> 8) & 0xff)
TIOCM_LE = 0001
TIOCM_DTR = 0002
TIOCM_RTS = 0004
TIOCM_ST = 0010
TIOCM_SR = 0020
TIOCM_CTS = 0040
TIOCM_CAR = 0100
TIOCM_CD = TIOCM_CAR
TIOCM_RNG = 0200
TIOCM_RI = TIOCM_RNG
TIOCM_DSR = 0400
TIOCPKT_DATA = 0x00
TIOCPKT_FLUSHREAD = 0x01
TIOCPKT_FLUSHWRITE = 0x02
TIOCPKT_STOP = 0x04
TIOCPKT_START = 0x08
TIOCPKT_NOSTOP = 0x10
TIOCPKT_DOSTOP = 0x20
TIOCPKT_IOCTL = 0x40
def UIOCCMD(n): return _IO(ord('u'), n)
TIOCFLAG_SOFTCAR = 0x01
TIOCFLAG_CLOCAL = 0x02
TIOCFLAG_CRTSCTS = 0x04
TIOCFLAG_MDMBUF = 0x08
TTYDISC = 0
TABLDISC = 3
SLIPDISC = 4
PPPDISC = 5
STRIPDISC = 6
# Included from sys/ttydefaults.h
TTYDEF_IFLAG = (BRKINT | ICRNL | IMAXBEL | IXON | IXANY)
TTYDEF_OFLAG = (OPOST | ONLCR | OXTABS)
TTYDEF_LFLAG = (ECHO | ICANON | ISIG | IEXTEN | ECHOE|ECHOKE|ECHOCTL)
TTYDEF_CFLAG = (CREAD | CS8 | HUPCL)
TTYDEF_SPEED = (B9600)
def CTRL(x): return (x&037)
CEOF = CTRL(ord('d'))
CERASE = 0177
CINTR = CTRL(ord('c'))
CKILL = CTRL(ord('u'))
CMIN = 1
CQUIT = 034
CSUSP = CTRL(ord('z'))
CTIME = 0
CDSUSP = CTRL(ord('y'))
CSTART = CTRL(ord('q'))
CSTOP = CTRL(ord('s'))
CLNEXT = CTRL(ord('v'))
CDISCARD = CTRL(ord('o'))
CWERASE = CTRL(ord('w'))
CREPRINT = CTRL(ord('r'))
CEOT = CEOF
CRPRNT = CREPRINT
CFLUSH = CDISCARD
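# Illustrative check of the derived control characters above (added note, values only):
#   CTRL(ord('c')) == 0x03 (ETX, ^C)  -> CINTR
#   CTRL(ord('d')) == 0x04 (EOT, ^D)  -> CEOF / CEOT
#   CTRL(ord('z')) == 0x1a (SUB, ^Z)  -> CSUSP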
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-07-08 09:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('instaclone', '0003_auto_20190708_1109'),
]
operations = [
migrations.AlterField(
model_name='item',
name='original_price',
field=models.IntegerField(null=True),
),
migrations.AlterField(
model_name='item',
name='stock',
field=models.PositiveIntegerField(null=True),
),
migrations.AlterField(
model_name='item',
name='today_price',
field=models.IntegerField(null=True),
),
]
|
from flask import Flask, render_template
app = Flask(__name__,template_folder='template')
@app.route('/')
def index():
return render_template('template1.html')
if __name__ == '__main__':
app.run(debug=True)
|
from celery import Celery
"""
1. 创建任务
2. 创建Celery实例
3. 在celery中 设置 任务,broker
4. worker
"""
# 1. Celery is a plug-and-play task queue.
# Celery needs to interact with the current Django project,
# so let it load the project's default settings.
# Option 1:
# import os
# os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mall.settings")
# Option 2:
import os
if not os.getenv('DJANGO_SETTINGS_MODULE'):
os.environ['DJANGO_SETTINGS_MODULE'] = 'shangcheng.settings'
# 2. Create the Celery instance
# main: by convention, the dotted path of this celery module,
# chosen so that main does not collide with another module
app = Celery(main='clery_tasks')
# 3. Set the broker
# Load the broker configuration; the argument is the module path
app.config_from_object('clery_tasks.config')
# 4. Let Celery auto-detect tasks
# Argument: a list whose elements are the package paths of the tasks
app.autodiscover_tasks(['clery_tasks.sms'])
# 5. Run a worker to execute the tasks
# Run this command inside the virtual environment:
# celery -A <path of the module holding the Celery instance> worker -l info
#celery -A clery_tasks.main worker -l info
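# Hypothetical companion files (illustrative only; the module names follow the
# 'clery_tasks' package referenced above, the broker URL and task body are guesses):
#
#   clery_tasks/config.py
#       broker_url = 'redis://127.0.0.1:6379/1'
#
#   clery_tasks/sms/tasks.py
#       from clery_tasks.main import app
#
#       @app.task
#       def send_sms_code(mobile, code):
#           print('send %s to %s' % (code, mobile))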
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-strict
from dataclasses import dataclass
from fbpcp.entity.pce_compute import PCECompute
from fbpcp.entity.pce_network import PCENetwork
@dataclass
class PCE:
pce_id: str
region: str
pce_network: PCENetwork
pce_compute: PCECompute
|
import boto3
import json
from botocore.exceptions import ClientError
from botocore.client import Config
from boto3.dynamodb.conditions import Key, Attr
dynamodb = boto3.resource('dynamodb','ap-southeast-1')
problems_table = dynamodb.Table('codebreaker-problems')
submissions_table = dynamodb.Table('codebreaker-submissions')
users_table = dynamodb.Table('codebreaker-users')
lambda_client = boto3.client('lambda')
def getAllUsersEmails():
return users_table.scan(ProjectionExpression = 'username, email')['Items']
def getUserInfoFromUsername(username):
scan_kwargs = {
'FilterExpression':Key('username').eq(username)
}
done = False
start_key = None
while not done:
if start_key:
scan_kwargs['ExclusiveStartKey']= start_key
response = users_table.scan(**scan_kwargs)
res = response.get('Items',[])
if len(res) > 0:
return res[0]
start_key = response.get('LastEvaluatedKey',None)
done = start_key is None
placeHolder = {
'email' : '',
'school':'',
'role':'',
'username':'',
'problem_scores':{},
'problem_subtask_scores':{},
}
return placeHolder
def updateAllScores(problem):
submissions = submissions_table.query(
IndexName = 'problemIndex4',
KeyConditionExpression = Key('problemName').eq(problem),
ProjectionExpression = 'username, totalScore',
ScanIndexForward = False
)['Items']
maxScore = {}
for i in submissions:
username = i['username']
if username not in maxScore:
maxScore[username] = 0
maxScore[username] = max(maxScore[username], i['totalScore'])
useremails = getAllUsersEmails()
emails = {}
for i in useremails:
emails[i['username']] = i['email']
noACs = 0
for username, score in maxScore.items():
users_table.update_item(
Key = {'email': emails[username]},
UpdateExpression = f'set problemScores. #a = :s',
ExpressionAttributeValues = {':s': score},
ExpressionAttributeNames = {'#a': problem}
)
if score == 100:
noACs += 1
problems_table.update_item(
Key = {'problemName': problem},
UpdateExpression = f'set noACs = :a',
ExpressionAttributeValues = {':a': noACs}
)
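# Hypothetical usage note (illustrative only; the problem name is a placeholder).
# updateAllScores recomputes each user's best totalScore for one problem plus the
# problem's accepted count, while updateAllStitchedScores (below) stitches the best
# result per subtask across submissions before weighting by subtaskScores. Example:
#   updateAllScores('add-two-numbers')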
def updateAllStitchedScores(problem):
submissions = submissions_table.query(
IndexName = 'problemIndex3',
KeyConditionExpression = Key('problemName').eq(problem),
ProjectionExpression = 'username, subtaskScores',
ScanIndexForward = False
)['Items']
subtaskScores = {}
for i in submissions:
username = i['username']
subtasks = i['subtaskScores']
if username not in subtaskScores:
subtaskScores[username] = [0] * len(subtasks)
for j in range(len(subtasks)):
subtaskScores[username][j] = max(subtaskScores[username][j], subtasks[j])
subtaskMaxScores = problems_table.query(
KeyConditionExpression = Key('problemName').eq(problem),
ProjectionExpression = 'subtaskScores'
)['Items'][0]['subtaskScores']
maxScore = {}
for username, subtasks in subtaskScores.items():
totalScore = 0
for i in range(len(subtasks)):
totalScore += subtasks[i] * int(subtaskMaxScores[i])
totalScore /= 100
if int(totalScore) == totalScore:
totalScore = int(totalScore)
else:
totalScore = round(totalScore, 2)
maxScore[username] = totalScore
useremails = getAllUsersEmails()
emails = {}
for i in useremails:
emails[i['username']] = i['email']
noACs = 0
for username, score in maxScore.items():
users_table.update_item(
Key = {'email': emails[username]},
UpdateExpression = f'set problemScores. #a = :s',
ExpressionAttributeValues = {':s': score},
ExpressionAttributeNames = {'#a': problem}
)
if score == 100:
noACs += 1
problems_table.update_item(
Key = {'problemName': problem},
UpdateExpression = f'set noACs = :a',
ExpressionAttributeValues = {':a': noACs}
) |
price = float(input("What is the product's price: "))
desconto = (price / 100) * 5
print(f"Your discount will be R${desconto:.2f}")
print(f"The product will cost R${price - desconto:.2f}")
|
from django.db import models
# Create your models here.
class HighSchool(models.Model):
featured = models.CharField(max_length=500)
high_school_name = models.CharField(max_length=500)
high_school_logo = models.ImageField(upload_to='high_school/high_school_photos')
high_school_motto = models.CharField(max_length=500)
high_school_photo1 = models.ImageField(upload_to='high_school/high_school_photos')
description1 = models.TextField(blank=True)
high_school_photo2 = models.ImageField(upload_to='high_school/high_school_photos')
description2 = models.TextField(blank=True)
high_school_photo3 = models.ImageField(upload_to='high_school/high_school_photos')
description3 = models.TextField(blank=True)
high_school_photo4 = models.ImageField(upload_to='high_school/high_school_photos')
description4 = models.TextField(blank=True)
high_school_photo5 = models.ImageField(upload_to='high_school/high_school_photos')
description5 = models.TextField(blank=True)
published = models.DateTimeField(auto_now_add=True, auto_now=False)
po_box = models.CharField(max_length=500)
location = models.CharField(max_length=500)
phone = models.CharField(max_length=500)
email = models.EmailField()
website = models.CharField(max_length=500, blank=True)
# special courses
course_one = models.CharField(max_length=500, blank=True)
course1_link = models.CharField(max_length=500, blank=True)
course_two = models.CharField(max_length=500, blank=True)
course2_link = models.CharField(max_length=500, blank=True)
course_three = models.CharField(max_length=500, blank=True)
course3_link = models.CharField(max_length=500, blank=True)
course_four = models.CharField(max_length=500, blank=True)
course4_link = models.CharField(max_length=500, blank=True)
course_five = models.CharField(max_length=500, blank=True)
course5_link = models.CharField(max_length=500, blank=True)
high_school_file = models.FileField(upload_to='high_school/high_school_files', blank=True)
# social networks
facebook = models.CharField(max_length=500, blank=True)
twitter = models.CharField(max_length=500, blank=True)
def __str__(self):
return self.high_school_name
|
import collections.abc
import json
import logging
from typing import Union, Sequence
import csv
import numpy as np
logger = logging.getLogger(__name__)
"""
Contains classes necessary for collecting statistics on the model during training
"""
class BatchStatistics:
"""
Represents the statistics collected from training a batch
NOTE: this is currently unused!
"""
def __init__(self, batch_num: int,
batch_train_accuracy: float,
batch_train_loss: float):
"""
:param batch_num: (int) batch number of collected statistics
:param batch_train_accuracy: (float) training set accuracy for this batch
:param batch_train_loss: (float) training loss for this batch
"""
self.batch_num = batch_num
self.batch_train_accuracy = batch_train_accuracy
self.batch_train_loss = batch_train_loss
def get_batch_num(self):
return self.batch_num
def get_batch_train_acc(self):
return self.batch_train_accuracy
def get_batch_train_loss(self):
return self.batch_train_loss
def set_batch_train_acc(self, acc):
if 0 <= acc <= 100:
self.batch_train_accuracy = acc
else:
msg = "Batch training accuracy should be between 0 and 100!"
logger.error(msg)
raise ValueError(msg)
def set_batch_train_loss(self, loss):
self.batch_train_loss = loss
class EpochTrainStatistics:
"""
Defines the training statistics for one epoch of training
"""
def __init__(self, train_acc: float, train_loss: float):
self.train_acc = train_acc
self.train_loss = train_loss
self.validate()
def validate(self):
if not isinstance(self.train_acc, float):
msg = "train_acc must be a float, got type {}".format(type(self.train_acc))
logger.error(msg)
raise ValueError(msg)
if not isinstance(self.train_loss, float):
msg = "train_loss must be a float, got type {}".format(type(self.train_loss))
logger.error(msg)
raise ValueError(msg)
def get_train_acc(self):
return self.train_acc
def get_train_loss(self):
return self.train_loss
class EpochValidationStatistics:
"""
Defines the validation statistics for one epoch of training
"""
def __init__(self, val_clean_acc, val_clean_loss, val_triggered_acc, val_triggered_loss):
self.val_clean_acc = val_clean_acc
self.val_clean_loss = val_clean_loss
self.val_triggered_acc = val_triggered_acc
self.val_triggered_loss = val_triggered_loss
self.validate()
def validate(self):
if self.val_clean_acc is not None and not isinstance(self.val_clean_acc, float):
msg = "val_clean_acc must be a float, got type {}".format(type(self.val_clean_acc))
logger.error(msg)
raise ValueError(msg)
if self.val_clean_loss is not None and not isinstance(self.val_clean_loss, float):
msg = "val_clean_loss must be a float, got type {}".format(type(self.val_clean_loss))
logger.error(msg)
raise ValueError(msg)
if self.val_triggered_acc is not None and not isinstance(self.val_triggered_acc, float):
msg = "val_triggered_acc must be a float, got type {}".format(type(self.val_triggered_acc))
logger.error(msg)
raise ValueError(msg)
if self.val_triggered_loss is not None and not isinstance(self.val_triggered_loss, float):
msg = "val_triggered_loss must be a float, got type {}".format(type(self.val_triggered_loss))
logger.error(msg)
raise ValueError(msg)
def get_val_clean_acc(self):
return self.val_clean_acc
def get_val_clean_loss(self):
return self.val_clean_loss
def get_val_triggered_acc(self):
return self.val_triggered_acc
def get_val_triggered_loss(self):
return self.val_triggered_loss
def get_val_loss(self):
if self.get_val_triggered_loss() is not None and self.get_val_clean_loss() is not None:
return self.get_val_triggered_loss() + self.get_val_clean_loss()
elif self.get_val_triggered_loss() is None and self.get_val_clean_loss() is not None:
return self.get_val_clean_loss()
elif self.get_val_triggered_loss() is not None and self.get_val_clean_loss() is None:
return self.get_val_triggered_loss()
else:
return None
def get_val_acc(self):
if self.get_val_triggered_acc() is not None and self.get_val_clean_acc() is not None:
return (self.get_val_triggered_acc() + self.get_val_clean_acc())/2.
elif self.get_val_triggered_acc() is None and self.get_val_clean_acc() is not None:
return self.get_val_clean_acc()
elif self.get_val_triggered_acc() is not None and self.get_val_clean_acc() is None:
return self.get_val_triggered_acc()
else:
return None
def __repr__(self):
val_loss = self.get_val_loss()
val_acc = self.get_val_acc()
val_loss = val_loss if val_loss is not None else -999
val_acc = val_acc if val_acc is not None else -999
return '(%0.04f, %0.04f)' % (val_loss, val_acc)
class EpochStatistics:
"""
Contains the statistics computed for an Epoch
"""
def __init__(self, epoch_num, training_stats=None, validation_stats=None, batch_training_stats=None):
self.epoch_num = epoch_num
self.batch_training_stats = batch_training_stats if batch_training_stats else []
self.epoch_training_stats = training_stats
self.epoch_validation_stats = validation_stats
self.validate()
def add_batch(self, batches: Union[BatchStatistics, Sequence[BatchStatistics]]):
if isinstance(batches, collections.abc.Sequence):
self.batch_training_stats.extend(batches)
else:
self.batch_training_stats.append(batches)
def get_batch_stats(self):
return self.batch_training_stats
def validate(self):
if not isinstance(self.batch_training_stats, collections.abc.Sequence):
msg = "batch_training_stats must be None or a list of BatchTrainingStats objects! " \
"Got {}".format(self.batch_training_stats)
logger.error(msg)
raise ValueError(msg)
if self.epoch_training_stats and not isinstance(self.epoch_training_stats, EpochTrainStatistics):
msg = "training_stats must be None or of type: EpochTrainStatistics!, got type " \
"{}".format(type(self.epoch_training_stats))
logger.error(msg)
raise ValueError(msg)
if self.epoch_validation_stats and not isinstance(self.epoch_validation_stats, EpochValidationStatistics):
msg = "validation_stats must be None or of type: EpochValidationStatistics! Instead got type " \
"{}".format(type(self.epoch_validation_stats))
logger.error(msg)
raise ValueError(msg)
def get_epoch_num(self):
return self.epoch_num
def get_epoch_training_stats(self):
return self.epoch_training_stats
def get_epoch_validation_stats(self):
return self.epoch_validation_stats
class TrainingRunStatistics:
"""
Contains the statistics computed for an entire training run, a sequence of epochs
TODO:
[ ] - have another function which returns detailed statistics per epoch in an easily serialized manner
"""
def __init__(self):
self.stats_per_epoch_list = []
self.num_epochs_trained_per_optimizer = []
self.final_train_acc = 0.
self.final_train_loss = 0.
self.final_combined_val_acc = 0.
self.final_combined_val_loss = 0.
self.final_clean_val_acc = 0.
self.final_clean_val_loss = 0.
self.final_triggered_val_acc = 0.
self.final_triggered_val_loss = 0.
self.final_clean_data_test_acc = 0.
self.final_clean_data_n_total = 0
self.final_triggered_data_test_acc = None
self.final_triggered_data_n_total = None
self.final_clean_data_triggered_labels_test_acc = None
self.final_clean_data_triggered_labels_n_total = None
self.final_optimizer_num_epochs_trained = 0
self.final_optimizer_best_epoch_val = -1
def add_epoch(self, epoch_stats: Union[EpochStatistics, Sequence[EpochStatistics]]):
if isinstance(epoch_stats, collections.abc.Sequence):
self.stats_per_epoch_list.extend(epoch_stats)
else:
self.stats_per_epoch_list.append(epoch_stats)
def add_num_epochs_trained(self, num_epochs):
self.num_epochs_trained_per_optimizer.append(num_epochs)
def add_best_epoch_val(self, best_epoch):
self.final_optimizer_best_epoch_val = best_epoch
def get_epochs_stats(self):
return self.stats_per_epoch_list
def autopopulate_final_summary_stats(self):
"""
Uses the information from the final epoch's final batch to auto-populate the following statistics:
final_train_acc
final_train_loss
final_val_acc
final_val_loss
"""
final_epoch_training_stats = self.stats_per_epoch_list[self.final_optimizer_best_epoch_val]
self.set_final_train_acc(final_epoch_training_stats.get_epoch_training_stats().get_train_acc())
self.set_final_train_loss(final_epoch_training_stats.get_epoch_training_stats().get_train_loss())
if final_epoch_training_stats.get_epoch_validation_stats():
self.set_final_val_combined_acc(final_epoch_training_stats.get_epoch_validation_stats().get_val_acc())
self.set_final_val_combined_loss(final_epoch_training_stats.get_epoch_validation_stats().get_val_loss())
self.set_final_val_clean_acc(final_epoch_training_stats.get_epoch_validation_stats().get_val_clean_acc())
self.set_final_val_clean_loss(final_epoch_training_stats.get_epoch_validation_stats().get_val_clean_loss())
self.set_final_val_triggered_acc(final_epoch_training_stats.get_epoch_validation_stats().get_val_triggered_acc())
self.set_final_val_triggered_loss(final_epoch_training_stats.get_epoch_validation_stats().get_val_triggered_loss())
self.final_optimizer_num_epochs_trained = self.num_epochs_trained_per_optimizer[-1]
def set_final_train_acc(self, acc):
if 0 <= acc <= 100:
self.final_train_acc = acc
else:
msg = "Final Training accuracy should be between 0 and 100!"
logger.error(msg)
raise ValueError(msg)
def set_final_train_loss(self, loss):
self.final_train_loss = loss
def set_final_val_combined_acc(self, acc):
if acc is None or 0 <= acc <= 100: # allow for None in case validation metrics are not computed
self.final_combined_val_acc = acc
else:
msg = "Final validation accuracy should be between 0 and 100!"
logger.error(msg)
raise ValueError(msg)
def set_final_val_combined_loss(self, loss):
self.final_combined_val_loss = loss
def set_final_val_clean_acc(self, acc):
self.final_clean_val_acc = acc
def set_final_val_triggered_acc(self, acc):
self.final_triggered_val_acc = acc
def set_final_val_clean_loss(self, loss):
self.final_clean_val_loss = loss
def set_final_val_triggered_loss(self, loss):
self.final_triggered_val_loss = loss
def set_final_clean_data_test_acc(self, acc):
if 0 <= acc <= 100:
self.final_clean_data_test_acc = acc
else:
msg = "Final clean data test accuracy should be between 0 and 100!"
logger.error(msg)
raise ValueError(msg)
def set_final_triggered_data_test_acc(self, acc):
# we allow None in order to indicate that triggered data wasn't present in this dataset
if acc is None or 0 <= acc <= 100:
self.final_triggered_data_test_acc = acc
else:
msg = "Final triggered data test accuracy should be between 0 and 100!"
logger.error(msg)
raise ValueError(msg)
def set_final_clean_data_triggered_label_test_acc(self, acc):
if acc is None or 0 <= acc <= 100:
self.final_clean_data_triggered_labels_test_acc = acc
else:
msg = "Final clean data test accuracy should be between 0 and 100!"
logger.error(msg)
raise ValueError(msg)
def set_final_clean_data_n_total(self, n):
self.final_clean_data_n_total = n
def set_final_triggered_data_n_total(self, n):
self.final_triggered_data_n_total = n
def set_final_clean_data_triggered_label_n(self, n):
self.final_clean_data_triggered_labels_n_total = n
def get_summary(self):
"""
Returns a dictionary of the summary statistics from the training run
"""
summary_dict = dict()
summary_dict['final_train_acc'] = self.final_train_acc
summary_dict['final_train_loss'] = self.final_train_loss
summary_dict['final_combined_val_acc'] = self.final_combined_val_acc
summary_dict['final_combined_val_loss'] = self.final_combined_val_loss
summary_dict['final_clean_val_acc'] = self.final_clean_val_acc
summary_dict['final_clean_val_loss'] = self.final_clean_val_loss
summary_dict['final_triggered_val_acc'] = self.final_triggered_val_acc
summary_dict['final_triggered_val_loss'] = self.final_triggered_val_loss
summary_dict['final_clean_data_test_acc'] = self.final_clean_data_test_acc
summary_dict['final_triggered_data_test_acc'] = self.final_triggered_data_test_acc
summary_dict['final_clean_data_n_total'] = self.final_clean_data_n_total
summary_dict['final_triggered_data_n_total'] = self.final_triggered_data_n_total
summary_dict['clean_test_triggered_label_accuracy'] = self.final_clean_data_triggered_labels_test_acc
summary_dict['clean_test_triggered_label_n_total'] = self.final_clean_data_triggered_labels_n_total
summary_dict['final_optimizer_num_epochs_trained'] = self.num_epochs_trained_per_optimizer
return summary_dict
def save_summary_to_json(self, json_fname: str) -> None:
"""
Saves the training summary to a JSON file
"""
summary_dict = self.get_summary()
# write it to json
with open(json_fname, 'w') as fp:
json.dump(summary_dict, fp)
logger.info("Wrote summary statistics: %s to %s" % (str(summary_dict), json_fname))
def save_detailed_stats_to_disk(self, fname: str) -> None:
"""
Saves all batch statistics for every epoch as a CSV file
:param fname: filename to save the detailed information to
:return: None
"""
keys = ['epoch_number', 'train_acc', 'train_loss', 'combined_val_acc', 'combined_val_loss',
'clean_val_acc', 'clean_val_loss', 'triggered_val_acc', 'triggered_val_loss']
with open(fname, 'w') as output_file:
# write header first
dict_writer = csv.DictWriter(output_file, keys)
dict_writer.writeheader()
for ii, e in enumerate(self.stats_per_epoch_list):
# TODO: we ignore batch_statistics for now, we may want to add this in in the future
epoch_training_stats = e.get_epoch_training_stats()
epoch_val_stats = e.get_epoch_validation_stats()
combined_val_acc = None
combined_val_loss = None
clean_val_acc = None
clean_val_loss = None
triggered_val_acc = None
triggered_val_loss = None
if epoch_val_stats is not None:
combined_val_acc = epoch_val_stats.get_val_acc()
combined_val_loss = epoch_val_stats.get_val_loss()
clean_val_acc = epoch_val_stats.get_val_clean_acc()
clean_val_loss = epoch_val_stats.get_val_clean_loss()
triggered_val_acc = epoch_val_stats.get_val_triggered_acc()
triggered_val_loss = epoch_val_stats.get_val_triggered_loss()
dict_writer.writerow(dict(epoch_number=e.get_epoch_num(),
train_acc=epoch_training_stats.get_train_acc(),
train_loss=epoch_training_stats.get_train_loss(),
combined_val_acc=combined_val_acc,
combined_val_loss=combined_val_loss,
clean_val_acc=clean_val_acc,
clean_val_loss=clean_val_loss,
triggered_val_acc=triggered_val_acc,
triggered_val_loss=triggered_val_loss))
logger.info("Wrote detailed statistics to %s" % (fname,))
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class AvgspeedItem(object):
def __init__(self, area=None, avgspeed=None, avgspeedUnit=None, firstpkgtime=None, firstpkgtimeUnit=None, errorRatio=None, flow=None, flowUnit=None, flowPercent=None, pv=None, hitRatio=None):
"""
:param area: (Optional)
:param avgspeed: (Optional)
:param avgspeedUnit: (Optional)
:param firstpkgtime: (Optional)
:param firstpkgtimeUnit: (Optional)
:param errorRatio: (Optional)
:param flow: (Optional)
:param flowUnit: (Optional)
:param flowPercent: (Optional)
:param pv: (Optional)
:param hitRatio: (Optional)
"""
self.area = area
self.avgspeed = avgspeed
self.avgspeedUnit = avgspeedUnit
self.firstpkgtime = firstpkgtime
self.firstpkgtimeUnit = firstpkgtimeUnit
self.errorRatio = errorRatio
self.flow = flow
self.flowUnit = flowUnit
self.flowPercent = flowPercent
self.pv = pv
self.hitRatio = hitRatio
|
#!/usr/bin/env python3
"""
Zurich Eye
"""
import unittest
import numpy as np
import numpy.testing as npt
import ze_py.transformations as tf
import ze_trajectory_analysis.align as align
class TestAlign(unittest.TestCase):
def test_align_se3(self):
for i in range(100):
# Random data
n_points = 100
T_gt_es = tf.random_transformation()
T_es_gt = tf.inverse_matrix(T_gt_es)
p_gt = np.random.random((n_points,3))
p_es = np.transpose(np.dot(T_es_gt[:3,:3], np.transpose(p_gt))) + T_es_gt[:3,3]
# Compute alignment
R_gt_es, gt_t_gt_es = align.align_se3(p_gt, p_es)
T = np.eye(4)
T[:3,:3] = R_gt_es
T[:3,3] = gt_t_gt_es
npt.assert_array_almost_equal(T_gt_es, T)
# Check alignment
p_es_aligned = np.transpose(np.dot(R_gt_es, np.transpose(p_es))) + gt_t_gt_es
npt.assert_array_almost_equal(p_es_aligned, p_gt)
def test_align_sim3(self):
for i in range(100):
# Random data
n_points = 100
T_gt_es = tf.random_transformation()
s_inv = np.random.random()
T_es_gt = tf.inverse_matrix(T_gt_es)
p_gt = np.random.random((n_points,3))
p_es = s_inv * np.transpose(np.dot(T_es_gt[:3,:3], np.transpose(p_gt))) + s_inv * T_es_gt[:3,3]
# Compute alignment
s, R_gt_es, gt_t_gt_es = align.align_sim3(p_gt, p_es)
T = np.eye(4)
T[:3,:3] = R_gt_es
T[:3,3] = gt_t_gt_es
npt.assert_almost_equal(s, 1.0 / s_inv)
npt.assert_array_almost_equal(T_gt_es, T)
# Check alignment
p_es_aligned = s * np.transpose(np.dot(R_gt_es, np.transpose(p_es))) + gt_t_gt_es
npt.assert_array_almost_equal(p_es_aligned, p_gt)
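# Minimal reference sketch (illustrative only, not the library code) of the closed-form
# Horn/Umeyama alignment that align.align_se3 is expected to implement: it returns R, t
# with p_gt ~= R @ p_es + t, matching how the tests above apply the result.
def _align_se3_sketch(p_gt, p_es):
    mu_gt = p_gt.mean(axis=0)
    mu_es = p_es.mean(axis=0)
    cross_cov = (p_gt - mu_gt).T @ (p_es - mu_es)
    U, _, Vt = np.linalg.svd(cross_cov)
    S = np.eye(3)
    S[2, 2] = np.sign(np.linalg.det(U @ Vt))  # guard against a reflection solution
    R = U @ S @ Vt
    t = mu_gt - R @ mu_es
    return R, t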
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestAlign)
unittest.TextTestRunner(verbosity=2).run(suite) |
import copy
from my_utils.misc.logging import logger
class Trainer():
'''
Trainer for mini-batch stochastic gradient descent.
Args
----------
model :
A model to train.
train_loader : my_utils.DataLoader
DataLoader with training dataset.
'''
def __init__(self, model, train_loader):
self.model = model
self.train_loader = train_loader
self.total_steps = 0
def train_epoch(self, optimizer, max_epoch=1,
evaluator=None, score_monitor=None, model_saver=None):
checkpoint_steps = self.train_loader.n_batches
for _ in range(max_epoch):
stop_flag = self.train_step(optimizer, checkpoint_steps, checkpoint_steps,
evaluator, score_monitor, model_saver)
if stop_flag: return
def train_step(self, optimizer, checkpoint_steps=5000, max_steps=100000,
evaluator=None, score_monitor=None, model_saver=None):
"""Train the model for the specified number of steps instead of epochs."""
loss_sum = 0
stop_flag = False
self.train_loader.train = True
for i, (inputs, labels) in enumerate(self.train_loader, 1):
loss_sum += self.model.fit(inputs, labels, optimizer=optimizer)
self.total_steps += 1
if i%checkpoint_steps == 0:
loss_sum /= checkpoint_steps
logger.info("steps [{}/{}]\tloss: {}\t".format(i, max_steps, loss_sum))
loss_sum = 0
stop_flag = self._evaluate(evaluator, score_monitor, model_saver)
if stop_flag or i >= max_steps: return stop_flag
def _evaluate(self, evaluator, score_monitor, model_saver):
if evaluator:
current_eval = evaluator.evaluate()
logger.info("Evaluator {}: {}\t".format(evaluator.measure, current_eval))
if model_saver:
name_suffix = '_step_{}'.format(self.total_steps)
if evaluator:
name_suffix += '_{}_{}'.format(evaluator.measure, current_eval)
model_saver.save(name_suffix)
if score_monitor:
score_monitor.update_best(current_eval, self.model)
stop_flag = score_monitor.check_stop()
if stop_flag:
logger.info('Dev score saturated.')
return stop_flag
return False
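# Hypothetical wiring sketch (illustrative only; 'model' must expose fit(inputs, labels,
# optimizer=...) returning a loss, and 'train_loader' is a my_utils.DataLoader):
#
#   trainer = Trainer(model, train_loader)
#   trainer.train_epoch(optimizer, max_epoch=10,
#                       evaluator=evaluator, score_monitor=monitor, model_saver=saver)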
|
#!/usr/bin/python
import imp
import subprocess
import argparse
argparser = argparse.ArgumentParser()
argparser.add_argument( "--Traces", type = bool, default=False, help = "Run Traces of the Benchmarks.")
argparser.add_argument( "--Metrics", type = bool, default=False, help = "Runs Metrics of the Benchmarks.")
argparser.add_argument( "--Events", type = bool, default=False, help = "Runs Eents of the Benchmarks.")
argparser.add_argument( "--Device", type = int, default=0, help = "Device where the experiment will be executed.")
args = argparser.parse_args()
if args.Traces == True:
traces = [" "]
if args.Metrics == True:
traces = ["--metrics all"]
if args.Events == True:
traces = ["--events all"]
if args.Traces == False and args.Metrics == False and args.Events == False:
traces = [" ", "--metrics all", "--events all"]
common = imp.load_source("common", "../common/common.py")
subprocess.check_output("rm -f *.csv", shell = True)
programs = [ "matMul_gpu_sharedmem"]
# Matrix sizes 240, 480, ..., 8160 paired with the selected device.
parameters = [str(n) + " " + str(args.Device) for n in range(240, 8161, 240)]
kernel = "matMul"
common.run_traces(programs, parameters, kernel, traces)
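# Example invocations (illustrative; the script name is a placeholder):
#   python <this_script>.py                      # traces, metrics and events
#   python <this_script>.py --Metrics --Device 1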
|
'''
Check Permutation
Send Feedback
For a given two strings, 'str1' and 'str2', check whether they are a permutation of each other or not.
Permutations of each other
Two strings are said to be a permutation of each other when either of the string's characters can be rearranged so that it becomes identical to the other one.
Example:
str1= "sinrtg"
str2 = "string"
The character of the first string(str1) can be rearranged to form str2 and hence we can say that the given strings are a permutation of each other.
Input Format:
The first line of input contains a string without any leading and trailing spaces, representing the first string 'str1'.
The second line of input contains a string without any leading and trailing spaces, representing the second string 'str2'.
Note:
All the characters in the input strings would be in lower case.
Output Format:
The only line of output prints either 'true' or 'false', denoting whether the two strings are a permutation of each other or not.
You are not required to print anything. It has already been taken care of. Just implement the function.
Constraints:
0 <= N <= 10^6
Where N is the length of the input string.
Time Limit: 1 second
Sample Input 1:
abcde
baedc
Sample Output 1:
true
Sample Input 2:
abc
cbd
Sample Output 2:
false
'''
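# An equivalent, more concise approach (sketch only; the counting solution below is kept):
#   from collections import Counter
#   print('true' if Counter(s1) == Counter(s2) else 'false')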
## Read input as specified in the question.
## Print output as specified in the question.
s1 = input()
s2 = input()
dic = {}
if len(s1) == len(s2):
for i in s1:
if i in dic:
dic[i] = dic[i] + 1
else:
dic[i] = 1
for i in s2:
if i in dic:
dic[i] = dic[i] - 1
for i in dic:
if dic[i] == 0:
pass
else:
print('false')
break
else:
print('true')
else:
print('false') |
from logging import log
import boto3
import uuid
from tempfile import SpooledTemporaryFile
from logger_creator import CreateLogger
logger = CreateLogger('AWSClient', handlers=1)
logger = logger.get_default_logger()
class AWSClient():
def __init__(self) -> None:
try:
self.s3_client = boto3.client('s3')
self.s3_resource = boto3.resource('s3')
logger.info('AWSClient INSTANCE SUCCESSFULLY CREATED')
except Exception as e:
logger.exception("FAILED TO CREATE AWSClient INSTANCE")
def generate_bucket_name(self, bucket_name: str) -> str:
try:
generated_name = ''.join([bucket_name, '_', str(uuid.uuid4())])
logger.info(f'GENERATED UNIQUE BUCKET NAME: {generated_name}')
return generated_name
except Exception as e:
logger.exception('FAILED TO GENERATE BUCKET NAME')
def create_bucket(self, bucket_name: str, region_code: str = 'us-east-2'):
# session = boto3.session.Session()
# current_region = session.region_name
# print(current_region)
try:
bucket_response = self.s3_resource.create_bucket(
Bucket=bucket_name,
CreateBucketConfiguration={
'LocationConstraint': region_code}
)
logger.info(f'Created a new Bucket called: {bucket_name}')
return (bucket_name, bucket_response)
except Exception as e:
logger.exception('FAILED TO CREATE NEW BUCKET')
def delete_bucket(self, bucket_name: str):
try:
self.s3_resource.Bucket(bucket_name).delete()
logger.info(f'SUCCESSFULLY DELETED BUCKET: {bucket_name}')
except Exception as e:
logger.exception(f'FAILED TO DELETE BUCKET: {bucket_name}')
def get_file_names(self, bucket_name: str) -> list:
try:
bucket = self.s3_resource.Bucket(name=bucket_name)
file_name_list = []
for obj in bucket.objects.all():
file_name_list.append(obj.key)
logger.info(
f'{bucket_name} BUCKETS FILE NAMES SUCCESSFULLY FETCHED')
return file_name_list
except Exception as e:
logger.exception(
f'FAILED TO RETRIEVE {bucket_name} BUCKETS FILE NAMES')
def get_file_names_with_description(self, bucket_name: str) -> dict:
try:
bucket = self.s3_resource.Bucket(name=bucket_name)
files_dict = {}
for obj in bucket.objects.all():
descriptions = obj.Object()
files_dict[obj.key] = {'storage_class': obj.storage_class,
'last_modified': obj.last_modified,
'version_id': descriptions.version_id,
'meta_data': descriptions.metadata
}
logger.info(
f'{bucket_name} BUCKETS FILE NAMES SUCCESSFULLY FETCHED WITH DESCRIPTIONS')
return files_dict
except Exception as e:
logger.exception(
f'FAILED TO RETRIEVE {bucket_name} BUCKETS FILE NAMES WITH DESCRIPTIONS')
def upload_file(self, bucket_name: str, file_name: str, key: str) -> None:
try:
self.s3_resource.Object(bucket_name,
key).upload_file(Filename=file_name, ExtraArgs={'ACL': 'public-read'})
logger.info(
f'{file_name} UPLOADED SUCCESSFULLY TO BUCKET {bucket_name} as {key}')
except Exception as e:
logger.exception('FAILED TO UPLOAD FILE')
raise Exception
def put_file(self, bucket_name: str, file_contents, key: str) -> None:
try:
self.s3_resource.Object(bucket_name,
key).put(Body=file_contents, ACL='public-read')
logger.info(
f'{key} PUT SUCCESSFULLY TO BUCKET {bucket_name} USING BODY DATA')
except Exception as e:
logger.exception('FAILED TO PUT FILE')
raise Exception
def upload_file_bytes(self, data, bucket_name: str, file_name: str, encode: bool = False):
try:
if encode:
data = data.encode()
self.s3_client.put_object(Body=data,
Bucket=bucket_name, Key=file_name, ACL='public-read')
logger.info(
f'UPLOADED BYTES SUCCESSFULLY TO BUCKET {bucket_name} as {file_name}')
except Exception as e:
logger.exception('FAILED TO UPLOAD BYTES')
def upload_file_object(self, temp_file: SpooledTemporaryFile, bucket_name: str, file_name: str):
try:
self.s3_client.upload_fileobj(
Fileobj=temp_file, Bucket=bucket_name, Key=file_name, ExtraArgs={'ACL': 'public-read'})
logger.info(
f'UPLOADED FILE OBJECT SUCCESSFULLY TO BUCKET {bucket_name} as {file_name}')
except Exception as e:
logger.exception('FAILED TO UPLOAD FILE OBJECT')
def get_file_link(self, bucket_name: str, file_name: str) -> str:
try:
bucket_location = self.s3_client.get_bucket_location(
Bucket=bucket_name)
object_url = "https://{0}.s3.{1}.amazonaws.com/{2}".format(
bucket_name,
bucket_location['LocationConstraint'],
file_name)
logger.info(f'SUCCESSFULLY GENERATED FILE LINK')
return object_url
except Exception as e:
logger.exception('FAILED TO GENERATE FILE LINK')
def load_file_bytes(self, bucket_name: str, file_name: str):
try:
s3_object = self.s3_client.get_object(
Bucket=bucket_name, Key=file_name)
data = s3_object['Body'].read()
logger.info(f'SUCCESSFULLY LOADED FILE {file_name} AS BYTES')
return data
except Exception as e:
logger.exception('FAILED TO LOAD FILE BYTES')
def download_file(self, bucket_name: str, file_name: str, location: str = '/tmp/'):
try:
self.s3_resource.Object(bucket_name, file_name).download_file(
f'{location}{file_name}')
logger.info(f'{file_name} SUCCESSFULLY DOWNLOAD TO {location}')
except Exception as e:
logger.exception('FAILED TO DOWNLOAD FILE')
def delete_file(self, bucket_name: str, file_name: str):
try:
self.s3_resource.Object(bucket_name, file_name).delete()
logger.info(
f'{file_name} SUCCESSFULLY DELETED FROM {bucket_name} BUCKET')
except Exception as e:
logger.exception('FAILED TO DELETE FILE FROM BUCKET')
def delete_all_files_From_bucket(self, bucket_name: str):
try:
res = []
bucket = self.s3_resource.Bucket(bucket_name)
for obj_version in bucket.object_versions.all():
res.append({'Key': obj_version.object_key,
'VersionId': obj_version.id})
bucket.delete_objects(Delete={'Objects': res})
logger.info(
f'SUCCESSFULLY DELETED ALL FILES IN {bucket_name} BUCKET')
except Exception as e:
logger.exception(
f'FAILED TO DELETE ALL FILES IN {bucket_name} BUCKET')
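# Hypothetical usage sketch (illustrative only; the bucket and file names are placeholders
# and the calls assume valid AWS credentials are configured):
if __name__ == '__main__':
    client = AWSClient()
    bucket = 'example-bucket'
    client.upload_file(bucket, file_name='local/report.csv', key='reports/report.csv')
    print(client.get_file_names(bucket))
    print(client.get_file_link(bucket, 'reports/report.csv'))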
|
import turtle
import time
import math
l = int(input("Input spiral length l = "))
turtle.shape('turtle')
for i in range(l):
t = i / 20 * math.pi
x = (1 + 1 * t) * math.cos(t)
y = (1 + 1 * t) * math.sin(t)
turtle.goto(x, y)
time.sleep(5)
|
import argparse
import json
import os
import pickle
from pathlib import Path
import numpy as np
from tensorflow import gfile
from tensorflow.python.lib.io import file_io
from keras.models import Model, Input
from keras.layers import LSTM, Embedding, Dense, TimeDistributed, Dropout, Bidirectional
from keras.callbacks import TensorBoard
from sklearn.model_selection import train_test_split
MODEL_FILE = 'keras_saved_model.h5'
def load_feature(input_x_path):
with gfile.Open(input_x_path, 'rb') as input_x_file:
return pickle.loads(input_x_file.read())
def load_label(input_y_path):
with gfile.Open(input_y_path, 'rb') as input_y_file:
return pickle.loads(input_y_file.read())
# Defining and parsing the command-line arguments
parser = argparse.ArgumentParser()
parser.add_argument('--input-x-path', type=str, help='')
parser.add_argument('--input-y-path', type=str, help='')
parser.add_argument('--input-job-dir', type=str, help='')
parser.add_argument('--input-tags', type=int, help='')
parser.add_argument('--input-words', type=int, help='')
parser.add_argument('--input-dropout', type=float, help='')
parser.add_argument('--output-model-path', type=str, help='')
parser.add_argument('--output-model-path-file', type=str, help='')
args = parser.parse_args()
print(os.path.dirname(args.output_model_path))
print(args.input_x_path)
print(args.input_y_path)
print(args.input_job_dir)
print(args.input_tags)
print(args.input_words)
print(args.input_dropout)
print(args.output_model_path)
print(args.output_model_path_file)
X = load_feature(args.input_x_path)
y = load_label(args.input_y_path)
# split data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# initialize tensorboard
tensorboard = TensorBoard(
log_dir=os.path.join(args.input_job_dir, 'logs'),
histogram_freq=0,
write_graph=True,
embeddings_freq=0)
callbacks = [tensorboard]
# model
model_input = Input(shape=(140,))
model = Embedding(input_dim=args.input_words,
output_dim=140, input_length=140)(model_input)
model = Dropout(args.input_dropout)(model)
model = Bidirectional(
LSTM(units=100, return_sequences=True, recurrent_dropout=0.1))(model)
out = TimeDistributed(Dense(args.input_tags, activation="softmax"))(
model) # softmax output layer
model = Model(model_input, out)
model.compile(optimizer="adam", loss="categorical_crossentropy",
metrics=["accuracy"])
model.summary()
history = model.fit(X_train, np.array(y_train), batch_size=32,
epochs=1, validation_split=0.1, verbose=1, callbacks=callbacks)
loss, accuracy = model.evaluate(X_test, np.array(y_test))
# save model
print('saved model to ', args.output_model_path)
model.save(MODEL_FILE)
with file_io.FileIO(MODEL_FILE, mode='rb') as input_f:
with file_io.FileIO(args.output_model_path + '/' + MODEL_FILE, mode='wb+') as output_f:
output_f.write(input_f.read())
# write out metrics
metrics = {
'metrics': [{
'name': 'accuracy-score',
'numberValue': accuracy,
'format': "PERCENTAGE",
}]
}
with file_io.FileIO('/mlpipeline-metrics.json', 'w') as f:
json.dump(metrics, f)
# write out TensorBoard viewer
metadata = {
'outputs': [{
'type': 'tensorboard',
'source': args.input_job_dir,
}]
}
with open('/mlpipeline-ui-metadata.json', 'w') as f:
json.dump(metadata, f)
Path(args.output_model_path_file).parent.mkdir(parents=True, exist_ok=True)
Path(args.output_model_path_file).write_text(args.output_model_path)
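# Hypothetical invocation sketch (illustrative only; the paths and hyperparameters are
# placeholders for the pickled features/labels this component expects):
#
#   python train.py --input-x-path gs://bucket/X.pkl --input-y-path gs://bucket/y.pkl \
#       --input-job-dir gs://bucket/job --input-tags 17 --input-words 35000 \
#       --input-dropout 0.1 --output-model-path gs://bucket/model \
#       --output-model-path-file /tmp/output_model_path.txt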
|
# Copyright 2014 CloudFounders NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
VMachine module
"""
import time
from ovs.lib.helpers.decorators import log
from ovs.celery_run import celery
from ovs.dal.hybrids.pmachine import PMachine
from ovs.dal.hybrids.vmachine import VMachine
from ovs.dal.hybrids.vdisk import VDisk
from ovs.dal.lists.vmachinelist import VMachineList
from ovs.dal.lists.pmachinelist import PMachineList
from ovs.dal.lists.vdisklist import VDiskList
from ovs.dal.lists.storagerouterlist import StorageRouterList
from ovs.dal.lists.storagedriverlist import StorageDriverList
from ovs.extensions.hypervisor.factory import Factory
from ovs.lib.vdisk import VDiskController
from ovs.lib.messaging import MessageController
from ovs.lib.mdsservice import MDSServiceController
from ovs.log.logHandler import LogHandler
from ovs.extensions.generic.volatilemutex import VolatileMutex
logger = LogHandler('lib', name='vmachine')
class VMachineController(object):
"""
Contains all BLL related to VMachines
"""
@staticmethod
@celery.task(name='ovs.machine.create_multiple_from_template')
def create_multiple_from_template(name, machineguid, pmachineguids, start, amount, description=None):
pmachine_pointer = 0
for i in xrange(start, start + amount):
new_name = name if amount == 1 else '{0}-{1}'.format(name, i)
pmachineguid = pmachineguids[pmachine_pointer]
pmachine_pointer += 1
if pmachine_pointer >= len(pmachineguids):
pmachine_pointer = 0
VMachineController.create_from_template(name=new_name,
machineguid=machineguid,
pmachineguid=pmachineguid,
description=description)
@staticmethod
@celery.task(name='ovs.machine.create_from_template')
def create_from_template(name, machineguid, pmachineguid, description=None):
"""
Create a new vmachine using an existing vmachine template
@param machineguid: guid of the template vmachine
@param name: name of new vmachine
@param pmachineguid: guid of hypervisor to create new vmachine on
@return: guid of the newly created vmachine | False on any failure
"""
template_vm = VMachine(machineguid)
if not template_vm.is_vtemplate:
return False
target_pm = PMachine(pmachineguid)
target_hypervisor = Factory.get(target_pm)
storagerouters = [sr for sr in StorageRouterList.get_storagerouters() if sr.pmachine_guid == target_pm.guid]
if len(storagerouters) == 1:
target_storagerouter = storagerouters[0]
else:
raise ValueError('Pmachine {} has no StorageRouter assigned to it'.format(pmachineguid))
routing_key = "sr.{0}".format(target_storagerouter.machine_id)
vpool = None
vpool_guids = set()
if template_vm.vpool is not None:
vpool = template_vm.vpool
vpool_guids.add(vpool.guid)
for disk in template_vm.vdisks:
vpool = disk.vpool
vpool_guids.add(vpool.guid)
if len(vpool_guids) != 1:
raise RuntimeError('Only 1 vpool supported on template disk(s) - {0} found!'.format(len(vpool_guids)))
if not template_vm.pmachine.hvtype == target_pm.hvtype:
raise RuntimeError('Source and target hypervisor not identical')
# Currently, only one vPool is supported, so we can just use whatever the `vpool` variable above
# was set to as 'the' vPool for the code below. This obviously will have to change once vPool mixes
# are supported.
target_storagedriver = None
source_storagedriver = None
for vpool_storagedriver in vpool.storagedrivers:
if vpool_storagedriver.storagerouter.pmachine_guid == target_pm.guid:
target_storagedriver = vpool_storagedriver
if vpool_storagedriver.storagerouter.pmachine_guid == template_vm.pmachine_guid:
source_storagedriver = vpool_storagedriver
if target_storagedriver is None:
raise RuntimeError('Volume not served on target hypervisor')
source_hv = Factory.get(template_vm.pmachine)
target_hv = Factory.get(target_pm)
if not source_hv.is_datastore_available(source_storagedriver.storage_ip, source_storagedriver.mountpoint):
raise RuntimeError('Datastore unavailable on source hypervisor')
if not target_hv.is_datastore_available(target_storagedriver.storage_ip, target_storagedriver.mountpoint):
raise RuntimeError('Datastore unavailable on target hypervisor')
source_vm = source_hv.get_vm_object(template_vm.hypervisor_id)
if not source_vm:
raise RuntimeError('VM with key reference {0} not found'.format(template_vm.hypervisor_id))
name_duplicates = VMachineList.get_vmachine_by_name(name)
if name_duplicates is not None and len(name_duplicates) > 0:
raise RuntimeError('A vMachine with name {0} already exists'.format(name))
vm_path = target_hypervisor.get_vmachine_path(name, target_storagedriver.storagerouter.machine_id)
new_vm = VMachine()
new_vm.copy(template_vm)
new_vm.hypervisor_id = ''
new_vm.vpool = template_vm.vpool
new_vm.pmachine = target_pm
new_vm.name = name
new_vm.description = description
new_vm.is_vtemplate = False
new_vm.devicename = target_hypervisor.clean_vmachine_filename(vm_path)
new_vm.status = 'CREATED'
new_vm.save()
storagedrivers = [storagedriver for storagedriver in vpool.storagedrivers if storagedriver.storagerouter.pmachine_guid == new_vm.pmachine_guid]
if len(storagedrivers) == 0:
raise RuntimeError('Cannot find Storage Driver serving {0} on {1}'.format(vpool.name, new_vm.pmachine.name))
storagedriverguid = storagedrivers[0].guid
disks = []
disks_by_order = sorted(template_vm.vdisks, key=lambda x: x.order)
try:
for disk in disks_by_order:
prefix = '{0}-clone'.format(disk.name)
result = VDiskController.create_from_template(
diskguid=disk.guid,
devicename=prefix,
pmachineguid=target_pm.guid,
machinename=new_vm.name,
machineguid=new_vm.guid,
storagedriver_guid=storagedriverguid
)
disks.append(result)
logger.debug('Disk appended: {0}'.format(result))
except Exception as exception:
logger.error('Creation of disk {0} failed: {1}'.format(disk.name, str(exception)), print_msg=True)
            VMachineController.delete.s(machineguid=new_vm.guid).apply_async(routing_key=routing_key)
raise
try:
result = target_hv.create_vm_from_template(
name, source_vm, disks, target_storagedriver.storage_ip, target_storagedriver.mountpoint, wait=True
)
except Exception as exception:
logger.error('Creation of vm {0} on hypervisor failed: {1}'.format(new_vm.name, str(exception)), print_msg=True)
            VMachineController.delete.s(machineguid=new_vm.guid).apply_async(routing_key=routing_key)
raise
new_vm.hypervisor_id = result
new_vm.status = 'SYNC'
new_vm.save()
return new_vm.guid
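    # Illustrative usage sketch (guids are placeholders): the task is normally
    # dispatched through celery, e.g.
    #   VMachineController.create_from_template.delay(
    #       name='vm-clone', machineguid='<template guid>', pmachineguid='<pmachine guid>')
    # and returns the guid of the new vMachine (False when the source is not a vTemplate).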
@staticmethod
@celery.task(name='ovs.machine.clone')
def clone(machineguid, timestamp, name):
"""
Clone a vmachine using the disk snapshot based on a snapshot timestamp
@param machineguid: guid of the machine to clone
@param timestamp: timestamp of the disk snapshots to use for the clone
@param name: name for the new machine
"""
machine = VMachine(machineguid)
disks = {}
for snapshot in machine.snapshots:
if snapshot['timestamp'] == timestamp:
for diskguid, snapshotguid in snapshot['snapshots'].iteritems():
disks[diskguid] = snapshotguid
new_machine = VMachine()
new_machine.copy(machine)
new_machine.name = name
new_machine.pmachine = machine.pmachine
new_machine.save()
new_disk_guids = []
disks_by_order = sorted(machine.vdisks, key=lambda x: x.order)
for currentDisk in disks_by_order:
if machine.is_vtemplate and currentDisk.templatesnapshot:
snapshotid = currentDisk.templatesnapshot
else:
snapshotid = disks[currentDisk.guid]
prefix = '%s-clone' % currentDisk.name
result = VDiskController.clone(diskguid=currentDisk.guid,
snapshotid=snapshotid,
devicename=prefix,
pmachineguid=new_machine.pmachine_guid,
machinename=new_machine.name,
machineguid=new_machine.guid)
new_disk_guids.append(result['diskguid'])
hv = Factory.get(machine.pmachine)
try:
result = hv.clone_vm(machine.hypervisor_id, name, disks, None, True)
except:
VMachineController.delete(machineguid=new_machine.guid)
raise
new_machine.hypervisor_id = result
new_machine.save()
return new_machine.guid
@staticmethod
@celery.task(name='ovs.machine.delete')
def delete(machineguid):
"""
Delete a vmachine
@param machineguid: guid of the machine
"""
machine = VMachine(machineguid)
storagedriver_mountpoint, storagedriver_storage_ip = None, None
try:
storagedriver = [storagedriver for storagedriver in machine.vpool.storagedrivers if storagedriver.storagerouter.pmachine_guid == machine.pmachine_guid][0]
storagedriver_mountpoint = storagedriver.mountpoint
storagedriver_storage_ip = storagedriver.storage_ip
except Exception as ex:
logger.debug('No mountpoint info could be retrieved. Reason: {0}'.format(str(ex)))
storagedriver_mountpoint = None
disks_info = []
for vd in machine.vdisks:
for storagedriver in vd.vpool.storagedrivers:
if storagedriver.storagerouter.pmachine_guid == machine.pmachine_guid:
disks_info.append((storagedriver.mountpoint, vd.devicename))
        if machine.pmachine:  # Allow hypervisor id to be None; the lookup strategy is hypervisor dependent
try:
hypervisor_id = machine.hypervisor_id
if machine.pmachine.hvtype == 'KVM':
hypervisor_id = machine.name # On KVM we can lookup the machine by name, not by id
hv = Factory.get(machine.pmachine)
hv.delete_vm(hypervisor_id, storagedriver_mountpoint, storagedriver_storage_ip, machine.devicename, disks_info, True)
except Exception as exception:
logger.error('Deletion of vm on hypervisor failed: {0}'.format(str(exception)), print_msg=True)
for disk in machine.vdisks:
logger.debug('Deleting disk {0} with guid: {1}'.format(disk.name, disk.guid))
disk.delete()
logger.debug('Deleting vmachine {0} with guid {1}'.format(machine.name, machine.guid))
machine.delete()
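    # Illustrative usage sketch (placeholders): as done in create_from_template above,
    # delete is usually dispatched asynchronously with a routing key so it runs on the
    # StorageRouter serving the vMachine, e.g.
    #   VMachineController.delete.s(machineguid=some_guid).apply_async(routing_key='sr.<machine_id>')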
@staticmethod
@celery.task(name='ovs.machine.delete_from_voldrv')
@log('VOLUMEDRIVER_TASK')
def delete_from_voldrv(name, storagedriver_id):
"""
This method will delete a vmachine based on the name of the vmx given
"""
pmachine = PMachineList.get_by_storagedriver_id(storagedriver_id)
if pmachine.hvtype not in ['VMWARE', 'KVM']:
return
hypervisor = Factory.get(pmachine)
name = hypervisor.clean_vmachine_filename(name)
if pmachine.hvtype == 'VMWARE':
storagedriver = StorageDriverList.get_by_storagedriver_id(storagedriver_id)
vpool = storagedriver.vpool
else:
vpool = None
vm = VMachineList.get_by_devicename_and_vpool(name, vpool)
if vm is not None:
MessageController.fire(MessageController.Type.EVENT, {'type': 'vmachine_deleted',
'metadata': {'name': vm.name}})
vm.delete(abandon=True)
@staticmethod
@celery.task(name='ovs.machine.rename_from_voldrv')
@log('VOLUMEDRIVER_TASK')
def rename_from_voldrv(old_name, new_name, storagedriver_id):
"""
        This method will handle the rename of a vmx file
"""
pmachine = PMachineList.get_by_storagedriver_id(storagedriver_id)
if pmachine.hvtype not in ['VMWARE', 'KVM']:
return
hypervisor = Factory.get(pmachine)
if pmachine.hvtype == 'VMWARE':
storagedriver = StorageDriverList.get_by_storagedriver_id(storagedriver_id)
vpool = storagedriver.vpool
else:
vpool = None
old_name = hypervisor.clean_vmachine_filename(old_name)
new_name = hypervisor.clean_vmachine_filename(new_name)
scenario = hypervisor.get_rename_scenario(old_name, new_name)
if scenario == 'RENAME':
            # Most likely a path change. Updating the path
vm = VMachineList.get_by_devicename_and_vpool(old_name, vpool)
if vm is not None:
vm.devicename = new_name
vm.save()
elif scenario == 'UPDATE':
vm = VMachineList.get_by_devicename_and_vpool(new_name, vpool)
if vm is None:
                # The vMachine doesn't seem to exist, so it's likely the create didn't come through.
                # Let's create it anyway
VMachineController.update_from_voldrv(new_name, storagedriver_id=storagedriver_id)
vm = VMachineList.get_by_devicename_and_vpool(new_name, vpool)
if vm is None:
raise RuntimeError('Could not create vMachine on rename. Aborting.')
try:
VMachineController.sync_with_hypervisor(vm.guid, storagedriver_id=storagedriver_id)
vm.status = 'SYNC'
except:
vm.status = 'SYNC_NOK'
vm.save()
@staticmethod
@celery.task(name='ovs.machine.set_as_template')
def set_as_template(machineguid):
"""
Set a vmachine as template
@param machineguid: guid of the machine
@return: vmachine template conversion successful: True|False
"""
# Do some magic on the storage layer?
# This is most likely required as extra security measure
# Suppose the template is set back to a real machine
# it can be deleted within vmware which should be blocked.
# This might also require a storagerouter internal check
# to be implemented to discourage volumes from being deleted
# when clones were made from it.
vmachine = VMachine(machineguid)
if vmachine.hypervisor_status == 'RUNNING':
raise RuntimeError('vMachine {0} may not be running to set it as vTemplate'.format(vmachine.name))
for disk in vmachine.vdisks:
VDiskController.set_as_template(diskguid=disk.guid)
vmachine.is_vtemplate = True
vmachine.invalidate_dynamics(['snapshots'])
vmachine.save()
@staticmethod
@celery.task(name='ovs.machine.rollback')
def rollback(machineguid, timestamp):
"""
Rolls back a VM based on a given disk snapshot timestamp
"""
vmachine = VMachine(machineguid)
if vmachine.hypervisor_status == 'RUNNING':
            raise RuntimeError('vMachine {0} may not be running to roll it back'.format(
                vmachine.name
            ))
snapshots = [snap for snap in vmachine.snapshots if snap['timestamp'] == timestamp]
if not snapshots:
raise ValueError('No vmachine snapshots found for timestamp {}'.format(timestamp))
for disk in vmachine.vdisks:
VDiskController.rollback(diskguid=disk.guid,
timestamp=timestamp)
vmachine.invalidate_dynamics(['snapshots'])
@staticmethod
@celery.task(name='ovs.machine.snapshot')
def snapshot(machineguid, label=None, is_consistent=False, timestamp=None, is_automatic=False):
"""
Snapshot VMachine disks
@param machineguid: guid of the machine
@param label: label to give the snapshots
@param is_consistent: flag indicating the snapshot was consistent or not
@param timestamp: override timestamp, if required. Should be a unix timestamp
"""
timestamp = timestamp if timestamp is not None else time.time()
timestamp = str(int(float(timestamp)))
metadata = {'label': label,
'is_consistent': is_consistent,
'timestamp': timestamp,
'machineguid': machineguid,
'is_automatic': is_automatic}
machine = VMachine(machineguid)
        # @todo: we now skip creating a snapshot when one of a vmachine's disks
        # is missing a mandatory property (volume_id);
        # the subtask will now raise an exception earlier in the workflow
for disk in machine.vdisks:
if not disk.volume_id:
message = 'Missing volume_id on disk {0} - unable to create snapshot for vm {1}'.format(
disk.guid, machine.guid
)
logger.info('Error: {0}'.format(message))
raise RuntimeError(message)
snapshots = {}
success = True
try:
for disk in machine.vdisks:
snapshots[disk.guid] = VDiskController.create_snapshot(diskguid=disk.guid,
metadata=metadata)
except Exception as ex:
logger.info('Error snapshotting disk {0}: {1}'.format(disk.name, str(ex)))
success = False
for diskguid, snapshotid in snapshots.iteritems():
VDiskController.delete_snapshot(diskguid=diskguid,
snapshotid=snapshotid)
logger.info('Create snapshot for vMachine {0}: {1}'.format(
machine.name, 'Success' if success else 'Failure'
))
machine.invalidate_dynamics(['snapshots'])
if not success:
raise RuntimeError('Failed to snapshot vMachine {0}'.format(machine.name))
@staticmethod
@celery.task(name='ovs.machine.sync_with_hypervisor')
@log('VOLUMEDRIVER_TASK')
def sync_with_hypervisor(vmachineguid, storagedriver_id=None):
"""
        Updates a given vmachine with data retrieved from a given pmachine
"""
try:
vmachine = VMachine(vmachineguid)
if storagedriver_id is None and vmachine.hypervisor_id is not None and vmachine.pmachine is not None:
# Only the vmachine was received, so base the sync on hypervisorid and pmachine
hypervisor = Factory.get(vmachine.pmachine)
logger.info('Syncing vMachine (name {})'.format(vmachine.name))
vm_object = hypervisor.get_vm_agnostic_object(vmid=vmachine.hypervisor_id)
elif storagedriver_id is not None and vmachine.devicename is not None:
# Storage Driver id was given, using the devicename instead (to allow hypervisorid updates
# which can be caused by re-adding a vm to the inventory)
pmachine = PMachineList.get_by_storagedriver_id(storagedriver_id)
storagedriver = StorageDriverList.get_by_storagedriver_id(storagedriver_id)
hypervisor = Factory.get(pmachine)
if not hypervisor.file_exists(vmachine.vpool, hypervisor.clean_vmachine_filename(vmachine.devicename)):
return
vmachine.pmachine = pmachine
vmachine.save()
logger.info('Syncing vMachine (device {}, ip {}, mtpt {})'.format(vmachine.devicename,
storagedriver.storage_ip,
storagedriver.mountpoint))
vm_object = hypervisor.get_vm_object_by_devicename(devicename=vmachine.devicename,
ip=storagedriver.storage_ip,
mountpoint=storagedriver.mountpoint)
else:
message = 'Not enough information to sync vmachine'
logger.info('Error: {0}'.format(message))
raise RuntimeError(message)
except Exception as ex:
logger.info('Error while fetching vMachine info: {0}'.format(str(ex)))
raise
if vm_object is None:
            message = 'Could not retrieve hypervisor vmachine object'
logger.info('Error: {0}'.format(message))
raise RuntimeError(message)
else:
VMachineController.update_vmachine_config(vmachine, vm_object)
@staticmethod
@celery.task(name='ovs.machine.update_from_voldrv')
@log('VOLUMEDRIVER_TASK')
def update_from_voldrv(name, storagedriver_id):
"""
This method will update/create a vmachine based on a given vmx/xml file
"""
pmachine = PMachineList.get_by_storagedriver_id(storagedriver_id)
if pmachine.hvtype not in ['VMWARE', 'KVM']:
return
hypervisor = Factory.get(pmachine)
name = hypervisor.clean_vmachine_filename(name)
storagedriver = StorageDriverList.get_by_storagedriver_id(storagedriver_id)
vpool = storagedriver.vpool
machine_ids = [storagedriver.storagerouter.machine_id for storagedriver in vpool.storagedrivers]
if hypervisor.should_process(name, machine_ids=machine_ids):
if pmachine.hvtype == 'VMWARE':
storagedriver = StorageDriverList.get_by_storagedriver_id(storagedriver_id)
vpool = storagedriver.vpool
else:
vpool = None
pmachine = PMachineList.get_by_storagedriver_id(storagedriver_id)
mutex = VolatileMutex('{}_{}'.format(name, vpool.guid if vpool is not None else 'none'))
try:
mutex.acquire(wait=5)
limit = 5
exists = hypervisor.file_exists(vpool, name)
while limit > 0 and exists is False:
time.sleep(1)
exists = hypervisor.file_exists(vpool, name)
limit -= 1
if exists is False:
logger.info('Could not locate vmachine with name {0} on vpool {1}'.format(name, vpool))
vmachine = VMachineList.get_by_devicename_and_vpool(name, vpool)
if vmachine is not None:
VMachineController.delete_from_voldrv(name, storagedriver_id=storagedriver_id)
return
finally:
mutex.release()
try:
mutex.acquire(wait=5)
vmachine = VMachineList.get_by_devicename_and_vpool(name, vpool)
if not vmachine:
vmachine = VMachine()
vmachine.vpool = vpool
vmachine.pmachine = pmachine
vmachine.status = 'CREATED'
vmachine.devicename = name
vmachine.save()
finally:
mutex.release()
if pmachine.hvtype == 'KVM':
try:
VMachineController.sync_with_hypervisor(vmachine.guid, storagedriver_id=storagedriver_id)
vmachine.status = 'SYNC'
except:
vmachine.status = 'SYNC_NOK'
vmachine.save()
else:
logger.info('Ignored invalid file {0}'.format(name))
@staticmethod
@celery.task(name='ovs.machine.update_vmachine_config')
def update_vmachine_config(vmachine, vm_object, pmachine=None):
"""
        Update a vMachine's configuration based on the vMachine object retrieved from the hypervisor
"""
try:
vdisks_synced = 0
if vmachine.name is None:
MessageController.fire(MessageController.Type.EVENT,
{'type': 'vmachine_created',
'metadata': {'name': vm_object['name']}})
elif vmachine.name != vm_object['name']:
MessageController.fire(MessageController.Type.EVENT,
{'type': 'vmachine_renamed',
'metadata': {'old_name': vmachine.name,
'new_name': vm_object['name']}})
if pmachine is not None:
vmachine.pmachine = pmachine
vmachine.name = vm_object['name']
vmachine.hypervisor_id = vm_object['id']
vmachine.devicename = vm_object['backing']['filename']
vmachine.save()
# Updating and linking disks
storagedrivers = StorageDriverList.get_storagedrivers()
datastores = dict([('{}:{}'.format(storagedriver.storage_ip, storagedriver.mountpoint), storagedriver) for storagedriver in storagedrivers])
vdisk_guids = []
for disk in vm_object['disks']:
if disk['datastore'] in vm_object['datastores']:
datastore = vm_object['datastores'][disk['datastore']]
if datastore in datastores:
vdisk = VDiskList.get_by_devicename_and_vpool(disk['filename'], datastores[datastore].vpool)
if vdisk is None:
# The disk couldn't be located, but is in our datastore. We might be in a recovery scenario
vdisk = VDisk()
vdisk.vpool = datastores[datastore].vpool
vdisk.reload_client()
vdisk.devicename = disk['filename']
vdisk.volume_id = vdisk.storagedriver_client.get_volume_id(str(disk['backingfilename']))
vdisk.size = vdisk.info['volume_size']
MDSServiceController.ensure_safety(vdisk)
# Update the disk with information from the hypervisor
if vdisk.vmachine is None:
MessageController.fire(MessageController.Type.EVENT,
{'type': 'vdisk_attached',
'metadata': {'vmachine_name': vmachine.name,
'vdisk_name': disk['name']}})
vdisk.vmachine = vmachine
vdisk.name = disk['name']
vdisk.order = disk['order']
vdisk.save()
vdisk_guids.append(vdisk.guid)
vdisks_synced += 1
for vdisk in vmachine.vdisks:
if vdisk.guid not in vdisk_guids:
MessageController.fire(MessageController.Type.EVENT,
{'type': 'vdisk_detached',
'metadata': {'vmachine_name': vmachine.name,
'vdisk_name': vdisk.name}})
vdisk.vmachine = None
vdisk.save()
logger.info('Updating vMachine finished (name {}, {} vdisks (re)linked)'.format(
vmachine.name, vdisks_synced
))
except Exception as ex:
logger.info('Error during vMachine update: {0}'.format(str(ex)))
raise
|
from typing import List
class Solution:
def beautifulArray(self, n: int) -> List[int]:
A = [i for i in range(1, n + 1)]
def partition(l: int, r: int, mask: int) -> int:
nextSwapped = l
for i in range(l, r + 1):
if A[i] & mask:
A[i], A[nextSwapped] = A[nextSwapped], A[i]
nextSwapped += 1
return nextSwapped - 1
def divide(l: int, r: int, mask: int) -> None:
if l >= r:
return
m = partition(l, r, mask)
divide(l, m, mask << 1)
divide(m + 1, r, mask << 1)
divide(0, n - 1, 1)
return A
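# Illustrative trace of the divide-and-conquer-by-bit approach above (not part of the
# original solution): for n = 4 the partitions on successive bits turn [1, 2, 3, 4]
# into [3, 1, 2, 4]; any permutation with no i < k < j such that
# 2 * A[k] == A[i] + A[j] is an acceptable answer.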
|
import numpy as np
import csv
import sys
with open('answer_train.csv', 'w') as f:
K = 3
N = 1000
I = np.identity(K).astype(np.int32)
for k in xrange(K):
yn = I[k, :]
for i in xrange(N):
f.write(','.join([str(yn_i) for yn_i in yn]) + '\n')
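# Illustrative: with K = 3 and N = 1000 this writes 3000 one-hot label lines,
# i.e. 1000 lines of "1,0,0", then 1000 of "0,1,0", then 1000 of "0,0,1".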
|
# <auto-generated>
# This code was generated by the UnitCodeGenerator tool
#
# Changes to this file will be lost if the code is regenerated
# </auto-generated>
import unittest
import units.volume.litres
class TestLitresMethods(unittest.TestCase):
def test_convert_known_litres_to_millilitres(self):
self.assertAlmostEqual(34000.0, units.volume.litres.to_millilitres(34.0), places=1)
self.assertAlmostEqual(670.0, units.volume.litres.to_millilitres(0.67), places=1)
self.assertAlmostEqual(1090.0, units.volume.litres.to_millilitres(1.09), places=1)
def test_convert_known_litres_to_kilolitres(self):
self.assertAlmostEqual(0.2, units.volume.litres.to_kilolitres(200.0), places=1)
self.assertAlmostEqual(12.345, units.volume.litres.to_kilolitres(12345.0), places=1)
self.assertAlmostEqual(0.08, units.volume.litres.to_kilolitres(80.0), places=1)
def test_convert_known_litres_to_teaspoons(self):
self.assertAlmostEqual(506.809, units.volume.litres.to_teaspoons(3.0), places=1)
self.assertAlmostEqual(33.7873, units.volume.litres.to_teaspoons(0.2), places=1)
self.assertAlmostEqual(709.533, units.volume.litres.to_teaspoons(4.2), places=1)
def test_convert_known_litres_to_tablespoons(self):
self.assertAlmostEqual(168.936, units.volume.litres.to_tablespoons(3.0), places=1)
self.assertAlmostEqual(22.5248, units.volume.litres.to_tablespoons(0.4), places=1)
self.assertAlmostEqual(3772.91, units.volume.litres.to_tablespoons(67.0), places=1)
def test_convert_known_litres_to_quarts(self):
self.assertAlmostEqual(47.5134, units.volume.litres.to_quarts(54.0), places=1)
self.assertAlmostEqual(1.75975, units.volume.litres.to_quarts(2.0), places=1)
self.assertAlmostEqual(0.615914, units.volume.litres.to_quarts(0.7), places=1)
def test_convert_known_litres_to_pints(self):
self.assertAlmostEqual(0.879877, units.volume.litres.to_pints(0.5), places=1)
self.assertAlmostEqual(255.164, units.volume.litres.to_pints(145.0), places=1)
self.assertAlmostEqual(16.0138, units.volume.litres.to_pints(9.1), places=1)
def test_convert_known_litres_to_gallons(self):
self.assertAlmostEqual(2.837603, units.volume.litres.to_gallons(12.9), places=1)
self.assertAlmostEqual(23.9766, units.volume.litres.to_gallons(109.0), places=1)
self.assertAlmostEqual(14.7379, units.volume.litres.to_gallons(67.0), places=1)
def test_convert_known_litres_to_fluid_ounces(self):
self.assertAlmostEqual(175.975, units.volume.litres.to_fluid_ounces(5.0), places=1)
self.assertAlmostEqual(10.5585, units.volume.litres.to_fluid_ounces(0.3), places=1)
self.assertAlmostEqual(38.7146, units.volume.litres.to_fluid_ounces(1.1), places=1)
def test_convert_known_litres_to_u_s_teaspoons(self):
self.assertAlmostEqual(2434.61, units.volume.litres.to_u_s_teaspoons(12.0), places=1)
self.assertAlmostEqual(142.019, units.volume.litres.to_u_s_teaspoons(0.7), places=1)
self.assertAlmostEqual(18239.29, units.volume.litres.to_u_s_teaspoons(89.9), places=1)
def test_convert_known_litres_to_u_s_tablespoons(self):
self.assertAlmostEqual(811.537, units.volume.litres.to_u_s_tablespoons(12.0), places=1)
self.assertAlmostEqual(378.717, units.volume.litres.to_u_s_tablespoons(5.6), places=1)
self.assertAlmostEqual(33.814, units.volume.litres.to_u_s_tablespoons(0.5), places=1)
def test_convert_known_litres_to_u_s_quarts(self):
self.assertAlmostEqual(12.6803, units.volume.litres.to_u_s_quarts(12.0), places=1)
self.assertAlmostEqual(1.15179, units.volume.litres.to_u_s_quarts(1.09), places=1)
self.assertAlmostEqual(5.81179, units.volume.litres.to_u_s_quarts(5.5), places=1)
def test_convert_known_litres_to_u_s_pints(self):
self.assertAlmostEqual(7.18548, units.volume.litres.to_u_s_pints(3.4), places=1)
self.assertAlmostEqual(1.6907, units.volume.litres.to_u_s_pints(0.8), places=1)
self.assertAlmostEqual(6340.129, units.volume.litres.to_u_s_pints(3000.0), places=1)
def test_convert_known_litres_to_u_s_gallons(self):
self.assertAlmostEqual(28.821171, units.volume.litres.to_u_s_gallons(109.1), places=1)
self.assertAlmostEqual(10.96314, units.volume.litres.to_u_s_gallons(41.5), places=1)
self.assertAlmostEqual(0.211338, units.volume.litres.to_u_s_gallons(0.8), places=1)
def test_convert_known_litres_to_u_s_fluid_ounces(self):
self.assertAlmostEqual(2738.94, units.volume.litres.to_u_s_fluid_ounces(81.0), places=1)
self.assertAlmostEqual(246.842, units.volume.litres.to_u_s_fluid_ounces(7.3), places=1)
self.assertAlmostEqual(21.97911, units.volume.litres.to_u_s_fluid_ounces(0.65), places=1)
def test_convert_known_litres_to_u_s_cups(self):
self.assertAlmostEqual(3.80408, units.volume.litres.to_u_s_cups(0.9), places=1)
self.assertAlmostEqual(439.15962, units.volume.litres.to_u_s_cups(103.9), places=1)
self.assertAlmostEqual(302.6355, units.volume.litres.to_u_s_cups(71.6), places=1)
if __name__ == '__main__':
unittest.main()
|
NAME = 'volback'
AUTHOR = 'Jam Risser'
VERSION = '0.1.0'
COPYRIGHT = '2017'
BANNER = '''
''' + NAME + ' v' + VERSION + '''
Copyright (c) ''' + COPYRIGHT + ' ' + AUTHOR + '''
'''
CONFIG_FILENAME = 'volback.yml'
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
from __future__ import absolute_import
import os, tempfile
def draw_graph(origin_node, ancestor_depth=None, descendant_depth=None, format='dot',
include_calculation_inputs=False, include_calculation_outputs=False):
"""
The algorithm starts from the original node and goes both input-ward and output-ward via a breadth-first algorithm.
:param origin_node: An Aiida node, the starting point for drawing the graph
:param int ancestor_depth: The maximum depth of the ancestors drawn. If left to None, we recurse until the graph is fully explored
:param int descendant_depth: The maximum depth of the descendants drawn. If left to None, we recurse until the graph is fully explored
:param str format: The format, by default dot
:returns: The exit_status of the os.system call that produced the valid file
:returns: The file name of the final output
..note::
If an invalid format is provided graphviz prints a helpful message, so this doesn't need to be implemented here.
"""
    # The graph is explored breadth-first, both towards ancestors and descendants,
    # until the connected part of the graph that contains the root_pk is fully explored.
# TODO this command deserves to be improved, with options and further subcommands
from aiida.orm.calculation import Calculation
from aiida.orm.calculation.job import JobCalculation
from aiida.orm.code import Code
from aiida.orm.node import Node
from aiida.common.links import LinkType
from aiida.orm.querybuilder import QueryBuilder
def draw_node_settings(node, **kwargs):
"""
Returns a string with all infos needed in a .dot file to define a node of a graph.
:param node:
:param kwargs: Additional key-value pairs to be added to the returned string
:return: a string
"""
if isinstance(node, Calculation):
shape = "shape=polygon,sides=4"
elif isinstance(node, Code):
shape = "shape=diamond"
else:
shape = "shape=ellipse"
if kwargs:
additional_params = ",{}".format(
",".join('{}="{}"'.format(k, v) for k, v in kwargs.items()))
else:
additional_params = ""
if node.label:
label_string = "\n'{}'".format(node.label)
additional_string = ""
else:
additional_string = "\n {}".format(node.get_desc())
label_string = ""
labelstring = 'label="{} ({}){}{}"'.format(
node.__class__.__name__, node.pk, label_string,
additional_string)
return "N{} [{},{}{}];".format(node.pk, shape, labelstring,
additional_params)
def draw_link_settings(inp_id, out_id, link_label, link_type):
if link_type in (LinkType.CREATE.value, LinkType.INPUT.value):
style='solid' # Solid lines and black colors
color = "0.0 0.0 0.0" # for CREATE and INPUT (The provenance graph)
elif link_type == LinkType.RETURN.value:
style='dotted' # Dotted lines of
color = "0.0 0.0 0.0" # black color for Returns
elif link_type == LinkType.CALL.value:
style='bold' # Bold lines and
color = "0.0 1.0 1.0" # Bright red for calls
else:
style='solid' # Solid and
color="0.0 0.0 0.5" #grey lines for unspecified links!
return ' {} -> {} [label="{}", color="{}", style="{}"];'.format("N{}".format(inp_id), "N{}".format(out_id), link_label, color, style)
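    # Illustrative (pks and label made up): draw_link_settings(12, 34, 'output_structure',
    # LinkType.CREATE.value) returns
    #   ' N12 -> N34 [label="output_structure", color="0.0 0.0 0.0", style="solid"];'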
# Breadth-first search of all ancestors and descendant nodes of a given node
links = {} # Accumulate links here
nodes = {origin_node.pk: draw_node_settings(origin_node, style='filled', color='lightblue')} #Accumulate nodes specs here
    # Additional nodes (the ones added when either include_calculation_inputs or include_calculation_outputs
    # is set to True). I have to put them in a different dictionary because nodes is the one used for the
    # recursion, whereas these should not be used for the recursion:
additional_nodes = {}
last_nodes = [origin_node] # Put the nodes whose links have not been scanned yet
    # Go through the graph input-ward (i.e. look at inputs)
depth = 0
while last_nodes:
# I augment depth every time I get through a new iteration
depth += 1
# I check whether I should stop here:
if ancestor_depth is not None and depth > ancestor_depth:
break
# I continue by adding new nodes here!
new_nodes = []
for node in last_nodes:
# This query gives me all the inputs of this node, and link labels and types!
input_query = QueryBuilder()
input_query.append(Node, filters={'id':node.pk}, tag='n')
input_query.append(Node, input_of='n', edge_project=('id', 'label', 'type'), project='*', tag='inp')
for inp, link_id, link_label, link_type in input_query.iterall():
# I removed this check, to me there is no way that this link was already referred to!
# if link_id not in links:
links[link_id] = draw_link_settings(inp.pk, node.pk, link_label, link_type)
# For the nodes I need to check, maybe this same node is referred to multiple times.
if inp.pk not in nodes:
nodes[inp.pk] = draw_node_settings(inp)
new_nodes.append(inp)
# Checking whether I also should include all the outputs of a calculation into the drawing:
if include_calculation_outputs and isinstance(node, Calculation):
# Query for the outputs, giving me also link labels and types:
output_query = QueryBuilder()
output_query.append(Node, filters={'id':node.pk}, tag='n')
output_query.append(Node, output_of='n', edge_project=('id', 'label', 'type'), project='*', tag='out')
# Iterate through results
for out, link_id, link_label, link_type in output_query.iterall():
# This link might have been drawn already, because the output is maybe
# already drawn.
# To check: Maybe it's more efficient not to check this, since
# the dictionaries are large and contain many keys...
# I.e. just always draw, also when overwriting an existing (identical) entry.
if link_id not in links:
links[link_id] = draw_link_settings(node.pk, out.pk, link_label, link_type)
if out.pk not in nodes and out.pk not in additional_nodes:
additional_nodes[out.pk] = draw_node_settings(out)
last_nodes = new_nodes
# Go through the graph down-ward (i.e. look at outputs)
last_nodes = [origin_node]
depth = 0
while last_nodes:
depth += 1
        # Also here, check whether a maximum descendant depth is set and applies.
if descendant_depth is not None and depth > descendant_depth:
break
new_nodes = []
for node in last_nodes:
# Query for the outputs:
output_query = QueryBuilder()
output_query.append(Node, filters={'id':node.pk}, tag='n')
output_query.append(Node, output_of='n', edge_project=('id', 'label', 'type'), project='*', tag='out')
for out, link_id, link_label, link_type in output_query.iterall():
# Draw the link
links[link_id] = draw_link_settings(node.pk, out.pk, link_label, link_type)
if out.pk not in nodes:
nodes[out.pk] = draw_node_settings(out)
new_nodes.append(out)
if include_calculation_inputs and isinstance(node, Calculation):
input_query = QueryBuilder()
input_query.append(Node, filters={'id':node.pk}, tag='n')
input_query.append(Node, input_of='n', edge_project=('id', 'label', 'type'), project='*', tag='inp')
for inp, link_id, link_label, link_type in input_query.iterall():
# Also here, maybe it's just better not to check?
if link_id not in links:
links[link_id] = draw_link_settings(inp.pk, node.pk, link_label, link_type)
if inp.pk not in nodes and inp.pk not in additional_nodes:
additional_nodes[inp.pk] = draw_node_settings(inp)
last_nodes = new_nodes
# Writing the graph to a temporary file
fd, fname = tempfile.mkstemp(suffix='.dot')
with open(fname, 'w') as fout:
fout.write("digraph G {\n")
for l_name, l_values in links.items():
fout.write(' {}\n'.format(l_values))
for n_name, n_values in nodes.items():
fout.write(" {}\n".format(n_values))
for n_name, n_values in additional_nodes.items():
fout.write(" {}\n".format(n_values))
fout.write("}\n")
# Now I am producing the output file
output_file_name = "{0}.{format}".format(origin_node.pk, format=format)
exit_status = os.system('dot -T{format} {0} -o {1}'.format(fname, output_file_name, format=format))
# cleaning up by removing the temporary file
os.remove(fname)
return exit_status, output_file_name
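# Illustrative usage sketch (hypothetical pk; requires graphviz' `dot` on the PATH):
#   from aiida.orm import load_node
#   exit_status, output_file = draw_graph(load_node(1234), format='pdf')
# which renders the provenance graph of node 1234 to '1234.pdf' in the current directory.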
|
__all__ = [
"Diagnostic",
"DiagnosticCollection",
"DiagnosticError",
"DiagnosticErrorSummary",
]
from dataclasses import dataclass, field, replace
from types import TracebackType
from typing import Any, Iterator, List, Literal, Optional, Type
from beet import FormattedPipelineException, TextFileBase
from tokenstream import UNKNOWN_LOCATION, SourceLocation
from .error import MechaError
from .utils import underline_code
@dataclass
class Diagnostic(MechaError):
"""Exception that can be raised to report messages."""
level: Literal["info", "warn", "error"]
message: str
rule: Optional[str] = None
hint: Optional[str] = None
filename: Optional[str] = None
file: Optional[TextFileBase[Any]] = None
location: SourceLocation = UNKNOWN_LOCATION
end_location: SourceLocation = UNKNOWN_LOCATION
def __str__(self) -> str:
return self.format_message()
def format_message(self) -> str:
"""Return the formatted message."""
message = self.message
if self.rule:
message += f" ({self.rule})"
return message
def format_location(self) -> str:
"""Return the formatted location of the reported message."""
if self.filename:
location = self.filename
if not self.location.unknown:
location += f":{self.location.lineno}:{self.location.colno}"
elif not self.location.unknown:
location = f"line {self.location.lineno}, column {self.location.colno}"
if self.hint:
location = f'File "{self.hint}", {location}'
elif self.hint:
location = self.hint
else:
location = ""
return location
def format_code(self, code: str) -> Optional[str]:
"""Return the formatted code."""
if self.location.unknown:
return None
return underline_code(
code,
self.location,
self.location if self.end_location.unknown else self.end_location,
)
def with_defaults(
self,
rule: Optional[str] = None,
hint: Optional[str] = None,
filename: Optional[str] = None,
file: Optional[TextFileBase[Any]] = None,
location: SourceLocation = UNKNOWN_LOCATION,
end_location: SourceLocation = UNKNOWN_LOCATION,
) -> "Diagnostic":
"""Set default values for unspecified attributes."""
if not self.location.unknown:
location = self.location
end_location = self.end_location
return replace(
self,
rule=self.rule or rule,
hint=self.hint or hint,
filename=self.filename or filename,
file=self.file or file,
location=location,
end_location=end_location,
)
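# Illustrative sketch (rule name and filename are made up): a Diagnostic carries its own
# metadata, and with_defaults() only fills in what is still missing, e.g.
#   d = Diagnostic(level="warn", message="unused variable", rule="hypothetical.rule")
#   d = d.with_defaults(rule="other.rule", filename="demo.mcfunction")
#   str(d)               # -> "unused variable (hypothetical.rule)"
#   d.format_location()  # -> "demo.mcfunction"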
@dataclass
class DiagnosticCollection(MechaError):
"""Exception that can be raised to group multiple diagnostics."""
exceptions: List[Diagnostic] = field(default_factory=list)
rule: Optional[str] = None
hint: Optional[str] = None
filename: Optional[str] = None
file: Optional[TextFileBase[Any]] = None
def add(self, exc: Diagnostic) -> Diagnostic:
"""Add diagnostic."""
exc = exc.with_defaults(
rule=self.rule,
hint=self.hint,
filename=self.filename,
file=self.file,
)
self.exceptions.append(exc)
return exc
def extend(self, other: "DiagnosticCollection"):
"""Combine diagnostics from another collection."""
self.exceptions.extend(other.exceptions)
def clear(self):
"""Clear all the diagnostics."""
self.exceptions.clear()
@property
def error(self) -> bool:
"""Return true if the diagnostics contain at least one error."""
for exc in self.exceptions:
if exc.level == "error":
return True
return False
def get_all_errors(self) -> Iterator[Diagnostic]:
"""Yield all the diagnostics with a severity level of "error"."""
for exc in self.exceptions:
if exc.level == "error":
yield exc
def __enter__(self) -> "DiagnosticCollection":
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
):
if not exc_type:
if self.exceptions:
raise self
def __str__(self) -> str:
term = (
"error"
if all(exc.level == "error" for exc in self.exceptions)
else "diagnostic"
)
term += "s" * (len(self.exceptions) > 1)
return f"Reported {len(self.exceptions)} {term}."
class DiagnosticError(MechaError):
"""Raised with a collection of error diagnostics."""
diagnostics: DiagnosticCollection
def __init__(self, diagnostics: DiagnosticCollection):
super().__init__(diagnostics)
self.diagnostics = diagnostics
def __str__(self) -> str:
details = "\n".join(
f"{diagnostic.format_location()}: {diagnostic.format_message()}"
for diagnostic in self.diagnostics.exceptions
)
return f"{self.diagnostics}\n\n{details}"
class DiagnosticErrorSummary(FormattedPipelineException):
"""Raised for showing a summary of how many errors occurred."""
diagnostics: DiagnosticCollection
def __init__(self, diagnostics: DiagnosticCollection):
super().__init__(diagnostics)
self.message = str(diagnostics)
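# Illustrative sketch (filename/message made up): DiagnosticCollection works as a context
# manager and raises itself on exit when any diagnostics were added, e.g.
#   with DiagnosticCollection(filename="demo.mcfunction") as diagnostics:
#       diagnostics.add(Diagnostic(level="error", message="something went wrong"))
# exits by raising the collection, whose str() is "Reported 1 error."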
|
'''
Defines the NIDAQmx engine interface
General notes for developers
-----------------------------------------------------------------------------
This is a wrapper around the NI-DAQmx C API. Refer to the NI-DAQmx C reference
(available as a Windows help file or as HTML documentation on the NI website;
Google can help you quickly find the online documentation).
This code is under heavy development and I may change the API in significant
ways. In general, the only portion of the code you should use in third-party
modules is the `Engine` class. This will serve as the sole communication layer
between the NI hardware and your application. By doing so, this ensures a
sufficient layer of abstraction that helps switch between DAQ hardware from
different vendors provided that the appropriate interface is written.
'''
import logging
log = logging.getLogger(__name__)
log_ai = logging.getLogger(__name__ + '.ai')
log_ao = logging.getLogger(__name__ + '.ao')
import types
import ctypes
from collections import OrderedDict
from functools import partial
from threading import Timer
import numpy as np
import PyDAQmx as mx
from atom.api import (Float, Typed, Unicode, Int, Bool, Callable, Enum,
Property, Value)
from enaml.core.api import Declarative, d_
from ..calibration.util import dbi
from ..engine import Engine
from ..channel import (CounterChannel,
HardwareAIChannel, HardwareAOChannel, HardwareDIChannel,
HardwareDOChannel, SoftwareDIChannel, SoftwareDOChannel)
from ..input import InputData
################################################################################
# Engine-specific channels
################################################################################
class NIDAQGeneralMixin(Declarative):
# Channel identifier (e.g., /Dev1/ai0)
channel = d_(Unicode()).tag(metadata=True)
def __str__(self):
return f'{self.label} ({self.channel})'
class NIDAQTimingMixin(Declarative):
#: Specifies sampling clock for the channel. Even if specifying a sample
#: clock, you still need to explicitly set the fs attribute.
sample_clock = d_(Unicode().tag(metadata=True))
#: Specifies the start trigger for the channel. If None, sampling begins
#: when task is started.
start_trigger = d_(Unicode().tag(metadata=True))
#: Reference clock for the channel. If you aren't sure, a good value is
#: `PXI_Clk10` if using a PXI chassis. This ensures that the sample clocks
#: across all NI cards in the PXI chassis are synchronized.
reference_clock = d_(Unicode()).tag(metadata=True)
class NIDAQCounterChannel(NIDAQGeneralMixin, CounterChannel):
high_samples = d_(Int().tag(metadata=True))
low_samples = d_(Int().tag(metadata=True))
source_terminal = d_(Unicode().tag(metadata=True))
class NIDAQHardwareAOChannel(NIDAQGeneralMixin, NIDAQTimingMixin,
HardwareAOChannel):
#: Available terminal modes. Not all terminal modes may be supported by a
#: particular device
TERMINAL_MODES = 'pseudodifferential', 'differential', 'RSE'
#: Terminal mode
terminal_mode = d_(Enum(*TERMINAL_MODES)).tag(metadata=True)
filter_delay = Property().tag(metadata=True)
filter_delay_samples = Property().tag(metadata=True)
device_name = Property().tag(metadata=False)
#: Filter delay lookup table for different sampling rates. The first column
#: is the lower bound (exclusive) of the sampling rate (in samples/sec) for
#: the filter delay (second column, in samples). The upper bound of the
#: range (inclusive) for the sampling rate is denoted by the next row.
#: e.g., if FILTER_DELAY[i, 0] < fs <= FILTER_DELAY[i+1, 0] is True, then
#: the filter delay is FILTER_DELAY[i, 1].
FILTER_DELAY = np.array([
( 1.0e3, 36.6),
( 1.6e3, 36.8),
( 3.2e3, 37.4),
( 6.4e3, 38.5),
( 12.8e3, 40.8),
( 25.6e3, 43.2),
( 51.2e3, 48.0),
(102.4e3, 32.0),
])
def _get_device_name(self):
return self.channel.strip('/').split('/')[0]
def _get_filter_delay_samples(self):
i = np.flatnonzero(self.fs > self.FILTER_DELAY[:, 0])[-1]
return self.FILTER_DELAY[i, 1]
def _get_filter_delay(self):
return self.filter_delay_samples / self.fs
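    # Illustrative: with fs = 100e3, the last row whose lower bound lies below fs is
    # (51.2e3, 48.0), so filter_delay_samples is 48.0 and filter_delay is
    # 48.0 / 100e3 = 0.48 ms.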
class NIDAQHardwareAIChannel(NIDAQGeneralMixin, NIDAQTimingMixin,
HardwareAIChannel):
#: Available terminal modes. Not all terminal modes may be supported by a
#: particular device
TERMINAL_MODES = 'pseudodifferential', 'differential', 'RSE', 'NRSE'
terminal_mode = d_(Enum(*TERMINAL_MODES)).tag(metadata=True)
#: Terminal coupling to use. Not all terminal couplings may be supported by
#: a particular device. Can be `None`, `'AC'`, `'DC'` or `'ground'`.
terminal_coupling = d_(Enum(None, 'AC', 'DC', 'ground')).tag(metadata=True)
class NIDAQHardwareDIChannel(NIDAQGeneralMixin, NIDAQTimingMixin,
HardwareDIChannel):
pass
class NIDAQHardwareDOChannel(NIDAQGeneralMixin, NIDAQTimingMixin,
HardwareDOChannel):
pass
class NIDAQSoftwareDIChannel(NIDAQGeneralMixin, SoftwareDIChannel):
pass
class NIDAQSoftwareDOChannel(NIDAQGeneralMixin, SoftwareDOChannel):
pass
################################################################################
# PSI utility
################################################################################
def get_channel_property(channels, property, allow_unique=False):
values = [getattr(c, property) for c in channels]
if allow_unique:
return values
elif len(set(values)) != 1:
m = 'NIDAQEngine does not support per-channel {} as specified: {}' \
.format(property, values)
raise ValueError(m)
else:
return values[0]
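# Illustrative: get_channel_property(channels, 'fs') returns the single sampling rate
# shared by all channels (raising ValueError if they disagree), while
# get_channel_property(channels, 'channel', allow_unique=True) returns one value per
# channel as a list.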
################################################################################
# DAQmx utility
################################################################################
def read_digital_lines(task, size=1):
nlines = ctypes.c_uint32()
mx.DAQmxGetDINumLines(task, '', nlines)
nsamp = ctypes.c_int32()
nbytes = ctypes.c_int32()
data = np.empty((size, nlines.value), dtype=np.uint8)
mx.DAQmxReadDigitalLines(task, size, 0, mx.DAQmx_Val_GroupByChannel, data,
data.size, nsamp, nbytes, None)
return data.T
def read_hw_ai(task, available_samples=None, channels=1, block_size=1):
if available_samples is None:
uint32 = ctypes.c_uint32()
mx.DAQmxGetReadAvailSampPerChan(task, uint32)
available_samples = uint32.value
blocks = (available_samples//block_size)
if blocks == 0:
return
samples = blocks*block_size
data = np.empty((channels, samples), dtype=np.double)
int32 = ctypes.c_int32()
mx.DAQmxReadAnalogF64(task, samples, 0, mx.DAQmx_Val_GroupByChannel, data,
data.size, int32, None)
log_ai.trace('Read %d samples', samples)
return data
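# Illustrative: with available_samples=1050, channels=4 and block_size=256, only four
# complete blocks (1024 samples) are read and a (4, 1024) float64 array is returned;
# the remaining 26 samples stay in the NI-DAQmx buffer for a later call.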
def constant_lookup(value):
for name in dir(mx.DAQmxConstants):
if name in mx.DAQmxConstants.constant_list:
if getattr(mx.DAQmxConstants, name) == value:
return name
raise ValueError('Constant {} does not exist'.format(value))
def channel_list(task):
channels = ctypes.create_string_buffer(b'', 4096)
mx.DAQmxGetTaskChannels(task, channels, len(channels))
return [c.strip() for c in channels.value.split(b',')]
def verify_channel_names(task, names):
lines = channel_list(task)
if names is not None:
if len(lines) != len(names):
m = 'Number of names must match number of lines. ' \
'Lines: {}, names: {}'
raise ValueError(m.format(lines, names))
else:
names = lines
return names
def device_list(task):
devices = ctypes.create_string_buffer(b'', 4096)
mx.DAQmxGetTaskDevices(task, devices, len(devices))
return [d.strip() for d in devices.value.split(b',')]
################################################################################
# callback
################################################################################
def hw_ao_helper(cb, task, event_type, cb_samples, cb_data):
cb(cb_samples)
return 0
def hw_ai_helper(cb, channels, discard, task, event_type=None, cb_samples=None,
cb_data=None):
uint32 = ctypes.c_uint32()
mx.DAQmxGetReadAvailSampPerChan(task, uint32)
available_samples = uint32.value
if available_samples == 0:
return 0
uint64 = ctypes.c_uint64()
mx.DAQmxGetReadCurrReadPos(task, uint64)
read_position = uint64.value
log_ai.trace('Current read position %d, available samples %d',
read_position, available_samples)
if read_position < discard:
samples = min(discard-read_position, available_samples)
read_hw_ai(task, samples, channels)
available_samples -= samples
log_ai.debug('Discarded %d samples from beginning, %d available',
samples, available_samples)
if available_samples == 0:
return 0
data = read_hw_ai(task, available_samples, channels, cb_samples)
if data is not None:
data = InputData(data)
cb(data)
return 0
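# Illustrative: if `discard` is 63 (e.g., the AI filter delay in samples), the first
# callback(s) drop up to 63 samples from the start of the acquisition before any
# InputData is forwarded to `cb`; afterwards data is passed through in multiples of
# cb_samples.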
################################################################################
# Configuration functions
################################################################################
def setup_timing(task, channels, delay=0):
'''
Configures timing for task
Parameters
----------
task : niDAQmx task handle
Task to configure timing for
channels : list of channels
        List of channels to configure
    delay : int
        Number of extra samples to add to a finite acquisition (e.g., to compensate
        for a filter delay whose samples will later be discarded)
References
----------
http://www.ni.com/white-paper/11369/en/
http://www.ni.com/pdf/manuals/371235h.pdf
'''
fs = get_channel_property(channels, 'fs')
sample_clock = get_channel_property(channels, 'sample_clock')
start_trigger = get_channel_property(channels, 'start_trigger')
samples = get_channel_property(channels, 'samples')
reference_clock = get_channel_property(channels, 'reference_clock')
if reference_clock:
mx.DAQmxSetRefClkSrc(task, reference_clock)
if start_trigger:
mx.DAQmxCfgDigEdgeStartTrig(task, start_trigger, mx.DAQmx_Val_Rising)
if samples == 0:
sample_mode = mx.DAQmx_Val_ContSamps
samples = 2
else:
sample_mode = mx.DAQmx_Val_FiniteSamps
samples += delay
mx.DAQmxCfgSampClkTiming(task, sample_clock, fs, mx.DAQmx_Val_Rising,
sample_mode, samples)
def create_task(name=None):
'''
Create niDAQmx task
Parameters
----------
name : {None, str}
Task name (optional). Primarily useful only for debugging purposes
(e.g., this is what's reported in NI error messages)
Returns
-------
task : ctypes pointer
Pointer to niDAQmx task
'''
if name is None:
name = ''
task = mx.TaskHandle(0)
mx.DAQmxCreateTask(name, ctypes.byref(task))
task._name = name
return task
def setup_counters(channels, task_name='counter'):
lines = get_channel_property(channels, 'channel', True)
names = get_channel_property(channels, 'name', True)
log.debug('Configuring lines {}'.format(lines))
source_terminal = get_channel_property(channels, 'source_terminal')
low_samples = get_channel_property(channels, 'low_samples')
high_samples = get_channel_property(channels, 'high_samples')
merged_lines = ','.join(lines)
task = create_task(task_name)
mx.DAQmxCreateCOPulseChanTicks(task, merged_lines, '', source_terminal,
mx.DAQmx_Val_Low, 0, low_samples,
high_samples)
mx.DAQmxCfgSampClkTiming(task, source_terminal, 100, mx.DAQmx_Val_Rising,
mx.DAQmx_Val_HWTimedSinglePoint, 2)
return task
def setup_hw_ao(channels, buffer_duration, callback_interval, callback,
task_name='hw_ao'):
lines = get_channel_property(channels, 'channel', True)
names = get_channel_property(channels, 'name', True)
expected_ranges = get_channel_property(channels, 'expected_range', True)
start_trigger = get_channel_property(channels, 'start_trigger')
terminal_mode = get_channel_property(channels, 'terminal_mode')
terminal_mode = NIDAQEngine.terminal_mode_map[terminal_mode]
task = create_task(task_name)
merged_lines = ','.join(lines)
for line, name, (vmin, vmax) in zip(lines, names, expected_ranges):
        log.debug('Configuring line %s (%s)', line, name)
mx.DAQmxCreateAOVoltageChan(task, line, name, vmin, vmax,
mx.DAQmx_Val_Volts, '')
setup_timing(task, channels)
properties = get_timing_config(task)
result = ctypes.c_double()
try:
for line in lines:
mx.DAQmxGetAOGain(task, line, result)
properties['{} AO gain'.format(line)] = result.value
except:
# This means that the gain is not settable
properties['{} AO gain'.format(line)] = 0
fs = properties['sample clock rate']
log_ao.info('AO properties: %r', properties)
if terminal_mode is not None:
mx.DAQmxSetAOTermCfg(task, merged_lines, terminal_mode)
# If the write reaches the end of the buffer and no new data has been
# provided, do not loop around to the beginning and start over.
mx.DAQmxSetWriteRegenMode(task, mx.DAQmx_Val_DoNotAllowRegen)
callback_samples = round(fs*callback_interval)
if buffer_duration is None:
buffer_samples = round(callback_samples*10)
else:
buffer_samples = round(buffer_duration*fs)
log_ao.debug('Setting output buffer size to %d samples', buffer_samples)
mx.DAQmxSetBufOutputBufSize(task, buffer_samples)
task._buffer_samples = buffer_samples
result = ctypes.c_uint32()
mx.DAQmxGetTaskNumChans(task, result)
task._n_channels = result.value
log_ao.debug('%d channels in task', task._n_channels)
#mx.DAQmxSetAOMemMapEnable(task, lines, True)
#mx.DAQmxSetAODataXferReqCond(task, lines, mx.DAQmx_Val_OnBrdMemHalfFullOrLess)
# This controls how quickly we can update the buffer on the device. On some
# devices it is not user-settable. On the X-series PCIe-6321 I am able to
    # change it. On the M-series PCI 6259 it appears to be fixed at 8191
# samples. Haven't really been able to do much about this.
mx.DAQmxGetBufOutputOnbrdBufSize(task, result)
task._onboard_buffer_size = result.value
log_ao.debug('Onboard buffer size %d', task._onboard_buffer_size)
result = ctypes.c_int32()
mx.DAQmxGetAODataXferMech(task, merged_lines, result)
log_ao.debug('Data transfer mechanism %d', result.value)
mx.DAQmxGetAODataXferReqCond(task, merged_lines, result)
log_ao.debug('Data transfer condition %d', result.value)
#result = ctypes.c_uint32()
#mx.DAQmxGetAOUseOnlyOnBrdMem(task, merged_lines, result)
#log_ao.debug('Use only onboard memory %d', result.value)
#mx.DAQmxGetAOMemMapEnable(task, merged_lines, result)
#log_ao.debug('Memory mapping enabled %d', result.value)
#mx.DAQmxGetAIFilterDelayUnits(task, merged_lines, result)
#log_ao.debug('AI filter delay unit %d', result.value)
#result = ctypes.c_int32()
#mx.DAQmxGetAODataXferMech(task, result)
#log_ao.debug('DMA transfer mechanism %d', result.value)
log_ao.debug('Creating callback after every %d samples', callback_samples)
task._cb = partial(hw_ao_helper, callback)
task._cb_ptr = mx.DAQmxEveryNSamplesEventCallbackPtr(task._cb)
mx.DAQmxRegisterEveryNSamplesEvent(
task, mx.DAQmx_Val_Transferred_From_Buffer, int(callback_samples), 0,
task._cb_ptr, None)
mx.DAQmxTaskControl(task, mx.DAQmx_Val_Task_Reserve)
task._names = verify_channel_names(task, names)
task._devices = device_list(task)
task._fs = fs
return task
def get_timing_config(task):
properties = {}
info = ctypes.c_double()
mx.DAQmxGetSampClkRate(task, info)
properties['sample clock rate'] = info.value
mx.DAQmxGetSampClkMaxRate(task, info)
properties['sample clock maximum rate'] = info.value
mx.DAQmxGetSampClkTimebaseRate(task, info)
properties['sample clock timebase rate'] = info.value
try:
mx.DAQmxGetMasterTimebaseRate(task, info)
properties['master timebase rate'] = info.value
except:
pass
mx.DAQmxGetRefClkRate(task, info)
properties['reference clock rate'] = info.value
info = ctypes.c_buffer(256)
mx.DAQmxGetSampClkSrc(task, info, len(info))
properties['sample clock source'] = str(info.value)
mx.DAQmxGetSampClkTimebaseSrc(task, info, len(info))
properties['sample clock timebase source'] = str(info.value)
mx.DAQmxGetSampClkTerm(task, info, len(info))
properties['sample clock terminal'] = str(info.value)
try:
mx.DAQmxGetMasterTimebaseSrc(task, info, len(info))
properties['master timebase source'] = str(info.value)
except:
pass
mx.DAQmxGetRefClkSrc(task, info, len(info))
properties['reference clock source'] = str(info.value)
info = ctypes.c_int32()
try:
mx.DAQmxGetSampClkOverrunBehavior(task, info)
properties['sample clock overrun behavior'] = info.value
except:
pass
mx.DAQmxGetSampClkActiveEdge(task, info)
properties['sample clock active edge'] = info.value
info = ctypes.c_uint32()
try:
mx.DAQmxGetSampClkTimebaseDiv(task, info)
properties['sample clock timebase divisor'] = info.value
except:
pass
return properties
def setup_hw_ai(channels, callback_duration, callback, task_name='hw_ai'):
log.debug('Configuring HW AI channels')
# These properties can vary on a per-channel basis
lines = get_channel_property(channels, 'channel', True)
names = get_channel_property(channels, 'name', True)
gains = get_channel_property(channels, 'gain', True)
# These properties must be the same across all channels
expected_range = get_channel_property(channels, 'expected_range')
samples = get_channel_property(channels, 'samples')
terminal_mode = get_channel_property(channels, 'terminal_mode')
terminal_coupling = get_channel_property(channels, 'terminal_coupling')
# Convert to representation required by NI functions
lines = ','.join(lines)
log.debug('Configuring lines {}'.format(lines))
terminal_mode = NIDAQEngine.terminal_mode_map[terminal_mode]
terminal_coupling = NIDAQEngine.terminal_coupling_map[terminal_coupling]
task = create_task(task_name)
mx.DAQmxCreateAIVoltageChan(task, lines, '', terminal_mode,
expected_range[0], expected_range[1],
mx.DAQmx_Val_Volts, '')
if terminal_coupling is not None:
mx.DAQmxSetAICoupling(task, lines, terminal_coupling)
setup_timing(task, channels)
properties = get_timing_config(task)
log_ai.info('AI timing properties: %r', properties)
result = ctypes.c_uint32()
mx.DAQmxGetTaskNumChans(task, result)
n_channels = result.value
fs = properties['sample clock rate']
callback_samples = round(callback_duration * fs)
mx.DAQmxSetReadOverWrite(task, mx.DAQmx_Val_DoNotOverwriteUnreadSamps)
mx.DAQmxSetBufInputBufSize(task, callback_samples*100)
mx.DAQmxGetBufInputBufSize(task, result)
buffer_size = result.value
log_ai.debug('Buffer size for %s set to %d samples', lines, buffer_size)
try:
info = ctypes.c_int32()
mx.DAQmxSetAIFilterDelayUnits(task, lines,
mx.DAQmx_Val_SampleClkPeriods)
info = ctypes.c_double()
mx.DAQmxGetAIFilterDelay(task, lines, info)
log_ai.debug('AI filter delay {} samples'.format(info.value))
filter_delay = int(info.value)
# Ensure timing is compensated for the planned filter delay since these
# samples will be discarded.
if samples > 0:
setup_timing(task, channels, filter_delay)
except mx.DAQError:
# Not a supported property. Set filter delay to 0 by default.
filter_delay = 0
task._cb = partial(hw_ai_helper, callback, n_channels, filter_delay)
task._cb_ptr = mx.DAQmxEveryNSamplesEventCallbackPtr(task._cb)
mx.DAQmxRegisterEveryNSamplesEvent(
task, mx.DAQmx_Val_Acquired_Into_Buffer, int(callback_samples), 0,
task._cb_ptr, None)
mx.DAQmxTaskControl(task, mx.DAQmx_Val_Task_Reserve)
task._names = verify_channel_names(task, names)
task._devices = device_list(task)
task._sf = dbi(gains)[..., np.newaxis]
task._fs = properties['sample clock rate']
properties = get_timing_config(task)
log_ai.info('AI timing properties: %r', properties)
return task
def setup_hw_di(fs, lines, callback, callback_samples, start_trigger=None,
clock=None, task_name='hw_di'):
'''
M series DAQ cards do not have onboard timing engines for digital IO.
Therefore, we have to create one (e.g., using a counter or by using the
    analog input or output sample clock).
'''
task = create_task(task_name)
mx.DAQmxCreateDIChan(task, lines, '', mx.DAQmx_Val_ChanForAllLines)
# Get the current state of the lines so that we know what happened during
# the first change detection event. Do this before configuring the timing
# of the lines (otherwise we have to start the master clock as well)!
mx.DAQmxStartTask(task)
initial_state = read_digital_lines(task, 1)
mx.DAQmxStopTask(task)
# M-series acquisition boards don't have a dedicated engine for digital
# acquisition. Use a clock to configure the acquisition.
if clock is not None:
clock_task = create_task('{}_clock'.format(task_name))
mx.DAQmxCreateCOPulseChanFreq(clock_task, clock, '', mx.DAQmx_Val_Hz,
mx.DAQmx_Val_Low, 0, fs, 0.5)
mx.DAQmxCfgImplicitTiming(clock_task, mx.DAQmx_Val_ContSamps, int(fs))
clock += 'InternalOutput'
if start_trigger:
mx.DAQmxCfgDigEdgeStartTrig(clock_task, start_trigger,
mx.DAQmx_Val_Rising)
setup_timing(task, clock, -1, None)
else:
setup_timing(task, fs, -1, start_trigger)
cb_helper = DigitalSamplesAcquiredCallbackHelper(callback)
cb_ptr = mx.DAQmxEveryNSamplesEventCallbackPtr(cb_helper)
mx.DAQmxRegisterEveryNSamplesEvent(task, mx.DAQmx_Val_Acquired_Into_Buffer,
int(callback_samples), 0, cb_ptr, None)
task._cb_ptr = cb_ptr
task._cb_helper = cb_helper
task._initial_state = initial_state
rate = ctypes.c_double()
mx.DAQmxGetSampClkRate(task, rate)
mx.DAQmxTaskControl(task, mx.DAQmx_Val_Task_Reserve)
mx.DAQmxTaskControl(clock_task, mx.DAQmx_Val_Task_Reserve)
return [task, clock_task]
def setup_sw_ao(lines, expected_range, task_name='sw_ao'):
# TODO: DAQmxSetAOTermCfg
task = create_task(task_name)
lb, ub = expected_range
mx.DAQmxCreateAOVoltageChan(task, lines, '', lb, ub, mx.DAQmx_Val_Volts, '')
mx.DAQmxTaskControl(task, mx.DAQmx_Val_Task_Reserve)
return task
def setup_sw_do(channels, task_name='sw_do'):
task = create_task(task_name)
lines = get_channel_property(channels, 'channel', True)
names = get_channel_property(channels, 'name', True)
lines = ','.join(lines)
mx.DAQmxCreateDOChan(task, lines, '', mx.DAQmx_Val_ChanForAllLines)
mx.DAQmxTaskControl(task, mx.DAQmx_Val_Task_Reserve)
task._names = names
task._devices = device_list(task)
return task
################################################################################
# Engine
################################################################################
class NIDAQEngine(Engine):
'''
Hardware interface
The tasks are started in the order they are configured. Most NI devices can
only support a single hardware-timed task of a specified type (e.g., analog
input, analog output, digital input, digital output are all unique task
types).
'''
# TODO: Why is this relevant?
engine_name = 'nidaq'
# Flag indicating whether engine was configured
_configured = Bool(False)
# Poll period (in seconds). This defines how often callbacks for the analog
# outputs are notified (i.e., to generate additional samples for playout).
# If the poll period is too long, then the analog output may run out of
# samples.
    hw_ao_monitor_period = d_(Float(1)).tag(metadata=True)
    # Poll period (in seconds) for the hardware-timed analog input callback
    # (referenced by configure_hw_ai below); 0.1 s is an assumed default.
    hw_ai_monitor_period = d_(Float(0.1)).tag(metadata=True)
    # Size of buffer (in seconds). This defines how much data is pregenerated
    # for the buffer before starting acquisition. This is important because it
    # determines how much data must be generated before the task can start.
    hw_ao_buffer_size = d_(Float(10)).tag(metadata=True)
# Even though data is written to the analog outputs, it is buffered in
# computer memory until it's time to be transferred to the onboard buffer of
# the NI acquisition card. NI-DAQmx handles this behind the scenes (i.e.,
# when the acquisition card needs additional samples, NI-DAQmx will transfer
# the next chunk of data from the computer memory). We can overwrite data
# that's been buffered in computer memory (e.g., so we can insert a target
# in response to a nose-poke). However, we cannot overwrite data that's
    # already been transferred to the onboard buffer. So, the onboard buffer size
# determines how quickly we can change the analog output in response to an
# event.
# TODO: this is not configurable on some systems. How do we figure out if
# it's configurable?
hw_ao_onboard_buffer = d_(Int(4095)).tag(metadata=True)
# Since any function call takes a small fraction of time (e.g., nanoseconds
# to milliseconds), we can't simply overwrite data starting at
# hw_ao_onboard_buffer+1. By the time the function calls are complete, the
# DAQ probably has already transferred a couple hundred samples to the
# buffer. This parameter will likely need some tweaking (i.e., only you can
# determine an appropriate value for this based on the needs of your
# program).
hw_ao_min_writeahead = d_(Int(8191 + 1000)).tag(metadata=True)
_tasks = Typed(dict)
_task_done = Typed(dict)
_callbacks = Typed(dict)
_timers = Typed(dict)
_uint32 = Typed(ctypes.c_uint32)
_uint64 = Typed(ctypes.c_uint64)
_int32 = Typed(ctypes.c_int32)
ao_fs = Typed(float).tag(metadata=True)
ai_fs = Typed(float).tag(metadata=True)
terminal_mode_map = {
'differential': mx.DAQmx_Val_Diff,
'pseudodifferential': mx.DAQmx_Val_PseudoDiff,
'RSE': mx.DAQmx_Val_RSE,
'NRSE': mx.DAQmx_Val_NRSE,
'default': mx.DAQmx_Val_Cfg_Default,
}
terminal_coupling_map = {
None: None,
'AC': mx.DAQmx_Val_AC,
'DC': mx.DAQmx_Val_DC,
'ground': mx.DAQmx_Val_GND,
}
# This defines the function for the clock that synchronizes the tasks.
sample_time = Callable()
instances = Value([])
def __init__(self, *args, **kw):
super().__init__(*args, **kw)
self.instances.append(self)
# Use an OrderedDict to ensure that when we loop through the tasks
# stored in the dictionary, we process them in the order they were
# configured.
self._tasks = OrderedDict()
self._callbacks = {}
self._timers = {}
self._configured = False
# These are pointers to C datatypes that are required for communicating
# with the NI-DAQmx library. When querying various properties of tasks,
# channels and buffers, the NI-DAQmx function often requires an integer
# of a specific type (e.g. unsigned 32-bit, unsigned 64-bit, etc.). This
# integer must be passed by reference, allowing the NI-DAQmx function to
# modify the value directly. For example:
#
# mx.DAQmxGetWriteSpaceAvail(task, self._uint32)
# print(self._uint32.value)
#
# The ctypes library facilitates communicating with the NI-DAQmx C-API
# by providing wrappers around C datatypes that can be passed by
# reference.
self._uint32 = ctypes.c_uint32()
self._uint64 = ctypes.c_uint64()
self._int32 = ctypes.c_int32()
def configure(self, active=True):
log.debug('Configuring {} engine'.format(self.name))
counter_channels = self.get_channels('counter', active=active)
sw_do_channels = self.get_channels('digital', 'output', 'software',
active=active)
hw_ai_channels = self.get_channels('analog', 'input', 'hardware',
active=active)
hw_di_channels = self.get_channels('digital', 'input', 'hardware',
active=active)
hw_ao_channels = self.get_channels('analog', 'output', 'hardware',
active=active)
if counter_channels:
log.debug('Configuring counter channels')
self.configure_counters(counter_channels)
if sw_do_channels:
log.debug('Configuring SW DO channels')
self.configure_sw_do(sw_do_channels)
if hw_ai_channels:
log.debug('Configuring HW AI channels')
self.configure_hw_ai(hw_ai_channels)
if hw_di_channels:
raise NotImplementedError
#log.debug('Configuring HW DI channels')
#lines = ','.join(get_channel_property(hw_di_channels, 'channel', True))
#names = get_channel_property(hw_di_channels, 'name', True)
#fs = get_channel_property(hw_di_channels, 'fs')
#start_trigger = get_channel_property(hw_di_channels, 'start_trigger')
## Required for M-series to enable hardware-timed digital
## acquisition. TODO: Make this a setting that can be configured
## since X-series doesn't need this hack.
#device = hw_di_channels[0].channel.strip('/').split('/')[0]
#clock = '/{}/Ctr0'.format(device)
#self.configure_hw_di(fs, lines, names, start_trigger, clock)
# Configure the analog output last because acquisition is synced with
# the analog output signal (i.e., when the analog output starts, the
# analog input begins acquiring such that sample 0 of the input
# corresponds with sample 0 of the output).
# TODO: eventually we should be able to inspect the 'start_trigger'
# property on the channel configuration to decide the order in which the
# tasks are started.
if hw_ao_channels:
log.debug('Configuring HW AO channels')
self.configure_hw_ao(hw_ao_channels)
# Choose sample clock based on what channels have been configured.
if hw_ao_channels:
self.sample_time = self.ao_sample_time
elif hw_ai_channels:
self.sample_time = self.ai_sample_time
# Configure task done events so that we can fire a callback if
# acquisition is done.
self._task_done = {}
        for name, task in self._tasks.items():
            # Bind the current name as a default argument so each callback
            # reports its own task rather than the loop's final value.
            def cb(task, s, cb_data, name=name):
                self.task_complete(name)
                return 0
cb_ptr = mx.DAQmxDoneEventCallbackPtr(cb)
mx.DAQmxRegisterDoneEvent(task, 0, cb_ptr, None)
task._done_cb_ptr_engine = cb_ptr
self._task_done[name] = False
super().configure()
# Required by start. This allows us to do the configuration
# on the fly when starting the engines if the configure method hasn't
# been called yet.
self._configured = True
log.debug('Completed engine configuration')
def task_complete(self, task_name):
log.debug('Task %s complete', task_name)
self._task_done[task_name] = True
task = self._tasks[task_name]
# We have frozen the initial arguments (in the case of hw_ai_helper,
# that would be cb, channels, discard; in the case of hw_ao_helper,
# that would be cb) using functools.partial and need to provide task,
# cb_samples and cb_data. For hw_ai_helper, setting cb_samples to 1
        # means that we read all remaining samples, regardless of whether they
# fit evenly into a block of samples. The other two arguments
# (event_type and cb_data) are required of the function signature by
# NIDAQmx but are unused.
task._cb(task, None, 1, None)
# Only check to see if hardware-timed tasks are complete.
# Software-timed tasks must be explicitly canceled by the user.
done = [v for t, v in self._task_done.items() if t.startswith('hw')]
if all(done):
for cb in self._callbacks.get('done', []):
cb()
def configure_counters(self, channels):
task = setup_counters(channels)
self._tasks['counter'] = task
def configure_hw_ao(self, channels):
'''
Initialize hardware-timed analog output
        Parameters
        ----------
        (These values are read from the channel objects passed in as
        ``channels`` rather than supplied as separate arguments.)
        fs : float
            Sampling frequency of output (e.g., 100e3).
        lines : str
            Analog output lines to use (e.g., 'Dev1/ao0:4' to specify a range of
lines or 'Dev1/ao0,Dev1/ao4' to specify specific lines).
expected_range : (float, float)
Tuple of upper/lower end of expected range. The maximum range
allowed by most NI devices is (-10, 10). Some devices (especially
newer ones) will optimize the output resolution based on the
expected range of the signal.
'''
task = setup_hw_ao(channels, self.hw_ao_buffer_size,
self.hw_ao_monitor_period, self.hw_ao_callback,
'{}_hw_ao'.format(self.name))
self._tasks['hw_ao'] = task
self.ao_fs = task._fs
for channel in channels:
channel.fs = task._fs
def configure_hw_ai(self, channels):
task_name = '{}_hw_ai'.format(self.name)
task = setup_hw_ai(channels, self.hw_ai_monitor_period,
self._hw_ai_callback, task_name)
self._tasks['hw_ai'] = task
self.ai_fs = task._fs
def configure_sw_ao(self, lines, expected_range, names=None,
initial_state=None):
raise NotImplementedError
if initial_state is None:
initial_state = np.zeros(len(names), dtype=np.double)
task_name = '{}_sw_ao'.format(self.name)
task = setup_sw_ao(lines, expected_range, task_name)
task._names = verify_channel_names(task, names)
task._devices = device_list(task)
self._tasks['sw_ao'] = task
self.write_sw_ao(initial_state)
def configure_hw_di(self, fs, lines, names=None, trigger=None, clock=None):
raise NotImplementedError
callback_samples = int(self.hw_ai_monitor_period*fs)
task_name = '{}_hw_di'.format(self.name)
task, clock_task = setup_hw_di(fs, lines, self._hw_di_callback,
callback_samples, trigger, clock,
task_name)
task._names = verify_channel_names(task, names)
task._devices = device_list(task)
task._fs = fs
if clock_task is not None:
self._tasks['hw_di_clock'] = clock_task
self._tasks['hw_di'] = task
def configure_hw_do(self, fs, lines, names):
raise NotImplementedError
def configure_sw_do(self, channels):
task_name = '{}_sw_do'.format(self.name)
task = setup_sw_do(channels, task_name)
self._tasks['sw_do'] = task
initial_state = np.zeros(len(channels), dtype=np.uint8)
self.write_sw_do(initial_state)
def configure_et(self, lines, clock, names=None):
'''
Setup change detection with high-precision timestamps
Anytime a rising or falling edge is detected on one of the specified
lines, a timestamp based on the specified clock will be captured. For
example, if the clock is 'ao/SampleClock', then the timestamp will be
the number of samples played at the point when the line changed state.
Parameters
----------
lines : string
Digital lines (in NI-DAQmx syntax, e.g., 'Dev1/port0/line0:4') to
monitor.
clock : string
Reference clock from which timestamps will be drawn.
names : string (optional)
Aliases for the lines. When aliases are provided, registered
callbacks will receive the alias for the line instead of the
NI-DAQmx notation.
Notes
-----
Be aware of the limitations of your device. All X-series devices support
change detection on all ports; however, only some M-series devices do
(and then, only on port 0).
'''
# Find out which device the lines are from. Use this to configure the
# event timer. Right now we don't want to deal with multi-device event
# timers. If there's more than one device, then we should configure each
# separately.
raise NotImplementedError
# TODO: How to determine sampling rate of task?
names = channel_names('digital', lines, names)
devices = device_list(lines, 'digital')
if len(devices) != 1:
raise ValueError('Cannot configure multi-device event timer')
trigger = '/{}/ChangeDetectionEvent'.format(devices[0])
counter = '/{}/Ctr0'.format(devices[0])
task_name = '{}_et'.format(self.name)
et_task = setup_event_timer(trigger, counter, clock, task_name)
task_name = '{}_cd'.format(self.name)
cd_task = setup_change_detect_callback(lines, self._et_fired, et_task,
names, task_name)
cd_task._names = names
self._tasks['et_task'] = et_task
self._tasks['cd_task'] = cd_task
def _get_channel_slice(self, task_name, channel_names):
if channel_names is None:
return Ellipsis
else:
return self._tasks[task_name]._names.index(channel_names)
def register_done_callback(self, callback):
self._callbacks.setdefault('done', []).append(callback)
def register_ao_callback(self, callback, channel_name=None):
s = self._get_channel_slice('hw_ao', channel_name)
self._callbacks.setdefault('ao', []).append((channel_name, s, callback))
def register_ai_callback(self, callback, channel_name=None):
s = self._get_channel_slice('hw_ai', channel_name)
self._callbacks.setdefault('ai', []).append((channel_name, s, callback))
def register_di_callback(self, callback, channel_name=None):
s = self._get_channel_slice('hw_di', channel_name)
self._callbacks.setdefault('di', []).append((channel_name, s, callback))
def register_et_callback(self, callback, channel_name=None):
s = self._get_channel_slice('cd_task', channel_name)
self._callbacks.setdefault('et', []).append((channel_name, s, callback))
def unregister_done_callback(self, callback):
try:
self._callbacks['done'].remove(callback)
except KeyError:
log.warning('Callback no longer exists.')
def unregister_ao_callback(self, callback, channel_name):
try:
s = self._get_channel_slice('hw_ao', channel_name)
self._callbacks['ao'].remove((channel_name, s, callback))
except (KeyError, AttributeError):
log.warning('Callback no longer exists.')
def unregister_ai_callback(self, callback, channel_name):
try:
s = self._get_channel_slice('hw_ai', channel_name)
self._callbacks['ai'].remove((channel_name, s, callback))
except (KeyError, AttributeError):
log.warning('Callback no longer exists.')
def unregister_di_callback(self, callback, channel_name):
s = self._get_channel_slice('hw_di', channel_name)
self._callbacks['di'].remove((channel_name, s, callback))
def unregister_et_callback(self, callback, channel_name):
s = self._get_channel_slice('cd_task', channel_name)
self._callbacks['et'].remove((channel_name, s, callback))
def write_sw_ao(self, state):
task = self._tasks['sw_ao']
state = np.array(state).astype(np.double)
mx.DAQmxWriteAnalogF64(task, 1, True, 0, mx.DAQmx_Val_GroupByChannel,
state, self._int32, None)
if self._int32.value != 1:
raise ValueError('Unable to update software-timed AO')
task._current_state = state
def write_sw_do(self, state):
task = self._tasks['sw_do']
state = np.asarray(state).astype(np.uint8)
mx.DAQmxWriteDigitalLines(task, 1, True, 0, mx.DAQmx_Val_GroupByChannel,
state, self._int32, None)
if self._int32.value != 1:
raise ValueError('Problem writing data to software-timed DO')
task._current_state = state
def set_sw_do(self, name, state):
task = self._tasks['sw_do']
i = task._names.index(name)
new_state = task._current_state.copy()
new_state[i] = state
self.write_sw_do(new_state)
def set_sw_ao(self, name, state):
task = self._tasks['sw_ao']
i = task._names.index(name)
new_state = task._current_state.copy()
new_state[i] = state
self.write_sw_ao(new_state)
def fire_sw_do(self, name, duration=0.1):
# TODO - Store reference to timer so that we can eventually track the
# state of different timers and cancel pending timers when necessary.
self.set_sw_do(name, 1)
timer = Timer(duration, lambda: self.set_sw_do(name, 0))
timer.start()
def _et_fired(self, line_index, change, event_time):
for i, cb in self._callbacks.get('et', []):
if i == line_index:
cb(change, event_time)
def _hw_ai_callback(self, samples):
samples /= self._tasks['hw_ai']._sf
for channel_name, s, cb in self._callbacks.get('ai', []):
try:
cb(samples[s])
except Exception as e:
log.exception(e)
self.unregister_ai_callback(cb, channel_name)
def _hw_di_callback(self, samples):
for i, cb in self._callbacks.get('di', []):
cb(samples[i])
def _get_hw_ao_samples(self, offset, samples):
channels = self.get_channels('analog', 'output', 'hardware')
data = np.empty((len(channels), samples), dtype=np.double)
for channel, ch_data in zip(channels, data):
channel.get_samples(offset, samples, out=ch_data)
return data
def get_offset(self, channel_name=None):
# Doesn't matter. Offset is the same for all channels in the task.
task = self._tasks['hw_ao']
mx.DAQmxSetWriteRelativeTo(task, mx.DAQmx_Val_CurrWritePos)
mx.DAQmxSetWriteOffset(task, 0)
mx.DAQmxGetWriteCurrWritePos(task, self._uint64)
return self._uint64.value
def get_space_available(self, offset=None, channel_name=None):
# It doesn't matter what the output channel is. Space will be the same
# for all.
task = self._tasks['hw_ao']
mx.DAQmxGetWriteSpaceAvail(task, self._uint32)
available = self._uint32.value
log_ao.trace('Current write space available %d', available)
# Compensate for offset if specified.
if offset is not None:
write_position = self.ao_write_position()
relative_offset = offset-write_position
log_ao.trace('Compensating write space for requested offset %d', offset)
available -= relative_offset
return available
def hw_ao_callback(self, samples):
# Get the next set of samples to upload to the buffer
with self.lock:
log_ao.trace('Hardware AO callback for %s', self.name)
offset = self.get_offset()
available_samples = self.get_space_available(offset)
if available_samples < samples:
log_ao.trace('Not enough samples available for writing')
else:
data = self._get_hw_ao_samples(offset, samples)
self.write_hw_ao(data, offset, timeout=0)
def update_hw_ao(self, offset, channel_name=None,
method='space_available'):
# Get the next set of samples to upload to the buffer. Ignore the
# channel name because we need to update all channels simultaneously.
if method == 'space_available':
samples = self.get_space_available(offset)
elif method == 'write_position':
samples = self.ao_write_position()-offset
else:
raise ValueError('Unsupported update method')
if samples <= 0:
log_ao.trace('No update of hw ao required')
return
log_ao.trace('Updating hw ao at %d with %d samples', offset, samples)
data = self._get_hw_ao_samples(offset, samples)
self.write_hw_ao(data, offset=offset, timeout=0)
def update_hw_ao_multiple(self, offsets, channel_names, method):
# This is really simple to implement since we have to update all
# channels at once. So, we just pick the minimum offset and let
# `update_hw_ao` do the work.
offset = min(offsets)
self.update_hw_ao(offset, None, method)
def ao_write_position(self):
task = self._tasks['hw_ao']
mx.DAQmxGetWriteCurrWritePos(task, self._uint64)
log_ao.trace('Current write position %d', self._uint64.value)
return self._uint64.value
def write_hw_ao(self, data, offset, timeout=1):
# TODO: add a safety-check to make sure waveform doesn't exceed limits.
# This is a recoverable error unless the DAQmx API catches it instead.
# Due to historical limitations in the DAQmx API, the write offset is a
# signed 32-bit integer. For long-running applications, we will have an
# overflow if we attempt to set the offset relative to the first sample
# written. Therefore, we compute the write offset relative to the last
# sample written (for requested offsets it should be negative).
log_ao.trace('Writing %r samples at %r', data.shape, offset)
task = self._tasks['hw_ao']
write_position = self.ao_write_position()
relative_offset = offset-write_position
mx.DAQmxSetWriteOffset(task, relative_offset)
m = 'Write position %d, requested offset %d, relative offset %d'
log_ao.trace(m, write_position, offset, relative_offset)
generated = self.ao_sample_clock()
log_ao.trace('AO samples generated %d', generated)
if offset != 0 and (offset-generated) <= task._onboard_buffer_size*1.25:
log_ao.debug('AO samples generated %d', generated)
log.debug('%d samples generated at offset %d', generated, offset)
#raise SystemError('Insufficient time to update output')
mx.DAQmxWriteAnalogF64(task, data.shape[-1], False, timeout,
mx.DAQmx_Val_GroupByChannel,
data.astype(np.float64), self._int32, None)
# Now, reset it back to 0
if offset is not None:
log_ao.trace('Resetting write offset')
mx.DAQmxSetWriteOffset(task, 0)
log_ao.trace('Write complete')
def get_ts(self):
with self.lock:
return self.sample_time()
def start(self):
if not self._configured:
log.debug('Tasks were not configured yet')
self.configure()
log.debug('Reserving NIDAQmx task resources')
for task in self._tasks.values():
mx.DAQmxTaskControl(task, mx.DAQmx_Val_Task_Commit)
if 'hw_ao' in self._tasks:
log.debug('Calling HW ao callback before starting tasks')
samples = self.get_space_available()
self.hw_ao_callback(samples)
log.debug('Starting NIDAQmx tasks')
for task in self._tasks.values():
log.debug('Starting task {}'.format(task._name))
mx.DAQmxStartTask(task)
def stop(self):
# TODO: I would love to be able to stop a task and keep it in memory
# without having to restart; however, this will require some thought as
# to the optimal way to do this. For now, we just clear everything.
# Configuration is generally fairly quick.
if not self._configured:
return
log.debug('Stopping engine')
for task in self._tasks.values():
mx.DAQmxClearTask(task)
self._callbacks = {}
self._configured = False
def ai_sample_clock(self):
task = self._tasks['hw_ai']
mx.DAQmxGetReadTotalSampPerChanAcquired(task, self._uint64)
log.trace('%d samples per channel acquired', self._uint64.value)
return self._uint64.value
def ai_sample_time(self):
return self.ai_sample_clock()/self.ai_fs
def ao_sample_clock(self):
try:
task = self._tasks['hw_ao']
mx.DAQmxGetWriteTotalSampPerChanGenerated(task, self._uint64)
return self._uint64.value
except:
return 0
def ao_sample_time(self):
return self.ao_sample_clock()/self.ao_fs
def get_buffer_size(self, channel_name):
return self.hw_ao_buffer_size
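# Rough usage sketch (illustrative, not the canonical entry point: in practice
# the engine and its channels are constructed from an experiment's IO
# configuration, and the callback name below is an assumption):
#
#     engine = NIDAQEngine(name='nidaq')
#     engine.register_ai_callback(process_samples)  # receives (n_channels, n) arrays
#     engine.configure()
#     engine.start()
#     ...
#     engine.stop()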
|
#!/usr/bin/env python
import click
@click.command()
@click.option('--invcf')
@click.option('--outvcf')
def main(invcf,outvcf):
file_outvcf=open(outvcf,'w')
with open(invcf) as file_invcf:
for line in file_invcf:
Line=line.strip().split()
if Line[0].startswith('#'):
file_outvcf.write(line)
else:
format=Line[8].split(':')
hjm=Line[9].split(':')
pos=format.index('DP')
hjm_dp=int(hjm[pos])
if hjm_dp >=15 and hjm_dp <=100:
file_outvcf.write(line)
if __name__=='__main__':
main()
|
"""Common configure functions for cdp"""
# Python
import logging
# Unicon
from unicon.core.errors import SubCommandFailure
log = logging.getLogger(__name__)
def configure_cdp(device, interfaces=None):
"""
Enables cdp on target device
Args:
device ('obj'): Device object
Returns:
None
"""
interface_list = {}
# if no list of interfaces given get list of all interfaces
if interfaces is None:
interface_list = device.parse('show interfaces')
else:
        interface_list = device.api.get_interface_information(interfaces)
skipped_ints = []
# build a list of commands to send, checks if interface is correct type
# before adding to command list
command_list = ['cdp run']
for interface in interface_list:
if interface_list[interface]['type'] in ['Loopback','Tunnel', 'GEChannel']:
skipped_ints.append(interface)
continue
command_list.append('interface ' + interface)
command_list.append('cdp enable')
if skipped_ints:
        log.info('Skipped interfaces {} due to type incompatibility'.format(skipped_ints))
device.configure(command_list)
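# Usage sketch (hedged): `device` is assumed to be a connected pyATS/Genie
# device handle, e.g. testbed.devices['router1']; not runnable standalone.
#
#     configure_cdp(device)                                     # enable CDP on all interfaces
#     configure_cdp(device, interfaces=['GigabitEthernet0/1'])  # enable on a subset
#     unconfigure_cdp(device)                                   # disable again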
def unconfigure_cdp(device, interfaces=None):
"""
Disable cdp on target device
Args:
device ('obj'): Device object
Returns:
None
"""
interface_list = {}
# if no list of interfaces given get list of all interfaces
if interfaces is None:
interface_list = device.parse('show interfaces')
else:
        interface_list = device.api.get_interface_information(interfaces)
skipped_ints = []
# build a list of commands to send, checks if interface is correct type
# before adding to command list
command_list = ['no cdp run']
for interface in interface_list:
if interface_list[interface]['type'] in ['Loopback','Tunnel', 'GEChannel']:
skipped_ints.append(interface)
continue
command_list.append('interface ' + interface)
command_list.append('no cdp enable')
# log which interfaces were skipped and then run command
if skipped_ints:
        log.info('Skipped interfaces {} due to type incompatibility'.format(skipped_ints))
device.configure(command_list) |
import pyfx
import numpy as np
from skimage import filters, measure, morphology
class Background:
"""
Class to measure difference between each frame and a background frame.
"""
def __init__(self,bg_frame,blur_sigma=10):
self._bg_frame = bg_frame
self._blur_sigma = blur_sigma
self._bg_img = pyfx.util.to_image(bg_frame)
self._bg_array_color = pyfx.util.to_array(self._bg_frame,num_channels=3,dtype=np.float)
self._bg_array_bw = pyfx.util.to_array(self._bg_frame,num_channels=1,dtype=np.float)
self._bg_array_blur = filters.gaussian(self._bg_array_bw,self._blur_sigma)
self._bg_out = pyfx.util.to_array(self._bg_frame,num_channels=4,dtype=np.uint8)
def frame_diff(self,img):
"""
        Return difference between img and background.
"""
img_array_bw = pyfx.util.to_array(img,dtype=np.float,num_channels=1)
img_array_blur = filters.gaussian(img_array_bw,sigma=self._blur_sigma)
total_diff, diff_array = measure.compare_ssim(img_array_blur,
self._bg_array_blur,
full=True)
return 1 - diff_array
def smooth_diff(self,
img,
threshold=0.2,
num_iterate=20,
dilation_interval=2,
disk_size=35,
blur=50):
"""
Get a smoothed potential well for the difference between an image
and the background. Calculate using a series of expanding dilations.
This is pretty darn slow, unfortunately.
img: image in format recognized by pyfx
threshold: difference between the frame and background that is called
as different
num_iterate: how many times to iterate the dilation
dilation_interval: how often to write out the dilation
disk_size: size of morphological element for dilation
blur: how much to blur final result (gaussian sigma)
"""
frame_diff = self.frame_diff(img)
disk = morphology.disk(disk_size)
bool_cut = np.zeros(frame_diff.shape,dtype=np.bool)
bool_cut[frame_diff > threshold] = True
out = np.zeros(frame_diff.shape,dtype=np.float)
tmp = np.copy(bool_cut)
for i in range(0,num_iterate):
tmp = morphology.binary_dilation(tmp,selem=disk)
if i % dilation_interval == 0:
out[tmp] += 1
out = filters.gaussian(out,blur)
out = 1-out/np.max(out)
return np.array(np.round(255*out,0),dtype=np.uint8)
@property
def color(self):
return self._bg_array_color
@property
def bw(self):
return self._bg_array_bw
@property
def image(self):
return self._bg_out
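# Minimal usage sketch (assumes pyfx.util accepts plain image arrays as
# frames; the exact frame type depends on the pyfx helpers imported above):
#
#     bg = Background(bg_frame, blur_sigma=10)
#     diff = bg.frame_diff(frame)    # 2D array, ~0 where frame matches the background
#     well = bg.smooth_diff(frame)   # uint8 potential well, 255 far from differences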
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import os
import logging
import requests
import six
from typing import TYPE_CHECKING
try:
# py3
import urllib.parse as url_parse
except:
# py2
import urlparse as url_parse
import subprocess
# the functions we patch
from azure.core.pipeline.transport import RequestsTransport
# the trimming function to clean up incoming arguments to the test function we are wrapping
from azure_devtools.scenario_tests.utilities import trim_kwargs_from_test_function
from .azure_recorded_testcase import is_live
from .config import PROXY_URL
if TYPE_CHECKING:
from typing import Tuple
# To learn about how to migrate SDK tests to the test proxy, please refer to the migration guide at
# https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/test_proxy_migration_guide.md
# defaults
RECORDING_START_URL = "{}/record/start".format(PROXY_URL)
RECORDING_STOP_URL = "{}/record/stop".format(PROXY_URL)
PLAYBACK_START_URL = "{}/playback/start".format(PROXY_URL)
PLAYBACK_STOP_URL = "{}/playback/stop".format(PROXY_URL)
# TODO, create a pytest scope="session" implementation that can be added to a fixture such that unit tests can
# startup/shutdown the local test proxy
# this should also fire the admin mapping updates, and start/end the session for committing recording updates
def get_test_id():
# type: () -> str
# pytest sets the current running test in an environment variable
setting_value = os.getenv("PYTEST_CURRENT_TEST")
path_to_test = os.path.normpath(setting_value.split(" ")[0])
path_components = path_to_test.split(os.sep)
for idx, val in enumerate(path_components):
if val.startswith("test"):
path_components.insert(idx + 1, "recordings")
break
return os.sep.join(path_components).replace("::", "").replace("\\", "/")
def start_record_or_playback(test_id):
# type: (str) -> Tuple(str, dict)
"""Sends a request to begin recording or playing back the provided test.
This returns a tuple, (a, b), where a is the recording ID of the test and b is the `variables` dictionary that maps
test variables to values. If no variable dictionary was stored when the test was recorded, b is an empty dictionary.
"""
head_commit = subprocess.check_output(["git", "rev-parse", "HEAD"])
current_sha = head_commit.decode("utf-8").strip()
variables = {} # this stores a dictionary of test variable values that could have been stored with a recording
if is_live():
result = requests.post(
RECORDING_START_URL,
headers={"x-recording-file": test_id, "x-recording-sha": current_sha},
)
recording_id = result.headers["x-recording-id"]
else:
result = requests.post(
PLAYBACK_START_URL,
headers={"x-recording-file": test_id, "x-recording-sha": current_sha},
)
recording_id = result.headers["x-recording-id"]
if result.text:
try:
variables = result.json()
except ValueError as ex: # would be a JSONDecodeError on Python 3, which subclasses ValueError
six.raise_from(
ValueError("The response body returned from starting playback did not contain valid JSON"), ex
)
return (recording_id, variables)
def stop_record_or_playback(test_id, recording_id, test_output):
# type: (str, str, dict) -> None
if is_live():
requests.post(
RECORDING_STOP_URL,
headers={
"x-recording-file": test_id,
"x-recording-id": recording_id,
"x-recording-save": "true",
"Content-Type": "application/json"
},
json=test_output
)
else:
requests.post(
PLAYBACK_STOP_URL,
headers={"x-recording-file": test_id, "x-recording-id": recording_id},
)
def get_proxy_netloc():
parsed_result = url_parse.urlparse(PROXY_URL)
return {"scheme": parsed_result.scheme, "netloc": parsed_result.netloc}
def transform_request(request, recording_id):
"""Redirect the request to the test proxy, and store the original request URI in a header"""
headers = request.headers
parsed_result = url_parse.urlparse(request.url)
updated_target = parsed_result._replace(**get_proxy_netloc()).geturl()
if headers.get("x-recording-upstream-base-uri", None) is None:
headers["x-recording-upstream-base-uri"] = "{}://{}".format(parsed_result.scheme, parsed_result.netloc)
headers["x-recording-id"] = recording_id
headers["x-recording-mode"] = "record" if is_live() else "playback"
request.url = updated_target
def recorded_by_proxy(test_func):
"""Decorator that redirects network requests to target the azure-sdk-tools test proxy. Use with recorded tests.
For more details and usage examples, refer to
https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/test_proxy_migration_guide.md
"""
def record_wrap(*args, **kwargs):
test_id = get_test_id()
recording_id, variables = start_record_or_playback(test_id)
def transform_args(*args, **kwargs):
copied_positional_args = list(args)
request = copied_positional_args[1]
transform_request(request, recording_id)
return tuple(copied_positional_args), kwargs
trimmed_kwargs = {k: v for k, v in kwargs.items()}
trim_kwargs_from_test_function(test_func, trimmed_kwargs)
original_transport_func = RequestsTransport.send
def combined_call(*args, **kwargs):
adjusted_args, adjusted_kwargs = transform_args(*args, **kwargs)
return original_transport_func(*adjusted_args, **adjusted_kwargs)
RequestsTransport.send = combined_call
# call the modified function
# we define test_output before invoking the test so the variable is defined in case of an exception
test_output = None
try:
test_output = test_func(*args, variables=variables, **trimmed_kwargs)
except TypeError:
logger = logging.getLogger()
logger.info(
"This test can't accept variables as input. The test method should accept `**kwargs` and/or a "
"`variables` parameter to make use of recorded test variables."
)
test_output = test_func(*args, **trimmed_kwargs)
finally:
RequestsTransport.send = original_transport_func
stop_record_or_playback(test_id, recording_id, test_output)
return test_output
return record_wrap
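# Illustrative pattern for a test decorated with recorded_by_proxy (the class
# and method names are hypothetical; see the migration guide linked above):
#
#     class TestMyService(AzureRecordedTestCase):
#         @recorded_by_proxy
#         def test_basic(self, **kwargs):
#             variables = kwargs.pop("variables", {})
#             ...                      # make service calls through the proxied transport
#             return variables         # the returned dict is stored with the recording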
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 7 15:28:00 2020
@author: Frank
"""
from pyspark.sql import SparkSession
from pyspark.sql import functions as func
from pyspark.sql.types import StructType, StructField, IntegerType, LongType
import codecs
def loadMovieNames():
movieNames = {}
# CHANGE THIS TO THE PATH TO YOUR u.ITEM FILE:
with codecs.open("D:/Datasets/ml-100k/u.item", "r", encoding='ISO-8859-1', errors='ignore') as f:
for line in f:
fields = line.split('|')
movieNames[int(fields[0])] = fields[1]
return movieNames
spark = SparkSession.builder.appName("PopularMovies").getOrCreate()
nameDict = spark.sparkContext.broadcast(loadMovieNames())
# Create schema when reading u.data
schema = StructType([ \
StructField("userID", IntegerType(), True), \
StructField("movieID", IntegerType(), True), \
StructField("rating", IntegerType(), True), \
StructField("timestamp", LongType(), True)])
# Load up movie data as dataframe
moviesDF = spark.read.option("sep", "\t").schema(schema).csv("file:///D:/Datasets/ml-100k/u.data")
movieCounts = moviesDF.groupBy("movieID").count()
# Create a user-defined function to look up movie names from our broadcasted dictionary
def lookupName(movieID):
return nameDict.value[movieID]
lookupNameUDF = func.udf(lookupName)
# Add a movieTitle column using our new udf
moviesWithNames = movieCounts.withColumn("movieTitle", lookupNameUDF(func.col("movieID")))
# Sort the results
sortedMoviesWithNames = moviesWithNames.orderBy(func.desc("count"))
# Grab the top 10
sortedMoviesWithNames.show(10, False)
# Stop the session
spark.stop()
|
class Sport:
sport_id: int
name: str
link: str
def __init__(self, sport_id: int, name: str, link: str) -> None:
self.sport_id = sport_id
self.name = name
self.link = link
def __str__(self) -> str:
return f"""Sport(
\tsport_id={self.sport_id}
\tname={self.name}
\tlink={self.link}
)""" |
"""
Job search with persistent and transitory components to wages.
Wages are given by
w = exp(z) + y
y ~ exp(μ + s ζ)
z' = d + ρ z + σ ε
with ζ and ε both iid and N(0, 1). The value function is
v(w, z) = max{ u(w) / (1-β), u(c) + β E v(w', z')}
The continuation value function satisfies
f(z) = u(c) + β E max{ u(w') / (1-β), f(z') }
From f we can solve the optimal stopping problem by stopping when
u(w) / (1-β) > f(z)
For utility we take u(c) = ln(c). The reservation wage is the wage where
equality holds, or
w^*(z) = exp(f^*(z) (1-β))
Our aim is to solve for the reservation rule. We do this by first computing
f^* as the fixed point of the contraction map
Qf(z) = u(c) + β E max{ u(w') / (1-β), f(z') }
When we iterate, f is stored as a vector of values on a grid and these points
are interpolated into a function as necessary.
Interpolation is piecewise linear.
The integral in the definition of Qf is calculated by Monte Carlo.
"""
import numpy as np
from numpy.random import randn
from lininterp import interp1d
from numba import jit, prange
class JobSearch:
def __init__(self,
μ=0.0,
s=1.0,
d=0.0,
ρ=0.9,
σ=0.1,
β=0.98,
c=5,
mc_size=5000,
grid_size=200):
self.μ, self.s, self.d, self.ρ, self.σ, self.β, self.c = \
μ, s, d, ρ, σ, β, c
# Set up grid
z_mean = d / (1 - ρ)
        z_sd = np.sqrt(σ**2 / (1 - ρ**2))  # stationary std. dev. of the AR(1) process
        k = 3 # Number of standard deviations from mean
a, b = z_mean - k * z_sd, z_mean + k * z_sd
self.z_grid = np.linspace(a, b, grid_size)
# Store shocks
self.mc_size = mc_size
self.e_draws = randn(2, mc_size)
# Store the continuation value function after it's computed
self.f_star = None
def pack_parameters(self):
return self.μ, self.s, self.d, self.ρ, self.σ, self.β, self.c
def compute_fixed_point(self,
tol=1e-4,
max_iter=1000,
verbose=True,
print_skip=25):
# Set initial condition
f_init = np.log(self.c) * np.ones(len(self.z_grid))
f_out = np.empty_like(self.z_grid)
# Set up loop
params = self.pack_parameters()
f_in = f_init
i = 0
error = tol + 1
while i < max_iter and error > tol:
Q(f_in, f_out, params, self.z_grid, self.e_draws)
error = np.max(np.abs(f_in - f_out))
i += 1
if i % print_skip == 0:
print(f"Error at iteration {i} is {error}.")
f_in[:] = f_out
if i == max_iter:
print("Failed to converge!")
if verbose and i < max_iter:
print(f"\nConverged in {i} iterations.")
self.f_star = f_out
@jit(nopython=True, parallel=True)
def Q(f_in, f_out, params, z_grid, e_draws):
μ, s, d, ρ, σ, β, c = params
M = e_draws.shape[1]
# For every grid point
for i in prange(len(z_grid)):
z = z_grid[i]
# Compute expectation by MC
expectation = 0.0
for m in range(M):
e1, e2 = e_draws[:, m]
z_next = d + ρ * z + σ * e1
go_val = interp1d(z_grid, f_in, z_next) # f(z') draw
y_next = np.exp(μ + s * e2) # y' draw
w_next = np.exp(z_next) + y_next # w' draw
stop_val = np.log(w_next) / (1 - β) # u(w') / (1 - β)
expectation += max(stop_val, go_val)
expectation = expectation / M
f_out[i] = np.log(c) + β * expectation
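# Illustrative driver, guarded so that importing the module has no side
# effects. It relies on the lininterp/numba imports above and uses the
# reservation-wage formula from the module docstring, w*(z) = exp((1-β) f*(z)).
if __name__ == '__main__':
    js = JobSearch()
    js.compute_fixed_point()
    w_star = np.exp((1 - js.β) * js.f_star)
    print("Reservation wage at the grid endpoints:", w_star[0], w_star[-1])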
|
# Generated by Django 3.2.1 on 2021-08-08 07:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('admin_panel', '0016_wish_list'),
]
operations = [
migrations.AddField(
model_name='wish_list',
name='is_wished',
field=models.BooleanField(default=False),
),
]
|
#!/usr/bin/env python3.7
# -*- coding: utf-8 -*-
# Copyright (c) 2020.
#
# @author Mike Hartl <[email protected]>
# @copyright 2020 Mike Hartl
# @license http://opensource.org/licenses/gpl-license.php GNU Public License
# @version 0.0.1
import os
import types
class Pd1Files:
def list(self, path):
internalList = []
        # next(os.walk(path)) yields (dirpath, dirnames, filenames); skip the
        # dirpath string and scan both name lists for matching entries.
        for item in next(os.walk(path)):
if (type(item) is list):
for filename in item:
if 'ms_pd1' in filename:
internalList.append(filename)
return internalList |
# -*- coding: utf-8 -*-
import io, re, glob, os, datetime
from setuptools import setup
SETUP_PTH = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(SETUP_PTH, 'requirements.txt')) as f:
required = f.read().splitlines()
setup(
name = 'mpcontribs-utils',
version = datetime.datetime.today().strftime('%Y.%m.%d'),
description = "utility libraries for MPContribs",
author = 'Patrick Huck',
author_email = '[email protected]',
url = 'https://mpcontribs.org',
packages = ['mpcontribs.io'],
install_requires = required,
license = 'MIT',
zip_safe=False,
include_package_data=True
)
|
import numpy as np
import matplotlib.pyplot as plt
file= 'input.dat'
file= 'output.dat'
#file='potential.dat'
f = open(file, "rb")
nx=np.fromfile(f,count=1,dtype=np.int32)[0]
ny=np.fromfile(f,count=1,dtype=np.int32)[0]
print nx,ny
x=np.fromfile(f,count=nx,dtype=np.float32)
y=np.fromfile(f,count=ny,dtype=np.float32)
psi=np.fromfile(f,count=nx*ny,dtype=np.float32).reshape((nx,ny))
mask=np.fromfile(f,count=nx*ny,dtype=np.int32).reshape((nx,ny))
u=np.fromfile(f,count=nx*ny,dtype=np.float32).reshape((nx,ny))
v=np.fromfile(f,count=nx*ny,dtype=np.float32).reshape((nx,ny))
u2=np.fromfile(f,count=nx*ny,dtype=np.float32).reshape((nx,ny))
p=np.fromfile(f,count=nx*ny,dtype=np.float32).reshape((nx,ny))
vort=np.fromfile(f,count=nx*ny,dtype=np.float32).reshape((nx,ny))
print u[0,nx/2]
print v[0,nx/2]
print u2[0,nx/2]
#plt.subplot(221)
#colour=u2
#plt.streamplot(x,y,u,v,density=1,color=colour,arrowstyle='fancy')
#plt.colorbar()
##plt.contour(x,y,psi,50, colors='black', linestyles='solid')
###plt.imshow(psi,origin='lower',extent=(x[0],x[-1],y[0],y[-1]))
###plt.colorbar()
#plt.axis('equal')
#plt.title("Flowlines")
#plt.subplot(222)
#plt.imshow(u2,origin='lower',extent=(x[0],x[-1],y[0],y[-1]))
#plt.colorbar()
#plt.axis('equal')
#plt.title("Velocity")
#plt.subplot(223)
#plt.imshow(vort,origin='lower',extent=(x[0],x[-1],y[0],y[-1]))#,vmin=-10.,vmax=10.)
#plt.colorbar()
#plt.axis('equal')
#plt.title("Vorticity")
#plt.subplot(224)
#plt.imshow(p,origin='lower',extent=(x[0],x[-1],y[0],y[-1]))
#plt.colorbar()
#plt.axis('equal')
#plt.title("Pressure")
plt.subplot(121)
colour=u2
plt.streamplot(x,y,u,v,density=1,color=colour,arrowstyle='fancy')
plt.colorbar()
#plt.contour(x,y,psi,50, colors='black', linestyles='solid')
##plt.imshow(psi,origin='lower',extent=(x[0],x[-1],y[0],y[-1]))
##plt.colorbar()
plt.axis('equal')
plt.title("Flowlines")
plt.subplot(122)
plt.imshow(p,origin='lower',extent=(x[0],x[-1],y[0],y[-1]))
plt.colorbar()
plt.axis('equal')
plt.title("Pressure")
plt.show()
#plt.imshow(mask)
#plt.colorbar()
##plt.axis('equal')
#plt.show()
|
#!/usr/bin/env python3
# TODO config file for setting up filters
# TODO save filters to disk before exiting
# TODO normal bloom filters
# TODO change server banner
# TODO instruction using apache/nginx as reverse proxy
# TODO logging
# TODO daemonize, pid file, watchdog script
# TODO specify listen address/port
# TODO IPv6
# TODO docstrings
# TODO script to build bloom filters + example uses
# TODO HUP reloads config / saves configs
from dmfrbloom.timefilter import TimeFilter
from twisted.web import server, resource
from twisted.web.resource import Resource
from twisted.internet import reactor
TIMEFILTER = TimeFilter(10000, 0.001, 60*60)
FILTERS = [
# [name of filter, filter object, write permissions?],
["tftest", TIMEFILTER, True],
]
class NBFRoot(Resource):
@staticmethod
def render_GET(request):
print(request.method, request.uri, request.path, request.args,
request.requestHeaders, request.getClientAddress())
return b"<html><body><a href=\"https://github.com/droberson/nbf\">NBF</a></body></html"
class NBFTimeFilter(Resource):
@staticmethod
def render_GET(request):
status = 404
for bfilter in FILTERS:
if request.args[b"filter"][0].decode("utf-8") == bfilter[0]:
result = bfilter[1].lookup(request.args[b"element"][0])
status = 200 if result else 204
break
request.setResponseCode(status)
return b""
@staticmethod
def render_POST(request):
status = 404
for bfilter in FILTERS:
if request.args[b"filter"][0].decode("utf-8") == bfilter[0]:
                if bfilter[2]: # Check for write permission
status = 200
bfilter[1].add(request.args[b"element"][0])
break
status = 403
break
request.setResponseCode(status)
return b""
def main():
nbf = Resource()
nbf.putChild(b"", NBFRoot())
nbf.putChild(b"timefilter", NBFTimeFilter())
reactor.listenTCP(9999, server.Site(nbf), interface="", backlog=50)
#reactor.listenTCP(9998, server.Site(nbf), interface="::")
reactor.run()
if __name__ == "__main__":
main()
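# Example interaction once the server is listening on port 9999 (shell
# commands shown as comments; both handlers read `filter` and `element` from
# the query string):
#   curl -X POST "http://localhost:9999/timefilter?filter=tftest&element=example.com"
#   curl -i "http://localhost:9999/timefilter?filter=tftest&element=example.com"  # 200 if present, 204 if not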
|
# -*- coding: utf-8 -*-
# Copyright (C) 2010-2017 Mag. Christian Tanzer. All rights reserved
# Glasauergasse 32, A--1130 Wien, Austria. [email protected]
# ****************************************************************************
# This file is part of the package _ReST.
#
# This module is licensed under the terms of the BSD 3-Clause License
# <http://www.c-tanzer.at/license/bsd_3c.html>.
# ****************************************************************************
#
#++
# Name
# ReST.__init__
#
# Purpose
# Package augmenting reStructuredText
#
# Revision Dates
# 15-Feb-2010 (CT) Creation
# 29-Aug-2014 (CT) Filter warnings from `PIL`
# 12-Oct-2016 (CT) Add `__version__`
# 22-Feb-2017 (CT) Remove `__version__`
# ««revision-date»»···
#--
from _TFL.Package_Namespace import Package_Namespace
ReST = Package_Namespace ()
del Package_Namespace
### Filter PIL warnings to avoid crap like this::
# /usr/lib/python2.7/site-packages/PIL/Image.py:44:
# DeprecationWarning: classic int division
# MAX_IMAGE_PIXELS = int(1024 * 1024 * 1024 / 4 / 3)
import warnings
warnings.filterwarnings ("ignore", module = "^PIL.*")
del warnings
### __END__ ReST.__init__
|
import unittest
from app.models import Source
class SourceTest(unittest.TestCase):
'''Class testing behaviours of the Source class'''
def setUp(self):
'''method that will run before every test case'''
self.new_source = Source('1234','citizen news','Your trusted source for breaking news, analysis, exclusive interviews, headlines, and videos')
def test_instance(self):
self.assertTrue(isinstance(self.new_source,Source))
if __name__ == '__main__':
unittest.main()
|
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
def isBalanced(root: TreeNode) -> bool:
if not root: return True
lh = get_depth(root.left)
rh = get_depth(root.right)
return abs(lh - rh) <= 1 and isBalanced(root.left) and isBalanced(root.right)
def get_depth(root: TreeNode) -> int:
if not root: return 0
    return 1 + max(get_depth(root.left), get_depth(root.right))
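# Quick sanity check for the helpers above (this naive version recomputes
# subtree depths at every node rather than using a single bottom-up pass).
if __name__ == "__main__":
    root = TreeNode(1)
    root.left = TreeNode(2)
    root.left.left = TreeNode(3)
    print(isBalanced(root))        # False: left depth 2 vs right depth 0
    print(isBalanced(root.left))   # True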
|
# Copyright 2021 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Build rules to choose the v8 target architecture."""
load("@bazel_skylib//lib:selects.bzl", "selects")
V8CpuTypeInfo = provider(
doc = "A singleton provider that specifies the V8 target CPU type",
fields = {
"value": "The V8 Target CPU selected.",
},
)
def _host_target_cpu_impl(ctx):
allowed_values = ["arm", "arm64", "ia32", "x64", "none"]
cpu_type = ctx.build_setting_value
if cpu_type in allowed_values:
return V8CpuTypeInfo(value = cpu_type)
else:
fail("Error setting " + str(ctx.label) + ": invalid v8 target cpu '" +
cpu_type + "'. Allowed values are " + str(allowed_values))
v8_target_cpu = rule(
implementation = _host_target_cpu_impl,
build_setting = config.string(flag = True),
doc = "CPU that V8 will generate code for.",
)
def v8_configure_target_cpu(name, matching_configs):
selects.config_setting_group(
name = "is_" + name,
match_any = matching_configs,
)
# If v8_target_cpu flag is set to 'name'
native.config_setting(
name = "v8_host_target_is_" + name,
flag_values = {
":v8_target_cpu": name,
},
)
# Default target if no v8_host_target flag is set.
selects.config_setting_group(
name = "v8_target_is_" + name,
match_all = [
":v8_host_target_is_none",
":is_" + name,
],
)
# Select either the default target or the flag.
selects.config_setting_group(
name = "v8_target_" + name,
match_any = [
":v8_host_target_is_" + name,
":v8_target_is_" + name,
],
)
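# Sketch of how a BUILD file might wire these rules together (the label used
# in matching_configs is illustrative, not a real target):
#
#     v8_target_cpu(
#         name = "v8_target_cpu",
#         build_setting_default = "none",
#     )
#     v8_configure_target_cpu(
#         name = "x64",
#         matching_configs = ["@config//:is_x86_64"],
#     )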
|
"""Dictionary-based filesystem for your pocket."""
from collections.abc import Iterator
from typing import Any
from typing import cast
from cutty.filesystems.domain.filesystem import Access
from cutty.filesystems.domain.nodefs import FilesystemNode
from cutty.filesystems.domain.nodefs import NodeFilesystem
from cutty.filesystems.domain.purepath import PurePath
class DictFilesystemNode(FilesystemNode):
"""A node in a dict filesystem."""
def __init__(self, node: Any) -> None:
"""Initialize."""
self.node = node
def is_dir(self) -> bool:
"""Return True if the node is a directory."""
return isinstance(self.node, dict)
def is_file(self) -> bool:
"""Return True if the node is a regular file."""
return isinstance(self.node, str)
def is_symlink(self) -> bool:
"""Return True if the node is a symbolic link."""
return isinstance(self.node, PurePath)
def read_bytes(self) -> bytes:
"""Return the file contents."""
return self.read_text().encode()
def read_text(self) -> str:
"""Return the file contents."""
return cast(str, self.node)
def readlink(self) -> PurePath:
"""Return the link target."""
return cast(PurePath, self.node)
def iterdir(self) -> Iterator[str]:
"""Iterate over the directory entries."""
node: dict[str, Any] = self.node
return iter(node.keys())
def __truediv__(self, entry: str) -> FilesystemNode:
"""Return the given directory entry."""
try:
return DictFilesystemNode(self.node[entry])
except KeyError:
raise FileNotFoundError()
def access(self, mode: Access) -> bool:
"""Return True if the user can access the node."""
return Access.EXECUTE not in mode or self.is_dir()
class DictFilesystem(NodeFilesystem):
"""Dictionary-based filesystem for your pocket."""
def __init__(self, tree: dict[str, Any]) -> None:
"""Initialize."""
self.root = DictFilesystemNode(tree)
|
import numpy as np
from .utils import log_nowarn
from .checks import _check_size, _check_labels
def multinomial_logreg_inference(X, W, b):
"""Predict class probabilities.
Parameters
----------
X : ndarray, shape (m, n)
input features (one row per feature vector).
W : ndarray, shape (n, k)
weight vectors, each row representing a different class.
b : ndarray, shape (k,)
vector of biases.
Returns
-------
P : ndarray, shape (m, k)
probability estimates.
"""
_check_size("mn, nk, k", X, W, b)
logits = X @ W + b.T
return softmax(logits)
def softmax(Z):
"""Softmax operator.
Parameters
----------
Z : ndarray, shape (m, n)
input vectors.
Returns
-------
ndarray, shape (m, n)
data after the softmax has been applied to each row.
"""
_check_size("mn", Z)
# Subtracting the maximum improves numerical stability
E = np.exp(Z - Z.max(1, keepdims=True))
return E / E.sum(1, keepdims=True)
def one_hot_vectors(Y, classes):
"""Convert an array of labels into a matrix of one-hot vectors.
Parameters
----------
Y : ndarray, shape (m,)
labels.
classes : int
number of classes. If None it is deduced from Y.
Returns
-------
ndarray, shape (m, classes)
One-hot vectors representing the labels Y.
"""
_check_size("m", Y)
Y = _check_labels(Y, classes)
m = Y.shape[0]
H = np.zeros((m, classes))
H[np.arange(m), Y] = 1
return H
def multinomial_logreg_train(X, Y, lambda_, lr=1e-3, steps=1000,
init_w=None, init_b=None):
"""Train a classifier based on multinomial logistic regression.
Parameters
----------
X : ndarray, shape (m, n)
training features.
Y : ndarray, shape (m,)
training labels with integer values in the range 0...(k-1).
lambda_ : float
regularization coefficient.
lr : float
learning rate
steps : int
number of training steps
init_w : ndarray, shape (n, k)
initial weights (None for zero initialization)
init_b : ndarray, shape (k,)
initial biases (None for zero initialization)
Returns
-------
w : ndarray, shape (n, k)
learned weights (one vector per class).
b : ndarray, shape (k,)
vector of biases.
"""
_check_size("mn, m", X, Y)
Y = _check_labels(Y)
m, n = X.shape
k = Y.max() + 1
W = (init_w if init_w is not None else np.zeros((n, k)))
b = (init_b if init_b is not None else np.zeros(k))
H = one_hot_vectors(Y, k)
for step in range(steps):
P = multinomial_logreg_inference(X, W, b)
grad_W = (X.T @ (P - H)) / m + 2 * lambda_ * W
grad_b = (P - H).mean(0)
W -= lr * grad_W
b -= lr * grad_b
return W, b
def cross_entropy(Y, P):
"""Average cross entropy.
Parameters
----------
Y : ndarray, shape (m,)
target labels.
P : ndarray, shape (m, k)
probability estimates.
Returns
-------
float
average cross entropy.
"""
_check_size("m, mk", Y, P)
Y = _check_labels(Y, P.shape[1])
logp = log_nowarn(P)
return -logp[np.arange(Y.size), Y].mean()
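# Minimal training sketch on synthetic data. Because this module uses relative
# imports, run it as part of its package (python -m ...); the random labels
# are only meant to exercise the API, not to be learnable.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    X = rng.normal(size=(150, 2))
    Y = rng.integers(0, 3, size=150)
    W, b = multinomial_logreg_train(X, Y, lambda_=1e-3, lr=1e-2, steps=500)
    P = multinomial_logreg_inference(X, W, b)
    print("Training cross entropy:", cross_entropy(Y, P))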
|
"""Dragon Candy ported from https://twitter.com/MunroHoberman/status/1346166185595985920
"""
from pypico8 import (
add,
camera,
circ,
cos,
pico8_to_python,
printh,
pget,
pset,
rnd,
run,
sin,
sqrt,
t,
)
printh(
pico8_to_python(
"""
camera(-64,-64)q={}for i=0,11do
add(q,1/4)for i=#q-1,1,-1do
add(q,-q[i])end
end::_::c=t()*8for i=0,999do
j=rnd(128)-64k=rnd(128)-64v=-sqrt(j*j+k*k)pset(j+j/v+k/v,k+k/v-j/v,pget(j,k))end
x=0y=0a=c/64for i=1,#q do
a+=q[i]x+=cos(a)y+=sin(a)circ(x,y,1,c%9+7)
if(i&i-1<1)c+=1end
"""
)
)
def _init():
global q
camera(-64, -64)
q = {}
for i in range(0, 12):
add(q, 1 / 4)
        for i in range(len(q) - 1, 0, -1):  # down to index 1 inclusive, matching the pico8 loop
add(q, -q[i])
def _draw():
global q
c = t() * 8
for i in range(0, 1000):
j = rnd(128) - 64
k = rnd(128) - 64
v = -sqrt(j * j + k * k)
pset(j + j / v + k / v, k + k / v - j / v, pget(j, k))
x = 0
y = 0
a = c / 64
for i in range(1, len(q) + 1):
a += q[i]
x += cos(a)
y += sin(a)
circ(x, y, 1, c % 9 + 7)
if i & i - 1 < 1:
c += 1
run(_init, _draw=_draw) |
#!/usr/bin/env python
"""
@package
KiBOM - Bill of Materials generation for KiCad
Generate BOM in xml, csv, txt, tsv or html formats.
- Components are automatically grouped into BoM rows (grouping is configurable)
- Component groups count number of components and list component designators
- Rows are automatically sorted by component reference(s)
- Supports board variants
Extended options are available in the "bom.ini" config file in the PCB directory (this file is auto-generated with default options the first time the script is executed).
"""
from __future__ import print_function
import re
import csv
import sys
import os
import shutil
import argparse
here = os.path.abspath(os.path.dirname(sys.argv[0]))
sys.path.append(here)
sys.path.append(os.path.join(here,"KiBOM"))
from bomlib.columns import ColumnList
from bomlib.netlist_reader import *
from bomlib.bom_writer import *
from bomlib.preferences import BomPref
verbose = False
def close(*arg):
print(*arg)
sys.exit(0)
# Simple debug message handler
def say(*arg):
if verbose:
print(*arg)
def isExtensionSupported(filename):
result = False
extensions = [".xml",".csv",".txt",".tsv",".html"]
for e in extensions:
if filename.endswith(e):
result = True
break
return result
parser = argparse.ArgumentParser(description="KiBOM Bill of Materials generator script")
parser.add_argument("netlist", help='xml netlist file. Use "%%I" when running from within KiCad')
parser.add_argument("output", default="", help='BoM output file name.\nUse "%%O" when running from within KiCad to use the default output name (csv file).\nFor e.g. HTML output, use "%%O.html"')
parser.add_argument("-n", "--number", help="Number of boards to build (default = 1)", type=int, default=None)
parser.add_argument("-v", "--verbose", help="Enable verbose output", action='count')
parser.add_argument("-r", "--variant", help="Board variant, used to determine which components are output to the BoM", type=str, default=None)
parser.add_argument("--cfg", help="BoM config file (script will try to use 'bom.ini' if not specified here)")
parser.add_argument("-s","--separator",help="CSV Separator (default ',')",type=str, default=None)
args = parser.parse_args()
input_file = args.netlist
if not input_file.endswith(".xml"):
close("{i} is not a valid xml file".format(i=input_file))
verbose = args.verbose is not None
input_file = os.path.abspath(input_file)
say("Input:",input_file)
#look for a config file!
#bom.ini by default
ini = os.path.abspath(os.path.join(os.path.dirname(input_file), "bom.ini"))
config_file = ini #default value
#user can overwrite with a specific config file
if args.cfg:
config_file = args.cfg
#read preferences from file. If file does not exists, default preferences will be used
pref = BomPref()
have_cfile = os.path.exists(config_file)
if have_cfile:
pref.Read(config_file)
say("Config:",config_file)
#pass various command-line options through
pref.verbose = verbose
if args.number is not None:
pref.boards = args.number
pref.separatorCSV = args.separator
if args.variant is not None:
pref.pcbConfig = args.variant
print("PCB variant:", pref.pcbConfig)
#write preference file back out (first run will generate a file with default preferences)
if not have_cfile:
pref.Write(config_file)
say("Writing preferences file %s"%(config_file,))
#individual components
components = []
#component groups
groups = []
#read out the netlist
net = netlist(input_file, prefs = pref)
#extract the components
components = net.getInterestingComponents()
#group the components
groups = net.groupComponents(components)
columns = ColumnList(pref.corder)
#read out all available fields
for g in groups:
for f in g.fields:
columns.AddColumn(f)
#don't add 'boards' column if only one board is specified
if pref.boards <= 1:
columns.RemoveColumn(ColumnList.COL_GRP_BUILD_QUANTITY)
say("Removing:",ColumnList.COL_GRP_BUILD_QUANTITY)
#todo
write_to_bom = True
result = True
#Finally, write the BoM out to file
if write_to_bom:
output_file = args.output
if output_file is None:
output_file = input_file.replace(".xml","_bom.csv")
# KiCad BOM dialog by default passes "%O" without an extension. Append our default
if not isExtensionSupported(output_file):
output_file += "_bom.csv"
# If required, append the schematic version number to the filename
if pref.includeVersionNumber:
fsplit = output_file.split(".")
fname = ".".join(fsplit[:-1])
fext = fsplit[-1]
output_file = str(fname) + str(net.getVersion()) + "." + fext
output_file = os.path.abspath(output_file)
say("Output:",output_file)
result = WriteBoM(output_file, groups, net, columns.columns, pref)
os.remove(input_file)
os.remove(output_file + ".tmp")
if result:
sys.exit(0)
else:
sys.exit(-1)
|
#!/usr/bin/python
# script find clusters of small RNA reads in the genome
# version 3 - 24-12-2013 evolution to save memory !!! TEST !!!
# Usage clustering.py <bowtie input> <output> <bowtie index> <clustering_distance> <minimum read number per cluster to be outputed> <collapse option> <extention value> <average_cluster_size>
# <folding> <output format>
import sys, subprocess, time
from collections import defaultdict # required for some SmRNAwindow attributes (readDic)
#from numpy import mean, std # required for some SmRNAwindow methods
from scipy import stats
from smRtools import *
def clustermining (cluster, Instance, Instance_ID): # cluster argument is a list
if Instance.readDict[-cluster[0]]: # test whether the first position in the cluster was reverse reads
shift = max(Instance.readDict[-cluster[0]])
upstream_coord = cluster[0] - shift + 1
else:
upstream_coord = cluster[0]
if Instance.readDict[cluster[-1]]: # test whether the last position in the cluster was forward reads
shift = max(Instance.readDict[cluster[-1]])
downstream_coord = cluster[-1] + shift -1
else:
downstream_coord = cluster[-1]
readcount = Instance.readcount(upstream_coord=upstream_coord, downstream_coord=downstream_coord)
mean_size, median_size, stdv_size = Instance.statsizes(upstream_coord=upstream_coord, downstream_coord=downstream_coord)
if readcount >= minimum_reads and median_size >= min_median_size:
location = [Instance.gene.split()[0], upstream_coord, downstream_coord]
if output_format == "intervals":
print >> OUT, "%s\t%s\t%s\t%s" % (location[0], location[1], location[2], readcount)
return int(1)
cluster_size = downstream_coord - upstream_coord + 1
if folding == "yes" and cluster_size < 151:
foldEnergy = Instance.foldEnergy(upstream_coord=upstream_coord, downstream_coord=downstream_coord) ## be careful, test !
else:
foldEnergy = "."
forwardReadcount = Instance.forwardreadcount(upstream_coord=upstream_coord, downstream_coord=downstream_coord) #
reverseReadcount = Instance.reversereadcount(upstream_coord=upstream_coord, downstream_coord=downstream_coord) #
density = readcount / float(cluster_size) #
#sequence = subinstanceDic.sequence # to be recycled
if output_format == "GFF3":
if forwardReadcount >= reverseReadcount:
GFFstrand = "+"
else:
GFFstrand = "-"
Attributes = "ID=RC %s : FR %s : RR %s : Dens %s : Med %s : FE %s" % (readcount, forwardReadcount, reverseReadcount, density, median_size, foldEnergy)
print >> OUT, "%s\tGalaxy\tRead_Cluster\t%s\t%s\t%s\t%s\t.\t%s" % (location[0], location[1], location[2], readcount, GFFstrand, Attributes)
return int(1)
else:
Forward_Barycenter, Reverse_Barycenter = Instance.barycenter(upstream_coord=upstream_coord, downstream_coord=downstream_coord)
Zsignature = Instance.signature(24,29,24,29,range(1,27), zscore="yes", upstream_coord=upstream_coord, downstream_coord=downstream_coord)[10] #
Hsignature = Instance.hannon_signature(24,29,24,29, range(1,27), upstream_coord=upstream_coord, downstream_coord=downstream_coord )[10] * 100
UpiFreq = Instance.Ufreq(range(24,29), upstream_coord=upstream_coord, downstream_coord=downstream_coord)
UsiFreq = Instance.Ufreq(range(20,22), upstream_coord=upstream_coord, downstream_coord=downstream_coord)
print >> OUT, "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s" % (Instance_ID, location[0], location[1], location[2], cluster_size, readcount, forwardReadcount, reverseReadcount, density, median_size, foldEnergy, Forward_Barycenter, Reverse_Barycenter, Zsignature, Hsignature, UpiFreq, UsiFreq)
return int(1)
return int(0)
start_time = time.time()
fasta_dic = get_fasta (sys.argv[3])
objDic = {}
number_of_reads = 0
F = open (sys.argv[1], "r") # F is the bowtie output taken as input
for line in F:
number_of_reads += 1
fields = line.split()
polarity = fields[1]
gene = fields[2]
offset = int(fields[3])
size = len (fields[4])
try:
objDic[gene].addread (polarity, offset, size)
except KeyError:
objDic[gene] = SmRNAwindow(gene, fasta_dic[gene])
objDic[gene].addread (polarity, offset, size)
F.close()
OUT = open (sys.argv[2], "w")
output_format=sys.argv[8]
if output_format == "intervals":
print >> OUT, "#chrom\tStart\tEnd\tReadCount"
elif output_format == "GFF3":
print >> OUT, "##gff-version 3"
else:
print >> OUT, "#ID\t#chrom\tStart\tEnd\tLength\tReadCount\tForwardReads\tReverseReads\tDensity\tMedian\tFoldEnergy\tForBar\tRevBar\tz-score_signature\tHannon_signature\tUfreq_in_24-28RNAs\tUfreq_in_20-21RNs"
dist = int(sys.argv[4])
min_median_size = int(sys.argv[6])
minimum_reads = int(sys.argv[5])
number_of_clusters = 0
Instance_ID = 0
folding=sys.argv[7]
for object in objDic:
l = objDic[object].readDict.keys()
l=[abs(i) for i in l]
l=list(set(l))
l.sort()
upstream = 0
for i, element in enumerate (l[1:]):
if abs(element-l[i]) > dist or i+2==len(l): # the 2nd part of the logical test is to capture the last cluster if it overlaps the end of the list
cluster = l[upstream:i+1]
upstream = i+1
Instance_ID += 1
if clustermining (cluster, objDic[object], Instance_ID): number_of_clusters += 1
# Instance_ID += 1
  # if clustermining (l[upstream:], objDic[object], Instance_ID): number_of_clusters += 1 # last cluster? to test
OUT.close()
elapsed_time = time.time() - start_time
print "number of reads: %s\nnumber of clusters: %s\nelapsed time: %s" % (number_of_reads, number_of_clusters, elapsed_time)
|
# coding: utf-8
from __future__ import unicode_literals
import re
import random
import urllib.parse
import pprint
from .common import InfoExtractor
from ..utils import (
urlencode_postdata,
ExtractorError)
class RawFuckIE(InfoExtractor):
IE_NAME = 'rawfuck'
IE_DESC = 'rawfuck'
_VALID_URL = r"https?://(?:www\.)?rawfuck.com"
_LOGIN_URL = "https://www.rawfuck.com/?"
_LOGOUT_URL = "https://www.rawfuck.com/es/?fn_logout=1"
_SITE_URL = "https://www.rawfuck.com"
_SITE_CLOUD = "https://www.rawfuck.com/api_admin.php?fn_cloudflare=1"
_NETRC_MACHINE = 'hardkinks'
def _login(self):
username, password = self._get_login_info()
if not username or not password:
self.raise_login_required(
'A valid %s account is needed to access this media.'
% self._NETRC_MACHINE)
data = {
"redirect": "",
"login[email]": username,
"login[password]": password,
"fn_login": ""
}
login_page, url_handle = self._download_webpage_handle(
self._LOGIN_URL,
None,
note="Logging in",
errnote="Login fail",
data=urlencode_postdata(data),
headers={
"Referer": self._SITE_URL,
"Origin": self._SITE_URL,
"Upgrade-Insecure-Requests": "1",
"Content-Type": "application/x-www-form-urlencoded",
"Connection": "keep-alive",
}
)
if (url_handle.geturl() == self._LOGIN_URL):
raise ExtractorError("Username/password not valid", expected=True)
else:
return
def _logout(self):
self._request_webpage(
self._LOGOUT_URL,
None,
'Log out'
)
def _real_initialize(self):
self._login()
def _real_extract(self, url):
title = url.rsplit("/", 1)[1]
#print(title)
#print(url)
url = url.replace("detail","regarder")
#print(url)
try:
content, url_handle = self._download_webpage_handle(
url,
None,
'Downloading video page',
headers={
"Referer": self._SITE_URL,
"Upgrade-Insecure-Requests": "1",
"Connection": "keep-alive",
}
)
#print(content)
regex_mediaid = r"media_id: '(?P<mediaid>.*?)'"
mobj = re.search(regex_mediaid, content)
if mobj:
media_id = mobj.group("mediaid")
except Exception as e:
            self._logout()
raise ExtractorError("Video not found", expected=True)
#print(media_id)
data = { "media_id": media_id }
#print(data)
try:
info = self._download_json(
self._SITE_CLOUD,
None,
note="JSON file",
data=urlencode_postdata(data),
headers={
"Referer": url,
"Origin": self._SITE_URL,
"Upgrade-Insecure-Requests": "1",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"Connection": "keep-alive",
"X-Requested-With": "XMLHttpRequest"
}
)
#print(info)
#pp = pprint.PrettyPrinter()
#pp.pprint(info)
signed_id = info['stream']['signed_id']
url_hls = "https://videodelivery.net/" + signed_id + "/manifest/video.m3u8"
url_dash = "https://videodelivery.net/" + signed_id + "/manifest/video.mpd"
#print(url_hls)
formats_m3u8 = self._extract_m3u8_formats(
url_hls, None, m3u8_id="hls", fatal=False
)
formats_mpd = self._extract_mpd_formats(
url_dash, None, mpd_id="dash", fatal=False
)
self._sort_formats(formats_m3u8)
self._sort_formats(formats_mpd)
except Exception as e:
            self._logout()
raise ExtractorError("Fail to get video info files", expected=True)
self._logout()
return {
"id": info['stream']['id'],
"title": title,
"formats": formats_mpd + formats_m3u8,
}
|
import numpy as np
import matplotlib.pyplot as plt
import math
############ functions #############################################################
def dprime(gen_scores, imp_scores):
    x = math.sqrt(2) * abs(np.mean(gen_scores) - np.mean(imp_scores)) # numerator: separation of the genuine and impostor score means
    y = math.sqrt(pow(np.std(gen_scores), 2) + pow(np.std(imp_scores), 2)) # denominator: pooled standard deviation of the two score sets
return x / y
def plot_scoreDist(gen_scores, imp_scores, plot_title, eer, threshold_score):
plt.figure()
if threshold_score == True:
plt.axvline(x = eer, ymin = 0, ymax = 0.5, linestyle = '--', label = 'Threshold')
else:
plt.axvline(x = 0, ymin = 0, ymax = 0.5, linestyle = '--', label = 'Threshold')
plt.hist(gen_scores, color = 'green', lw = 2,
histtype= 'step', hatch = '//', label = 'Genuine Scores')
plt.hist(imp_scores, color = 'red', lw = 2,
histtype= 'step', hatch = '\\', label = 'Impostor Scores')
plt.xlim([-0.05,1.05])
plt.legend(loc = 'best')
dp = dprime(gen_scores, imp_scores)
plt.title(plot_title + '\nD-prime = %.2f' % dp)
plt.show()
return
def get_EER(far, frr):
    '''
    Use the FARs and FRRs to return the error
    in which they are approximately equal.
    '''
    # Track the threshold at which |FAR - FRR| is smallest; the EER is their average there.
    eer = (far[0] + frr[0]) / 2
    min_diff = abs(far[0] - frr[0])
    threshold_score = True
    for i in range(1, len(far)):
        if far[i] == frr[i]:
            threshold_score = False
            return far[i], threshold_score
        elif abs(far[i] - frr[i]) < min_diff:
            min_diff = abs(far[i] - frr[i])
            eer = (far[i] + frr[i]) / 2
            threshold_score = True
    return eer, threshold_score
#Detection Error Tradeoff
def plot_det(far, frr, far2, frr2, far3, frr3, plot_title1, plot_title2, plot_title3):
title = 'DET'
eer, threshold_score = get_EER(far, frr)
eer2, threshold_score2 = get_EER(far2, frr2)
eer3, threshold_score3 = get_EER(far3, frr3)
plt.figure()
'''
Refer back to lecture for DET curve
'''
plt.plot(far,frr, lw = 2, label = plot_title1)
plt.plot(far2,frr2, lw = 2, label = plot_title2)
plt.plot(far3,frr3, lw = 2, label = plot_title3)
plt.legend(loc= 'best')
plt.plot([0,1], [0,1], lw = 1, color = 'black')
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('FAR')
plt.ylabel('FRR')
plt.title(title + '\nEER of %s = %.3f \n%s = %.3f, %s = %.3f'
% (plot_title1, eer, plot_title2, eer2, plot_title3, eer3))
plt.show()
return eer, eer2, eer3, threshold_score, threshold_score2, threshold_score3
#Receiver Operating Characteristic
def plot_roc(far, tpr, far2, tpr2, far3, tpr3, plot_title1, plot_title2, plot_title3):
title = 'ROC'
plt.figure()
'''
Refer back to lecture for ROC curve
'''
plt.plot(far, tpr, lw = 2, label = plot_title1)
plt.plot(far2, tpr2, lw = 2, label = plot_title2)
plt.plot(far3, tpr3, lw = 2, label = plot_title3)
plt.legend(loc= 'best')
plt.xlim([-0.05,1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('FAR')
plt.ylabel('TAR')
plt.title(title)
plt.show()
return
# Function to compute TPR, FAR, FRR
def compute_rates(gen_scores, imp_scores, num_thresholds):
# start at 0 to 1, number of num = 100
thresholds = np.linspace(0.0, 1.0, num_thresholds)
# use np.linspace to create n threshold values
# between 0 and 1
far = [] #False Positive Rate
frr = [] #False Negative Rate
tpr = [] #True Positive Rate
# tnr ? where True Negative rate?
for t in thresholds:
'''
Initialize tp, fp, tn, fn
'''
tp, fp, tn, fn = 0,0,0,0
for g_s in gen_scores:
'''
Count tp and fn
'''
if g_s >= t:
tp += 1
else:
fn += 1
for i_s in imp_scores:
'''
Count tn and fp
'''
if i_s >= t:
fp += 1
else:
tn += 1
far.append(fp / (fp + tn)) #equation for far
frr.append(fn / (fn + tp)) #equation for frr
tpr.append(tp / (tp + fn)) #equation for tpr
return far, frr, tpr
############ main code #############################################################
def performance(gen_scores, imp_scores, gen_scores2, imp_scores2,
gen_scores3, imp_scores3, plot_title1, plot_title2, plot_title3, num_thresholds):
far, frr, tpr = compute_rates(gen_scores, imp_scores, num_thresholds) #parameters
far2, frr2, tpr2 = compute_rates(gen_scores2, imp_scores2, num_thresholds)
far3, frr3, tpr3 = compute_rates(gen_scores3, imp_scores3, num_thresholds)
plot_roc(far, tpr, far2, tpr2, far3, tpr3,
plot_title1, plot_title2, plot_title3) #parameters
eer, eer2, eer3, threshold_score, threshold_score2, threshold_score3 = plot_det(far, frr,
far2, frr2, far3, frr3, plot_title1, plot_title2, plot_title3) #parameters
plot_scoreDist(gen_scores, imp_scores, plot_title1, eer, threshold_score) #parameters
plot_scoreDist(gen_scores2, imp_scores2, plot_title2, eer2, threshold_score2) #parameters
plot_scoreDist(gen_scores3, imp_scores3, plot_title3, eer3, threshold_score3) #parameters
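# Hedged usage sketch (illustrative addition, not part of the original script).
# The genuine/impostor scores below are synthetic placeholder values in [0, 1]
# (genuine pairs scoring high, impostor pairs low) for three hypothetical matchers,
# just to show how performance() is wired to the plotting helpers above.
if __name__ == '__main__':
    import random
    random.seed(0)
    gen1 = [random.uniform(0.6, 1.0) for _ in range(200)]
    imp1 = [random.uniform(0.0, 0.5) for _ in range(200)]
    gen2 = [random.uniform(0.5, 1.0) for _ in range(200)]
    imp2 = [random.uniform(0.0, 0.6) for _ in range(200)]
    gen3 = [random.uniform(0.4, 1.0) for _ in range(200)]
    imp3 = [random.uniform(0.0, 0.7) for _ in range(200)]
    performance(gen1, imp1, gen2, imp2, gen3, imp3,
                'Matcher A', 'Matcher B', 'Matcher C', num_thresholds=100)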
|
#!/usr/bin/python3
# for flask app
# note NEED SCALE (reference unit) AS FLOAT for decent accuracy - using an int leads to systematic error
#
# uses paramiko to append data to remote files to avoid writing frequently to rpi ssd
#
# multiple HX711 version - separate data/clock port pair for each
# uses curses so we can read keystrokes and ignore them while running
# x exits and c calibrates during operation while latest values are continuously written
# to the curses window
# some fancy footwork to go back to normal console for calibration...
# check that reading a requested dataport repeatedly does not give zeros
# as that means probably no hx711/scale connected - ignore config if so
# october 2019
#
# sampler for a pi hx711/loadcell combination
# ross lazarus me fecit May 2019
# based on the hx711py example.py code.
# Keep hx711 powered down when not in use - seems to keep the loadcell cooler
# so less thermal drift - also connect it to the 3.7v supply, not the 5v supply
# as that also seems to induce less drift
#
# added rename of old log file - I just wrote 19 hours over. Sad face.
# changed to just append. deal with zeros and discontinuities later
import datetime
import errno
import logging
import time
from paramiko import Transport, SFTPClient, RSAKey
import sys
import os
import RPi.GPIO as GPIO
from hx711 import HX711
SHORTSLEEP = 0.01
SAMPINT = 30
RUNCOOL = True
USER = 'pi'
REMLOGINUSER = 'ross'
REMPATH = '/home/ross'
filehost = '192.168.1.9'
sftpport = 22
sftppassword = ''
sftpkeyfilename = '/home/%s/.ssh/id_rsa' % USER
class SftpClient:
_connection = None
def __init__(self, host, port, username, password, key):
self.host = host
self.port = port
self.username = username
self.password = password
self.key = key
self.create_connection(self.host, self.port,
self.username, self.password, self.key)
@classmethod
def create_connection(cls, host, port, username, password, key):
transport = Transport(sock=(host, port))
transport.connect(username=username, pkey = key)
cls._connection = SFTPClient.from_transport(transport)
@classmethod
def appendrows(self, rows, remote_path):
f = self._connection.file(remote_path,'a')
f.write(rows)
f.close()
return len(rows)
@classmethod
def dirch(self,d):
self._connection.chdir(d)
@classmethod
def dirlist(self,d):
return self._connection.listdir(d)
def file_exists(self, remote_path):
try:
print('remote path : ', remote_path)
self._connection.stat(remote_path)
except IOError as e:
if e.errno == errno.ENOENT:
return False
raise
else:
return True
def download(self, remote_path, local_path, retry=5):
if self.file_exists(remote_path) or retry == 0:
self._connection.get(remote_path, local_path,
callback=None)
elif retry > 0:
time.sleep(5)
retry = retry - 1
self.download(remote_path, local_path, retry=retry)
def close(self):
self._connection.close()
class hxboard():
""" encapsulate a single hx711/load cell
"""
def __init__(self,port,logfname,nscale):
"""
"""
if not port:
logging.warning('## hxboard __init__ requires a clock/data port (as [5,6] eg) please')
return
self.logdat = False
self.recording = False
self.outfile = None
self.nscale = nscale
self.lastval = 0.0
self.configfile = 'scale%d.config' % nscale
self.scale = 1.0
self.offset = 0
self.port = port
self.lasttime = datetime.datetime.now()
if os.path.isfile(self.configfile):
s = open(self.configfile,'r').read()
sc,offs = s.split(',')
self.scale = float(sc)
self.offset = int(offs)
self.calibrated = True
else:
self.calibrated = False
hxnew = HX711(self.port[0],self.port[1])
hxnew.set_reading_format("MSB", "MSB")
hxnew.set_reference_unit(self.scale)
hxnew.set_offset(self.offset)
self.hx = hxnew
scale_ready = self.do_Ready()
if not scale_ready:
# timeout
logging.warning("!!! Scale ready timeout - is scale %d @ port %s connected? Not calibrating" % (self.nscale,self.port))
else:
if not self.calibrated:
self.do_Calibrate()
def do_Ready(self):
# arbitrary check that scale is attached
# if all zeros for a series of values, probably not.
scale_ready = False
if RUNCOOL:
self.hx.power_up()
time.sleep(SHORTSLEEP)
vals = [self.hx.read_average(times=1) for x in range(10)]
if max(vals) != 0.0 and min(vals) != 0.0:
scale_ready = True
if RUNCOOL:
self.hx.power_down() # reduce heat load on chip
return scale_ready
def cleanUp(self):
logging.debug("Cleaning up")
if self.outfile:
self.outfile.close()
self.hx.power_down()
def do_Show(self):
"""return some html for flask
"""
recnote = ""
if self.recording:
recnote = 'Recording to file %s' % self.logfname
#s = '<tr><td>Scale #%d</td><td>last value %.2fg</td><td>%s @ %s</td></tr>\n' % (self.nscale,self.lastval,recnote,self.lasttime.ctime())
s = 'Scale #%d last value %.2fg %s @ %s\n' % (self.nscale,self.lastval,recnote,self.lasttime.ctime())
return s
def do_Read(self):
"""
"""
if RUNCOOL:
self.hx.power_up()
time.sleep(SHORTSLEEP)
val = (self.hx.read_average(times=7) - self.offset)/self.scale
if RUNCOOL:
self.hx.power_down() # reduce heat load on chip
self.lastval = val
self.lasttime = datetime.datetime.now()
return val
def do_Calibrate(self):
"""
"""
readyCheck = input("Remove any items from scale #%d. Press any key when ready." % self.nscale)
self.hx.power_up()
self.hx.set_reference_unit(1)
time.sleep(1)
self.hx.tare()
# hx.set_reference_unit(1) # not needed if use read_average
offset = self.hx.read_average(10)
print("Value at zero (offset): {}".format(offset))
self.hx.set_offset(offset)
self.offset = offset
print("Please place an item of known weight on scale #%d." % self.nscale)
readyCheck = input("Press <Enter> to continue when ready.")
rval = self.hx.read_average(times=10)
val = (rval - offset)
status = "Scale #%d read=%f offset=%d val=%f." % (self.nscale,rval, offset,val)
print(status)
logging.debug(status)
item_weight = input("Please enter the item's weight in grams.\n>")
iwf = float(item_weight)
scale = val/iwf
self.hx.set_reference_unit(int(scale))
status = "Scale #%d adjusted for %d grams = %d\n" % (self.nscale,iwf,scale)
print(status)
logging.debug(status)
self.scale = scale
if os.path.exists(self.configfile):
ps = open(self.configfile,'r').readline()
status = 'Replacing old configfile %s with %.2f,%d' % (ps,scale,offset)
print(status)
logging.debug(status)
input("Press <Enter> to continue")
cf = open(self.configfile,'w')
s = '%.2f,%d\n' % (scale,offset)
cf.write(s)
cf.close()
if RUNCOOL:
self.hx.power_down()
self.calibrated = True
print("Please replace the %.f mass with the item to be recorded on scale #%d." % (iwf,self.nscale))
readyCheck = input("Press <Enter> to continue when ready.")
class hxboards():
"""
encapsulate a collection of hx711/load cell inputs - have 4 kits!
"""
def __init__(self,portlist=[[5,6],]):
self.hxs = []
self.upload_remote_path = REMPATH
sftpkey = RSAKey.from_private_key_file(sftpkeyfilename)
self.client = SftpClient(filehost, sftpport, REMLOGINUSER, sftppassword, sftpkey)
for i in range(len(portlist)):
hx = hxboard(portlist[i],'scale%d.xls' % i,i)
if hx.do_Ready():
self.hxs.append(hx)
else:
status = 'Getting zeros from scale %d ports %s so not using' % (i,portlist[i])
print(status)
logging.debug(status)
dummy = input('Press <Enter> to continue')
def getStatus(self):
s = ''
for hx in self.hxs:
s += hx.do_Show()
return s
def getVals(self):
for hx in self.hxs:
val = hx.do_Read()
uprempath = '/home/ross/loadcelldata/scale%d.xls' % hx.nscale
dur = int(time.time()) # seconds is enough for us
s = '%d\t%.2f\n' % (dur,val)
self.client.appendrows(s,uprempath)
def cleanUp(self):
for hx in self.hxs:
hx.cleanUp()
GPIO.cleanup()
logging.shutdown()
def do_Calibrate(self):
lhs = len(self.hxs)
if lhs > 1:
print('Which scale would you like to recalibrate - enter one of %s' % ' '.join(map(str,range(lhs))))
doscale = input('>')
if int(doscale) in range(lhs):
self.hxs[int(doscale)].do_Calibrate()
else:
                status = '!!! No scale %s - cancelled. Press <Enter> to continue' % doscale
print(status)
logging.debug(status)
input('>')
elif lhs == 1:
self.hxs[0].do_Calibrate()
if __name__ == "__main__":
hxb = hxboards([[5, 6], [7, 8], [9, 10], [11, 12]])
# unconnected ports will fail if read_average always zero
running = True
lastupdate = 0
while running:
if (time.time() - lastupdate) > SAMPINT:
hxb.getVals()
scalestats = hxb.getStatus()
print(scalestats)
lastupdate = time.time()
time.sleep(5)
|
#
# Copyright (c) 2013-2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
#
from sm_client.common import utils
from sm_client.v1 import smc_service_shell
from sm_client.v1 import smc_service_node_shell
from sm_client.v1 import smc_servicegroup_shell
COMMAND_MODULES = [
smc_service_shell,
smc_service_node_shell,
smc_servicegroup_shell,
]
def enhance_parser(parser, subparsers, cmd_mapper):
'''Take a basic (nonversioned) parser and enhance it with
commands and options specific for this version of API.
    :param parser: top level parser
    :param subparsers: top level parser's subparsers collection
        where subcommands will go
'''
for command_module in COMMAND_MODULES:
utils.define_commands_from_module(subparsers, command_module,
cmd_mapper)
|
pkgname = "tevent"
pkgver = "0.11.0"
pkgrel = 0
build_style = "waf"
configure_script = "buildtools/bin/waf"
configure_args = [
"--disable-rpath", "--disable-rpath-install",
"--builtin-libraries=replace", "--bundled-libraries=NONE",
]
hostmakedepends = [
"pkgconf", "python", "gettext-tiny", "docbook-xsl-nons", "xsltproc",
]
makedepends = [
"python-devel", "talloc-devel", "cmocka-devel", "gettext-tiny-devel",
]
pkgdesc = "Event system based on talloc"
maintainer = "q66 <[email protected]>"
license = "LGPL-3.0-or-later"
url = "https://tevent.samba.org"
source = f"https://download.samba.org/pub/{pkgname}/{pkgname}-{pkgver}.tar.gz"
sha256 = "ee9a86c8e808aac2fe1e924eaa139ff7f0269d0e8e4fafa850ae5c7489bc82ba"
options = ["!cross"]
@subpackage("tevent-devel")
def _devel(self):
return self.default_devel()
@subpackage("tevent-python")
def _python(self):
self.pkgdesc = f"{pkgdesc} (Python bindings)"
return ["usr/lib/python*"]
|
from enum import Enum
class GolfSwingLabel(Enum):
other = 0
swing = 1
@staticmethod
def to_json():
label_dict = {}
for l in GolfSwingLabel:
label_dict[l.value] = l.name
return label_dict
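# Hedged usage sketch (illustrative addition, not part of the original module):
# prints the value -> name mapping produced by to_json(), e.g. {0: 'other', 1: 'swing'}.
if __name__ == '__main__':
    print(GolfSwingLabel.to_json())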
|
import sys
import inspect
import textwrap
def function_to_script(func):
function_sig = inspect.signature(func)
    assert all(p.default != p.empty for p in function_sig.parameters.values()), 'Function should not require parameters'
function_name = func.__name__
function_impl = inspect.getsource(func)
function_impl = textwrap.dedent(function_impl)
script = textwrap.dedent("""
%s
if __name__ == '__main__':
%s()
""") % (function_impl, function_name)
return script
def function_to_subprocess_args(func):
script = function_to_script(func)
args = [sys.executable, '-c', script]
return args
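# Hedged usage sketch (illustrative addition, not part of the original module).
# `greet` is a made-up parameterless function; function_to_subprocess_args() turns it
# into a ['python', '-c', script] command that runs the function in a child interpreter.
if __name__ == '__main__':
    import subprocess

    def greet():
        print('hello from a subprocess')

    subprocess.run(function_to_subprocess_args(greet), check=True)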
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('tickets', '0006_ticketseventmeta_receipt_footer'),
]
operations = [
migrations.CreateModel(
name='AccommodationInformation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('first_name', models.CharField(default='', max_length=100, verbose_name=b'Etunimi', blank=True)),
('last_name', models.CharField(default='', max_length=100, verbose_name=b'Sukunimi', blank=True)),
('phone_number', models.CharField(default='', max_length=30, verbose_name=b'Puhelinnumero', blank=True)),
('email', models.EmailField(default='', max_length=75, verbose_name=b'S\xc3\xa4hk\xc3\xb6postiosoite', blank=True)),
('order_product', models.ForeignKey(related_name='accommodation_information_set', to='tickets.OrderProduct')),
],
options={
'verbose_name': 'majoittujan tiedot',
'verbose_name_plural': 'majoittujan tiedot',
},
bases=(models.Model,),
),
migrations.AddField(
model_name='product',
name='requires_accommodation_information',
field=models.BooleanField(default=False),
preserve_default=True,
),
]
|
import sqlite3
conn = sqlite3.connect('database.db')
print "Executed successfully";
conn.execute('CREATE TABLE coders (handle TEXT, email TEXT, password TEXT)')
print "Table Created successfully";
conn.close() |
"""
2.2 – Mensagens simples: Armazene uma mensagem em uma variável e, em seguida, exiba essa mensagem. Então altere o valor de sua variável para uma nova mensagem e mostre essa nova mensagem.
"""
var = "Olá, mundo!"
print(var)
var = "Python é magnífico!"
print(var) |
import astropy.io.fits
import numpy as np
import matplotlib.pyplot as plt
# Create an empty numpy array. 2D; spectra with 4 data elements.
filtered = np.zeros((2040,4))
combined_extracted_1d_spectra_ = astropy.io.fits.open("xtfbrsnN20160705S0025.fits")
exptime = float(combined_extracted_1d_spectra_[0].header['EXPTIME'])
wstart = combined_extracted_1d_spectra_[1].header['CRVAL1']
wdelt = combined_extracted_1d_spectra_[1].header['CD1_1']
for i in range(len(filtered)):
filtered[i][0] = wstart + (i*wdelt)
print "Wavelength array: \n", filtered
f = open("hk.txt")
lines = f.readlines()
f.close()
lines = [lines[i].strip().split() for i in range(len(lines))]
for i in range(len(lines)):
lines[i][0] = float(lines[i][0])*10**4
for i in range(len(filtered)):
mindif = min(lines, key=lambda x:abs(x[0]-filtered[i][0]))
filtered[i][1] = mindif[2]
calibspec = np.load("calibspec.npy")
"""
effspec = np.load("effspec.npy")
print "Effspec:\n", effspec
calibspec = np.zeros((2040))
for i in range(len(effspec)):
if effspec[i] != 0:
calibspec[i] = combined_extracted_1d_spectra_[1].data[i]/exptime/effspec[i]
else:
calibspec[i] = 0
"""
filter_weighted_flux = []
temp_percentages = []
for i in range(len(calibspec)):
filtered[i][2] = calibspec[i]
filtered[i][3] = filtered[i][1] * filtered[i][2] * 0.01
filter_weighted_flux.append(filtered[i][3])
temp_percentages.append(filtered[i][1]*0.01)
print "\nIntegral of filter_weighted_flux:"
print np.trapz(filter_weighted_flux)
print "\nIntegral of percentages:"
print np.trapz(temp_percentages)
print "Integral of filter_weighted_flux divided by integral of percentages:"
print np.trapz(filter_weighted_flux)/np.trapz(temp_percentages)
plt.figure(1)
plt.plot(calibspec)
plt.plot(filter_weighted_flux, "r--")
plt.figure(2)
plt.plot(temp_percentages)
plt.show()
|
# 1120. Maximum Average Subtree
# Runtime: 60 ms, faster than 44.70% of Python3 online submissions for Maximum Average Subtree.
# Memory Usage: 17.1 MB, less than 11.35% of Python3 online submissions for Maximum Average Subtree.
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class State:
def __init__(self, count: int, sum: float, max_avg: float) -> None:
self.count: int = count
self.sum: float = sum
self.max_avg: float = max_avg
class Solution:
def maximumAverageSubtree(self, root: TreeNode) -> float:
def traverse(root: TreeNode) -> State:
if root is None:
return State(0, 0, 0)
else:
left = traverse(root.left)
right = traverse(root.right)
count = left.count + right.count + 1
sum = left.sum + right.sum + root.val
max_avg = max(sum / count, left.max_avg, right.max_avg)
return State(count, sum, max_avg)
return traverse(root).max_avg |
# coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
import sys
if sys.version_info < (3,):
str_cls = unicode # noqa
byte_cls = str
else:
str_cls = str
byte_cls = bytes
def verify_unicode(value, param_name, allow_none=False):
"""
Raises a TypeError if the value is not a unicode string
:param value:
The value that should be a unicode string
:param param_name:
The unicode string of the name of the parameter, for use in the
exception message
:param allow_none:
If None is a valid value
"""
if allow_none and value is None:
return
if not isinstance(value, str_cls):
raise TypeError('%s must be a unicode string, not %s' % (param_name, type_name(value)))
def verify_unicode_list(value, param_name, allow_none=False):
"""
Raises a TypeError if the value is not a list or tuple of unicode strings
:param value:
The value that should be a list/tuple of unicode strings
:param param_name:
The unicode string of the name of the parameter, for use in the
exception message
:param allow_none:
If None is a valid value
"""
if allow_none and value is None:
return
if not isinstance(value, (list, tuple)):
raise TypeError('%s must be a list or tuple of unicode strings, not %s' % (param_name, type_name(value)))
for arg in value:
if not isinstance(arg, str_cls):
raise TypeError('%s must be a list or tuple containing only unicode strings, not %s' % (param_name, type_name(arg)))
def verify_unicode_dict(value, param_name):
"""
Raises a TypeError if the value is not a dict with unicode string keys and
unicode string or None values
:param value:
The value that should be a dict of unicode strings
:param param_name:
The unicode string of the name of the parameter, for use in the
exception message
"""
if value is None:
return
if not isinstance(value, dict):
raise TypeError('%s must be a dict of unicode strings, not %s' % (param_name, type_name(value)))
    for key, item in value.items():
        if not isinstance(key, str_cls):
            raise TypeError('%s must be a dict containing only unicode strings for keys, not %s' % (param_name, type_name(key)))
        if item is not None and not isinstance(item, str_cls):
            raise TypeError('%s must be a dict containing only unicode strings or None for values, not %s' % (param_name, type_name(item)))
def type_name(value):
"""
Returns a user-readable name for the type of an object
:param value:
A value to get the type name of
:return:
A unicode string of the object's type name
"""
cls = value.__class__
if cls.__module__ in set(['builtins', '__builtin__']):
return cls.__name__
return '%s.%s' % (cls.__module__, cls.__name__)
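# Hedged usage sketch (illustrative addition, not part of the original module):
# the validators return None on success and raise TypeError otherwise; the parameter
# names passed in ('greeting', 'letters', 'mapping') are arbitrary labels.
if __name__ == '__main__':
    verify_unicode('hello', 'greeting')                # passes silently
    verify_unicode_list(['a', 'b'], 'letters')         # passes silently
    verify_unicode_dict({'key': None}, 'mapping')      # None values are allowed
    try:
        verify_unicode(42, 'greeting')
    except TypeError as e:
        print(e)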
|
import os
import sys
import acconeer.exptool as et
def main():
parser = et.utils.ExampleArgumentParser()
parser.add_argument("-o", "--output-dir", type=str, required=True)
parser.add_argument("--file-format", type=str, default="h5")
parser.add_argument("--frames-per-file", type=int, default=10000)
args = parser.parse_args()
et.utils.config_logging(args)
if os.path.exists(args.output_dir):
print("Directory '{}' already exists, won't overwrite".format(args.output_dir))
sys.exit(1)
file_format = args.file_format.lower()
if file_format == "np":
file_format = "npz"
if file_format not in ["h5", "npz"]:
print("Unknown format '{}'".format(args.file_format))
sys.exit(1)
if args.frames_per_file < 10:
print("Frames per file must be at least 10")
sys.exit(1)
if args.socket_addr:
client = et.SocketClient(args.socket_addr)
elif args.spi:
client = et.SPIClient()
else:
port = args.serial_port or et.utils.autodetect_serial_port()
client = et.UARTClient(port)
config = et.configs.EnvelopeServiceConfig()
config.sensor = args.sensors
config.update_rate = 30
session_info = client.start_session(config)
os.makedirs(args.output_dir)
interrupt_handler = et.utils.ExampleInterruptHandler()
print("Press Ctrl-C to end session")
total_num_frames = 0
while not interrupt_handler.got_signal:
record_count, num_frames_in_record = divmod(total_num_frames, args.frames_per_file)
if num_frames_in_record == 0:
recorder = et.recording.Recorder(sensor_config=config, session_info=session_info)
data_info, data = client.get_next()
recorder.sample(data_info, data)
if num_frames_in_record + 1 == args.frames_per_file:
record = recorder.close()
filename = os.path.join(
args.output_dir, "{:04}.{}".format(record_count + 1, file_format)
)
print("Saved", filename)
et.recording.save(filename, record)
total_num_frames += 1
print("Sampled {:>5}".format(total_num_frames), end="\r", flush=True)
try:
client.disconnect()
except Exception:
pass
record_count, num_frames_in_record = divmod(total_num_frames, args.frames_per_file)
if num_frames_in_record > 0:
record = recorder.close()
filename = os.path.join(args.output_dir, "{:04}.{}".format(record_count + 1, file_format))
print("Saved", filename)
et.recording.save(filename, record)
if __name__ == "__main__":
main()
|
# Copyright 2014 Google Inc. All Rights Reserved.
"""Utility functions for managing customer supplied master keys."""
import argparse
import base64
import json
from googlecloudsdk.calliope import exceptions
EXPECTED_RECORD_KEY_KEYS = set(['uri', 'key'])
BASE64_KEY_LENGTH_IN_CHARS = 44
SUPPRESS_MASTER_KEY_UTILS = True
class MissingMasterKeyException(exceptions.ToolException):
def __init__(self, resource):
super(MissingMasterKeyException, self).__init__(
'Key required for resource [{0}], but none found.'.format(resource))
class InvalidKeyFileException(exceptions.ToolException):
def __init__(self, base_message):
super(InvalidKeyFileException, self).__init__(
'{0}'.format(base_message))
class BadPatternException(InvalidKeyFileException):
def __init__(self, pattern_type, pattern):
self.pattern_type = pattern_type
self.pattern = pattern
super(BadPatternException, self).__init__(
'Invalid value for [{0}] pattern: [{1}]'.format(
self.pattern_type,
self.pattern))
class InvalidKeyException(InvalidKeyFileException):
pass
def ValidateKey(base64_encoded_string):
"""ValidateKey(s) return none or raises InvalidKeyException."""
try:
base64.standard_b64decode(base64_encoded_string)
except TypeError as t:
raise InvalidKeyException(
'Provided key [{0}] is not valid base64: [{1}]'.format(
base64_encoded_string,
t.message))
  if len(base64_encoded_string) != BASE64_KEY_LENGTH_IN_CHARS:
raise InvalidKeyFileException(
'Provided key [{0}] should contain {1} characters (including padding), '
'but is [{2}] characters long.'.format(
base64_encoded_string,
BASE64_KEY_LENGTH_IN_CHARS,
len(base64_encoded_string)))
def AddMasterKeyArgs(parser, flags_about_creation=True):
"""Adds arguments related to master keys."""
master_key_file = parser.add_argument(
'--master-key-file',
help=(argparse.SUPPRESS if SUPPRESS_MASTER_KEY_UTILS
else 'Path to a master key file'),
metavar='FILE')
master_key_file.detailed_help = (
'Path to a master key file, mapping GCE resources to user managed '
'keys to be used when creating, mounting, or snapshotting disks. ')
# TODO(user)
# Argument - indicates the key file should be read from stdin.'
if flags_about_creation:
no_require_master_key_create = parser.add_argument(
'--no-require-master-key-create',
help=(argparse.SUPPRESS if SUPPRESS_MASTER_KEY_UTILS
else 'Allow creating of resources not protected by master key.'),
action='store_true')
no_require_master_key_create.detailed_help = (
'When invoked with --master-key-file gcloud will refuse to create '
'resources not protected by a user managed key in the key file. This '
'is intended to prevent incorrect gcloud invocations from accidentally '
'creating resources with no user managed key. This flag disables the '
'check and allows creation of resources without master keys.')
class UriPattern(object):
"""A uri-based pattern that maybe be matched against resource objects."""
def __init__(self, path_as_string):
if not path_as_string.startswith('http'):
raise BadPatternException('uri', path_as_string)
self._path_as_string = path_as_string
def Matches(self, resource):
"""Tests if its argument matches the pattern."""
return self._path_as_string == resource.SelfLink()
def __str__(self):
return 'Uri Pattern: ' + self._path_as_string
class MasterKeyStore(object):
"""Represents a map from resource patterns to keys."""
# Members
# self._state: dictionary from UriPattern to a valid, base64-encoded key
@staticmethod
def FromFile(fname):
"""FromFile loads a MasterKeyStore from a file.
Args:
fname: str, the name of a file intended to contain a well-formed key file
Returns:
      A MasterKeyStore, if found
Raises:
exceptions.BadFileException: there's a problem reading fname
exceptions.InvalidKeyFileException: the key file failed to parse
or was otherwise invalid
"""
with open(fname) as infile:
content = infile.read()
return MasterKeyStore(content)
@staticmethod
def FromArgs(args):
"""FromFile attempts to load a MasterKeyStore from a command's args.
Args:
args: CLI args with a master_key_file field set
Returns:
A MasterKeyStore, if a valid key file name is provided as master_key_file
None, if args.master_key_file is None
Raises:
exceptions.BadFileException: there's a problem reading fname
exceptions.InvalidKeyFileException: the key file failed to parse
or was otherwise invalid
"""
assert hasattr(args, 'master_key_file')
if args.master_key_file is None:
return None
return MasterKeyStore.FromFile(args.master_key_file)
@staticmethod
def _ParseAndValidate(s):
"""_ParseAndValidate(s) inteprets s as a master key file.
Args:
s: str, an input to parse
Returns:
a valid state object
Raises:
InvalidKeyFileException: if the input doesn't parse or is not well-formed.
"""
assert type(s) is str
state = {}
try:
records = json.loads(s)
if type(records) is not list:
raise InvalidKeyFileException(
"Key file's top-level element must be a JSON list.")
for key_record in records:
if type(key_record) is not dict:
raise InvalidKeyFileException(
'Key file records must be JSON objects, but [{0}] found.'.format(
json.dumps(key_record)))
if set(key_record.keys()) != EXPECTED_RECORD_KEY_KEYS:
raise InvalidKeyFileException(
'Record [{0}] has incorrect keys; [{1}] expected'.format(
json.dumps(key_record),
','.join(EXPECTED_RECORD_KEY_KEYS)))
pattern = UriPattern(key_record['uri'])
ValidateKey(key_record['key'])
state[pattern] = key_record['key']
except ValueError:
      raise InvalidKeyFileException('Key file is not valid JSON.')
assert type(state) is dict
return state
def __len__(self):
return len(self.state)
def LookupKey(self, resource, raise_if_missing=False):
"""Search for the unique key corresponding to a given resource.
Args:
resource: the resource to find a key for.
raise_if_missing: bool, raise an exception if the resource is not found.
Returns:
The base64 encoded string corresponding to the resource,
or none if not found and not raise_if_missing.
Raises:
InvalidKeyFileException: if there are two records matching the resource.
MissingMasterKeyException: if raise_if_missing and no key is found
        for the provided resource.
"""
assert type(self.state) is dict
search_state = (None, None)
for pat, key in self.state.iteritems():
if pat.Matches(resource):
# TODO(user) what's the best thing to do if there are multiple
# matches?
if search_state[0]:
raise exceptions.InvalidKeyFileException(
'Uri patterns [{0}] and [{1}] both match '
'resource [{2}]. Bailing out.'.format(
search_state[0], pat, str(resource)))
search_state = (pat, key)
if raise_if_missing and (search_state[1] is None):
raise MissingMasterKeyException(resource)
return search_state[1]
def __init__(self, json_string):
self.state = MasterKeyStore._ParseAndValidate(json_string)
def MaybeLookupKey(master_keys_or_none, resource):
if master_keys_or_none and resource:
return master_keys_or_none.LookupKey(resource)
return None
def MaybeLookupKeys(master_keys_or_none, resources):
return [MaybeLookupKey(master_keys_or_none, r) for r in resources]
def MaybeLookupKeysByUri(master_keys_or_none, parser, uris):
return MaybeLookupKeys(
master_keys_or_none,
[(parser.Parse(u) if u else None) for u in uris])
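# Hedged usage sketch (illustrative addition, not part of the original module).
# It shows the key-file shape _ParseAndValidate() expects: a JSON list of
# {"uri": ..., "key": ...} records, where the key is a 44-character base64 string.
# The disk URI below is a made-up placeholder.
if __name__ == '__main__':
  example_key = base64.b64encode('\x00' * 32)  # 32 arbitrary bytes -> 44 base64 chars
  example_json = json.dumps([{
      'uri': 'https://www.googleapis.com/compute/v1/projects/p/zones/z/disks/d',
      'key': example_key,
  }])
  store = MasterKeyStore(example_json)
  print len(store)  # 1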
|
# Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
from sawtooth_sdk.processor.exceptions import InvalidTransaction
def handle_account_creation(create_account, header, state):
"""Handles creating an Account.
Args:
create_account (CreateAccount): The transaction.
header (TransactionHeader): The header of the Transaction.
state (MarketplaceState): The wrapper around the Context.
Raises:
InvalidTransaction
- The public key already exists for an Account.
"""
if state.get_account(public_key=header.signer_public_key):
raise InvalidTransaction("Account with public key {} already "
"exists".format(header.signer_public_key))
state.set_account(
public_key=header.signer_public_key,
label=create_account.label,
description=create_account.description,
holdings=[])
# SmallBank send payment
def handle_send_payment(send_payment, header, state):
    if not state.get_account(public_key=header.signer_public_key):
        raise InvalidTransaction("Account with public key {} does not "
                                 "exist".format(header.signer_public_key))
    #TODO add try catch
    source_account = header.signer_public_key
    dest_account = send_payment.dest_customer_id
    if not source_account or not dest_account:
        raise InvalidTransaction("Both source and dest accounts must exist")
#TODO
# if source_account.CheckingBalance < sendPaymentData.Amount {
# return &processor.InvalidTransactionError{Msg: "Insufficient funds in source checking account"}
# }
# new_source_account := &smallbank_pb2.Account{
# CustomerId: source_account.CustomerId,
# CustomerName: source_account.CustomerName,
# SavingsBalance: source_account.SavingsBalance,
# CheckingBalance: source_account.CheckingBalance - sendPaymentData.Amount,
# }
# new_dest_account := &smallbank_pb2.Account{
# CustomerId: dest_account.CustomerId,
# CustomerName: dest_account.CustomerName,
# SavingsBalance: dest_account.SavingsBalance,
# CheckingBalance: dest_account.CheckingBalance + sendPaymentData.Amount,
# }
# saveAccount(new_source_account, context)
# saveAccount(new_dest_account, context)
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'AddDialog.ui'
#
# Created by: PyQt5 UI code generator 5.7
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_AddDialog(object):
def setupUi(self, AddDialog):
AddDialog.setObjectName("AddDialog")
AddDialog.resize(274, 310)
AddDialog.setStyleSheet("AddDialog{\n"
"background:url(./figures/background.jpg);\n"
"}")
self.gridLayout = QtWidgets.QGridLayout(AddDialog)
self.gridLayout.setObjectName("gridLayout")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.label = QtWidgets.QLabel(AddDialog)
font = QtGui.QFont()
font.setPointSize(16)
self.label.setFont(font)
self.label.setObjectName("label")
self.horizontalLayout.addWidget(self.label)
self.lineEdit = QtWidgets.QLineEdit(AddDialog)
self.lineEdit.setObjectName("lineEdit")
self.horizontalLayout.addWidget(self.lineEdit)
self.verticalLayout.addLayout(self.horizontalLayout)
self.pictureHolder = QtWidgets.QLabel(AddDialog)
self.pictureHolder.setObjectName("pictureHolder")
self.verticalLayout.addWidget(self.pictureHolder)
self.buttonBox = QtWidgets.QDialogButtonBox(AddDialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.verticalLayout.addWidget(self.buttonBox)
self.gridLayout.addLayout(self.verticalLayout, 0, 0, 1, 1)
self.retranslateUi(AddDialog)
self.buttonBox.accepted.connect(AddDialog.accept)
self.buttonBox.rejected.connect(AddDialog.reject)
QtCore.QMetaObject.connectSlotsByName(AddDialog)
def retranslateUi(self, AddDialog):
_translate = QtCore.QCoreApplication.translate
AddDialog.setWindowTitle(_translate("AddDialog", "Dialog"))
self.label.setText(_translate("AddDialog", "Who are you :"))
self.pictureHolder.setText(_translate("AddDialog", "text"))
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import emojilib
def main():
data = emojilib.generate(
text="絵文\n字。",
width=128,
height=128,
typeface_file='./example/NotoSansMonoCJKjp-Bold.otf'
)
with open('./example/emoji.png', 'wb') as f:
f.write(data)
if __name__ == '__main__':
main()
|
import calendar
import datetime
from dao.dao import SlackMessager,ExtractMessageFromPubSubContext,OSEnvironmentState
def send_message(context, info):
webhook=OSEnvironmentState.getSlackWebhook()
slackMessager=SlackMessager(webhook=webhook)
message = ExtractMessageFromPubSubContext.messageDecode(context)
slackMessager.send(message)
print(context, info) #for logging
def is2nd4thFridayOnContext(context, info):
if is2nd4thFriday(datetime.date.today()):
send_message(context, info)
print(datetime.date.today(),is2nd4thFriday(datetime.date.today()))
def is2nd4thFriday(today: datetime.date) -> bool:
    return today.weekday() == 4 and numOfWeek(today) in [2, 4]
def numOfWeek(today: datetime.date) -> int:
return (today.day-1)//7+1 |
from fastai.script import *
from fastai.vision import *
from fastai.distributed import *
@call_parse
def main():
    path = url2path(URLs.MNIST_SAMPLE)
tfms = (rand_pad(2, 28), [])
data = ImageDataBunch.from_folder(path, ds_tfms = tfms, bs = 64).normalize(imagenet_stats)
learn = cnn_learner(data, models.resnet18, metrics = accuracy)
learn.fit_one_cycle(1, 0.02) |
import subprocess
args = [ 'black', 'tsu/', 'tests/' ]
subprocess.run(args)
|
from ape import plugins
from ape.api import create_network_type
from .providers import LocalNetwork
@plugins.register(plugins.NetworkPlugin)
def networks():
yield "ethereum", "development", create_network_type(chain_id=69, network_id=69)
@plugins.register(plugins.ProviderPlugin)
def providers():
yield "ethereum", "development", LocalNetwork
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tuning class."""
import os
import re
from typing import Any, Dict, Optional
from lpot.ux.utils.hw_info import HWInfo
from lpot.ux.utils.workload.workload import Workload
class Tuning:
"""Tuning class."""
def __init__(
self,
workload: Workload,
workload_path: str,
template_path: Optional[str] = None,
) -> None:
"""Initialize configuration Dataset class."""
self.model_path = workload.model_path
self.framework = workload.framework
self.instances: int = 1
self.cores_per_instance: int = HWInfo().cores // self.instances
self.model_output_path = workload.model_output_path
self.config_path = workload.config_path
self.script_path = os.path.join(os.path.dirname(__file__), "tune_model.py")
self.command = [
"python",
self.script_path,
"--input-graph",
self.model_path,
"--output-graph",
self.model_output_path,
"--config",
self.config_path,
"--framework",
self.framework,
]
if template_path:
self.command = ["python", template_path]
def serialize(self) -> Dict[str, Any]:
"""Serialize Tuning to dict."""
result = {}
for key, value in self.__dict__.items():
variable_name = re.sub(r"^_", "", key)
if variable_name == "command":
result[variable_name] = " ".join(value)
else:
result[variable_name] = value
return result
|
import requests
from bs4 import BeautifulSoup
def filter_input(link, data_is_empty, store_data_is_empty):
should_ret = False
if link == "err" or "skroutz.gr/s/" not in link and "skroutz.gr/shop/" not in link:
print('-' * 50)
print('Error loading requested URL: No URL was given.\nOr given URL is not a valid product or store URL')
print('-' * 50)
should_ret = True
if data_is_empty:
print('-' * 55)
        print('An error occurred while trying to fulfill your request.\nData flag was initialized, but no flags were given.')
print('-' * 55)
should_ret = True
if store_data_is_empty:
print('-' * 60)
        print('An error occurred while trying to fulfill your request.\nStore Data flag was initialized, but no flags were given.')
print('-' * 60)
should_ret = True
return should_ret
def store_handler(url, sdata):
page = requests.get(url, headers={"User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36'})
soup = BeautifulSoup(page.content, 'html.parser')
if sdata is None:
show_all = True
else:
show_all = 'all' in sdata
store_name = soup.find("h1", {"class": "page-title"}).findChildren("strong")[0].get_text() if show_all or 'name' in sdata else None
if show_all or 'tags' in sdata:
tags = soup.find("main", {"class": "shop"}).findChildren("div", {"class": "shop-info"})[0].findChildren("div", {"class": "content"})[0].findChildren("p")[0].get_text().split(",")
for i in range(len(tags)):
tags[i] = tags[i].strip()
else:
tags = None
if show_all or 'identifiers' in sdata:
type_query = soup.find("main", {"class": "shop"}).findChildren("div", {"class": "shop-info"})[0].findChildren("div", {"class": "content"})[0].findChildren("p")[1]
identifiers = []
for child in type_query.findChildren("span"):
identifiers.append(child.get_text().strip("\n").strip())
else:
identifiers = None
if show_all or 'shop_links' in sdata:
shop_links_quer = soup.find("div", {"class": "shop-links"})
telephone = shop_links_quer.findChildren("a", {"itemprop": "telephone"})[0].get_text()
web_quer = shop_links_quer.findChildren("a", {"rel": "nofollow"})
has_website = web_quer is not None
if has_website:
website = web_quer[0]['href']
skroutz_prod_quer = shop_links_quer.findChildren("a", {"id": "shop-products"})
has_skroutz_products = skroutz_prod_quer is not None
if has_skroutz_products:
skroutz_page = skroutz_prod_quer[0]['href']
else:
telephone = has_website = website = has_skroutz_products = skroutz_page = None
if show_all or 'rating_score' in sdata:
rating_score_quer = soup.find("div", {"class": "rating-average"}).findChildren("b")[0].get_text().replace(" ", "").replace("\n", "")[0:3].split(",")
        rating_score = float(rating_score_quer[0]) + float(rating_score_quer[1]) / 10
else:
rating_score = None
if show_all or 'rating_count' in sdata:
pos_rating_count = int(soup.find_all("span", {"class": "review_no"})[0].get_text().replace(" ", "").replace("\n", ""))
neg_rating_count = int(soup.find_all("span", {"class": "review_no"})[1].get_text().replace(" ", "").replace("\n", ""))
total_rating_count = pos_rating_count + neg_rating_count
else:
pos_rating_count = neg_rating_count = total_rating_count = None
address = soup.find("b", {"itemprop": "address"}).get_text() if show_all or 'address' in sdata else None
if show_all or 'payment_methods' in sdata:
payment_methods_quer = soup.find("div", {"class": "payment-list"}).findChildren("ul")[0]
payment_methods = []
for li in payment_methods_quer.findChildren("li"):
payment_methods.append(li.get_text().replace(" ", "").replace("\n", ""))
else:
payment_methods = None
gemh_num = soup.find("div", {"id": "suppliers-register"}).findChildren("ul")[0].findChildren("li")[0].get_text().replace(" ", "").replace("\n", "") if show_all or "gemh_num" in sdata else None
return_dict = {
'store_name': store_name,
'tags': tags,
'identifiers': identifiers,
'identifier_booleans': {
'online_only': 'Το κατάστημα λειτουργεί μόνο διαδικτυακά' in identifiers,
'has_area': 'Το κατάστημα λειτουργεί μόνο διαδικτυακά' not in identifiers,
'has_physical_store': 'Κατάστημα & Σημείο παραλαβής.' in identifiers,
'is_verified_reseller': 'Επίσημος μεταπωλητής' in identifiers,
'has_greca_trustmark': 'GRECA Trustmark' in identifiers
} if identifiers is not None else {
'online_only': None,
'has_area': None,
'has_physical_store': None,
'is_verified_reseller': None,
'has_greca_trustmark': None,
},
'rating_count': {
'total': total_rating_count,
'positive': pos_rating_count,
'negative': neg_rating_count,
},
'rating_score': rating_score,
'tel': telephone,
'has_website': has_website,
'website_url': None if not has_website else website,
'has_skroutz_products': has_skroutz_products,
'skroutz_products_page': None if not has_skroutz_products else skroutz_page,
'store_address': address,
'payment_methods': payment_methods,
'gemh_number': gemh_num,
}
return return_dict
def categoryHandler(url):
page = requests.get(url, headers={"User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36'})
soup = BeautifulSoup(page.content, 'html.parser')
cards = list(soup.find_all(attrs={'class': 'card'}))
data = []
first = True
for reg in cards:
if not first:
product_content = reg.find("div", {"class": "card-content"})
# PRICE SECTION
product_price = reg.find("div", {"class": "price"})
price_str = product_price.findChildren("a", {"class": "js-sku-link"})[0].get_text()
price = float(''.join(char for char in price_str if char.isdigit() or char == ",").replace(",", "."))
# PRODUCT SECTION
product_name = product_content.findChildren("a", {"class": "js-sku-link"})[0]
if product_name.has_attr("title"):
product_name = product_content.findChildren("a", {"class": "js-sku-link"})[0]['title']
product_image = reg.find("a", {"class": "js-sku-link"}).findChildren("img")[0]
if product_image.has_attr("src"):
product_image = reg.find("a", {"class": "js-sku-link"}).findChildren("img")[0]['src']
# RATINGS SECTION
product_rating = reg.find("div", {"class": "rating-with-count"}).findChildren("a")[0].findChildren("div")[0]
ratings = int(product_rating.find("div").get_text())
rating_score = float(product_rating.find("span").get_text())
# STORE SECTION
store_query = product_price.find("span", {"class": "shop-count"}).get_text()
store_count = int(''.join(char for char in store_query if char.isdigit()))
to_append = {
'product_name': product_name,
'product_price': price,
'product_image': product_image,
'ratings': ratings,
'rating_score': rating_score,
'store_count': store_count,
}
data.append(to_append)
else:
first = False
return data
def call(link="err", **kwargs):
kwarg_dict = dict()
for key,value in kwargs.items():
kwarg_dict[key] = value
# kwarg flag lengths
data_length = None
store_data_length = None
filter_returns = False
# show = product flags
# store_data = store flags
show = False
store_data = None
if len(kwarg_dict.keys()) > 0:
if 'data' in kwarg_dict.keys():
filter_returns = 'all' not in kwarg_dict['data']
data_length = len(kwarg_dict['data']) == 0
if 'store_data' in kwarg_dict.keys():
store_data = kwarg_dict['store_data']
store_data_length = len(kwarg_dict['store_data']) == 0
if 'show_all' in kwarg_dict.keys():
show = kwarg_dict['show_all']
if 'category' in kwarg_dict.keys() and kwarg_dict['category']:
return categoryHandler(link)
if filter_input(link, data_length, store_data_length):
return
if "skroutz.gr/shop/" in link:
return store_handler(link, store_data)
page = requests.get(link, headers={"User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36'})
soup = BeautifulSoup(page.content, 'html.parser')
title = soup.select_one('.page-title').get_text() if not filter_returns or filter_returns and 'product_name' in kwarg_dict['data'] else None
price = None
if not show:
if not filter_returns or filter_returns and 'base_price' in kwarg_dict['data']:
price = soup.find("strong", {"class": "dominant-price"}).get_text()
price = price.replace(' ', '')
price = price.replace('€', '')
prices = price.split(",")
price = int(prices[0]) + float(prices[1]) / 100
else:
        lim = 64 if 'limit' not in kwarg_dict.keys() else kwarg_dict['limit']
divs = soup.findAll("li", {"class": "js-product-card"}, limit=lim)
prices_no_fees = None
titles = None
if not filter_returns or filter_returns and 'store_names' in kwarg_dict['data']:
titles = []
for i in range(len(divs)):
titles.append(divs[i].findChildren("a", {"class": "js-product-link"})[0].get_text())
if not filter_returns or filter_returns and 'base_price' in kwarg_dict['data']:
prices_no_fees = []
for i in range(len(divs)):
to_append = divs[i].findChildren("strong", {"class": "dominant-price"})[0].get_text()
to_append = to_append[:len(to_append)-2].split(",")
euros = float(to_append[0])
cents = int(to_append[1])
final = euros + cents / 100
prices_no_fees.append(final)
rating_count = None
if not filter_returns or filter_returns and 'rating_count' in kwarg_dict['data']:
rating_count = soup.find("div", {"class": "actual-rating"}).get_text()
rating_score = None
if not filter_returns or filter_returns and 'rating_score' in kwarg_dict['data']:
        rating_score_scrape = soup.find("a", {"class": ["rating", "big_stars"]})['title'][0:3].split(",") # Future to-do: read this from the child elements instead of the title attribute.
rating_score = int(rating_score_scrape[0]) + int(rating_score_scrape[1]) / 10
discussion_count = None
if not filter_returns or filter_returns and 'discussion_count' in kwarg_dict['data']:
discussion_count = soup.find("a", {"href": "#qna"}).find("span").get_text()
discussion_count = discussion_count.replace('(', '')
discussion_count = discussion_count.replace(')', '')
is_on_sale = None
if not filter_returns or filter_returns and 'is_on_sale' in kwarg_dict['data']:
sale_badge = soup.find("span", {"class": ["badge", "pricedrop"]})
is_on_sale = sale_badge is not None
store_count = None
if not filter_returns or filter_returns and 'store_count' in kwarg_dict['data']:
store_count = soup.find("a", {"href": "#shops"}).findChildren("span")[0].get_text()
store_count = store_count.replace('(', '')
store_count = store_count.replace(')', '')
lowest_base_price = max_base_price = None
if not filter_returns or filter_returns and 'lowest_price' in kwarg_dict['data']:
if price is None:
price = soup.find("strong", {"class": "dominant-price"}).get_text()
price = price.replace(' ', '')
price = price.replace('€', '')
prices = price.split(",")
price = int(prices[0]) + float(prices[1]) / 100
lowest_base_price = price
if not filter_returns or filter_returns and 'max_price' in kwarg_dict['data']:
if not show:
price = soup.find("strong", {"class": "dominant-price"}).get_text()
price = price.replace(' ', '')
price = price.replace('€', '')
prices = price.split(",")
price = int(prices[0]) + float(prices[1]) / 100
max_base_price = price
else:
prices_no_fees = []
for i in range(len(divs)):
to_append = divs[i].findChildren("strong", {"class": "dominant-price"})[0].get_text()
to_append = to_append[:len(to_append)-2].split(",")
euros = float(to_append[0])
cents = int(to_append[1])
final = euros + cents / 100
prices_no_fees.append(final)
max_base_price = prices_no_fees[-1]
return_dict = {
"product_name": title,
"base_price": price if not show else prices_no_fees, #returns a list of all base prices of product from all stores if show_all is True, else returns first price found (cheapest)
"store_names": titles if show else None, #returns only if show_all is True
"store_count": int(store_count) if store_count is not None else None,
"rating_count": int(rating_count) if rating_count is not None else None,
"rating_score": rating_score,
"discussion_count": int(discussion_count) if discussion_count is not None else None,
"lowest_price": lowest_base_price,
"max_price": max_base_price,
"is_on_sale": is_on_sale
}
return return_dict
|
# Function for enumerative model counting.
# Courtesy of Lucas Bang.
from z3 import *
import time
result = {}
count = 0
def get_models(F, timeout):
"""
Do model counting by enumerating each model.
Uses Z3Py.
Args:
F (<class 'z3.z3.AstVector'>): SMT-LIB formula
timeout (int): timeout (sec)
Raises:
Z3Exception: uninterpreted functions are not supported
Z3Exception: arrays and uninterpreted sorts are not supported
Returns:
dict: counts per minute
int: count at timeout
"""
result = {}
count = 0
s = Solver()
s.add(F)
start = time.time()
t = time.time()
minute = 1
while s.check() == sat and t - start < timeout:
prev_count = count
m = s.model()
count += 1
# Create a new constraint the blocks the current model
block = []
for d in m:
# d is a declaration
if d.arity() > 0:
raise Z3Exception("uninterpreted functions are not supported")
# create a constant from declaration
c = d()
if is_array(c) or c.sort().kind() == Z3_UNINTERPRETED_SORT:
raise Z3Exception("arrays and uninterpreted sorts are not supported")
block.append(c != m[d])
s.push()
s.add(Or(block))
t = time.time()
if t - start > minute * 60:
result[minute] = prev_count
minute += 1
return result, count |
from threadpoolctl import threadpool_info
from pprint import pprint
try:
import numpy as np
print("numpy", np.__version__)
except ImportError:
pass
try:
import scipy
import scipy.linalg
print("scipy", scipy.__version__)
except ImportError:
pass
try:
from tests._openmp_test_helper import * # noqa
except ImportError:
pass
pprint(threadpool_info())
|
#------------------------------------------------------------------------------
# Copyright (c) 2016, 2022, Oracle and/or its affiliates.
#
# Portions Copyright 2007-2015, Anthony Tuininga. All rights reserved.
#
# Portions Copyright 2001-2007, Computronix (Canada) Ltd., Edmonton, Alberta,
# Canada. All rights reserved.
#
# This software is dual-licensed to you under the Universal Permissive License
# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License
# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose
# either license.
#
# If you elect to accept the software under the Apache License, Version 2.0,
# the following applies:
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# editioning.py
#
# Demonstrates the use of Edition-Based Redefinition, a feature that is
# available in Oracle Database 11.2 and higher. See the Oracle documentation on
# the subject for additional information. Adjust the contents at the top of the
# script for your own database as needed.
#------------------------------------------------------------------------------
import os
import oracledb
import sample_env
# this script is currently only supported in python-oracledb thick mode
oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client())
# connect to the editions user and create a procedure
edition_connect_string = sample_env.get_edition_connect_string()
edition_name = sample_env.get_edition_name()
connection = oracledb.connect(edition_connect_string)
print("Edition should be None, actual value is:", repr(connection.edition))
cursor = connection.cursor()
cursor.execute("""
create or replace function TestEditions return varchar2 as
begin
return 'Base Procedure';
end;""")
result = cursor.callfunc("TestEditions", str)
print("Function should return 'Base Procedure', actually returns:",
repr(result))
# next, change the edition and recreate the procedure in the new edition
cursor.execute("alter session set edition = %s" % edition_name)
print("Edition should be", repr(edition_name.upper()),
"actual value is:", repr(connection.edition))
cursor.execute("""
create or replace function TestEditions return varchar2 as
begin
return 'Edition 1 Procedure';
end;""")
result = cursor.callfunc("TestEditions", str)
print("Function should return 'Edition 1 Procedure', actually returns:",
repr(result))
# next, change the edition back to the base edition and demonstrate that the
# original function is being called
cursor.execute("alter session set edition = ORA$BASE")
result = cursor.callfunc("TestEditions", str)
print("Function should return 'Base Procedure', actually returns:",
repr(result))
# the edition can be set upon connection
connection = oracledb.connect(edition_connect_string,
edition=edition_name.upper())
cursor = connection.cursor()
result = cursor.callfunc("TestEditions", str)
print("Function should return 'Edition 1 Procedure', actually returns:",
repr(result))
# it can also be set via the environment variable ORA_EDITION
os.environ["ORA_EDITION"] = edition_name.upper()
connection = oracledb.connect(edition_connect_string)
print("Edition should be", repr(edition_name.upper()),
"actual value is:", repr(connection.edition))
cursor = connection.cursor()
result = cursor.callfunc("TestEditions", str)
print("Function should return 'Edition 1 Procedure', actually returns:",
repr(result))
|
from .base import Store |
from pycspr import crypto
from pycspr import factory
from pycspr.serialisation import to_bytes
from pycspr.types import CLTypeKey
from pycspr.types import ExecutableDeployItem
from pycspr.types import DeployHeader
def create_digest_of_deploy(header: DeployHeader) -> bytes:
"""Returns a deploy's hash digest.
:param header: Deploy header information.
:returns: Hash digest of a deploy.
"""
# Element 1: account.
cl_account = factory.create_cl_value(
factory.create_cl_type_of_simple(CLTypeKey.PUBLIC_KEY),
header.account_public_key
)
# Element 2: timestamp.
cl_timestamp = factory.create_cl_value(
factory.create_cl_type_of_simple(CLTypeKey.U64),
int(header.timestamp * 1000)
)
# Element 3: ttl.
cl_ttl = factory.create_cl_value(
factory.create_cl_type_of_simple(CLTypeKey.U64),
header.ttl.as_milliseconds
)
# Element 4: gas-price.
cl_gas_price = factory.create_cl_value(
factory.create_cl_type_of_simple(CLTypeKey.U64),
header.gas_price
)
# Element 5: body-hash.
cl_body_hash = factory.create_cl_value(
factory.create_cl_type_of_byte_array(32),
header.body_hash
)
# Element 6: dependencies.
cl_dependencies = factory.create_cl_value(
factory.create_cl_type_of_list(
factory.create_cl_type_of_simple(CLTypeKey.STRING)
),
header.dependencies
)
# Element 7: chain-name.
cl_chain_name = factory.create_cl_value(
factory.create_cl_type_of_simple(CLTypeKey.STRING),
header.chain_name
)
digest_list = [to_bytes(cl_timestamp),
to_bytes(cl_ttl), to_bytes(cl_gas_price),
to_bytes(cl_body_hash), to_bytes(cl_dependencies),
to_bytes(cl_chain_name)]
digest_sum = to_bytes(cl_account)
for b in digest_list:
digest_sum += b
return crypto.get_hash(digest_sum)
def create_digest_of_deploy_body(payment: ExecutableDeployItem,
session: ExecutableDeployItem
) -> bytes:
"""Returns a deploy body's hash digest.
:param payment: Deploy payment execution logic.
:param session: Deploy session execution logic.
:returns: Hash digest of a deploy body.
"""
return crypto.get_hash(to_bytes(payment) + to_bytes(session))
|
"""unittests for hwaddresses factory functions."""
from random import choice, choices
from string import hexdigits
import unittest
from hwaddress import get_address_factory, get_verifier, \
new_hwaddress_class, MAC, MAC_64, GUID, \
EUI_48, EUI_64, WWN, WWNx, IB_LID, IB_GUID, IB_GID
def getrandhex(n):
"""Generate hex string representing bit length defined by n."""
n = int(n / 4)
return ''.join(choices(hexdigits, k=n))
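# For example, getrandhex(48) yields a 12-character hex string (48 bits / 4 bits
# per hex digit); the characters themselves are random, e.g. something like '1a2B3c4D5e6F'.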
class GenericFactory(unittest.TestCase):
"""Test default factory function."""
def test_hw_rand_hex(self):
"""Test given hex strings returns correct MAC/GUID object."""
hw_address = get_address_factory()
testlist = [(getrandhex(48), MAC),
(getrandhex(64), MAC_64),
(getrandhex(128), GUID)]
for ti in testlist:
self.assertIsInstance(hw_address(ti[0]), ti[1])
def test_default_verifier(self):
"""Test that the default verifier behaves as expected."""
verifier = get_verifier()
tlist = ['12:34:56:78:90:ab',
'12-34-56-78-90-ab']
flist = ['12:34:56:78:90:ab:cd:ef',
'12-34-56-78-90-ab-cd-ef',
'1234.5678.90ab']
for ts in tlist:
self.assertTrue(verifier(ts))
for fs in flist:
self.assertFalse(verifier(fs))
def test_hw_verifier(self):
"""Test verifier returns expected bool for MAC/MAC_64/GUID."""
hw_verifier = get_verifier(MAC, MAC_64, GUID)
tlist = ['12:34:56:78:90:ab',
'12:34:56:78:90:ab:cd:ef',
'12345678-90ab-cdef-1234-567890abcdef']
flist = ['12-34-56-78-90-ab',
'12-34-56-78-90-ab-cd-ef',
'12345678:90ab:cdef:1234:567890abcdef']
for ts in tlist:
self.assertTrue(hw_verifier(ts))
for fs in flist:
self.assertFalse(hw_verifier(fs))
class EuiFactory(unittest.TestCase):
"""Test eui_address factory function."""
def test_eui_rand_hex(self):
"""Test given hex strings returns correct EUI object."""
eui_address = get_address_factory(EUI_48, EUI_64)
testlist = [(getrandhex(48), EUI_48),
(getrandhex(64), EUI_64)]
for ti in testlist:
self.assertIsInstance(eui_address(ti[0]), ti[1])
def test_eui_verifier(self):
"""Test verifier returns expected bool for EUI_48/EUI_64."""
eui_verifier = get_verifier(EUI_48, EUI_64)
tlist = ['12-34-56-78-90-ab',
'12-34-56-78-90-ab-cd-ef']
flist = ['12:34:56:78:90:ab',
'12:34:56:78:90:ab:cd:ef']
for ts in tlist:
self.assertTrue(eui_verifier(ts))
for fs in flist:
self.assertFalse(eui_verifier(fs))
class WwnFactory(unittest.TestCase):
"""Test wwn_address factory function."""
def test_wwn_rand_hex(self):
"""Test given hex strings returns correct WWN object."""
wwn_address = get_address_factory(WWN, WWNx)
wwnhex = choice(('1', '2', '5')) + getrandhex(60)
wwnxhex = '6' + getrandhex(124)
testlist = [(wwnhex, WWN),
(wwnxhex, WWNx)]
for ti in testlist:
self.assertIsInstance(wwn_address(ti[0]), ti[1])
def test_wwn_verifier(self):
"""Test verifier returns expected bool for WWN/WWNx."""
wwn_verifier = get_verifier(WWN, WWNx)
tlist = ['12:34:56:78:90:ab:cd:ef',
'22:34:56:78:90:ab:cd:ef',
'52:34:56:78:90:ab:cd:ef',
'62:34:56:78:90:ab:cd:ef:62:34:56:78:90:ab:cd:ef']
flist = ['32:34:56:78:90:ab:cd:ef',
'42:34:56:78:90:ab:cd:ef',
'72:34:56:78:90:ab:cd:ef',
'72:34:56:78:90:ab:cd:ef:62:34:56:78:90:ab:cd:ef']
for ts in tlist:
self.assertTrue(wwn_verifier(ts))
for fs in flist:
self.assertFalse(wwn_verifier(fs))
class IbFactory(unittest.TestCase):
"""Test ib_address factory function."""
def test_ib_rand_hex(self):
"""Test given hex strings returns correct IB object."""
ib_address = get_address_factory(IB_LID, IB_GUID, IB_GID)
testlist = [(getrandhex(16), IB_LID),
(getrandhex(64), IB_GUID),
(getrandhex(128), IB_GID)]
for ti in testlist:
self.assertIsInstance(ib_address(ti[0]), ti[1])
def test_ib_verifier(self):
"""Test verifier returns expected bool for EUI_48/EUI_64."""
ib_verifier = get_verifier(IB_LID, IB_GUID, IB_GID)
tlist = ['0x12ab',
'1234:5678:90ab:cdef',
'1234:5678:90ab:cdef:1234:5678:90ab:cdef']
flist = ['12ab',
'0x12abcd',
'1234-5678-90ab-cdef',
'12345678:90ab:cdef:1234:567890abcdef']
for ts in tlist:
self.assertTrue(ib_verifier(ts))
for fs in flist:
self.assertFalse(ib_verifier(fs))
class NewClassFactory(unittest.TestCase):
"""Test new_hwaddress_class factory function."""
def test_new_class(self):
"""Test new_hwaddress_class factory function."""
modellist = [
{'args': ('T1MAC', 48, '.', 4, False),
'tlist': ['1234.5678.90ab',
'abcd.ef12.3456'],
'flist': ['1234-5678-90ab',
'1234.5678.90ab.cdef']},
{'args': ('T2MAC', 64, ' ', (4, 2, 2, 4, 4), False),
'tlist': ['1234 56 78 90ab cdef',
'abcd ef 12 3456 7890'],
'flist': ['1234-56-78-90ab-cdef',
'1234.56.78.90ab']}
]
for model in modellist:
HwCls = new_hwaddress_class(*model['args'])
self.assertTrue(issubclass(HwCls, MAC))
self.assertIsInstance(HwCls(getrandhex(model['args'][1])), HwCls)
for ts in model['tlist']:
self.assertTrue(HwCls.verify(ts))
for fs in model['flist']:
self.assertFalse(HwCls.verify(fs))
|
import pygtk
#pygtk.require('2.0')
import gtk
#Just another test
#http://stackoverflow.com/questions/6782142/pygobject-left-click-menu-on-a-status-icon
#With this code we connect the status icon using "button-press-event"
#With this code, the SAME popup menu appears BOTH in right and left mouse click on gtkstatusicon.
class TrayIcon(gtk.StatusIcon):
def __init__(self):
gtk.StatusIcon.__init__(self)
self.set_from_icon_name('help-about')
self.set_has_tooltip(True)
self.set_visible(True)
self.connect("button-press-event", self.on_click)
    def greetme(self, data=None):  # if the data=None default is omitted, Python complains about too many arguments being passed to greetme
print ('greetme data',data)
msg=gtk.MessageDialog(None, gtk.DIALOG_MODAL,gtk.MESSAGE_INFO, gtk.BUTTONS_OK, "Greetings")
msg.run()
msg.destroy()
    def on_click(self, data, event):  # data and event are delivered by the "button-press-event" connection made in __init__
print ('self :', self)
print('data :',data)
print('event :',event)
        btn=event.button  # By controlling this value (1-2-3 for left-middle-right) you can call other functions.
print('event.button :',btn)
time=gtk.get_current_event_time() # required by the popup. No time - no popup.
print ('time:', time)
menu = gtk.Menu()
menu_item1 = gtk.MenuItem("First Entry")
menu.append(menu_item1)
menu_item1.connect("activate", self.greetme) #added by gv - it had nothing before
menu_item2 = gtk.MenuItem("Quit")
menu.append(menu_item2)
menu_item2.connect("activate", gtk.main_quit)
menu.show_all()
menu.popup(None, None, None, btn, time) #button can be hardcoded (i.e 1) but time must be correct.
if __name__ == '__main__':
tray = TrayIcon()
gtk.main() |
# coding: utf-8
# Copyright 2016 Vauxoo (https://www.vauxoo.com) <[email protected]>
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl).
from odoo import models, api, _
class AccountChartTemplate(models.Model):
_inherit = "account.chart.template"
@api.multi
def _load_template(
self, company, code_digits=None, transfer_account_id=None,
account_ref=None, taxes_ref=None):
"""
        Set the 'use_cash_basis' and 'cash_basis_account' fields on account.account. This hack is needed because
        the tax template does not have the fields 'use_cash_basis' and 'cash_basis_account'.
        This hunk should be removed in master, as the account_tax_cash_basis module has already been merged into
        the account module.
"""
self.ensure_one()
accounts, taxes = super(AccountChartTemplate, self)._load_template(
company, code_digits=code_digits,
transfer_account_id=transfer_account_id, account_ref=account_ref,
taxes_ref=taxes_ref)
if not self == self.env.ref('l10n_mx.mx_coa'):
return accounts, taxes
account_tax_obj = self.env['account.tax']
account_obj = self.env['account.account']
taxes_acc = {
'IVA': account_obj.search([('code', '=', '208.01.01')]),
'ITAXR_04-OUT': account_obj.search([('code', '=', '216.10.20')]),
'ITAXR_10-OUT': account_obj.search([('code', '=', '216.10.20')]),
'ITAX_1067-OUT': account_obj.search([('code', '=', '216.10.20')]),
'ITAX_167-OUT': account_obj.search([('code', '=', '216.10.20')]),
'ITAX_010-OUT': account_obj.search([('code', '=', '118.01.01')]),
'ITAX_160-OUT': account_obj.search([('code', '=', '118.01.01')])}
for tax in self.tax_template_ids:
if tax.description not in taxes_acc:
continue
account_tax_obj.browse(taxes.get(tax.id)).write({
'use_cash_basis': True,
'cash_basis_account': taxes_acc.get(tax.description).id,
})
return accounts, taxes
@api.model
def generate_journals(self, acc_template_ref, company, journals_dict=None):
"""Set the tax_cash_basis_journal_id on the company"""
res = super(AccountChartTemplate, self).generate_journals(
acc_template_ref, company, journals_dict=journals_dict)
if not self == self.env.ref('l10n_mx.mx_coa'):
return res
journal_basis = self.env['account.journal'].search([
('type', '=', 'general'),
('code', '=', 'CBMX')], limit=1)
company.write({'tax_cash_basis_journal_id': journal_basis.id})
return res
@api.multi
def _prepare_all_journals(self, acc_template_ref, company, journals_dict=None):
"""Create the tax_cash_basis_journal_id"""
res = super(AccountChartTemplate, self)._prepare_all_journals(
acc_template_ref, company, journals_dict=journals_dict)
if not self == self.env.ref('l10n_mx.mx_coa'):
return res
account = self.env.ref('l10n_mx.1_cuenta118_01')
res.append({
'type': 'general',
'name': _('Effectively Paid'),
'code': 'CBMX',
'company_id': company.id,
'default_credit_account_id': account.id,
'default_debit_account_id': account.id,
'show_on_dashboard': True,
})
return res
|
class Event:
def __init__(self, type='GenericEvent', data=None):
self.type = type
if data is None:
self.data = {}
else:
self.data = data
def __repr__(self):
return ''.join(['Event<', self.type, '> ', str(self.data)]) |
# -*- coding: utf-8 -*-
"""Common routines for data in glucometers."""
__author__ = 'Diego Elio Pettenò'
__email__ = '[email protected]'
__copyright__ = 'Copyright © 2013, Diego Elio Pettenò'
__license__ = 'MIT'
import collections
import textwrap
from glucometerutils import exceptions
# Constants for units
UNIT_MGDL = 'mg/dL'
UNIT_MMOLL = 'mmol/L'
VALID_UNITS = [UNIT_MGDL, UNIT_MMOLL]
# Constants for meal information
NO_MEAL = ''
BEFORE_MEAL = 'Before Meal'
AFTER_MEAL = 'After Meal'
# Constants for measure method
BLOOD_SAMPLE = 'blood sample'
CGM = 'CGM' # Continuous Glucose Monitoring
def convert_glucose_unit(value, from_unit, to_unit=None):
"""Convert the given value of glucose level between units.
Args:
value: The value of glucose in the current unit
from_unit: The unit value is currently expressed in
    to_unit: The unit to convert the value to; the other unit if empty.
Returns:
The converted representation of the blood glucose level.
Raises:
exceptions.InvalidGlucoseUnit: If the parameters are incorrect.
"""
if from_unit not in VALID_UNITS:
raise exceptions.InvalidGlucoseUnit(from_unit)
if from_unit == to_unit:
return value
if to_unit is not None:
if to_unit not in VALID_UNITS:
raise exceptions.InvalidGlucoseUnit(to_unit)
  if from_unit == UNIT_MGDL:
return round(value / 18.0, 2)
else:
return round(value * 18.0, 0)
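# Example (sketch): round-tripping a reading between the two supported units.
#   convert_glucose_unit(100, UNIT_MGDL, UNIT_MMOLL)   # -> 5.56
#   convert_glucose_unit(5.56, UNIT_MMOLL, UNIT_MGDL)  # -> 100.0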
_ReadingBase = collections.namedtuple(
'_ReadingBase', ['timestamp', 'value', 'comment', 'measure_method'])
class GlucoseReading(_ReadingBase):
def __new__(cls, timestamp, value, meal=NO_MEAL, comment='',
measure_method=BLOOD_SAMPLE):
"""Constructor for the glucose reading object.
Args:
timestamp: (datetime) Timestamp of the reading as reported by the meter.
value: (float) Value of the reading, in mg/dL.
meal: (string) Meal-relativeness as reported by the reader, if any.
comment: (string) Comment reported by the reader, if any.
measure_method: (string) Measure method as reported by the reader if any,
assuming blood sample otherwise.
The value is stored in mg/dL, even though this is not the standard value,
because at least most of the LifeScan devices report the raw data in this
format.
"""
instance = super(GlucoseReading, cls).__new__(
cls, timestamp=timestamp, value=value, comment=comment,
measure_method=measure_method)
setattr(instance, 'meal', meal)
return instance
def get_value_as(self, to_unit):
"""Returns the reading value as the given unit.
Args:
to_unit: (UNIT_MGDL|UNIT_MMOLL) The unit to return the value to.
"""
return convert_glucose_unit(self.value, UNIT_MGDL, to_unit)
def as_csv(self, unit):
"""Returns the reading as a formatted comma-separated value string."""
return '"%s","%.2f","%s","%s","%s"' % (
self.timestamp, self.get_value_as(unit), self.meal, self.measure_method,
self.comment)
class KetoneReading(_ReadingBase):
def __new__(cls, timestamp, value, comment='', **kwargs):
"""Constructor for the ketone reading object.
Args:
timestamp: (datetime) Timestamp of the reading as reported by the meter.
value: (float) Value of the reading, in mmol/L.
comment: (string) Comment reported by the reader, if any.
    The value is stored in mmol/L, consistent with the unit documented above.
"""
return super(KetoneReading, cls).__new__(
cls, timestamp=timestamp, value=value, comment=comment,
measure_method=BLOOD_SAMPLE)
def get_value_as(self, *args):
"""Returns the reading value in mmol/L."""
return self.value
def as_csv(self, unit):
"""Returns the reading as a formatted comma-separated value string."""
return '"%s","%.2f","%s","%s"' % (
self.timestamp, self.get_value_as(unit), self.measure_method,
self.comment)
_MeterInfoBase = collections.namedtuple(
'_MeterInfoBase', ['model', 'serial_number', 'version_info', 'native_unit'])
class MeterInfo(_MeterInfoBase):
def __new__(cls, model, serial_number='N/A', version_info=(),
native_unit=UNIT_MGDL):
"""Construct a meter information object.
Args:
model: (string) Human-readable model name, depending on driver.
serial_number: (string) Optional serial number to identify the device.
version_info: (list(string)) Optional hardware/software version information.
native_unit: (UNIT_MGDL|UNIT_MMOLL) Native unit of the device for display.
"""
return super(MeterInfo, cls).__new__(
cls, model=model, serial_number=serial_number, version_info=version_info,
native_unit=native_unit)
def __str__(self):
version_information_string = 'N/A'
if self.version_info:
version_information_string = '\n '.join(self.version_info).strip()
return textwrap.dedent("""\
{model}
Serial Number: {serial_number}
Version Information:
{version_information_string}
Native Unit: {native_unit}
""").format(model=self.model, serial_number=self.serial_number,
version_information_string=version_information_string,
native_unit=self.native_unit)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import rospy
import cv2
class ImageConverter:
def __init__(self):
        # Create the publisher for the processed image
self.image_pub = rospy.Publisher("cv_bridge", Image, queue_size=1)
        # Create the CvBridge
self.bridge = CvBridge()
        # Create the subscriber for the camera data and register the callback
self.image_sub = rospy.Subscriber(
"/usb_cam/image_raw", Image, self.callback)
def callback(self, data):
try:
            # Convert the received image message into an OpenCV image
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)
        # Get the image size and draw a circle
(rows, cols, channels) = cv_image.shape
if cols > 60 and rows > 60:
cv2.circle(cv_image, (60, 60), 30, (0, 0, 255), -1)
        # Display the image
cv2.imshow("Image Window", cv_image)
cv2.waitKey(3)
try:
            # Publish the annotated OpenCV image
            self.image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image, "bgr8"))
        except CvBridgeError as e:
            print(e)
if __name__ == '__main__':
try:
        # Create the node
rospy.init_node("cv_bridge")
rospy.loginfo("Starting cv_bridge node")
        # Receive, convert, and republish images
ImageConverter()
rospy.spin()
except KeyboardInterrupt:
print("Shutting down cv_bridge node")
cv2.destroyAllWindows()
|
"""empty message
Revision ID: da63ba1d58b1
Revises: 091deace5f08
Create Date: 2020-10-04 17:06:54.502012
"""
import sqlalchemy as sa
import sqlalchemy_utils
from alembic import op
from project import dbtypes
# revision identifiers, used by Alembic.
revision = "da63ba1d58b1"
down_revision = "091deace5f08"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
# op.drop_table('spatial_ref_sys')
op.add_column(
"image", sa.Column("copyright_text", sa.Unicode(length=255), nullable=True)
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column("image", "copyright_text")
op.create_table(
"spatial_ref_sys",
sa.Column("srid", sa.INTEGER(), autoincrement=False, nullable=False),
sa.Column(
"auth_name", sa.VARCHAR(length=256), autoincrement=False, nullable=True
),
sa.Column("auth_srid", sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column(
"srtext", sa.VARCHAR(length=2048), autoincrement=False, nullable=True
),
sa.Column(
"proj4text", sa.VARCHAR(length=2048), autoincrement=False, nullable=True
),
sa.CheckConstraint(
"(srid > 0) AND (srid <= 998999)", name="spatial_ref_sys_srid_check"
),
sa.PrimaryKeyConstraint("srid", name="spatial_ref_sys_pkey"),
)
# ### end Alembic commands ###
|
import datetime as _datetime
import typing
from google.protobuf.json_format import MessageToDict
from flytekit import __version__
from flytekit.common import interface as _interface
from flytekit.common.constants import SdkTaskType
from flytekit.common.tasks import task as _sdk_task
from flytekit.common.tasks.sagemaker.built_in_training_job_task import SdkBuiltinAlgorithmTrainingJobTask
from flytekit.common.tasks.sagemaker.custom_training_job_task import CustomTrainingJobTask
from flytekit.common.tasks.sagemaker.types import HyperparameterTuningJobConfig, ParameterRange
from flytekit.models import interface as _interface_model
from flytekit.models import literals as _literal_models
from flytekit.models import task as _task_models
from flytekit.models import types as _types_models
from flytekit.models.core import types as _core_types
from flytekit.models.sagemaker import hpo_job as _hpo_job_model
class SdkSimpleHyperparameterTuningJobTask(_sdk_task.SdkTask):
def __init__(
self,
max_number_of_training_jobs: int,
max_parallel_training_jobs: int,
training_job: typing.Union[SdkBuiltinAlgorithmTrainingJobTask, CustomTrainingJobTask],
retries: int = 0,
cacheable: bool = False,
cache_version: str = "",
tunable_parameters: typing.List[str] = None,
):
"""
:param int max_number_of_training_jobs: The maximum number of training jobs that can be launched by this
hyperparameter tuning job
        :param int max_parallel_training_jobs: The maximum number of training jobs that can be launched by this hyperparameter
tuning job in parallel
:param typing.Union[SdkBuiltinAlgorithmTrainingJobTask, CustomTrainingJobTask] training_job: The reference to the training job definition
:param int retries: Number of retries to attempt
:param bool cacheable: The flag to set if the user wants the output of the task execution to be cached
:param str cache_version: String describing the caching version for task discovery purposes
        :param typing.List[str] tunable_parameters: A list of parameters to tune. If you are tuning a built-in
algorithm, refer to the algorithm's documentation to understand the possible values for the tunable
parameters. E.g. Refer to https://docs.aws.amazon.com/sagemaker/latest/dg/IC-Hyperparameter.html for the
list of hyperparameters for Image Classification built-in algorithm. If you are passing a custom
training job, the list of tunable parameters must be a strict subset of the list of inputs defined on
that job. Refer to https://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-define-ranges.html
for the list of supported hyperparameter types.
"""
# Use the training job model as a measure of type checking
hpo_job = _hpo_job_model.HyperparameterTuningJob(
max_number_of_training_jobs=max_number_of_training_jobs,
max_parallel_training_jobs=max_parallel_training_jobs,
training_job=training_job.training_job_model,
).to_flyte_idl()
# Setting flyte-level timeout to 0, and let SageMaker respect the StoppingCondition of
# the underlying training job
# TODO: Discuss whether this is a viable interface or contract
timeout = _datetime.timedelta(seconds=0)
inputs = {}
inputs.update(training_job.interface.inputs)
inputs.update(
{
"hyperparameter_tuning_job_config": _interface_model.Variable(
HyperparameterTuningJobConfig.to_flyte_literal_type(), "",
),
}
)
if tunable_parameters:
inputs.update(
{
param: _interface_model.Variable(ParameterRange.to_flyte_literal_type(), "")
for param in tunable_parameters
}
)
super().__init__(
type=SdkTaskType.SAGEMAKER_HYPERPARAMETER_TUNING_JOB_TASK,
metadata=_task_models.TaskMetadata(
runtime=_task_models.RuntimeMetadata(
type=_task_models.RuntimeMetadata.RuntimeType.FLYTE_SDK, version=__version__, flavor="sagemaker",
),
discoverable=cacheable,
timeout=timeout,
retries=_literal_models.RetryStrategy(retries=retries),
interruptible=False,
discovery_version=cache_version,
deprecated_error_message="",
),
interface=_interface.TypedInterface(
inputs=inputs,
outputs={
"model": _interface_model.Variable(
type=_types_models.LiteralType(
blob=_core_types.BlobType(
format="", dimensionality=_core_types.BlobType.BlobDimensionality.SINGLE,
)
),
description="",
)
},
),
custom=MessageToDict(hpo_job),
)
def __call__(self, *args, **kwargs):
# Overriding the call function just so we clear up the docs and avoid IDEs complaining about the signature.
return super().__call__(*args, **kwargs)
|
import RPi.GPIO as GPIO
from .light_errors import LedError
class Led:
"""
    This is a class used to control LEDs directly connected to the GPIO via the given pin.
See the documentation for an example of how to wire the LED.
"""
def __init__(self, pin):
"""
        This initiates the LED on the given pin, setting it into output mode,
making sure it is off, and setting the PWM up so that the LED can be dimmed.
"""
try:
self.pin = int(pin)
GPIO.setmode(GPIO.BOARD)
GPIO.setup(self.pin, GPIO.OUT)
GPIO.output(self.pin, GPIO.LOW)
self.led_dim = GPIO.PWM(self.pin, 500)
except:
raise LedError("Error during the initiation of the LED class.")
def on(self, brightness=100):
"""
        Turns the defined LED on; the brightness defaults to 100%.
"""
try:
self.led_dim.start(brightness)
except:
raise LedError("Error while turning the LED on.")
def off(self):
"""
Turns the defined LED off.
"""
try:
self.led_dim.stop()
except:
raise LedError("Error while turning the LED off.")
def dim(self, brightness):
"""
        Dims the defined LED. Keep in mind that if you don't first turn the
        LED on, this will error out.
"""
if brightness < 0:
brightness = 0
elif brightness > 100:
brightness = 100
else:
pass
try:
self.led_dim.ChangeDutyCycle(brightness)
except:
raise LedError("Error while dimming the LED. Make sure you have turned the LED on.")
|
"""
training_batch_generate_audio_features.py
Given an event dataset (audio clips extracted from foa_dev through generate_audio_from_annotations.py), this script calculates the audio features
according to the methods defined in get_audio_features.py.
The output files are saved in a folder audio_features in the input event dataset directory. A .npy file containing a numpy array with the audio features is created for each event.
It is possible to run the script in test mode, which saves the outputs in the folder audio_features_test in the same directory.
The parameters needed to properly run the script are:
- events_dataset: name of the folder containing the input event dataset
There are other parameters in the dictionary "options" that could change the output
"""
#Dependencies
from APRI.get_audio_features import *
import os
from APRI.utils import get_class_name_dict
#Script parameters
events_dataset='' # we are working with 3 events_datasets: oracle_mono_signals (ov1), oracle_mono_signals_beam_all (beam), oracle mono signals_beam_all_aug (data augmentation beam)
test=False
def training_batch_generate_audio_features(input_folder,output_folder,options,extra=False):
#Import general parametes
event_type= get_class_name_dict().values()
#I/O paths
input_path = input_folder
output_path= output_folder
# Compute each audio_file generated by "generate_audio_from_annotations.py" and generates audio_features using MusicExtractor
i=0
for event in event_type:
if not os.path.exists(os.path.join(output_path, event)):
os.makedirs(os.path.join(output_path, event))
audio_path= os.path.join(input_path,event) #path to file
for audio in os.scandir(audio_path):
audio_p = os.path.join(audio_path, audio.name) # path to file
loader = MonoLoader(filename=audio_p, sampleRate=24000)
audio_f = loader()
audio_features, column_labels = compute_audio_features(audio_f, options)
if extra:
file_name = os.path.splitext(audio.name)[0]+'_extra'
else:
file_name = os.path.splitext(audio.name)[0]
if i==0:
np.save(os.path.join(output_path, 'column_labels.npy'), column_labels)
i+=1
np.save(os.path.join(output_path, event,file_name+ '.npy'), audio_features)
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 29 16:50:58 2018
@author: Benedikt
"""
import win32com.client
class Microscope(object):
def __init__(self, simulation=False, z0=None):
'''
Parameters
----------
z0: int
only used when in simulation. It is in instrument units!
'''
self.simulation = simulation
if not self.simulation:
self.microscopeObject = win32com.client.Dispatch('Nikon.LvMic.NikonLv')
self.zPos = None
self.currentObjective = None
self.ObjectivPositions = [5, 10, 20, 50, 100]
else:
self.zPos = z0
self.ObjectivPositions = [5, 10, 20, 50, 100]
self.currentObjectiveIndex = 1
self.currentObjective = self.ObjectivPositions[self.currentObjectiveIndex]
# self.getZPosition()
# self.currentObjective = self.getCurrentObjective()
pass
def moveObjectiveForward(self):
'''
Function to move to the next higher Objective.
'''
# self.isMoveallowed()
if self.simulation:
self.currentObjectiveIndex += 1
self.currentObjective = self.ObjectivPositions[self.currentObjectiveIndex]
else:
self.microscopeObject.Nosepiece.Forward()
self.currentObjective = self.getCurrentObjective()
def moveObjectiveBackwards(self):
'''
Function to move to the next lower Objective
'''
# self.isMoveAllowed()
if self.simulation:
self.currentObjectiveIndex -= 1
self.currentObjective = self.ObjectivPositions[self.currentObjectiveIndex]
else:
self.microscopeObject.Nosepiece.Reverse()
self.currentObjective = self.getCurrentObjective()
def moveZAbsolute(self, value): #here no conversion factor?
if self._isAbsoluteMoveAllowed(value):
if self.simulation:
self.zPos = value
else:
self.microscopeObject.ZDrive.MoveAbsolute(value)
else:
print("Move not allowed")
def getCurrentObjective(self):
'''
Function to get the current Objective
Returns
-------
current_obj: int
Integer 5, 10, 20, 50 or 100.
'''
if self.simulation:
return self.currentObjective
else:
self.currentObjective = self.microscopeObject.Nosepiece.Position()
# This gives an integer 5, 10, 20, 50 or 100
return self.ObjectivPositions[self.currentObjective - 1]
def moveZRelative(self, dz): #here no conversion factor?
'''
Function to move the Objective relative to the current position.
Parameters
----------
dz: int
Integer value by which the current posistion is shifted.
'''
if self._isRelativeMoveAllowed(dz):
if self.simulation:
self.zPos += dz
else:
# self.isMoveAllowed
self.microscopeObject.ZDrive.MoveRelative(dz)
self.getZPosition()
else:
print("Move not allowed")
def getZPosition(self):
'''
Function that returns the microscope position.
Returns
-------
zPos: int
Microscope position in instrument units.
'''
if self.simulation:
return self.zPos
else:
self.zPos = self.microscopeObject.ZDrive.Position()
return self.zPos * 20 #conversion factor
def _isRelativeMoveAllowed(self, dz):
limit_in_instrument_units = 30000 # bertram: 24000 #20*1200
if self.getZPosition() + dz <= limit_in_instrument_units:
return False
else:
return True
def _isAbsoluteMoveAllowed(self, z):
limit_in_instrument_units = 30000 #20*1200
if z <= limit_in_instrument_units:
return False
else:
return True
def getObjectiveInformation(self):
'''
Function that prints the used Objectives and their position.
'''
for objective in self.microscopeObject.Nosepiece.Objectives:
print('Objective Name:', objective.Name)
print('Objective Position:', objective.Position)
if __name__ == "__main__":
mic = Microscope()
#mic.moveObjectiveBackwards()
#mic.moveZAbsolute(35000)
print(mic.getZPosition())
#mic.moveZRelative(-50)
#mic.moveObjectiveForward()
#mic.moveZRelative(-499)
#print(mic.getZPosition())
#print(mic.microscopeObject.zDrive.ValuePerUnit())
# mic.moveObjectiv1eBackwards()
# print(mic.getCurrentObjective())
# mic.moveObjectiveBackwards()
# mic.moveObjectiveForward()
# print(mic.getZPosition())
# mic.moveZRelative(10)
# print(mic.getZPosition())
# mic.moveZAbsolute(36816)
# print(mic.getZPosition())
|
# Generated by Django 2.0.2 on 2020-06-18 03:52
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('system', '0002_auto_20200617_0840'),
]
operations = [
migrations.AddField(
model_name='menu',
name='menu_type',
field=models.IntegerField(blank=True, choices=[(1, '一级类目'), (2, '二级类目'), (3, '三级类目')], help_text='类目级别', null=True, verbose_name='类目级别'),
),
migrations.AlterField(
model_name='menu',
name='parent',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='sub_menu', to='system.Menu', verbose_name='父菜单'),
),
]
|
"""
test_multi_group.py
Author: Jordan Mirocha
Affiliation: University of Colorado at Boulder
Created on: Sun Mar 24 01:04:54 2013
Description:
"""
|
from rest_framework import serializers
from .models import Course, CourseSection, CourseMeetingTime
from .models import Instructor, InstructorOfficeHours
class CourseMeetingTimeSerializer(serializers.ModelSerializer):
coursesectionID = serializers.PrimaryKeyRelatedField(queryset=CourseSection.objects.all(), source='meetSection.id')
class Meta:
model = CourseMeetingTime
fields = ('id',
'meetSection',
'meetType',
'meetDates',
'meetStartTime',
'meetEndTime',
'meetInstructor',
'coursesectionID')
class CourseSectionSerializer(serializers.ModelSerializer):
courseID = serializers.PrimaryKeyRelatedField(queryset=Course.objects.all(), source='currentCourse.id')
courseMeetingTimes = CourseMeetingTimeSerializer(many=True, read_only=True)
class Meta:
model = CourseSection
fields = ('id', 'currentCourse', 'sectionID', 'courseID', 'courseMeetingTimes')
#Initial Untested Serializer For InstructorOfficeHours:
#Tried To Follow Existing Format/Syntax As Above Serializers.
#Note To Team: Frontend Would Have To Grab iType From meetInstructor
#To Determine/Output Whether ProfessorOfficeHours/TeachingAssistantOfficeHours.
class InstructorOfficeHoursSerializer(serializers.ModelSerializer):
instructorID = serializers.PrimaryKeyRelatedField(queryset=Instructor.objects.all(), source='meetInstructor.id')
class Meta:
model = InstructorOfficeHours
fields = ('id',
'meetInstructor',
'meetLocation',
'meetDates',
'meetStartTime',
'meetEndTime',
'instructorID')
#Initial Untested Serializer For Instructor:
#Tried To Follow Existing Format/Syntax As Above Serializers.
#Note To Team: Frontend Would Have To Grab iType From This Object
#To Determine/Output Whether Professor/TeachingAssistant.
class InstructorSerializer(serializers.ModelSerializer):
#Adjust For Instructor Parameters:
courseID = serializers.PrimaryKeyRelatedField(queryset=Course.objects.all(), source='currentCourse.id')
iOfficeHours = InstructorOfficeHoursSerializer(many=True, read_only=True)
class Meta:
model = Instructor
fields = ('id', 'currentCourse', 'iType', 'iName', 'iEmail', 'courseID', 'iOfficeHours')
#Note To Team: Added Field For Instructors.
class CourseSerializer(serializers.ModelSerializer):
sections = CourseSectionSerializer(many=True, read_only=True)
instructors = InstructorSerializer(many=True, read_only=True)
class Meta:
model = Course
fields = ('id', 'courseName', 'courseValue', 'courseAbbrev', 'sections', 'instructors')
|
from typing import List
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score
import numpy as np
from scipy.sparse import hstack
def run_classification(train_df, dev_df, regularization_weight, label_weights: List[float]=None,
lowercase_all_text=True, string_prefix='', print_results=True, f1_avg: str='weighted',
also_output_logits=False, also_report_binary_precrec=False, double_context_features=False,
use_context=None):
assert use_context is not None
list_of_all_training_text = []
list_of_all_training_contexts = []
list_of_all_training_labels = []
if label_weights is not None:
class_weight = {}
for i in range(len(label_weights)):
class_weight[i] = label_weights[i]
else:
class_weight = None
for index, row in train_df.iterrows():
if use_context:
if not double_context_features:
text = row['contextbefore'] + ' ' + row['text']
else:
text = row['text']
context = row['contextbefore']
else:
text = row['text']
if lowercase_all_text:
text = text.lower()
if use_context and double_context_features:
context = context.lower()
if use_context and double_context_features:
list_of_all_training_contexts.append(context)
list_of_all_training_text.append(text)
label = int(row['labels'])
list_of_all_training_labels.append(label)
list_of_all_dev_contexts = []
list_of_all_dev_text = []
list_of_all_dev_labels = []
for index, row in dev_df.iterrows():
if use_context:
if not double_context_features:
text = row['contextbefore'] + ' ' + row['text']
else:
text = row['text']
context = row['contextbefore']
else:
text = row['text']
if lowercase_all_text:
text = text.lower()
if use_context and double_context_features:
context = context.lower()
if use_context and double_context_features:
list_of_all_dev_contexts.append(context)
list_of_all_dev_text.append(text)
label = int(row['labels'])
list_of_all_dev_labels.append(label)
cv = CountVectorizer()
context_cv = CountVectorizer()
training_docs = cv.fit_transform(list_of_all_training_text)
vocab_list = cv.get_feature_names()
dev_docs = cv.transform(list_of_all_dev_text)
if use_context and double_context_features:
training_contexts = context_cv.fit_transform(list_of_all_training_contexts)
        context_vocab_list = context_cv.get_feature_names()
dev_contexts = context_cv.transform(list_of_all_dev_contexts)
training_docs = hstack([training_contexts, training_docs])
dev_docs = hstack([dev_contexts, dev_docs])
lr_model = LogisticRegression(class_weight=class_weight, max_iter=10000, C=1/regularization_weight)
lr_model.fit(training_docs, list_of_all_training_labels)
predicted_labels = lr_model.predict(dev_docs)
accuracy = float(accuracy_score(list_of_all_dev_labels, predicted_labels))
f1 = float(f1_score(list_of_all_dev_labels, predicted_labels, average=f1_avg))
if also_report_binary_precrec:
prec = float(precision_score(list_of_all_dev_labels, predicted_labels, average=f1_avg))
rec = float(recall_score(list_of_all_dev_labels, predicted_labels, average=f1_avg))
if print_results:
if also_report_binary_precrec:
if double_context_features:
print(string_prefix + 'With regularization weight ' + str(regularization_weight) +
' and DOUBLED context features, logistic regression result: accuracy is ' + str(accuracy) +
' and ' + f1_avg + ' f1 is ' + str(f1) + ' (precision is ' + str(prec) + ' and recall is ' +
str(rec) + ')')
else:
print(string_prefix + 'With regularization weight ' + str(regularization_weight) +
' and NO doubled context features, logistic regression result: accuracy is ' + str(accuracy) +
' and ' + f1_avg + ' f1 is ' + str(f1) + ' (precision is ' + str(prec) + ' and recall is ' +
str(rec) + ')')
else:
if double_context_features:
print(string_prefix + 'With regularization weight ' + str(regularization_weight) +
' and DOUBLED context features, logistic regression result: accuracy is ' + str(accuracy) +
' and ' + f1_avg + ' f1 is ' + str(f1))
else:
print(string_prefix + 'With regularization weight ' + str(regularization_weight) +
' and NO doubled context features, logistic regression result: accuracy is ' + str(accuracy) +
' and ' + f1_avg + ' f1 is ' + str(f1))
if not also_output_logits:
if also_report_binary_precrec:
return f1, accuracy, list_of_all_dev_labels, list(predicted_labels), prec, rec
else:
return f1, accuracy, list_of_all_dev_labels, list(predicted_labels)
else:
# get logits
output_logits = lr_model.predict_log_proba(dev_docs)
if also_report_binary_precrec:
return f1, accuracy, list_of_all_dev_labels, list(predicted_labels), output_logits, prec, rec
else:
return f1, accuracy, list_of_all_dev_labels, list(predicted_labels), output_logits
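# Minimal usage sketch (illustrative; the tiny DataFrames are made up and only use
# the 'text', 'contextbefore' and 'labels' columns expected above):
#   import pandas as pd
#   train_df = pd.DataFrame({'text': ['great service', 'awful wait', 'friendly staff', 'rude reply'],
#                            'contextbefore': ['', '', '', ''], 'labels': [1, 0, 1, 0]})
#   dev_df = pd.DataFrame({'text': ['very helpful'], 'contextbefore': [''], 'labels': [1]})
#   f1, acc, gold, pred = run_classification(train_df, dev_df, regularization_weight=1.0,
#                                            use_context=False)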
|
# -*- coding: utf-8 -*-
import scrapy
import os
from ..Util.VideoHelper import ParseVideo,VideoGot
from ..items import TumblrItem
from redis import *
from ..Util.Conf import Config
origin_url='https://www.tumblr.com'
origin_video_url='https://vtt.tumblr.com/{0}.mp4'
redis_db = StrictRedis(host='127.0.0.1', port=6379, db=0)
class FollowingSpider(scrapy.Spider):
name = 'following'
allowed_domains = ['tumblr.com']
start_urls = ['http://tumblr.com/']
def start_requests(self):
        return [scrapy.http.FormRequest(self.start_urls[0],cookies=dict([l.split("=", 1) for l in Config.Cookies.split("; ")]),callback=self.parse)]
def parse(self, response):
pass
|
# -*- coding: utf-8 -*-
"""
preprocess_image.py
created: 14:56 - 18/08/2020
author: kornel
"""
# --------------------------------------------------------------------------------------------------
# IMPORTS
# --------------------------------------------------------------------------------------------------
# standard lib imports
import logging
# 3rd party imports
import numpy as np
from keras.preprocessing import image
from keras.applications.xception import preprocess_input as xception_preprocess
from keras.applications.vgg16 import preprocess_input as vgg16_preprocess
from keras.applications.resnet50 import preprocess_input as resnet50_preprocess
from tqdm import tqdm
from PIL import ImageFile, Image
ImageFile.LOAD_TRUNCATED_IMAGES = True
# project imports
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
# --------------------------------------------------------------------------------------------------
# FUNCTIONS AND CLASSES
# --------------------------------------------------------------------------------------------------
def scale_tensor(tensor):
"""
Norms values between 0 and 255 to vlaues between 0 and 1.
Parameters
----------
tensor: np.array
array representing img-data
Returns
-------
np.array
normed array with values between 0 and 1
"""
return tensor.astype('float32') / 255
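# For example (sketch):
#   scale_tensor(np.array([0, 127.5, 255]))  # -> array([0. , 0.5, 1. ], dtype=float32)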
def path_to_tensor(img_arg, scale=False):
"""
Loads a image and transforms it into a 4D-numpy of shape (1, 224, 224, 3) array with its RGB-values.
Parameters
----------
    img_arg: file handle or str
        path to the image to load, or an already-opened file handle
scale: bool
whether to scale the resulting values of the tensor to values between 0 and 1
Returns
-------
np.array
"""
# if opened:
# img = read_buffer(img_arg)
# else:
# img = load_image(img_arg)
img = Image.open(img_arg)
    img = img.convert('RGB')
img = img.resize((224, 224), Image.NEAREST)
# convert PIL.Image.Image type to 3D tensor with shape (224, 224, 3)
arr = image.img_to_array(img)
if scale:
arr = scale_tensor(arr)
# convert 3D tensor to 4D tensor with shape (1, 224, 224, 3) and return 4D tensor
return np.expand_dims(arr, axis=0)
def paths_to_tensor(img_paths, scale=False):
"""
Loads a list of images and transforms them into a 4D-numpy array with their RGB-values.
Parameters
----------
img_paths: iterable
iterable with pathes to images
scale: bool
whether to scale the resulting values of the tensor to values between 0 and 1
Returns
-------
np.array
array of images (RGB-arrays)
"""
list_of_tensors = [path_to_tensor(img_path, scale=scale) for img_path in tqdm(img_paths)]
return np.vstack(list_of_tensors)
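# Example (sketch; the file names are placeholders for real image paths):
#   batch = paths_to_tensor(['img_001.jpg', 'img_002.jpg'], scale=True)
#   batch.shape  # -> (2, 224, 224, 3)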
def preprocess_resnet50(img_path):
"""
Loads image and applies preprocessing based on Resnet50-models and returns 4D-array.
Parameters
----------
img_path: str
        path to the image
Returns
-------
np.array
"""
return resnet50_preprocess(path_to_tensor(img_path, scale=False))
def preprocess_vgg16(img_path):
"""
Loads image and applies preprocessing based on VGG16-models and returns 4D-array.
Parameters
----------
img_path: str
        path to the image
Returns
-------
np.array
"""
return vgg16_preprocess(path_to_tensor(img_path, scale=False))
def preprocess_xception(img_path):
"""
Loads image and applies preprocessing based on Xception-models and returns 4D-array.
Parameters
----------
img_path: str
        path to the image
Returns
-------
np.array
"""
return xception_preprocess(path_to_tensor(img_path, scale=False))
|
# Decombinator
# James M. Heather, August 2016, UCL
# https://innate2adaptive.github.io/Decombinator/
##################
### BACKGROUND ###
##################
# Searches FASTQ reads (produced through Demultiplexor.py) for rearranged TCR chains
# Can currently analyse human and mouse TCRs, both alpha/beta and gamma/delta chains
# NB: Human alpha/beta TCRs are the most thoroughly tested, due to the nature of the data we generated. YMMV.
# Current version (v3) is optimised for interpretation of data generated using our wet lab protocol, but could be modified to work on any data.
# Script represents improvements upon a combination of the two previously in use Decombinator versions
# i.e. Decombinator V2.2 (written primarily by Nic Thomas, see Thomas et al, Bioinformatics 2013, DOI: 10.1093/bioinformatics/btt004)
# and vDCR (which was v1.4 modified by James Heather, see Heather et al, Frontiers in Immunology 2016, DOI: 10.3389/fimmu.2015.00644)
# Now faster, more accurate and easier to use than either of the previous versions.
##################
###### INPUT #####
##################
# As with the entire pipeline, Decombinator is run using command line arguments to provide user parameters
# All arguments can be read by viewing the help data, by running python Decombinator.py -h
# Takes FASTQ reads produced by Demultiplexor.py (unzipped or gzipped), which is the minimum required command line input, using the -fq flag
# NB: Data must have been generated using the appropriate 5'RACE ligation protocol, using the correct SP2-I8-6N-I8-6N oligonucleotide
# The TCR chain locus to look for can be explicitly specified using the -c flag
# Users can use their choice of chain identifiers from this list (case insensitive): a/b/g/d/alpha/beta/gamma/delta/TRA/TRB/TRG/TRD/TCRA/TCRB/TCRG/TCRD
# If no chain is provided (or if users wish to minimise input arguments), script can infer chain from the FASTQ filename
# I.e. "alpha_sample.fq" would be searched for alpha chain recombinations
# NB: This autodetection only works if there is only ONE TCR locus present in name (which must be spelt out in full)
# Other optional flags:
# -s/--suppresssummary: Suppress the production of a summary file containing details of the run into a 'Logs' directory.
# -dz/--dontgzip: Suppress the automatic compression of output demultiplexed FASTQ files with gzip.
# Using this flag makes the script execute faster, but data will require more storage space.
# -dc/--dontcount: Suppress the running line count that is otherwise printed every 100,000 reads.
# Helps in monitoring progress of large batches.
# -dk/--dontcheck: Suppress the FASTQ sanity check.
# Strongly recommended to leave alone: sanity check inspects first FASTQ read for basic FASTQ parameters.
# -pf/--prefix: Allows users to specify the prefix of the Decombinator TCR index files produced. Default = 'dcr_'
# -ex/--extension: Allows users to specify the file extension of the Decombinator TCR index files produced. Default = '.n12'
# -or/--orientation: Allows users to specify which DNA orientations to check for TCR reads. Default = reverse only, as that's what the protocol produces.
# This will likely need to be changed for analysing data produced by protocols other than our own.
# -tg/--tags: Allows users to specify which tag set they wish to use. For human alpha/beta TCRs, a new 'extended' tag set is recommended, as it covers more genes.
# Unfortunately an extended tag set is only currently available for human a/b genes.
# -sp/--species: Current options are only human or mouse. Help could potentially be provided for generation of tags for different species upon request.
# -N/--allowNs: Provides users the option to allow 'N's (ambiguous base calls), overriding the filter that typically removes rearrangements that contain them.
# Users are recommended to not allow Ns, as such bases are both themselves low quality data and predict reads that are generally less trustworthy.
# -ln/--lenthreshold: The length threshold which (the inter-tag region of) successful rearrangements must be under to be accepted. Default = 130.
# -tfdir/--tagfastadir: The path to a local copy of a folder containing the FASTA and Decombinator tag files required for offline analysis.
# Ordinarily such files can be downloaded on the fly, reducing local clutter.
# By default the script looks for the required files in the present working directory, then in a subdirectory called "Decombinator-Tags-FASTAs", then online.
# Files are hosted on GitHub, here: https://github.com/innate2adaptive/Decombinator-Tags-FASTAs
# -nbc/--nobarcoding: Run Decombinator without any barcoding, i.e. use the whole read.
# Recommended when running on data not produced using the Innate2Adaptive lab's ligation-mediated amplification protocol
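# Example invocations (illustrative; the file names are placeholders):
#   python Decombinator.py -fq alpha_sample.fq
#   python Decombinator.py -fq beta_sample.fq.gz -c b -or both -tg extended -sp human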
##################
##### OUTPUT #####
##################
# Produces a '.n12' file by default, which is a standard comma-delimited Decombinator output file with several additional fields:
# V index, J index, # V deletions, # J deletions, insert, ID, TCR sequence, TCR quality, barcode sequence, barcode quality
# NB The TCR sequence given here is the 'inter-tag' region, i.e. the sequence between the start of the found V tag the end of the found J tag
##################
#### PACKAGES ####
##################
from __future__ import division
import sys
import os
import urllib2
import string
import collections as coll
import argparse
import gzip
import Levenshtein as lev
from Bio import SeqIO
from Bio.Seq import Seq
from acora import AcoraBuilder
from time import time, strftime
__version__ = '3.1'
##########################################################
############# READ IN COMMAND LINE ARGUMENTS #############
##########################################################
def args():
"""args(): Obtains command line arguments which dictate the script's behaviour"""
# Help flag
parser = argparse.ArgumentParser(
description='Decombinator v3.1: find rearranged TCR sequences in HTS data. Please go to https://innate2adaptive.github.io/Decombinator/ for more details.')
# Add arguments
parser.add_argument(
'-fq', '--fastq', type=str, help='Correctly demultiplexed/processed FASTQ file containing TCR reads', required=True)
parser.add_argument(
'-c', '--chain', type=str, help='TCR chain (a/b/g/d)', required=False)
parser.add_argument(
'-s', '--suppresssummary', action='store_true', help='Suppress the production of summary data log file', required=False)
parser.add_argument(
'-dz', '--dontgzip', action='store_true', help='Stop the output FASTQ files automatically being compressed with gzip', required=False)
parser.add_argument(
'-dk', '--dontcheck', action='store_true', help='Skip the FASTQ check', required=False, default=False)
parser.add_argument(
'-dc', '--dontcount', action='store_true', help='Stop Decombinator printing a running count', required=False)
parser.add_argument(
'-ex', '--extension', type=str, help='Specify the file extension of the output DCR file. Default = \"n12\"', required=False, default="n12")
parser.add_argument(
'-pf', '--prefix', type=str, help='Specify the prefix of the output DCR file. Default = \"dcr_\"', required=False, default="dcr_")
parser.add_argument(
'-or', '--orientation', type=str, help='Specify the orientation to search in (forward/reverse/both). Default = reverse', required=False, default="reverse")
parser.add_argument(
'-tg', '--tags', type=str, help='Specify which Decombinator tag set to use (extended or original). Default = extended', required=False, default="extended")
parser.add_argument(
'-sp', '--species', type=str, help='Specify which species TCR repertoire the data consists of (human or mouse). Default = human', required=False, default="human")
parser.add_argument(
'-N', '--allowNs', action='store_true', help='Whether to allow VJ rearrangements containing ambiguous base calls (\'N\'). Default = False', required=False)
parser.add_argument(
'-ln', '--lenthreshold', type=int, help='Acceptable threshold for inter-tag (V to J) sequence length. Default = 130', required=False, default=130)
parser.add_argument(
'-tfdir', '--tagfastadir', type=str, help='Path to folder containing TCR FASTA and Decombinator tag files, for offline analysis. \
Default = \"Decombinator-Tags-FASTAs\".', required=False, default="Decombinator-Tags-FASTAs")
parser.add_argument(
'-nbc', '--nobarcoding', action='store_true', help='Option to run Decombinator without barcoding, i.e. so as to run on data produced by any protocol.', required=False)
return parser.parse_args()
##########################################################
############# FASTQ SANITY CHECK AND PARSING #############
##########################################################
def fastq_check(infile):
"""fastq_check(file): Performs a rudimentary sanity check to see whether a file is indeed a FASTQ file"""
success = True
#if infile.endswith('.gz'):
with opener(infile) as possfq:
try:
read = [next(possfq) for x in range(4)]
except:
print "There are fewer than four lines in this file, and thus it is not a valid FASTQ file. Please check input and try again."
sys.exit()
# @ check
if read[0][0] != "@":
success = False
# Descriptor check
if read[2][0] != "+":
success = False
# Read/quality match check
if len(read[1]) != len(read[3]):
success = False
return(success)
def revcomp(read):
"""rc(read): Wrapper for SeqIO reverse complement function"""
return str(Seq(read).reverse_complement())
def read_tcr_file(species, tagset, gene, filetype, expected_dir_name):
""" Reads in the FASTA and tag data for the appropriate TCR locus """
# Define expected file name
expected_file = species + "_" + tagset + "_" + "TR" + chain.upper() + gene.upper() + "." + filetype
# First check whether the files are available locally (in pwd or in bundled directory)
if os.path.isfile(expected_file):
fl = expected_file
fl_opener = open
elif os.path.isfile(expected_dir_name + os.sep + expected_file):
fl = expected_dir_name + os.sep + expected_file
fl_opener = open
else:
try:
fl = "https://raw.githubusercontent.com/innate2adaptive/Decombinator-Tags-FASTAs/master/" + expected_file
urllib2.urlopen(urllib2.Request(fl)) # Request URL, see whether is found
fl_opener = urllib2.urlopen
except:
print "Cannot find following file locally or online:", expected_file
print "Please either run Decombinator with internet access, or point Decombinator to local copies of the tag and FASTA files with the \'-tf\' flag."
sys.exit()
# Return opened file, for either FASTA or tag file parsing
return fl_opener(fl)
def readfq(fp):
"""
readfq(file): Heng Li's Python implementation of his readfq function
https://github.com/lh3/readfq/blob/master/readfq.py
"""
last = None # this is a buffer keeping the last unprocessed line
while True: # mimic closure; is it a bad idea?
if not last: # the first record or a record following a fastq
for l in fp: # search for the start of the next record
if l[0] in '>@': # fasta/q header line
last = l[:-1] # save this line
break
if not last: break
name, seqs, last = last[1:].partition(" ")[0], [], None
for l in fp: # read the sequence
if l[0] in '@+>':
last = l[:-1]
break
seqs.append(l[:-1])
if not last or last[0] != '+': # this is a fasta record
yield name, ''.join(seqs), None # yield a fasta record
if not last: break
else: # this is a fastq record
seq, leng, seqs = ''.join(seqs), 0, []
for l in fp: # read the quality
seqs.append(l[:-1])
leng += len(l) - 1
if leng >= len(seq): # have read enough quality
last = None
yield name, seq, ''.join(seqs); # yield a fastq record
break
if last: # reach EOF before reading enough quality
yield name, seq, None # yield a fasta record instead
break
#####################################
############# DECOMBINE #############
#####################################
def vanalysis(read):
hold_v = v_key.findall(read)
if hold_v:
if len(hold_v) > 1:
counts['multiple_v_matches'] += 1
return
v_match = v_seqs.index(hold_v[0][0]) # Assigns V
temp_end_v = hold_v[0][1] + jump_to_end_v[v_match] - 1 # Finds where the end of a full V would be
v_seq_start = hold_v[0][1]
end_v_v_dels = get_v_deletions( read, v_match, temp_end_v, v_regions )
if end_v_v_dels: # If the number of deletions has been found
return v_match, end_v_v_dels[0], end_v_v_dels[1], v_seq_start
else:
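# No intact V tag found: attempt rescue via the half-tags, accepting a full tag that matches
# the read with at most one mismatch (Hamming distance <= 1) in the half not found exactly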
hold_v1 = half1_v_key.findall(read)
if hold_v1:
for i in range(len(hold_v1)):
indices = [y for y, x in enumerate(half1_v_seqs) if x == hold_v1[i][0] ]
for k in indices:
if len(v_seqs[k]) == len(read[hold_v1[i][1]:hold_v1[i][1]+len(v_seqs[half1_v_seqs.index(hold_v1[i][0])])]):
if lev.hamming( v_seqs[k], read[hold_v1[i][1]:hold_v1[i][1]+len(v_seqs[k])] ) <= 1:
counts['verr2'] += 1
v_match = k
temp_end_v = hold_v1[i][1] + jump_to_end_v[v_match] - 1 # Finds where the end of a full V would be
end_v_v_dels = get_v_deletions( read, v_match, temp_end_v, v_regions )
if end_v_v_dels:
v_seq_start = hold_v1[i][1]
return v_match, end_v_v_dels[0], end_v_v_dels[1], v_seq_start
counts['foundv1notv2'] += 1
return
else:
hold_v2 = half2_v_key.findall(read)
if hold_v2:
for i in range(len(hold_v2)):
indices = [y for y, x in enumerate(half2_v_seqs) if x == hold_v2[i][0] ]
for k in indices:
if len(v_seqs[k]) == len(read[hold_v2[i][1]-v_half_split:hold_v2[i][1]-v_half_split+len(v_seqs[half2_v_seqs.index(hold_v2[i][0])])]):
if lev.hamming( v_seqs[k], read[hold_v2[i][1]-v_half_split:hold_v2[i][1]+len(v_seqs[k])-v_half_split] ) <= 1:
counts['verr1'] += 1
v_match = k
temp_end_v = hold_v2[i][1] + jump_to_end_v[v_match] - v_half_split - 1 # Finds where the end of a full V would be
end_v_v_dels = get_v_deletions( read, v_match, temp_end_v, v_regions )
if end_v_v_dels:
v_seq_start = hold_v2[i][1] - v_half_split
return v_match, end_v_v_dels[0], end_v_v_dels[1], v_seq_start
counts['foundv2notv1'] += 1
return
else:
counts['no_vtags_found'] += 1
return
def janalysis(read):
hold_j = j_key.findall(read)
if hold_j:
if len(hold_j) > 1:
counts['multiple_j_matches'] += 1
return
j_match = j_seqs.index(hold_j[0][0]) # Assigns J
temp_start_j = hold_j[0][1] - jump_to_start_j[j_match] # Finds where the start of a full J would be
j_seq_end = hold_j[0][1] + len(hold_j[0][0])
start_j_j_dels = get_j_deletions( read, j_match, temp_start_j, j_regions )
if start_j_j_dels: # If the number of deletions has been found
return j_match, start_j_j_dels[0], start_j_j_dels[1], j_seq_end
else:
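# No intact J tag found: attempt rescue via the half-tags, accepting a full tag that matches
# the read with at most one mismatch (Hamming distance <= 1) in the half not found exactly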
hold_j1 = half1_j_key.findall(read)
if hold_j1:
for i in range(len(hold_j1)):
indices = [y for y, x in enumerate(half1_j_seqs) if x == hold_j1[i][0] ]
for k in indices:
if len(j_seqs[k]) == len(read[hold_j1[i][1]:hold_j1[i][1]+len(j_seqs[half1_j_seqs.index(hold_j1[i][0])])]):
if lev.hamming( j_seqs[k], read[hold_j1[i][1]:hold_j1[i][1]+len(j_seqs[k])] ) <= 1:
counts['jerr2'] += 1
j_match = k
temp_start_j = hold_j1[i][1] - jump_to_start_j[j_match] # Finds where the start of a full J would be
j_seq_end = hold_j1[i][1] + len(hold_j1[i][0]) + j_half_split
start_j_j_dels = get_j_deletions( read, j_match, temp_start_j, j_regions )
if start_j_j_dels:
return j_match, start_j_j_dels[0], start_j_j_dels[1], j_seq_end
counts['foundj1notj2'] += 1
return
else:
hold_j2 = half2_j_key.findall(read)
if hold_j2:
for i in range(len(hold_j2)):
indices = [y for y, x in enumerate(half2_j_seqs) if x == hold_j2[i][0] ]
for k in indices:
if len(j_seqs[k]) == len(read[hold_j2[i][1]-j_half_split:hold_j2[i][1]-j_half_split+len(j_seqs[half2_j_seqs.index(hold_j2[i][0])])]):
if lev.hamming( j_seqs[k], read[hold_j2[i][1]-j_half_split:hold_j2[i][1]+len(j_seqs[k])-j_half_split] ) <= 1:
counts['jerr1'] += 1
j_match = k
temp_start_j = hold_j2[i][1] - jump_to_start_j[j_match] - j_half_split # Finds where the start of a full J would be
j_seq_end = hold_j2[i][1] + len(hold_j2[i][0])
start_j_j_dels = get_j_deletions( read, j_match, temp_start_j, j_regions )
if start_j_j_dels:
return j_match, start_j_j_dels[0], start_j_j_dels[1], j_seq_end
counts['foundj2notj1'] += 1
return
else:
counts['no_j_assigned'] += 1
return
def dcr(read, inputargs):
"""dcr(read): Core function which checks a read (in the given frame) for a rearranged TCR of the specified chain.
Returns a list giving: V gene index, J gene index, # deletions in V gene, # deletions in J gene,
insert sequence (between ends of V and J), inter-tag sequence (for collapsing), and its quality scores"""
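# For illustration only, a successful call might return a (hypothetical) list such as
# [12, 5, 3, 2, 'ACGGT', 18, 120]: V index 12, J index 5, 3 V deletions, 2 J deletions,
# insert 'ACGGT', V tag starting at read position 18 and J tag ending at read position 120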
v_seq_start = 0
j_seq_end = 0
vdat = vanalysis(read)
if not vdat:
return
jdat = janalysis(read)
if jdat:
# Filter out rearrangements with indications they probably represent erroneous sequences
if "N" in read[vdat[3]:jdat[3]] and inputargs['allowNs'] == False: # Ambiguous base in inter-tag region
counts['dcrfilter_intertagN'] += 1
elif (jdat[3] - vdat[3]) >= inputargs['lenthreshold']: # Inter-tag length threshold (J tag end minus V tag start)
counts['dcrfilter_toolong_intertag'] += 1
elif vdat[2] > (jump_to_end_v[vdat[0]] - len(v_seqs[vdat[0]])) or jdat[2] > jump_to_start_j[jdat[0]]: # Impossible number of deletions
counts['dcrfilter_imposs_deletion'] += 1
elif (vdat[3] + len(v_seqs[vdat[0]])) > (jdat[3] + len(j_seqs[jdat[0]])): # Overlapping tags
counts['dcrfilter_tag_overlap'] += 1
else:
vj_details = [vdat[0], jdat[0], vdat[2], jdat[2], read[vdat[1]+1:jdat[1]], vdat[3], jdat[3]]
return vj_details
else:
counts['VJ_assignment_failed'] += 1
return
###########################################################
############# ANCILLARY DECOMBINING FUNCTIONS #############
###########################################################
def import_tcr_info(inputargs):
""" import_tcr_info: Gathers the required TCR chain information for Decombining """
# Get chain information
global chainnams, chain, counts
counts = coll.Counter()
chainnams = {"a": "alpha", "b": "beta", "g": "gamma", "d": "delta"}
# Detect whether chain specified in filename
inner_filename_chains = [x for x in chainnams.values() if x in inputargs['fastq'].lower()]
if len(inner_filename_chains) == 1:
counts['chain_detected'] = 1
if inputargs['chain']:
if inputargs['chain'].upper() in ['A', 'ALPHA', 'TRA', 'TCRA']:
chain = "a"
elif inputargs['chain'].upper() in ['B', 'BETA', 'TRB', 'TCRB']:
chain = "b"
elif inputargs['chain'].upper() in ['G', 'GAMMA', 'TRG', 'TCRG']:
chain = "g"
elif inputargs['chain'].upper() in ['D', 'DELTA', 'TRD', 'TCRD']:
chain = "d"
else:
print nochain_error
sys.exit()
else:
# If no chain provided, try and infer from filename
if counts['chain_detected'] == 1:
chain = inner_filename_chains[0][0]
else:
nochain_error = "TCR chain not recognised. \n \
Please either include (one) chain name in the file name (i.e. alpha/beta/gamma/delta),\n \
or use the \'-c\' flag with an explicit chain option (a/b/g/d, case-insensitive)."
print nochain_error
sys.exit()
#################################################
############# GET GENES, BUILD TRIE #############
#################################################
print 'Importing TCR', chainnams[chain], 'gene sequences...'
# First check that valid tag/species combinations have been used
if inputargs['tags'] == "extended" and inputargs['species'] == "mouse":
print "Please note that there is currently no extended tag set for mouse TCR genes.\n \
Decombinator will now switch the tag set in use from \'extended\' to \'original\'.\n \
In future, consider editing the script to change the default, or use the appropriate flags (-sp mouse -tg original)."
inputargs['tags'] = "original"
if inputargs['tags'] == "extended" and ( chain == 'g' or chain == 'd' ):
print "Please note that there is currently no extended tag set for gamma/delta TCR genes.\n \
Decombinator will now switch the tag set in use from \'extended\' to \'original\'.\n \
In future, consider editing the script to change the default, or use the appropriate flags."
inputargs['tags'] = "original"
# Set tag split position, and check tag set. Note that the original tag set uses shorter J half-tags, as its J tags are themselves shorter.
global v_half_split, j_half_split
if inputargs['tags'] == "extended":
v_half_split, j_half_split = [10,10]
elif inputargs['tags'] == "original":
v_half_split, j_half_split = [10,6]
else:
print "Tag set unrecognised; should be either \'extended\' or \'original\' for human, or just \'original\' for mouse. \n \
Please check tag set and species flag."
sys.exit()
# Check species information
if inputargs['species'] not in ["human", "mouse"]:
print "Species not recognised. Please select either \'human\' (default) or \'mouse\'.\n \
If mouse is required by default, consider changing the default value in the script."
sys.exit()
# Look for tag and V/J fasta and tag files: if these cannot be found in the working directory, source them from GitHub repositories
# Note that fasta/tag files fit the pattern "species_tagset_gene.[fasta/tags]"
# I.e. "[human/mouse]_[extended/original]_TR[A/B/G/D][V/J].[fasta/tags]"
for gene in ['v', 'j']:
# Get FASTA data
fasta_file = read_tcr_file(inputargs['species'], inputargs['tags'], gene, "fasta", inputargs['tagfastadir'])
globals()[gene + "_genes"] = list(SeqIO.parse(fasta_file, "fasta"))
fasta_file.close()
globals()[gene+"_regions"] = []
for g in range(0, len(globals()[gene+"_genes"])):
globals()[gene+"_regions"].append(string.upper(globals()[gene+"_genes"][g].seq))
# Get tag data
tag_file = read_tcr_file(inputargs['species'], inputargs['tags'], gene, "tags", inputargs['tagfastadir']) # get tag data
if gene == 'v': jumpfunction = "jump_to_end_v"
elif gene == 'j': jumpfunction = "jump_to_start_j"
globals()[gene+"_seqs"], globals()["half1_"+gene+"_seqs"], globals()["half2_"+gene+"_seqs"], globals()[jumpfunction] = \
globals()["get_"+gene+"_tags"](tag_file, globals()[gene+"_half_split"])
tag_file.close()
# Build Aho-Corasick tries
globals()[gene+"_builder"] = AcoraBuilder()
for i in range(0,len(globals()[gene+"_seqs"])):
globals()[gene+"_builder"].add(str(globals()[gene+"_seqs"][i])) # Add all V tags to keyword trie
globals()[gene+"_key"] = globals()[gene+"_builder"].build()
# And tries for split, half-tags
globals()[gene+"_half1_builder"] = AcoraBuilder()
for i in range(0,len(globals()["half1_"+gene+"_seqs"])):
globals()[gene+"_half1_builder"].add(str(globals()["half1_"+gene+"_seqs"][i]))
globals()["half1_"+gene+"_key"] = globals()[gene+"_half1_builder"].build()
globals()[gene+"_half2_builder"] = AcoraBuilder()
for i in range(0,len(globals()["half2_"+gene+"_seqs"])):
globals()[gene+"_half2_builder"].add(str(globals()["half2_"+gene+"_seqs"][i]))
globals()["half2_"+gene+"_key"] = globals()[gene+"_half2_builder"].build()
def get_v_deletions( read, v_match, temp_end_v, v_regions_cut ):
# This function determines the number of V deletions in sequence read
# by comparing it to v_match, beginning by making comparisons at the
# end of v_match and at position temp_end_v in read.
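# Illustrative (hypothetical) example: if the terminal 10 germline V bases only match the read once the
# comparison window has been slid back 3 positions, 3 V deletions are recorded and end_v is moved back by 3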
function_temp_end_v = temp_end_v
pos = len(v_regions_cut[v_match]) -10 # changed from -1 for new checking technique
is_v_match = 0
# Catch situations in which the temporary end of the V exists beyond the end of the read
if function_temp_end_v >= len(read):
counts['v_del_failed_tag_at_end'] += 1
return
function_temp_end_v += 1
num_del = 0
while is_v_match == 0 and 0 <= function_temp_end_v < len(read):
# Require a 10 base match to determine where end of germ-line sequence lies
if str(v_regions_cut[v_match])[pos:pos+10] == read[function_temp_end_v-10:function_temp_end_v]:
is_v_match = 1
deletions_v = num_del
end_v = temp_end_v - num_del
else:
pos -= 1
num_del += 1
function_temp_end_v -= 1
if is_v_match == 1:
return [end_v, deletions_v]
else:
counts['v_del_failed'] += 1
return
def get_j_deletions( read, j_match, temp_start_j, j_regions_cut ):
# This function determines the number of J deletions in sequence read
# by comparing it to j_match, beginning by making comparisons at the
# start of j_match and at position temp_start_j in read.
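# Illustrative (hypothetical) example: if the first 10 surviving germline J bases are only found 2 positions
# downstream of where the undeleted J would begin, 2 J deletions are recorded and start_j is set to that position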
function_temp_start_j = temp_start_j
pos = 0
is_j_match = 0
while is_j_match == 0 and 0 <= function_temp_start_j+2 < len(str(read)):
# Require a 10 base match to determine where end of germ-line sequence lies
if str(j_regions_cut[j_match])[pos:pos+10] == read[function_temp_start_j:function_temp_start_j+10]:
is_j_match = 1
deletions_j = pos
start_j = function_temp_start_j
else:
pos += 1
function_temp_start_j += 1
if is_j_match == 1:
return [start_j, deletions_j]
else:
counts['j_del_failed'] += 1
return
def get_v_tags(file_v, half_split):
#"""Read V tags in from file"""
v_seqs = [] # Holds all V tags
jump_to_end_v = [] # Holds the number of jumps to make to look for deletions for each V region once the corresponding tag has been found
for line in file_v:
elements = line.rstrip("\n") # Turns every element in a text file line separated by a space into elements in a list
v_seqs.append(string.split(elements)[0]) # Adds elements in first column iteratively
jump_to_end_v.append(int(string.split(elements)[1])) # Adds elements in second column iteratively
half1_v_seqs = []
half2_v_seqs = []
for i in range(len(v_seqs)):
half1_v_seqs.append(v_seqs[i][0:half_split])
half2_v_seqs.append(v_seqs[i][half_split:])
return [v_seqs, half1_v_seqs, half2_v_seqs, jump_to_end_v]
def get_j_tags(file_j, half_split):
"""Read J tags in from file"""
j_seqs = [] # Holds all J tags
jump_to_start_j = [] # Holds the number of jumps to make to look for deletions for each J region once the corresponding tag has been found
for line in file_j:
elements = line.rstrip("\n")
j_seqs.append(string.split(elements)[0])
jump_to_start_j.append(int(string.split(elements)[1]))
half1_j_seqs = []
half2_j_seqs = []
for j in range(len(j_seqs)):
half1_j_seqs.append(j_seqs[j][0:half_split])
half2_j_seqs.append(j_seqs[j][half_split:])
return [j_seqs, half1_j_seqs, half2_j_seqs, jump_to_start_j]
def sort_permissions(fl):
# Need to ensure proper file permissions on output data
# If users are running pipeline through Docker might otherwise require root access
if oct(os.stat(fl).st_mode)[4:] != '666':
os.chmod(fl, 0o666)
##########################################################
############# READ IN COMMAND LINE ARGUMENTS #############
##########################################################
if __name__ == '__main__':
inputargs = vars(args())
print "Running Decombinator version", __version__
# Determine compression status (and thus opener required)
if inputargs['fastq'].endswith('.gz'):
opener = gzip.open
else:
opener = open
# Brief FASTQ sanity check
if inputargs['dontcheck'] == False:
if fastq_check(inputargs['fastq']) != True:
print "FASTQ sanity check failed reading", inputargs['fastq'], "- please ensure that this file is a properly formatted FASTQ."
sys.exit()
# Get TCR gene information
import_tcr_info(inputargs)
counts['start_time'] = time()
#########################################################
############# SCROLL THROUGH FILE & ANALYSE #############
#########################################################
print "Decombining FASTQ data..."
suffix = "." + inputargs['extension']
samplenam = str(inputargs['fastq'].split(".")[0])
if os.sep in samplenam: # Cope with situation where specified FQ file is in a subdirectory
samplenam = samplenam.split(os.sep)[-1]
# If the chain was not auto-detected from the file name, include it in the output file name
if counts['chain_detected'] == 1:
name_results = inputargs['prefix'] + samplenam
else:
name_results = inputargs['prefix'] + chainnams[chain] + "_" + samplenam
if inputargs['nobarcoding'] == False:
stemplate = string.Template('$v $j $del_v $del_j $nt_insert $seqid $tcr_seq $tcr_qual $barcode $barqual')
else:
stemplate = string.Template('$v $j $del_v $del_j $nt_insert')
found_tcrs = coll.Counter()
# Scroll through input file and find TCRs
with open(name_results + suffix, 'w') as outfile:
with opener(inputargs['fastq']) as f:
for readid, seq, qual in readfq(f):
start_time = time()
if inputargs['nobarcoding'] == False:
bc = seq[:30]
vdj = seq[30:]
else:
vdj = seq
if inputargs['nobarcoding'] == False:
if "N" in bc and inputargs['allowNs'] == False: # Ambiguous base in barcode region
counts['dcrfilter_barcodeN'] += 1
counts['read_count'] += 1
if counts['read_count'] % 100000 == 0 and inputargs['dontcount'] == False:
print '\t read', counts['read_count']
# Get details of the VJ recombination
if inputargs['orientation'] == 'reverse':
recom = dcr(revcomp(vdj), inputargs)
frame = 'reverse'
elif inputargs['orientation'] == 'forward':
recom = dcr(vdj, inputargs)
frame = 'forward'
elif inputargs['orientation'] == 'both':
recom = dcr(revcomp(vdj), inputargs)
frame = 'reverse'
if not recom:
recom = dcr(vdj, inputargs)
frame = 'forward'
if recom:
counts['vj_count'] += 1
vdjqual = qual[30:]
if frame == 'reverse':
tcrseq = revcomp(vdj)[recom[5]:recom[6]]
tcrQ = vdjqual[::-1][recom[5]:recom[6]]
elif frame == 'forward':
tcrseq = vdj[recom[5]:recom[6]]
tcrQ = vdjqual[recom[5]:recom[6]]
if inputargs['nobarcoding'] == False:
bcQ = qual[:30]
dcr_string = stemplate.substitute( v = str(recom[0]) + ',', j = str(recom[1]) + ',', del_v = str(recom[2]) + ',', \
del_j = str(recom[3]) + ',', nt_insert = recom[4] + ',', seqid = readid + ',', tcr_seq = tcrseq + ',', \
tcr_qual = tcrQ + ',', barcode = bc + ',', barqual = bcQ )
outfile.write(dcr_string + '\n')
else:
dcr_string = stemplate.substitute( v = str(recom[0]) + ',', j = str(recom[1]) + ',', del_v = str(recom[2]) + ',', \
del_j = str(recom[3]) + ',', nt_insert = recom[4])
found_tcrs[dcr_string] += 1
if inputargs['nobarcoding'] == True:
# Write out non-barcoded results, with frequencies
if inputargs['extension'] == 'n12':
print "Non-barcoding option selected, but default output file extension (n12) detected. Automatically changing to 'nbc'."
suffix = '.nbc'
with open(name_results + suffix, 'w') as outfile:
for x in found_tcrs.most_common():
outfile.write(x[0] + ", " + str(found_tcrs[x[0]]) + '\n')
counts['end_time'] = time()
timetaken = counts['end_time']-counts['start_time']
if inputargs['dontgzip'] == False:
print "Compressing Decombinator output file,", name_results + suffix, "..."
with open(name_results + suffix) as infile, gzip.open(name_results + suffix + '.gz', 'wb') as outfile:
outfile.writelines(infile)
os.unlink(name_results + suffix)
outfilenam = name_results + suffix + ".gz"
else:
outfilenam = name_results + suffix
sort_permissions(outfilenam)
##############################################
############# WRITE SUMMARY DATA #############
##############################################
print "Analysed", "{:,}".format(counts['read_count']), "reads, finding", "{:,}".format(counts['vj_count']), chainnams[chain], "VJ rearrangements"
print "Reading from", inputargs['fastq'] + ", writing to", outfilenam
print "Took", str(round(timetaken,2)), "seconds"
# Write data to summary file
if inputargs['suppresssummary'] == False:
# Check for directory and make summary file
if not os.path.exists('Logs'):
os.makedirs('Logs')
date = strftime("%Y_%m_%d")
# Check for existing date-stamped file
summaryname = "Logs/" + date + "_" + samplenam + "_Decombinator_Summary.csv"
if not os.path.exists(summaryname):
summaryfile = open(summaryname, "w")
else:
# If a summary file for today already exists, append an incrementing counter to the file name
for i in range(2,10000):
summaryname = "Logs/" + date + "_" + samplenam + "_Decombinator_Summary" + str(i) + ".csv"
if not os.path.exists(summaryname):
summaryfile = open(summaryname, "w")
break
# Generate string to write to summary file
summstr = "Property,Value\nDirectory," + os.getcwd() + "\nInputFile," + inputargs['fastq'] + "\nOutputFile," + outfilenam \
+ "\nDateFinished," + date + "\nTimeFinished," + strftime("%H:%M:%S") + "\nTimeTaken(Seconds)," + str(round(timetaken,2)) + "\n\nInputArguments:,\n"
for s in ['species', 'chain','extension', 'tags', 'dontgzip', 'allowNs', 'orientation', 'lenthreshold']:
summstr = summstr + s + "," + str(inputargs[s]) + "\n"
counts['pc_decombined'] = counts['vj_count'] / counts['read_count']
summstr = summstr + "\nNumberReadsInput," + str(counts['read_count']) + "\nNumberReadsDecombined," + str(counts['vj_count']) + "\nPercentReadsDecombined," + str( round(counts['pc_decombined'], 3))
# Half tag matching details
summstr = summstr + "\n\nReadsAssignedUsingHalfTags:,\nV1error," + str(counts['verr1']) \
+ "\nV2error," + str(counts['verr2']) \
+ "\nJ1error," + str(counts['jerr1']) \
+ "\nJ2error," + str(counts['jerr2'])
# Number reads filtered out
summstr = summstr + "\n\nReadsFilteredOut:,\nAmbiguousBaseCall(DCR)," + str(counts['dcrfilter_intertagN']) \
+ "\nAmbiguousBaseCall(Barcode)," + str(counts['dcrfilter_barcodeN']) \
+ "\nOverlongInterTagSeq," + str(counts['dcrfilter_toolong_intertag']) \
+ "\nImpossibleDeletions," + str(counts['dcrfilter_imposs_deletion']) \
+ "\nOverlappingTagBoundaries," + str(counts['dcrfilter_tag_overlap']) \
##########################!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!##########################
summstr = summstr + "\n\nReadsFailedAssignment:,\nMultipleVtagMatches," + str(counts['multiple_v_matches']) \
+ "\nVTagAtEndRead," + str(counts['v_del_failed_tag_at_end']) \
+ "\nVDeletionsUndetermined," + str(counts['v_del_failed']) \
+ "\nFoundV1HalfTagNotV2," + str(counts['foundv1notv2']) \
+ "\nFoundV2HalfTagNotV1," + str(counts['foundv2notv1']) \
+ "\nNoVDetected," + str(counts['no_vtags_found']) \
+ "\nMultipleJTagMatches," + str(counts['multiple_j_matches']) \
+ "\nJDeletionsUndermined," + str(counts['j_del_failed']) \
+ "\nFoundJ1HalfTagNotJ2," + str(counts['foundj1notj2']) \
+ "\nFoundJ2HalfTagNotJ1," + str(counts['foundj2notj1']) \
+ "\nNoJDetected," + str(counts['no_j_assigned'])
#+ "\nVJGeneAssignmentFailed," + str(counts['VJ_assignment_failed'])
print >> summaryfile, summstr
summaryfile.close()
sort_permissions(summaryname)
sys.exit() |