#////////////////////////////////////////////////////////////////////
#////////////////////////////////////////////////////////////////////
# script: fusionDetectionPipelineManual.py
# author: Lincoln
# date: 1.9.19
#
# usage:
#     ipython fusionDetectionPipelineManual.py
#
# RUN FROM A SCREEN SESSION!
#////////////////////////////////////////////////////////////////////
#////////////////////////////////////////////////////////////////////
import os
import json
import pandas as pd
from IPython import get_ipython  # script is run with ipython, so make get_ipython explicit
pd.options.display.max_colwidth = 500
pd.options.mode.chained_assignment = None
#////////////////////////////////////////////////////////////////////
# get_fastq1()
# return the full path of an R1 fastq file, given an s3 bucket, prefix
# and cell name
#////////////////////////////////////////////////////////////////////
def get_fastq1(cell):
    s3_location = prefix + cell
    lines = get_ipython().getoutput('aws s3 ls $s3_location')
    try:
        fastq_line = [x for x in lines if x.endswith('_R1_001.fastq.gz')][0]  # pick out the R1 fastq file specifically
        fastq_basename = fastq_line.split()[-1]
    except IndexError:
        return 'dummy'  # sentinel value so the caller always receives something
    return s3_location + fastq_basename
#////////////////////////////////////////////////////////////////////
# get_fastq2()
# return the full path of an R2 fastq file, given an s3 bucket, prefix
# and cell name
#////////////////////////////////////////////////////////////////////
def get_fastq2(cell):
    s3_location = prefix + cell
    lines = get_ipython().getoutput('aws s3 ls $s3_location')
    try:
        fastq_line = [x for x in lines if x.endswith('_R2_001.fastq.gz')][0]  # pick out the R2 fastq file specifically
        fastq_basename = fastq_line.split()[-1]
    except IndexError:
        return 'dummy'  # sentinel value so the caller always receives something
    return s3_location + fastq_basename
#////////////////////////////////////////////////////////////////////
# getCellTable()
# set up a table with cell name and full s3 paths to both fastqs, for
# every cell in a given run
#////////////////////////////////////////////////////////////////////
def getCellTable(prefix):
    txt = 'runX_cells.txt'
    get_ipython().getoutput('aws s3 ls $prefix > $txt')
    # read into a pandas dataframe
    cells_df = pd.read_table(txt, delim_whitespace=True, header=None, names=['is_prefix', 'cell_name'])
    cells_df['input_fastq1'] = cells_df['cell_name'].map(get_fastq1)
    cells_df['input_fastq2'] = cells_df['cell_name'].map(get_fastq2)
    cells_df['sample_id'] = cells_df.cell_name.str.strip('/')  # get rid of forward slashes
    return cells_df
#////////////////////////////////////////////////////////////////////
# runTrinity()
# actually runs STAR-Fusion, from within the Trinity docker container,
# given input_fastq1, input_fastq2 and cell_name
#////////////////////////////////////////////////////////////////////
def runTrinity(row):
    fq1 = row['input_fastq1']
    fq2 = row['input_fastq2']
    cell = row['sample_id']
    # get the current cell's fastqs
    get_ipython().system('aws s3 cp $fq1 .')
    get_ipython().system('aws s3 cp $fq2 .')
    # run STAR-Fusion, from the docker container
    get_ipython().system('sudo docker run -v `pwd`:/data --rm trinityctat/ctatfusion /usr/local/src/STAR-Fusion/STAR-Fusion --left_fq /data/*_R1_001.fastq.gz --right_fq /data/*_R2_001.fastq.gz --genome_lib_dir /data/ctat_genome_lib_build_dir -O /data/StarFusionOut/$cell --FusionInspector validate --examine_coding_effect --denovo_reconstruct --CPU 34')
    # remove the current fastqs
    get_ipython().system('rm *.fastq.gz')
#////////////////////////////////////////////////////////////////////
# main()
# pop life
# everybody needs a thrill!
# pop life
# we all got space 2 fill!
#////////////////////////////////////////////////////////////////////
# get list of all the runs
bucketPrefixes = 's3://darmanis-group/singlecell_lungadeno/non_immune/nonImmune_fastqs_9.27/'
f = 'myRuns.txt'
get_ipython().system('aws s3 ls $bucketPrefixes > $f')
# read run prefixes into a pandas df
runs_df = pd.read_table(f, delim_whitespace=True, header=None, names=['is_prefix', 'run_name'])
# add a full_path col
runs_df['full_path'] = 's3://darmanis-group/singlecell_lungadeno/non_immune/nonImmune_fastqs_9.27/' + runs_df['run_name']
for i in range(0, len(runs_df.index)):
    global prefix  # module-level global so the get_fastq helpers can see it; not ideal
    prefix = runs_df['full_path'][i]
    print(prefix)
    currCells = getCellTable(prefix)
    currCells.apply(runTrinity, axis=1)  # send the currCells df to the runTrinity func
#////////////////////////////////////////////////////////////////////
#////////////////////////////////////////////////////////////////////
|
from unittest import TestCase
from aazdev.app import create_app
from utils.config import Config
import os
import shutil
class ApiTestCase(TestCase):
    AAZ_DEV_FOLDER = os.path.expanduser(os.path.join('~', '.aaz_dev_test'))

    def __init__(self, *args, **kwargs):
        self.cleanup_dev_folder()
        Config.AAZ_DEV_FOLDER = self.AAZ_DEV_FOLDER
        Config.AAZ_DEV_WORKSPACE_FOLDER = os.path.join(self.AAZ_DEV_FOLDER, 'workspaces')
        super().__init__(*args, **kwargs)
        self.app = create_app()
        self.app.testing = True
        self.addCleanup(self.cleanup_dev_folder)

    def cleanup_dev_folder(self):
        if os.path.exists(self.AAZ_DEV_FOLDER):
            shutil.rmtree(self.AAZ_DEV_FOLDER)
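
# A minimal hedged sketch of how a concrete test could build on ApiTestCase.
# Assumption: create_app() returns a Flask app, so test_client() is available;
# the '/ping' route is hypothetical and only illustrates the pattern.
class ExampleApiTestCase(ApiTestCase):
    def test_app_responds(self):
        with self.app.test_client() as client:
            response = client.get('/ping')  # hypothetical endpoint
            self.assertLess(response.status_code, 500)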
|
__author__ = 'brett'
from rest_framework import viewsets, exceptions
from ..models import MatchResult
from .serializers import MatchResultSerializer
class MatchResultViewSet(viewsets.ReadOnlyModelViewSet):
    queryset = MatchResult.objects.all().select_related('home_team', 'away_team')
    serializer_class = MatchResultSerializer

    def get_queryset(self):
        try:
            return self.queryset.filter(season_start_year=self.request.query_params['season']).order_by('match_date')
        except (KeyError, ValueError):
            raise exceptions.ParseError('season parameter (with a valid year in which a season began) is required')
|
"""
Module contain all need parameters to create a board.
It has two specific pair of variables:
- width and height of the chess board
- width and height of the field on the chess board, witch is a 1/64 part of whole board
Code using this module has to handle following exceptions:
AttributeError,TypeError: invalid type of width or height chess board
"""
import os
class Params:
    def __init__(self, width_chess_board=800, height_chess_board=800, image_folder_path="chess_png"):
        """
        These parameters are responsible for creating the board, so the best values are multiples of 8,
        because a chess board is made of 64 fields (8 by 8).
        :param width_chess_board: must be int
        :param height_chess_board: must be int
        :param image_folder_path: path to the folder containing all images (format: *.png)
        """
        try:
            # width and height of the board
            self.width_chess_board = width_chess_board
            self.height_chess_board = height_chess_board
            # width and height of one field (the board has 64 fields)
            self.width_field = round(self.width_chess_board / 8)
            self.height_field = round(self.height_chess_board / 8)
        except (AttributeError, TypeError) as exc:
            print(f"Invalid type of parameters (expected int): {exc}")
        self._path_folder = os.path.dirname(__file__)
        self.image_folder = os.path.join(self._path_folder, image_folder_path)
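
# A minimal usage sketch (assumption: the module is run directly; the image
# folder does not need to exist just to inspect the computed field size).
if __name__ == "__main__":
    params = Params(640, 640)
    print(params.width_field, params.height_field)  # expected: 80 80, i.e. 1/8 of each dimension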
|
'''
1. Login authentication;
2. Create, delete, update, list and search:
   2.1 add    # add monkey 12 132xxx [email protected]
   2.2 delete # delete monkey
   2.3 update # update monkey set age = 18
   2.4 list   # list
   2.5 find   # find monkey
3. Formatted output
'''
# standard library modules
import sys

# module-level state
RESULT = []
INIT_FAIL_CNT = 0
MAX_FAIL_CNT = 6
USERINFO = ("51reboot", "123456")
FIELDS = ['username', 'age', 'tel', 'email']
RESULT.append(FIELDS)

while INIT_FAIL_CNT < MAX_FAIL_CNT:
    username = input("Please input your username: ")
    password = input("Please input your password: ")
    if username == USERINFO[0] and password == USERINFO[1]:
        # keep prompting on invalid operations; typing exit quits
        while True:
            # business logic
            info = input("Please input your operation: ")
            # string -> list
            info_list = info.split()
            if not info_list:
                continue
            action = info_list[0]
            if action == "add":
                # check whether the user exists; if so, say so and skip the insert
                for i in RESULT:
                    if info_list[1] == i[0]:
                        print('User already exists, please try again')
                        break
                else:
                    RESULT.append(info_list[1:])
                    # report the result
                    print("Add {} succ.".format(info_list[1]))
            elif action == "delete":
                tmp = []
                for i in RESULT:
                    if info_list[1] == i[0]:
                        tmp.append(i)
                        break
                else:
                    print('%s does not exist' % info_list[1])
                for j in tmp:
                    RESULT.remove(j)
                    print('%s deleted successfully' % info_list[1])
            elif action == "update":
                tmp = []
                for i in RESULT:
                    if info_list[1] == i[0]:
                        tmp.append(i)
                        break
                else:
                    print('%s does not exist' % info_list[1])
                if len(info_list) >= 6 and info_list[2] == 'set':
                    for idx, field in enumerate(FIELDS):
                        if info_list[3] == field:
                            if tmp:
                                tmp[0][idx] = info_list[5]
                                print('Update succeeded')
                            break
                    else:
                        print('%s is not a valid field' % info_list[3])
                else:
                    print('Malformed update statement')
            elif action == "list":
                # if there is not a single record, report an empty list
                if len(RESULT) <= 1:
                    print('The list is empty')
                    continue
                for x in RESULT:
                    print("{} {} {} {}".format(x[0], x[1], x[2], x[3]), end="\t")
                    print()
                print("-" * 50)
            elif action == "find":
                tmp = []
                for i in RESULT:
                    if i[0] == info_list[1]:
                        tmp.append(i)
                for j in tmp:
                    print(j)
            elif action == "exit":
                sys.exit(0)
            else:
                print("invalid action.")
    else:
        # TODO: print this in color
        print("username or password error.")
        INIT_FAIL_CNT += 1

print("\nInput failed {} times, terminal will exit.".format(MAX_FAIL_CNT))
|
#
# PySNMP MIB module WL400-SNMPGEN-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/WL400-SNMPGEN-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 21:29:47 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, SingleValueConstraint, ValueSizeConstraint, ConstraintsUnion, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsUnion", "ConstraintsIntersection")
ModuleCompliance, NotificationGroup, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup", "ObjectGroup")
TimeTicks, Unsigned32, MibScalar, MibTable, MibTableRow, MibTableColumn, Bits, Integer32, NotificationType, ModuleIdentity, ObjectIdentity, Gauge32, Counter32, Counter64, MibIdentifier, iso, IpAddress = mibBuilder.importSymbols("SNMPv2-SMI", "TimeTicks", "Unsigned32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Bits", "Integer32", "NotificationType", "ModuleIdentity", "ObjectIdentity", "Gauge32", "Counter32", "Counter64", "MibIdentifier", "iso", "IpAddress")
DisplayString, RowStatus, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "RowStatus", "TextualConvention")
wl400Modules, wl400Generic = mibBuilder.importSymbols("WL400-GLOBAL-REG", "wl400Modules", "wl400Generic")
snmpGenMIBModule = ModuleIdentity((1, 3, 6, 1, 4, 1, 232, 143, 1, 3))
if mibBuilder.loadTexts: snmpGenMIBModule.setLastUpdated('9905260000Z')
if mibBuilder.loadTexts: snmpGenMIBModule.setOrganization('Compaq Computer Corporation')
snmpGenMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 232, 144, 1))
snmpGenConf = MibIdentifier((1, 3, 6, 1, 4, 1, 232, 144, 1, 1))
snmpGenGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 232, 144, 1, 1, 1))
snmpGenCompl = MibIdentifier((1, 3, 6, 1, 4, 1, 232, 144, 1, 1, 2))
snmpGenObjs = MibIdentifier((1, 3, 6, 1, 4, 1, 232, 144, 1, 2))
snmpGenReadCommunityString = MibScalar((1, 3, 6, 1, 4, 1, 232, 144, 1, 2, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 64))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snmpGenReadCommunityString.setStatus('current')
snmpGenWriteCommunityString = MibScalar((1, 3, 6, 1, 4, 1, 232, 144, 1, 2, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 64))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snmpGenWriteCommunityString.setStatus('current')
snmpGenTrapDstMaxTableLength = MibScalar((1, 3, 6, 1, 4, 1, 232, 144, 1, 2, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snmpGenTrapDstMaxTableLength.setStatus('current')
snmpGenTrapDstTable = MibTable((1, 3, 6, 1, 4, 1, 232, 144, 1, 2, 4), )
if mibBuilder.loadTexts: snmpGenTrapDstTable.setStatus('current')
snmpGenTrapDstEntry = MibTableRow((1, 3, 6, 1, 4, 1, 232, 144, 1, 2, 4, 1), ).setIndexNames((0, "WL400-SNMPGEN-MIB", "snmpGenTrapDstIndex"))
if mibBuilder.loadTexts: snmpGenTrapDstEntry.setStatus('current')
snmpGenTrapDstIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 232, 144, 1, 2, 4, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 256)))
if mibBuilder.loadTexts: snmpGenTrapDstIndex.setStatus('current')
snmpGenTrapDstIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 232, 144, 1, 2, 4, 1, 2), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: snmpGenTrapDstIpAddress.setStatus('current')
snmpGenTrapDstType = MibTableColumn((1, 3, 6, 1, 4, 1, 232, 144, 1, 2, 4, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("trapOnly", 1), ("syslogOnly", 2), ("trapAndSyslog", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: snmpGenTrapDstType.setStatus('current')
snmpGenTrapDstRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 232, 144, 1, 2, 4, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: snmpGenTrapDstRowStatus.setStatus('current')
snmpGenLockStatus = MibScalar((1, 3, 6, 1, 4, 1, 232, 144, 1, 2, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("locked", 1), ("unlocked", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snmpGenLockStatus.setStatus('current')
snmpGenChangeIPAddress = MibScalar((1, 3, 6, 1, 4, 1, 232, 144, 1, 2, 6), OctetString().subtype(subtypeSpec=ValueSizeConstraint(18, 18)).setFixedLength(18)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snmpGenChangeIPAddress.setStatus('current')
snmpGenUseDHCP = MibScalar((1, 3, 6, 1, 4, 1, 232, 144, 1, 2, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("always", 1), ("smart", 2), ("never", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snmpGenUseDHCP.setStatus('current')
snmpGenBasicGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 232, 144, 1, 1, 1, 1)).setObjects(("WL400-SNMPGEN-MIB", "snmpGenReadCommunityString"), ("WL400-SNMPGEN-MIB", "snmpGenWriteCommunityString"), ("WL400-SNMPGEN-MIB", "snmpGenTrapDstMaxTableLength"), ("WL400-SNMPGEN-MIB", "snmpGenTrapDstIpAddress"), ("WL400-SNMPGEN-MIB", "snmpGenTrapDstType"), ("WL400-SNMPGEN-MIB", "snmpGenTrapDstRowStatus"), ("WL400-SNMPGEN-MIB", "snmpGenLockStatus"), ("WL400-SNMPGEN-MIB", "snmpGenChangeIPAddress"), ("WL400-SNMPGEN-MIB", "snmpGenUseDHCP"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    snmpGenBasicGroup = snmpGenBasicGroup.setStatus('current')
snmpGenBasicCompl = ModuleCompliance((1, 3, 6, 1, 4, 1, 232, 144, 1, 1, 2, 1)).setObjects(("WL400-SNMPGEN-MIB", "snmpGenBasicGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    snmpGenBasicCompl = snmpGenBasicCompl.setStatus('current')
mibBuilder.exportSymbols("WL400-SNMPGEN-MIB", snmpGenTrapDstRowStatus=snmpGenTrapDstRowStatus, snmpGenWriteCommunityString=snmpGenWriteCommunityString, snmpGenObjs=snmpGenObjs, snmpGenUseDHCP=snmpGenUseDHCP, snmpGenMIB=snmpGenMIB, snmpGenTrapDstTable=snmpGenTrapDstTable, snmpGenMIBModule=snmpGenMIBModule, snmpGenTrapDstIndex=snmpGenTrapDstIndex, snmpGenCompl=snmpGenCompl, snmpGenTrapDstIpAddress=snmpGenTrapDstIpAddress, snmpGenTrapDstMaxTableLength=snmpGenTrapDstMaxTableLength, snmpGenBasicCompl=snmpGenBasicCompl, snmpGenTrapDstEntry=snmpGenTrapDstEntry, snmpGenTrapDstType=snmpGenTrapDstType, snmpGenBasicGroup=snmpGenBasicGroup, snmpGenConf=snmpGenConf, snmpGenLockStatus=snmpGenLockStatus, PYSNMP_MODULE_ID=snmpGenMIBModule, snmpGenGroups=snmpGenGroups, snmpGenReadCommunityString=snmpGenReadCommunityString, snmpGenChangeIPAddress=snmpGenChangeIPAddress)
|
"""Constants for the TickTick integration."""
DOMAIN = "ticktick"
|
import argparse
import os
import time
import wave
import numpy as np
import pyaudio
import utils
import model
import tensorflow
parser = argparse.ArgumentParser()
# set up training configuration.
parser.add_argument('--n_classes', default=5994, type=int, help='class dim number')
parser.add_argument('--audio_db', default='audio_db/', type=str, help='person audio database')
parser.add_argument('--resume', default=r'pretrained/weights.h5', type=str, help='resume model path')
# set up network configuration.
parser.add_argument('--net', default='resnet34s', choices=['resnet34s', 'resnet34l'], type=str)
parser.add_argument('--ghost_cluster', default=2, type=int)
parser.add_argument('--vlad_cluster', default=8, type=int)
parser.add_argument('--bottleneck_dim', default=512, type=int)
parser.add_argument('--aggregation_mode', default='gvlad', choices=['avg', 'vlad', 'gvlad'], type=str)
# set up learning rate, training loss and optimizer.
parser.add_argument('--loss', default='softmax', choices=['softmax', 'amsoftmax'], type=str)
args = parser.parse_args()
person_feature = []
person_name = []
config = tensorflow.ConfigProto()
config.gpu_options.allow_growth = True
_ = tensorflow.Session(config=config)
# ==================================
# Get Model
# ==================================
# construct the data generator.
params = {'dim': (257, None, 1),
          'nfft': 512,
          'spec_len': 250,
          'win_length': 400,
          'hop_length': 160,
          'n_classes': args.n_classes,
          'sampling_rate': 16000,
          'normalize': True}
network_eval = model.vggvox_resnet2d_icassp(input_dim=params['dim'],
                                            num_class=params['n_classes'],
                                            mode='eval', args=args)
# ==> load pre-trained model
network_eval.load_weights(os.path.join(args.resume), by_name=True)
print('==> successfully loading model {}.'.format(args.resume))
def predict(audio_path):
    specs = utils.load_data(audio_path, win_length=params['win_length'], sr=params['sampling_rate'],
                            hop_length=params['hop_length'], n_fft=params['nfft'],
                            spec_len=params['spec_len'], mode='eval')
    specs = np.expand_dims(np.expand_dims(specs, 0), -1)
    feature = network_eval.predict(specs)[0]
    return feature
def load_audio_db(audio_db_path):
    start = time.time()
    audios = os.listdir(audio_db_path)
    for audio in audios:
        path = os.path.join(audio_db_path, audio)
        name = os.path.splitext(audio)[0]  # strip the file extension to get the person's name
        feature = predict(path)
        person_name.append(name)
        person_feature.append(feature)
        print("Loaded %s audio." % name)
    end = time.time()
    print('Loading of the audio library completed, took %d ms' % round((end - start) * 1000))
def recognition(path):
    name = ''
    pro = 0
    feature = predict(path)
    # the dot product of the embeddings serves as the similarity score
    for i, person_f in enumerate(person_feature):
        dist = np.dot(feature, person_f.T)
        if dist > pro:
            pro = dist
            name = person_name[i]
    return name, pro
def start_recognition():
    CHUNK = 1024
    FORMAT = pyaudio.paInt16
    CHANNELS = 2
    RATE = 16000
    RECORD_SECONDS = 4
    WAVE_OUTPUT_FILENAME = "infer_audio.wav"
    while True:
        # open the recording stream
        p = pyaudio.PyAudio()
        stream = p.open(format=FORMAT,
                        channels=CHANNELS,
                        rate=RATE,
                        input=True,
                        frames_per_buffer=CHUNK)
        input("Press the enter key to start recording %s seconds of audio:" % RECORD_SECONDS)
        print("start recording......")
        frames = []
        for _ in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
            data = stream.read(CHUNK)
            frames.append(data)
        print("recording complete!")
        stream.stop_stream()
        stream.close()
        wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
        wf.setnchannels(CHANNELS)
        wf.setsampwidth(p.get_sample_size(FORMAT))
        wf.setframerate(RATE)
        wf.writeframes(b''.join(frames))
        wf.close()
        p.terminate()
        # identify the recording against the audio library
        start = time.time()
        name, prob = recognition(WAVE_OUTPUT_FILENAME)
        end = time.time()
        if prob > 0.8:
            print("Prediction time: %d ms, recognized speaker: %s, similarity: %f" % (round((end - start) * 1000), name, prob))
        else:
            print("Prediction time: %d ms, the audio library does not have this user's voice" % round((end - start) * 1000))


if __name__ == '__main__':
    load_audio_db(args.audio_db)
    start_recognition()
|
from pygears import gear, registry
from pygears.typing import Union
from pygears.common.demux import demux
from pygears.common.mux import mux
from pygears.common.shred import shred
def fill_type(din_t, union_t, sel):
    dtypes = union_t.types.copy()
    dtypes[sel] = din_t
    return Union[tuple(dtypes)]


@gear
def fill(din,
         union_din: Union,
         *,
         fdemux=demux(ctrl_out=True),
         fmux=mux,
         sel) -> b'fill_type(din, union_din, sel)':
    fields = union_din | fdemux
    fields_list = list(fields)
    fields_list[sel + 1] | shred
    fields_list[sel + 1] = din
    return tuple(fields_list) | fmux
|
"""
Script for making various plots for optimisation of regularisation parameters, as provided by RooUnfoldParms class:
http://hepunx.rl.ac.uk/~adye/software/unfold/htmldoc/RooUnfoldParms.html
Plots produced:
- Chi squared values vs regularisation parameter
- RMS of the residuals given by the true and the unfolded distributions vs regularisation parameter
- RMS spread of the residuals vs regularisation parameter
- RMS errors vs regularisation parameter
"""
from __future__ import division, print_function
from optparse import OptionParser
from rootpy.io import File
import matplotlib
matplotlib.use('agg')  # select the backend before any plotting modules are imported
from copy import deepcopy
from tools.ROOT_utils import set_root_defaults
from tools.file_utilities import make_folder_if_not_exists
from tools.hist_utilities import value_error_tuplelist_to_hist, get_fit_results_histogram
from tools.plotting import make_plot, Histogram_properties
from tools.Unfolding import Unfolding, get_unfold_histogram_tuple
from config.variable_binning import bin_edges
from config import CMS, XSectionConfig
from config.latex_labels import variables_latex
matplotlib.rc('font', **CMS.font)
matplotlib.rc('text', usetex=True)
def draw_regularisation_histograms(h_truth, h_measured, h_response, h_fakes=None, h_data=None):
    global method, variable, output_folder, output_formats, test
    k_max = h_measured.nbins()
    unfolding = Unfolding(h_truth,
                          h_measured,
                          h_response,
                          h_fakes,
                          method=method,
                          k_value=k_max,
                          Hreco=2,
                          verbose=1)
    RMSerror, MeanResiduals, RMSresiduals, Chi2 = unfolding.test_regularisation(h_data, k_max)

    histogram_properties = Histogram_properties()
    histogram_properties.name = 'chi2_%s_channel_%s' % (channel, variable)
    histogram_properties.title = r'$\chi^2$ for $%s$ in %s channel, %s test' % (variables_latex[variable], channel, test)
    histogram_properties.x_axis_title = '$i$'
    histogram_properties.y_axis_title = r'$\chi^2$'
    histogram_properties.set_log_y = True
    make_plot(Chi2, 'chi2', histogram_properties, output_folder, output_formats, draw_errorbar=True, draw_legend=False)

    histogram_properties = Histogram_properties()
    histogram_properties.name = 'RMS_error_%s_channel_%s' % (channel, variable)
    histogram_properties.title = 'Mean error for $%s$ in %s channel, %s test' % (variables_latex[variable], channel, test)
    histogram_properties.x_axis_title = '$i$'
    histogram_properties.y_axis_title = 'Mean error'
    make_plot(RMSerror, 'RMS', histogram_properties, output_folder, output_formats, draw_errorbar=True, draw_legend=False)

    histogram_properties = Histogram_properties()
    histogram_properties.name = 'RMS_residuals_%s_channel_%s' % (channel, variable)
    histogram_properties.title = 'RMS of residuals for $%s$ in %s channel, %s test' % (variables_latex[variable], channel, test)
    histogram_properties.x_axis_title = '$i$'
    histogram_properties.y_axis_title = 'RMS of residuals'
    if test == 'closure':
        histogram_properties.set_log_y = True
    make_plot(RMSresiduals, 'RMSresiduals', histogram_properties, output_folder, output_formats, draw_errorbar=True, draw_legend=False)

    histogram_properties = Histogram_properties()
    histogram_properties.name = 'mean_residuals_%s_channel_%s' % (channel, variable)
    histogram_properties.title = 'Mean of residuals for $%s$ in %s channel, %s test' % (variables_latex[variable], channel, test)
    histogram_properties.x_axis_title = '$i$'
    histogram_properties.y_axis_title = 'Mean of residuals'
    make_plot(MeanResiduals, 'MeanRes', histogram_properties, output_folder, output_formats, draw_errorbar=True, draw_legend=False)
if __name__ == '__main__':
    set_root_defaults()
    parser = OptionParser()
    parser.add_option("-p", "--path", dest="path", default='data/absolute_eta_M3_angle_bl/',
                      help="set path to JSON files")
    parser.add_option("-o", "--output_folder", dest="output_folder", default='plots/unfolding_tests/',
                      help="set path to save plots")
    parser.add_option("-c", "--centre-of-mass-energy", dest="CoM", default=8, type=int,
                      help="set the centre of mass energy for analysis. Default = 8 [TeV]")
    parser.add_option("-u", "--unfolding_method", dest="unfolding_method", default='RooUnfoldSvd',
                      help="Unfolding method: RooUnfoldSvd (default), TSVDUnfold, TopSVDUnfold, RooUnfoldTUnfold, RooUnfoldInvert, RooUnfoldBinByBin, RooUnfoldBayes")
    parser.add_option("-f", "--load_fakes", dest="load_fakes", action="store_true",
                      help="Load fakes histogram and perform manual fake subtraction in TSVDUnfold")
    parser.add_option("-m", "--metType", dest="metType", default='type1',
                      help="set MET type used in the analysis of MET-dependent variables")
    parser.add_option("-t", "--test", dest="test", default='bias',
                      help="set the test type for comparison: bias (default), closure or data")
    (options, args) = parser.parse_args()

    measurement_config = XSectionConfig(options.CoM)
    output_formats = ['pdf']
    centre_of_mass = options.CoM
    path_to_JSON = options.path
    met_type = measurement_config.translate_options[options.metType]
    method = options.unfolding_method
    load_fakes = options.load_fakes
    output_folder_base = options.output_folder + '/%dTeV/k_optimisation/' % measurement_config.centre_of_mass_energy
    test = options.test
    ttbar_xsection = measurement_config.ttbar_xsection
    luminosity = measurement_config.luminosity * measurement_config.luminosity_scale
    input_filename_central = measurement_config.unfolding_madgraph
    input_filename_bias = measurement_config.unfolding_mcatnlo
    variables = ['MET', 'WPT', 'MT', 'ST', 'HT']

    print('Performing k-value optimisation checks using', test, 'info at', centre_of_mass, 'TeV')
    input_file = File(input_filename_central, 'read')
    input_file_bias = File(input_filename_bias, 'read')

    for channel in ['electron', 'muon']:
        for variable in variables:
            print('Doing variable', variable, 'in', channel, 'channel')
            h_truth, h_measured, h_response, h_fakes = get_unfold_histogram_tuple(
                inputfile=input_file,
                variable=variable,
                channel=channel,
                met_type=met_type,
                centre_of_mass=centre_of_mass,
                ttbar_xsection=ttbar_xsection,
                luminosity=luminosity,
                load_fakes=load_fakes)
            print('h_fakes = ', h_fakes)
            h_data = None
            if test == 'data':
                h_data = get_fit_results_histogram(data_path=path_to_JSON,
                                                   centre_of_mass=centre_of_mass,
                                                   channel=channel,
                                                   variable=variable,
                                                   met_type=met_type,
                                                   bin_edges=bin_edges[variable])
                output_folder = output_folder_base + '/' + variable + '_data/'
            elif test == 'bias':
                h_truth_bias, h_measured_bias, _, h_fakes = get_unfold_histogram_tuple(
                    inputfile=input_file_bias,
                    variable=variable,
                    channel=channel,
                    met_type=met_type,
                    centre_of_mass=centre_of_mass,
                    ttbar_xsection=ttbar_xsection,
                    luminosity=luminosity,
                    load_fakes=load_fakes)
                h_data = deepcopy(h_measured_bias)
                h_expected = h_truth_bias
                output_folder = output_folder_base + '/' + variable + '_bias/'
            elif test == 'closure':
                h_data = deepcopy(h_measured)
                output_folder = output_folder_base + '/' + variable + '_closure/'
            else:
                raise Exception("Unknown test attempted - please choose data, bias or closure")
            make_folder_if_not_exists(output_folder)
            draw_regularisation_histograms(h_truth, h_measured, h_response, h_fakes, h_data)
|
# BAREOS - Backup Archiving REcovery Open Sourced
#
# Copyright (C) 2013-2014 Bareos GmbH & Co. KG
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of version three of the GNU Affero General Public
# License as published by the Free Software Foundation, which is
# listed in the file LICENSE.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
# Author: Marco van Wieringen
#
bJobMessageType = dict(
    M_ABORT=1,
    M_DEBUG=2,
    M_FATAL=3,
    M_ERROR=4,
    M_WARNING=5,
    M_INFO=6,
    M_SAVED=7,
    M_NOTSAVED=8,
    M_SKIPPED=9,
    M_MOUNT=10,
    M_ERROR_TERM=11,
    M_TERM=12,
    M_RESTORED=13,
    M_SECURITY=14,
    M_ALERT=15,
    M_VOLMGMT=16
)

bsdrVariable = dict(
    bsdVarJob=1,
    bsdVarLevel=2,
    bsdVarType=3,
    bsdVarJobId=4,
    bsdVarClient=5,
    bsdVarPool=6,
    bsdVarPoolType=7,
    bsdVarStorage=8,
    bsdVarMediaType=9,
    bsdVarJobName=10,
    bsdVarJobStatus=11,
    bsdVarVolumeName=12,
    bsdVarJobErrors=13,
    bsdVarJobFiles=14,
    bsdVarJobBytes=15,
    bsdVarCompatible=16,
    bsdVarPluginDir=17
)

bsdwVariable = dict(
    bsdwVarJobReport=1,
    bsdwVarVolumeName=2,
    bsdwVarPriority=3,
    bsdwVarJobLevel=4
)

bRCs = dict(
    bRC_OK=0,
    bRC_Stop=1,
    bRC_Error=2,
    bRC_More=3,
    bRC_Term=4,
    bRC_Seen=5,
    bRC_Core=6,
    bRC_Skip=7,
    bRC_Cancel=8
)

bsdEventType = dict(
    bsdEventJobStart=1,
    bsdEventJobEnd=2,
    bsdEventDeviceInit=3,
    bsdEventDeviceMount=4,
    bsdEventVolumeLoad=5,
    bsdEventDeviceReserve=6,
    bsdEventDeviceOpen=7,
    bsdEventLabelRead=8,
    bsdEventLabelVerified=9,
    bsdEventLabelWrite=10,
    bsdEventDeviceClose=11,
    bsdEventVolumeUnload=12,
    bsdEventDeviceUnmount=13,
    bsdEventReadError=14,
    bsdEventWriteError=15,
    bsdEventDriveStatus=16,
    bsdEventVolumeStatus=17,
    bsdEventSetupRecordTranslation=18,
    bsdEventReadRecordTranslation=19,
    bsdEventWriteRecordTranslation=20,
    bsdEventDeviceRelease=21,
    bsdEventNewPluginOptions=22,
    bsdEventChangerLock=23,
    bsdEventChangerUnlock=24
)
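
# A small illustrative sketch (not part of the original module): invert the
# bRCs mapping so a plugin can log the symbolic name of a numeric return code.
bRC_names = {number: name for name, number in bRCs.items()}
# e.g. bRC_names[0] == 'bRC_OK'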
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import json
import datetime
import decimal
from sqlalchemy.ext.declarative import DeclarativeMeta
class AlchemyJsonEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj.__class__, DeclarativeMeta):
            # an SQLAlchemy class
            fields = {}
            for field in [x.name for x in obj.__table__.columns]:
                data = obj.__getattribute__(field)
                try:
                    json.dumps(data)  # this will fail on non-encodable values, like other classes
                    fields[field] = data
                except TypeError:  # added handling for datetime and other non-JSON types
                    if isinstance(data, datetime.datetime):
                        fields[field] = data.isoformat()
                    elif isinstance(data, datetime.date):
                        fields[field] = data.isoformat()
                    elif isinstance(data, datetime.timedelta):
                        fields[field] = (datetime.datetime.min + data).time().isoformat()
                    elif isinstance(data, decimal.Decimal):
                        fields[field] = str(data)
                    else:
                        fields[field] = None
            # a json-encodable dict
            return fields
        elif isinstance(obj, decimal.Decimal):
            return str(obj)
        elif isinstance(obj, datetime.datetime):
            return int(obj.timestamp())
        elif isinstance(obj, datetime.date):
            return obj.isoformat()
        return json.JSONEncoder.default(self, obj)


def sqlalchemy_serialize(data):
    return json.dumps(data, cls=AlchemyJsonEncoder)
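
# A minimal usage sketch with a hypothetical declarative model (not part of
# this module), showing how a datetime column falls into the TypeError branch
# above and comes out as an ISO string.
if __name__ == '__main__':
    from sqlalchemy import Column, DateTime, Integer, String
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class User(Base):  # hypothetical model, for illustration only
        __tablename__ = 'users'
        id = Column(Integer, primary_key=True)
        name = Column(String)
        created_at = Column(DateTime)

    user = User(id=1, name='demo', created_at=datetime.datetime(2020, 1, 1))
    print(sqlalchemy_serialize(user))  # {"id": 1, "name": "demo", "created_at": "2020-01-01T00:00:00"}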
|
from django.db import models
from mptt.models import MPTTModel, TreeForeignKey
from django.db.models.fields.files import FieldFile
from . import tgupload
import os
from django.core.exceptions import SuspiciousFileOperation
from django.conf import settings
from django.contrib.auth import get_user_model
class RemoteFieldFile(FieldFile):
    ## Overriding property
    @property
    def url(self):
        # self._require_file()
        return os.path.join(tgupload.conf.domain.strip('/'), self.name.strip('/'))


class RemoteImageField(models.ImageField):
    attr_class = RemoteFieldFile


class TGImages(MPTTModel):
    available = models.BooleanField(default=True)
    name = models.CharField(max_length=255, default='', blank=True)
    alt = models.CharField(max_length=255, default='', blank=True)
    description = models.TextField(default='', blank=True)
    image = RemoteImageField(upload_to='uploads', max_length=255, null=True, blank=True, db_column='image')
    parent = TreeForeignKey('self', on_delete=models.CASCADE, null=True, blank=True, related_name='children')
    author = models.ForeignKey(get_user_model(), on_delete=models.SET_NULL, blank=True, null=True)

    def save(self, *args, **kwargs):
        try:
            if self.image:
                ## Presave; otherwise we don't get the actual file path
                super().save(*args, **kwargs)
                if self.image.storage.exists(self.image.path):
                    upload_result = tgupload.upload(self.image.path)
                    ## save=False prevents this function from being executed recursively
                    self.image.delete(save=False)
                    self.image = upload_result['src']
                else:
                    raise tgupload.UploadError(f'File {self.image.path} not found')
        except SuspiciousFileOperation as e:
            ## A SuspiciousFileOperation may occur on deletion if the path is
            ## no longer correct because we have already overwritten it.
            if settings.DEBUG:
                print(f'ERROR: {repr(e)}')
        super().save(*args, **kwargs)

    def __str__(self):
        return f'{self.id}: {self.name}'
|
import logging

import tba_config
from controllers.base_controller import LoggedInHandler
from helpers.suggestions.suggestion_test_creator import SuggestionTestCreator


class AdminCreateTestSuggestions(LoggedInHandler):
    """
    Create test suggestions.
    """
    def get(self):
        self._require_admin()
        if tba_config.CONFIG["env"] != "prod":
            SuggestionTestCreator.createEventWebcastSuggestion()
            SuggestionTestCreator.createMatchVideoSuggestion()
            SuggestionTestCreator.createTeamMediaSuggestion()
        else:
            logging.error("{} tried to create test events in prod! No can do.".format(
                self.user_bundle.user.email()))
        self.redirect("/admin/")
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""An NNVM implementation of graph packing."""
import nnvm
from nnvm.compiler import graph_attr, graph_util
def _pack_batch_channel(data, dshape, bfactor, cfactor):
    """Pack the data channel dimension.
    """
    assert dshape[0] % bfactor == 0
    assert dshape[1] % cfactor == 0
    data = nnvm.sym.reshape(data,
                            shape=(dshape[0] // bfactor, bfactor,
                                   dshape[1] // cfactor, cfactor,
                                   dshape[2], dshape[3]))
    data = nnvm.sym.transpose(
        data, axes=(0, 2, 4, 5, 1, 3))
    return data
def _unpack_batch_channel(data, old_shape):
    """Unpack the data channel dimension.
    """
    data = nnvm.sym.transpose(data, axes=(0, 4, 1, 5, 2, 3))
    data = nnvm.sym.reshape(data, shape=old_shape)
    return data
def _pack_weight(data, dshape, cfactor):
    """Pack the weight into packed format.
    """
    assert len(dshape) == 4
    assert dshape[0] % cfactor == 0
    assert dshape[1] % cfactor == 0
    data = nnvm.sym.reshape(data,
                            shape=(dshape[0] // cfactor, cfactor,
                                   dshape[1] // cfactor, cfactor,
                                   dshape[2], dshape[3]))
    data = nnvm.sym.transpose(
        data, axes=(0, 2, 4, 5, 1, 3))
    return data
def _pack_weight_conv2d_transpose(data, dshape, cfactor):
    """Pack the weight into packed format.
    """
    assert len(dshape) == 4
    assert dshape[0] % cfactor == 0
    assert dshape[1] % cfactor == 0
    data = nnvm.sym.reshape(data,
                            shape=(dshape[0] // cfactor, cfactor,
                                   dshape[1] // cfactor, cfactor,
                                   dshape[2], dshape[3]))
    data = nnvm.sym.transpose(
        data, axes=(2, 0, 4, 5, 3, 1))
    return data
def _pack_bias(data, dshape, bfactor, cfactor):
    """Pack the bias parameter.
    """
    assert len(dshape) == 3
    assert dshape[0] % cfactor == 0
    data = nnvm.sym.reshape(data,
                            shape=(dshape[0] // cfactor,
                                   cfactor, dshape[1],
                                   dshape[2], 1))
    data = nnvm.sym.transpose(
        data, axes=(0, 2, 3, 4, 1))
    # broadcast batch dimension to bfactor
    data = nnvm.sym.broadcast_to(
        data,
        shape=(dshape[0] // cfactor, dshape[1], dshape[2], bfactor, cfactor))
    return data
def _get_shape(sym, shape_dict):
    """Get the shape of a node.
    """
    return graph_util.infer_shape(
        nnvm.graph.create(sym), **shape_dict)[1][0]
def nnvm_graph_pack(graph,
                    shape_dict,
                    bfactor,
                    cfactor,
                    weight_bits,
                    start_name="max_pool2d0",
                    stop_name="global_avg_pool2d0"):
    """Pack the graph into batch&channel packed format.

    Parameters
    ----------
    graph : Graph
        The input graph.
    shape_dict : dict of str to shape
        The input shape.
    bfactor : int
        The packing factor in batch
    cfactor : int
        The packing factor in channel
    start_name: str, optional
        Start packing from a certain known node.
    stop_name: str, optional
        Stop packing at a certain known node.

    Returns
    -------
    graph : Graph
        The transformed graph.
    """
    graph = graph_attr.set_shape_inputs(graph, shape_dict)
    graph = graph.apply("InferShape")
    shape = graph.json_attr("shape")
    gidx = graph.index
    node_map = {}
    dset = set()
    start_pack = False
    for nid, node in enumerate(gidx.nodes):
        children = [node_map[e[0]] for e in node["inputs"]]
        ishape = [shape[gidx.entry_id(e)] for e in node["inputs"]]
        oshape = shape[gidx.entry_id(nid, 0)]
        attrs = node.get("attrs", {})
        node_name = node["name"]
        op_name = node["op"]
        get_clone = lambda c, o_n, n_n, a: getattr(nnvm.symbol, o_n)(
            *c, name=n_n, **a)
        if op_name == "null":
            new_node = nnvm.symbol.Variable(node_name)
            if start_name and node_name == start_name:
                start_pack = True
                new_node = _pack_batch_channel(new_node, oshape, bfactor, cfactor)
            if start_pack and "_begin_state_" in node_name:  # RNN -> CNN, pack
                new_node = _pack_batch_channel(new_node, oshape, bfactor, cfactor)
        elif node_name == start_name:
            assert not start_pack
            start_pack = True
            new_node = get_clone(children, op_name, node_name, attrs)
            new_node = _pack_batch_channel(new_node, oshape, bfactor, cfactor)
        elif node_name == stop_name:
            if start_pack:
                start_pack = False
                children[0] = _unpack_batch_channel(children[0], ishape[0])
                new_node = getattr(nnvm.symbol, op_name)(
                    *children, name=node_name, **attrs)
            else:
                new_node = get_clone(children, op_name, node_name, attrs)
        elif op_name == "conv2d" and attrs.get("out_dtype", None) == "int32":
            assert 8 % weight_bits == 0
            w_lanes = 8 // weight_bits
            if start_pack:
                attrs["layout"] = "NCHW%dn%dc" % (bfactor, cfactor)
                attrs["kernel_layout"] = "OIHW%do%di%dp" % (cfactor, cfactor, w_lanes)
                data, weight = children
                weight = _pack_weight(weight, ishape[1], cfactor)
                # insert bit packing when necessary
                if w_lanes != 1:
                    assert 8 % w_lanes == 0
                    weight = nnvm.sym.bitpack(weight, lanes=w_lanes)
                new_node = nnvm.sym.conv2d(
                    data, weight, name=node_name, **attrs)
            else:
                new_node = get_clone(children, op_name, node_name, attrs)
        elif op_name == "conv2d_transpose" and attrs.get("out_dtype", None) == "int32":
            assert 8 % weight_bits == 0
            w_lanes = 8 // weight_bits
            if start_pack:
                attrs["layout"] = "NCHW%dn%dc" % (bfactor, cfactor)
                attrs["kernel_layout"] = "IOHW%di%do%dp" % (cfactor, cfactor, w_lanes)
                data, weight = children
                weight = _pack_weight_conv2d_transpose(weight, ishape[1], cfactor)
                new_node = nnvm.sym.conv2d_transpose(
                    data, weight, name=node_name, **attrs)
            else:
                new_node = get_clone(children, op_name, node_name, attrs)
        elif op_name.startswith("broadcast_") and tuple(ishape[0]) == tuple(ishape[1]):
            new_node = get_clone(children, op_name, node_name, attrs)
        elif op_name.startswith("broadcast") and len(ishape[1]) == 3:
            if start_pack:
                children[1] = _pack_bias(children[1], ishape[1], bfactor, cfactor)
                new_node = getattr(nnvm.symbol, op_name)(
                    *children, name=node_name, **attrs)
            else:
                new_node = get_clone(children, op_name, node_name, attrs)
        elif op_name.startswith("elementwise_add"):
            new_node = get_clone(children, op_name, node_name, attrs)
        else:
            new_node = get_clone(children, op_name, node_name, attrs)
            dset.add(op_name)
        node_map[nid] = new_node
    assert len(graph.index.output_entries) == 1
    ret = node_map[graph.index.output_entries[0][0]]
    if start_pack:
        oshape = shape[graph.index.output_entries[0][0]]
        ret = _unpack_batch_channel(ret, oshape)
    graph = nnvm.graph.create(ret)
    graph = graph_attr.set_shape_inputs(graph, shape_dict)
    graph = graph.apply("InferShape")
    return graph
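
# A hedged usage sketch: the factors below are illustrative assumptions for a
# hypothetical accelerator target, not values taken from the original code.
def _example_pack(sym, shape_dict):
    """Pack an existing NNVM symbol with bfactor=1, cfactor=16 and 8-bit weights."""
    graph = nnvm.graph.create(sym)
    return nnvm_graph_pack(graph, shape_dict, 1, 16, 8)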
|
import numpy as np
import tensorflow as tf
import tensorlight as light
def sse(outputs, targets, name=None):
    """Sum of squared error (SSE) between images.

    Parameters
    ----------
    outputs: Tensor [batch_size, ...] of type float32
        The first tensor.
    targets: Tensor [batch_size, ...] of type float32
        The second tensor.
    name: str or None, optional
        Optional name to be applied in TensorBoard. Defaults to "Mean" and
        shows up as '<loss-name>/Mean' in TensorBoard.

    Returns
    ----------
    Returns the calculated error.
    """
    with tf.name_scope('SSE_loss'):
        outputs_rank = outputs.get_shape().ndims
        sum_indices = tuple(range(1, outputs_rank))
        return tf.reduce_mean(
            tf.reduce_sum(tf.square(outputs - targets), sum_indices), name=name)
def mse(outputs, targets, name=None):
    """Mean squared error (MSE) between images.

    Parameters
    ----------
    outputs: Tensor [batch_size, ...] of type float32
        The first tensor.
    targets: Tensor [batch_size, ...] of type float32
        The second tensor.
    name: str or None, optional
        Optional name to be applied in TensorBoard. Defaults to "Mean" and
        shows up as '<loss-name>/Mean' in TensorBoard.

    Returns
    ----------
    Returns the calculated error.
    """
    with tf.name_scope('MSE_loss'):
        return tf.reduce_mean(tf.square(outputs - targets), name=name)
def rsse(outputs, targets, name=None):
    """Rooted sum of squared error (RSSE) between images.

    Parameters
    ----------
    outputs: Tensor [batch_size, ...] of type float32
        The first tensor.
    targets: Tensor [batch_size, ...] of type float32
        The second tensor.
    name: str or None, optional
        Optional name to be applied in TensorBoard. Defaults to "Mean" and
        shows up as '<loss-name>/Mean' in TensorBoard.

    Returns
    ----------
    Returns the calculated error.
    """
    with tf.name_scope('RSSE_loss'):
        outputs_rank = outputs.get_shape().ndims
        sum_indices = tuple(range(1, outputs_rank))
        return tf.reduce_mean(
            tf.sqrt(
                tf.reduce_sum(tf.square(outputs - targets), sum_indices)), name=name)
def rmse(outputs, targets, name=None):
    """Rooted mean squared error (RMSE) between images.

    Parameters
    ----------
    outputs: Tensor [batch_size, ...] of type float32
        The first tensor.
    targets: Tensor [batch_size, ...] of type float32
        The second tensor.
    name: str or None, optional
        Optional name to be applied in TensorBoard. Defaults to "Mean" and
        shows up as '<loss-name>/Mean' in TensorBoard.

    Returns
    ----------
    Returns the calculated error.
    """
    with tf.name_scope('RMSE_loss'):
        outputs_rank = outputs.get_shape().ndims
        reduction_indices = tuple(range(1, outputs_rank))
        return tf.reduce_mean(
            tf.sqrt(
                tf.reduce_mean(tf.square(outputs - targets), reduction_indices)), name=name)
def sae(outputs, targets, name=None):
    """Sum of absolute error (SAE) between images.

    Parameters
    ----------
    outputs: Tensor [batch_size, ...] of type float32
        The first tensor.
    targets: Tensor [batch_size, ...] of type float32
        The second tensor.
    name: str or None, optional
        Optional name to be applied in TensorBoard. Defaults to "Mean" and
        shows up as '<loss-name>/Mean' in TensorBoard.

    Returns
    ----------
    Returns the calculated error.
    """
    with tf.name_scope('SAE_loss'):
        outputs_rank = outputs.get_shape().ndims
        sum_indices = tuple(range(1, outputs_rank))
        return tf.reduce_mean(
            tf.reduce_sum(tf.abs(outputs - targets), sum_indices), name=name)
def mae(outputs, targets, name=None):
    """Mean absolute error (MAE) between images.

    Parameters
    ----------
    outputs: Tensor [batch_size, ...] of type float32
        The first tensor.
    targets: Tensor [batch_size, ...] of type float32
        The second tensor.
    name: str or None, optional
        Optional name to be applied in TensorBoard. Defaults to "Mean" and
        shows up as '<loss-name>/Mean' in TensorBoard.

    Returns
    ----------
    Returns the calculated error.
    """
    with tf.name_scope('MAE_loss'):
        return tf.reduce_mean(tf.abs(outputs - targets), name=name)
def rsae(outputs, targets, name=None):
    """Rooted sum of absolute error (RSAE) between images.

    Parameters
    ----------
    outputs: Tensor [batch_size, ...] of type float32
        The first tensor.
    targets: Tensor [batch_size, ...] of type float32
        The second tensor.
    name: str or None, optional
        Optional name to be applied in TensorBoard. Defaults to "Mean" and
        shows up as '<loss-name>/Mean' in TensorBoard.

    Returns
    ----------
    Returns the calculated error.
    """
    with tf.name_scope('RSAE_loss'):
        outputs_rank = outputs.get_shape().ndims
        sum_indices = tuple(range(1, outputs_rank))
        return tf.reduce_mean(
            tf.sqrt(
                tf.reduce_sum(tf.abs(outputs - targets), sum_indices)), name=name)
def rmae(outputs, targets, name=None):
    """Rooted mean absolute error (RMAE) between images.

    Parameters
    ----------
    outputs: Tensor [batch_size, ...] of type float32
        The first tensor.
    targets: Tensor [batch_size, ...] of type float32
        The second tensor.
    name: str or None, optional
        Optional name to be applied in TensorBoard. Defaults to "Mean" and
        shows up as '<loss-name>/Mean' in TensorBoard.

    Returns
    ----------
    Returns the calculated error.
    """
    with tf.name_scope('RMAE_loss'):
        outputs_rank = outputs.get_shape().ndims
        reduction_indices = tuple(range(1, outputs_rank))
        return tf.reduce_mean(
            tf.sqrt(
                tf.reduce_mean(tf.abs(outputs - targets), reduction_indices)), name=name)
def bce(output_probs, targets, from_logits=False, name=None):
    """Binary cross-entropy (BCE) between an output and a target tensor.

    Remarks: In case of images, this loss gives great results for images
        like MNIST or MovingMNIST, but does NOT work for natural images
        in color or gray-scale, as it can lead to negative loss.

    References:
        Taken from the Keras implementation (TensorFlow backend).

    Parameters
    ----------
    output_probs: Tensor [batch_size, ...] of type float32
        The probabilities of the output. It should be the output of tf.sigmoid(output).
    targets: Tensor [batch_size, ...] of type float32
        The target values in scale [0, 1].
    from_logits: Boolean, optional
        Whether the given values are probabilities (default) or logits.
    name: str or None, optional
        Optional name to be applied in TensorBoard. Defaults to "Mean" and
        shows up as '<loss-name>/Mean' in TensorBoard.

    Returns
    ----------
    Returns the calculated error.
    """
    with tf.name_scope('BCE_loss'):
        # flatten
        output_probs_flat = tf.contrib.layers.flatten(output_probs)
        targets_flat = tf.contrib.layers.flatten(targets)
        if not from_logits:
            # transform back to logits
            EPSILON = 10e-8
            output_probs_flat = tf.clip_by_value(output_probs_flat, EPSILON, 1 - EPSILON)
            output_probs_flat = tf.log(output_probs_flat / (1 - output_probs_flat))
        bce_values = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_flat, logits=output_probs_flat)
        return tf.reduce_mean(bce_values, name=name)
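
# A quick hedged sanity check for bce() (assumes a TF1-style session runtime,
# consistent with the tf.contrib usage above): a perfect prediction yields a
# loss of ~0, up to the EPSILON clipping.
def _bce_sanity_check():
    t = tf.constant([[1.0, 0.0]])
    with tf.Session() as sess:
        return sess.run(bce(t, t))  # ~0.0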
def ce(outputs, targets, name=None):
    """Cross entropy error (CE).

    Parameters
    ----------
    outputs: Tensor [batch_size, ...] of type float32
        The first tensor.
    targets: Tensor [batch_size, ...] of type float32
        The second tensor.
    name: str or None, optional
        Optional name to be applied in TensorBoard. Defaults to "Mean" and
        shows up as '<loss-name>/Mean' in TensorBoard.

    Returns
    ----------
    Returns the calculated error.
    """
    with tf.name_scope('CE_loss'):
        outputs_rank = outputs.get_shape().ndims
        sum_indices = tuple(range(1, outputs_rank))
        return -tf.reduce_mean(
            tf.reduce_sum(targets * tf.log(outputs), sum_indices), name=name)
def ssim(img1, img2, patch_size=11, sigma=1.5, L=1.0, K1=0.01, K2=0.03, name=None):
    """Calculates the Structural Similarity loss.

    Reference:
        This function attempts to mimic precisely the functionality of ssim.m, a
        MATLAB script provided by the authors of SSIM:
        https://ece.uwaterloo.ca/~z70wang/research/ssim/ssim_index.m

    Parameters
    ----------
    img1: Tensor [batch_size, h, w, c] of type float32
        The first image. Expected to have 1 channel and values in scale [0, 1].
    img2: Tensor [batch_size, h, w, c] of type float32
        The second image. Expected to have 1 channel and values in scale [0, 1].
    patch_size: int, optional
        The size of a single patch.
    sigma: float, optional
        The Gaussian's sigma value.
    L: int, optional
        Note: Not using 255 will result in slightly different results.
        The bit depth of the image. Use '1' when a value scale of [0, 1] is used.
        The scale of [-1, 1] is not supported and has to be rescaled.
    K1: float, optional
        The K1 value.
    K2: float, optional
        The K2 value.
    name: str or None, optional
        Optional name to be applied in TensorBoard. Defaults to "Mean" and
        shows up as '<loss-name>/Mean' in TensorBoard.

    Returns
    ----------
    value: float32
        The structural similarity loss value between both images.
    """
    with tf.name_scope('SSIM_loss'):
        return 1 - light.image.ssim(img1, img2, patch_size, sigma,
                                    L, K1, K2, name=name)
def ms_ssim(img1, img2, patch_size=11, sigma=1.5, L=1.0, K1=0.01, K2=0.03,
            level_weights=[0.0448, 0.2856, 0.3001, 0.2363, 0.1333], name=None):
    """Calculates the Multi-Scale Structural Similarity (MS-SSIM) loss.

    References:
        Z. Wang's "Multi-scale structural similarity
        for image quality assessment" Invited Paper, IEEE Asilomar Conference on
        Signals, Systems and Computers, Nov. 2003
        Author's MATLAB implementation:
        http://www.cns.nyu.edu/~lcv/ssim/msssim.zip

    Parameters
    ----------
    img1: Tensor [batch_size, h, w, c] of type float32
        The first image. Expected to have 1 channel and values in scale [0, 1].
    img2: Tensor [batch_size, h, w, c] of type float32
        The second image. Expected to have 1 channel and values in scale [0, 1].
    patch_size: int, optional
        The size of a single patch.
    sigma: float, optional
        The Gaussian's sigma value.
    L: int, optional
        WARNING: NOT USING 255 WILL RESULT IN DIFFERENT RESULTS!
        The bit depth of the image. Use '1' when a value scale of [0, 1] is used.
        The scale of [-1, 1] is not supported and has to be rescaled.
    K1: float, optional
        The K1 value.
    K2: float, optional
        The K2 value.
    level_weights: list(float), optional
        The weights for each scale level M. M must be in range [2, 5].
        We do not allow level=1, because then ssim() should be used for efficiency.
        We do not allow level>5, because empirical weights for higher levels are missing.
        If a different level count is selected, other weights should be used, because the
        default values have been obtained from an empirical analysis. A level of 5 is only
        suitable for huge images; e.g. an image of 64x64 pixels with level M=3 can result
        in NaN values.
        It can be considered to allow more levels with smaller patch sizes (5, 7, 9); some
        other papers use smaller sizes. Also, in the non-human-perception optimized setting,
        all weights are equal, with SUM(level_weights)=1.
    name: str or None, optional
        Optional name to be applied in TensorBoard. Defaults to "Mean" and
        shows up as '<loss-name>/Mean' in TensorBoard.

    Returns
    ----------
    value: float32
        The multi-scale structural similarity metric value between both images,
        where '1' means they are identical and '0' means they are completely different.
    """
    with tf.name_scope('MSSSIM_loss'):
        return 1 - light.image.ms_ssim(img1, img2, patch_size, sigma,
                                       L, K1, K2, level_weights, name=name)
def ss_ssim(img1, img2, patch_size=11, sigma=1.5, L=1.0, K1=0.01, K2=0.03, level=2, name=None):
    """Calculates the Single-Scale Structural Similarity (SS-SSIM) loss.

    References:
        Z. Wang's "Multi-scale structural similarity
        for image quality assessment" Invited Paper, IEEE Asilomar Conference on
        Signals, Systems and Computers, Nov. 2003

    Parameters
    ----------
    img1: Tensor [batch_size, h, w, c] of type float32
        The first image. Expected to have 1 channel and values in scale [0, 1].
    img2: Tensor [batch_size, h, w, c] of type float32
        The second image. Expected to have 1 channel and values in scale [0, 1].
    patch_size: int, optional
        The size of a single patch.
    sigma: float, optional
        The Gaussian's sigma value.
    L: int, optional
        WARNING: NOT USING 255 MIGHT RESULT IN DIFFERENT RESULTS!
        The bit depth of the image. Use '1' when a value scale of [0, 1] is used.
        The scale of [-1, 1] is not supported and has to be rescaled.
    K1: float, optional
        The K1 value.
    K2: float, optional
        The K2 value.
    level: int, optional
        The level M=2.
        A level of M=1 equals the simple ssim() function.
    name: str or None, optional
        Optional name to be applied in TensorBoard. Defaults to "Mean" and
        shows up as '<loss-name>/Mean' in TensorBoard.

    Returns
    ----------
    value: float32
        The single-scale structural similarity metric value between both images,
        where '1' means they are identical and '0' means they are completely different.
    """
    with tf.name_scope('SSSSIM_loss'):
        return 1 - light.image.ss_ssim(img1, img2, patch_size, sigma,
                                       L, K1, K2, level, name=name)
def _gradient_differences(img1, img2):
    """Computes the gradient differences between two images.

    Based on: https://arxiv.org/abs/1511.05440, which is optimized and simplified
    for efficiency.
    """
    shape = img1.get_shape().as_list()
    # gradient difference
    # create filters [-1, 1] and [[1], [-1]] for diffing to the left and down respectively
    pos = tf.constant(np.identity(shape[3]), dtype=tf.float32)
    neg = -1 * pos
    filter_x = tf.expand_dims(tf.stack([neg, pos]), 0)  # [-1, 1]
    filter_y = tf.stack([tf.expand_dims(pos, 0), tf.expand_dims(neg, 0)])  # [[1], [-1]]
    img1_dx = tf.abs(tf.nn.conv2d(img1, filter_x, [1, 1, 1, 1], padding='SAME'))
    img1_dy = tf.abs(tf.nn.conv2d(img1, filter_y, [1, 1, 1, 1], padding='SAME'))
    img2_dx = tf.abs(tf.nn.conv2d(img2, filter_x, [1, 1, 1, 1], padding='SAME'))
    img2_dy = tf.abs(tf.nn.conv2d(img2, filter_y, [1, 1, 1, 1], padding='SAME'))
    grad_diff_x = tf.abs(img2_dx - img1_dx)
    grad_diff_y = tf.abs(img2_dy - img1_dy)
    return grad_diff_x, grad_diff_y
def gdl(img1, img2, alpha=1.0, name=None):
    """Computes the (summed) Gradient Differences Loss (GDL) between two images on
    the same scale, as defined in: https://arxiv.org/abs/1511.05440

    Parameters
    ----------
    img1: Tensor [batch_size, h, w, c] of type float32
        The first image. Expected to have values in scale [0, max_value].
    img2: Tensor [batch_size, h, w, c] of type float32
        The second image. Expected to have values in scale [0, max_value].
    alpha: float, optional
        Value that is in range [1, ...).
    name: str or None, optional
        Optional name to be applied in TensorBoard. Defaults to "Mean" and
        shows up as '<loss-name>/Mean' in TensorBoard.

    Returns
    ----------
    mean(sum(gdl_values)): float32 Tensor
        The per-image summed Gradient Differences error over each frame in the batch.
        Attention: The value can get very large for non-similar images (>100k)
    """
    with tf.name_scope('GDL_loss'):
        grad_diff_x, grad_diff_y = _gradient_differences(img1, img2)
        gdl_values = tf.reduce_sum(grad_diff_x ** alpha + grad_diff_y ** alpha, [1, 2, 3])
        return tf.reduce_mean(gdl_values, name=name)
def mgdl(img1, img2, alpha=1.0, name=None):
    """Computes the Mean / per-pixel Gradient Differences Loss (GDL) between
    two images on the same scale. This version takes the mean, so that values
    do not explode on large images and the loss has a scale similar to other
    loss functions.

    Parameters
    ----------
    img1: Tensor [batch_size, h, w, c] of type float32
        The first image. Expected to have values in scale [0, max_value].
    img2: Tensor [batch_size, h, w, c] of type float32
        The second image. Expected to have values in scale [0, max_value].
    alpha: float, optional
        Value that is in range [1, ...).
    name: str or None, optional
        Optional name to be applied in TensorBoard. Defaults to "Mean" and
        shows up as '<loss-name>/Mean' in TensorBoard.

    Returns
    ----------
    mean(gdl_values): float32 Tensor
        The mean Gradient Differences error over each frame in the batch.
    """
    with tf.name_scope('mGDL_loss'):
        grad_diff_x, grad_diff_y = _gradient_differences(img1, img2)
        gdl_value = tf.reduce_mean(grad_diff_x ** alpha + grad_diff_y ** alpha, name=name)
        return gdl_value
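
# A hedged sanity check for the GDL losses (assumes a TF1 session runtime,
# consistent with the rest of this module): identical images must give a
# gradient-difference loss of exactly zero.
def _gdl_sanity_check():
    img = tf.constant(np.random.rand(2, 8, 8, 1), dtype=tf.float32)
    with tf.Session() as sess:
        return sess.run([gdl(img, img), mgdl(img, img)])  # [0.0, 0.0]
|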
import sys
import os
sys.path.append(os.path.abspath("../"))
from unittest import TestCase
from unittest.mock import patch
from unit_test.util import Util
from icon_servicenow.actions.get_attachments_for_an_incident import GetAttachmentsForAnIncident
from icon_servicenow.actions.get_attachments_for_an_incident.schema import Input
class TestGetAttachmentsForAnIncident(TestCase):
@classmethod
def setUpClass(cls) -> None:
cls.action = Util.default_connector(GetAttachmentsForAnIncident())
@patch("requests.sessions.Session.get", side_effect=Util.mocked_requests)
def test_get_attachments_for_an_incident(self, mock_post):
actual = self.action.run({Input.INCIDENT_ID: "3072d01d07a552f6d0ea83ef29c936be"})
expected = {"incident_attachments": ["ImNtRndhV1EzWVhSMFlXTm9iV1Z1ZEhSbGN6ZzNOalF6TWpKMCI="]}
self.assertEqual(actual, expected)
@patch("requests.sessions.Session.get", side_effect=Util.mocked_requests)
def test_get_attachments_for_an_incident_many(self, mock_post):
actual = self.action.run({Input.INCIDENT_ID: "51e4a8abb1b66fc04ba11001955e7dcb"})
expected = {
"incident_attachments": [
"ImNtRndhV1EzWVhSMFlXTm9iV1Z1ZEhSbGN6ZzNOalF6TWpKMCI=",
"ImNtRndhV1EzWVhSMFlXTm9iV1Z1ZEhSbGN6ZzNOalF6TWpKMCI=",
]
}
self.assertEqual(actual, expected)
@patch("requests.sessions.Session.get", side_effect=Util.mocked_requests)
    def test_get_attachments_for_an_incident_empty(self, mock_get):
actual = self.action.run({Input.INCIDENT_ID: "c1565da4456c2df374793d471d6ae8dd"})
expected = {"incident_attachments": []}
self.assertEqual(actual, expected)
|
# Generated by Django 1.11.20 on 2019-04-09 19:32
import django.db.models.deletion
import django.utils.timezone
import model_utils.fields
import simple_history.models
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='HistoricalProgramEnrollment',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('external_user_key', models.CharField(db_index=True, max_length=255, null=True)),
('program_uuid', models.UUIDField(db_index=True)),
('curriculum_uuid', models.UUIDField(db_index=True)),
('status', models.CharField(choices=[('enrolled', 'enrolled'), ('pending', 'pending'), ('suspended', 'suspended'), ('withdrawn', 'withdrawn')], max_length=9)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('user', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
'verbose_name': 'historical program enrollment',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='ProgramEnrollment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('external_user_key', models.CharField(db_index=True, max_length=255, null=True)),
('program_uuid', models.UUIDField(db_index=True)),
('curriculum_uuid', models.UUIDField(db_index=True)),
('status', models.CharField(choices=[('enrolled', 'enrolled'), ('pending', 'pending'), ('suspended', 'suspended'), ('withdrawn', 'withdrawn')], max_length=9)),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
# read three grades and print their average rounded to 2 decimal places
nota1 = float(input())
nota2 = float(input())
nota3 = float(input())
med = (nota1 + nota2 + nota3)/3
print(round(med,2)) |
from cffi import FFI
ffibuilder = FFI()
ffibuilder.cdef("""
void binary_gaussian_elimination(int **A, int m, int n);
void mat_vec_mod2(int **A, int *x, int *y, int m, int n);
void print_mat(int **A, int m, int n);
""")
ffibuilder.set_source("mm2r",
"""
void binary_gaussian_elimination(int **A, int m, int n);
void mat_vec_mod2(int **A, int *x, int *y, int m, int n);
void print_mat(int **A, int m, int n);
""",
    sources=['matmod2routines.c']  # C source file compiled into the extension
)
ffibuilder.compile(verbose=True)
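# Hedged usage sketch (assumes the build above succeeded and the generated
# "mm2r" extension is importable from the working directory): build an int**
# matrix from cffi-owned rows and call the compiled routines.
from mm2r import ffi, lib

rows = [ffi.new("int[]", r) for r in ([1, 0, 1], [0, 1, 1])]  # keep rows alive
A = ffi.new("int*[]", rows)                                   # int** view of the matrix
lib.print_mat(A, 2, 3)
lib.binary_gaussian_elimination(A, 2, 3)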
|
class Settlement:
def __init__(self, req):
self.req = req
async def fetch(self, **kwargs):
"""
:param kwargs:
:return:
"""
endpoint = 'settlement'
return await self.req.get(endpoint=endpoint, params=kwargs) if kwargs else await self.req.get(endpoint=endpoint)
async def fetch_transactions(self, *, _id, **kwargs):
"""
:param _id:
:param kwargs:
:return:
"""
endpoint = f'settlement/{_id}/transactions'
return await self.req.get(endpoint=endpoint, params=kwargs) if kwargs else await self.req.get(endpoint=endpoint)
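# Hedged usage sketch (assumes `req` is an async HTTP helper exposing
# `get(endpoint, params=None)`, as the methods above imply; the parameter
# names below are hypothetical):
#
#   settlement = Settlement(req)
#   settlements = await settlement.fetch(perPage=50)
#   txns = await settlement.fetch_transactions(_id="stl_123")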
|
# Practical subset usage
# PyTorch dataset
import numpy as np
import torch
from torch.utils.data import Dataset
train_set = np.load('./train.npy')
train_label = np.load('./train_label.npy')
class EyeTrackingDataset(Dataset):
def __init__(self, train_set, train_label):
self.train_set = train_set
self.train_label = train_label
def __len__(self):
return len(self.train_set)
    def __getitem__(self, idx):
sample = {'data': self.train_set[idx], 'labels': self.train_label[idx]}
return sample
dataset = EyeTrackingDataset(train_set, train_label)
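# A minimal sketch of consuming the dataset with a DataLoader (assumes the two
# arrays loaded above are index-aligned); default collation stacks the numpy
# samples into batched tensors.
loader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True)
batch = next(iter(loader))
print(batch['data'].shape, batch['labels'].shape)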
|
def threeSum(nums: 'List[int]') -> 'List[List[int]]':
nums.sort()
result = []
for i in range(len(nums)-2):
if (i>0 and nums[i-1]==nums[i]):
continue
lo, hi = i+1, len(nums)-1
while lo<hi:
            total = nums[i] + nums[lo] + nums[hi]  # avoid shadowing builtin sum()
            if total == 0:
result.append([nums[i], nums[lo], nums[hi]])
while (lo<hi and nums[lo]==nums[lo+1]):
lo+=1
while (lo<hi and nums[hi]==nums[hi-1]):
hi-=1
lo+=1
hi-=1
            elif total < 0:
lo+=1
else:
hi-=1
return result
if __name__=='__main__':
print(threeSum([-1, 0, 1, 2, -1, -4]))
|
import glob, os, warnings
import pandas as pd
import numpy as np
from datetime import datetime
from posenet.constants import *
def count_files(path):
    # count jpg files; os.path.join keeps this portable across path separators
    return len(glob.glob(os.path.join(path, "*.jpg")))
def append_part(arr, path):
df = pd.DataFrame(arr)
df.to_csv(path, encoding="utf-8", index=False, mode="a", header=False)
return 0
def create_file_name(
path=r"C:\Users\kluse\Documents\python\posenet-python\output" + "\\",
):
today = datetime.now()
# return path + today.strftime("%d-%b-%Y-%H-%M") + ".csv"
return path + today.strftime("%d-%b-%Y") + ".csv"
def create_log_file(path):
names = ["postureType", "predScore"]
    # only the first 7 parts (nose through shoulders) are logged
    for pn in PART_NAMES[:7]:
        names.append(pn + "X")
    for pn in PART_NAMES[:7]:
        names.append(pn + "Y")
names = np.asarray([names])
np.savetxt(path, names, delimiter=",", encoding="utf-8", fmt="%s")
csv_column_names = np.array(
[
[
"predScore",
"noseX",
"leftEyeX",
"rightEyeX",
"leftEarX",
"rightEarX",
"leftShoulderX",
"rightShoulderX",
"noseY",
"leftEyeY",
"rightEyeY",
"leftEarY",
"rightEarY",
"leftShoulderY",
"rightShoulderY",
"postureType",
]
]
)
|
#!/usr/bin/python3
import os
import sys
import cv2
import time
import torch
import numpy as np  # used below; previously only available via the wildcard imports
import rospy
import torch.optim as optim
import torch.nn as nn
from PIL import Image
from sensor_msgs.msg import CompressedImage
from torchvision import datasets, transforms
from matplotlib import patches, patheffects
from paramiko import SSHClient
from scp import SCPClient
sys_paths = ['../']
for p in sys_paths:
p = os.path.abspath(p)
if p not in sys.path:
sys.path.append(p)
from yolov3_pytorch.utils import *
from yolov3_pytorch.yolov3 import *
from yolov3_pytorch.yolov3_tiny import *
FREQUENCY = 10
IMG_WIDTH = 640
IMG_HEIGHT = 480
FOV = 60
class ObjectDetection:
def __init__(self):
self.class_names = ['person', 'bicycle', 'car', 'motorbike', 'aeroplane', 'bus', \
'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', \
'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', \
'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', \
'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', \
'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', \
'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', \
'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', \
'donut', 'cake', 'chair', 'sofa', 'pottedplant', 'bed', 'diningtable', \
'toilet', 'tvmonitor', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', \
'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', \
'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush']
self.sz = 416
self._img_sub = rospy.Subscriber('camera/rgb/image_raw/compressed', CompressedImage, callback=self._image_callback, queue_size=1)
self.rel_angle = None
self.timestamp_secs = None
self.timestamp_nsecs = None
self.model = Yolov3Tiny(num_classes=len(self.class_names))
self.model.load_state_dict(torch.load('../yolov3_pytorch/yolov3_tiny_coco_01.h5'))
# http://wiki.ros.org/rospy_tutorials/Tutorials/WritingImagePublisherSubscriber
# https://github.com/holli/yolov3_pytorch
def _image_callback(self, msg):
        np_arr = np.frombuffer(msg.data, np.uint8)  # np.fromstring is deprecated for binary data
image_np = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
img_org = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)
img_org = Image.fromarray(img_org)
img_resized = img_org.resize((self.sz, self.sz))
img_torch = image2torch(img_resized)
all_boxes = self.model.predict_img(img_torch, conf_thresh=0.2)[0]
boxes_found = nms(all_boxes, 0.3)
b = np.array(boxes_found)
if len(b) > 0:
classes = b[:, -1].astype(int)
boxes = b[:, 0:4]
boxes[:, 0] *= IMG_WIDTH
boxes[:, 2] *= IMG_WIDTH
boxes[:, 1] *= IMG_HEIGHT
boxes[:, 3] *= IMG_HEIGHT
for i in range(len(boxes)):
b, class_id = boxes[i], classes[i]
if b[0] == 0:
break
if self.class_names[classes[i]] == 'clock':
self.timestamp_secs = msg.header.stamp.secs
self.timestamp_nsecs = msg.header.stamp.nsecs
x, y = b[0], b[1]
w, h = b[2], b[3]
bb_bottom = (x + w/2, y)
rel_angle = np.radians((FOV/(IMG_WIDTH) * bb_bottom[0] - FOV/2) * -1)
self.rel_angle = rel_angle
plot_img_detections(img_resized, boxes_found, figsize=(8,8), class_names=self.class_names)
# https://stackoverflow.com/questions/43577248/scp-in-python-by-using-password
def ssh_scp_files(self, ssh_host, ssh_user, ssh_password, ssh_port, source_volume, destination_volume):
ssh = SSHClient()
ssh.load_system_host_keys()
ssh.connect(ssh_host, username=ssh_user, password=ssh_password, look_for_keys=False)
with SCPClient(ssh.get_transport()) as scp:
scp.put(source_volume, recursive=True, remote_path=destination_volume)
def detect(self):
rate = rospy.Rate(FREQUENCY)
while not rospy.is_shutdown():
if self.rel_angle:
with open('./object_angle.txt', 'w') as f:
f.write(str(self.rel_angle) + ' ' + str(self.timestamp_secs) + ' ' + str(self.timestamp_nsecs))
self.ssh_scp_files(ssh_host='192.168.0.1', ssh_user='husarion', ssh_password='husarion', ssh_port='11311', source_volume='./object_angle.txt', destination_volume='/home/husarion/husarion_ws/src/hide-n-seek/src/hide_n_seek/nodes/object_angle.txt')
rate.sleep()
if __name__ == "__main__":
rospy.init_node('object_detection')
object_detection = ObjectDetection()
rospy.sleep(2)
object_detection.detect()
|
"""
Register the top menu for the frontend.
"""
from flask_nav import Nav
from flask_nav.elements import (
Navbar,
View,
Link
)
nav = Nav()
nav.register_element('frontend_top', Navbar(
View('Home', 'frontend_blueprint.index'),
View('Processors', 'processors_blueprint.processors'),
View('Chains', 'chains_blueprint.chains'),
View('Tasks', 'tasks_blueprint.tasks'),
View('Compare', 'compare_blueprint.compare'),
Link('API', dest='/api'),
Link('Queue Backend', dest='/flower/'),
))
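# Hedged wiring sketch (assumption, not shown in this module): the app factory
# is expected to call `nav.init_app(app)`, after which templates can render
# the registered element, e.g. `{{ nav.frontend_top.render() }}`.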
|
#! /usr/bin/env python
from __future__ import print_function
import numpy as np
import hello_helpers.hello_misc as hm
from hello_helpers.gripper_conversion import GripperConversion
class SimpleCommandGroup:
def __init__(self, joint_name, joint_range, acceptable_joint_error=0.015):
"""Simple command group to extend
Attributes
----------
name: str
joint name
range: tuple(float)
acceptable joint bounds
active: bool
whether joint is active
index: int
index of joint's goal in point
goal: dict
components of the goal
error: float
the error between actual and desired
acceptable_joint_error: float
how close to zero the error must reach
"""
self.name = joint_name
self.range = joint_range
self.active = False
self.index = None
self.goal = {"position": None}
self.error = None
self.acceptable_joint_error = acceptable_joint_error
def get_num_valid_commands(self):
"""Returns number of active joints in the group
Returns
-------
int
the number of active joints within this group
"""
if self.active:
return 1
return 0
def update(self, commanded_joint_names, invalid_joints_callback, **kwargs):
"""Activates joints in the group
Checks commanded joints to activate the command
group and validates joints used correctly.
Parameters
----------
commanded_joint_names: list(str)
list of commanded joints in the trajectory
invalid_joints_callback: func
error callback for misuse of joints in trajectory
Returns
-------
bool
False if commanded joints invalid, else True
"""
self.active = False
self.index = None
if self.name in commanded_joint_names:
self.index = commanded_joint_names.index(self.name)
self.active = True
return True
def set_goal(self, point, invalid_goal_callback, fail_out_of_range_goal, **kwargs):
"""Sets goal for the joint group
Sets and validates the goal point for the joints
in this command group.
Parameters
----------
point: trajectory_msgs.JointTrajectoryPoint
the target point for all joints
invalid_goal_callback: func
error callback for invalid goal
fail_out_of_range_goal: bool
whether to bound out-of-range goals to range or fail
Returns
-------
bool
False if commanded goal invalid, else True
"""
self.goal = {"position": None, "velocity": None, "acceleration": None, "contact_threshold": None}
if self.active:
goal_pos = point.positions[self.index] if len(point.positions) > self.index else None
if goal_pos is None:
err_str = ("Received goal point with positions array length={0}. "
"This joint ({1})'s index is {2}. Length of array must cover all joints listed "
"in commanded_joint_names.").format(len(point.positions), self.name, self.index)
invalid_goal_callback(err_str)
return False
self.goal['position'] = hm.bound_ros_command(self.range, goal_pos, fail_out_of_range_goal)
self.goal['velocity'] = point.velocities[self.index] if len(point.velocities) > self.index else None
self.goal['acceleration'] = point.accelerations[self.index] if len(point.accelerations) > self.index else None
self.goal['contact_threshold'] = point.effort[self.index] if len(point.effort) > self.index else None
if self.goal['position'] is None:
err_str = ("Received {0} goal point that is out of bounds. "
"Range = {1}, but goal point = {2}.").format(self.name, self.range, goal_pos)
invalid_goal_callback(err_str)
return False
return True
def init_execution(self, robot, robot_status, **kwargs):
"""Starts execution of the point
Uses Stretch's Python API to begin moving to the
target point.
Parameters
----------
robot: stretch_body.robot.Robot
top-level interface to Python API
robot_status: dict
robot's current status
"""
raise NotImplementedError
def update_execution(self, robot_status, **kwargs):
"""Monitors progress of joint group
Checks against robot's status to track progress
towards the target point.
This method must set self.error.
Parameters
----------
robot_status: dict
robot's current status
Returns
-------
float/None
error value if group active, else None
"""
raise NotImplementedError
def goal_reached(self):
"""Returns whether reached target point
Returns
-------
bool
if active, whether reached target point, else True
"""
if self.active:
return (abs(self.error) < self.acceptable_joint_error)
return True
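# Hedged example of the extension pattern described above: a hypothetical
# joint whose status would live at robot_status['example']['pos'] and that
# would move via a (hypothetical) robot.example.move_by(). Not part of the
# real driver; it only illustrates the two methods subclasses must implement.
class ExampleCommandGroup(SimpleCommandGroup):
    def __init__(self, range_rad):
        SimpleCommandGroup.__init__(self, 'joint_example', range_rad)

    def init_execution(self, robot, robot_status, **kwargs):
        if self.active:
            robot.example.move_by(self.update_execution(robot_status)[1],
                                  v_r=self.goal['velocity'],
                                  a_r=self.goal['acceleration'])

    def update_execution(self, robot_status, **kwargs):
        self.error = None
        if self.active:
            self.error = self.goal['position'] - robot_status['example']['pos']
            return self.name, self.error
        return None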
class HeadPanCommandGroup(SimpleCommandGroup):
def __init__(self, range_rad, head_pan_calibrated_offset, head_pan_calibrated_looked_left_offset):
SimpleCommandGroup.__init__(self, 'joint_head_pan', range_rad, acceptable_joint_error=0.15)
self.head_pan_calibrated_offset = head_pan_calibrated_offset
self.head_pan_calibrated_looked_left_offset = head_pan_calibrated_looked_left_offset
def init_execution(self, robot, robot_status, **kwargs):
if self.active:
_, pan_error = self.update_execution(robot_status, backlash_state=kwargs['backlash_state'])
robot.head.move_by('head_pan', pan_error, v_r=self.goal['velocity'], a_r=self.goal['acceleration'])
if pan_error > 0.0:
kwargs['backlash_state']['head_pan_looked_left'] = True
else:
kwargs['backlash_state']['head_pan_looked_left'] = False
def update_execution(self, robot_status, **kwargs):
self.error = None
backlash_state = kwargs['backlash_state']
if self.active:
if backlash_state['head_pan_looked_left']:
pan_backlash_correction = self.head_pan_calibrated_looked_left_offset
else:
pan_backlash_correction = 0.0
pan_current = robot_status['head']['head_pan']['pos'] + \
self.head_pan_calibrated_offset + pan_backlash_correction
self.error = self.goal['position'] - pan_current
return self.name, self.error
return None
class HeadTiltCommandGroup(SimpleCommandGroup):
def __init__(self, range_rad, head_tilt_calibrated_offset,
head_tilt_calibrated_looking_up_offset,
head_tilt_backlash_transition_angle):
SimpleCommandGroup.__init__(self, 'joint_head_tilt', range_rad, acceptable_joint_error=0.52)
self.head_tilt_calibrated_offset = head_tilt_calibrated_offset
self.head_tilt_calibrated_looking_up_offset = head_tilt_calibrated_looking_up_offset
self.head_tilt_backlash_transition_angle = head_tilt_backlash_transition_angle
def init_execution(self, robot, robot_status, **kwargs):
if self.active:
_, tilt_error = self.update_execution(robot_status, backlash_state=kwargs['backlash_state'])
robot.head.move_by('head_tilt', tilt_error, v_r=self.goal['velocity'], a_r=self.goal['acceleration'])
#if tilt_error > (self.head_tilt_backlash_transition_angle + self.head_tilt_calibrated_offset):
if self.goal['position'] > (self.head_tilt_backlash_transition_angle + self.head_tilt_calibrated_offset):
kwargs['backlash_state']['head_tilt_looking_up'] = True
else:
kwargs['backlash_state']['head_tilt_looking_up'] = False
def update_execution(self, robot_status, **kwargs):
self.error = None
backlash_state = kwargs['backlash_state']
if self.active:
if backlash_state['head_tilt_looking_up']:
tilt_backlash_correction = self.head_tilt_calibrated_looking_up_offset
else:
tilt_backlash_correction = 0.0
tilt_current = robot_status['head']['head_tilt']['pos'] + \
self.head_tilt_calibrated_offset + tilt_backlash_correction
self.error = self.goal['position'] - tilt_current
return self.name, self.error
return None
class WristYawCommandGroup(SimpleCommandGroup):
def __init__(self, range_rad):
SimpleCommandGroup.__init__(self, 'joint_wrist_yaw', range_rad)
def init_execution(self, robot, robot_status, **kwargs):
if self.active:
robot.end_of_arm.move_by('wrist_yaw',
self.update_execution(robot_status)[1],
v_r=self.goal['velocity'],
a_r=self.goal['acceleration'])
def update_execution(self, robot_status, **kwargs):
self.error = None
if self.active:
self.error = self.goal['position'] - robot_status['end_of_arm']['wrist_yaw']['pos']
return self.name, self.error
return None
class GripperCommandGroup(SimpleCommandGroup):
def __init__(self, range_robotis):
SimpleCommandGroup.__init__(self, None, None, acceptable_joint_error=1.0)
self.gripper_joint_names = ['joint_gripper_finger_left', 'joint_gripper_finger_right', 'gripper_aperture']
self.gripper_conversion = GripperConversion()
self.range_aperture_m = (self.gripper_conversion.robotis_to_aperture(range_robotis[0]),
self.gripper_conversion.robotis_to_aperture(range_robotis[1]))
self.range_finger_rad = (self.gripper_conversion.robotis_to_finger(range_robotis[0]),
self.gripper_conversion.robotis_to_finger(range_robotis[1]))
def update(self, commanded_joint_names, invalid_joints_callback, **kwargs):
self.active = False
self.index = None
active_gripper_joint_names = list(set(commanded_joint_names) & set(self.gripper_joint_names))
if len(active_gripper_joint_names) > 1:
err_str = ("Received a command for the gripper that includes more than one gripper joint name: {0}. "
"Only one joint name is allowed to be used for a gripper command to avoid conflicts "
"and confusion. The gripper only has a single degree of freedom that can be "
"controlled using the following three mutually exclusive joint names: "
"{1}.").format(active_gripper_joint_names, self.gripper_joint_names)
invalid_joints_callback(err_str)
return False
elif len(active_gripper_joint_names) == 1:
self.name = active_gripper_joint_names[0]
self.index = commanded_joint_names.index(self.name)
self.active = True
return True
def set_goal(self, point, invalid_goal_callback, fail_out_of_range_goal, **kwargs):
self.goal = {"position": None, "velocity": None, "acceleration": None, "contact_threshold": None}
if self.active:
goal_pos = point.positions[self.index] if len(point.positions) > self.index else None
if goal_pos is None:
err_str = ("Received goal point with positions array length={0}. "
"This joint ({1})'s index is {2}. Length of array must cover all joints listed "
"in commanded_joint_names.").format(len(point.positions), self.name, self.index)
invalid_goal_callback(err_str)
return False
joint_range = self.range_aperture_m if (self.name == 'gripper_aperture') else self.range_finger_rad
self.goal['position'] = hm.bound_ros_command(joint_range, goal_pos, fail_out_of_range_goal)
self.goal['velocity'] = point.velocities[self.index] if len(point.velocities) > self.index else None
self.goal['acceleration'] = point.accelerations[self.index] if len(point.accelerations) > self.index else None
self.goal['contact_threshold'] = point.effort[self.index] if len(point.effort) > self.index else None
if self.goal['position'] is None:
err_str = ("Received {0} goal point that is out of bounds. "
"Range = {1}, but goal point = {2}.").format(self.name, joint_range, goal_pos)
invalid_goal_callback(err_str)
return False
return True
def init_execution(self, robot, robot_status, **kwargs):
if self.active:
_, gripper_error = self.update_execution(robot_status)
if (self.name == 'gripper_aperture'):
gripper_robotis_error = self.gripper_conversion.aperture_to_robotis(gripper_error)
elif (self.name == 'joint_gripper_finger_left') or (self.name == 'joint_gripper_finger_right'):
gripper_robotis_error = self.gripper_conversion.finger_to_robotis(gripper_error)
robot.end_of_arm.move_by('stretch_gripper',
gripper_robotis_error,
v_r=self.goal['velocity'],
a_r=self.goal['acceleration'])
def update_execution(self, robot_status, **kwargs):
self.error = None
if self.active:
robotis_pct = robot_status['end_of_arm']['stretch_gripper']['pos_pct']
if (self.name == 'gripper_aperture'):
gripper_current = self.gripper_conversion.robotis_to_aperture(robotis_pct)
elif (self.name == 'joint_gripper_finger_left') or (self.name == 'joint_gripper_finger_right'):
gripper_current = self.gripper_conversion.robotis_to_finger(robotis_pct)
self.error = self.goal['position'] - gripper_current
return self.name, self.error
return None
class TelescopingCommandGroup(SimpleCommandGroup):
def __init__(self, range_m, wrist_extension_calibrated_retracted_offset):
#SimpleCommandGroup.__init__(self, 'wrist_extension', range_m, acceptable_joint_error=0.005)
SimpleCommandGroup.__init__(self, 'wrist_extension', range_m, acceptable_joint_error=0.008)
self.wrist_extension_calibrated_retracted_offset = wrist_extension_calibrated_retracted_offset
self.telescoping_joints = ['joint_arm_l3', 'joint_arm_l2', 'joint_arm_l1', 'joint_arm_l0']
self.is_telescoping = False
def get_num_valid_commands(self):
if self.active and self.is_telescoping:
return len(self.telescoping_joints)
elif self.active:
return 1
return 0
def update(self, commanded_joint_names, invalid_joints_callback, **kwargs):
self.active = False
self.is_telescoping = False
self.index = None
active_telescoping_joint_names = list(set(commanded_joint_names) & set(self.telescoping_joints))
if self.name in commanded_joint_names:
if len(active_telescoping_joint_names) == 0:
self.index = commanded_joint_names.index(self.name)
self.active = True
else:
err_str = ("Received a command for the wrist_extension joint and one or more telescoping_joints. "
"These are mutually exclusive options. The joint names in the received command = "
"{0}").format(commanded_joint_names)
invalid_joints_callback(err_str)
return False
elif len(active_telescoping_joint_names) != 0:
if len(active_telescoping_joint_names) == len(self.telescoping_joints):
self.active = True
self.is_telescoping = True
self.index = [commanded_joint_names.index(i) for i in self.telescoping_joints]
else:
err_str = ("Commands with telescoping joints requires all telescoping joints to be present. "
"Only received {0} of {1} telescoping joints. They are = "
"{2}").format(len(active_telescoping_joint_names), len(self.telescoping_joints),
active_telescoping_joint_names)
invalid_joints_callback(err_str)
return False
return True
def set_goal(self, point, invalid_goal_callback, fail_out_of_range_goal, **kwargs):
self.goal = {"position": None, "velocity": None, "acceleration": None, "contact_threshold": None}
if self.active:
if self.is_telescoping:
goal_pos = sum([point.positions[i] for i in self.index]) \
if len(point.positions) > max(self.index) else None
self.goal['velocity'] = point.velocities[self.index[0]] \
if len(point.velocities) > self.index[0] else None
self.goal['acceleration'] = point.accelerations[self.index[0]] \
if len(point.accelerations) > self.index[0] else None
self.goal['contact_threshold'] = point.effort[self.index[0]] \
if len(point.effort) > self.index[0] else None
else:
goal_pos = point.positions[self.index] \
if len(point.positions) > self.index else None
self.goal['velocity'] = point.velocities[self.index] \
if len(point.velocities) > self.index else None
self.goal['acceleration'] = point.accelerations[self.index] \
if len(point.accelerations) > self.index else None
self.goal['contact_threshold'] = point.effort[self.index] \
if len(point.effort) > self.index else None
if goal_pos is None:
err_str = ("Received goal point with positions array length={0}. "
"This joint ({1})'s index is {2}. Length of array must cover all joints listed "
"in commanded_joint_names.").format(len(point.positions), self.name, self.index)
invalid_goal_callback(err_str)
return False
self.goal['position'] = hm.bound_ros_command(self.range, goal_pos, fail_out_of_range_goal)
if self.goal['position'] is None:
err_str = ("Received {0} goal point that is out of bounds. "
"Range = {1}, but goal point = {2}.").format(self.name, self.range, goal_pos)
invalid_goal_callback(err_str)
return False
return True
def init_execution(self, robot, robot_status, **kwargs):
if self.active:
_, extension_error_m = self.update_execution(robot_status, backlash_state=kwargs['backlash_state'])
robot.arm.move_by(extension_error_m,
v_m=self.goal['velocity'],
a_m=self.goal['acceleration'],
contact_thresh_pos_N=self.goal['contact_threshold'],
contact_thresh_neg_N=-self.goal['contact_threshold'] \
if self.goal['contact_threshold'] is not None else None)
if extension_error_m < 0.0:
kwargs['backlash_state']['wrist_extension_retracted'] = True
else:
kwargs['backlash_state']['wrist_extension_retracted'] = False
def update_execution(self, robot_status, **kwargs):
backlash_state = kwargs['backlash_state']
        success_callback = kwargs.get('success_callback')
self.error = None
if self.active:
if success_callback and robot_status['arm']['motor']['in_guarded_event']:
success_callback("{0} contact detected.".format(self.name))
return True
if backlash_state['wrist_extension_retracted']:
arm_backlash_correction = self.wrist_extension_calibrated_retracted_offset
else:
arm_backlash_correction = 0.0
extension_current = robot_status['arm']['pos'] + arm_backlash_correction
self.error = self.goal['position'] - extension_current
return (self.telescoping_joints, self.error) if self.is_telescoping else (self.name, self.error)
return None
class LiftCommandGroup(SimpleCommandGroup):
def __init__(self, range_m):
SimpleCommandGroup.__init__(self, 'joint_lift', range_m)
def init_execution(self, robot, robot_status, **kwargs):
if self.active:
robot.lift.move_by(self.update_execution(robot_status)[1],
v_m=self.goal['velocity'],
a_m=self.goal['acceleration'],
contact_thresh_pos_N=self.goal['contact_threshold'],
contact_thresh_neg_N=-self.goal['contact_threshold'] \
if self.goal['contact_threshold'] is not None else None)
def update_execution(self, robot_status, **kwargs):
        success_callback = kwargs.get('success_callback')
self.error = None
if self.active:
if success_callback and robot_status['lift']['motor']['in_guarded_event']:
success_callback("{0} contact detected.".format(self.name))
return True
self.error = self.goal['position'] - robot_status['lift']['pos']
return self.name, self.error
return None
class MobileBaseCommandGroup(SimpleCommandGroup):
def __init__(self, virtual_range_m=(-0.5, 0.5)):
SimpleCommandGroup.__init__(self, 'joint_mobile_base_translation', virtual_range_m,
acceptable_joint_error=0.005)
self.incrementing_joint_names = ['translate_mobile_base', 'rotate_mobile_base']
self.active_translate_mobile_base = False
self.active_rotate_mobile_base = False
self.acceptable_mobile_base_error_m = 0.005
self.excellent_mobile_base_error_m = 0.005
self.acceptable_mobile_base_error_rad = (np.pi/180.0) * 6.0
self.excellent_mobile_base_error_rad = (np.pi/180.0) * 0.6
self.min_m_per_s = 0.002
self.min_rad_per_s = np.radians(1.0)
def get_num_valid_commands(self):
if self.active:
num_inc = self.active_translate_mobile_base + self.active_rotate_mobile_base
return num_inc if num_inc > 0 else 1
return 0
def update(self, commanded_joint_names, invalid_joints_callback, **kwargs):
robot_mode = kwargs['robot_mode']
self.active = False
self.active_translate_mobile_base = False
self.active_rotate_mobile_base = False
self.index = None
self.index_translate_mobile_base = None
self.index_rotate_mobile_base = None
active_incrementing_joint_names = list(set(commanded_joint_names) & set(self.incrementing_joint_names))
if self.name in commanded_joint_names:
if robot_mode == 'manipulation':
if len(active_incrementing_joint_names) == 0:
self.active = True
self.index = commanded_joint_names.index(self.name)
else:
err_str = ("Received a command for the mobile base virtual joint ({0}}) "
"and mobile base incremental motions ({1}). These are "
"mutually exclusive options. The joint names in the received command = "
"{2}").format(self.name, active_incrementing_joint_names, commanded_joint_names)
invalid_joints_callback(err_str)
return False
else:
err_str = ("Must be in manipulation mode to receive a command for the "
"{0} joint. Current mode = {1}.").format(self.name, robot_mode)
invalid_joints_callback(err_str)
return False
elif len(active_incrementing_joint_names) != 0:
if robot_mode == 'position':
self.active = True
if 'translate_mobile_base' in active_incrementing_joint_names:
self.active_translate_mobile_base = True
self.index_translate_mobile_base = commanded_joint_names.index('translate_mobile_base')
if 'rotate_mobile_base' in active_incrementing_joint_names:
self.active_rotate_mobile_base = True
self.index_rotate_mobile_base = commanded_joint_names.index('rotate_mobile_base')
else:
err_str = ("Must be in position mode to receive a command for the {0} joint(s). "
"Current mode = {1}.").format(active_positioning_joint_names, robot_mode)
invalid_joints_callback(err_str)
return False
return True
def set_goal(self, point, invalid_goal_callback, fail_out_of_range_goal, **kwargs):
self.goal = {"position": None, "velocity": None, "acceleration": None, "contact_threshold": None}
self.goal_translate_mobile_base = {"position": None, "velocity": None, "acceleration": None, "contact_threshold": None}
self.goal_rotate_mobile_base = {"position": None, "velocity": None, "acceleration": None, "contact_threshold": None}
if self.active:
if self.active_translate_mobile_base or self.active_rotate_mobile_base:
if len(point.positions) <= self.index_translate_mobile_base and len(point.positions) <= self.index_rotate_mobile_base:
err_str = ("Received goal point with positions array length={0}. These joints ({1})'s "
"indices are {2} & {3} respectively. Length of array must cover all joints "
"listed in commanded_joint_names.").format(len(point.positions),
self.incrementing_joint_names,
self.index_translate_mobile_base,
self.index_rotate_mobile_base)
invalid_goal_callback(err_str)
return False
if self.active_translate_mobile_base and \
not np.isclose(point.positions[self.index_translate_mobile_base], 0.0, rtol=1e-5, atol=1e-8, equal_nan=False):
self.goal_translate_mobile_base['position'] = point.positions[self.index_translate_mobile_base]
self.goal_translate_mobile_base['velocity'] = point.velocities[self.index_translate_mobile_base] if len(point.velocities) > self.index_translate_mobile_base else None
self.goal_translate_mobile_base['acceleration'] = point.accelerations[self.index_translate_mobile_base] if len(point.accelerations) > self.index_translate_mobile_base else None
self.goal_translate_mobile_base['contact_threshold'] = point.effort[self.index_translate_mobile_base] if len(point.effort) > self.index_translate_mobile_base else None
if self.active_rotate_mobile_base and \
not np.isclose(point.positions[self.index_rotate_mobile_base], 0.0, rtol=1e-5, atol=1e-8, equal_nan=False):
self.goal_rotate_mobile_base['position'] = point.positions[self.index_rotate_mobile_base]
self.goal_rotate_mobile_base['velocity'] = point.velocities[self.index_rotate_mobile_base] if len(point.velocities) > self.index_rotate_mobile_base else None
self.goal_rotate_mobile_base['acceleration'] = point.accelerations[self.index_rotate_mobile_base] if len(point.accelerations) > self.index_rotate_mobile_base else None
self.goal_rotate_mobile_base['contact_threshold'] = point.effort[self.index_rotate_mobile_base] if len(point.effort) > self.index_rotate_mobile_base else None
if (self.goal_translate_mobile_base['position'] is not None) and \
(self.goal_rotate_mobile_base['position'] is not None):
err_str = ("Received a goal point with simultaneous translation and rotation mobile base goals. "
"This is not allowed. Only one is allowed to be sent for a given goal point. "
"translate_mobile_base = {0} and rotate_mobile_base = {1}").format(self.goal_translate_mobile_base['position'],
self.goal_rotate_mobile_base['position'])
invalid_goal_callback(err_str)
return False
else:
goal_pos = point.positions[self.index] if len(point.positions) > self.index else None
if goal_pos is None:
err_str = ("Received goal point with positions array length={0}. This joint ({1})'s index "
"is {2}. Length of array must cover all joints listed in "
"commanded_joint_names.").format(len(point.positions), self.name, self.index)
invalid_goal_callback(err_str)
return False
self.goal['position'] = self.ros_to_mechaduino(goal_pos, kwargs['manipulation_origin'], fail_out_of_range_goal)
self.goal['velocity'] = point.velocities[self.index] if len(point.velocities) > self.index else None
self.goal['acceleration'] = point.accelerations[self.index] if len(point.accelerations) > self.index else None
self.goal['contact_threshold'] = point.effort[self.index] if len(point.effort) > self.index else None
if self.goal['position'] is None:
err_str = ("Received {0} goal point that is out of bounds. "
"Range = {1}, but goal point = {2}.").format(self.name, self.range, goal_pos)
invalid_goal_callback(err_str)
return False
return True
def ros_to_mechaduino(self, ros_ros, manipulation_origin, fail_out_of_range_goal):
ros_pos = hm.bound_ros_command(self.range, ros_ros, fail_out_of_range_goal)
return (manipulation_origin['x'] + ros_pos) if ros_pos is not None else None
def init_execution(self, robot, robot_status, **kwargs):
self.startx = robot_status['base']['x']
self.starty = robot_status['base']['y']
self.starttheta = robot_status['base']['theta']
self.base_status = robot_status['base']
if self.active:
if self.active_translate_mobile_base or self.active_rotate_mobile_base:
(_, mobile_base_error_m), (_, mobile_base_error_rad) = self.update_execution(robot_status)
if mobile_base_error_m is not None:
robot.base.translate_by(mobile_base_error_m,
v_m=self.goal_translate_mobile_base['velocity'],
a_m=self.goal_translate_mobile_base['acceleration'],
contact_thresh_N=self.goal_translate_mobile_base['contact_threshold'])
elif mobile_base_error_rad is not None:
robot.base.rotate_by(mobile_base_error_rad,
v_r=self.goal_rotate_mobile_base['velocity'],
a_r=self.goal_rotate_mobile_base['acceleration'],
contact_thresh_N=self.goal_rotate_mobile_base['contact_threshold'])
else:
robot.base.translate_by(self.update_execution(robot_status)[1],
v_m=self.goal['velocity'],
a_m=self.goal['acceleration'],
contact_thresh_N=self.goal['contact_threshold'])
def update_execution(self, robot_status, **kwargs):
        success_callback = kwargs.get('success_callback')
currx = robot_status['base']['x']
curry = robot_status['base']['y']
currtheta = robot_status['base']['theta']
self.base_status = robot_status['base']
self.error = None
self.error_translate_mobile_base_m = None
self.error_rotate_mobile_base_rad = None
if self.active:
if self.active_translate_mobile_base or self.active_rotate_mobile_base:
if self.goal_translate_mobile_base['position'] is not None:
if (robot_status['base']['left_wheel']['in_guarded_event'] or \
robot_status['base']['right_wheel']['in_guarded_event']) and \
success_callback:
success_callback("translate_mobile_base contact detected.")
return True
dist = np.sqrt(np.square(currx - self.startx) + np.square(curry - self.starty))
self.error_translate_mobile_base_m = self.goal_translate_mobile_base['position'] - (dist * np.sign(self.goal_translate_mobile_base['position']))
return [('translate_mobile_base', self.error_translate_mobile_base_m), ('rotate_mobile_base', self.error_rotate_mobile_base_rad)]
elif self.goal_rotate_mobile_base['position'] is not None:
if (robot_status['base']['left_wheel']['in_guarded_event'] or \
robot_status['base']['right_wheel']['in_guarded_event']) and \
success_callback:
success_callback("rotate_mobile_base contact detected.")
return True
rot = hm.angle_diff_rad(currtheta, self.starttheta)
self.error_rotate_mobile_base_rad = hm.angle_diff_rad(self.goal_rotate_mobile_base['position'], rot)
return [('translate_mobile_base', self.error_translate_mobile_base_m), ('rotate_mobile_base', self.error_rotate_mobile_base_rad)]
else:
if (robot_status['base']['left_wheel']['in_guarded_event'] or \
robot_status['base']['right_wheel']['in_guarded_event']) and \
success_callback:
success_callback("{0} contact detected.".format(self.name))
return True
self.error = self.goal['position'] - currx
return self.name, self.error
return None
def goal_reached(self):
if self.active:
if self.active_translate_mobile_base or self.active_rotate_mobile_base:
if self.active_translate_mobile_base:
reached = (abs(self.error_translate_mobile_base_m) < self.acceptable_mobile_base_error_m)
if not (abs(self.error_translate_mobile_base_m) < self.excellent_mobile_base_error_m):
# Use velocity to help decide when the low-level command has been finished
speed = np.sqrt(np.square(self.base_status['x_vel']) + np.square(self.base_status['y_vel']))
reached = reached and (speed < self.min_m_per_s)
return reached
elif self.active_rotate_mobile_base:
reached = (abs(self.error_rotate_mobile_base_rad) < self.acceptable_mobile_base_error_rad)
if not (abs(self.error_rotate_mobile_base_rad) < self.excellent_mobile_base_error_rad):
# Use velocity to help decide when the low-level command has been finished
speed = self.base_status['theta_vel']
reached = reached and (abs(speed) < self.min_rad_per_s)
return reached
else:
return (abs(self.error) < self.acceptable_joint_error)
return True
|
#%%
# These algorithms were written early in my learning, before I knew about
# time and space complexity, so they are for educational purposes only.
import os
import pandas as pd
#%%
class Searches():
def __init__(self, paths_csv_file):
        # read the paths CSV into a DataFrame and initialise the extended-node set
columns=['from', 'to', 'length', 'adm_huristic']
self.paths = pd.read_csv(paths_csv_file, delimiter=',', names=columns)
self.extended_nodes_set = set()
def path_to_goal(self):
        # format the found path for (hopefully) aesthetic printing
path = []
try:
ans_len = len(self.ans_path)
if self.ans_path[2] == 0: self.cost = 'NaN'
else: self.cost = self.ans_path[2]
for i in range(3, ans_len):
if i == ans_len-1:
path.append(self.ans_path[i])
else:
path.append(self.ans_path[i])
path.append('-->')
return " ".join(path)
except: pass
def initialise(self, starting_node, end_node, verbose):
self.start = starting_node
self.goal = end_node
# keeping nodes to discover
self.queue = [[0,0,0]]
# checking, if start and goal is in the paths
if self.start in self.paths['from'].values and self.goal in self.paths['to'].values:
self.queue[0].append(self.start)
            if verbose > 0: print('initialising... ', self.start)
else:
raise KeyError('check start and end nodes')
def extend(self, current_node, extended_nodes=True):
        # extend the given node by looking up its successors in the paths DataFrame
temp_ext_list = self.paths.where(self.paths['from'] == current_node)
temp_ext_list = temp_ext_list.dropna(subset=['to'])
temp_ext_list = temp_ext_list.drop_duplicates(subset=['to'])
        # drop successors that have already been extended, so they are not explored again
        if extended_nodes:
            temp_ext_list = temp_ext_list[~temp_ext_list['to'].isin(self.extended_nodes_set)]
return temp_ext_list
def enqeueing(self, search_type, current_node, temp_ext_list, verbose):
        '''for hill-climbing, newly extended nodes are collected in a temporary
        list, sorted, and pushed onto the front of the queue (DFS manner)'''
        if search_type == 'hill_climbing': temp_queue = []
if verbose > 0:
print('extending... ', current_node)
if verbose > 1:
print('traversing over',self.queue)
        # remove the first node from the queue for further enqueueing
popped = self.queue.pop(0)
        ''' build the successor entries to be explored, depending on the search type '''
def enq(search_type, current_node, temp_ext_list):
y = popped[:]
y.append(to)
if not(search_type=='bfs' or search_type=='dfs'):
try:
y[2] += (temp_ext_list['length'].where(temp_ext_list['to'] == to).dropna().values[0])
y[0] = (temp_ext_list['adm_huristic'].where(temp_ext_list['to'] == to).dropna().values[0])
except: pass
if search_type == 'A*':
try: y[1] = y[0]+y[2]
except: pass
return y
        for to in temp_ext_list['to'].values:
            if verbose > 0: print('updating...', to)
            y = enq(search_type, current_node, temp_ext_list)
            if search_type == 'dfs': self.queue.insert(0, y)
            elif search_type == 'hill_climbing': temp_queue.append(y)
            else: self.queue.append(y)
if search_type == 'hill_climbing':
temp_queue = sorted(temp_queue, key=(lambda x: x[0]), reverse=True)
for i in temp_queue:
self.queue.insert(0, i)
print('')
return self.queue
def run(self, starting_node, end_node, search_type='bfs', extended_nodes=True, max_depth=50, beam=2, verbose=0):
if verbose > 0: print(f'starting search with {search_type}')
self.initialise(starting_node, end_node, verbose)
if extended_nodes == True:
if verbose > 1: print('extended nodes being captured...')
current = self.start
found = False
self.depth = 0
while not found and self.depth < max_depth:
# checking if condition for extended list is True
if extended_nodes == True:
                '''if the current node has already been extended, drop it from
                the queue so it is not explored again, and pick the next node
                as the current node'''
if current in self.extended_nodes_set:
self.queue.pop(0)
current = self.queue[0][-1]
continue
if extended_nodes == True:
if verbose > 1:
print('extended nodes', self.extended_nodes_set)
# extending current node
temp_ext_list = self.extend(current, extended_nodes)
# updating enquing list depending on search type
if search_type == 'best_first':
self.queue = self.enqeueing(search_type, current, temp_ext_list, verbose)
min_list = min(self.queue, key=(lambda x: x[0]))
                self.queue.remove(min_list)
self.queue.insert(0, min_list)
elif search_type == 'hill_climbing':
self.queue = self.enqeueing(search_type, current, temp_ext_list, verbose)
elif search_type == 'bfs':
self.queue = self.enqeueing(search_type, current, temp_ext_list, verbose)
elif search_type == 'beam':
self.queue = self.enqeueing(search_type, current, temp_ext_list, verbose)
try: self.queue = sorted(self.queue, key=(lambda x: x[0]))[:beam]
except: pass
elif search_type == 'dfs':
self.queue = self.enqeueing(search_type, current, temp_ext_list, verbose)
elif search_type == 'A*':
self.queue = self.enqeueing(search_type, current, temp_ext_list, verbose)
min_list = min(self.queue, key=(lambda x: x[1]))
self.queue.remove(min_list)
self.queue.insert(0, min_list)
elif search_type == 'branch_and_bound':
self.queue = self.enqeueing(search_type, current, temp_ext_list, verbose)
min_list = min(self.queue, key=(lambda x: x[2]))
self.queue.remove(min_list)
self.queue.insert(0, min_list)
'''
checking if goal node is found
'''
if search_type== 'branch_and_bound' or search_type=='A*':
if self.goal == self.queue[0][-1]:
self.ans_path = self.queue[0]
found = True
if verbose > 0: print('Goal Has Been Found!')
return
else:
for i in range(len(self.queue)):
if self.goal == self.queue[i][-1]:
self.ans_path = self.queue[i]
found = True
if verbose > 0: print('Goal Has Been Found!')
return
# adding current node in the extended nodes list and updating it
self.extended_nodes_set.add(current)
current = self.queue[0][-1]
self.depth = len(self.queue[0])-3
else:
            print('No path to the goal was found; check whether max_depth is sufficient and try increasing it')
#%%
if __name__ == "__main__":
# search_types = ['dfs', 'bfs', 'hill_climbing', 'beam', 'best_first','branch_and_bound', 'A*']
# verbose = [0, 1, 2]
my_path = os.getcwd()
# path = os.path.join(my_path, 'new_path.csv')
path = os.path.join(my_path, 'paths3.csv')
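    # paths3.csv is assumed (it is not shown here) to hold one directed edge per
    # row, with no header, matching the columns read in __init__, e.g.:
    #   from,to,length,adm_huristic
    #   s,a,3,7
    #   a,g,4,0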
s = Searches(path)
# print(s.paths)
s.run('s','g', search_type='bfs', max_depth=10, extended_nodes=True, verbose=0)
print(s.path_to_goal())
# print(s.depth)
# print(s.cost)
|
from turkey import Turkey
class WildTurkey(Turkey):
def gobble(self):
print('Gobble gobble')
def fly(self):
print('I am flying a short distance')
|
import argparse
import joblib
from typing import Mapping
from pathlib import Path
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, accuracy_score
from sklearn.ensemble import RandomForestClassifier
from numpy import where
class RandomForest(object):
def __init__(
self,
data_path: str,
max_features: int = 1000,
test_size: float = .3,
verbose: bool = True,
):
"""
Initialize a random forest model.
Arguments:
- data_path: represents the path to the folder where the data is stored.
- max_features: represents the max number of features used in the tf-idf vectorizer
(default to 1000)
- test_size: represents the fraction of the training data to be used as validation set
(default to 0.3) NOTE: at the moment this is not useful since the validation test
is not used.
- verbose: whether to print information (default to True)
"""
if max_features < 1:
raise ValueError("Please specify a value greater than 0 for max_features!")
        if test_size < 0 or test_size > 1:
            raise ValueError("Please specify a value between 0 and 1 for test_size!")
self.data_path = Path(data_path)
self.test_size = test_size
self.max_features = max_features
self.model = None
self.vect = None
self.genres = None
self.verbose = verbose
def load(self) -> None:
"""Load the model and vectorizer previously stored.
"""
if self.verbose:
print("Loading model..")
model_path = self.data_path / "model.pkl"
if not model_path.exists():
raise FileNotFoundError(f"There is no model stored at {model_path}")
# TODO: we should check the model is a dict
# containing the required keys
model_dict = joblib.load(model_path)
self.model = model_dict["model"]
self.vect = model_dict["vect"]
self.genres = model_dict["genres"]
return
def train(self, features, targets, genres, save: bool = True) -> None:
"""Train a Random Forest Classifier.
Arguments:
features: Pandas series where each value is a string
representing the description of the corresponding movie
which will be used for training.
targets: Pandas DataFrame where row i is a 0/1 vector
representing the presence of the corresponding label.
genres: list of strings representing the genres.
save: boolean specifying whether to save the trained model.
"""
if len(features) != len(targets):
raise ValueError("Length of features and targets differ!")
if len(features) < len(genres):
raise RuntimeError("Please consider using a bigger training set!")
if len(genres) == 0:
raise RuntimeError("No genres provided!")
# initialize the random forest and vectorizer
self.model = RandomForestClassifier()
self.vect = TfidfVectorizer(
max_features=self.max_features, stop_words="english", lowercase=True
)
self.genres = genres
# split dataset
X_train, X_valid, y_train, y_valid = train_test_split(
features, targets, test_size=self.test_size, random_state=42
)
# transform descriptions into arrays
X_train_vec = self.vect.fit_transform(X_train)
X_valid_vec = self.vect.transform(X_valid)
# fit the model
if self.verbose:
print("Training the model...")
self.model.fit(X_train_vec, y_train)
# save the model
if save:
if self.verbose:
print("Saving the model...")
model_dict = dict()
model_dict["model"] = self.model
model_dict["vect"] = self.vect
model_dict["genres"] = self.genres
joblib.dump(model_dict, self.data_path / "model.pkl")
if self.verbose:
print("Done!")
# display results on training and validation set
if self.verbose:
train_pred = self.model.predict(X_train_vec)
valid_pred = self.model.predict(X_valid_vec)
# print results
print("Classification Report")
print(
"Training:\n",
classification_report(
y_true=y_train, y_pred=train_pred, target_names=self.genres
),
)
print(
"Validation:\n",
classification_report(
y_true=y_valid, y_pred=valid_pred, target_names=self.genres
),
)
print("Accuracy")
train_acc = accuracy_score(y_true=y_train, y_pred=train_pred)
valid_acc = accuracy_score(y_true=y_valid, y_pred=valid_pred)
print("Traning: ", train_acc)
print("Validation: ", valid_acc)
return
def predict(self, title: str, description: str) -> Mapping[str, str]:
"""Predict movie genre based on description.
Arguments:
- title: title of the movie, it's not used for the prediction
- description: short description of the movie, used for the prediction
"""
        if not isinstance(title, str):
            raise TypeError("Please provide title as string.")
        if not isinstance(description, str):
            raise TypeError("Please provide description as string.")
if not self.model or not self.vect:
raise RuntimeError("Model not trained!")
if self.verbose:
print("Now predicting...")
# transform description into array
feat_vec = self.vect.transform([description])
# generate prediction
pred = self.model.predict(feat_vec)
try:
genre_ind = where(pred[0] == 1)[0][0]
result = {
"title": title,
"description": description,
"genre": self.genres[genre_ind],
}
except IndexError:
print(
"Sorry, the model was not able to classify this movie :(\n\
Try changing the description!"
)
result = {"title": title, "description": description, "genre": "Not found"}
return result
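# Hedged usage sketch (assumes a model was previously trained and saved under
# ./data by train(); the title and description below are placeholders):
if __name__ == "__main__":
    rf = RandomForest(data_path="data")
    rf.load()
    print(rf.predict("Alien", "A crew aboard a spaceship is hunted by a creature."))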
|
# print a diamond of letters: 'A' at the tips, widening to the n-th letter
# in the middle row, then narrowing back
n = int(input())
for i in range(n):
print(' '*(n-i-1)+chr(65+i),end=' ')
if i>0:
print(' '*(2*i-1)+chr(65+i),end='')
print()
for i in range(n-1):
print(' '*(i+1)+chr(65+n-i-2),end='')
if i<n-2:
print(' '*(2*(n-i-2)-1)+' '+chr(65+n-i-2)) |
# ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
from pathlib import Path
from typing import Dict, List, Tuple
import param
import pytest
from InnerEye.Azure.azure_config import AzureConfig
from InnerEye.Common import fixed_paths
from InnerEye.Common.generic_parsing import GenericConfig
from InnerEye.Common.output_directories import OutputFolderForTests
from InnerEye.Common.spawn_subprocess import spawn_and_monitor_subprocess
from InnerEye.ML.config import SegmentationModelBase
from InnerEye.ML.deep_learning_config import CHECKPOINT_FOLDER
from InnerEye.ML.model_inference_config import ModelInferenceConfig
from InnerEye.ML.run_ml import MLRunner
class SubprocessConfig(GenericConfig):
"""
Config class to store settings for sub-process spawning
"""
process: str = param.String(None, doc="Path to the process to spawn")
args: List[str] = param.List(instantiate=True, doc="List of arguments to pass to the spawned process")
env: Dict[str, str] = param.Dict(instantiate=True, doc="Dictionary of environment variables "
"to override for this process")
def spawn_and_monitor_subprocess(self) -> Tuple[int, List[str]]:
return spawn_and_monitor_subprocess(process=self.process, args=self.args, env=self.env)
def create_checkpoints(model_config: SegmentationModelBase, is_ensemble: bool) -> Tuple[List[Path], List[Path]]:
"""
Creates 1 or 2 empty checkpoint files in the model's checkpoint folder, and returns
the paths of those files, both absolute paths and paths relative to the checkpoint folder.
:param model_config: The model configuration, where a correct output folder must be set.
:param is_ensemble: If true, 2 checkpoints (simulating an ensemble run) will be created. If false, only a
single checkpoint will be created.
:return: Tuple[absolute checkpoint paths, relative checkpoint paths]
"""
# To simulate ensemble models, there are two checkpoints, one in the root dir and one in a folder
folder = model_config.checkpoint_folder
checkpoints_absolute = [folder / "foo.ckpt"]
if is_ensemble:
checkpoints_absolute.append(folder / "other" / "foo2.ckpt")
for checkpoint in checkpoints_absolute:
checkpoint.parent.mkdir(parents=True, exist_ok=True)
checkpoint.touch()
checkpoints_relative = [checkpoint.relative_to(folder) for checkpoint in checkpoints_absolute]
return checkpoints_absolute, checkpoints_relative
@pytest.mark.parametrize("is_ensemble", [True, False])
@pytest.mark.parametrize("extra_code_directory", ["TestsOutsidePackage", ""])
def test_copy_child_paths_to_folder(is_ensemble: bool,
extra_code_directory: str,
test_output_dirs: OutputFolderForTests) -> None:
azure_config = AzureConfig(extra_code_directory=extra_code_directory)
fake_model = SegmentationModelBase(should_validate=False)
fake_model.set_output_to(test_output_dirs.root_dir)
# To simulate ensemble models, there are two checkpoints, one in the root dir and one in a folder
checkpoints_absolute, checkpoints_relative = create_checkpoints(fake_model, is_ensemble)
# Simulate a project root: We can't derive that from the repository root because that might point
# into Python's package folder
project_root = Path(__file__).parent.parent
ml_runner = MLRunner(model_config=fake_model, azure_config=azure_config, project_root=project_root)
model_folder = test_output_dirs.root_dir / "final"
ml_runner.copy_child_paths_to_folder(model_folder=model_folder, checkpoint_paths=checkpoints_absolute)
expected_files = [
fixed_paths.ENVIRONMENT_YAML_FILE_NAME,
fixed_paths.MODEL_INFERENCE_JSON_FILE_NAME,
"InnerEye/ML/runner.py",
"InnerEye/ML/model_testing.py",
"InnerEye/Common/fixed_paths.py",
"InnerEye/Common/common_util.py",
]
for r in checkpoints_relative:
expected_files.append(f"{CHECKPOINT_FOLDER}/{r}")
for expected_file in expected_files:
assert (model_folder / expected_file).is_file(), f"File missing: {expected_file}"
trm = model_folder / "TestsOutsidePackage/test_register_model.py"
if extra_code_directory:
assert trm.is_file()
else:
assert not trm.is_file()
def test_model_inference_config() -> None:
# check if normal path works
normal_path = "/".join(list(map(str, range(1, 91))))
assert len(normal_path) == 260
ModelInferenceConfig(model_name="Test", checkpoint_paths=[normal_path], structure_names=["organ1", "tumour2"],
colours=[(255, 0, 0), (255, 0, 0)],
fill_holes=[True, False])
    # check that a long path fails, i.e. length > 260
long_path = normal_path + "/"
assert len(long_path) == 261
with pytest.raises(ValueError):
ModelInferenceConfig(model_name="Test", checkpoint_paths=[long_path], structure_names=["organ1", "tumour2"],
colours=[(255, 0, 0), (255, 0, 0)],
fill_holes=[True, False])
|
class Nave:
id_nave = None
id_fabricante = None
nome = None
modelo = None
tripulacao = None
passageiros = None
capacidade_carga = None
preco = None
def getIdNave(self):
return self.id_nave
def setIdNave(self, idnave):
self.id_nave = idnave
def getIdFabricante(self):
return self.id_fabricante
def setIdFabricante(self, idFabricante):
self.id_fabricante = idFabricante
def getNome(self):
return self.nome
def setNome(self, nome):
self.nome = nome
def getModelo(self):
return self.modelo
def setModelo(self, modelo):
self.modelo = modelo
def getTripulacao(self):
return self.tripulacao
def setTripulacao(self, tripulacao):
self.tripulacao = tripulacao
def getPassageiros(self):
return self.passageiros
def setPassageiros(self, passageiros):
self.passageiros = passageiros
def getCapacidadeCarga(self):
return self.capacidade_carga
def setCapacidadeCarga(self, capacidade_carga):
self.capacidade_carga = capacidade_carga
def getPreco(self):
return self.preco
def setPreco(self, preco):
self.preco = preco
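# For reference, a more idiomatic sketch of the same data holder as a
# dataclass. This is an assumed alternative, not part of the original API;
# plain attribute access replaces the getters and setters above.
from dataclasses import dataclass
from typing import Optional
@dataclass
class NaveDataclass:
    id_nave: Optional[int] = None
    id_fabricante: Optional[int] = None
    nome: Optional[str] = None
    modelo: Optional[str] = None
    tripulacao: Optional[int] = None
    passageiros: Optional[int] = None
    capacidade_carga: Optional[float] = None
    preco: Optional[float] = None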
|
# 06
# group sizes with histogram
import tacoma as tc
from demo_utils import pl, disp
from tacoma.analysis import plot_group_size_histogram
hs13 = tc.load_json_taco('~/.tacoma/hs13.taco')
groups = tc.measure_group_sizes_and_durations(hs13)
fig, ax = pl.subplots(1, 1)
plot_group_size_histogram(groups, ax)
pl.show()
|
from django.urls import path, include
from rest_framework import routers
from greenbudget.app.history.urls import (
actuals_history_urlpatterns, actual_history_urlpatterns)
from .views import (
ActualsViewSet, AccountActualsViewSet, SubAccountActualsViewSet,
BudgetActualsViewSet)
app_name = "actual"
account_actuals_router = routers.SimpleRouter()
account_actuals_router.register(
r'', AccountActualsViewSet, basename='actual')
account_actuals_urlpatterns = account_actuals_router.urls
subaccount_actuals_router = routers.SimpleRouter()
subaccount_actuals_router.register(
r'', SubAccountActualsViewSet, basename='actual')
subaccount_actuals_urlpatterns = subaccount_actuals_router.urls
budget_actuals_router = routers.SimpleRouter()
budget_actuals_router.register(
r'', BudgetActualsViewSet, basename='actual')
budget_actuals_urlpatterns = budget_actuals_router.urls + [
path('history/', include(actuals_history_urlpatterns)),
]
router = routers.SimpleRouter()
router.register(r'', ActualsViewSet, basename='actual')
urlpatterns = router.urls + [
path('<int:actual_pk>/', include([
path('history/', include(actual_history_urlpatterns)),
]))
]
|
"""
The visualization module contains tools for real-time visualization as
well as utilities to help in plotting.
"""
from .instrument_monitor import InstrumentMonitor
from .pyqt_plotmon import PlotMonitor_pyqt
__all__ = ["PlotMonitor_pyqt", "InstrumentMonitor"]
|
GLOBAL_SETTINGS = {
    "SITES": ["enwiki", "frwiki", "arwiki", "ruwiki"],
    "NAME_INSTANCES": ["Q101352", "Q202444", "Q3409032", "Q11879590", "Q12308941", "Q1243157", "Q1076664", "Q110874"],
    "DISAMBIGUATION_INSTANCES": ["Q4167410"],
    "CREDENTIALS_PATH": '../conf/local/credentials.yml'
}
|
# Generated by Django 3.0.3 on 2020-02-22 00:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('api', '0003_auto_20200217_1548'),
]
operations = [
migrations.RemoveField(
model_name='service',
name='image',
),
migrations.AlterField(
model_name='file',
name='url',
field=models.URLField(),
),
migrations.CreateModel(
name='ServiceImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='image_service', to='api.File')),
('service', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='service_image', to='api.Service')),
],
),
]
|
""" An example how to plot animation like in README
Please replace the regex string for your batch log output
"""
from hsicbt.utils import plot
import matplotlib.pyplot as plt
import numpy as np
import glob
def plot_needle_distribution():
regex = "./assets/activation/raw/070820_152112_needle-hsictrain-mnist-*.npy"
filepaths = sorted(glob.glob(regex))
for idx, filepath in enumerate(filepaths):
plot.plot_1d_activation_kde(filepath)
plt.title("Epoch {}".format(idx), fontsize=30)
plot.save_figure(filepath[:-3]+"png")
def plot_batch_hsicsolve():
regex = "./assets/activation/raw/200807_180226_hsic-solve-hsictrain-mnist_batch-*.npy"
filepaths = sorted(glob.glob(regex))[::2]
for idx, filepath in enumerate(filepaths):
title = "Iteration {} @Epoch 1".format(idx*2)
plot.plot_activation_distribution(filepath, title)
out_path = filepath[:-8]+"{:04d}.png".format(idx)
plot.save_figure(out_path)
if __name__ == "__main__":
plot_needle_distribution()
plot_batch_hsicsolve()
# Then use the ImageMagick convert command to make a gif animation
# convert -delay 2 /path/to/name.*.png /path/out.gif
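# The same step from Python rather than the shell (an assumed sketch; the
# ImageMagick `convert` binary must be on PATH and the paths are placeholders):
# import subprocess
# frames = sorted(glob.glob('/path/to/name.*.png'))
# subprocess.run(['convert', '-delay', '2', *frames, '/path/out.gif'], check=True)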
|
'''
The while loop in Python.
Used to perform actions repeatedly while a condition remains true.
'''
x = 0
while x < 10:
print(x)
x = x + 1
print('END!')
print('-'*12)
y = 0
while y < 10:
    if y == 3:
        y = y + 1
        # continue  # uncommenting would skip the print below for this iteration
        # break     # uncommenting would exit the loop when y reaches 3
    print(y)
    y = y + 1
print('-'*12)
x = 0  # column
while x < 10:
    y = 0  # row
print(f'Para x = {x}')
while y < 5:
print(f'Coord. ({x}, {y})')
y += 1
x += 1
print('Done!')
print('-'*12)
|
import json
from os import walk
import numpy as np
import pandas as pd
import plotly.express as px
from py4j.java_gateway import JavaGateway
import itertools
DISTR_SRC_FILE_PATH = '../../java/pt/ist/socialsoftware/mono2micro/utils/mojoCalculator/src/main/resources/distrSrc.rsf'
DISTR_TARGET_FILE_PATH = '../../java/pt/ist/socialsoftware/mono2micro/utils/mojoCalculator/src/main/resources' \
'/distrTarget.rsf'
# Warning:
# Run MoJo Calculator Java code before running this script
# More info on ./mojoCalculator/ folder
# Calculates how many changes have to be made in the best decomposition
# of N = n, so that the best decomposition of N = n + 1 can be obtained
# incrementally from the first.
data_dict = {
'transition': [],
'mojoFM': [],
'hoverText': [],
'entityCount': []
}
def getClusters(complexityWeights):
cutName = ",".join(
[
str(int(complexityWeights[0])),
str(int(complexityWeights[1])),
str(int(complexityWeights[2])),
str(int(complexityWeights[3])),
str(int(n))
]
) + ".json"
with open('../codebases/' +
parsedFileName + '/analyser/cuts/' + cutName) as f:
dataFile = json.load(f)
return dataFile['clusters']
# each entry in the list 'clustersForN' is the cluster list of one specific decomposition
# calculates the MoJoFM result between decomposition[n-1] and each possible decomposition
# with n-1 clusters derived from the decomposition with n clusters, i.e., each parent
# decomposition from which the n-cluster decomposition can be obtained incrementally
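# Example: given clustersN = {"0": [e1], "1": [e2], "2": [e3]}, merging one pair
# of clusters yields the three candidate parents with n-1 = 2 clusters:
# ([e1, e2], [e3]), ([e1, e3], [e2]) and ([e2, e3], [e1]); the candidate with
# the highest MoJoFM against the actual n-1 decomposition is kept below.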
def calculateTransitionMoJos(clustersForN):
for i in range(1, len(clustersForN)):
clustersN = clustersForN[i]
clustersNLess1 = clustersForN[i - 1]
clustersNLess1FromN_Comb = [] # matrix [combinationN][ClustersList]
numOfClustersN = len(clustersN)
lst1 = list(range(numOfClustersN))
# possible "parent decompositions" of a a system with n clusters
combinationsList = list(itertools.combinations(lst1, 2))
for comb in combinationsList:
possibleCombinations = []
clusterMerge1Index = comb[0]
clusterMerge2Index = comb[1]
remainingIndexes = list(range(numOfClustersN))
remainingIndexes.remove(clusterMerge1Index)
remainingIndexes.remove(clusterMerge2Index)
possibleCombinations.append(clustersN[str(clusterMerge1Index)] + clustersN[str(clusterMerge2Index)])
for j in remainingIndexes:
possibleCombinations.append(clustersN[str(j)])
clustersNLess1FromN_Comb.append(possibleCombinations)
distrSrc = ""
entityCount1 = 0
for clusterKey in clustersNLess1.keys():
for entity in clustersNLess1[clusterKey]:
entityCount1 += 1
distrSrc += "contain " + clusterKey + " " + str(entity) + "\n"
text_file = open(DISTR_SRC_FILE_PATH, "w+")
text_file.write(distrSrc)
text_file.close()
        maxResult = 0  # a MoJoFM result of 100 means both decompositions are equal
        # clusters aggregate = one possible set of n-1 clusters derived from the actual set of n clusters
for clustersAggregate in clustersNLess1FromN_Comb:
distrTarget = ""
for j in range(0, len(clustersAggregate)):
clusterI = clustersAggregate[j]
for entity in clusterI:
distrTarget += "contain " + str(j) + " " + str(entity) + "\n"
            # possible decomposition obtained
            # Calculate the MoJoFM between src and target
text_file = open(DISTR_TARGET_FILE_PATH, "w+")
text_file.write(distrTarget)
text_file.close()
# run Java to calculate MoJoFM
try:
gateway = JavaGateway()
result = gateway.entry_point.runMoJo()
except Exception:
print("Warning: Entry point for the MoJoFM calculator not running")
raise SystemExit
if result > maxResult:
maxResult = result
if result == 100:
break
transitionString = str(numOfClustersN - 1) + '->' + str(numOfClustersN)
data_dict['mojoFM'].append(maxResult)
data_dict['entityCount'].append(entityCount1)
data_dict['transition'].append(transitionString)
data_dict['hoverText'].append(file)
files = []
for (dirpath, dirnames, filenames) in walk("./data/"):
files.extend(filenames)
break
for file in files:
print(file)
data = pd.read_csv("./data/" + file)
minComplexityClusters = []
for n in range(3, 11):
minComplexity = float("inf")
minComplexityWeights = [] # a, w, r, s
for entry in data.values:
if entry[0] != n:
continue
if entry[8] < minComplexity:
minComplexity = entry[8]
minComplexityWeights = [entry[1], entry[2], entry[3], entry[4]]
if minComplexity == float("inf"): # no entries for this N
continue
parsedFileName = "_".join(file.split("_")[2:])
parsedFileName = parsedFileName[0:len(parsedFileName) - 4]
minComplexityClusters.append(getClusters(minComplexityWeights))
if len(minComplexityClusters) <= 1: # n=3 only
continue
calculateTransitionMoJos(minComplexityClusters)
data_dict = pd.DataFrame(data_dict)
# box plot style
boxFig = px.box(
data_dict,
x="transition",
y="mojoFM",
# hover_name='hoverText',
title='Transition From Best N\'s decomposition to N+1\'s closest parent',
labels={
'transition': 'Transition',
'mojoFM': 'MoJoFM'
}
# points='all',
# range_y=[0, 100]
)
# boxFig.update_traces(marker=dict(size=2))
boxFig.show()
# boxFig.write_html('incrementalMigrationEvaluation.html')
entityCountGE80 = []
entityCountLT80 = []
for mojo, entityCount in zip(data_dict['mojoFM'], data_dict['entityCount']):
if mojo >= 80:
entityCountGE80.append(entityCount)
else:
entityCountLT80.append(entityCount)
print('mean entityCount GE 80: ' + str(np.mean(entityCountGE80)))
print('std GE 80: ' + str(np.std(entityCountGE80)))
print('mean entityCount LT 80: ' + str(np.mean(entityCountLT80)))
print('std entityCount LT 80: ' + str(np.std(entityCountLT80)))
print()
print("100 > mojoFM >= 90:")
print(data_dict[(data_dict['mojoFM'] >= 90)].count() / data_dict[:].count())
print()
print("90 > mojoFM >= 80:")
print(data_dict[(data_dict['mojoFM'] < 90) & (data_dict['mojoFM'] >= 80)].count() / data_dict[:].count())
print()
print("80 > mojoFM:")
print(data_dict[(data_dict['mojoFM'] < 80)].count() / data_dict[:].count())
print()
print("mojoFM == 100%:")
print(data_dict[(data_dict['mojoFM'] == 100)].count() / data_dict[:].count())
|
# Copyright (c) 2014-2020 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from invoke import Context
from paramiko import RSAKey
from mock import patch, MagicMock, Mock
from cloudify import ctx
from cloudify.workflows import local
from cloudify.decorators import workflow
from cloudify.endpoint import LocalEndpoint
from cloudify.workflows import ctx as workflow_ctx
from cloudify.exceptions import NonRecoverableError
from fabric_plugin import tasks
from fabric_plugin._compat import StringIO
class BaseFabricPluginTest(unittest.TestCase):
def setUp(self):
self.default_fabric_env = {
'host_string': 'test',
'user': 'test',
'key_filename': 'test',
}
self.bootstrap_context = {}
LocalEndpoint.get_bootstrap_context = lambda _: self.bootstrap_context
def _execute(self,
operation,
connection=None,
fabric_env=None,
task_name=None,
tasks_file=None,
task_properties=None,
task_mapping=None,
commands=None,
bootstrap_context=None,
script_path=None,
process=None,
ip=None,
custom_input='value',
use_sudo=False,
non_recoverable_error_exit_codes=None):
bootstrap_context = bootstrap_context or {}
self.bootstrap_context.update(bootstrap_context)
inputs = {
'fabric_env': fabric_env or self.default_fabric_env,
'task_name': task_name or 'stub',
'commands': commands or [],
'use_sudo': use_sudo,
'tasks_file': tasks_file or 'fabric_tasks.py',
'task_properties': task_properties or {},
'task_mapping': task_mapping or '',
'ip': ip or '',
'script_path': script_path or '',
'process': process or {},
'custom_input': custom_input,
'non_recoverable_error_exit_codes':
non_recoverable_error_exit_codes or [],
}
blueprint_path = os.path.join(os.path.dirname(__file__),
'blueprint', 'blueprint.yaml')
self.env = local.init_env(blueprint_path,
name=self._testMethodName,
inputs=inputs)
self.conn = connection or MockConnection()
self.conn_factory = Mock(return_value=self.conn)
with patch('fabric_plugin.tasks.Connection', self.conn_factory):
result = self.env.execute('execute_operation',
parameters={'operation': operation},
task_retry_interval=0,
task_retries=0)
return result
class MockConnection(MagicMock, Context):
def __init__(self, **kw):
super(MockConnection, self).__init__()
self.run = Mock()
self.sudo = Mock()
@property
def cwd(self):
return '/'
class FabricPluginTest(BaseFabricPluginTest):
def _get_conn_kwargs(self):
return self.conn_factory.mock_calls[-1].kwargs
def test_missing_tasks_file(self):
with self.assertRaisesRegexp(NonRecoverableError,
"Could not get 'missing.py'"):
self._execute('test.run_task', tasks_file='missing.py')
def test_bad_tasks_file(self):
with self.assertRaisesRegexp(NonRecoverableError, "No module named"):
self._execute('test.run_task', tasks_file='corrupted_file.py')
def test_missing_task(self):
with self.assertRaisesRegexp(NonRecoverableError,
"Could not find task 'missing'"):
self._execute('test.run_task', task_name='missing')
def test_non_callable_task(self):
with self.assertRaisesRegexp(NonRecoverableError,
"is not callable"):
self._execute('test.run_task', task_name='non_callable')
def test_missing_tasks_module(self):
with self.assertRaisesRegexp(NonRecoverableError,
"Could not load 'module_that"):
self._execute('test.run_module_task',
task_mapping='module_that_does_not_exist.some_task')
def test_missing_module_task_attribute(self):
with self.assertRaisesRegexp(NonRecoverableError,
"Could not find 'whoami' in fabric_"):
self._execute(
'test.run_module_task',
task_mapping='fabric_plugin.tests.test_fabric_plugin.whoami')
def test_non_callable_module_task(self):
with self.assertRaisesRegexp(NonRecoverableError,
"is not callable"):
self._execute(
'test.run_module_task',
task_mapping='fabric_plugin.tests.'
'test_fabric_plugin.non_callable')
def test_conn_kwargs(self):
self._execute('test.run_task', task_name='task')
kw = self._get_conn_kwargs()
self.assertEqual(
self.default_fabric_env['user'],
kw['user']
)
self.assertEqual(
self.default_fabric_env['key_filename'],
kw['config']['connect_kwargs']['key_filename']
)
self.assertEqual(
self.default_fabric_env['host_string'],
kw['host']
)
def test_run_task(self):
self._execute('test.run_task', task_name='task')
instance = self.env.storage.get_node_instances()[0]
self.assertEqual(instance.runtime_properties['task_called'], 'called')
def test_run_module_task(self):
self._execute(
'test.run_module_task',
task_mapping='fabric_plugin.tests.test_fabric_plugin.module_task')
instance = self.env.storage.get_node_instances()[0]
self.assertEqual(instance.runtime_properties['task_called'], 'called')
def test_task_properties(self):
self._execute('test.run_task', task_name='test_task_properties',
task_properties={'arg': 'value'})
instance = self.env.storage.get_node_instances()[0]
self.assertEqual(instance.runtime_properties['arg'], 'value')
def _test_run_commands(self, use_sudo=False):
commands = ['command1', 'command2']
connection = MockConnection()
run = 'run'
setattr(
getattr(connection, run), 'return_value',
Mock(stdout='Run command successfully', stderr='')
)
self._execute(
'test.run_commands',
connection=connection,
commands=commands,
use_sudo=use_sudo)
mock_calls = self.conn.run.mock_calls
mock_commands = [args[0] for c, args, kwargs in mock_calls]
if use_sudo:
commands = ["echo '{}' | sudo -i --".format(c) for c in commands]
self.assertEqual(commands, mock_commands)
def test_run_commands(self):
self._test_run_commands()
def test_run_sudo_commands(self):
self._test_run_commands(use_sudo=True)
def test_missing_user(self):
with self.assertRaisesRegexp(NonRecoverableError,
"ssh user definition missing"):
self._execute('test.run_task',
task_name='task',
fabric_env={'password': 'test',
'host_string': 'test'})
def test_missing_key_or_password(self):
with self.assertRaisesRegexp(NonRecoverableError,
"key_filename/key or password"):
self._execute('test.run_task',
task_name='task',
fabric_env={'user': 'test',
'host_string': 'test'})
def test_fabric_env_default_override(self):
# first sanity for no override
self._execute('test.run_task', task_name='task')
kw = self._get_conn_kwargs()
self.assertEqual(
kw['config']['timeouts']['connect'],
tasks.FABRIC_ENV_DEFAULTS['connect_timeout'])
# now override
invocation_fabric_env = self.default_fabric_env.copy()
invocation_fabric_env['connect_timeout'] = 1000000
self._execute(
'test.run_task',
task_name='task',
fabric_env=invocation_fabric_env)
kw = self._get_conn_kwargs()
self.assertEqual(kw['config']['timeouts']['connect'], 1000000)
def test_implicit_host_string(self):
fabric_env = self.default_fabric_env.copy()
del fabric_env['host_string']
self._execute(
'test.run_task',
task_name='test_implicit_host_string',
ip='1.1.1.1',
fabric_env=fabric_env)
kw = self._get_conn_kwargs()
instance = self.env.storage.get_node_instances()[0]
self.assertEqual(instance.runtime_properties['expected_host_string'],
kw['host'])
def test_explicit_host_string(self):
fabric_env = self.default_fabric_env.copy()
fabric_env['host_string'] = 'explicit_host_string'
self._execute(
'test.run_task',
task_name='task',
fabric_env=fabric_env)
kw = self._get_conn_kwargs()
self.assertEqual('explicit_host_string',
kw['host'])
def test_explicit_password(self):
fabric_env = self.default_fabric_env.copy()
fabric_env['password'] = 'explicit_password'
self._execute(
'test.run_task',
task_name='task',
fabric_env=fabric_env)
kw = self._get_conn_kwargs()
self.assertEqual('explicit_password',
kw['config']['connect_kwargs']['password'])
def test_implicit_key_filename(self):
fabric_env = self.default_fabric_env.copy()
del fabric_env['key_filename']
bootstrap_context = {
'cloudify_agent': {
'agent_key_path': 'implicit_key_filename'
}
}
self._execute(
'test.run_task',
task_name='task',
fabric_env=fabric_env,
bootstrap_context=bootstrap_context)
kw = self._get_conn_kwargs()
self.assertEqual('implicit_key_filename',
kw['config']['connect_kwargs']['key_filename'])
def test_explicit_key_filename(self):
fabric_env = self.default_fabric_env.copy()
fabric_env['key_filename'] = 'explicit_key_filename'
self._execute(
'test.run_task',
task_name='task',
fabric_env=fabric_env)
kw = self._get_conn_kwargs()
self.assertEqual('explicit_key_filename',
kw['config']['connect_kwargs']['key_filename'])
def test_explicit_key(self):
fabric_env = self.default_fabric_env.copy()
key_file = StringIO()
RSAKey.generate(2048).write_private_key(key_file)
key_file.seek(0)
fabric_env['key'] = key_file.read()
self._execute('test.run_task',
task_name='task',
fabric_env=fabric_env)
kw = self._get_conn_kwargs()
self.assertIsInstance(kw['config']['connect_kwargs']['pkey'], RSAKey)
def test_implicit_user(self):
fabric_env = self.default_fabric_env.copy()
del fabric_env['user']
bootstrap_context = {
'cloudify_agent': {
'user': 'implicit_user'
}
}
self._execute('test.run_task',
task_name='task',
fabric_env=fabric_env,
bootstrap_context=bootstrap_context)
kw = self._get_conn_kwargs()
self.assertEqual('implicit_user', kw['user'])
def test_explicit_user(self):
fabric_env = self.default_fabric_env.copy()
fabric_env['user'] = 'explicit_user'
self._execute('test.run_task',
task_name='task',
fabric_env=fabric_env)
kw = self._get_conn_kwargs()
self.assertEqual('explicit_user', kw['user'])
def _test_run_commands_non_recoverable(self, use_sudo=False):
with self.assertRaises(NonRecoverableError):
commands = ['command1', 'command2']
connection = MockConnection()
run = 'run'
setattr(
getattr(connection, run), 'side_effect',
CustomError({'return_code': 1})
)
self._execute(
'test.run_commands',
connection=connection,
commands=commands,
use_sudo=use_sudo,
non_recoverable_error_exit_codes=[1, 2])
def test_run_commands_non_recoverable(self):
self._test_run_commands_non_recoverable()
def test_run_sudo_commands_non_recoverable(self):
self._test_run_commands_non_recoverable(use_sudo=True)
def test_run_task_non_recoverable(self):
with self.assertRaises(NonRecoverableError):
with patch('fabric_plugin.tasks._run_task', raise_custom_error):
self._execute('test.run_task', task_name='task',
non_recoverable_error_exit_codes=[1, 2])
def test_run_module_task_non_recoverable(self):
with self.assertRaises(NonRecoverableError):
with patch('fabric_plugin.tasks._run_task', raise_custom_error):
self._execute(
'test.run_module_task',
task_mapping='fabric_plugin.tests.test_fabric_plugin'
'.module_task',
non_recoverable_error_exit_codes=[1, 2])
@workflow
def execute_operation(operation, **kwargs):
node = next(workflow_ctx.nodes)
instance = next(node.instances)
return instance.execute_operation(operation).get()
def module_task():
ctx.instance.runtime_properties['task_called'] = 'called'
non_callable = 1
class CustomError(Exception):
def __init__(self, result):
self.result = tasks._AttributeDict(**result)
def raise_custom_error(a, b, c, d):
raise CustomError({'return_code': 1})
|
import socket
# HTTP layer
# TCP / UDP
# A socket is the operating system's low-level mechanism for network communication
# 1. create socket
# 2. connect to domain and port
# 3. create request
# 4. unicode -> encode -> binary; binary -> decode -> str
# 5. socket send
# 6. socket recv
# The socket.AF_INET argument selects the IPv4 protocol
# The socket.SOCK_STREAM argument selects the TCP protocol
# This is a plain HTTP socket, not HTTPS, so the port must be 80, not 443
# s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Both arguments above are the defaults, so they can be omitted
s = socket.socket()
host = '163.com'
port = 80
# The argument is a tuple
s.connect((host, port))
ip, port = s.getsockname()
print('local ip and port: {} {}'.format(ip, port))
# Build the request; 'Connection: close' asks the server to close the
# connection after responding, so the recv loop below can terminate
request_str = 'GET / HTTP/1.1\r\nHost: {}\r\nConnection: close\r\n\r\n'.format(host)
request_bytes = request_str.encode('utf-8')
print('request_str: ', request_str)
print('request_bytes: ', request_bytes)
s.send(request_bytes)
# Assemble the response
response_bytes = b''
while True:
buf = s.recv(1024)
if not buf:
break
response_bytes += buf
response_str = response_bytes.decode('utf-8', errors='replace')  # tolerate non-UTF-8 bytes in the body
print('response_bytes: ', response_bytes)
print('response_str: ', response_str)
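# A minimal HTTPS variant (an assumed sketch, not called above): the only
# changes are wrapping the socket with TLS and using port 443.
import ssl
def https_get(hostname):
    context = ssl.create_default_context()
    sock = context.wrap_socket(socket.socket(), server_hostname=hostname)
    sock.connect((hostname, 443))
    request = 'GET / HTTP/1.1\r\nHost: {}\r\nConnection: close\r\n\r\n'.format(hostname)
    sock.send(request.encode('utf-8'))
    chunks = b''
    while True:
        buf = sock.recv(1024)
        if not buf:
            break
        chunks += buf
    return chunks.decode('utf-8', errors='replace')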
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for depthwise convolutional operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def ConfigsToTest():
"""Iterator for different convolution shapes, strides and paddings.
Yields:
Tuple (input_size, filter_size, out_size, stride, padding), the depthwise
convolution parameters.
"""
input_sizes = [[4, 5, 5, 48], [4, 8, 8, 84], [4, 17, 17, 48], [4, 9, 27, 8],
[4, 31, 31, 7], [4, 35, 35, 2], [4, 147, 147, 2],
[3, 299, 299, 3], [5, 183, 183, 1]]
  filter_sizes = [[1, 1, 48, 2], [1, 3, 84, 1], [3, 1, 48, 4], [3, 3, 8, 1],
                  [3, 3, 7, 1], [5, 5, 2, 1], [3, 3, 2, 8], [2, 2, 3, 8],
                  [5, 5, 1, 2]]
out_sizes = [[4, 5, 5, 96], [4, 8, 8, 84], [4, 17, 17, 192], [4, 9, 27, 8],
[4, 31, 31, 7], [4, 35, 35, 2], [4, 49, 49, 16],
[3, 150, 150, 24], [5, 92, 92, 2]]
strides = [1, 1, 1, 1, 1, 1, 3, 2, 2]
# pylint: disable=invalid-name
VALID = "VALID"
SAME = "SAME"
# pylint: enable=invalid-name
  paddings = [SAME, SAME, SAME, SAME, SAME, SAME, VALID, SAME, SAME]  # one per config
for i, f, o, s, p in zip(input_sizes, filter_sizes, out_sizes, strides,
paddings):
yield i, f, o, s, p
def CheckGradConfigsToTest():
"""Iterator for different convolution shapes, strides and paddings.
compute_gradient_error() is very expensive. So the configs should be
relatively small.
Yields:
Tuple (input_size, filter_size, out_size, stride, padding), the depthwise
convolution parameters.
"""
input_sizes = [[2, 5, 8, 1], [4, 5, 5, 1], [2, 4, 4, 2], [1, 15, 15, 2],
[2, 15, 16, 1]]
filter_sizes = [[4, 4, 1, 2], [2, 2, 1, 2], [3, 1, 2, 2], [1, 3, 2, 1],
[3, 3, 1, 2]]
out_sizes = [[2, 5, 8, 2], [4, 2, 2, 2], [2, 4, 4, 4], [1, 15, 15, 2],
[2, 5, 5, 2]]
strides = [1, 2, 1, 1, 3]
# pylint: disable=invalid-name
VALID = "VALID"
SAME = "SAME"
# pylint: enable=invalid-name
paddings = [SAME, VALID, SAME, SAME, VALID]
for i, f, o, s, p in zip(input_sizes, filter_sizes, out_sizes, strides,
paddings):
yield i, f, o, s, p
class DepthwiseConv2DTest(test.TestCase):
  # This tests that depthwise_conv2d and depthwise_conv2d_native
  # produce the same results. It also tests that the NCHW and NHWC
  # formats agree, by checking that depthwise_conv2d_native with
  # 'NCHW' format (with transposition) matches the 'NHWC' format
  # used through the higher-level interface.
def _VerifyValues(self,
tensor_in_sizes,
filter_in_sizes,
stride,
padding,
data_type,
use_gpu,
grouped_conv=False,
data_format="NHWC"):
"""Verifies the output values of the convolution function.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[filter_rows, filter_cols, input_depth, depth_multiplier].
stride: Stride.
padding: Padding type.
data_type: The data type to use.
use_gpu: Whether to use GPU.
grouped_conv: Whether to use cuDNN 7's grouped convolution.
data_format: The data_format of the input. "NHWC" or "NCHW".
"""
input_size = 1
filter_size = 1
for s in tensor_in_sizes:
input_size *= s
for s in filter_in_sizes:
filter_size *= s
# Initializes the input and filter tensor with numbers incrementing from 1.
x1 = [f * 1.0 / input_size for f in range(1, input_size + 1)]
x2 = [f * 1.0 / filter_size for f in range(1, filter_size + 1)]
ops.reset_default_graph()
graph = ops.get_default_graph()
with self.session(graph=graph, use_gpu=use_gpu) as sess:
tolerance = {
dtypes.float16: 4e-2,
dtypes.float32: 1e-6,
dtypes.float64: 1e-12,
}[data_type]
t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=data_type)
t1.set_shape(tensor_in_sizes)
t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=data_type)
native_t1 = t1
strides = [1, stride, stride, 1]
if data_format == "NCHW":
# Transpose from NHWC input to NCHW
# Ex. [4, 5, 5, 48] to [4, 48, 5, 5]
native_t1 = array_ops.transpose(t1, [0, 3, 1, 2])
strides = [1, 1, stride, stride]
with sess.graph._kernel_label_map({
"DepthwiseConv2dNative": "cudnn_grouped_convolution"
} if grouped_conv else {}):
conv_native = nn_ops.depthwise_conv2d_native(
native_t1,
t2,
strides=strides,
data_format=data_format,
padding=padding)
if data_format == "NCHW":
# Transpose back from NCHW to NHWC
conv_native = array_ops.transpose(conv_native, [0, 2, 3, 1])
try:
native_result = sess.run(conv_native)
except errors.InvalidArgumentError as e:
# Grouped convolution kernel is only registered for cuDNN 7. Silently
# return when we are running on an earlier version or without GPU.
if e.message.startswith(
"No OpKernel was registered to support Op 'DepthwiseConv2dNative'"):
tf_logging.warn("Skipping grouped convolution test")
return
raise e
conv_interface = nn_impl.depthwise_conv2d(
t1, t2, strides=[1, stride, stride, 1], padding=padding)
interface_result = sess.run(conv_interface)
tf_logging.info(
"data_type: %r, use_gpu: %r, grouped_conv: %r, max diff = %f",
data_type, use_gpu, grouped_conv,
np.amax(np.absolute(native_result - interface_result)))
self.assertArrayNear(
np.ravel(native_result), np.ravel(interface_result), tolerance)
self.assertShapeEqual(native_result, conv_native)
self.assertShapeEqual(native_result, conv_interface)
def testDepthwiseConv2D(self):
for index, (input_size, filter_size, _, stride,
padding) in enumerate(ConfigsToTest()):
tf_logging.info(
"Testing DepthwiseConv2D, %dth config: %r * %r, stride: %d, padding: "
"%s", index, input_size, filter_size, stride, padding)
for data_type in [dtypes.float32, dtypes.float64]:
tf_logging.info("Testing without grouped_conv")
self._VerifyValues(
input_size, filter_size, stride, padding, data_type, use_gpu=True)
tf_logging.info("Testing with grouped_conv")
self._VerifyValues(
input_size,
filter_size,
stride,
padding,
data_type,
use_gpu=True,
grouped_conv=True)
def testDepthwiseConv2DWithUnknownShape(self):
# GitHub issue 22110.
if not test.is_gpu_available():
return
with self.test_session(use_gpu=True):
x = array_ops.placeholder(dtypes.float32)
f = np.ones([1, 1, 1, 1], np.float32)
v = nn_impl.depthwise_conv2d(
x, f, [1, 1, 1, 1], "VALID", rate=[2, 1], data_format="NCHW")
self.assertAllEqual(
np.ones([1, 1, 1, 1], np.float32),
v.eval(feed_dict={x: np.ones([1, 1, 1, 1], np.float32)}))
def testDepthwiseConv2DFormat(self):
if not test.is_gpu_available():
return
for index, (input_size, filter_size, _, stride,
padding) in enumerate(ConfigsToTest()):
tf_logging.info(
"Testing DepthwiseConv2DFormat, %dth config: %r * %r, stride: %d, "
"padding: %s", index, input_size, filter_size, stride, padding)
for data_type in [dtypes.float32, dtypes.float64]:
self._VerifyValues(
input_size,
filter_size,
stride,
padding,
data_type,
use_gpu=True,
data_format="NCHW")
# This is testing against hand calculated results.
def _VerifyHandValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,
expected, use_gpu):
"""Verifies the output values of the depthwise convolution function.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[filter_rows, filter_cols, input_depth, depth_multiplier].
stride: Stride.
padding: Padding type.
expected: An array containing the expected operation outputs.
use_gpu: Whether to use GPU.
"""
total_size_1 = 1
total_size_2 = 1
for s in tensor_in_sizes:
total_size_1 *= s
for s in filter_in_sizes:
total_size_2 *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]
x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]
with self.test_session(use_gpu=use_gpu) as sess:
t1 = constant_op.constant(x1, shape=tensor_in_sizes)
t1.set_shape(tensor_in_sizes)
t2 = constant_op.constant(x2, shape=filter_in_sizes)
conv = nn_ops.depthwise_conv2d_native(
t1, t2, strides=[1, stride, stride, 1], padding=padding)
value = sess.run(conv)
tf_logging.info("value = %r", value)
self.assertArrayNear(expected, np.ravel(value), 1e-5)
self.assertShapeEqual(value, conv)
def testConv2D2x2Filter(self):
# The inputs look like this (it's a 3 x 2 matrix, each of depth 2):
#
# [ (1.0, 2.0), (3.0, 4.0), ( 5.0, 6.0) ]
# [ (7.0, 8.0), (9.0, 10.0), (11.0, 12.0) ]
# We can view this as two inputs
#
# input depth 0:
#
# [ 1.0, 3.0, 5.0 ]
# [ 7.0, 9.0, 11.0 ]
#
# input depth 1:
#
# [ 2.0, 4.0, 6.0 ]
# [ 8.0, 10.0, 12.0 ]
#
# The filter looks like this (it has two 2 x 2 patches, each generating 2
# depths):
#
# filter #0:
#
# [ (1.0, 3.0), ( 5.0, 7.0)]
# [ (9.0, 11.0), (13.0, 15.0)]
#
# filter #1:
#
# [ ( 2.0, 4.0), ( 6.0, 8.0)]
# [ (10.0, 12.0), (14.0, 16.0)]
#
# So the outputs are:
#
# (position 0, 0: in_depth 0, output_depth 0 -- using filter #0)
# 1.0 * 1.0 + 7.0 * 9.0 + 3.0 * 5.0 + 9.0 * 13.0 = 196
# (position 0, 0: in_depth 0, output_depth 1 -- using filter #1)
# 1.0 * 2.0 + 7.0 * 10.0 + 3.0 * 6.0 + 9.0 * 14.0 = 216
# (position 0, 0: in_depth 1, output_depth 2 -- using filter #0)
# 2.0 * 3.0 + 8.0 * 11.0 + 4.0 * 7.0 + 10.0 * 15.0 = 272
# (position 0, 0: in_depth 1, output_depth 3 -- using filter #1)
# 2.0 * 4.0 + 8.0 * 12.0 + 4.0 * 8.0 + 10.0 * 16.0 = 296
#
# (position 1, 0: in_depth 0, output_depth 0 -- using filter #0)
# 3.0 * 1.0 + 9.0 * 9.0 + 5.0 * 5.0 + 11.0 * 13.0 = 252
# (position 1, 0: in_depth 0, output_depth 1 -- using filter #1)
# 3.0 * 2.0 + 9.0 * 10.0 + 5.0 * 6.0 + 11.0 * 14.0 = 280
# (position 1, 0: in_depth 1, output_depth 2 -- using filter #0)
# 4.0 * 3.0 + 10.0 * 11.0 + 6.0 * 7.0 + 12.0 * 15.0 = 344
# (position 1, 0: in_depth 1, output_depth 3 -- using filter #1)
# 4.0 * 4.0 + 10.0 * 12.0 + 6.0 * 8.0 + 12.0 * 16.0 = 376
expected_output = [196, 216, 272, 296, 252, 280, 344, 376]
self._VerifyHandValues(
tensor_in_sizes=[1, 2, 3, 2],
filter_in_sizes=[2, 2, 2, 2],
stride=1,
padding="VALID",
expected=expected_output,
use_gpu=False)
self._VerifyHandValues(
tensor_in_sizes=[1, 2, 3, 2],
filter_in_sizes=[2, 2, 2, 2],
stride=1,
padding="VALID",
expected=expected_output,
use_gpu=True)
  # Gradient checkers. This tests depthwise gradient computations for both
  # BackpropFilter and BackpropInput by comparing gradients computed by the
  # depthwise gradient ops with the gradients computed numerically (details
  # can be found in compute_gradient_error()).
  # Note this check is very expensive so the input should not be too big.
def _ConstructAndTestGradient(self,
input_shape,
filter_shape,
output_shape,
stride,
padding,
data_type,
test_input,
use_gpu,
grouped_conv=False,
data_format="NHWC"):
input_size = 1
for x in input_shape:
input_size *= x
filter_size = 1
for x in filter_shape:
filter_size *= x
input_data = [x * 1.0 / input_size for x in range(0, input_size)]
filter_data = [x * 1.0 / filter_size for x in range(0, filter_size)]
ops.reset_default_graph()
graph = ops.get_default_graph()
with self.session(graph=graph, use_gpu=use_gpu) as sess:
tolerance = {
dtypes.float16: 4e-0,
dtypes.float32: 8e-4,
dtypes.float64: 1e-12,
}[data_type]
input_tensor = constant_op.constant(
input_data, shape=input_shape, dtype=data_type, name="input")
filter_tensor = constant_op.constant(
filter_data, shape=filter_shape, dtype=data_type, name="filter")
native_input = input_tensor
strides = [1, stride, stride, 1]
if data_format == "NCHW":
# Transpose from NHWC input to NCHW
# Ex. [4, 5, 5, 48] to [4, 48, 5, 5]
native_input = array_ops.transpose(input_tensor, [0, 3, 1, 2])
input_shape = [
input_shape[0], input_shape[3], input_shape[1], input_shape[2]
]
output_shape = [
output_shape[0], output_shape[3], output_shape[1], output_shape[2]
]
strides = [1, 1, stride, stride]
with sess.graph._kernel_label_map({
"DepthwiseConv2dNative": "cudnn_grouped_convolution",
"DepthwiseConv2dNativeBackpropInput": "cudnn_grouped_convolution",
"DepthwiseConv2dNativeBackpropFilter": "cudnn_grouped_convolution",
} if grouped_conv else {}):
depthwise_conv2d = nn_ops.depthwise_conv2d_native(
native_input,
filter_tensor,
strides,
padding,
data_format=data_format,
name="depthwise_conv2d")
self.assertEqual(output_shape, depthwise_conv2d.get_shape())
try:
if test_input:
err = gradient_checker.compute_gradient_error(
native_input, input_shape, depthwise_conv2d, output_shape)
else:
err = gradient_checker.compute_gradient_error(
filter_tensor, filter_shape, depthwise_conv2d, output_shape)
except errors.InvalidArgumentError as e:
# Grouped convolution kernel is only registered for cuDNN 7. Silently
# return when we are running on an earlier version or without GPU.
if grouped_conv and e.message.startswith(
"No OpKernel was registered to support Op 'DepthwiseConv2dNative'"):
tf_logging.warn("Skipping grouped convolution test")
return
raise e
tf_logging.info(
"data_type: %r, use_gpu: %r, grouped_conv: %r, error = %f", data_type,
use_gpu, grouped_conv, err)
self.assertLess(err, tolerance)
def testDepthwiseConv2DInputGrad(self):
for index, (input_size, filter_size, output_size, stride,
padding) in enumerate(CheckGradConfigsToTest()):
tf_logging.info(
"Testing DepthwiseConv2DInputGrad, %dth config: %r * %r, stride: %d, "
"padding: %s", index, input_size, filter_size, stride, padding)
for data_type in [dtypes.float32, dtypes.float64]:
self._ConstructAndTestGradient(
input_size,
filter_size,
output_size,
stride,
padding,
data_type,
test_input=True,
use_gpu=True)
self._ConstructAndTestGradient(
input_size,
filter_size,
output_size,
stride,
padding,
data_type,
test_input=True,
use_gpu=True,
grouped_conv=True)
def testDepthwiseConv2DInputGradFormat(self):
if not test.is_gpu_available():
return
for index, (input_size, filter_size, output_size, stride,
padding) in enumerate(CheckGradConfigsToTest()):
tf_logging.info(
"Testing DepthwiseConv2DInputGradFormat, %dth config: %r * %r, "
"stride: %d, padding: %s", index, input_size, filter_size, stride,
padding)
for data_type in [dtypes.float32, dtypes.float64]:
self._ConstructAndTestGradient(
input_size,
filter_size,
output_size,
stride,
padding,
data_type,
test_input=True,
use_gpu=True,
data_format="NCHW")
def testDepthwiseConv2DFilterGrad(self):
for index, (input_size, filter_size, output_size, stride,
padding) in enumerate(CheckGradConfigsToTest()):
tf_logging.info(
"Testing DepthwiseConv2DFilterGrad, %dth config: %r * %r, stride: "
"%d, padding: %s", index, input_size, filter_size, stride, padding)
for data_type in [dtypes.float32, dtypes.float64]:
self._ConstructAndTestGradient(
input_size,
filter_size,
output_size,
stride,
padding,
data_type,
test_input=False,
use_gpu=True)
def testDepthwiseConv2DFilterGradFormat(self):
if not test.is_gpu_available():
return
for index, (input_size, filter_size, output_size, stride,
padding) in enumerate(CheckGradConfigsToTest()):
tf_logging.info(
"Testing DepthwiseConv2DFilterGradFormat, %dth config: %r * %r, "
"stride: %d, padding: %s", index, input_size, filter_size, stride,
padding)
for data_type in [dtypes.float32, dtypes.float64]:
self._ConstructAndTestGradient(
input_size,
filter_size,
output_size,
stride,
padding,
data_type,
test_input=False,
use_gpu=True,
data_format="NCHW")
def _CompareBackpropInputFloat(self, input_sizes, filter_sizes, output_sizes,
stride, padding):
x1 = np.random.rand(*filter_sizes).astype(np.float32)
x2 = np.random.rand(*output_sizes).astype(np.float32)
def _GetVal(use_gpu):
with self.test_session(use_gpu=use_gpu):
t0 = constant_op.constant(input_sizes, shape=[len(input_sizes)])
t1 = constant_op.constant(x1, shape=filter_sizes)
t2 = constant_op.constant(x2, shape=output_sizes)
backprop = nn_ops.depthwise_conv2d_native_backprop_input(
t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
ret = backprop.eval()
self.assertShapeEqual(ret, backprop)
return ret
gpu_value = _GetVal(use_gpu=True)
cpu_value = _GetVal(use_gpu=False)
self.assertAllClose(cpu_value, gpu_value, rtol=1e-4, atol=1e-4)
def _CompareBackpropInputDouble(self, input_sizes, filter_sizes, output_sizes,
stride, padding):
x1 = np.random.rand(*filter_sizes).astype(np.float64)
x2 = np.random.rand(*output_sizes).astype(np.float64)
def _GetVal(use_gpu):
with self.test_session(use_gpu=use_gpu):
t0 = constant_op.constant(input_sizes, shape=[len(input_sizes)])
t1 = constant_op.constant(x1, shape=filter_sizes)
t2 = constant_op.constant(x2, shape=output_sizes)
backprop = nn_ops.depthwise_conv2d_native_backprop_input(
t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
ret = backprop.eval()
self.assertShapeEqual(ret, backprop)
return ret
gpu_value = _GetVal(use_gpu=True)
cpu_value = _GetVal(use_gpu=False)
self.assertAllClose(cpu_value, gpu_value, rtol=1e-4, atol=1e-4)
def testDepthwiseConv2DInputGradCompare(self):
for index, (input_size, filter_size, output_size, stride,
padding) in enumerate(ConfigsToTest()):
tf_logging.info(
"Testing DepthwiseConv2DInputGradCompare, %dth config: %r * %r, "
"stride: %d, padding: %s", index, input_size, filter_size, stride,
padding)
self._CompareBackpropInputFloat(input_size, filter_size, output_size,
stride, padding)
self._CompareBackpropInputDouble(input_size, filter_size, output_size,
stride, padding)
def _CompareBackpropFilterFloat(self, input_sizes, filter_sizes, output_sizes,
stride, padding):
x0 = np.random.rand(*input_sizes).astype(np.float32)
x2 = np.random.rand(*output_sizes).astype(np.float32)
def _GetVal(use_gpu):
with self.test_session(use_gpu=use_gpu):
t0 = constant_op.constant(x0, shape=input_sizes)
t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])
t2 = constant_op.constant(x2, shape=output_sizes)
backprop = nn_ops.depthwise_conv2d_native_backprop_filter(
t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
ret = backprop.eval()
self.assertShapeEqual(ret, backprop)
return ret
gpu_value = _GetVal(use_gpu=True)
cpu_value = _GetVal(use_gpu=False)
self.assertAllClose(cpu_value, gpu_value, rtol=1e-4, atol=1e-4)
def _CompareBackpropFilterDouble(self, input_sizes, filter_sizes,
output_sizes, stride, padding):
x0 = np.random.rand(*input_sizes).astype(np.float64)
x2 = np.random.rand(*output_sizes).astype(np.float64)
def _GetVal(use_gpu):
with self.test_session(use_gpu=use_gpu):
t0 = constant_op.constant(x0, shape=input_sizes)
t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])
t2 = constant_op.constant(x2, shape=output_sizes)
backprop = nn_ops.depthwise_conv2d_native_backprop_filter(
t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
ret = backprop.eval()
self.assertShapeEqual(ret, backprop)
return ret
gpu_value = _GetVal(use_gpu=True)
cpu_value = _GetVal(use_gpu=False)
self.assertAllClose(cpu_value, gpu_value, rtol=1e-4, atol=1e-4)
def testDepthwiseConv2DFilterGradCompare(self):
for index, (input_size, filter_size, output_size, stride,
padding) in enumerate(ConfigsToTest()):
tf_logging.info(
"Testing DepthwiseConv2DFilterGradCompare, %dth config: %r * %r, "
"stride: %d, padding: %s", index, input_size, filter_size, stride,
padding)
self._CompareBackpropFilterFloat(input_size, filter_size, output_size,
stride, padding)
self._CompareBackpropFilterDouble(input_size, filter_size, output_size,
stride, padding)
if __name__ == "__main__":
test.main()
|
from .test import TestCase
__all__ = [
'TestCase',
]
|
import tensorflow as tf
class BahdanauAttention(tf.keras.layers.Layer):
def __init__(self, units):
super(BahdanauAttention, self).__init__()
self.W1 = tf.keras.layers.Dense(units)
self.W2 = tf.keras.layers.Dense(units)
self.V = tf.keras.layers.Dense(1)
    def call(self, query, values):
        # query: decoder hidden state, shape (batch, hidden)
        # values: encoder output, shape (batch, seq_len, enc_units)
        hidden_with_time_axis = tf.expand_dims(query, 1)
        # score shape: (batch, seq_len, 1)
        score = self.V(tf.nn.tanh(
            self.W1(values) + self.W2(hidden_with_time_axis)))
        attention_weights = tf.nn.softmax(score, axis=1)
        # weighted sum over the time axis gives shape (batch, enc_units)
        context_vector = attention_weights * values
        context_vector = tf.reduce_sum(context_vector, axis=1)
        return context_vector, attention_weights
class Encoder(tf.keras.Model):
"""
seq2seq的encoder,主要就是使用Embedding和GRU对输入进行编码,
这里需要注意传入一个初始化的隐藏层,随机也可以,但是我这里就
直接写了一个隐藏层方法。
"""
def __init__(self, vocab_size, embedding_dim, enc_units, batch_sz):
super(Encoder, self).__init__()
self.batch_sz = batch_sz
self.enc_units = enc_units
self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
self.gru = tf.keras.layers.GRU(self.enc_units,
return_sequences=True,
return_state=True,
recurrent_initializer='glorot_uniform')
def call(self, x, hidden):
x = self.embedding(x)
output, state = self.gru(x, initial_state=hidden)
return output, state
def initialize_hidden_state(self):
return tf.zeros((self.batch_sz, self.enc_units))
class Decoder(tf.keras.Model):
"""
seq2seq的decoder,将初始化的x、隐藏层和encoder的输出作为
输入,encoder的输入用来和隐藏层进行attention,得到的上下文
向量和x进行整合然后丢到gru里去,最后Dense输出一下
"""
def __init__(self, vocab_size, embedding_dim, dec_units, batch_sz):
super(Decoder, self).__init__()
self.batch_sz = batch_sz
self.dec_units = dec_units
self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
self.gru = tf.keras.layers.GRU(self.dec_units,
return_sequences=True,
return_state=True,
recurrent_initializer='glorot_uniform')
self.fc = tf.keras.layers.Dense(vocab_size)
self.attention = BahdanauAttention(self.dec_units)
def call(self, x, hidden, enc_output):
context_vector, attention_weights = self.attention(hidden, enc_output)
x = self.embedding(x)
x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1)
output, state = self.gru(x)
output = tf.reshape(output, (-1, output.shape[2]))
x = self.fc(output)
return x, state, attention_weights
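# A minimal usage sketch (assumed, not part of the original module): wires
# Encoder, BahdanauAttention and Decoder together for a single decoding step.
# The vocabulary sizes, dimensions and the <start> token id are hypothetical
# placeholders.
if __name__ == '__main__':
    batch_sz, src_len = 4, 10
    encoder = Encoder(vocab_size=1000, embedding_dim=64, enc_units=128, batch_sz=batch_sz)
    decoder = Decoder(vocab_size=1200, embedding_dim=64, dec_units=128, batch_sz=batch_sz)
    src = tf.random.uniform((batch_sz, src_len), maxval=1000, dtype=tf.int32)
    enc_output, enc_hidden = encoder(src, encoder.initialize_hidden_state())
    dec_input = tf.expand_dims([1] * batch_sz, 1)  # assumed <start> token id
    logits, dec_hidden, attn = decoder(dec_input, enc_hidden, enc_output)
    print(logits.shape)  # (4, 1200): a distribution over the target vocab per batch item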
|
"""empty message
Revision ID: 5f86e3e2b044
Revises: 4a40191fc890
Create Date: 2019-06-22 20:10:26.664893
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '5f86e3e2b044'
down_revision = '4a40191fc890'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('cloud_connection', sa.Column('owner', sa.String(), server_default='legacy', nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('cloud_connection', 'owner')
# ### end Alembic commands ###
|
# pylint:disable=unused-variable
# pylint:disable=unused-argument
# pylint:disable=redefined-outer-name
from typing import AsyncIterator
import pytest
from aioresponses import aioresponses
from faker import Faker
from hypothesis import HealthCheck, given, settings
from hypothesis import strategies as st
from models_library.clusters import ClusterID
from models_library.projects import ProjectID
from models_library.projects_pipeline import ComputationTask
from models_library.projects_state import RunningState
from models_library.users import UserID
from simcore_service_webserver import director_v2_api
from simcore_service_webserver.director_v2_models import (
ClusterCreate,
ClusterPatch,
ClusterPing,
)
@pytest.fixture()
async def mocked_director_v2(
director_v2_service_mock: aioresponses,
) -> AsyncIterator[aioresponses]:
yield director_v2_service_mock
@pytest.fixture
def user_id(faker: Faker) -> UserID:
return UserID(faker.pyint(min_value=1))
@pytest.fixture
def project_id(faker: Faker) -> ProjectID:
return ProjectID(faker.uuid4())
@pytest.fixture
def cluster_id(faker: Faker) -> ClusterID:
return ClusterID(faker.pyint(min_value=0))
async def test_create_pipeline(
mocked_director_v2, client, user_id: UserID, project_id: ProjectID
):
task_out = await director_v2_api.create_or_update_pipeline(
client.app, user_id, project_id
)
assert task_out
assert isinstance(task_out, dict)
assert task_out["state"] == RunningState.NOT_STARTED
async def test_get_computation_task(
mocked_director_v2,
client,
user_id: UserID,
project_id: ProjectID,
):
task_out = await director_v2_api.get_computation_task(
client.app, user_id, project_id
)
assert task_out
assert isinstance(task_out, ComputationTask)
assert task_out.state == RunningState.NOT_STARTED
async def test_delete_pipeline(
mocked_director_v2, client, user_id: UserID, project_id: ProjectID
):
await director_v2_api.delete_pipeline(client.app, user_id, project_id)
@settings(suppress_health_check=[HealthCheck.function_scoped_fixture])
@given(cluster_create=st.builds(ClusterCreate))
async def test_create_cluster(
mocked_director_v2, client, user_id: UserID, cluster_create
):
created_cluster = await director_v2_api.create_cluster(
client.app, user_id=user_id, new_cluster=cluster_create
)
assert created_cluster is not None
assert isinstance(created_cluster, dict)
assert "id" in created_cluster
async def test_list_clusters(mocked_director_v2, client, user_id: UserID):
list_of_clusters = await director_v2_api.list_clusters(client.app, user_id=user_id)
assert isinstance(list_of_clusters, list)
assert len(list_of_clusters) > 0
async def test_get_cluster(
mocked_director_v2, client, user_id: UserID, cluster_id: ClusterID
):
cluster = await director_v2_api.get_cluster(
client.app, user_id=user_id, cluster_id=cluster_id
)
assert isinstance(cluster, dict)
assert cluster["id"] == cluster_id
async def test_get_cluster_details(
mocked_director_v2, client, user_id: UserID, cluster_id: ClusterID
):
cluster_details = await director_v2_api.get_cluster_details(
client.app, user_id=user_id, cluster_id=cluster_id
)
assert isinstance(cluster_details, dict)
@settings(suppress_health_check=[HealthCheck.function_scoped_fixture])
@given(cluster_patch=st.from_type(ClusterPatch))
async def test_update_cluster(
mocked_director_v2, client, user_id: UserID, cluster_id: ClusterID, cluster_patch
):
print(f"--> updating cluster with {cluster_patch=}")
updated_cluster = await director_v2_api.update_cluster(
client.app, user_id=user_id, cluster_id=cluster_id, cluster_patch=cluster_patch
)
assert isinstance(updated_cluster, dict)
assert updated_cluster["id"] == cluster_id
async def test_delete_cluster(
mocked_director_v2, client, user_id: UserID, cluster_id: ClusterID
):
await director_v2_api.delete_cluster(
client.app, user_id=user_id, cluster_id=cluster_id
)
@settings(suppress_health_check=[HealthCheck.function_scoped_fixture])
@given(cluster_ping=st.builds(ClusterPing))
async def test_ping_cluster(mocked_director_v2, client, cluster_ping: ClusterPing):
await director_v2_api.ping_cluster(client.app, cluster_ping=cluster_ping)
async def test_ping_specific_cluster(
mocked_director_v2, client, user_id: UserID, cluster_id: ClusterID
):
await director_v2_api.ping_specific_cluster(
client.app, user_id=user_id, cluster_id=cluster_id
)
|
import datetime
import shutil
import sqlite3
import urllib.parse
from pathlib import Path
import subprocess as sp
from yaspin import yaspin
from building import build_packages
from utils import parse_package_names, read_config, find_package_deps
from snapshots import prepare_snapshot, commit_snapshot, current_snapshot_metadata
def install_package(package: str, root: Path, db, spinner, spinner_prefix=''):
spinner.text = f'Installing {package}'
bincache_archive = root / 'cache' / 'bold' / 'bincache' / f'{package}.tar.zst'
if (root / 'app' / package).exists():
return True
if not bincache_archive.exists():
# TODO: Download from https repo
config = read_config(root)
original_side = spinner.side
original_color = spinner.color
spinner.side = 'right'
spinner.color = 'cyan'
for bincache_server in config['binaryCaches']:
url = f'{bincache_server}/{package}.tar.zst'
bincache_host = urllib.parse.urlparse(bincache_server).hostname
spinner.text = f'{spinner_prefix}Downloading {package} from {bincache_host}'
try:
sp.run(
['curl', '--fail', '-L', url, '-o', str(bincache_archive)],
check=True,
stdout=sp.DEVNULL,
stderr=sp.DEVNULL,
)
except sp.CalledProcessError:
spinner.stop()
print(spinner.text + ' [FAIL]')
spinner.start()
continue
spinner.stop()
print(spinner.text + ' [OK]')
spinner.start()
break
else:
# Last resort, build it
spinner.stop()
print(f'Could not download {package} from any binary cache, building it')
spinner.start()
workspace = (root / 'cache' / 'bold' / 'build' / package)
phases = ['fetch', 'unpack', 'patch', 'build', 'check', 'install', 'fixup', 'installCheck', 'pack']
try:
build_packages(
[package], root, workspace, phases,
spinner, db, f'{spinner_prefix}Building package: '
)
finally:
shutil.rmtree(workspace, ignore_errors=True)
spinner.side = original_side
spinner.color = original_color
(root / 'app' / package).mkdir(parents=True)
sp.call(['tar', '-xf', str(bincache_archive), '-C', str(root / 'app' / package)])
return True
def cmd_install(args):
root = Path(args.root)
if not (root / 'snapshot' / 'current').exists():
print('Run `bold update` before installing packages')
return
current_metadata = current_snapshot_metadata(root)
# Make sure all packages are in index
db = sqlite3.connect(root / 'snapshot' / 'current' / 'cache.db3')
parsed_packages = parse_package_names(db, args.app)
if not parsed_packages:
exit(1)
exact_packages = [p for p in parsed_packages.values() if p not in current_metadata['packages']]
if len(exact_packages) == 0:
print('All requested packages already installed')
return
# Add dependencies
dependencies = find_package_deps(db, list(parsed_packages.values()))
current_metadata['packages'] |= {pkg: {'global': False} for pkg in dependencies}
exact_packages += list(dependencies)
with yaspin(text='') as spinner:
# TODO: Parallelize
# TODO: Fetch dependencies
for package in exact_packages:
if not install_package(package, root, db, spinner):
return
for pkg, exact_pkg in parsed_packages.items():
current_metadata['packages'][exact_pkg] = {'global': True}
current_metadata['named_packages'][pkg] = {'global': True}
# Create new snapshot
with yaspin(text='Creating snapshot with new packages'):
metadata = {
'alias': None,
'description': f'Installed {", ".join(parsed_packages)}',
'created': datetime.datetime.now().isoformat(),
'named_packages': current_metadata['named_packages'],
'packages': current_metadata['packages'],
'repoHash': current_metadata['repoHash'],
}
snapshot_dir = prepare_snapshot(root)
(root / 'snapshot/current/cache.db3').link_to(snapshot_dir / 'cache.db3')
commit_snapshot(root, metadata, switch=True)
print('Done :)')
|
"""
交易机器人
"""
import time
import datetime
import numpy as np
import pandas as pd
import pandas_datareader as pdr
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib as mpl
import flynnBot.indicators as mindicators
mpl.rcParams['grid.color'] = 'gray'
mpl.rcParams['grid.linestyle'] = '--'
mpl.rcParams['grid.linewidth'] = 0.2
SMALL_SIZE = 8
MEDIUM_SIZE = 10
BIGGER_SIZE = 12
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
class flynnBot():
"""
交易机器人
"""
def __init__(self, symbol='601398', init_cash=1000000,
init_share=0, start='2021-01-01', end=None):
# stock symbol
self.stock_symbol = symbol
self.cash = init_cash
self.share = init_share
self.enter_capital = 0.0
self.exit_capital = 0.0
self.capital_return = 0.0
self.capital_return_rate = 0.0
self.holding_days = 0
self.holding_price = 0.0
self.starting_price = 0.0
self.num_win = 0
self.win_rate = 0.0
self.num_trade = 0
self.df_main = None
self.df_profits = None
self.buy_actions = None
self.sell_actions = None
self.start = start
self.end = datetime.date.today()
if end is not None:
self.end = end
def fetch(self):
"""
开始从网络获取数据
Parameters
----------
None
Returns
-------
pandas data frame or None failed
"""
df_main = None
try:
df_main = pdr.get_data_tiingo(
self.stock_symbol, start=self.start, end=self.end)
        except Exception as e:
            print(e)
            print("tiingo timeout occurred")
            time.sleep(2)
            return df_main  # still None at this point, signalling failure
# format data
df_main.reset_index(level=0, inplace=True)
df_main.index = df_main.index.date
df_main.drop('symbol', axis=1, inplace=True)
df_main = df_main.rename_axis('date').reset_index()
df_main['date_str'] = df_main['date'].apply(lambda x: x.strftime('%Y-%m-%d'))
# df.set_index('date_str', inplace=True)
self.starting_price = df_main.head(1)['adjClose'].values[0]
df_main['ema_48'] = df_main['adjClose'].ewm(span=22).mean()
df_main['ema_14'] = df_main['adjClose'].ewm(span=5).mean()
self.enter_capital = self.starting_price * self.share + self.cash
self.df_main = df_main
return df_main
def add_indicator(self, indicator_callback, built_in=None):
"""
添加指标,这个函数会被直接调用,
将计算结果直接合并汇总到系统pandas data frame里面去
Parameters
----------
indicator_callback : 回调函数
built_in : str
内置的指标 'macd','kdj'
Returns
-------
None
"""
df_main = self.df_main
if indicator_callback is not None:
self.df_main = indicator_callback(df_main)
if built_in == 'macd':
self.df_main = mindicators.macd(df_main)
def run_step(self, i, action):
"""
执行一次买或者卖的动作
Parameters
----------
indicator_callback - 回调函数
Returns
-------
None
"""
day = self.df_main.index.values[i]
execute_price = self.df_main.loc[day, 'adjClose']
one_hand_price = execute_price * 100
# buy
if (action == 'buy') and (self.cash >= one_hand_price):
num_hands = int(self.cash / one_hand_price)
self.cash = self.cash - num_hands * one_hand_price
self.share = num_hands * 100 + self.share
self.df_main.loc[day, 'buy'] = execute_price
self.holding_days = 1
self.holding_price = execute_price
# sell
elif (action == 'sell') and (self.share >= 100):
self.cash = self.share * execute_price + self.cash
self.share = 0
self.df_main.loc[day, 'sell'] = execute_price
self.holding_days = 0
self.holding_price = 0
elif self.share >= 100:
self.holding_days = self.holding_days + 1
self.df_main.loc[day, 'holding_days'] = self.holding_days
self.df_main.loc[day, 'capital'] = execute_price * self.share + self.cash
def run(self, strategic_callback):
"""
        Run the simulated trading over the whole date range.
        Parameters
        ----------
        strategic_callback : callable
            Strategy callback invoked as
            strategic_callback(i, df_main, starting_price); returns
            'buy', 'sell' or 'none'.
        Returns
        -------
        None
"""
df_main = self.df_main
df_main['buy'] = np.nan
df_main['sell'] = np.nan
df_main['capital'] = np.nan
# how many days elapsed for holding the stock
        df_main['holding_days'] = 0
self.holding_days = 0
for i in range(len(df_main)):
# get action
action = 'none'
if strategic_callback is not None:
action = strategic_callback(i, df_main, self.starting_price)
self.run_step(i, action)
self.buy_actions = df_main['buy'].dropna()
self.sell_actions = df_main['sell'].dropna()
buy_df = self.buy_actions.reset_index()
buy_df.rename(columns={'index': 'buyDate'}, inplace=True)
sell_df = self.sell_actions.reset_index()
sell_df.rename(columns={'index': 'sellDate'}, inplace=True)
profits_df = pd.concat([buy_df, sell_df], axis=1)
profits_df['profits'] = profits_df['sell'] - profits_df['buy']
profits_df['return_rate'] = profits_df['profits'] / profits_df['buy']
self.num_trade = profits_df['profits'].count()
self.num_win = sum(profits_df['profits'] > 0)
if self.num_trade != 0:
self.win_rate = self.num_win / self.num_trade
self.df_profits = profits_df
self.exit_capital = self.df_main.tail(1)['capital'].values[0]
self.capital_return = self.exit_capital - self.enter_capital
self.capital_return_rate = self.capital_return / self.enter_capital
def plot_price_with_orders(self, indicator='macd'):
"""
        Plot price and indicator over the period, with buy/sell points marked.
        Parameters
        ----------
        indicator : str
            Indicator shown in the bottom panel; currently only 'macd'.
        Returns
        -------
        None
"""
plt.style.use('dark_background')
fig = plt.figure(figsize=(12, 8))
fig.suptitle('macd strategy', fontsize=10)
axs = fig.subplots(3)
self.df_main['adjClose'].plot(ax=axs[0], color='purple',
label='price', rot=60, grid=True)
ypadding = self.df_main['adjClose'].mean() * 0.2
ymin = self.df_main['adjClose'].min() - ypadding * 0.2
ymax = self.df_main['adjClose'].max() + ypadding * 0.2
self.df_main['ema_long'].plot(ax=axs[0], color='yellow', ylim=(ymin, ymax),
label='price', rot=60, grid=True)
axs[0].xaxis.set_minor_locator(mdates.DayLocator(interval=1))
axs[0].xaxis.set_major_locator(mdates.DayLocator(interval=20))
axs[0].vlines(x=self.buy_actions.index, ymin=ymin,
ymax=self.buy_actions.values, color='red', linestyle='--')
axs[0].vlines(x=self.sell_actions.index, ymin=ymin,
ymax=self.sell_actions.values, color='green', linestyle='--')
axs[0].scatter(self.df_main.index, y=self.df_main['buy'].values, label='buy',
marker='^', s=70, color='red')
axs[0].scatter(self.df_main.index, y=self.df_main['sell'].values, label='sell',
marker='x', s=70, color='#00ff00')
axs[0].legend()
percent = self.capital_return_rate * 100
ret_info = "return rate : %.2f percent\n" % percent
ret_info += "exit capital : %d" % self.exit_capital
self.df_main['capital'].plot(ax=axs[1], rot=60)
# axs[1].xaxis.set_minor_locator(mdates.DayLocator(interval=1))
# axs[1].xaxis.set_major_locator(mdates.DayLocator(interval=10))
axs[1].text(0.1, 0.8, ret_info, fontsize=8,
color="orange", transform=axs[1].transAxes)
if indicator == 'macd':
self.df_main['macd'].plot(ax=axs[2], color='green',
label='macd', grid=True, rot=60)
self.df_main['macd_signal'].plot(
ax=axs[2], color='yellow', label='signal', rot=60)
# axs[2].xaxis.set_minor_locator(mdates.DayLocator(interval=1))
# axs[2].xaxis.set_major_locator(mdates.DayLocator(interval=10))
axs[2].axhline(y=0, linestyle='--', color='gray')
min_macd = self.df_main['macd'].min()
max_macd = self.df_main['macd'].max()
axs[2].vlines(x=self.buy_actions.index, ymin=min_macd/2,
ymax=max_macd/2, color='red', linestyle='--')
axs[2].vlines(x=self.sell_actions.index, ymin=min_macd/2,
ymax=max_macd/2, color='green', linestyle='--')
plt.legend(loc="best")
plt.tight_layout()
plt.show()
def plot_deals(self):
"""
        Deprecated: no longer maintained, use plots.plot_profits instead.
"""
fig = plt.figure(figsize=(12, 8))
fig.suptitle('orders analysis', fontsize=12)
axs = fig.subplots(3)
fig.tight_layout()
buy_df = self.buy_actions.reset_index()
buy_df.rename(columns={'index': 'buyDate'}, inplace=True)
sell_df = self.sell_actions.reset_index()
sell_df.rename(columns={'index': 'sellDate'}, inplace=True)
deal_df = pd.concat([buy_df, sell_df], axis=1)
if len(deal_df) == 0:
print("no orders!")
exit()
color_dict = {'buy': 'red', 'sell': 'green'}
df2 = deal_df.loc[:, ['buy', 'sell']]
df2.plot(ax=axs[0], kind='bar', color=color_dict, rot=90)
# profit
deal_df['profits'] = deal_df['sell'] - deal_df['buy']
deal_df['rate'] = deal_df['profits'] / deal_df['buy']
colors = np.where(deal_df['profits'].values > 0, 'r', 'g')
deal_df['profits'].plot(
ax=axs[1], kind='bar', color=colors, title='profit')
title_str = 'return rate diagram'
deal_df['rate'].plot(
ax=axs[2], title=title_str)
percent = self.win_rate * 100
trade_info = "win rate:%.2f percent\n" % percent
info = "Win : %d - Total : %d" % (self.num_win, self.num_trade)
trade_info += info
axs[2].text(0, 1, trade_info, fontsize=8, color="orange")
plt.tight_layout()
plt.show()
def botRunner(
symbol='601398', init_cash=1000000, init_share=0,
start='2021-01-01', end=None):
"""
    Helper that creates a flynnBot and fetches its trading data.
    Parameters
    ----------
    symbol - the stock ticker symbol
    init_cash - initial cash; should exceed the market value of 100 shares
    init_share - initial share count; should be a multiple of 100
    start - start date of the price data, e.g. '2010-01-01'
    end - end date of the price data; defaults to today when omitted
    Returns
    -------
    The created flynnBot instance, or None on failure
"""
bot = flynnBot(symbol, init_cash, init_share, start, end)
ret = bot.fetch()
if ret is None:
return None
print("fetch completed")
return bot
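
# A minimal usage sketch (assumed, not part of the original module): create a
# bot via botRunner, attach the built-in MACD indicator, and run a toy
# strategy that buys on a MACD cross above the signal line and sells on the
# opposite cross. The 'macd'/'macd_signal' column names follow the plotting
# code above; the ticker is a placeholder and pandas_datareader's Tiingo
# backend needs an API key configured.
def _macd_cross_strategy(i, df, starting_price):
    if i == 0:
        return 'none'
    prev, cur = df.iloc[i - 1], df.iloc[i]
    if prev['macd'] <= prev['macd_signal'] and cur['macd'] > cur['macd_signal']:
        return 'buy'
    if prev['macd'] >= prev['macd_signal'] and cur['macd'] < cur['macd_signal']:
        return 'sell'
    return 'none'

if __name__ == '__main__':
    bot = botRunner(symbol='AAPL', start='2021-01-01')
    if bot is not None:
        bot.add_indicator(None, built_in='macd')
        bot.run(_macd_cross_strategy)
        print("return rate: %.2f%%" % (bot.capital_return_rate * 100))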
|
import pandas as pd
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import make_pipeline
from blocktorch.pipelines.components.transformers.preprocessing import (
TextTransformer
)
class LSA(TextTransformer):
"""Transformer to calculate the Latent Semantic Analysis Values of text input"""
name = "LSA Transformer"
hyperparameter_ranges = {}
def __init__(self, text_columns=None, random_state=0, **kwargs):
"""Creates a transformer to perform TF-IDF transformation and Singular Value Decomposition for text columns.
Arguments:
text_columns (list): list of feature names which should be treated as text features.
random_state (int, np.random.RandomState): Seed for the random number generator.
"""
self._all_text_columns = text_columns
self._lsa_pipeline = make_pipeline(TfidfVectorizer(), TruncatedSVD(random_state=random_state))
super().__init__(text_columns=text_columns,
random_state=random_state,
**kwargs)
def fit(self, X, y=None):
if len(self._all_text_columns) == 0:
return self
if not isinstance(X, pd.DataFrame):
X = pd.DataFrame(X)
text_columns = self._get_text_columns(X)
corpus = X[text_columns].values.flatten()
# we assume non-str values will have been filtered out prior to calling LSA.fit. this is a safeguard.
corpus = corpus.astype(str)
self._lsa_pipeline.fit(corpus)
return self
def transform(self, X, y=None):
"""Transforms data X by applying the LSA pipeline.
Arguments:
X (pd.DataFrame): Data to transform
y (pd.Series, optional): Ignored.
Returns:
pd.DataFrame: Transformed X. The original column is removed and replaced with two columns of the
format `LSA(original_column_name)[feature_number]`, where `feature_number` is 0 or 1.
"""
if not isinstance(X, pd.DataFrame):
X = pd.DataFrame(X)
if len(self._all_text_columns) == 0:
return X
X_t = X.copy()
text_columns = self._get_text_columns(X)
for col in text_columns:
            transformed = self._lsa_pipeline.transform(X[col].fillna('None').astype(str))
X_t['LSA({})[0]'.format(col)] = pd.Series(transformed[:, 0])
X_t['LSA({})[1]'.format(col)] = pd.Series(transformed[:, 1])
X_t = X_t.drop(columns=text_columns)
return X_t
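
# A short usage sketch (assumed: blocktorch and scikit-learn installed; the
# toy data is made up). Fits the LSA transformer on one text column and
# prints the two derived components per row.
if __name__ == '__main__':
    docs = pd.DataFrame({'review': ['great product', 'terrible service',
                                    'okay value', 'great service']})
    lsa = LSA(text_columns=['review'])
    lsa.fit(docs)
    print(lsa.transform(docs))  # adds LSA(review)[0] and LSA(review)[1]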
|
from nltk import word_tokenize
from nltk.stem import WordNetLemmatizer
import spacy
nlp = spacy.load("en_core_web_sm") # Don't put this inside the function- loading it in every CV iteration would tremendously slow down the pipeline.
class LemmaTokenizer:
"""
Class for custom lemmatization in `sklearn.feature_extraction.text.TfidfVectorizer
<https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html>`_
(see `this <https://scikit-learn.org/stable/modules/feature_extraction.html?highlight=stemming>`_). Uses `spaCy
<https://spacy.io/>`_ (``tknz == 'spacy'``) or `NLTK <https://www.nltk.org/>`_ (``tknz == 'wordnet'``).
"""
def __init__(self, tknz='wordnet'):
self.tknz = tknz
def __call__(self, doc):
if self.tknz == 'wordnet':
wln = WordNetLemmatizer()
return [wln.lemmatize(t) for t in word_tokenize(doc)]
if self.tknz == 'spacy':
return [t.lemma_ for t in nlp(doc,
disable=["tagger", "parser", "ner"])] |
import logging
import logging.config
import subprocess
import time
import os
import sys
import datetime
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project.settings")
sys.path.append(os.getcwd())
import django # noqa: E402
django.setup()
from django.conf import settings
from apscheduler.schedulers.background import BackgroundScheduler # noqa: E402
from apscheduler.triggers.cron import CronTrigger # noqa: E402
from hefesto_core import models # noqa: E402
logging.config.dictConfig(settings.LOGGING)
logger = logging.getLogger(__name__)
logger.info("Iniciando ejecucion de tareas")
scheduler = BackgroundScheduler()
class function_wrap:
    """Wraps a shell command so APScheduler can schedule it as a named job."""
    def __init__(self, command, *args, **kargs):
        self.command = command
        self.__name__ = command
def __call__(self, *args, **kargs):
return subprocess.run(self.command, shell=True)
def main():
    scheduler = BackgroundScheduler()
    tasks = models.Task.objects.all()
    for task in tasks:
        scheduler.add_job(
            function_wrap(task.command),
            CronTrigger.from_crontab(task.cron_expression),
            id=str(task),
            next_run_time=datetime.datetime.now()
        )
    scheduler.start()
while 1:
time.sleep(1000)
if __name__ == "__main__":
try:
logger.info("Iniciando ejecucion de tareas")
main()
except (KeyboardInterrupt, SystemExit):
pass
except Exception as e:
logger.error("La ejecucion de las tareas ha terminado")
logger.exception(e)
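
# Hedged sketch (assumed): the hefesto_core Task model referenced above is
# expected to expose at least `command` and `cron_expression` fields, e.g.:
#
#   models.Task.objects.create(command="python manage.py clearsessions",
#                              cron_expression="0 3 * * *")  # daily at 03:00
#
# CronTrigger.from_crontab accepts the standard five-field crontab syntax.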
|
from aurora.settings.base import *
import os
# Override base.py settings
DEBUG = True
ALLOWED_HOSTS = []
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'aurora',
'USER': os.environ.get("postgres_usr"),
'PASSWORD': os.environ.get("postgres_pwd"),
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
|
# Copyright 2020 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
from tensorforce import TensorforceError
from tensorforce.core import TensorSpec, tf_function, tf_util
from tensorforce.core.layers import TransformationBase
class Embedding(TransformationBase):
"""
Embedding layer (specification key: `embedding`).
Args:
size (int >= 0): Layer output size, 0 implies additionally removing the axis
(<span style="color:#C00000"><b>required</b></span>).
num_embeddings (int > 0): If set, specifies the number of embeddings
(<span style="color:#00C000"><b>default</b></span>: none).
max_norm (float): If set, embeddings are clipped if their L2-norm is larger
(<span style="color:#00C000"><b>default</b></span>: none).
bias (bool): Whether to add a trainable bias variable
(<span style="color:#00C000"><b>default</b></span>: true).
activation ('crelu' | 'elu' | 'leaky-relu' | 'none' | 'relu' | 'selu' | 'sigmoid' |
'softmax' | 'softplus' | 'softsign' | 'swish' | 'tanh'): Activation nonlinearity
(<span style="color:#00C000"><b>default</b></span>: tanh).
dropout (parameter, 0.0 <= float < 1.0): Dropout rate
(<span style="color:#00C000"><b>default</b></span>: 0.0).
vars_trainable (bool): Whether layer variables are trainable
(<span style="color:#00C000"><b>default</b></span>: true).
l2_regularization (float >= 0.0): Scalar controlling L2 regularization
(<span style="color:#00C000"><b>default</b></span>: inherit value of parent module).
name (string): Layer name
(<span style="color:#00C000"><b>default</b></span>: internally chosen).
input_spec (specification): <span style="color:#00C000"><b>internal use</b></span>.
"""
def __init__(
self, *, size, num_embeddings=None, max_norm=None, bias=True, activation='tanh',
dropout=0.0, vars_trainable=True, l2_regularization=None, name=None,
input_spec=None
):
super().__init__(
size=size, bias=bias, activation=activation, dropout=dropout,
vars_trainable=vars_trainable, l2_regularization=l2_regularization, name=name,
input_spec=input_spec
)
self.num_embeddings = num_embeddings
self.max_norm = max_norm
def default_input_spec(self):
return TensorSpec(type=('int', 'bool'), shape=None, num_values=0)
def output_spec(self):
output_spec = super().output_spec()
output_spec.type = 'float'
if not self.squeeze:
if output_spec.shape is None:
output_spec.shape = (None, self.size)
else:
output_spec.shape = output_spec.shape + (self.size,)
return output_spec
def initialize(self):
super().initialize()
        if self.num_embeddings is None:
            if self.input_spec.type == 'bool':
                self.num_embeddings = 2
            elif self.input_spec.type == 'int':
                self.num_embeddings = self.input_spec.num_values
                if self.num_embeddings is None:
                    raise TensorforceError.required(
                        name='Embedding', argument='num_embeddings',
                        condition='input num_values is None'
                    )
        elif self.input_spec.num_values is not None and \
                self.num_embeddings < self.input_spec.num_values:
            raise TensorforceError.required(
                name='Embedding', argument='num_embeddings',
                expected='>= input num_values'
            )
initializer = 'normal'
if self.activation is not None and self.activation.nonlinearity == 'relu':
initializer += '-relu'
self.weights = self.variable(
name='embeddings',
spec=TensorSpec(type='float', shape=(self.num_embeddings, self.size)),
initializer=initializer, is_trainable=self.vars_trainable, is_saved=True
)
@tf_function(num_args=1)
def apply(self, *, x):
x = tf_util.int32(x=x)
x = tf.nn.embedding_lookup(params=self.weights, ids=x, max_norm=self.max_norm)
return super().apply(x=x)
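
# Usage sketch (assumed): Tensorforce layers are normally given as
# specification dicts inside a network spec, so an embedding for an
# int-valued state might look like:
#
#   network = [
#       dict(type='embedding', size=32),
#       dict(type='dense', size=64),
#   ]
#
# passed e.g. to Agent.create(..., network=network); exact agent arguments
# depend on the Tensorforce version.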
|
from random import randint
class Solution(object):
def __init__(self, nums):
"""
:type nums: List[int]
:type numsSize: int
"""
self.nums = nums
def pick(self, target):
"""
:type target: int
:rtype: int
"""
        # Reservoir sampling over the indices holding `target`: the k-th
        # matching index replaces the current pick with probability 1/k,
        # so every occurrence is returned with equal probability.
        r = self.nums.index(target)
steps = [r]
i = r + 1
while i < len(self.nums):
if self.nums[i] == target:
steps.append(i)
if steps[randint(0, len(steps) - 1)] == i:
r = i
i += 1
return r
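
# Quick check (assumed usage): indices 2, 3 and 4 all hold the value 3, so
# repeated calls to pick(3) return each of them with probability ~1/3.
if __name__ == '__main__':
    sol = Solution([1, 2, 3, 3, 3])
    print(sol.pick(3))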
|
# Challenge 8: Write a program that reads a value in meters and displays it
# converted to centimeters and millimeters.
medida = float(input('A distance in meters: '))
cm = medida * 100
mm = medida * 1000
print('The measurement of {}m corresponds to {}cm and {}mm'.format(medida, cm, mm))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A Python implementation of the method described in [#a]_ and [#b]_ for
calculating Fourier coefficients for characterizing
closed contours.
References
----------
.. [#a] F. P. Kuhl and C. R. Giardina, “Elliptic Fourier Features of a
Closed Contour," Computer Vision, Graphics and Image Processing,
Vol. 18, pp. 236-258, 1982.
.. [#b] Oivind Due Trier, Anil K. Jain and Torfinn Taxt, “Feature Extraction
Methods for Character Recognition - A Survey”, Pattern Recognition
Vol. 29, No.4, pp. 641-662, 1996
Created by hbldh <[email protected]> on 2016-01-30.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import numpy as np
from sympy import Symbol, cos, sin, pi, sqrt, lambdify
class Model(object):
def __init__(self, order, numPts):
# initialize the model
self.xlim = 0
self.ylim = 0
self.order = order
self.numPts = numPts
self.px = None
self.px, self.py, self.zx, self.zy, self.nx, self.ny = self.init_efd_model(order)
self.contour = None
self.coeffs = None
self.locus = None
self.P = None
self.N = None
self.Cbar = None
@staticmethod
def init_efd_model(order):
a = Symbol('a')
b = Symbol('b')
c = Symbol('c')
d = Symbol('d')
m = Symbol('m')
n = Symbol('n')
a1 = Symbol('a1')
a2 = Symbol('a2')
a3 = Symbol('a3')
a4 = Symbol('a4')
b1 = Symbol('b1')
b2 = Symbol('b2')
b3 = Symbol('b3')
b4 = Symbol('b4')
c1 = Symbol('c1')
c2 = Symbol('c2')
c3 = Symbol('c3')
c4 = Symbol('c4')
d1 = Symbol('d1')
d2 = Symbol('d2')
d3 = Symbol('d3')
d4 = Symbol('d4')
a_ = [a1, a2, a3, a4]
b_ = [b1, b2, b3, b4]
c_ = [c1, c2, c3, c4]
d_ = [d1, d2, d3, d4]
x = a * cos(2 * n * pi * m) + b * sin(2 * n * pi * m)
y = c * cos(2 * n * pi * m) + d * sin(2 * n * pi * m)
dx = x.diff(m)
dy = y.diff(m)
Zx_sym = 0
Zy_sym = 0
Px = lambdify((a, b, n, m), x)
Py = lambdify((c, d, n, m), y)
Zx = lambdify((a, b, n, m), dx)
Zy = lambdify((c, d, n, m), dy)
        # precompute the symbolic derivative sums; useful for real-time evaluation
for n_ in range(order):
dx1 = dx.subs([(a, a_[n_]), (b, b_[n_]), (n, n_ + 1)])
dy1 = dy.subs([(c, c_[n_]), (d, d_[n_]), (n, n_ + 1)])
# symbolic value of dx,dy
Zx_sym += dx1
Zy_sym += dy1
Z = sqrt(Zx_sym ** 2 + Zy_sym ** 2)
dx_norm = Zx_sym / Z
dy_norm = Zy_sym / Z
ddx_norm = dx_norm.diff(m)
ddy_norm = dy_norm.diff(m)
tt = [m]
ax = a_ + b_ + c_ + d_ + tt
Nx = lambdify(ax, ddx_norm)
Ny = lambdify(ax, ddy_norm)
return Px, Py, Zx, Zy, Nx, Ny
def generate_model(self, contour, xlim, ylim):
self.contour = contour
self.xlim = xlim
self.ylim = ylim
self.locus = self.calculate_dc_coefficients(self.contour)
self.coeffs = self.elliptic_fourier_descriptors(self.contour, self.order)
self.P, self.N, self.Cbar = self.generate_efd_model()
# import matplotlib.pyplot as plt
# plt.plot(contour[:, 0], contour[:, 1], 'c--', linewidth=2)
# plt.plot(self.P[:, 0], self.P[:, 1], 'y', linewidth=2)
# plt.show()
return self.P, self.N, self.Cbar
def calculate_dc_coefficients(self, contour):
"""Calculate the :math:`A_0` and :math:`C_0` coefficients of the elliptic Fourier series.
:param numpy.ndarray contour: A contour array of size ``[M x 2]``.
:return: The :math:`A_0` and :math:`C_0` coefficients.
:rtype: tuple
"""
dxy = np.diff(contour, axis=0)
dt = np.sqrt((dxy ** 2).sum(axis=1))
t = np.concatenate([([0., ]), np.cumsum(dt)])
T = t[-1]
xi = np.cumsum(dxy[:, 0]) - (dxy[:, 0] / dt) * t[1:]
A0 = (1 / T) * np.sum(((dxy[:, 0] / (2 * dt)) * np.diff(t ** 2)) + xi * dt)
delta = np.cumsum(dxy[:, 1]) - (dxy[:, 1] / dt) * t[1:]
C0 = (1 / T) * np.sum(((dxy[:, 1] / (2 * dt)) * np.diff(t ** 2)) + delta * dt)
        # A0 and C0 relate to the first point of the contour array as origin.
        # Add those values to the coefficients so they relate to the true origin.
return contour[0, 0] + A0, contour[0, 1] + C0
def elliptic_fourier_descriptors(self, contour, order=10, normalize=False):
"""Calculate elliptical Fourier descriptors for a contour.
:param numpy.ndarray contour: A contour array of size ``[M x 2]``.
:param int order: The order of Fourier coefficients to calculate.
:param bool normalize: If the coefficients should be normalized;
see references for details.
:return: A ``[order x 4]`` array of Fourier coefficients.
:rtype: :py:class:`numpy.ndarray`
"""
dxy = np.diff(contour, axis=0)
dt = np.sqrt((dxy ** 2).sum(axis=1))
t = np.concatenate([([0., ]), np.cumsum(dt)])
T = t[-1]
phi = (2 * np.pi * t) / T
coeffs = np.zeros((order, 4))
for n in range(1, order + 1):
const = T / (2 * n * n * np.pi * np.pi)
phi_n = phi * n
d_cos_phi_n = np.cos(phi_n[1:]) - np.cos(phi_n[:-1])
d_sin_phi_n = np.sin(phi_n[1:]) - np.sin(phi_n[:-1])
a_n = const * np.sum((dxy[:, 0] / dt) * d_cos_phi_n)
b_n = const * np.sum((dxy[:, 0] / dt) * d_sin_phi_n)
c_n = const * np.sum((dxy[:, 1] / dt) * d_cos_phi_n)
d_n = const * np.sum((dxy[:, 1] / dt) * d_sin_phi_n)
coeffs[n - 1, :] = a_n, b_n, c_n, d_n
if normalize:
coeffs = self.normalize_efd(coeffs)
return coeffs
def normalize_efd(self, coeffs, size_invariant=True):
"""Normalizes an array of Fourier coefficients.
See [#a]_ and [#b]_ for details.
:param numpy.ndarray coeffs: A ``[n x 4]`` Fourier coefficient array.
:param bool size_invariant: If size invariance normalizing should be done as well.
Default is ``True``.
:return: The normalized ``[n x 4]`` Fourier coefficient array.
:rtype: :py:class:`numpy.ndarray`
"""
# Make the coefficients have a zero phase shift from
# the first major axis. Theta_1 is that shift angle.
theta_1 = 0.5 * np.arctan2(
2 * ((coeffs[0, 0] * coeffs[0, 1]) + (coeffs[0, 2] * coeffs[0, 3])),
((coeffs[0, 0] ** 2) - (coeffs[0, 1] ** 2) + (coeffs[0, 2] ** 2) - (coeffs[0, 3] ** 2)))
# Rotate all coefficients by theta_1.
for n in range(1, coeffs.shape[0] + 1):
coeffs[n - 1, :] = np.dot(
np.array([[coeffs[n - 1, 0], coeffs[n - 1, 1]],
[coeffs[n - 1, 2], coeffs[n - 1, 3]]]),
np.array([[np.cos(n * theta_1), -np.sin(n * theta_1)],
[np.sin(n * theta_1), np.cos(n * theta_1)]])).flatten()
# Make the coefficients rotation invariant by rotating so that
# the semi-major axis is parallel to the x-axis.
psi_1 = np.arctan2(coeffs[0, 2], coeffs[0, 0])
psi_rotation_matrix = np.array([[np.cos(psi_1), np.sin(psi_1)],
[-np.sin(psi_1), np.cos(psi_1)]])
# Rotate all coefficients by -psi_1.
for n in range(1, coeffs.shape[0] + 1):
coeffs[n - 1, :] = psi_rotation_matrix.dot(
np.array([[coeffs[n - 1, 0], coeffs[n - 1, 1]],
[coeffs[n - 1, 2], coeffs[n - 1, 3]]])).flatten()
if size_invariant:
# Obtain size-invariance by normalizing.
coeffs /= np.abs(coeffs[0, 0])
return coeffs
def generate_efd_model(self):
m_ = np.linspace(0, 1.0, self.numPts)
Px = np.ones(self.numPts) * self.locus[0]
Py = np.ones(self.numPts) * self.locus[1]
Zx = 0
Zy = 0
a = []
b = []
c = []
d = []
        # accumulate each harmonic's contribution to position and tangent
for n_ in range(self.coeffs.shape[0]):
a.append(self.coeffs[n_, 0])
b.append(self.coeffs[n_, 1])
c.append(self.coeffs[n_, 2])
d.append(self.coeffs[n_, 3])
Px += self.px(a[n_], b[n_], (n_ + 1), m_)
Py += self.py(c[n_], d[n_], (n_ + 1), m_)
Zx += self.zx(a[n_], b[n_], (n_ + 1), m_)
Zy += self.zy(c[n_], d[n_], (n_ + 1), m_)
# put together all the variables:
N = np.zeros((self.numPts, 3))
for i in range(0, self.numPts):
ax = a + b + c + d
ax.append(m_[i])
N[i, 0] = self.nx(*ax)
N[i, 1] = self.ny(*ax)
N[i, 2] = 0
# calculate norm of normal vector
# N = np.zeros((numPts, 3))
# N[:, 0] = Nx
# N[:, 1] = Ny
# N[:, 2] = 0
Px[Px < 0] = 0
Py[Py < 0] = 0
Px[Px > self.xlim-1] = self.xlim-1
Py[Py > self.ylim-1] = self.ylim-1
P = np.zeros((self.numPts, 3))
P[:, 0] = Px
P[:, 1] = Py
P[:, 2] = 0
C = np.linalg.norm(N, axis=1)
# cross product tells whether we have concave or convex curvature.
crossProd = np.zeros(len(Zx))
for ii in range(0, len(Zx)):
aa = np.array([Zx[ii], Zy[ii], 0])
bb = np.array(N[ii, :])
crossProd[ii] = np.cross(aa, bb)[2]
Cbar = np.sign(crossProd) * abs(C)
return P, N, Cbar
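
# A minimal usage sketch (assumed values): fit the elliptic Fourier model to
# a synthetic circular contour. `order` must not exceed the four harmonics
# hardcoded in init_efd_model; the contour is an [M x 2] array as documented.
if __name__ == '__main__':
    theta = np.linspace(0, 2 * np.pi, 200)
    circle = np.column_stack([100 + 50 * np.cos(theta),
                              100 + 50 * np.sin(theta)])
    model = Model(order=4, numPts=100)
    P, N, Cbar = model.generate_model(circle, xlim=200, ylim=200)
    print(P.shape, N.shape, Cbar.shape)  # (100, 3) (100, 3) (100,)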
|
from carton import Carton
f = open('input','r')
entry = [int(s) for s in f.readline().split(",")]
lines = f.readlines()
t = len(lines)
i = 1
cards = []
while (i < t):
data = []
for j in range(5):
data.append([int(v) for v in lines[i].split()])
i += 1
d = Carton(data)
cards.append(d)
    i += 1  # skip the blank line between boards
for n in entry:
if (len(cards) == 0):
exit(0)
print("Probando el número: " + str(n))
borrados = []
for card in cards:
card.mark(n)
        if (card.check()):
            print("Found a winning line")
            print(card)
            print("Number: " + str(n))
            print(card.sum_unmarked() * n)
            borrados.append(card)
            #print("Number of cards: " + str(len(cards)))
            #exit(0)
else:
print(card)
print("_______________________________________________________________________________________")
for b in borrados:
cards.remove(b)
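
# Expected 'input' file format (inferred from the parsing above): the first
# line is the comma-separated draw order; each board follows as a blank line
# plus five rows of five space-separated numbers, e.g.:
#
#   7,4,9,5,11
#
#   22 13 17 11  0
#    8  2 23  4 24
#   21  9 14 16  7
#    6 10  3 18  5
#    1 12 20 15 19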
|
import socket
# create an INET raw socket (requires root/administrator privileges)
s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_TCP)
# receive packets; each recvfrom returns (bytes, address)
while True:
    raw_output = s.recvfrom(65535)
    packet_bytes = raw_output[0]
    address = raw_output[1]
    print(packet_bytes)
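
# Hedged follow-up sketch: unpack the fixed 20-byte IPv4 header from each
# captured packet with the standard struct module (field layout per RFC 791).
# This assumes the kernel delivers the IP header with the payload, which is
# the case for IPPROTO_TCP raw sockets on Linux.
import struct

def parse_ipv4_header(packet):
    # !BBHHHBBH4s4s: version/IHL, TOS, total length, identification,
    # flags/fragment offset, TTL, protocol, checksum, src IP, dst IP
    fields = struct.unpack('!BBHHHBBH4s4s', packet[:20])
    return {
        'version': fields[0] >> 4,
        'ttl': fields[5],
        'protocol': fields[6],
        'src': socket.inet_ntoa(fields[8]),
        'dst': socket.inet_ntoa(fields[9]),
    }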
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'D:\PyCharmProjects\rp_air\ui\rp_air_main_ui.ui'
#
# Created: Wed Apr 15 14:21:40 2015
# by: PyQt4 UI code generator 4.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(810, 550)
MainWindow.setMinimumSize(QtCore.QSize(810, 550))
MainWindow.setMaximumSize(QtCore.QSize(810, 550))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(100, 100, 100))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(150, 150, 150))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(125, 125, 125))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(50, 50, 50))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(66, 66, 66))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(100, 100, 100))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(50, 50, 50))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(100, 100, 100))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(150, 150, 150))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(125, 125, 125))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(50, 50, 50))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(66, 66, 66))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(100, 100, 100))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(50, 50, 50))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(50, 50, 50))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(100, 100, 100))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(150, 150, 150))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(125, 125, 125))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(50, 50, 50))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(66, 66, 66))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(50, 50, 50))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(50, 50, 50))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(100, 100, 100))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(100, 100, 100))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(100, 100, 100))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
MainWindow.setPalette(palette)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.pushButton_refresh_data = QtGui.QPushButton(self.centralwidget)
self.pushButton_refresh_data.setGeometry(QtCore.QRect(570, 460, 231, 51))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
gradient = QtGui.QLinearGradient(0.0, 0.0, 1.0, 0.0)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(0, 0, 0))
gradient.setColorAt(1.0, QtGui.QColor(255, 255, 255))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
gradient = QtGui.QLinearGradient(0.0, 0.0, 1.0, 0.0)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(0, 0, 0))
gradient.setColorAt(1.0, QtGui.QColor(255, 255, 255))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
gradient = QtGui.QLinearGradient(0.0, 0.0, 1.0, 0.0)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(0, 0, 0))
gradient.setColorAt(1.0, QtGui.QColor(255, 255, 255))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
gradient = QtGui.QLinearGradient(0.0, 0.0, 1.0, 0.0)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(0, 0, 0))
gradient.setColorAt(1.0, QtGui.QColor(255, 255, 255))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
gradient = QtGui.QLinearGradient(0.0, 0.0, 1.0, 0.0)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(0, 0, 0))
gradient.setColorAt(1.0, QtGui.QColor(255, 255, 255))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
gradient = QtGui.QLinearGradient(0.0, 0.0, 1.0, 0.0)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(0, 0, 0))
gradient.setColorAt(1.0, QtGui.QColor(255, 255, 255))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
gradient = QtGui.QLinearGradient(0.0, 0.0, 1.0, 0.0)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(0, 0, 0))
gradient.setColorAt(1.0, QtGui.QColor(255, 255, 255))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
gradient = QtGui.QLinearGradient(0.0, 0.0, 1.0, 0.0)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(0, 0, 0))
gradient.setColorAt(1.0, QtGui.QColor(255, 255, 255))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
gradient = QtGui.QLinearGradient(0.0, 0.0, 1.0, 0.0)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(0, 0, 0))
gradient.setColorAt(1.0, QtGui.QColor(255, 255, 255))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
self.pushButton_refresh_data.setPalette(palette)
self.pushButton_refresh_data.setAutoFillBackground(False)
self.pushButton_refresh_data.setStyleSheet(_fromUtf8("background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0, stop:0 rgba(0, 0, 0, 255), stop:1 rgba(255, 255, 255, 255));\n"
"border-color: rgb(255, 255, 255);\n"
"color: rgb(255, 255, 255);"))
self.pushButton_refresh_data.setObjectName(_fromUtf8("pushButton_refresh_data"))
self.groupBox_analysis_today = QtGui.QGroupBox(self.centralwidget)
self.groupBox_analysis_today.setGeometry(QtCore.QRect(10, 160, 551, 171))
self.groupBox_analysis_today.setStyleSheet(_fromUtf8("color: rgb(255, 255, 255);"))
self.groupBox_analysis_today.setObjectName(_fromUtf8("groupBox_analysis_today"))
self.gridLayoutWidget_3 = QtGui.QWidget(self.groupBox_analysis_today)
self.gridLayoutWidget_3.setGeometry(QtCore.QRect(10, 20, 528, 142))
self.gridLayoutWidget_3.setObjectName(_fromUtf8("gridLayoutWidget_3"))
self.gridLayout_analysis_today = QtGui.QGridLayout(self.gridLayoutWidget_3)
self.gridLayout_analysis_today.setMargin(0)
self.gridLayout_analysis_today.setObjectName(_fromUtf8("gridLayout_analysis_today"))
self.label_analysis_time = QtGui.QLabel(self.gridLayoutWidget_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_analysis_time.sizePolicy().hasHeightForWidth())
self.label_analysis_time.setSizePolicy(sizePolicy)
self.label_analysis_time.setMinimumSize(QtCore.QSize(200, 30))
self.label_analysis_time.setFrameShape(QtGui.QFrame.Box)
self.label_analysis_time.setFrameShadow(QtGui.QFrame.Plain)
self.label_analysis_time.setLineWidth(1)
self.label_analysis_time.setAlignment(QtCore.Qt.AlignCenter)
self.label_analysis_time.setObjectName(_fromUtf8("label_analysis_time"))
self.gridLayout_analysis_today.addWidget(self.label_analysis_time, 0, 0, 1, 1)
self.gridLayout_analysis_today_table = QtGui.QGridLayout()
self.gridLayout_analysis_today_table.setObjectName(_fromUtf8("gridLayout_analysis_today_table"))
self.label_analysis_header = QtGui.QLabel(self.gridLayoutWidget_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_analysis_header.sizePolicy().hasHeightForWidth())
self.label_analysis_header.setSizePolicy(sizePolicy)
self.label_analysis_header.setMinimumSize(QtCore.QSize(100, 30))
self.label_analysis_header.setFrameShape(QtGui.QFrame.Box)
self.label_analysis_header.setFrameShadow(QtGui.QFrame.Plain)
self.label_analysis_header.setLineWidth(1)
self.label_analysis_header.setAlignment(QtCore.Qt.AlignCenter)
self.label_analysis_header.setObjectName(_fromUtf8("label_analysis_header"))
self.gridLayout_analysis_today_table.addWidget(self.label_analysis_header, 0, 0, 1, 1)
self.label_analysis_pm25_today_04 = QtGui.QLabel(self.gridLayoutWidget_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_analysis_pm25_today_04.sizePolicy().hasHeightForWidth())
self.label_analysis_pm25_today_04.setSizePolicy(sizePolicy)
self.label_analysis_pm25_today_04.setMinimumSize(QtCore.QSize(100, 30))
self.label_analysis_pm25_today_04.setFrameShape(QtGui.QFrame.Box)
self.label_analysis_pm25_today_04.setFrameShadow(QtGui.QFrame.Plain)
self.label_analysis_pm25_today_04.setLineWidth(1)
self.label_analysis_pm25_today_04.setAlignment(QtCore.Qt.AlignCenter)
self.label_analysis_pm25_today_04.setObjectName(_fromUtf8("label_analysis_pm25_today_04"))
self.gridLayout_analysis_today_table.addWidget(self.label_analysis_pm25_today_04, 2, 4, 1, 1)
self.label_analysis_pm10_today_03 = QtGui.QLabel(self.gridLayoutWidget_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_analysis_pm10_today_03.sizePolicy().hasHeightForWidth())
self.label_analysis_pm10_today_03.setSizePolicy(sizePolicy)
self.label_analysis_pm10_today_03.setMinimumSize(QtCore.QSize(100, 30))
self.label_analysis_pm10_today_03.setFrameShape(QtGui.QFrame.Box)
self.label_analysis_pm10_today_03.setFrameShadow(QtGui.QFrame.Plain)
self.label_analysis_pm10_today_03.setLineWidth(1)
self.label_analysis_pm10_today_03.setAlignment(QtCore.Qt.AlignCenter)
self.label_analysis_pm10_today_03.setObjectName(_fromUtf8("label_analysis_pm10_today_03"))
self.gridLayout_analysis_today_table.addWidget(self.label_analysis_pm10_today_03, 1, 3, 1, 1)
self.label_analysis_pm25_today_01 = QtGui.QLabel(self.gridLayoutWidget_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_analysis_pm25_today_01.sizePolicy().hasHeightForWidth())
self.label_analysis_pm25_today_01.setSizePolicy(sizePolicy)
self.label_analysis_pm25_today_01.setMinimumSize(QtCore.QSize(100, 30))
self.label_analysis_pm25_today_01.setFrameShape(QtGui.QFrame.Box)
self.label_analysis_pm25_today_01.setFrameShadow(QtGui.QFrame.Plain)
self.label_analysis_pm25_today_01.setLineWidth(1)
self.label_analysis_pm25_today_01.setAlignment(QtCore.Qt.AlignCenter)
self.label_analysis_pm25_today_01.setObjectName(_fromUtf8("label_analysis_pm25_today_01"))
self.gridLayout_analysis_today_table.addWidget(self.label_analysis_pm25_today_01, 2, 1, 1, 1)
self.label_analysis_pm10_column = QtGui.QLabel(self.gridLayoutWidget_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_analysis_pm10_column.sizePolicy().hasHeightForWidth())
self.label_analysis_pm10_column.setSizePolicy(sizePolicy)
self.label_analysis_pm10_column.setMinimumSize(QtCore.QSize(100, 30))
self.label_analysis_pm10_column.setFrameShape(QtGui.QFrame.Box)
self.label_analysis_pm10_column.setFrameShadow(QtGui.QFrame.Plain)
self.label_analysis_pm10_column.setLineWidth(1)
self.label_analysis_pm10_column.setAlignment(QtCore.Qt.AlignCenter)
self.label_analysis_pm10_column.setObjectName(_fromUtf8("label_analysis_pm10_column"))
self.gridLayout_analysis_today_table.addWidget(self.label_analysis_pm10_column, 1, 0, 1, 1)
self.label_analysis_city_01_header = QtGui.QLabel(self.gridLayoutWidget_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_analysis_city_01_header.sizePolicy().hasHeightForWidth())
self.label_analysis_city_01_header.setSizePolicy(sizePolicy)
self.label_analysis_city_01_header.setMinimumSize(QtCore.QSize(100, 30))
self.label_analysis_city_01_header.setFrameShape(QtGui.QFrame.Box)
self.label_analysis_city_01_header.setFrameShadow(QtGui.QFrame.Plain)
self.label_analysis_city_01_header.setLineWidth(1)
self.label_analysis_city_01_header.setAlignment(QtCore.Qt.AlignCenter)
self.label_analysis_city_01_header.setObjectName(_fromUtf8("label_analysis_city_01_header"))
self.gridLayout_analysis_today_table.addWidget(self.label_analysis_city_01_header, 0, 1, 1, 1)
self.label_analysis_pm10_today_01 = QtGui.QLabel(self.gridLayoutWidget_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_analysis_pm10_today_01.sizePolicy().hasHeightForWidth())
self.label_analysis_pm10_today_01.setSizePolicy(sizePolicy)
self.label_analysis_pm10_today_01.setMinimumSize(QtCore.QSize(100, 30))
self.label_analysis_pm10_today_01.setFrameShape(QtGui.QFrame.Box)
self.label_analysis_pm10_today_01.setFrameShadow(QtGui.QFrame.Plain)
self.label_analysis_pm10_today_01.setLineWidth(1)
self.label_analysis_pm10_today_01.setAlignment(QtCore.Qt.AlignCenter)
self.label_analysis_pm10_today_01.setObjectName(_fromUtf8("label_analysis_pm10_today_01"))
self.gridLayout_analysis_today_table.addWidget(self.label_analysis_pm10_today_01, 1, 1, 1, 1)
self.label_analysis_city_04_header = QtGui.QLabel(self.gridLayoutWidget_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_analysis_city_04_header.sizePolicy().hasHeightForWidth())
self.label_analysis_city_04_header.setSizePolicy(sizePolicy)
self.label_analysis_city_04_header.setMinimumSize(QtCore.QSize(100, 30))
self.label_analysis_city_04_header.setFrameShape(QtGui.QFrame.Box)
self.label_analysis_city_04_header.setFrameShadow(QtGui.QFrame.Plain)
self.label_analysis_city_04_header.setLineWidth(1)
self.label_analysis_city_04_header.setAlignment(QtCore.Qt.AlignCenter)
self.label_analysis_city_04_header.setObjectName(_fromUtf8("label_analysis_city_04_header"))
self.gridLayout_analysis_today_table.addWidget(self.label_analysis_city_04_header, 0, 4, 1, 1)
self.label_analysis_city_02_header = QtGui.QLabel(self.gridLayoutWidget_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_analysis_city_02_header.sizePolicy().hasHeightForWidth())
self.label_analysis_city_02_header.setSizePolicy(sizePolicy)
self.label_analysis_city_02_header.setMinimumSize(QtCore.QSize(100, 30))
self.label_analysis_city_02_header.setFrameShape(QtGui.QFrame.Box)
self.label_analysis_city_02_header.setFrameShadow(QtGui.QFrame.Plain)
self.label_analysis_city_02_header.setLineWidth(1)
self.label_analysis_city_02_header.setAlignment(QtCore.Qt.AlignCenter)
self.label_analysis_city_02_header.setObjectName(_fromUtf8("label_analysis_city_02_header"))
self.gridLayout_analysis_today_table.addWidget(self.label_analysis_city_02_header, 0, 2, 1, 1)
self.label_analysis_pm25_column = QtGui.QLabel(self.gridLayoutWidget_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_analysis_pm25_column.sizePolicy().hasHeightForWidth())
self.label_analysis_pm25_column.setSizePolicy(sizePolicy)
self.label_analysis_pm25_column.setMinimumSize(QtCore.QSize(100, 30))
self.label_analysis_pm25_column.setFrameShape(QtGui.QFrame.Box)
self.label_analysis_pm25_column.setFrameShadow(QtGui.QFrame.Plain)
self.label_analysis_pm25_column.setLineWidth(1)
self.label_analysis_pm25_column.setAlignment(QtCore.Qt.AlignCenter)
self.label_analysis_pm25_column.setObjectName(_fromUtf8("label_analysis_pm25_column"))
self.gridLayout_analysis_today_table.addWidget(self.label_analysis_pm25_column, 2, 0, 1, 1)
self.label_analysis_pm25_today_02 = QtGui.QLabel(self.gridLayoutWidget_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_analysis_pm25_today_02.sizePolicy().hasHeightForWidth())
self.label_analysis_pm25_today_02.setSizePolicy(sizePolicy)
self.label_analysis_pm25_today_02.setMinimumSize(QtCore.QSize(100, 30))
self.label_analysis_pm25_today_02.setFrameShape(QtGui.QFrame.Box)
self.label_analysis_pm25_today_02.setFrameShadow(QtGui.QFrame.Plain)
self.label_analysis_pm25_today_02.setLineWidth(1)
self.label_analysis_pm25_today_02.setAlignment(QtCore.Qt.AlignCenter)
self.label_analysis_pm25_today_02.setObjectName(_fromUtf8("label_analysis_pm25_today_02"))
self.gridLayout_analysis_today_table.addWidget(self.label_analysis_pm25_today_02, 2, 2, 1, 1)
self.label_analysis_pm10_today_04 = QtGui.QLabel(self.gridLayoutWidget_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_analysis_pm10_today_04.sizePolicy().hasHeightForWidth())
self.label_analysis_pm10_today_04.setSizePolicy(sizePolicy)
self.label_analysis_pm10_today_04.setMinimumSize(QtCore.QSize(100, 30))
self.label_analysis_pm10_today_04.setFrameShape(QtGui.QFrame.Box)
self.label_analysis_pm10_today_04.setFrameShadow(QtGui.QFrame.Plain)
self.label_analysis_pm10_today_04.setLineWidth(1)
self.label_analysis_pm10_today_04.setAlignment(QtCore.Qt.AlignCenter)
self.label_analysis_pm10_today_04.setObjectName(_fromUtf8("label_analysis_pm10_today_04"))
self.gridLayout_analysis_today_table.addWidget(self.label_analysis_pm10_today_04, 1, 4, 1, 1)
self.label_analysis_pm10_today_02 = QtGui.QLabel(self.gridLayoutWidget_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_analysis_pm10_today_02.sizePolicy().hasHeightForWidth())
self.label_analysis_pm10_today_02.setSizePolicy(sizePolicy)
self.label_analysis_pm10_today_02.setMinimumSize(QtCore.QSize(100, 30))
self.label_analysis_pm10_today_02.setFrameShape(QtGui.QFrame.Box)
self.label_analysis_pm10_today_02.setFrameShadow(QtGui.QFrame.Plain)
self.label_analysis_pm10_today_02.setLineWidth(1)
self.label_analysis_pm10_today_02.setAlignment(QtCore.Qt.AlignCenter)
self.label_analysis_pm10_today_02.setObjectName(_fromUtf8("label_analysis_pm10_today_02"))
self.gridLayout_analysis_today_table.addWidget(self.label_analysis_pm10_today_02, 1, 2, 1, 1)
self.label_analysis_city_03_header = QtGui.QLabel(self.gridLayoutWidget_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_analysis_city_03_header.sizePolicy().hasHeightForWidth())
self.label_analysis_city_03_header.setSizePolicy(sizePolicy)
self.label_analysis_city_03_header.setMinimumSize(QtCore.QSize(100, 30))
self.label_analysis_city_03_header.setFrameShape(QtGui.QFrame.Box)
self.label_analysis_city_03_header.setFrameShadow(QtGui.QFrame.Plain)
self.label_analysis_city_03_header.setLineWidth(1)
self.label_analysis_city_03_header.setAlignment(QtCore.Qt.AlignCenter)
self.label_analysis_city_03_header.setObjectName(_fromUtf8("label_analysis_city_03_header"))
self.gridLayout_analysis_today_table.addWidget(self.label_analysis_city_03_header, 0, 3, 1, 1)
self.label_analysis_pm25_today_03 = QtGui.QLabel(self.gridLayoutWidget_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_analysis_pm25_today_03.sizePolicy().hasHeightForWidth())
self.label_analysis_pm25_today_03.setSizePolicy(sizePolicy)
self.label_analysis_pm25_today_03.setMinimumSize(QtCore.QSize(100, 30))
self.label_analysis_pm25_today_03.setFrameShape(QtGui.QFrame.Box)
self.label_analysis_pm25_today_03.setFrameShadow(QtGui.QFrame.Plain)
self.label_analysis_pm25_today_03.setLineWidth(1)
self.label_analysis_pm25_today_03.setAlignment(QtCore.Qt.AlignCenter)
self.label_analysis_pm25_today_03.setObjectName(_fromUtf8("label_analysis_pm25_today_03"))
self.gridLayout_analysis_today_table.addWidget(self.label_analysis_pm25_today_03, 2, 3, 1, 1)
self.gridLayout_analysis_today.addLayout(self.gridLayout_analysis_today_table, 1, 0, 1, 1)
self.groupBox_analysis_tomorrow = QtGui.QGroupBox(self.centralwidget)
self.groupBox_analysis_tomorrow.setGeometry(QtCore.QRect(10, 340, 551, 171))
self.groupBox_analysis_tomorrow.setStyleSheet(_fromUtf8("color: rgb(255, 255, 255);"))
self.groupBox_analysis_tomorrow.setObjectName(_fromUtf8("groupBox_analysis_tomorrow"))
self.gridLayoutWidget_6 = QtGui.QWidget(self.groupBox_analysis_tomorrow)
self.gridLayoutWidget_6.setGeometry(QtCore.QRect(10, 20, 528, 142))
self.gridLayoutWidget_6.setObjectName(_fromUtf8("gridLayoutWidget_6"))
self.gridLayout_analysis_tomorrow = QtGui.QGridLayout(self.gridLayoutWidget_6)
self.gridLayout_analysis_tomorrow.setMargin(0)
self.gridLayout_analysis_tomorrow.setObjectName(_fromUtf8("gridLayout_analysis_tomorrow"))
self.label_analysis_tomorrow_title = QtGui.QLabel(self.gridLayoutWidget_6)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_analysis_tomorrow_title.sizePolicy().hasHeightForWidth())
self.label_analysis_tomorrow_title.setSizePolicy(sizePolicy)
self.label_analysis_tomorrow_title.setMinimumSize(QtCore.QSize(200, 30))
self.label_analysis_tomorrow_title.setFrameShape(QtGui.QFrame.Box)
self.label_analysis_tomorrow_title.setFrameShadow(QtGui.QFrame.Plain)
self.label_analysis_tomorrow_title.setLineWidth(1)
self.label_analysis_tomorrow_title.setAlignment(QtCore.Qt.AlignCenter)
self.label_analysis_tomorrow_title.setObjectName(_fromUtf8("label_analysis_tomorrow_title"))
self.gridLayout_analysis_tomorrow.addWidget(self.label_analysis_tomorrow_title, 0, 0, 1, 1)
self.gridLayout_analysis_tomorrow_table = QtGui.QGridLayout()
self.gridLayout_analysis_tomorrow_table.setObjectName(_fromUtf8("gridLayout_analysis_tomorrow_table"))
self.label_analysis_pm25_tomorrow_column = QtGui.QLabel(self.gridLayoutWidget_6)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_analysis_pm25_tomorrow_column.sizePolicy().hasHeightForWidth())
self.label_analysis_pm25_tomorrow_column.setSizePolicy(sizePolicy)
self.label_analysis_pm25_tomorrow_column.setMinimumSize(QtCore.QSize(100, 30))
self.label_analysis_pm25_tomorrow_column.setFrameShape(QtGui.QFrame.Box)
self.label_analysis_pm25_tomorrow_column.setFrameShadow(QtGui.QFrame.Plain)
self.label_analysis_pm25_tomorrow_column.setLineWidth(1)
self.label_analysis_pm25_tomorrow_column.setAlignment(QtCore.Qt.AlignCenter)
self.label_analysis_pm25_tomorrow_column.setObjectName(_fromUtf8("label_analysis_pm25_tomorrow_column"))
self.gridLayout_analysis_tomorrow_table.addWidget(self.label_analysis_pm25_tomorrow_column, 2, 0, 1, 1)
self.label_analysis_pm25_tomorrow_02 = QtGui.QLabel(self.gridLayoutWidget_6)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_analysis_pm25_tomorrow_02.sizePolicy().hasHeightForWidth())
self.label_analysis_pm25_tomorrow_02.setSizePolicy(sizePolicy)
self.label_analysis_pm25_tomorrow_02.setMinimumSize(QtCore.QSize(100, 30))
self.label_analysis_pm25_tomorrow_02.setFrameShape(QtGui.QFrame.Box)
self.label_analysis_pm25_tomorrow_02.setFrameShadow(QtGui.QFrame.Plain)
self.label_analysis_pm25_tomorrow_02.setLineWidth(1)
self.label_analysis_pm25_tomorrow_02.setAlignment(QtCore.Qt.AlignCenter)
self.label_analysis_pm25_tomorrow_02.setObjectName(_fromUtf8("label_analysis_pm25_tomorrow_02"))
self.gridLayout_analysis_tomorrow_table.addWidget(self.label_analysis_pm25_tomorrow_02, 2, 2, 1, 1)
self.label_analysis_city_02_tomorrow_header = QtGui.QLabel(self.gridLayoutWidget_6)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_analysis_city_02_tomorrow_header.sizePolicy().hasHeightForWidth())
self.label_analysis_city_02_tomorrow_header.setSizePolicy(sizePolicy)
self.label_analysis_city_02_tomorrow_header.setMinimumSize(QtCore.QSize(100, 30))
self.label_analysis_city_02_tomorrow_header.setFrameShape(QtGui.QFrame.Box)
self.label_analysis_city_02_tomorrow_header.setFrameShadow(QtGui.QFrame.Plain)
self.label_analysis_city_02_tomorrow_header.setLineWidth(1)
self.label_analysis_city_02_tomorrow_header.setAlignment(QtCore.Qt.AlignCenter)
self.label_analysis_city_02_tomorrow_header.setObjectName(_fromUtf8("label_analysis_city_02_tomorrow_header"))
self.gridLayout_analysis_tomorrow_table.addWidget(self.label_analysis_city_02_tomorrow_header, 0, 2, 1, 1)
self.label_analysis_pm25_tomorrow_04 = QtGui.QLabel(self.gridLayoutWidget_6)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_analysis_pm25_tomorrow_04.sizePolicy().hasHeightForWidth())
self.label_analysis_pm25_tomorrow_04.setSizePolicy(sizePolicy)
self.label_analysis_pm25_tomorrow_04.setMinimumSize(QtCore.QSize(100, 30))
self.label_analysis_pm25_tomorrow_04.setFrameShape(QtGui.QFrame.Box)
self.label_analysis_pm25_tomorrow_04.setFrameShadow(QtGui.QFrame.Plain)
self.label_analysis_pm25_tomorrow_04.setLineWidth(1)
self.label_analysis_pm25_tomorrow_04.setAlignment(QtCore.Qt.AlignCenter)
self.label_analysis_pm25_tomorrow_04.setObjectName(_fromUtf8("label_analysis_pm25_tomorrow_04"))
self.gridLayout_analysis_tomorrow_table.addWidget(self.label_analysis_pm25_tomorrow_04, 2, 4, 1, 1)
self.label_analysis_city_04_tomorrow_header = QtGui.QLabel(self.gridLayoutWidget_6)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_analysis_city_04_tomorrow_header.sizePolicy().hasHeightForWidth())
self.label_analysis_city_04_tomorrow_header.setSizePolicy(sizePolicy)
self.label_analysis_city_04_tomorrow_header.setMinimumSize(QtCore.QSize(100, 30))
self.label_analysis_city_04_tomorrow_header.setFrameShape(QtGui.QFrame.Box)
self.label_analysis_city_04_tomorrow_header.setFrameShadow(QtGui.QFrame.Plain)
self.label_analysis_city_04_tomorrow_header.setLineWidth(1)
self.label_analysis_city_04_tomorrow_header.setAlignment(QtCore.Qt.AlignCenter)
self.label_analysis_city_04_tomorrow_header.setObjectName(_fromUtf8("label_analysis_city_04_tomorrow_header"))
self.gridLayout_analysis_tomorrow_table.addWidget(self.label_analysis_city_04_tomorrow_header, 0, 4, 1, 1)
self.label_analysis_pm10_tomorrow_column = QtGui.QLabel(self.gridLayoutWidget_6)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_analysis_pm10_tomorrow_column.sizePolicy().hasHeightForWidth())
self.label_analysis_pm10_tomorrow_column.setSizePolicy(sizePolicy)
self.label_analysis_pm10_tomorrow_column.setMinimumSize(QtCore.QSize(100, 30))
self.label_analysis_pm10_tomorrow_column.setFrameShape(QtGui.QFrame.Box)
self.label_analysis_pm10_tomorrow_column.setFrameShadow(QtGui.QFrame.Plain)
self.label_analysis_pm10_tomorrow_column.setLineWidth(1)
self.label_analysis_pm10_tomorrow_column.setAlignment(QtCore.Qt.AlignCenter)
self.label_analysis_pm10_tomorrow_column.setObjectName(_fromUtf8("label_analysis_pm10_tomorrow_column"))
self.gridLayout_analysis_tomorrow_table.addWidget(self.label_analysis_pm10_tomorrow_column, 1, 0, 1, 1)
self.label_analysis_pm10_tomorrow_03 = QtGui.QLabel(self.gridLayoutWidget_6)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_analysis_pm10_tomorrow_03.sizePolicy().hasHeightForWidth())
self.label_analysis_pm10_tomorrow_03.setSizePolicy(sizePolicy)
self.label_analysis_pm10_tomorrow_03.setMinimumSize(QtCore.QSize(100, 30))
self.label_analysis_pm10_tomorrow_03.setFrameShape(QtGui.QFrame.Box)
self.label_analysis_pm10_tomorrow_03.setFrameShadow(QtGui.QFrame.Plain)
self.label_analysis_pm10_tomorrow_03.setLineWidth(1)
self.label_analysis_pm10_tomorrow_03.setAlignment(QtCore.Qt.AlignCenter)
self.label_analysis_pm10_tomorrow_03.setObjectName(_fromUtf8("label_analysis_pm10_tomorrow_03"))
self.gridLayout_analysis_tomorrow_table.addWidget(self.label_analysis_pm10_tomorrow_03, 1, 3, 1, 1)
self.label_analysis_tomorrow_header = QtGui.QLabel(self.gridLayoutWidget_6)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_analysis_tomorrow_header.sizePolicy().hasHeightForWidth())
self.label_analysis_tomorrow_header.setSizePolicy(sizePolicy)
self.label_analysis_tomorrow_header.setMinimumSize(QtCore.QSize(100, 30))
self.label_analysis_tomorrow_header.setFrameShape(QtGui.QFrame.Box)
self.label_analysis_tomorrow_header.setFrameShadow(QtGui.QFrame.Plain)
self.label_analysis_tomorrow_header.setLineWidth(1)
self.label_analysis_tomorrow_header.setAlignment(QtCore.Qt.AlignCenter)
self.label_analysis_tomorrow_header.setObjectName(_fromUtf8("label_analysis_tomorrow_header"))
self.gridLayout_analysis_tomorrow_table.addWidget(self.label_analysis_tomorrow_header, 0, 0, 1, 1)
self.label_analysis_pm10_tomorrow_04 = QtGui.QLabel(self.gridLayoutWidget_6)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_analysis_pm10_tomorrow_04.sizePolicy().hasHeightForWidth())
self.label_analysis_pm10_tomorrow_04.setSizePolicy(sizePolicy)
self.label_analysis_pm10_tomorrow_04.setMinimumSize(QtCore.QSize(100, 30))
self.label_analysis_pm10_tomorrow_04.setFrameShape(QtGui.QFrame.Box)
self.label_analysis_pm10_tomorrow_04.setFrameShadow(QtGui.QFrame.Plain)
self.label_analysis_pm10_tomorrow_04.setLineWidth(1)
self.label_analysis_pm10_tomorrow_04.setAlignment(QtCore.Qt.AlignCenter)
self.label_analysis_pm10_tomorrow_04.setObjectName(_fromUtf8("label_analysis_pm10_tomorrow_04"))
self.gridLayout_analysis_tomorrow_table.addWidget(self.label_analysis_pm10_tomorrow_04, 1, 4, 1, 1)
self.label_analysis_pm25_tomorrow_01 = QtGui.QLabel(self.gridLayoutWidget_6)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_analysis_pm25_tomorrow_01.sizePolicy().hasHeightForWidth())
self.label_analysis_pm25_tomorrow_01.setSizePolicy(sizePolicy)
self.label_analysis_pm25_tomorrow_01.setMinimumSize(QtCore.QSize(100, 30))
self.label_analysis_pm25_tomorrow_01.setFrameShape(QtGui.QFrame.Box)
self.label_analysis_pm25_tomorrow_01.setFrameShadow(QtGui.QFrame.Plain)
self.label_analysis_pm25_tomorrow_01.setLineWidth(1)
self.label_analysis_pm25_tomorrow_01.setAlignment(QtCore.Qt.AlignCenter)
self.label_analysis_pm25_tomorrow_01.setObjectName(_fromUtf8("label_analysis_pm25_tomorrow_01"))
self.gridLayout_analysis_tomorrow_table.addWidget(self.label_analysis_pm25_tomorrow_01, 2, 1, 1, 1)
self.label_analysis_pm10_tomorrow_01 = QtGui.QLabel(self.gridLayoutWidget_6)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_analysis_pm10_tomorrow_01.sizePolicy().hasHeightForWidth())
self.label_analysis_pm10_tomorrow_01.setSizePolicy(sizePolicy)
self.label_analysis_pm10_tomorrow_01.setMinimumSize(QtCore.QSize(100, 30))
self.label_analysis_pm10_tomorrow_01.setFrameShape(QtGui.QFrame.Box)
self.label_analysis_pm10_tomorrow_01.setFrameShadow(QtGui.QFrame.Plain)
self.label_analysis_pm10_tomorrow_01.setLineWidth(1)
self.label_analysis_pm10_tomorrow_01.setAlignment(QtCore.Qt.AlignCenter)
self.label_analysis_pm10_tomorrow_01.setObjectName(_fromUtf8("label_analysis_pm10_tomorrow_01"))
self.gridLayout_analysis_tomorrow_table.addWidget(self.label_analysis_pm10_tomorrow_01, 1, 1, 1, 1)
self.label_analysis_city_01_tomorrow_header = QtGui.QLabel(self.gridLayoutWidget_6)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_analysis_city_01_tomorrow_header.sizePolicy().hasHeightForWidth())
self.label_analysis_city_01_tomorrow_header.setSizePolicy(sizePolicy)
self.label_analysis_city_01_tomorrow_header.setMinimumSize(QtCore.QSize(100, 30))
self.label_analysis_city_01_tomorrow_header.setFrameShape(QtGui.QFrame.Box)
self.label_analysis_city_01_tomorrow_header.setFrameShadow(QtGui.QFrame.Plain)
self.label_analysis_city_01_tomorrow_header.setLineWidth(1)
self.label_analysis_city_01_tomorrow_header.setAlignment(QtCore.Qt.AlignCenter)
self.label_analysis_city_01_tomorrow_header.setObjectName(_fromUtf8("label_analysis_city_01_tomorrow_header"))
self.gridLayout_analysis_tomorrow_table.addWidget(self.label_analysis_city_01_tomorrow_header, 0, 1, 1, 1)
self.label_analysis_pm25_tomorrow_03 = QtGui.QLabel(self.gridLayoutWidget_6)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_analysis_pm25_tomorrow_03.sizePolicy().hasHeightForWidth())
self.label_analysis_pm25_tomorrow_03.setSizePolicy(sizePolicy)
self.label_analysis_pm25_tomorrow_03.setMinimumSize(QtCore.QSize(100, 30))
self.label_analysis_pm25_tomorrow_03.setFrameShape(QtGui.QFrame.Box)
self.label_analysis_pm25_tomorrow_03.setFrameShadow(QtGui.QFrame.Plain)
self.label_analysis_pm25_tomorrow_03.setLineWidth(1)
self.label_analysis_pm25_tomorrow_03.setAlignment(QtCore.Qt.AlignCenter)
self.label_analysis_pm25_tomorrow_03.setObjectName(_fromUtf8("label_analysis_pm25_tomorrow_03"))
self.gridLayout_analysis_tomorrow_table.addWidget(self.label_analysis_pm25_tomorrow_03, 2, 3, 1, 1)
self.label_analysis_pm10_tomorrow_02 = QtGui.QLabel(self.gridLayoutWidget_6)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_analysis_pm10_tomorrow_02.sizePolicy().hasHeightForWidth())
self.label_analysis_pm10_tomorrow_02.setSizePolicy(sizePolicy)
self.label_analysis_pm10_tomorrow_02.setMinimumSize(QtCore.QSize(100, 30))
self.label_analysis_pm10_tomorrow_02.setFrameShape(QtGui.QFrame.Box)
self.label_analysis_pm10_tomorrow_02.setFrameShadow(QtGui.QFrame.Plain)
self.label_analysis_pm10_tomorrow_02.setLineWidth(1)
self.label_analysis_pm10_tomorrow_02.setAlignment(QtCore.Qt.AlignCenter)
self.label_analysis_pm10_tomorrow_02.setObjectName(_fromUtf8("label_analysis_pm10_tomorrow_02"))
self.gridLayout_analysis_tomorrow_table.addWidget(self.label_analysis_pm10_tomorrow_02, 1, 2, 1, 1)
self.label_analysis_city_03_tomorrow_header = QtGui.QLabel(self.gridLayoutWidget_6)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_analysis_city_03_tomorrow_header.sizePolicy().hasHeightForWidth())
self.label_analysis_city_03_tomorrow_header.setSizePolicy(sizePolicy)
self.label_analysis_city_03_tomorrow_header.setMinimumSize(QtCore.QSize(100, 30))
self.label_analysis_city_03_tomorrow_header.setFrameShape(QtGui.QFrame.Box)
self.label_analysis_city_03_tomorrow_header.setFrameShadow(QtGui.QFrame.Plain)
self.label_analysis_city_03_tomorrow_header.setLineWidth(1)
self.label_analysis_city_03_tomorrow_header.setAlignment(QtCore.Qt.AlignCenter)
self.label_analysis_city_03_tomorrow_header.setObjectName(_fromUtf8("label_analysis_city_03_tomorrow_header"))
self.gridLayout_analysis_tomorrow_table.addWidget(self.label_analysis_city_03_tomorrow_header, 0, 3, 1, 1)
self.gridLayout_analysis_tomorrow.addLayout(self.gridLayout_analysis_tomorrow_table, 1, 0, 1, 1)
self.groupBox_forecast = QtGui.QGroupBox(self.centralwidget)
self.groupBox_forecast.setGeometry(QtCore.QRect(10, 10, 431, 131))
self.groupBox_forecast.setStyleSheet(_fromUtf8("color: rgb(255, 255, 255);"))
self.groupBox_forecast.setCheckable(False)
self.groupBox_forecast.setObjectName(_fromUtf8("groupBox_forecast"))
self.gridLayoutWidget_2 = QtGui.QWidget(self.groupBox_forecast)
self.gridLayoutWidget_2.setGeometry(QtCore.QRect(10, 20, 408, 104))
self.gridLayoutWidget_2.setObjectName(_fromUtf8("gridLayoutWidget_2"))
self.gridLayout_forecast = QtGui.QGridLayout(self.gridLayoutWidget_2)
self.gridLayout_forecast.setMargin(0)
self.gridLayout_forecast.setObjectName(_fromUtf8("gridLayout_forecast"))
self.label_forecast_today_header = QtGui.QLabel(self.gridLayoutWidget_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_forecast_today_header.sizePolicy().hasHeightForWidth())
self.label_forecast_today_header.setSizePolicy(sizePolicy)
self.label_forecast_today_header.setMinimumSize(QtCore.QSize(200, 30))
self.label_forecast_today_header.setFrameShape(QtGui.QFrame.Box)
self.label_forecast_today_header.setFrameShadow(QtGui.QFrame.Plain)
self.label_forecast_today_header.setLineWidth(1)
self.label_forecast_today_header.setAlignment(QtCore.Qt.AlignCenter)
self.label_forecast_today_header.setObjectName(_fromUtf8("label_forecast_today_header"))
self.gridLayout_forecast.addWidget(self.label_forecast_today_header, 0, 0, 1, 1)
self.label_forecast_tomorrow_header = QtGui.QLabel(self.gridLayoutWidget_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_forecast_tomorrow_header.sizePolicy().hasHeightForWidth())
self.label_forecast_tomorrow_header.setSizePolicy(sizePolicy)
self.label_forecast_tomorrow_header.setMinimumSize(QtCore.QSize(200, 30))
self.label_forecast_tomorrow_header.setFrameShape(QtGui.QFrame.Box)
self.label_forecast_tomorrow_header.setFrameShadow(QtGui.QFrame.Plain)
self.label_forecast_tomorrow_header.setLineWidth(1)
self.label_forecast_tomorrow_header.setAlignment(QtCore.Qt.AlignCenter)
self.label_forecast_tomorrow_header.setObjectName(_fromUtf8("label_forecast_tomorrow_header"))
self.gridLayout_forecast.addWidget(self.label_forecast_tomorrow_header, 0, 1, 1, 1)
self.label_forecast_today_time = QtGui.QLabel(self.gridLayoutWidget_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_forecast_today_time.sizePolicy().hasHeightForWidth())
self.label_forecast_today_time.setSizePolicy(sizePolicy)
self.label_forecast_today_time.setMinimumSize(QtCore.QSize(200, 30))
self.label_forecast_today_time.setFrameShape(QtGui.QFrame.Box)
self.label_forecast_today_time.setFrameShadow(QtGui.QFrame.Plain)
self.label_forecast_today_time.setLineWidth(1)
self.label_forecast_today_time.setAlignment(QtCore.Qt.AlignCenter)
self.label_forecast_today_time.setObjectName(_fromUtf8("label_forecast_today_time"))
self.gridLayout_forecast.addWidget(self.label_forecast_today_time, 1, 0, 1, 1)
self.label_forecast_tomorrow_time = QtGui.QLabel(self.gridLayoutWidget_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_forecast_tomorrow_time.sizePolicy().hasHeightForWidth())
self.label_forecast_tomorrow_time.setSizePolicy(sizePolicy)
self.label_forecast_tomorrow_time.setMinimumSize(QtCore.QSize(200, 30))
self.label_forecast_tomorrow_time.setFrameShape(QtGui.QFrame.Box)
self.label_forecast_tomorrow_time.setFrameShadow(QtGui.QFrame.Plain)
self.label_forecast_tomorrow_time.setLineWidth(1)
self.label_forecast_tomorrow_time.setAlignment(QtCore.Qt.AlignCenter)
self.label_forecast_tomorrow_time.setObjectName(_fromUtf8("label_forecast_tomorrow_time"))
self.gridLayout_forecast.addWidget(self.label_forecast_tomorrow_time, 1, 1, 1, 1)
self.label_forecast_today_status = QtGui.QLabel(self.gridLayoutWidget_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_forecast_today_status.sizePolicy().hasHeightForWidth())
self.label_forecast_today_status.setSizePolicy(sizePolicy)
self.label_forecast_today_status.setMinimumSize(QtCore.QSize(200, 30))
self.label_forecast_today_status.setFrameShape(QtGui.QFrame.Box)
self.label_forecast_today_status.setFrameShadow(QtGui.QFrame.Plain)
self.label_forecast_today_status.setLineWidth(1)
self.label_forecast_today_status.setAlignment(QtCore.Qt.AlignCenter)
self.label_forecast_today_status.setObjectName(_fromUtf8("label_forecast_today_status"))
self.gridLayout_forecast.addWidget(self.label_forecast_today_status, 2, 0, 1, 1)
self.label_forecast_tomorrow_status = QtGui.QLabel(self.gridLayoutWidget_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_forecast_tomorrow_status.sizePolicy().hasHeightForWidth())
self.label_forecast_tomorrow_status.setSizePolicy(sizePolicy)
self.label_forecast_tomorrow_status.setMinimumSize(QtCore.QSize(200, 30))
self.label_forecast_tomorrow_status.setFrameShape(QtGui.QFrame.Box)
self.label_forecast_tomorrow_status.setFrameShadow(QtGui.QFrame.Plain)
self.label_forecast_tomorrow_status.setLineWidth(1)
self.label_forecast_tomorrow_status.setAlignment(QtCore.Qt.AlignCenter)
self.label_forecast_tomorrow_status.setObjectName(_fromUtf8("label_forecast_tomorrow_status"))
self.gridLayout_forecast.addWidget(self.label_forecast_tomorrow_status, 2, 1, 1, 1)
self.groupBox_cai = QtGui.QGroupBox(self.centralwidget)
self.groupBox_cai.setGeometry(QtCore.QRect(570, 20, 230, 431))
self.groupBox_cai.setStyleSheet(_fromUtf8("color: rgb(255, 255, 255);"))
self.groupBox_cai.setObjectName(_fromUtf8("groupBox_cai"))
self.gridLayout = QtGui.QGridLayout(self.groupBox_cai)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.gridLayout_cai = QtGui.QGridLayout()
self.gridLayout_cai.setObjectName(_fromUtf8("gridLayout_cai"))
self.gridLayout_cai_header = QtGui.QGridLayout()
self.gridLayout_cai_header.setObjectName(_fromUtf8("gridLayout_cai_header"))
self.label_cai_time = QtGui.QLabel(self.groupBox_cai)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_cai_time.sizePolicy().hasHeightForWidth())
self.label_cai_time.setSizePolicy(sizePolicy)
self.label_cai_time.setMinimumSize(QtCore.QSize(200, 30))
self.label_cai_time.setFrameShape(QtGui.QFrame.Box)
self.label_cai_time.setFrameShadow(QtGui.QFrame.Plain)
self.label_cai_time.setLineWidth(1)
self.label_cai_time.setAlignment(QtCore.Qt.AlignCenter)
self.label_cai_time.setObjectName(_fromUtf8("label_cai_time"))
self.gridLayout_cai_header.addWidget(self.label_cai_time, 0, 0, 1, 1)
self.gridLayout_cai.addLayout(self.gridLayout_cai_header, 0, 0, 1, 1)
self.gridLayout_cai_table = QtGui.QGridLayout()
self.gridLayout_cai_table.setObjectName(_fromUtf8("gridLayout_cai_table"))
self.label_cai_cai_column = QtGui.QLabel(self.groupBox_cai)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_cai_cai_column.sizePolicy().hasHeightForWidth())
self.label_cai_cai_column.setSizePolicy(sizePolicy)
self.label_cai_cai_column.setMinimumSize(QtCore.QSize(100, 30))
self.label_cai_cai_column.setFrameShape(QtGui.QFrame.Box)
self.label_cai_cai_column.setFrameShadow(QtGui.QFrame.Plain)
self.label_cai_cai_column.setLineWidth(1)
self.label_cai_cai_column.setAlignment(QtCore.Qt.AlignCenter)
self.label_cai_cai_column.setObjectName(_fromUtf8("label_cai_cai_column"))
self.gridLayout_cai_table.addWidget(self.label_cai_cai_column, 7, 0, 1, 1)
self.label_cai_pm10_column = QtGui.QLabel(self.groupBox_cai)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_cai_pm10_column.sizePolicy().hasHeightForWidth())
self.label_cai_pm10_column.setSizePolicy(sizePolicy)
self.label_cai_pm10_column.setMinimumSize(QtCore.QSize(100, 30))
self.label_cai_pm10_column.setFrameShape(QtGui.QFrame.Box)
self.label_cai_pm10_column.setFrameShadow(QtGui.QFrame.Plain)
self.label_cai_pm10_column.setLineWidth(1)
self.label_cai_pm10_column.setAlignment(QtCore.Qt.AlignCenter)
self.label_cai_pm10_column.setObjectName(_fromUtf8("label_cai_pm10_column"))
self.gridLayout_cai_table.addWidget(self.label_cai_pm10_column, 5, 0, 1, 1)
self.label_cai_o3_column = QtGui.QLabel(self.groupBox_cai)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_cai_o3_column.sizePolicy().hasHeightForWidth())
self.label_cai_o3_column.setSizePolicy(sizePolicy)
self.label_cai_o3_column.setMinimumSize(QtCore.QSize(100, 30))
self.label_cai_o3_column.setFrameShape(QtGui.QFrame.Box)
self.label_cai_o3_column.setFrameShadow(QtGui.QFrame.Plain)
self.label_cai_o3_column.setLineWidth(1)
self.label_cai_o3_column.setAlignment(QtCore.Qt.AlignCenter)
self.label_cai_o3_column.setObjectName(_fromUtf8("label_cai_o3_column"))
self.gridLayout_cai_table.addWidget(self.label_cai_o3_column, 3, 0, 1, 1)
self.label_cai_pm25_01 = QtGui.QLabel(self.groupBox_cai)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_cai_pm25_01.sizePolicy().hasHeightForWidth())
self.label_cai_pm25_01.setSizePolicy(sizePolicy)
self.label_cai_pm25_01.setMinimumSize(QtCore.QSize(100, 30))
self.label_cai_pm25_01.setFrameShape(QtGui.QFrame.Box)
self.label_cai_pm25_01.setFrameShadow(QtGui.QFrame.Plain)
self.label_cai_pm25_01.setLineWidth(1)
self.label_cai_pm25_01.setAlignment(QtCore.Qt.AlignCenter)
self.label_cai_pm25_01.setObjectName(_fromUtf8("label_cai_pm25_01"))
self.gridLayout_cai_table.addWidget(self.label_cai_pm25_01, 6, 1, 1, 1)
self.label_cai_o3_01 = QtGui.QLabel(self.groupBox_cai)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_cai_o3_01.sizePolicy().hasHeightForWidth())
self.label_cai_o3_01.setSizePolicy(sizePolicy)
self.label_cai_o3_01.setMinimumSize(QtCore.QSize(100, 30))
self.label_cai_o3_01.setFrameShape(QtGui.QFrame.Box)
self.label_cai_o3_01.setFrameShadow(QtGui.QFrame.Plain)
self.label_cai_o3_01.setLineWidth(1)
self.label_cai_o3_01.setAlignment(QtCore.Qt.AlignCenter)
self.label_cai_o3_01.setObjectName(_fromUtf8("label_cai_o3_01"))
self.gridLayout_cai_table.addWidget(self.label_cai_o3_01, 3, 1, 1, 1)
self.label_cai_co_column = QtGui.QLabel(self.groupBox_cai)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_cai_co_column.sizePolicy().hasHeightForWidth())
self.label_cai_co_column.setSizePolicy(sizePolicy)
self.label_cai_co_column.setMinimumSize(QtCore.QSize(100, 30))
self.label_cai_co_column.setFrameShape(QtGui.QFrame.Box)
self.label_cai_co_column.setFrameShadow(QtGui.QFrame.Plain)
self.label_cai_co_column.setLineWidth(1)
self.label_cai_co_column.setAlignment(QtCore.Qt.AlignCenter)
self.label_cai_co_column.setObjectName(_fromUtf8("label_cai_co_column"))
self.gridLayout_cai_table.addWidget(self.label_cai_co_column, 4, 0, 1, 1)
self.label_cai_co_01 = QtGui.QLabel(self.groupBox_cai)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_cai_co_01.sizePolicy().hasHeightForWidth())
self.label_cai_co_01.setSizePolicy(sizePolicy)
self.label_cai_co_01.setMinimumSize(QtCore.QSize(100, 30))
self.label_cai_co_01.setFrameShape(QtGui.QFrame.Box)
self.label_cai_co_01.setFrameShadow(QtGui.QFrame.Plain)
self.label_cai_co_01.setLineWidth(1)
self.label_cai_co_01.setAlignment(QtCore.Qt.AlignCenter)
self.label_cai_co_01.setObjectName(_fromUtf8("label_cai_co_01"))
self.gridLayout_cai_table.addWidget(self.label_cai_co_01, 4, 1, 1, 1)
self.label_cai_pm25_column = QtGui.QLabel(self.groupBox_cai)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_cai_pm25_column.sizePolicy().hasHeightForWidth())
self.label_cai_pm25_column.setSizePolicy(sizePolicy)
self.label_cai_pm25_column.setMinimumSize(QtCore.QSize(100, 30))
self.label_cai_pm25_column.setFrameShape(QtGui.QFrame.Box)
self.label_cai_pm25_column.setFrameShadow(QtGui.QFrame.Plain)
self.label_cai_pm25_column.setLineWidth(1)
self.label_cai_pm25_column.setAlignment(QtCore.Qt.AlignCenter)
self.label_cai_pm25_column.setObjectName(_fromUtf8("label_cai_pm25_column"))
self.gridLayout_cai_table.addWidget(self.label_cai_pm25_column, 6, 0, 1, 1)
self.label_cai_status_01 = QtGui.QLabel(self.groupBox_cai)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_cai_status_01.sizePolicy().hasHeightForWidth())
self.label_cai_status_01.setSizePolicy(sizePolicy)
self.label_cai_status_01.setMinimumSize(QtCore.QSize(100, 30))
self.label_cai_status_01.setFrameShape(QtGui.QFrame.Box)
self.label_cai_status_01.setFrameShadow(QtGui.QFrame.Plain)
self.label_cai_status_01.setLineWidth(1)
self.label_cai_status_01.setAlignment(QtCore.Qt.AlignCenter)
self.label_cai_status_01.setObjectName(_fromUtf8("label_cai_status_01"))
self.gridLayout_cai_table.addWidget(self.label_cai_status_01, 8, 1, 1, 1)
self.label_cai_city_column = QtGui.QLabel(self.groupBox_cai)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_cai_city_column.sizePolicy().hasHeightForWidth())
self.label_cai_city_column.setSizePolicy(sizePolicy)
self.label_cai_city_column.setMinimumSize(QtCore.QSize(100, 30))
self.label_cai_city_column.setFrameShape(QtGui.QFrame.Box)
self.label_cai_city_column.setFrameShadow(QtGui.QFrame.Plain)
self.label_cai_city_column.setLineWidth(1)
self.label_cai_city_column.setAlignment(QtCore.Qt.AlignCenter)
self.label_cai_city_column.setObjectName(_fromUtf8("label_cai_city_column"))
self.gridLayout_cai_table.addWidget(self.label_cai_city_column, 0, 0, 1, 1)
self.label_cai_maxstatus_01 = QtGui.QLabel(self.groupBox_cai)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_cai_maxstatus_01.sizePolicy().hasHeightForWidth())
self.label_cai_maxstatus_01.setSizePolicy(sizePolicy)
self.label_cai_maxstatus_01.setMinimumSize(QtCore.QSize(100, 30))
self.label_cai_maxstatus_01.setFrameShape(QtGui.QFrame.Box)
self.label_cai_maxstatus_01.setFrameShadow(QtGui.QFrame.Plain)
self.label_cai_maxstatus_01.setLineWidth(1)
self.label_cai_maxstatus_01.setAlignment(QtCore.Qt.AlignCenter)
self.label_cai_maxstatus_01.setObjectName(_fromUtf8("label_cai_maxstatus_01"))
self.gridLayout_cai_table.addWidget(self.label_cai_maxstatus_01, 9, 1, 1, 1)
self.label_cai_cai_01 = QtGui.QLabel(self.groupBox_cai)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_cai_cai_01.sizePolicy().hasHeightForWidth())
self.label_cai_cai_01.setSizePolicy(sizePolicy)
self.label_cai_cai_01.setMinimumSize(QtCore.QSize(100, 30))
self.label_cai_cai_01.setFrameShape(QtGui.QFrame.Box)
self.label_cai_cai_01.setFrameShadow(QtGui.QFrame.Plain)
self.label_cai_cai_01.setLineWidth(1)
self.label_cai_cai_01.setAlignment(QtCore.Qt.AlignCenter)
self.label_cai_cai_01.setObjectName(_fromUtf8("label_cai_cai_01"))
self.gridLayout_cai_table.addWidget(self.label_cai_cai_01, 7, 1, 1, 1)
self.label_cai_so2_column = QtGui.QLabel(self.groupBox_cai)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_cai_so2_column.sizePolicy().hasHeightForWidth())
self.label_cai_so2_column.setSizePolicy(sizePolicy)
self.label_cai_so2_column.setMinimumSize(QtCore.QSize(100, 30))
self.label_cai_so2_column.setFrameShape(QtGui.QFrame.Box)
self.label_cai_so2_column.setFrameShadow(QtGui.QFrame.Plain)
self.label_cai_so2_column.setLineWidth(1)
self.label_cai_so2_column.setAlignment(QtCore.Qt.AlignCenter)
self.label_cai_so2_column.setObjectName(_fromUtf8("label_cai_so2_column"))
self.gridLayout_cai_table.addWidget(self.label_cai_so2_column, 1, 0, 1, 1)
self.label_cai_so2_01 = QtGui.QLabel(self.groupBox_cai)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_cai_so2_01.sizePolicy().hasHeightForWidth())
self.label_cai_so2_01.setSizePolicy(sizePolicy)
self.label_cai_so2_01.setMinimumSize(QtCore.QSize(100, 30))
self.label_cai_so2_01.setFrameShape(QtGui.QFrame.Box)
self.label_cai_so2_01.setFrameShadow(QtGui.QFrame.Plain)
self.label_cai_so2_01.setLineWidth(1)
self.label_cai_so2_01.setAlignment(QtCore.Qt.AlignCenter)
self.label_cai_so2_01.setObjectName(_fromUtf8("label_cai_so2_01"))
self.gridLayout_cai_table.addWidget(self.label_cai_so2_01, 1, 1, 1, 1)
self.label_cai_pm10_01 = QtGui.QLabel(self.groupBox_cai)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_cai_pm10_01.sizePolicy().hasHeightForWidth())
self.label_cai_pm10_01.setSizePolicy(sizePolicy)
self.label_cai_pm10_01.setMinimumSize(QtCore.QSize(100, 30))
self.label_cai_pm10_01.setFrameShape(QtGui.QFrame.Box)
self.label_cai_pm10_01.setFrameShadow(QtGui.QFrame.Plain)
self.label_cai_pm10_01.setLineWidth(1)
self.label_cai_pm10_01.setAlignment(QtCore.Qt.AlignCenter)
self.label_cai_pm10_01.setObjectName(_fromUtf8("label_cai_pm10_01"))
self.gridLayout_cai_table.addWidget(self.label_cai_pm10_01, 5, 1, 1, 1)
self.label_cai_no2_column = QtGui.QLabel(self.groupBox_cai)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_cai_no2_column.sizePolicy().hasHeightForWidth())
self.label_cai_no2_column.setSizePolicy(sizePolicy)
self.label_cai_no2_column.setMinimumSize(QtCore.QSize(100, 30))
self.label_cai_no2_column.setFrameShape(QtGui.QFrame.Box)
self.label_cai_no2_column.setFrameShadow(QtGui.QFrame.Plain)
self.label_cai_no2_column.setLineWidth(1)
self.label_cai_no2_column.setAlignment(QtCore.Qt.AlignCenter)
self.label_cai_no2_column.setObjectName(_fromUtf8("label_cai_no2_column"))
self.gridLayout_cai_table.addWidget(self.label_cai_no2_column, 2, 0, 1, 1)
self.label_cai_no2_01 = QtGui.QLabel(self.groupBox_cai)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_cai_no2_01.sizePolicy().hasHeightForWidth())
self.label_cai_no2_01.setSizePolicy(sizePolicy)
self.label_cai_no2_01.setMinimumSize(QtCore.QSize(100, 30))
self.label_cai_no2_01.setFrameShape(QtGui.QFrame.Box)
self.label_cai_no2_01.setFrameShadow(QtGui.QFrame.Plain)
self.label_cai_no2_01.setLineWidth(1)
self.label_cai_no2_01.setAlignment(QtCore.Qt.AlignCenter)
self.label_cai_no2_01.setObjectName(_fromUtf8("label_cai_no2_01"))
self.gridLayout_cai_table.addWidget(self.label_cai_no2_01, 2, 1, 1, 1)
self.label_cai_status_column = QtGui.QLabel(self.groupBox_cai)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_cai_status_column.sizePolicy().hasHeightForWidth())
self.label_cai_status_column.setSizePolicy(sizePolicy)
self.label_cai_status_column.setMinimumSize(QtCore.QSize(100, 30))
self.label_cai_status_column.setFrameShape(QtGui.QFrame.Box)
self.label_cai_status_column.setFrameShadow(QtGui.QFrame.Plain)
self.label_cai_status_column.setLineWidth(1)
self.label_cai_status_column.setAlignment(QtCore.Qt.AlignCenter)
self.label_cai_status_column.setObjectName(_fromUtf8("label_cai_status_column"))
self.gridLayout_cai_table.addWidget(self.label_cai_status_column, 8, 0, 1, 1)
self.label_cai_maxstatus_column = QtGui.QLabel(self.groupBox_cai)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_cai_maxstatus_column.sizePolicy().hasHeightForWidth())
self.label_cai_maxstatus_column.setSizePolicy(sizePolicy)
self.label_cai_maxstatus_column.setMinimumSize(QtCore.QSize(100, 30))
self.label_cai_maxstatus_column.setFrameShape(QtGui.QFrame.Box)
self.label_cai_maxstatus_column.setFrameShadow(QtGui.QFrame.Plain)
self.label_cai_maxstatus_column.setLineWidth(1)
self.label_cai_maxstatus_column.setAlignment(QtCore.Qt.AlignCenter)
self.label_cai_maxstatus_column.setObjectName(_fromUtf8("label_cai_maxstatus_column"))
self.gridLayout_cai_table.addWidget(self.label_cai_maxstatus_column, 9, 0, 1, 1)
self.label_cai_city_01 = QtGui.QLabel(self.groupBox_cai)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_cai_city_01.sizePolicy().hasHeightForWidth())
self.label_cai_city_01.setSizePolicy(sizePolicy)
self.label_cai_city_01.setMinimumSize(QtCore.QSize(100, 30))
self.label_cai_city_01.setFrameShape(QtGui.QFrame.Box)
self.label_cai_city_01.setFrameShadow(QtGui.QFrame.Plain)
self.label_cai_city_01.setLineWidth(1)
self.label_cai_city_01.setAlignment(QtCore.Qt.AlignCenter)
self.label_cai_city_01.setObjectName(_fromUtf8("label_cai_city_01"))
self.gridLayout_cai_table.addWidget(self.label_cai_city_01, 0, 1, 1, 1)
self.gridLayout_cai.addLayout(self.gridLayout_cai_table, 1, 0, 1, 1)
self.gridLayout.addLayout(self.gridLayout_cai, 0, 0, 1, 1)
self.pushButton_forecast = QtGui.QPushButton(self.centralwidget)
self.pushButton_forecast.setGeometry(QtCore.QRect(450, 20, 111, 51))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
gradient = QtGui.QLinearGradient(0.0, 0.0, 1.0, 0.0)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(0, 0, 0))
gradient.setColorAt(1.0, QtGui.QColor(255, 255, 255))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
gradient = QtGui.QLinearGradient(0.0, 0.0, 1.0, 0.0)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(0, 0, 0))
gradient.setColorAt(1.0, QtGui.QColor(255, 255, 255))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
gradient = QtGui.QLinearGradient(0.0, 0.0, 1.0, 0.0)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(0, 0, 0))
gradient.setColorAt(1.0, QtGui.QColor(255, 255, 255))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
gradient = QtGui.QLinearGradient(0.0, 0.0, 1.0, 0.0)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(0, 0, 0))
gradient.setColorAt(1.0, QtGui.QColor(255, 255, 255))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
gradient = QtGui.QLinearGradient(0.0, 0.0, 1.0, 0.0)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(0, 0, 0))
gradient.setColorAt(1.0, QtGui.QColor(255, 255, 255))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
gradient = QtGui.QLinearGradient(0.0, 0.0, 1.0, 0.0)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(0, 0, 0))
gradient.setColorAt(1.0, QtGui.QColor(255, 255, 255))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
gradient = QtGui.QLinearGradient(0.0, 0.0, 1.0, 0.0)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(0, 0, 0))
gradient.setColorAt(1.0, QtGui.QColor(255, 255, 255))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
gradient = QtGui.QLinearGradient(0.0, 0.0, 1.0, 0.0)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(0, 0, 0))
gradient.setColorAt(1.0, QtGui.QColor(255, 255, 255))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
gradient = QtGui.QLinearGradient(0.0, 0.0, 1.0, 0.0)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(0, 0, 0))
gradient.setColorAt(1.0, QtGui.QColor(255, 255, 255))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
self.pushButton_forecast.setPalette(palette)
self.pushButton_forecast.setAutoFillBackground(False)
self.pushButton_forecast.setStyleSheet(_fromUtf8("background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0, stop:0 rgba(0, 0, 0, 255), stop:1 rgba(255, 255, 255, 255));\n"
"border-color: rgb(255, 255, 255);\n"
"color: rgb(255, 255, 255);"))
self.pushButton_forecast.setObjectName(_fromUtf8("pushButton_forecast"))
self.pushButton_cai = QtGui.QPushButton(self.centralwidget)
self.pushButton_cai.setGeometry(QtCore.QRect(450, 90, 111, 51))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
gradient = QtGui.QLinearGradient(0.0, 0.0, 1.0, 0.0)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(0, 0, 0))
gradient.setColorAt(1.0, QtGui.QColor(255, 255, 255))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
gradient = QtGui.QLinearGradient(0.0, 0.0, 1.0, 0.0)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(0, 0, 0))
gradient.setColorAt(1.0, QtGui.QColor(255, 255, 255))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
gradient = QtGui.QLinearGradient(0.0, 0.0, 1.0, 0.0)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(0, 0, 0))
gradient.setColorAt(1.0, QtGui.QColor(255, 255, 255))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
gradient = QtGui.QLinearGradient(0.0, 0.0, 1.0, 0.0)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(0, 0, 0))
gradient.setColorAt(1.0, QtGui.QColor(255, 255, 255))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
gradient = QtGui.QLinearGradient(0.0, 0.0, 1.0, 0.0)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(0, 0, 0))
gradient.setColorAt(1.0, QtGui.QColor(255, 255, 255))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
gradient = QtGui.QLinearGradient(0.0, 0.0, 1.0, 0.0)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(0, 0, 0))
gradient.setColorAt(1.0, QtGui.QColor(255, 255, 255))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
gradient = QtGui.QLinearGradient(0.0, 0.0, 1.0, 0.0)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(0, 0, 0))
gradient.setColorAt(1.0, QtGui.QColor(255, 255, 255))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
gradient = QtGui.QLinearGradient(0.0, 0.0, 1.0, 0.0)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(0, 0, 0))
gradient.setColorAt(1.0, QtGui.QColor(255, 255, 255))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
gradient = QtGui.QLinearGradient(0.0, 0.0, 1.0, 0.0)
gradient.setSpread(QtGui.QGradient.PadSpread)
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
gradient.setColorAt(0.0, QtGui.QColor(0, 0, 0))
gradient.setColorAt(1.0, QtGui.QColor(255, 255, 255))
brush = QtGui.QBrush(gradient)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
self.pushButton_cai.setPalette(palette)
self.pushButton_cai.setAutoFillBackground(False)
self.pushButton_cai.setStyleSheet(_fromUtf8("background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0, stop:0 rgba(0, 0, 0, 255), stop:1 rgba(255, 255, 255, 255));\n"
"border-color: rgb(255, 255, 255);\n"
"color: rgb(255, 255, 255);"))
self.pushButton_cai.setObjectName(_fromUtf8("pushButton_cai"))
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
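    # retranslateUi installs the application's Korean UI strings. Rough
    # English glosses for reference: 미세먼지 = fine dust (particulate
    # matter), 인천 = Incheon, 서울 = Seoul, 경기북부/경기남부 =
    # northern/southern Gyeonggi, 예보분석 = forecast analysis,
    # 예보현황 = forecast status, 오늘/내일 = today/tomorrow,
    # 가져오기 = fetch, 통합대기환경지수 = Comprehensive Air-quality
    # Index (CAI), 측정소 = monitoring station, 구분 = grade,
    # 대표오염물질 = dominant pollutant, 웹 = web.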
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "인천 미세먼지 정보", None))
self.pushButton_refresh_data.setText(_translate("MainWindow", "가져오기", None))
self.groupBox_analysis_today.setTitle(_translate("MainWindow", "미세먼지 예보분석(오늘)", None))
self.label_analysis_time.setText(_translate("MainWindow", "-", None))
self.label_analysis_header.setText(_translate("MainWindow", "미세먼지", None))
self.label_analysis_pm25_today_04.setText(_translate("MainWindow", "-", None))
self.label_analysis_pm10_today_03.setText(_translate("MainWindow", "-", None))
self.label_analysis_pm25_today_01.setText(_translate("MainWindow", "-", None))
self.label_analysis_pm10_column.setText(_translate("MainWindow", "PM-10", None))
self.label_analysis_city_01_header.setText(_translate("MainWindow", "인천", None))
self.label_analysis_pm10_today_01.setText(_translate("MainWindow", "-", None))
self.label_analysis_city_04_header.setText(_translate("MainWindow", "경기남부", None))
self.label_analysis_city_02_header.setText(_translate("MainWindow", "서울", None))
self.label_analysis_pm25_column.setText(_translate("MainWindow", "PM-2.5", None))
self.label_analysis_pm25_today_02.setText(_translate("MainWindow", "-", None))
self.label_analysis_pm10_today_04.setText(_translate("MainWindow", "-", None))
self.label_analysis_pm10_today_02.setText(_translate("MainWindow", "-", None))
self.label_analysis_city_03_header.setText(_translate("MainWindow", "경기북부", None))
self.label_analysis_pm25_today_03.setText(_translate("MainWindow", "-", None))
self.groupBox_analysis_tomorrow.setTitle(_translate("MainWindow", "미세먼지 예보분석(내일)", None))
self.label_analysis_tomorrow_title.setText(_translate("MainWindow", "-", None))
self.label_analysis_pm25_tomorrow_column.setText(_translate("MainWindow", "PM-2.5", None))
self.label_analysis_pm25_tomorrow_02.setText(_translate("MainWindow", "-", None))
self.label_analysis_city_02_tomorrow_header.setText(_translate("MainWindow", "서울", None))
self.label_analysis_pm25_tomorrow_04.setText(_translate("MainWindow", "-", None))
self.label_analysis_city_04_tomorrow_header.setText(_translate("MainWindow", "경기남부", None))
self.label_analysis_pm10_tomorrow_column.setText(_translate("MainWindow", "PM-10", None))
self.label_analysis_pm10_tomorrow_03.setText(_translate("MainWindow", "-", None))
self.label_analysis_tomorrow_header.setText(_translate("MainWindow", "미세먼지", None))
self.label_analysis_pm10_tomorrow_04.setText(_translate("MainWindow", "-", None))
self.label_analysis_pm25_tomorrow_01.setText(_translate("MainWindow", "-", None))
self.label_analysis_pm10_tomorrow_01.setText(_translate("MainWindow", "-", None))
self.label_analysis_city_01_tomorrow_header.setText(_translate("MainWindow", "인천", None))
self.label_analysis_pm25_tomorrow_03.setText(_translate("MainWindow", "-", None))
self.label_analysis_pm10_tomorrow_02.setText(_translate("MainWindow", "-", None))
self.label_analysis_city_03_tomorrow_header.setText(_translate("MainWindow", "경기북부", None))
self.groupBox_forecast.setTitle(_translate("MainWindow", "미세먼지 예보현황(인천 전지역)", None))
self.label_forecast_today_header.setText(_translate("MainWindow", "오늘예보", None))
self.label_forecast_tomorrow_header.setText(_translate("MainWindow", "내일예보", None))
self.label_forecast_today_time.setText(_translate("MainWindow", "-", None))
self.label_forecast_tomorrow_time.setText(_translate("MainWindow", "-", None))
self.label_forecast_today_status.setText(_translate("MainWindow", "-", None))
self.label_forecast_tomorrow_status.setText(_translate("MainWindow", "-", None))
self.groupBox_cai.setTitle(_translate("MainWindow", "통합대기환경지수(CAI)", None))
self.label_cai_time.setText(_translate("MainWindow", "-", None))
self.label_cai_cai_column.setText(_translate("MainWindow", "CAI 지수", None))
self.label_cai_pm10_column.setText(_translate("MainWindow", "PM-10", None))
self.label_cai_o3_column.setText(_translate("MainWindow", "O₃", None))
self.label_cai_pm25_01.setText(_translate("MainWindow", "-", None))
self.label_cai_o3_01.setText(_translate("MainWindow", "-", None))
self.label_cai_co_column.setText(_translate("MainWindow", "CO", None))
self.label_cai_co_01.setText(_translate("MainWindow", "-", None))
self.label_cai_pm25_column.setText(_translate("MainWindow", "PM-2.5", None))
self.label_cai_status_01.setText(_translate("MainWindow", "-", None))
self.label_cai_city_column.setText(_translate("MainWindow", "측정소", None))
self.label_cai_maxstatus_01.setText(_translate("MainWindow", "-", None))
self.label_cai_cai_01.setText(_translate("MainWindow", "-", None))
self.label_cai_so2_column.setText(_translate("MainWindow", "SO₂", None))
self.label_cai_so2_01.setText(_translate("MainWindow", "-", None))
self.label_cai_pm10_01.setText(_translate("MainWindow", "-", None))
self.label_cai_no2_column.setText(_translate("MainWindow", "NO₂", None))
self.label_cai_no2_01.setText(_translate("MainWindow", "-", None))
self.label_cai_status_column.setText(_translate("MainWindow", "구분", None))
self.label_cai_maxstatus_column.setText(_translate("MainWindow", "대표오염물질", None))
self.label_cai_city_01.setText(_translate("MainWindow", "-", None))
self.pushButton_forecast.setText(_translate("MainWindow", "웹 (예보)", None))
self.pushButton_cai.setText(_translate("MainWindow", "웹 (CAI)", None))
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
MainWindow = QtGui.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
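# Note: this form targets PyQt4 (the _fromUtf8 helper and the trailing None
# argument to _translate are PyQt4 conventions), so it needs PyQt4 rather
# than PyQt5 to run.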
#!/usr/bin/env python
# Classic "cuboid route" search (Project Euler 86), in Python 2: find the
# least M such that the number of cuboids a <= b <= c = M whose shortest
# surface path is an integer first reaches one million. Unfolding the box
# shows the shortest path over an a x b x c cuboid is sqrt((a + b)**2 + c**2),
# so only the sum a + b (called ipj below) and c = m matter.
def issquare(n):
    return (int(round(n**.5)))**2 == n

total = m = 0
while total < 10**6:
    m += 1
    for ipj in xrange(2*m + 1):  # ipj = a + b, which is at most 2m
        if not issquare(ipj**2 + m**2):
            continue
        # count pairs (a, b) with 1 <= a <= b <= m and a + b == ipj
        if ipj > m + 1:
            total += (2*m + 2 - ipj)/2  # Python 2 integer division
        else:
            total += ipj/2
print m
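
# A Python 3 port of the same search, as a sketch (assumes Python 3.8+ for
# math.isqrt; the logic is a mechanical translation of the Python 2 code above):
import math

def is_square(n):
    r = math.isqrt(n)
    return r * r == n

def least_m(limit=10**6):
    total = m = 0
    while total < limit:
        m += 1
        for ipj in range(2*m + 1):  # ipj = a + b
            if not is_square(ipj*ipj + m*m):
                continue
            if ipj > m + 1:
                total += (2*m + 2 - ipj) // 2
            else:
                total += ipj // 2
    return m

# print(least_m()) reproduces the answer printed by the Python 2 version.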
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'demoProgressBar.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets

class Ui_Dialog(object):
    def setupUi(self, Dialog):
        Dialog.setObjectName("Dialog")
        Dialog.resize(350, 153)
        self.progressBar = QtWidgets.QProgressBar(Dialog)
        self.progressBar.setGeometry(QtCore.QRect(40, 60, 281, 23))
        font = QtGui.QFont()
        font.setPointSize(12)
        self.progressBar.setFont(font)
        self.progressBar.setProperty("value", 0)
        self.progressBar.setObjectName("progressBar")
        self.label = QtWidgets.QLabel(Dialog)
        self.label.setGeometry(QtCore.QRect(80, 20, 181, 21))
        font = QtGui.QFont()
        font.setPointSize(12)
        self.label.setFont(font)
        self.label.setObjectName("label")
        self.pushButtonStart = QtWidgets.QPushButton(Dialog)
        self.pushButtonStart.setGeometry(QtCore.QRect(80, 110, 151, 31))
        font = QtGui.QFont()
        font.setPointSize(12)
        self.pushButtonStart.setFont(font)
        self.pushButtonStart.setObjectName("pushButtonStart")
        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def retranslateUi(self, Dialog):
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
        self.label.setText(_translate("Dialog", "Downloading the file"))
        self.pushButtonStart.setText(_translate("Dialog", "Start Downloading"))

if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    Dialog = QtWidgets.QDialog()
    ui = Ui_Dialog()
    ui.setupUi(Dialog)
    Dialog.show()
    sys.exit(app.exec_())
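
# The generated form above wires no behavior to pushButtonStart. Below is a
# minimal sketch of driving the bar (an assumption: a QTimer stands in for
# real download progress; instantiate ProgressDemo in place of the bare
# QDialog used in the __main__ block above):
class ProgressDemo(QtWidgets.QDialog):
    def __init__(self):
        super().__init__()
        self.ui = Ui_Dialog()
        self.ui.setupUi(self)
        self.timer = QtCore.QTimer(self)
        self.timer.timeout.connect(self.advance)
        self.ui.pushButtonStart.clicked.connect(self.start_download)

    def start_download(self):
        self.ui.progressBar.setValue(0)  # restart the bar from zero
        self.timer.start(50)             # tick every 50 ms

    def advance(self):
        value = self.ui.progressBar.value() + 1
        self.ui.progressBar.setValue(value)
        if value >= 100:                 # bar is full: stop ticking
            self.timer.stop()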
#!/usr/bin/env python2.7
# Amazon FPGA Hardware Development Kit
#
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Amazon Software License (the "License"). You may not use
# this file except in compliance with the License. A copy of the License is
# located at
#
# http://aws.amazon.com/asl/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or
# implied. See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
import logging
import os
from os.path import dirname, realpath
import pytest
import subprocess
import sys
import traceback
import re

try:
    import aws_fpga_utils
    import aws_fpga_test_utils
    from aws_fpga_test_utils.AwsFpgaTestBase import AwsFpgaTestBase
except ImportError as e:
    traceback.print_tb(sys.exc_info()[2])
    print("error: {}\nMake sure to source hdk_setup.sh".format(sys.exc_info()[1]))
    sys.exit(1)

logger = aws_fpga_utils.get_logger(__name__)

class TestSims(AwsFpgaTestBase):
    """
    Pytest test class.
    NOTE: Cannot have an __init__ method.
    """
    ADD_SIMULATOR = True
    ADD_BATCH = True

    @classmethod
    def setup_class(cls):
        """
        Do any setup required for tests.
        """
        AwsFpgaTestBase.setup_class(cls, __file__)
        AwsFpgaTestBase.assert_hdk_setup()
        cls.RUN_SIM_SCRIPT = dirname(realpath(__file__)) + "/run_sim.sh"
        assert os.path.exists(cls.RUN_SIM_SCRIPT)
        cls.set_simulation_error_signatures()
        cls.set_simulation_pass_signatures()
        return

    @classmethod
    def set_simulation_error_signatures(cls):
        """
        Compile the regexes that flag a simulation log line as a failure.
        """
        cls.failure_messages = [
            r'.*\*{0,3}\s*ERROR\s*\*{0,3}',
            r'.*\*{0,3}\s*TEST[\s_-]{0,2}FAILED\s*\*{0,3}.*',
            r'.*Detected\s*[1-9]\d*\s*error[s]?.*'
        ]
        cls.compiled_failure_messages = []
        for failure_message in cls.failure_messages:
            cls.compiled_failure_messages.append(re.compile(failure_message))

    @classmethod
    def set_simulation_pass_signatures(cls):
        """
        Compile the regexes that flag a simulation log line as a pass.
        """
        cls.pass_messages = [
            r'.*[\*\!]{0,3}\s*TEST[\s_-]{0,2}PASSED\s*[\*\!]{0,3}.*',
        ]
        cls.compiled_pass_messages = []
        for pass_message in cls.pass_messages:
            cls.compiled_pass_messages.append(re.compile(pass_message))
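
    # For reference, the signatures above are meant to catch log lines such
    # as "*** ERROR ***", "TEST FAILED", or "Detected 5 errors" (failures)
    # and "*** TEST PASSED ***" or "TEST_PASSED" (passes).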
@classmethod
def parse_simulation_output(cls, test_name, test_type, test_stdout, test_stderr):
"""
Parse stdout and stderr and see if the test had any fail signatures
Also check if Test Passed. a no Test passed signature is
"""
failure_messages = []
pass_messages = []
# Check failures
for stdout_line in test_stdout:
for fail_regex in cls.compiled_failure_messages:
if fail_regex.match(stdout_line):
failure_messages.append(stdout_line)
# Check passes
for stdout_line in test_stdout:
for pass_regex in cls.compiled_pass_messages:
if pass_regex.match(stdout_line):
pass_messages.append(stdout_line)
return_dict = {
"passes": pass_messages,
"fails": failure_messages
}
return return_dict
def run_sim(self, test_dir="", test_name="", test_type="", simulator="", batch=""):
vivado_version = os.environ.get('VIVADO_TOOL_VERSION', 'unknown')
# Error on defaults
if not(test_dir and test_name and test_type):
            self.fail("Please provide non-empty test_dir, test_name and test_type when calling run_sim")
command_line = [self.RUN_SIM_SCRIPT,
'--test-name', test_name,
'--test-dir', test_dir,
'--test-type', test_type,
'--simulator', simulator,
'--batch', batch,
'--vivado-version', vivado_version
]
(rc, stdout_lines, stderr_lines) = self.run_cmd(" ".join(command_line))
# write simulation output
if simulator == "vivado":
simulator_version = "{}_{}".format(simulator, vivado_version)
else:
simulator_version = simulator
stdout_file_name = "{}/{}_{}_{}.stdout.sim.log".format(test_dir, test_name, test_type, simulator_version)
with open(stdout_file_name, 'w') as f:
for item in stdout_lines:
f.write("%s\n" % item)
# Only write if there is something to write
if stderr_lines:
stderr_file_name = "{}/{}_{}_{}.stderr.sim.log".format(test_dir, test_name, test_type, simulator_version)
with open(stderr_file_name, 'w') as f:
for item in stderr_lines:
f.write("%s\n" % item)
# Check exit code
assert rc == 0, "Sim failed. Received Non-Zero return code"
return_dict = self.parse_simulation_output(test_name=test_name,
test_type=test_type,
test_stdout=stdout_lines,
test_stderr=stderr_lines)
# Check for fail signatures
assert [] == return_dict["fails"], "Found failures {}".format(return_dict["fails"])
        # Check for pass signatures. We need at least one to mark the test as a pass
assert [] != return_dict["passes"], "Found no matching pass statements"
# cl_dram_dma sv
def test_cl_dram_dma__dram_dma__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dram_dma'
test_type = 'sv'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dram_dma__sv_fast(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dram_dma'
test_type = 'sv_fast'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dram_dma_axi_mstr__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dram_dma_axi_mstr'
test_type = 'sv'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dram_dma_rnd__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dram_dma_rnd'
test_type = 'sv'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dram_dma_rnd__sv_fast(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dram_dma_rnd'
test_type = 'sv_fast'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dram_dma_rnd__sv_fast_ecc_direct(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dram_dma_rnd'
test_type = 'sv_fast_ecc_direct'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dram_dma_rnd__sv_fast_ecc_rnd(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dram_dma_rnd'
test_type = 'sv_fast_ecc_rnd'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dram_dma_4k_crossing__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dram_dma_4k_crossing'
test_type = 'sv'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dram_dma_single_beat_4k__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dram_dma_single_beat_4k'
test_type = 'sv'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dram_dma_single_beat_4k__sv_fast(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dram_dma_single_beat_4k'
test_type = 'sv_fast'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dram_dma_single_beat_4k__sv_fast_ecc_direct(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dram_dma_single_beat_4k'
test_type = 'sv_fast_ecc_direct'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dram_dma_single_beat_4k__sv_fast_ecc_rnd(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dram_dma_single_beat_4k'
test_type = 'sv_fast_ecc_rnd'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dma_pcis_concurrent__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dma_pcis_concurrent'
test_type = 'sv'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dma_pcis_concurrent__sv_fast(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dma_pcis_concurrent'
test_type = 'sv_fast'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dma_pcis_concurrent__sv_fast_ecc_direct(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dma_pcis_concurrent'
test_type = 'sv_fast_ecc_direct'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dma_pcis_concurrent__sv_fast_ecc_rnd(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dma_pcis_concurrent'
test_type = 'sv_fast_ecc_rnd'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__host_pcim__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_host_pcim'
test_type = 'sv'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dma_pcim_concurrent__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dma_pcim_concurrent'
test_type = 'sv'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dma_pcim_concurrent__sv_fast(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dma_pcim_concurrent'
test_type = 'sv_fast'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dma_pcim_concurrent__sv_fast_ecc_direct(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dma_pcim_concurrent'
test_type = 'sv_fast_ecc_direct'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dma_pcim_concurrent__sv_fast_ecc_rnd(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dma_pcim_concurrent'
test_type = 'sv_fast_ecc_rnd'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dma_sda_concurrent__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dma_sda_concurrent'
test_type = 'sv'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dma_sda_concurrent__sv_fast(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dma_sda_concurrent'
test_type = 'sv_fast'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dma_sda_concurrent__sv_fast_ecc_direct(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dma_sda_concurrent'
test_type = 'sv_fast_ecc_direct'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dma_sda_concurrent__sv_fast_ecc_rnd(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dma_sda_concurrent'
test_type = 'sv_fast_ecc_rnd'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__ddr__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_ddr'
test_type = 'sv'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__clk_recipe__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_clk_recipe'
test_type = 'sv'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__int__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_int'
test_type = 'sv'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__peek_poke__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_peek_poke'
test_type = 'sv'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__peek_poke__sv_fast(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_peek_poke'
test_type = 'sv_fast'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__peek_poke__sv_fast_ecc_direct(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_peek_poke'
test_type = 'sv_fast_ecc_direct'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__peek_poke__sv_fast_ecc_rnd(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_peek_poke'
test_type = 'sv_fast_ecc_rnd'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__peek_poke_wc__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_peek_poke_wc'
test_type = 'sv'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__peek_poke_wc__sv_fast(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_peek_poke_wc'
test_type = 'sv_fast'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__peek_poke_wc__sv_fast_ecc_direct(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_peek_poke_wc'
test_type = 'sv_fast_ecc_direct'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__peek_poke_wc__sv_fast_ecc_rnd(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_peek_poke_wc'
test_type = 'sv_fast_ecc_rnd'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__peek_poke_len__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_peek_poke_len'
test_type = 'sv'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__peek_poke_len__sv_fast(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_peek_poke_len'
test_type = 'sv_fast'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__peek_poke_len__sv_fast_ecc_direct(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_peek_poke_len'
test_type = 'sv_fast_ecc_direct'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__peek_poke_len__sv_fast_ecc_rnd(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_peek_poke_len'
test_type = 'sv_fast_ecc_rnd'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__peek_poke_pcis_axsize__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_peek_poke_pcis_axsize'
test_type = 'sv'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__peek_poke_pcis_axsize__sv_fast(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_peek_poke_pcis_axsize'
test_type = 'sv_fast'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__peek_poke_pcis_axsize__sv_fast_ecc_direct(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_peek_poke_pcis_axsize'
test_type = 'sv_fast_ecc_direct'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__peek_poke_pcis_axsize__sv_fast_ecc_rnd(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_peek_poke_pcis_axsize'
test_type = 'sv_fast_ecc_rnd'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__ddr_peek_poke__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_ddr_peek_poke'
test_type = 'sv'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__ddr_peek_bdr_walking_ones__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_ddr_peek_bdr_walking_ones'
test_type = 'sv_ddr_bkdr'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dram_dma_dram_bdr_row_col_combo__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dram_dma_dram_bdr_row_col_combo'
test_type = 'sv_ddr_bkdr'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dram_dma_mem_model_bdr_wr__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dram_dma_mem_model_bdr_wr'
test_type = 'sv_fast'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dram_dma_mem_model_bdr_rd__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dram_dma_mem_model_bdr_rd'
test_type = 'sv_fast'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
    def test_cl_dram_dma__axi_mstr_multi_rw__sv(self, simulator, batch):
        test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
        test_name = 'test_axi_mstr_multi_rw'
        test_type = 'sv'
        self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
    def test_cl_dram_dma__bar1__sv(self, simulator, batch):
        test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
        test_name = 'test_bar1'
        test_type = 'sv'
        self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
    def test_cl_dram_dma__dram_dma_allgn_addr_4k__sv(self, simulator, batch):
        test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
        test_name = 'test_dram_dma_allgn_addr_4k'
        test_type = 'sv'
        self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_ddr_peek_bdr_walking_ones__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_ddr_peek_bdr_walking_ones'
test_type = 'sv'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
# cl_uram_example c
def test_cl_uram_example__uram_example__c(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_uram_example/verif/scripts'
test_name = 'test_uram_example'
test_type = 'c'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
# cl_dram_dma c
def test_cl_dram_dma__sda__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_sda'
test_type = 'sv'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
def test_cl_dram_dma__dram_dma_hwsw_cosim__c(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_dram_dma/verif/scripts'
test_name = 'test_dram_dma_hwsw_cosim'
test_type = 'c'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
# cl_hello_world sv
def test_cl_hello_world__hello_world__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_hello_world/verif/scripts'
test_name = 'test_hello_world'
test_type = 'sv'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
# cl_test_gl_cntr sv
def test_cl_hello_world__gl_cntr__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_hello_world/verif/scripts'
test_name = 'test_gl_cntr'
test_type = 'sv'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
# cl_hello_world vhdl
def test_cl_vhdl_hello_world__hello_world__vhdl(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_hello_world_vhdl/verif/scripts'
test_name = 'test_hello_world'
test_type = 'vhdl'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
# cl_hello_world c
def test_cl_hello_world__hello_world__c(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_hello_world/verif/scripts'
test_name = 'test_hello_world'
test_type = 'c'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
# cl_sde_c2h sv
def test_cl_sde__test_simple_c2h__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_sde/verif/scripts'
test_name = 'test_simple_c2h'
test_type = 'sv'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
# cl_sde_h2c sv
def test_cl_sde__test_simple_h2c__sv(self, simulator, batch):
test_dir = self.WORKSPACE + '/hdk/cl/examples/cl_sde/verif/scripts'
test_name = 'test_simple_h2c'
test_type = 'sv'
self.run_sim(test_dir=test_dir, test_name=test_name, test_type=test_type, simulator=simulator, batch=batch)
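# --- usage sketch (illustrative; not part of the original file) ---
# These tests are parameterized by the 'simulator' and 'batch' pytest
# fixtures, which this repo's conftest.py is assumed to provide.
# A typical invocation might look like:
#   pytest test_sims.py -k "test_cl_hello_world__hello_world__sv"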
|
GOOD = 0 # value is perfectly A-okay
INSUF_HIST = 1 # value best-guess was made due to insufficient history (ex, acceleration set to zero due to one timestamp)
MISSING = 2 # value is missing (no car in front, etc.)
CENSORED_HI = 3 # value is past an operating threshold
CENSORED_LO = 4 # value is below an operating threshold
class FeatureValue:
def __init__(self, v: float, i: int = GOOD):
self.v = v
self.i = i
def inverse_ttc_to_ttc(inv_ttc: FeatureValue, censor_hi: float = 30.0):
if inv_ttc.i == MISSING:
# if the value is missing then censor hi and set missing
return FeatureValue(censor_hi, MISSING)
elif inv_ttc.i == GOOD and inv_ttc.v == 0.0:
# if the car in front is pulling away, then set to a censored hi value
return FeatureValue(censor_hi, CENSORED_HI)
else:
# even if the value was censored hi, can still take the inverse
ttc = 1.0 / inv_ttc.v
if ttc > censor_hi:
return FeatureValue(censor_hi, CENSORED_HI)
else:
            return FeatureValue(ttc, GOOD)
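# --- usage sketch (illustrative; not part of the original module) ---
# assuming a lead vehicle measured with inverse TTC of 0.2 (TTC = 5 s):
ttc = inverse_ttc_to_ttc(FeatureValue(0.2, GOOD))
assert ttc.v == 5.0 and ttc.i == GOOD
# a missing measurement is censored high and keeps its MISSING flag
assert inverse_ttc_to_ttc(FeatureValue(0.0, MISSING)).i == MISSING
|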
import math
import torch
from tqdm import tqdm
def train_eval(model, criterion, eval_iter, rnn_out=False):
model.eval()
acc = 0.0
n_total = 0
n_correct = 0
test_loss = 0.0
for x, y in eval_iter:
with torch.no_grad():
if torch.cuda.is_available():
x = x.cuda()
y = y.cuda()
output = None
if rnn_out:
output, _ = model(x)
else:
output = model(x)
loss = criterion(output, y)
n_correct += (output.argmax(1) == y).sum().item()
n_total += len(y)
test_loss += loss.item() / len(y)
test_loss /= len(eval_iter)
acc = 100. * (n_correct / n_total)
print(f'Test Accuracy: {acc:.2f}\tTest Loss (avg): {test_loss}')
def train(model, optim, criterion, train_iter, epochs, clip=0,
eval_iter=None, eval_every=50):
for epoch in tqdm(range(1, epochs + 1)):
model.train()
total_epoch_loss = 0.0
for x, y in train_iter:
if torch.cuda.is_available():
x = x.cuda()
y = y.cuda()
# forward
output = model(x)
# backward
optim.zero_grad()
loss = criterion(output, y)
loss.backward()
if clip > 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
optim.step()
loss_val = loss.item()
if math.isnan(loss_val):
print('loss = nan')
else:
                total_epoch_loss += loss_val / len(y)
# display epoch stats
total_epoch_loss /= len(train_iter)
# eval
if eval_iter and epoch % eval_every == 0:
print(f'Epoch: {epoch}\tTrain Loss (avg): {total_epoch_loss}')
train_eval(model, criterion, eval_iter)
def train_reg(model, optim, criterion, train_iter, epochs, clip=0,
ar=0, tar=0, eval_iter=None, eval_every=50):
for epoch in tqdm(range(1, epochs + 1)):
model.train()
total_epoch_loss = 0.0
for x, y in train_iter:
if torch.cuda.is_available():
x = x.cuda()
y = y.cuda()
# forward
output, rnn_out = model(x)
# backward
optim.zero_grad()
loss = criterion(output, y)
loss_val = loss.item()
# Activation Regularization
if ar:
loss += ar * rnn_out.pow(2).mean()
# Temporal Activation Regularization (slowness)
if tar:
loss += tar * (rnn_out[1:] - rnn_out[:-1]).pow(2).mean()
# Backprop
loss.backward()
if clip > 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
optim.step()
if math.isnan(loss_val):
print('loss = nan')
else:
total_epoch_loss += loss_val / len(y)
# display epoch stats
total_epoch_loss /= len(train_iter)
# eval
if eval_iter and epoch % eval_every == 0:
print(f'Epoch: {epoch}\tTrain Loss (avg): {total_epoch_loss}')
train_eval(model, criterion, eval_iter, True)
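# --- usage sketch (illustrative; not part of the original module) ---
# assumes a classification model and iterables of (x, y) batches;
# MyNet, train_iter and eval_iter below are placeholder names
# model = MyNet()
# optim = torch.optim.Adam(model.parameters(), lr=1e-3)
# criterion = torch.nn.CrossEntropyLoss()
# train(model, optim, criterion, train_iter, epochs=100, clip=5,
#       eval_iter=eval_iter, eval_every=50)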
|
#
# Plasma
# Copyright (c) 2020 Homedeck, LLC.
#
from cv2 import cornerHarris, findTransformECC, MOTION_TRANSLATION, TERM_CRITERIA_COUNT, TERM_CRITERIA_EPS
from numpy import array, argpartition, asarray, column_stack, concatenate, eye, float32, ndarray, ones, split, stack as stack_array, unravel_index
from numpy.linalg import norm
from PIL import Image
from sklearn.linear_model import LinearRegression
from torch import cat, linspace, meshgrid, stack, Tensor
from torch.nn.functional import grid_sample
from torchvision.transforms import ToPILImage, ToTensor
from typing import Tuple
from .device import get_io_device
def tca_correction (*images: Image.Image) -> Image.Image:
"""
    Apply transverse chromatic aberration correction to an image.
Parameters:
images (PIL.Image | list): Input image(s).
Returns:
PIL.Image | list: Corrected image(s).
"""
# Check
if len(images) == 0:
return None
# Save EXIF
exifs = [image.info.get("exif") for image in images]
# Create exposure stack tensor
device = get_io_device()
exposure_stack = stack([ToTensor()(image) for image in images], dim=0).to(device)
# Correct
red_coeffs, blue_coeffs = _compute_coefficients(images[0])
result_stack = _tca_forward(exposure_stack, red_coeffs, blue_coeffs)
# Convert back to images
exposures = result_stack.split(1, dim=0)
images = [ToPILImage()(exposure.squeeze(dim=0).cpu()) for exposure in exposures]
# Add EXIF and return
for image, exif in zip(images, exifs):
image.info["exif"] = exif
return images if len(images) > 1 else images[0]
def _compute_coefficients (image: Image.Image) -> Tuple[ndarray, ndarray]:
"""
Compute TCA correction coefficients.
We use a cubic lens distortion model.
Parameters:
image (PIL.Image): Input image.
Returns:
tuple: Red and blue channel correction coefficients, each with shape (4,)
"""
# Compute displacements
image_array = asarray(image)
height, width, _ = image_array.shape
# Extract patches to inspect
corners = _find_corners(image_array, count=100)
patches, centers = _extract_patches(image_array, corners, size=100)
# Compute displacements
displacements, mask = _compute_patch_displacements(patches)
displacements_red, displacements_blue = displacements[:,0], displacements[:,1]
# Compute radial field
image_size = array([ width, height ])
image_center = image_size / 2.
patch_radii = norm((centers[mask] - image_center) / image_center, axis=1)
displaced_radii_red = norm((centers[mask] - displacements_red - image_center) / image_center, axis=1)
displaced_radii_blue = norm((centers[mask] - displacements_blue - image_center) / image_center, axis=1)
# Compute coefficients
regressor_red = LinearRegression(fit_intercept=False)
regressor_blue = LinearRegression(fit_intercept=False)
X = stack_array([patch_radii, patch_radii ** 2, patch_radii ** 3, patch_radii ** 4], axis=1)
regressor_red.fit(X, displaced_radii_red)
regressor_blue.fit(X, displaced_radii_blue)
return regressor_red.coef_, regressor_blue.coef_
def _tca_forward (input: Tensor, red_coeffs: ndarray, blue_coeffs: ndarray) -> Tensor:
"""
Apply the cubic TCA correction forward model.
Parameters:
input (Tensor): Image stack with shape (N,3,H,W).
red_coeffs (ndarray): Red channel correction coefficients with shape (4,).
blue_coeffs (ndarray): Blue channel correction coefficients with shape (4,).
Returns:
Tensor: Corrected image stack with shape (N,3,H,W).
"""
# Construct sample grid
batch, _, height, width = input.shape
hg, wg = meshgrid(linspace(-1., 1., height), linspace(-1., 1., width))
hg = hg.repeat(batch, 1, 1).unsqueeze(dim=3).to(input.device)
wg = wg.repeat(batch, 1, 1).unsqueeze(dim=3).to(input.device)
sample_field = cat([wg, hg], dim=3)
r_dst = sample_field.norm(dim=3, keepdim=True)
# Compute distortions
r_a, r_b, r_c, r_d = red_coeffs
b_a, b_b, b_c, b_d = blue_coeffs
red_distortion = r_a + r_b * r_dst.pow(1) + r_c * r_dst.pow(2) + r_d * r_dst.pow(3)
blue_distortion = b_a + b_b * r_dst.pow(1) + b_c * r_dst.pow(2) + b_d * r_dst.pow(3)
# Compute sample grids
red_grid = sample_field * red_distortion
blue_grid = sample_field * blue_distortion
# Sample
red, green, blue = input.split(1, dim=1)
red_shifted = grid_sample(red, red_grid, mode="bilinear", padding_mode="border", align_corners=False)
blue_shifted = grid_sample(blue, blue_grid, mode="bilinear", padding_mode="border", align_corners=False)
# Combine
result = cat([red_shifted, green, blue_shifted], dim=1)
return result
def _find_corners (input: ndarray, count: int=100) -> ndarray:
"""
Find corners in an image.
Parameters:
input (ndarray): Input image with shape (H,W,3).
count (int): Maximum number of corners to return.
Returns:
ndarray: Coordinates of corners with shape (N,2).
"""
# Find corners in green channel
_, g, _ = split(input, 3, axis=2)
corners = cornerHarris(g.astype(float32), 2, 3, 0.04)
# Get coordinates with max response
corner_indices = argpartition(corners, -count, axis=None)[-count:]
y_coords, x_coords = unravel_index(corner_indices, corners.shape)
# Return
coords = column_stack([x_coords, y_coords])
return coords
def _extract_patches (input: ndarray, centers: ndarray, size: int=100) -> Tuple[ndarray, ndarray]:
"""
    Extract image patches centered around a set of center points.
Note that the number of returned patches might be less than N, as patches that are not full-size are discarded.
Parameters:
input (ndarray): Input image with shape (H,W,3).
centers (ndarray): Patch centers (x,y) coordinates with shape (N,2).
size (int): Size in each dimension.
Returns:
tuple: Patch stack with shape (M,S,S,3) and patch centers with shape (M,2).
"""
negatives = centers - size // 2
patches = [input[y_min:y_max, x_min:x_max] for x_min, y_min, x_max, y_max in concatenate([negatives, negatives + size], axis=1)]
patches = [(patch, center) for patch, center in zip(patches, centers) if patch.shape[0] == patch.shape[1] == size]
patches, centers = zip(*patches)
patches, centers = stack_array(patches), stack_array(centers)
return patches, centers
def _compute_patch_displacements (patches: ndarray) -> Tuple[ndarray, ndarray]:
"""
Compute per-patch alignment displacements for N patches.
Note that the number of returned displacements might be less than N.
This happens when no suitable displacement can be computed for a given patch.
Parameters:
patches (ndarray): Patch stack with shape (N,S,S,3).
Returns:
tuple: Red and blue channel displacement vectors with shape (M,2,2), selection mask with shape (N,).
"""
# Constants
IDENTITY = eye(2, 3, dtype=float32)
CRITERIA = (TERM_CRITERIA_EPS | TERM_CRITERIA_COUNT, 100, 1e-4)
# Compute
displacements = []
mask = ones(patches.shape[0]).astype(bool)
for i, patch in enumerate(patches):
try:
patch_r, patch_g, patch_b = split(patch, 3, axis=2)
_, warp_matrix_r = findTransformECC(patch_g, patch_r, IDENTITY.copy(), MOTION_TRANSLATION, CRITERIA, None, 5)
_, warp_matrix_b = findTransformECC(patch_g, patch_b, IDENTITY.copy(), MOTION_TRANSLATION, CRITERIA, None, 5)
displacement = -stack_array([warp_matrix_r[:,2], warp_matrix_b[:,2]], axis=0) # invert displacement
displacements.append(displacement)
        except Exception:  # alignment failed for this patch; drop it
mask[i] = False
# Return
displacements = stack_array(displacements)
    return displacements, mask
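# --- usage sketch (illustrative; not part of the original module) ---
# "photo.jpg" is a placeholder for an image with visible fringing
# corrected = tca_correction(Image.open("photo.jpg"))
# corrected.save("photo_tca.jpg")
|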
#!/usr/bin/env python
#
# This library is for Grove - Time of Flight Distance Sensor VL53L0X
# (https://www.seeedstudio.com/Grove-Time-of-Flight-Distance-Sensor-VL53L0-p-3086.html)
# which is a high speed, high accuracy and long range distance sensor based on VL53L0X.
#
# This is the library for the Grove Base Hat, which is used to connect Grove sensors to the Raspberry Pi.
#
'''
## License
The MIT License (MIT)
Grove Base Hat for the Raspberry Pi, used to connect grove sensors.
Copyright (C) 2018 Seeed Technology Co.,Ltd.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import time
from grove.i2c import Bus
from rpi_vl53l0x.vl53l0x import VL53L0X
_adapter = Bus()
GroveTofDistanceVL53L0X = VL53L0X(bus = _adapter.bus)
def main():
print("Make sure Time-of-Flight-Distance-Sensor-VL53L0X inserted")
print(" in one I2C slot of Grove-Base-Hat")
vl53 = GroveTofDistanceVL53L0X
vl53.begin()
version = vl53.get_devver()
# print("VL53L0X_GetDeviceInfo:")
# print(" Device Type : %s" % version["type"])
# print(" Device Name : %s" % version["name"])
# print(" Device ID : %s" % version["id"])
# print(" RevisionMajor : %d" % version["major"])
# print(" RevisionMinor : %d" % version["minor"])
while True:
st = vl53.wait_ready()
if not st:
continue
# print("Distance = {} mm".format(vl53.get_distance()))
print(vl53.get_distance())
time.sleep(0.5)
if __name__ == '__main__':
main()
|
"""Tests for the text reuse module."""
import logging
from unittest import TestCase, skip
import spacy
from dphon.reuse import MatchGraph
from spacy.tokens import Doc
from dphon.extend import LevenshteinExtender
from dphon.match import Match
# disconnect logging for testing
logging.captureWarnings(True)
logging.disable(logging.CRITICAL)
class TestMatchGraph(TestCase):
"""Test the MatchGraph class."""
maxDiff = None
def setUp(self) -> None:
"""create a spaCy pipeline and match graph for testing"""
self.nlp = spacy.blank(
"zh", meta={"tokenizer": {"config": {"use_jieba": False}}})
self.G = MatchGraph()
if not Doc.has_extension("id"):
Doc.set_extension("id", default="")
# doc1 = self.nlp.make_doc("與朋友交言而有信雖曰未學吾必謂之學矣")
# doc2 = self.nlp.make_doc("與朋友交言而有信雖曰已學吾必謂之未也")
# doc3 = self.nlp.make_doc("與朋友交言而有信雖未讀書吾亦謂之學矣")
def test_extend(self) -> None:
"""extend should reduce graph to maximal matches only"""
doc1 = self.nlp.make_doc("與朋友交言而有信雖曰未學吾")
doc2 = self.nlp.make_doc("與朋友交言而有信雖曰已學吾")
doc3 = self.nlp.make_doc("與朋友交言而有信雖未讀書吾")
self.G.add_docs([("論語·學而", doc1),
("藝文類聚·錢", doc2),
("顏氏家訓·勉學", doc3)])
self.G.add_matches([
Match("論語·學而", "藝文類聚·錢", doc1[0:4], doc2[0:4]), # 與朋友交
Match("論語·學而", "藝文類聚·錢", doc1[4:8], doc2[4:8]), # 言而有信
Match("論語·學而", "顏氏家訓·勉學", doc1[0:4], doc3[0:4]), # 與朋友交
Match("論語·學而", "顏氏家訓·勉學", doc1[4:8], doc3[4:8]), # 言而有信
Match("藝文類聚·錢", "顏氏家訓·勉學", doc2[0:4], doc3[0:4]), # 與朋友交
Match("藝文類聚·錢", "顏氏家訓·勉學", doc2[4:8], doc3[4:8]), # 言而有信
])
extender = LevenshteinExtender(threshold=0.8, len_limit=50)
self.G.extend(extender)
matches = [(m.u, m.v, m.utxt.text, m.vtxt.text)
for m in self.G.matches]
self.assertEqual(len(matches), 3)
self.assertEqual(matches[0], ("論語·學而", "藝文類聚·錢",
"與朋友交言而有信雖曰未學吾", "與朋友交言而有信雖曰已學吾"))
self.assertEqual(matches[1], ("論語·學而", "顏氏家訓·勉學",
"與朋友交言而有信雖曰未學吾", "與朋友交言而有信雖未讀書吾"))
self.assertEqual(matches[2], ("藝文類聚·錢", "顏氏家訓·勉學",
"與朋友交言而有信雖", "與朋友交言而有信雖"))
|
import cs.utils as utils
import gzip
import json
import logging
import os
import shutil
import uuid
import cs_cmdline
import cs_dhcp
import cs_firewallrules
import cs_forwardingrules
import cs_loadbalancer
import cs_network_acl
import cs_public_ip_acl
import cs_staticroutes
import cs_virtualrouter
class DataBag:
DPATH = "/etc/cosmic/router"
def __init__(self):
self.bdata = {}
def load(self):
data = self.bdata
if not os.path.exists(self.DPATH):
os.makedirs(self.DPATH)
self.fpath = self.DPATH + '/' + self.key + '.json'
try:
handle = open(self.fpath)
except IOError:
logging.debug("Creating data bag type %s", self.key)
data.update({"id": self.key})
else:
logging.debug("Loading data bag type %s", self.key)
data = json.load(handle)
handle.close()
self.dbag = data
def save(self, dbag):
try:
handle = open(self.fpath, 'w')
except IOError:
logging.error("Could not write data bag %s", self.key)
else:
logging.debug("Writing data bag type %s", self.key)
logging.debug(dbag)
jsono = json.dumps(dbag, indent=4, sort_keys=True)
handle.write(jsono)
def getDataBag(self):
return self.dbag
def setKey(self, key):
self.key = key
class updateDataBag:
DPATH = "/etc/cosmic/router"
def __init__(self, qFile):
self.qFile = qFile
self.fpath = ''
self.bdata = {}
self.process()
def process(self):
self.db = DataBag()
if self.qFile.type == "staticnatrules" or self.qFile.type == "forwardrules":
self.db.setKey("forwardingrules")
else:
self.db.setKey(self.qFile.type)
self.db.load()
logging.info("Command of type %s received", self.qFile.type)
if self.qFile.type == 'cmdline':
dbag = self.processCL(self.db.getDataBag())
elif self.qFile.type == 'network_overview':
dbag = self.qFile.data
elif self.qFile.type == 'vm_overview':
dbag = self.qFile.data
elif self.qFile.type == 'networkacl':
dbag = self.process_network_acl(self.db.getDataBag())
elif self.qFile.type == 'publicipacl':
dbag = self.process_public_ip_acl(self.db.getDataBag())
elif self.qFile.type == 'firewallrules':
dbag = self.process_firewallrules(self.db.getDataBag())
elif self.qFile.type == 'loadbalancer':
dbag = self.process_loadbalancer(self.db.getDataBag())
elif self.qFile.type == 'dhcpentry':
dbag = self.process_dhcp_entry(self.db.getDataBag())
elif self.qFile.type == 'staticnatrules' or self.qFile.type == 'forwardrules':
dbag = self.processForwardingRules(self.db.getDataBag())
elif self.qFile.type == 'staticroutes':
dbag = self.process_staticroutes(self.db.getDataBag())
elif self.qFile.type == 'virtualrouter':
dbag = self.process_virtualrouter(self.db.getDataBag())
else:
logging.error("Error I do not know what to do with file of type %s", self.qFile.type)
return
self.db.save(dbag)
def process_dhcp_entry(self, dbag):
return cs_dhcp.merge(dbag, self.qFile.data)
def process_network_acl(self, dbag):
d_to_merge = self.validate_device_based_on_mac_address()
return cs_network_acl.merge(dbag, d_to_merge)
def process_public_ip_acl(self, dbag):
d_to_merge = self.validate_device_based_on_mac_address()
return cs_public_ip_acl.merge(dbag, d_to_merge)
def process_firewallrules(self, dbag):
return cs_firewallrules.merge(dbag, self.qFile.data)
def process_loadbalancer(self, dbag):
return cs_loadbalancer.merge(dbag, self.qFile.data)
def process_staticroutes(self, dbag):
return cs_staticroutes.merge(dbag, self.qFile.data)
def processForwardingRules(self, dbag):
# to be used by both staticnat and portforwarding
return cs_forwardingrules.merge(dbag, self.qFile.data)
def process_virtualrouter(self, dbag):
return cs_virtualrouter.merge(dbag, self.qFile.data)
def processCL(self, dbag):
# Convert the ip stuff to an ip object and pass that into cs_ip_merge
# "eth0ip": "192.168.56.32",
# "eth0mask": "255.255.255.0",
self.newData = []
if self.qFile.data['cmd_line']['type'] == "router":
self.processCLItem('0', "guest")
self.processCLItem('1', "control")
self.processCLItem('2', "public")
elif self.qFile.data['cmd_line']['type'] == "vpcrouter":
self.processCLItem('0', "control")
elif self.qFile.data['cmd_line']['type'] == "dhcpsrvr":
self.processCLItem('0', "guest")
self.processCLItem('1', "control")
return cs_cmdline.merge(dbag, self.qFile.data)
def processCLItem(self, num, nw_type):
key = 'eth' + num + 'ip'
        # 'num' is a string here, so the original check `num == 0` could never
        # match; assume the intent was the control interface's MAC key
        if nw_type == "control":
            key = "controlmac"
dp = {}
if key in self.qFile.data['cmd_line']:
dp['public_ip'] = self.qFile.data['cmd_line'][key]
dp['netmask'] = self.qFile.data['cmd_line']['eth' + num + 'mask']
dp['source_nat'] = False
dp['add'] = True
dp['one_to_one_nat'] = False
dp['mac_address'] = self.qFile.data['cmd_line']['eth' + num + 'mac']
if nw_type == "public":
dp['gateway'] = self.qFile.data['cmd_line']['gateway']
else:
if ('localgw' in self.qFile.data['cmd_line']):
dp['gateway'] = self.qFile.data['cmd_line']['localgw']
else:
dp['gateway'] = 'None'
dp['nw_type'] = nw_type
qf = QueueFile()
qf.load({'ip_address': [dp], 'type': 'ips'})
def validate_device_based_on_mac_address(self):
d_to_merge = self.qFile.data
if 'mac_address' not in d_to_merge:
return d_to_merge
device_name = utils.get_interface_name_from_mac_address(d_to_merge['mac_address'])
if device_name:
d_to_merge['device'] = device_name
return d_to_merge
class QueueFile:
fileName = ''
type = ''
configCache = "/var/cache/cloud"
keep = True
do_merge = True
data = {}
def update_databag(self):
if self.do_merge:
logging.info("Merging because do_merge is %s" % self.do_merge)
updateDataBag(self)
else:
logging.info("Not merging because do_merge is %s" % self.do_merge)
def load(self, data):
if data is not None:
self.data = data
self.type = self.data["type"]
self.update_databag()
return
filename = '{cache_location}/{json_file}'.format(cache_location=self.configCache, json_file=self.fileName)
try:
handle = open(filename)
except IOError as exception:
error_message = (
"Exception occurred with the following exception error '{error}'. Could not open '{file}'. "
"It seems that the file has already been moved.".format(error=exception, file=filename))
logging.error(error_message)
else:
logging.info("Continuing with the processing of file '{file}'".format(file=filename))
self.data = json.load(handle)
if 'type' in self.data:
self.type = self.data["type"]
else:
self.type = self.fileName.split('.')[0]
handle.close()
if self.keep:
self.__moveFile(filename, self.configCache + "/processed")
else:
os.remove(filename)
self.update_databag()
def setFile(self, name):
self.fileName = name
def getType(self):
return self.type
def getData(self):
return self.data
def setPath(self, path):
self.configCache = path
def __moveFile(self, origPath, path):
if not os.path.exists(path):
os.makedirs(path)
originalName = os.path.basename(origPath)
if originalName.count(".") == 1:
originalName += "." + str(uuid.uuid4())
zipped_file_name = path + "/" + originalName + ".gz"
with open(origPath, 'rb') as f_in, gzip.open(zipped_file_name, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(origPath)
logging.debug("Processed file written to %s", zipped_file_name)
|
#!/usr/bin/python
import os, sys
from jinja2 import Template
if len(sys.argv) != 4:
print("Allowed paremeters are 3, the source, destination and environment variable prefix parameters and you are passing %d args" % (len(sys.argv) - 1))
sys.exit(1)
template_file = sys.argv[1]
config_file = sys.argv[2]
env_prefix = sys.argv[3]
print ("template: " + template_file + ", destination: " + config_file + ", env variable prefix: " + env_prefix)
def getEnvironmentVariables(env_prefix):
all_env = os.environ
hue_env = {}
for key in all_env.keys():
if env_prefix in key:
new_key = key.replace(env_prefix + "_", '')
hue_env[new_key] = all_env[key]
return hue_env
if __name__ == "__main__":
template = open(template_file,"r")
template_content = template.read()
template.close()
hue_env = getEnvironmentVariables(env_prefix)
result_content = Template(template_content).render(hue_env)
result = open(config_file,"w")
result.write(result_content)
result.close()
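# usage sketch (assumed invocation; file names are placeholders):
#   ./render_template.py hue.ini.j2 hue.ini HUE
# every environment variable whose name contains the prefix is exposed
# to the template with "HUE_" stripped, e.g. HUE_PORT -> {{ PORT }}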
|
import pytest
from lib.two_sum import two_sum
def test_example_one():
nums = [2, 7, 11, 15]
target = 9
assert two_sum(nums, target) == [0, 1]
def test_example_two():
nums = [2, 3, 4]
target = 6
assert two_sum(nums, target) == [0, 2]
def test_example_three():
nums = [-1, 0]
target = -1
assert two_sum(nums, target) == [0, 1]
@pytest.mark.skip(reason="Students will write this!")
def test_edge_case():
# You write an edge-case test here
pass
|
#!/usr/bin/env python3
# Cordwood Puzzle (second edition) by Boldport
# Boldport Club project #3, May 2016
# This program controls the Boldport Cordwood Puzzle (v2) using the 2N7000 n-channel FETs on the board.
# Made by Dries Renmans, 20160516 (mailto:[email protected])
# Dependencies: python3, RPi.GPIO
import RPi.GPIO as GPIO
from time import sleep
from random import randint
# List for the LEDs on the board (2 red, 2 yellow, 2 green).
# If you want to use other GPIO pins, only this list needs to be changed.
# See info for connections.
led_list = [13, 21, 19, 20, 26, 16]
GPIO.setmode(GPIO.BCM)
# Setup for LEDs
GPIO.setup(led_list, GPIO.OUT, initial=GPIO.LOW)
def main():
# print('You are using this program at your own risk.')
# input('Press enter if you want to continue.')
while True:
print()
print('Main menu:', '\n')
print('1 - Random mode')
print('2 - Traffic light mode')
print('3 - LED chase mode')
print('4 - Blink mode')
print('5 - Arrows mode')
print('6 - Jumpy LED mode', '\n')
print('I - Info')
print('Q - Quit', '\n')
print('Note - Press Ctrl+C while looping to come back to this menu!', '\n')
mode_select = input('Enter mode: ')
if mode_select not in ('1', '2', '3', '4', '5', '6', 'i', 'I', 'q', 'Q'):
print('\n', '--- Not an option! ---', '\n')
continue
if mode_select == '1':
print('Random mode')
state = (0, 0, 0, 0, 0, 0)
try:
while True:
print('State: {0}'.format(state))
GPIO.output(led_list, state)
sleep(0.5)
state = (randint(0, 1), randint(0, 1), randint(0, 1), randint(0, 1), randint(0, 1), randint(0, 1))
except KeyboardInterrupt:
pass
elif mode_select == '2':
print('Traffic light mode')
try:
while True:
state = (0, 0, 0, 0, 1, 1)
print('State: {0}'.format(state))
GPIO.output(led_list, state)
sleep(5)
state = (0, 0, 1, 1, 0, 0)
print('State: {0}'.format(state))
GPIO.output(led_list, state)
sleep(1)
state = (1, 1, 0, 0, 0, 0)
print('State: {0}'.format(state))
GPIO.output(led_list, state)
sleep(5)
except KeyboardInterrupt:
pass
elif mode_select == '3':
            print('LED chase mode')
            try:
                # build the chase sequence once: light LEDs one by one from
                # one end, then turn them off again from the same end
                chase = [(0, 0, 0, 0, 0, 0)]
                for n in range(1, 7):
                    chase.append(tuple([1] * n + [0] * (6 - n)))
                for n in range(1, 6):
                    chase.append(tuple([0] * n + [1] * (6 - n)))
                while True:
                    for state in chase:
                        print('State: {0}'.format(state))
                        GPIO.output(led_list, state)
                        sleep(0.5)
except KeyboardInterrupt:
pass
elif mode_select == '4':
print('Blink mode')
try:
while True:
state = (0, 0, 0, 0, 0, 0)
print('State: {0}'.format(state))
GPIO.output(led_list, state)
sleep(1)
state = (1, 1, 1, 1, 1, 1)
print('State: {0}'.format(state))
GPIO.output(led_list, state)
sleep(1)
except KeyboardInterrupt:
pass
elif mode_select == '5':
print('Arrows mode')
try:
while True:
state = (1, 0, 0, 1, 1, 0)
print('State: {0}'.format(state))
GPIO.output(led_list, state)
sleep(1)
state = (0, 1, 1, 0, 0, 1)
print('State: {0}'.format(state))
GPIO.output(led_list, state)
sleep(1)
except KeyboardInterrupt:
pass
elif mode_select == '6':
print('Jumpy LED mode')
            try:
                # bounce a single lit LED back and forth along the board
                positions = list(range(6)) + list(range(4, 0, -1))
                while True:
                    for pos in positions:
                        state = tuple(int(i == pos) for i in range(6))
                        print('State: {0}'.format(state))
                        GPIO.output(led_list, state)
                        sleep(0.5)
except KeyboardInterrupt:
pass
elif mode_select == 'I' or mode_select == 'i':
print()
print('Connect the pins of the Cordwood board to following GPIO pins on the Raspberry Pi:', '\n')
print('Red LED 1 (pin 1): GPIO 13')
print('Red LED 2 (pin 4): GPIO 16')
print('Yellow LED 1 (pin 2): GPIO 19')
print('Yellow LED 2 (pin 5): GPIO 20')
print('Green LED 1 (pin 3): GPIO 26')
print('Green LED 2 (pin 6): GPIO 21', '\n')
print('Connect the ground pin on the opposite side of the Cordwood board to any ground on the Raspberry Pi.')
print('Connect the power to a 3.3V pin on the Raspberry Pi.')
print('WARNING: POWERING THE BOARD WITH 5V MIGHT DAMAGE YOUR RPI GPIOs AS THE FETs ON THE BOARD ARE PULLED UP TO THE SAME VOLTAGE!')
print('RASPBERRY PI GPIOs ARE 3.3V!!')
print()
input('Press enter to continue.')
pass
elif mode_select == 'Q' or mode_select == 'q':
print()
print('Goodbye!')
break
GPIO.cleanup()
print('GPIOs cleaned!')
if __name__ == '__main__':
main()
|
import numpy as np
from cost_functions import trajectory_cost_fn
import time
import logging
def dd(s):
logging.getLogger("hw4").debug(s)
def di(s):
logging.getLogger("hw4").info(s)
class Controller():
def __init__(self):
pass
# Get the appropriate action(s) for this state(s)
def get_action(self, state):
pass
class RandomController(Controller):
def __init__(self, env):
""" YOUR CODE HERE """
self.env = env
def get_action(self, state):
""" YOUR CODE HERE """
""" Your code should randomly sample an action uniformly from the action space """
return self.env.action_space.sample() # pick a random action
class MPCcontroller(Controller):
""" Controller built using the MPC method outlined in https://arxiv.org/abs/1708.02596 """
def __init__(self,
env,
dyn_model,
horizon=5,
cost_fn=None,
num_simulated_paths=10,
):
self.env = env
self.dyn_model = dyn_model
self.horizon = horizon
self.cost_fn = cost_fn
self.num_simulated_paths = num_simulated_paths
def get_action(self, state):
""" YOUR CODE HERE """
""" Note: be careful to batch your simulations through the model for speed """
# state is the initial state
# we need to generate self.num_simulated_paths trajectories at length self.horizon steps
curr_state = np.tile(state, (
self.num_simulated_paths, 1)) # create a batch of start state: [num_simulated_paths,obs_dim]
states = []
actions = []
next_states = []
for i in range(self.horizon):
# sample an action per each path
curr_action = []
for _ in range(self.num_simulated_paths):
curr_action.append(self.env.action_space.sample()) # curr action per each path
curr_action = np.concatenate([curr_action]) # shape : [num_simulated_paths,act_dim]
next_state = self.dyn_model.predict(curr_state, curr_action) # shape: [num_simulated_paths,obs_dim]
# append it to the path data structure
states.append(curr_state)
actions.append(curr_action)
next_states.append(next_state)
# progress one step
curr_state = next_state
# at this point we have the following lists:
# states = a list of numpy arrays, each is a set of states for a time step t, for num_simulated_paths
# so states[t] is numpy array of size [num_simulated_paths,obs_dim]
# actions = list of numpy array of actions for all paths. actions[t] is of shape [num_simulated_paths,act_dim]
# next_states = like states but its the state of time t+1. np array shape [num_simulated_paths,obs_dim]
# we now need to find the cost of each path
paths_costs = trajectory_cost_fn(self.cost_fn, states, actions, next_states)
# now we have array of num_simulated_paths cost values. we need to find the argmin and take the corresponding action
return actions[0][np.argmin(paths_costs)]
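# --- usage sketch (illustrative; not part of the original module) ---
# assumes a gym-style env and a dynamics model exposing .predict();
# cheetah_cost_fn is a placeholder cost function
# mpc = MPCcontroller(env, dyn_model, horizon=15, cost_fn=cheetah_cost_fn,
#                     num_simulated_paths=1000)
# action = mpc.get_action(env.reset())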
|
import os
import mmcv
import pytest
import torch
from mmgen.apis import (init_model, sample_img2img_model,
sample_uncoditional_model)
class TestSampleUnconditionalModel:
@classmethod
def setup_class(cls):
project_dir = os.path.abspath(os.path.join(__file__, '../../..'))
config = mmcv.Config.fromfile(
os.path.join(
project_dir,
'configs/dcgan/dcgan_celeba-cropped_64_b128x1_300k.py'))
cls.model = init_model(config, checkpoint=None, device='cpu')
def test_sample_unconditional_model_cpu(self):
res = sample_uncoditional_model(
self.model, 5, num_batches=2, sample_model='orig')
assert res.shape == (5, 3, 64, 64)
res = sample_uncoditional_model(
self.model, 4, num_batches=2, sample_model='orig')
assert res.shape == (4, 3, 64, 64)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_sample_unconditional_model_cuda(self):
model = self.model.cuda()
res = sample_uncoditional_model(
model, 5, num_batches=2, sample_model='orig')
assert res.shape == (5, 3, 64, 64)
res = sample_uncoditional_model(
model, 4, num_batches=2, sample_model='orig')
assert res.shape == (4, 3, 64, 64)
class TestSampleTranslationModel:
@classmethod
def setup_class(cls):
project_dir = os.path.abspath(os.path.join(__file__, '../../..'))
pix2pix_config = mmcv.Config.fromfile(
os.path.join(
project_dir,
'configs/pix2pix/pix2pix_vanilla_unet_bn_facades_b1x1_80k.py'))
cls.pix2pix = init_model(pix2pix_config, checkpoint=None, device='cpu')
cyclegan_config = mmcv.Config.fromfile(
os.path.join(
project_dir,
'configs/cyclegan/cyclegan_lsgan_resnet_in_facades_b1x1_80k.py'
))
cls.cyclegan = init_model(
cyclegan_config, checkpoint=None, device='cpu')
cls.img_path = os.path.join(
os.path.dirname(__file__), '..', 'data/unpaired/testA/5.jpg')
def test_translation_model_cpu(self):
res = sample_img2img_model(
self.pix2pix, self.img_path, target_domain='photo')
assert res.shape == (1, 3, 256, 256)
res = sample_img2img_model(
self.cyclegan, self.img_path, target_domain='photo')
assert res.shape == (1, 3, 256, 256)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_translation_model_cuda(self):
res = sample_img2img_model(
self.pix2pix.cuda(), self.img_path, target_domain='photo')
assert res.shape == (1, 3, 256, 256)
res = sample_img2img_model(
self.cyclegan.cuda(), self.img_path, target_domain='photo')
assert res.shape == (1, 3, 256, 256)
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 20 09:45:33 2018
@author: lenovo
"""
# created by cui 20180720 Friday 09:45
# finished by cui 20180720 Friday 09:56
def CountSort(A, k):
    # B is the output array, same length as A
    B = []
    for i in range(len(A)):
        B.append(0)
    # C[v] counts occurrences of value v, for v in 0..k
    C = []
    for j in range(k + 1):
        C.append(0)
    for i in range(len(A)):
        C[A[i]] = C[A[i]] + 1
    # prefix sums: C[j] becomes the number of elements <= j
    for j in range(1, k + 1):
        C[j] = C[j] + C[j - 1]
    # place each element at its final position; iterating A in reverse
    # here would make the sort stable for equal keys
    for j in range(len(A)):
        B[C[A[j]] - 1] = A[j]
        C[A[j]] = C[A[j]] - 1
    return B
testList = [5,4,3,3,2,1,1]
B = CountSort(testList, 10)
print(B)
|
from . import rpn, rrpn # noqa F401 isort:skip
def build_proposal_generator(cfg, input_shape):
"""
Build a proposal generator from `cfg.MODEL.PROPOSAL_GENERATOR.NAME`.
The name can be "PrecomputedProposals" to use no proposal generator.
"""
name = cfg.MODEL.PROPOSAL_GENERATOR.NAME
if name == "PrecomputedProposals":
return None
    return getattr(rpn, name)(cfg, input_shape)
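# --- usage sketch (illustrative; not part of the original module) ---
# inside a detectron2-style model builder, where cfg and the backbone's
# output shape come from the surrounding framework:
# proposal_generator = build_proposal_generator(cfg, backbone.output_shape())
|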
#!/usr/bin/env python3
for value in range(1, 16):
    print("insert " + str(value) + " user" + str(value) + " user" + str(value) + "@codinglab.fr")
# last line printed: insert 15 user15 user15@codinglab.fr
|
"""
feature map input : (batch_size, num_channels, fm_size[0], fm_size[1])
feature map output size : (batch_size, num_filters, fm_size[0], fm_size[1])
filter_size : (, fm_size[0], fm_size[1])
"""
import numpy as np;
import theano;
import theano.tensor as T;
from scae_destin.convnet import ConvNetBase
class CKMLayer(ConvNetBase):
"""
    An implementation of a Convolutional K-means layer
"""
def __init__(self,
feature_shape,
filter_size,
num_filters,
num_channels,
fm_size=None,
batch_size=None,
step=(1,1),
border_mode="valid",
use_bias=True,
**kwargs):
"""
Initialize a CKM Layer.
"""
super(CKMLayer, self).__init__(filter_size=filter_size,
num_filters=num_filters,
num_channels=num_channels,
fm_size=fm_size,
batch_size=batch_size,
step=step,
border_mode=border_mode,
use_bias=use_bias,
**kwargs)
self.feature_shape=feature_shape
def ckm_updates(self, X):
"""
This function computes updates of filters and total changes
"""
        feature_shape_temp = np.asarray(self.feature_shape)
        filter_shape_temp = np.asarray([self.num_filters, self.num_channels,
                                        self.filter_size[0], self.filter_size[1]])
        ams_shape = (filter_shape_temp[1],
                     feature_shape_temp[0],
                     feature_shape_temp[2] - filter_shape_temp[2] + 1,
                     feature_shape_temp[3] - filter_shape_temp[3] + 1)
fmaps = self.apply_lin(X,
image_shape=self.feature_shape,
filter_shape=filter_shape_temp) # (num_examples, num_filters, fm_height, fm_width)
fmaps = fmaps.dimshuffle(1,0,2,3) # (num_filters, num_examples , fm_height, fm_width)
print "sum of filters is : ",T.sum(self.filters)
activation_maps=T.cast(T.max(fmaps, axis=(0), keepdims=True), dtype="float32")
# ams_sum=T.cast(T.sum(activation_maps, axis=(1,2,3), keepdims=True), dtype="float32");
feature_shape_new=(feature_shape_temp[1], feature_shape_temp[0], feature_shape_temp[2], feature_shape_temp[3])
update_out = self.apply_lin(X.dimshuffle(1,0,2,3),
filters=activation_maps,
image_shape=feature_shape_new,
filter_shape=ams_shape)
        update_out = update_out.dimshuffle(1, 0, 2, 3)
        update_out += self.filters
        # update_out /= (ams_sum + 1)
        updates = [(self.filters, 0 * self.filters + update_out)]
        return updates, T.sum(self.filters), update_out
|
import random as rand
from yachalk import chalk
import emoji
def welcome_en():
print("")
name = str(input("What's your name ? "))
if name == '':
name = 'Jakob'
else:
pass
print("Welcome, " + chalk.red(chalk.bg_white("{} ")).format(name) + emoji.emojize(":red_exclamation_mark:"))
print("")
questionsNum = input("How many questions do you want ? ")
print("")
if questionsNum == '':
questionsNum = 1
else:
questionsNum = int(questionsNum)
return name, questionsNum
def welcome_sv():
print("")
name = str(input("Vad heter du ? "))
if name == '':
name = 'Jakob'
else:
pass
print("Välkommen, " + chalk.red(chalk.bg_white("{} ")).format(name) + emoji.emojize(":red_exclamation_mark:"))
print("")
questionsNum = input("Hur många frågor vill du ha ? ")
print("")
if questionsNum == '':
questionsNum = 1
else:
questionsNum = int(questionsNum)
return name, questionsNum
def welcome_ru():
print("")
name = str(input("Как тебя завут ? "))
print("Привет, " + chalk.red(chalk.bg_white("{}")).format(name) + emoji.emojize(":red_exclamation_mark:"))
print("")
questionsNum = input("Сколько вопросов задать ? ")
print("")
if questionsNum == '':
questionsNum = 1
else:
questionsNum = int(questionsNum)
return name, questionsNum |
# FIND FIRST AND LAST POSITION OF ELEMENT IN SORTED ARRAY LEETCODE SOLUTION:
# creating a class.
class Solution(object):
# creating a function to solve the problem.
def searchRange(self, nums, target):
# creating a list.
l = []
# creating an if-statement to check if the target is not within the given array.
if target not in nums:
# returning '[-1, -1]' if the condition is met.
            return [-1, -1]
# creating a for-loop to iterate for the elements in the given array.
for i in range(len(nums)):
# creating a nested if-statement to check if the target is within the given array.
if nums[i] == target:
# code to append the index at which the target is present.
l.append(i)
# returning the first (minimum) and last (maximum) index of the element.
        return [min(l), max(l)] |
from .load_backend import *
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016-2018 by Ihor E. Novikov
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import logging
import os
from base64 import b64decode, b64encode
from io import BytesIO
from copy import deepcopy
from PIL import Image
from qc3 import qc3const, libgeom, libpango, cms, sk2const, utils
from qc3.utils import fsutils
from qc3.formats.sk2 import sk2_model
from qc3.formats.svg import svg_const, svg_utils
from qc3.formats.svg.svg_utils import (
get_svg_trafo,
check_svg_attr,
parse_svg_points,
parse_svg_coords,
parse_svg_color,
parse_svg_stops,
get_svg_level_trafo,
)
LOG = logging.getLogger(__name__)
SK2_UNITS = {
svg_const.SVG_PX: qc3const.UNIT_PX,
svg_const.SVG_PC: qc3const.UNIT_PX,
svg_const.SVG_PT: qc3const.UNIT_PT,
svg_const.SVG_MM: qc3const.UNIT_MM,
svg_const.SVG_CM: qc3const.UNIT_CM,
svg_const.SVG_M: qc3const.UNIT_M,
svg_const.SVG_IN: qc3const.UNIT_IN,
svg_const.SVG_FT: qc3const.UNIT_FT,
}
FONT_COEFF = 0.938
SK2_FILL_RULE = {
"nonzero": sk2const.FILL_NONZERO,
"evenodd": sk2const.FILL_EVENODD,
}
SK2_LINE_JOIN = {
"miter": sk2const.JOIN_MITER,
"round": sk2const.JOIN_ROUND,
"bevel": sk2const.JOIN_BEVEL,
}
SK2_LINE_CAP = {
"butt": sk2const.CAP_BUTT,
"round": sk2const.CAP_ROUND,
"square": sk2const.CAP_SQUARE,
}
SK2_TEXT_ALIGN = {
"start": sk2const.TEXT_ALIGN_LEFT,
"middle": sk2const.TEXT_ALIGN_CENTER,
"end": sk2const.TEXT_ALIGN_RIGHT,
}
SK2_GRAD_EXTEND = {
"pad": sk2const.GRADIENT_EXTEND_PAD,
"reflect": sk2const.GRADIENT_EXTEND_REFLECT,
"repeat": sk2const.GRADIENT_EXTEND_REPEAT,
}
class SVG_to_SK2_Translator(object):
page = None
layer = None
    trafo = []
dpi_coeff = 1.0
user_space = []
style_opts = {}
classes = {}
profiles = {}
unit_mapping = None
current_color = ""
svg_doc = None
sk2_doc = None
svg_mt = None
sk2_mt = None
sk2_mtds = None
svg_mtds = None
id_map = None
def translate(self, svg_doc, sk2_doc):
self.svg_doc = svg_doc
self.sk2_doc = sk2_doc
self.svg_mt = svg_doc.model
self.sk2_mt = sk2_doc.model
self.sk2_mtds = sk2_doc.methods
self.svg_mtds = svg_doc.methods
self.classes = {}
self.id_map = self.svg_mt.id_map
self.profiles = {}
self.current_color = ""
self.define_units()
self.translate_units()
self.translate_page()
for item in self.svg_mt.childs:
style = self.get_level_style(self.svg_mt, svg_const.SVG_STYLE)
self.translate_obj(self.layer, item, self.trafo, style)
if len(self.page.childs) > 1 and not self.layer.childs:
self.page.childs.remove(self.layer)
self.sk2_mt.do_update()
self._clear_objs()
def _clear_objs(self):
for item in self.__dict__.keys():
obj = self.__dict__[item]
if isinstance(obj, list):
self.__dict__[item] = []
elif isinstance(obj, dict):
self.__dict__[item] = {}
else:
self.__dict__[item] = None
self.dpi_coeff = 1.0
self.current_color = ""
# --- Utility methods
def define_units(self):
if not self.svg_doc.config.svg_dpi:
if "width" in self.svg_mt.attrs and self.svg_mtds.get_units(
self.svg_mt.attrs["width"]
) not in (svg_const.SVG_PX, svg_const.SVG_PC):
self.svg_doc.config.svg_dpi = 90.0
dpi_coeff = (
self.svg_doc.config.svg_dpi / svg_const.SVG_DPI
if self.svg_doc.config.svg_dpi
else 1.0
)
self.unit_mapping = {
svg_const.SVG_PX: svg_const.svg_px_to_pt / dpi_coeff,
svg_const.SVG_PT: 1.0,
svg_const.SVG_PC: 15.0 * svg_const.svg_px_to_pt / dpi_coeff,
svg_const.SVG_MM: qc3const.mm_to_pt,
svg_const.SVG_CM: qc3const.cm_to_pt,
svg_const.SVG_IN: qc3const.in_to_pt,
svg_const.SVG_M: qc3const.m_to_pt,
}
self.dpi_coeff = dpi_coeff
def recalc_size(self, val):
if not val:
return None
unit = self.svg_mtds.get_units(val)
size = float(val.replace(unit, ""))
return size * self.unit_mapping[unit] * self.dpi_coeff
def get_font_size(self, sval):
val = self.recalc_size(sval) / self.dpi_coeff
pts = [[0.0, 0.0], [0.0, val]]
pts = libgeom.apply_trafo_to_points(pts, self.trafo)
return libgeom.distance(*pts)
def get_viewbox(self, vbox):
vbox = vbox.replace(",", " ").replace(" ", " ")
return [self.recalc_size(item) for item in vbox.split()]
def parse_def(self, svg_obj):
if "color" in svg_obj.attrs:
if svg_obj.attrs["color"] == "inherit":
pass
else:
self.current_color = svg_obj.attrs["color"]
stops = []
if svg_obj.tag == "linearGradient":
if "xlink:href" in svg_obj.attrs:
cid = svg_obj.attrs["xlink:href"][1:]
if cid in self.id_map:
stops = self.parse_def(self.id_map[cid])[2][2]
if not stops:
return []
elif svg_obj.childs:
stops = parse_svg_stops(svg_obj.childs, self.current_color)
if not stops:
return []
else:
return []
x1 = 0.0
y1 = 0.0
x2 = self.user_space[2]
y2 = 0.0
if "x1" in svg_obj.attrs:
x1 = self.recalc_size(svg_obj.attrs["x1"])
if "y1" in svg_obj.attrs:
y1 = self.recalc_size(svg_obj.attrs["y1"])
if "x2" in svg_obj.attrs:
x2 = self.recalc_size(svg_obj.attrs["x2"])
if "y2" in svg_obj.attrs:
y2 = self.recalc_size(svg_obj.attrs["y2"])
if "gradientTransform" in svg_obj.attrs:
strafo = svg_obj.attrs["gradientTransform"]
self.style_opts["grad-trafo"] = get_svg_trafo(strafo)
extend = sk2const.GRADIENT_EXTEND_PAD
if "spreadMethod" in svg_obj.attrs:
val = svg_obj.attrs["spreadMethod"]
if val in SK2_GRAD_EXTEND:
extend = SK2_GRAD_EXTEND[val]
vector = [[x1, y1], [x2, y2]]
return [
0,
sk2const.FILL_GRADIENT,
[sk2const.GRADIENT_LINEAR, vector, stops, extend],
]
elif svg_obj.tag == "radialGradient":
if "xlink:href" in svg_obj.attrs:
cid = svg_obj.attrs["xlink:href"][1:]
if cid in self.id_map:
stops = self.parse_def(self.id_map[cid])[2][2]
if not stops:
return []
elif svg_obj.childs:
stops = parse_svg_stops(svg_obj.childs, self.current_color)
if not stops:
return []
else:
return []
cx = self.user_space[2] / 2.0 + self.user_space[0]
cy = self.user_space[3] / 2.0 + self.user_space[1]
if "cx" in svg_obj.attrs:
cx = self.recalc_size(svg_obj.attrs["cx"])
if "cy" in svg_obj.attrs:
cy = self.recalc_size(svg_obj.attrs["cy"])
r = self.user_space[2] / 2.0 + self.user_space[0]
if "r" in svg_obj.attrs:
r = self.recalc_size(svg_obj.attrs["r"])
if "gradientTransform" in svg_obj.attrs:
strafo = svg_obj.attrs["gradientTransform"]
self.style_opts["grad-trafo"] = get_svg_trafo(strafo)
extend = sk2const.GRADIENT_EXTEND_PAD
if "spreadMethod" in svg_obj.attrs:
val = svg_obj.attrs["spreadMethod"]
if val in SK2_GRAD_EXTEND:
extend = SK2_GRAD_EXTEND[val]
vector = [[cx, cy], [cx + r, cy]]
return [
0,
sk2const.FILL_GRADIENT,
[sk2const.GRADIENT_RADIAL, vector, stops, extend],
]
return []
def parse_clippath(self, svg_obj):
if svg_obj.tag == "clipPath" and svg_obj.childs:
container = sk2_model.Container(self.layer.config)
style = self.get_level_style(self.svg_mt, svg_const.SVG_STYLE)
for child in svg_obj.childs:
trafo = [] + libgeom.NORMAL_TRAFO
self.translate_obj(container, child, trafo, style)
if not container.childs:
return None
if len(container.childs) > 1:
curves = []
for item in container.childs:
item.update()
curve = item.to_curve()
pths = curve.get_initial_paths()
pths = libgeom.apply_trafo_to_paths(pths, curve.trafo)
curves.append(pths)
paths = curves[0]
for item in curves[1:]:
paths = libgeom.fuse_paths(paths, item)
else:
container.childs[0].update()
curve = container.childs[0].to_curve()
pths = curve.get_initial_paths()
paths = libgeom.apply_trafo_to_paths(pths, curve.trafo)
if not paths:
return None
curve = sk2_model.Curve(container.config, container, paths)
container.childs = [
curve,
]
return container
return None
def get_level_style(self, svg_obj, style_in):
if "color" in svg_obj.attrs:
if svg_obj.attrs["color"] == "inherit":
pass
else:
self.current_color = svg_obj.attrs["color"]
style = deepcopy(style_in)
for item in svg_const.SVG_STYLE.keys():
if item in svg_obj.attrs:
val = svg_obj.attrs[item]
if not val == "inherit":
style[item] = val
if "class" in svg_obj.attrs:
class_names = svg_obj.attrs["class"].split(" ")
for class_name in class_names:
if class_name in self.classes:
class_ = self.classes[class_name]
for item in class_.keys():
if item == "opacity" and item in style_in:
op = float(class_[item]) * float(style_in[item])
style["opacity"] = str(op)
else:
style[item] = class_[item]
if "style" in svg_obj.attrs:
stls = svg_obj.attrs["style"].split(";")
for stl in stls:
vals = stl.split(":")
if len(vals) == 2:
key = vals[0].strip()
val = vals[1].strip()
if key == "opacity" and key in style_in:
op = float(val) * float(style_in[key])
style["opacity"] = str(op)
else:
style[key] = val
return style
def get_sk2_style(self, svg_obj, style, text_style=False):
sk2_style = [[], [], [], []]
style = self.get_level_style(svg_obj, style)
self.style_opts = {}
if "display" in style and style["display"] == "none":
return sk2_style
if "visibility" in style and style["visibility"] in ("hidden", "collapse"):
return sk2_style
# fill parsing
if not style["fill"] == "none":
fillrule = SK2_FILL_RULE[style["fill-rule"]]
fill = style["fill"].replace('"', "")
alpha = float(style["fill-opacity"]) * float(style["opacity"])
def_id = ""
if len(fill) > 3 and fill[:3] == "url":
val = fill[5:].split(")")[0]
if val in self.id_map:
def_id = val
elif fill[0] == "#" and fill[1:] in self.id_map:
def_id = fill[1:]
if def_id:
sk2_style[0] = self.parse_def(self.id_map[def_id])
if sk2_style[0]:
sk2_style[0][0] = fillrule
if sk2_style[0][1] == sk2const.FILL_GRADIENT:
for stop in sk2_style[0][2][2]:
color = stop[1]
color[2] *= alpha
if "grad-trafo" in self.style_opts:
tr = [] + self.style_opts["grad-trafo"]
self.style_opts["fill-grad-trafo"] = tr
else:
clr = parse_svg_color(fill, alpha, self.current_color)
if clr:
sk2_style[0] = [fillrule, sk2const.FILL_SOLID, clr]
# stroke parsing
if not style["stroke"] == "none":
stroke = style["stroke"].replace('"', "")
stroke_rule = sk2const.STROKE_MIDDLE
stroke_width = self.recalc_size(style["stroke-width"])
stroke_width = stroke_width / self.dpi_coeff
stroke_linecap = SK2_LINE_CAP[style["stroke-linecap"]]
stroke_linejoin = SK2_LINE_JOIN[style["stroke-linejoin"]]
stroke_miterlimit = float(style["stroke-miterlimit"])
alpha = float(style["stroke-opacity"]) * float(style["opacity"])
dash = []
if style["stroke-dasharray"] != "none":
try:
code = compile("dash=[" + style["stroke-dasharray"] + "]", "<string>", "exec")
exec(code)
except Exception:
dash = []
if dash:
sk2_dash = []
for item in dash:
sk2_dash.append(item / stroke_width)
dash = sk2_dash
def_id = ""
if len(stroke) > 3 and stroke[:3] == "url":
val = stroke[5:].split(")")[0]
if val in self.id_map:
def_id = val
elif stroke[0] == "#" and stroke[1:] in self.id_map:
def_id = stroke[1:]
if def_id:
stroke_fill = self.parse_def(self.id_map[def_id])
if stroke_fill:
stroke_fill[0] = sk2const.FILL_NONZERO
if stroke_fill[1] == sk2const.FILL_GRADIENT:
for stop in stroke_fill[2][2]:
color = stop[1]
color[2] *= alpha
self.style_opts["stroke-fill"] = stroke_fill
self.style_opts["stroke-fill-color"] = stroke_fill[2][2][0][1]
clr = parse_svg_color("black")
sk2_style[1] = [
stroke_rule,
stroke_width,
clr,
dash,
stroke_linecap,
stroke_linejoin,
stroke_miterlimit,
0,
1,
[],
]
if "grad-trafo" in self.style_opts:
tr = [] + self.style_opts["grad-trafo"]
self.style_opts["stroke-grad-trafo"] = tr
else:
clr = parse_svg_color(stroke, alpha, self.current_color)
if clr:
sk2_style[1] = [
stroke_rule,
stroke_width,
clr,
dash,
stroke_linecap,
stroke_linejoin,
stroke_miterlimit,
0,
1,
[],
]
if text_style:
# font family
font_family = "Sans"
if style["font-family"] in libpango.get_fonts()[0]:
font_family = style["font-family"]
# font face
font_face = "Regular"
faces = libpango.get_fonts()[1][font_family]
if font_face not in faces:
font_face = faces[0]
bold = italic = False
if style["font-style"] in ("italic", "oblique"):
italic = True
if style["font-weight"] in ("bold", "bolder"):
bold = True
if bold and italic:
if "Bold Italic" in faces:
font_face = "Bold Italic"
elif "Bold Oblique" in faces:
font_face = "Bold Oblique"
elif bold and not italic:
if "Bold" in faces:
font_face = "Bold"
elif not bold and italic:
if "Italic" in faces:
font_face = "Italic"
elif "Oblique" in faces:
font_face = "Oblique"
# text size
font_size = 12.0
try:
font_size = self.get_font_size(style["font-size"])
except Exception:
pass
# text alignment
alignment = sk2const.TEXT_ALIGN_LEFT
if style["text-anchor"] in SK2_TEXT_ALIGN:
alignment = SK2_TEXT_ALIGN[style["text-anchor"]]
sk2_style[2] = [font_family, font_face, font_size, alignment, [], True]
return sk2_style
def get_image(self, svg_obj):
if "xlink:href" not in svg_obj.attrs:
return None
link = svg_obj.attrs["xlink:href"]
if link[:4] == "http":
pass
elif link[:4] == "data":
pos = 0
for sig in svg_const.IMG_SIGS:
if link[: len(sig)] == sig:
pos = len(sig)
if pos:
try:
                    raw_image = Image.open(BytesIO(b64decode(link[pos:])))
raw_image.load()
return raw_image
except Exception:
pass
elif self.svg_doc.doc_file:
file_dir = os.path.dirname(self.svg_doc.doc_file)
image_path = os.path.join(file_dir, link)
image_path = os.path.abspath(image_path)
if fsutils.exists(image_path):
raw_image = Image.open(image_path)
raw_image.load()
return raw_image
return None
    # --- Translation methods
def translate_units(self):
units = SK2_UNITS[self.svg_mtds.doc_units()]
self.sk2_mt.doc_units = units
def translate_page(self):
width = height = 0.0
vbox = []
if "viewBox" in self.svg_mt.attrs:
vbox = self.get_viewbox(self.svg_mt.attrs["viewBox"])
if "width" in self.svg_mt.attrs:
if not self.svg_mt.attrs["width"][-1] == "%":
width = self.recalc_size(self.svg_mt.attrs["width"])
else:
if vbox:
width = vbox[2]
if "height" in self.svg_mt.attrs and not self.svg_mt.attrs["height"][-1] == "%":
height = self.recalc_size(self.svg_mt.attrs["height"])
else:
if vbox:
height = vbox[3]
elif vbox:
width = vbox[2]
height = vbox[3]
if not width:
width = self.recalc_size("210mm")
if not height:
height = self.recalc_size("297mm")
page_fmt = [
"Custom",
(width / self.dpi_coeff, height / self.dpi_coeff),
qc3const.LANDSCAPE if width > height else qc3const.PORTRAIT,
]
pages_obj = self.sk2_mtds.get_pages_obj()
pages_obj.page_format = page_fmt
self.page = sk2_model.Page(pages_obj.config, pages_obj, "SVG page")
self.page.page_format = deepcopy(page_fmt)
pages_obj.childs = [
self.page,
]
pages_obj.page_counter = 1
self.layer = sk2_model.Layer(self.page.config, self.page)
self.page.childs = [
self.layer,
]
# Document trafo calculation
self.trafo = [1 / self.dpi_coeff, 0.0, 0.0, 1 / self.dpi_coeff, 0.0, 0.0]
dx = -width / 2.0
dy = height / 2.0
tr = [1.0, 0.0, 0.0, -1.0, dx, dy]
self.user_space = [0.0, 0.0, width, height]
self.trafo = libgeom.multiply_trafo(tr, self.trafo)
if vbox:
dx = -vbox[0]
dy = -vbox[1]
xx = width / vbox[2]
yy = height / vbox[3]
if (
"xml:space" in self.svg_mt.attrs
and self.svg_mt.attrs["xml:space"] == "preserve"
):
xx = yy = min(xx, yy)
tr = [xx, 0.0, 0.0, yy, 0.0, 0.0]
tr = libgeom.multiply_trafo([1.0, 0.0, 0.0, 1.0, dx, dy], tr)
self.trafo = libgeom.multiply_trafo(tr, self.trafo)
self.user_space = vbox
def translate_obj(self, parent, svg_obj, trafo, style):
obj_mapping = {
"g": self.translate_g,
"rect": self.translate_rect,
"circle": self.translate_circle,
"ellipse": self.translate_ellipse,
"line": self.translate_line,
"polyline": self.translate_polyline,
"polygon": self.translate_polygon,
"path": self.translate_path,
"use": self.translate_use,
"text": self.translate_text,
"image": self.translate_image,
}
if svg_obj.attrs.get("display") == "none":
return
try:
if svg_obj.tag == "defs":
self.translate_defs(svg_obj)
elif svg_obj.tag == "sodipodi:namedview":
self.translate_namedview(svg_obj)
elif svg_obj.tag == "sodipodi:guide":
self.translate_guide(svg_obj)
elif svg_obj.tag in obj_mapping:
obj_mapping[svg_obj.tag](parent, svg_obj, trafo, style)
elif svg_obj.tag == "linearGradient":
return
elif svg_obj.tag == "radialGradient":
return
elif svg_obj.tag == "style":
self.translate_style(svg_obj)
elif svg_obj.tag == "pattern":
return
elif svg_obj.tag == "clipPath":
return
elif svg_obj.childs:
self.translate_unknown(parent, svg_obj, trafo, style)
except Exception as e:
LOG.warn("Cannot translate <%s> object, tag <%s>", repr(svg_obj), svg_obj.tag)
if "id" in svg_obj.attrs:
LOG.warn("Object id: %s", svg_obj.attrs["id"])
LOG.warn("Error traceback: %s", e)
def translate_defs(self, svg_obj):
for item in svg_obj.childs:
if item.tag == "style":
self.translate_style(item)
elif item.tag == "color-profile":
self.translate_color_profile(item)
def translate_namedview(self, svg_obj):
for item in svg_obj.childs:
self.translate_obj(None, item, None, None)
def translate_guide(self, svg_obj):
position = parse_svg_points(svg_obj.attrs["position"])[0]
position = libgeom.apply_trafo_to_point(position, self.trafo)
orientation = parse_svg_points(svg_obj.attrs["orientation"])[0]
if position and orientation:
if not orientation[0] and orientation[1]:
orientation = qc3const.HORIZONTAL
position = -position[1]
elif not orientation[1] and orientation[0]:
orientation = qc3const.VERTICAL
position = position[0]
else:
return
guide_layer = self.sk2_mtds.get_guide_layer()
guide = sk2_model.Guide(guide_layer.config, guide_layer, position, orientation)
guide_layer.childs.append(guide)
def translate_style(self, svg_obj):
items = []
for item in svg_obj.childs:
if item.is_content():
val = item.text.strip()
if val:
items.append(val)
if not items:
return
items = " ".join(items)
if "." not in items:
return
items = items.split(".")[1:]
for item in items:
if "{" not in item:
continue
class_, stylestr = item.split("{")
stylestr = stylestr.replace("}", "")
stls = stylestr.split(";")
style = {}
for stl in stls:
vals = stl.split(":")
if len(vals) == 2:
style[vals[0].strip()] = vals[1].strip()
self.classes[class_.strip()] = style
def translate_color_profile(self, svg_obj):
self.profiles[svg_obj.attrs["name"]] = svg_obj
def translate_g(self, parent, svg_obj, trafo, style):
tr = get_svg_level_trafo(svg_obj, trafo)
stl = self.get_level_style(svg_obj, style)
container = None
if "inkscape:groupmode" in svg_obj.attrs:
if svg_obj.attrs["inkscape:groupmode"] == "layer":
name = "Layer %d" % len(self.page.childs)
if "inkscape:label" in svg_obj.attrs:
name = svg_obj.attrs["inkscape:label"]
if not self.layer.childs:
self.page.childs.remove(self.layer)
self.layer = sk2_model.Layer(self.page.config, self.page, name)
self.page.childs.append(self.layer)
if check_svg_attr(svg_obj, "sodipodi:insensitive", "true"):
self.layer.properties[1] = 0
if "display" in stl and stl["display"] == "none":
self.layer.properties[0] = 0
for item in svg_obj.childs:
self.translate_obj(self.layer, item, tr, stl)
self.layer = sk2_model.Layer(self.page.config, self.page)
self.page.childs.append(self.layer)
return
elif "clip-path" in svg_obj.attrs:
clip_id = svg_obj.attrs["clip-path"][5:-1].strip()
if clip_id in self.id_map:
container = self.parse_clippath(self.id_map[clip_id])
if container:
container.childs[0].trafo = [] + tr
for item in svg_obj.childs:
self.translate_obj(container, item, tr, stl)
if len(container.childs) > 1:
parent.childs.append(container)
return
if not svg_obj.childs:
return
group = sk2_model.Group(parent.config, parent)
for item in svg_obj.childs:
self.translate_obj(group, item, tr, stl)
if group.childs:
if len(group.childs) == 1:
parent.childs.append(group.childs[0])
else:
parent.childs.append(group)
def translate_unknown(self, parent, svg_obj, trafo, style):
group = sk2_model.Group(parent.config, parent)
tr = get_svg_level_trafo(svg_obj, trafo)
stl = self.get_level_style(svg_obj, style)
for item in svg_obj.childs:
self.translate_obj(group, item, tr, stl)
if group.childs:
parent.childs.append(group)
def append_obj(self, parent, svg_obj, obj, trafo, style):
obj.stroke_trafo = [] + trafo
if style[0] and style[0][1] == sk2const.FILL_GRADIENT:
obj.fill_trafo = [] + trafo
if "fill-grad-trafo" in self.style_opts:
tr0 = self.style_opts["fill-grad-trafo"]
obj.fill_trafo = libgeom.multiply_trafo(tr0, trafo)
curve = None
if style[1] and "stroke-fill" in self.style_opts:
obj.update()
stroke_obj = obj.to_curve()
pths = libgeom.apply_trafo_to_paths(
stroke_obj.get_initial_paths(), stroke_obj.trafo
)
try:
pths = libgeom.stroke_to_curve(pths, obj.style[1])
obj_style = [self.style_opts["stroke-fill"], [], [], []]
curve = sk2_model.Curve(parent.config, parent, pths, style=obj_style)
obj.style[1] = []
curve.fill_trafo = [] + trafo
if "stroke-grad-trafo" in self.style_opts:
tr0 = self.style_opts["stroke-grad-trafo"]
curve.fill_trafo = libgeom.multiply_trafo(tr0, trafo)
except Exception:
if "stroke-fill-color" in self.style_opts:
obj.style[1][2] = self.style_opts["stroke-fill-color"]
else:
obj.style[1] = []
container = None
if "clip-path" in svg_obj.attrs:
clip_id = svg_obj.attrs["clip-path"][5:-1].strip()
if clip_id in self.id_map:
container = self.parse_clippath(self.id_map[clip_id])
if container:
container.childs[0].trafo = [] + trafo
if container:
container.childs.append(obj)
if curve:
container.childs.append(curve)
parent.childs.append(container)
else:
parent.childs.append(obj)
if curve:
parent.childs.append(curve)
def translate_rect(self, parent, svg_obj, trafo, style):
cfg = parent.config
sk2_style = self.get_sk2_style(svg_obj, style)
tr = get_svg_level_trafo(svg_obj, trafo)
x = y = w = h = 0
if "x" in svg_obj.attrs:
x = self.recalc_size(svg_obj.attrs["x"])
if "y" in svg_obj.attrs:
y = self.recalc_size(svg_obj.attrs["y"])
if "width" in svg_obj.attrs:
w = self.recalc_size(svg_obj.attrs["width"])
if "height" in svg_obj.attrs:
h = self.recalc_size(svg_obj.attrs["height"])
if not w or not h:
return
corners = [] + sk2const.CORNERS
rx = ry = None
if "rx" in svg_obj.attrs:
rx = self.recalc_size(svg_obj.attrs["rx"])
if "ry" in svg_obj.attrs:
ry = self.recalc_size(svg_obj.attrs["ry"])
if rx is None and ry is not None:
rx = ry
elif ry is None and rx is not None:
ry = rx
if not rx or not ry:
rx = ry = None
if rx is not None:
rx = abs(rx)
ry = abs(ry)
if rx > w / 2.0:
rx = w / 2.0
if ry > h / 2.0:
ry = h / 2.0
coeff = rx / ry
w = w / coeff
trafo = [1.0, 0.0, 0.0, 1.0, -x, -y]
trafo1 = [coeff, 0.0, 0.0, 1.0, 0.0, 0.0]
trafo2 = [1.0, 0.0, 0.0, 1.0, x, y]
trafo = libgeom.multiply_trafo(trafo, trafo1)
trafo = libgeom.multiply_trafo(trafo, trafo2)
tr = libgeom.multiply_trafo(trafo, tr)
corners = [
2.0 * ry / min(w, h),
] * 4
rect = sk2_model.Rectangle(cfg, parent, [x, y, w, h], tr, sk2_style, corners)
self.append_obj(parent, svg_obj, rect, tr, sk2_style)
def translate_ellipse(self, parent, svg_obj, trafo, style):
cfg = parent.config
sk2_style = self.get_sk2_style(svg_obj, style)
tr = get_svg_level_trafo(svg_obj, trafo)
cx = cy = rx = ry = 0.0
if "cx" in svg_obj.attrs:
cx = self.recalc_size(svg_obj.attrs["cx"])
if "cy" in svg_obj.attrs:
cy = self.recalc_size(svg_obj.attrs["cy"])
if "rx" in svg_obj.attrs:
rx = self.recalc_size(svg_obj.attrs["rx"])
if "ry" in svg_obj.attrs:
ry = self.recalc_size(svg_obj.attrs["ry"])
if not rx or not ry:
return
rect = [cx - rx, cy - ry, 2.0 * rx, 2.0 * ry]
ellipse = sk2_model.Circle(cfg, parent, rect, style=sk2_style)
ellipse.trafo = libgeom.multiply_trafo(ellipse.trafo, tr)
self.append_obj(parent, svg_obj, ellipse, tr, sk2_style)
def translate_circle(self, parent, svg_obj, trafo, style):
cfg = parent.config
sk2_style = self.get_sk2_style(svg_obj, style)
tr = get_svg_level_trafo(svg_obj, trafo)
cx = cy = r = 0.0
if "cx" in svg_obj.attrs:
cx = self.recalc_size(svg_obj.attrs["cx"])
if "cy" in svg_obj.attrs:
cy = self.recalc_size(svg_obj.attrs["cy"])
if "r" in svg_obj.attrs:
r = self.recalc_size(svg_obj.attrs["r"])
if not r:
return
rect = [cx - r, cy - r, 2.0 * r, 2.0 * r]
ellipse = sk2_model.Circle(cfg, parent, rect, style=sk2_style)
ellipse.trafo = libgeom.multiply_trafo(ellipse.trafo, tr)
self.append_obj(parent, svg_obj, ellipse, tr, sk2_style)
def translate_line(self, parent, svg_obj, trafo, style):
cfg = parent.config
sk2_style = self.get_sk2_style(svg_obj, style)
tr = get_svg_level_trafo(svg_obj, trafo)
x1 = y1 = x2 = y2 = 0.0
if "x1" in svg_obj.attrs:
x1 = self.recalc_size(svg_obj.attrs["x1"])
if "y1" in svg_obj.attrs:
y1 = self.recalc_size(svg_obj.attrs["y1"])
if "x2" in svg_obj.attrs:
x2 = self.recalc_size(svg_obj.attrs["x2"])
if "y2" in svg_obj.attrs:
y2 = self.recalc_size(svg_obj.attrs["y2"])
paths = [
[
[x1, y1],
[
[x2, y2],
],
sk2const.CURVE_OPENED,
],
]
curve = sk2_model.Curve(cfg, parent, paths, tr, sk2_style)
self.append_obj(parent, svg_obj, curve, tr, sk2_style)
def _line(self, point1, point2):
paths = [
[
[] + point1,
[
[] + point2,
],
sk2const.CURVE_OPENED,
],
]
tr = [] + self.trafo
style = [[], self.layer.config.default_stroke, [], []]
curve = sk2_model.Curve(self.layer.config, self.layer, paths, tr, style)
self.layer.childs.append(curve)
def _point(self, point, trafo=None):
if not trafo:
trafo = [] + self.trafo
style = [[], self.layer.config.default_stroke, [], []]
rect = sk2_model.Rectangle(
self.layer.config, self.layer, point + [1.0, 1.0], trafo, style=style
)
self.layer.childs.append(rect)
def translate_polyline(self, parent, svg_obj, trafo, style):
cfg = parent.config
sk2_style = self.get_sk2_style(svg_obj, style)
tr = get_svg_level_trafo(svg_obj, trafo)
if "points" not in svg_obj.attrs:
return
points = parse_svg_points(svg_obj.attrs["points"])
if not points or len(points) < 2:
return
paths = [
[points[0], points[1:], sk2const.CURVE_OPENED],
]
curve = sk2_model.Curve(cfg, parent, paths, tr, sk2_style)
self.append_obj(parent, svg_obj, curve, tr, sk2_style)
def translate_polygon(self, parent, svg_obj, trafo, style):
cfg = parent.config
sk2_style = self.get_sk2_style(svg_obj, style)
tr = get_svg_level_trafo(svg_obj, trafo)
if "points" not in svg_obj.attrs:
return
points = parse_svg_points(svg_obj.attrs["points"])
if not points or len(points) < 3:
return
points.append([] + points[0])
paths = [
[points[0], points[1:], sk2const.CURVE_CLOSED],
]
curve = sk2_model.Curve(cfg, parent, paths, tr, sk2_style)
self.append_obj(parent, svg_obj, curve, tr, sk2_style)
def translate_path(self, parent, svg_obj, trafo, style):
cfg = parent.config
sk2_style = self.get_sk2_style(svg_obj, style)
tr = get_svg_level_trafo(svg_obj, trafo)
if check_svg_attr(svg_obj, "sodipodi:type", "arc"):
cx = self.recalc_size(svg_obj.attrs["sodipodi:cx"])
cy = self.recalc_size(svg_obj.attrs["sodipodi:cy"])
rx = self.recalc_size(svg_obj.attrs["sodipodi:rx"])
ry = self.recalc_size(svg_obj.attrs["sodipodi:ry"])
angle1 = angle2 = 0.0
if "sodipodi:start" in svg_obj.attrs:
angle1 = float(svg_obj.attrs["sodipodi:start"])
if "sodipodi:end" in svg_obj.attrs:
angle2 = float(svg_obj.attrs["sodipodi:end"])
circle_type = sk2const.ARC_PIE_SLICE
if check_svg_attr(svg_obj, "sodipodi:open", "true"):
circle_type = sk2const.ARC_ARC
rect = [cx - rx, cy - ry, 2.0 * rx, 2.0 * ry]
curve = sk2_model.Circle(
cfg, parent, rect, angle1, angle2, circle_type, sk2_style
)
curve.trafo = libgeom.multiply_trafo(curve.trafo, tr)
self.append_obj(parent, svg_obj, curve, tr, sk2_style)
elif "d" in svg_obj.attrs:
paths = svg_utils.parse_svg_path_cmds(svg_obj.attrs["d"])
if not paths:
return
curve = sk2_model.Curve(cfg, parent, paths, tr, sk2_style)
self.append_obj(parent, svg_obj, curve, tr, sk2_style)
def translate_use(self, parent, svg_obj, trafo, style):
tr = get_svg_level_trafo(svg_obj, trafo)
stl = self.get_level_style(svg_obj, style)
if "xlink:href" in svg_obj.attrs:
obj_id = svg_obj.attrs["xlink:href"][1:]
if obj_id in self.id_map:
self.translate_obj(parent, self.id_map[obj_id], tr, stl)
else:
LOG.warn("<use> object id %s is not found", obj_id)
def translate_text(self, parent, svg_obj, trafo, style):
cfg = parent.config
stl = self.get_level_style(svg_obj, style)
sk2_style = self.get_sk2_style(svg_obj, stl, True)
tr_level = get_svg_level_trafo(svg_obj, trafo)
inv_tr = libgeom.invert_trafo(self.trafo)
inv_tr[3] *= -1.0
tr = libgeom.multiply_trafo(tr_level, inv_tr)
tr = libgeom.multiply_trafo([FONT_COEFF, 0.0, 0.0, -FONT_COEFF, 0.0, 0.0], tr)
x = y = 0.0
if "x" in svg_obj.attrs:
x = parse_svg_coords(svg_obj.attrs["x"])[0]
if "y" in svg_obj.attrs:
y = parse_svg_coords(svg_obj.attrs["y"])[0]
if not svg_obj.childs:
return
txt = svg_utils.parse_svg_text(svg_obj.childs)
if not txt:
return
x1, y1 = libgeom.apply_trafo_to_point([x, y], tr_level)
x2, y2 = libgeom.apply_trafo_to_point([0.0, 0.0], tr)
tr = libgeom.multiply_trafo(tr, [1.0, 0.0, 0.0, 1.0, -x2, -y2])
text = sk2_model.Text(cfg, parent, [x1, y1], txt, -1, tr, sk2_style)
self.append_obj(parent, svg_obj, text, tr_level, sk2_style)
def translate_image(self, parent, svg_obj, trafo, style):
cfg = parent.config
tr_level = get_svg_level_trafo(svg_obj, trafo)
inv_tr = libgeom.invert_trafo(self.trafo)
tr = libgeom.multiply_trafo(inv_tr, tr_level)
x = y = 0.0
if "x" in svg_obj.attrs:
x = parse_svg_coords(svg_obj.attrs["x"])[0]
if "y" in svg_obj.attrs:
y = parse_svg_coords(svg_obj.attrs["y"])[0]
w = h = 0.0
if "width" in svg_obj.attrs:
w = parse_svg_coords(svg_obj.attrs["width"])[0]
if "height" in svg_obj.attrs:
h = parse_svg_coords(svg_obj.attrs["height"])[0]
if not w or not h:
return
raw_image = self.get_image(svg_obj)
if not raw_image:
return
img_w, img_h = raw_image.size
trafo = [1.0, 0.0, 0.0, 1.0, -img_w / 2.0, -img_h / 2.0]
trafo1 = [w / img_w, 0.0, 0.0, h / img_h, 0.0, 0.0]
trafo2 = [1.0, 0.0, 0.0, 1.0, w / 2.0, h / 2.0]
trafo = libgeom.multiply_trafo(trafo, trafo1)
trafo = libgeom.multiply_trafo(trafo, trafo2)
dx, dy = libgeom.apply_trafo_to_point([x, y], self.trafo)
trafo3 = [1.0, 0.0, 0.0, 1.0, dx, dy - h]
trafo = libgeom.multiply_trafo(trafo, trafo3)
trafo = libgeom.multiply_trafo(trafo, tr)
pixmap = sk2_model.Pixmap(cfg)
pixmap.handler.load_from_images(self.sk2_doc.cms, raw_image)
pixmap.trafo = trafo
container = None
if "clip-path" in svg_obj.attrs:
clip_id = svg_obj.attrs["clip-path"][5:-1].strip()
if clip_id in self.id_map:
container = self.parse_clippath(self.id_map[clip_id])
if container:
container.childs[0].trafo = [] + tr_level
if container:
container.childs.append(pixmap)
parent.childs.append(container)
else:
parent.childs.append(pixmap)
SVG_FILL_RULE = {
sk2const.FILL_NONZERO: "nonzero",
sk2const.FILL_EVENODD: "evenodd",
}
SVG_LINE_JOIN = {
sk2const.JOIN_MITER: "miter",
sk2const.JOIN_ROUND: "round",
sk2const.JOIN_BEVEL: "bevel",
}
SVG_LINE_CAP = {
sk2const.CAP_BUTT: "butt",
sk2const.CAP_ROUND: "round",
sk2const.CAP_SQUARE: "square",
}
SVG_GRAD_EXTEND = {
sk2const.GRADIENT_EXTEND_PAD: "pad",
sk2const.GRADIENT_EXTEND_REFLECT: "reflect",
sk2const.GRADIENT_EXTEND_REPEAT: "repeat",
}
class SK2_to_SVG_Translator(object):
dx = dy = page_dx = 0.0
indent_level = -1
defs_count = 0
trafo = None
defs = None
svg_doc = None
sk2_doc = None
svg_mt = None
sk2_mt = None
sk2_mtds = None
svg_mtds = None
def translate(self, sk2_doc, svg_doc):
self.svg_doc = svg_doc
self.sk2_doc = sk2_doc
self.svg_mt = svg_doc.model
self.sk2_mt = sk2_doc.model
self.sk2_mtds = sk2_doc.methods
self.svg_mtds = svg_doc.methods
self.defs_count = 0
svg_attrs = self.svg_mt.attrs
self.trafo = [1.0, 0.0, 0.0, -1.0, 0.0, 0.0]
svg_attrs["id"] = utils.generate_guid()
units = (
svg_const.SVG_PX
if self.sk2_mt.doc_units == qc3const.UNIT_PX
else svg_const.SVG_PT
)
for item in self.svg_mt.childs:
if item.tag == "defs":
self.defs = item
break
for item in self.sk2_mt.childs:
if item.cid == sk2_model.PAGES:
page = item.childs[0]
w, h = page.page_format[1]
svg_attrs["width"] = str(w) + units
svg_attrs["height"] = str(h) + units
if units != svg_const.SVG_PX:
svg_attrs["viewBox"] = "0 0 %s %s" % (str(w), str(h))
self.dx = w / 2.0
self.dy = h / 2.0
self.trafo[4] = self.dx
self.trafo[5] = self.dy
self.page_dx = 0.0
for page in item.childs:
self.translate_page(self.svg_mt, page)
self.indent_level = 0
if self.defs.childs:
self.add_spacer(self.defs)
else:
self.svg_mt.childs.remove(self.defs)
self.add_spacer(self.svg_mt)
self.svg_doc = None
self.sk2_doc = None
self.svg_mt = None
self.sk2_mt = None
self.sk2_mtds = None
self.svg_mtds = None
def add_spacer(self, parent):
spacer = "\n" + "\t" * self.indent_level
parent.childs.append(svg_utils.create_spacer(spacer))
def append_obj(self, parent, obj):
self.add_spacer(parent)
parent.childs.append(obj)
def translate_page(self, dest_parent, source_obj):
w, h = source_obj.page_format[1]
self.trafo[4] = w / 2.0 + self.page_dx
if self.page_dx:
rect = svg_utils.create_rect(self.page_dx, self.dy - h / 2.0, w, h)
rect.attrs["style"] = "fill:none;stroke:black;"
self.append_obj(self.svg_mt, rect)
self.translate_objs(self.svg_mt, source_obj.childs)
self.page_dx += w + 30.0
def translate_objs(self, dest_parent, source_objs):
self.indent_level += 1
for source_obj in source_objs:
if source_obj.is_layer:
self.translate_layer(dest_parent, source_obj)
elif source_obj.is_group:
self.translate_group(dest_parent, source_obj)
elif source_obj.is_pixmap:
self.translate_pixmap(dest_parent, source_obj)
elif source_obj.is_primitive:
if source_obj.style[0] and source_obj.style[1] and source_obj.style[1][7]:
stroke_obj = source_obj.copy()
stroke_obj.update()
stroke_obj.style[0] = []
self.translate_primitive(dest_parent, stroke_obj)
fill_obj = source_obj.copy()
fill_obj.update()
fill_obj.style[1] = []
self.translate_primitive(dest_parent, fill_obj)
else:
self.translate_primitive(dest_parent, source_obj)
self.indent_level -= 1
def translate_layer(self, dest_parent, source_obj):
group = svg_utils.create_xmlobj("g")
if not source_obj.properties[0]:
group.attrs["style"] = "display:none;"
self.translate_objs(group, source_obj.childs)
self.add_spacer(group)
self.append_obj(dest_parent, group)
def translate_group(self, dest_parent, source_obj):
if source_obj.is_container:
clip = source_obj.childs[0]
clip_id = self.make_clippath(clip)
if clip.style[1] and clip.style[1][7]:
stroke_obj = clip.copy()
stroke_obj.update()
stroke_obj.style[0] = []
self.translate_primitive(dest_parent, stroke_obj)
if clip.style[0]:
fill_obj = clip.copy()
fill_obj.update()
fill_obj.style[1] = []
self.translate_primitive(dest_parent, fill_obj)
group = svg_utils.create_xmlobj("g")
group.attrs["clip-path"] = "url(#%s)" % clip_id
self.translate_objs(group, source_obj.childs[1:])
self.add_spacer(group)
self.append_obj(dest_parent, group)
if clip.style[1] and not clip.style[1][7]:
stroke_obj = clip.copy()
stroke_obj.update()
stroke_obj.style[0] = []
self.translate_primitive(dest_parent, stroke_obj)
else:
group = svg_utils.create_xmlobj("g")
self.translate_objs(group, source_obj.childs)
self.add_spacer(group)
self.append_obj(dest_parent, group)
def make_clippath(self, source_obj):
clippath = svg_utils.create_xmlobj("clipPath")
clippath.attrs["clipPathUnits"] = "userSpaceOnUse"
clippath.attrs["id"] = "clipPath" + str(self.defs_count + 1)
self.defs_count += 1
lvl = self.indent_level
self.indent_level = 1
self.append_obj(self.defs, clippath)
self.indent_level += 1
self.translate_primitive(clippath, source_obj)
self.indent_level = lvl
return clippath.attrs["id"]
def translate_primitive(self, dest_parent, source_obj):
curve = source_obj.to_curve()
if curve.is_group:
self.translate_group(dest_parent, curve)
return
curve.update()
style = self.translate_style(source_obj)
trafo = libgeom.multiply_trafo(curve.trafo, self.trafo)
paths = libgeom.apply_trafo_to_paths(curve.paths, trafo)
pth = svg_utils.create_xmlobj("path")
pth.attrs["style"] = style
pth.attrs["d"] = svg_utils.translate_paths_to_d(paths)
self.append_obj(dest_parent, pth)
arrows = curve.arrows_to_curve()
if arrows:
self.translate_primitive(dest_parent, arrows)
def translate_pixmap(self, dest_parent, source_obj):
surface = source_obj.handler.get_surface(self.sk2_doc.cms)
        image_stream = BytesIO()
        surface.write_to_png(image_stream)
        content = b64encode(image_stream.getvalue()).decode("ascii")
image = svg_utils.create_xmlobj("image")
w, h = source_obj.get_size()
trafo = [1.0, 0.0, 0.0, -1.0, 0.0, 0.0]
trafo = libgeom.multiply_trafo(trafo, source_obj.trafo)
trafo = libgeom.multiply_trafo(trafo, self.trafo)
image.attrs["xlink:href"] = "data:image/png;base64," + content
image.attrs["transform"] = "matrix(%s)" % trafo.__str__()[1:-1]
image.attrs["x"] = "0"
image.attrs["y"] = str(-h)
image.attrs["width"] = str(w)
image.attrs["height"] = str(h)
self.append_obj(dest_parent, image)
def translate_style(self, obj):
style = {}
self.set_fill(style, obj)
self.set_stroke(style, obj)
return svg_utils.translate_style_dict(style)
def set_stroke(self, svg_style, obj):
if not obj.style[1]:
return
# Stroke width
line_width = 0.0
if obj.style[1][8]:
stroke_trafo = obj.stroke_trafo
if not stroke_trafo:
stroke_trafo = [] + libgeom.NORMAL_TRAFO
points = [[0.0, 0.0], [1.0, 0.0]]
points = libgeom.apply_trafo_to_points(points, stroke_trafo)
coef = libgeom.distance(*points)
line_width = obj.style[1][1] * coef
svg_style["stroke-width"] = str(round(line_width, 4))
else:
if not obj.style[1][1] == 1.0:
line_width = obj.style[1][1]
svg_style["stroke-width"] = str(round(line_width, 4))
# Stroke color
clr = self.sk2_doc.cms.get_rgb_color(obj.style[1][2])
svg_style["stroke"] = cms.rgb_to_hexcolor(clr[1])
if clr[2] < 1.0:
svg_style["stroke-opacity"] = str(clr[2])
# Stroke dash
if obj.style[1][3]:
vals = []
for item in obj.style[1][3]:
vals.append(str(round(item * line_width, 4)))
svg_style["stroke-dasharray"] = ", ".join(vals)
# Stroke caps
caps = SVG_LINE_CAP[obj.style[1][4]]
if not caps == "butt":
svg_style["stroke-linecap"] = caps
# Stroke join
join = SVG_LINE_JOIN[obj.style[1][5]]
if not join == "miter":
svg_style["stroke-linejoin"] = join
# Miter limit
svg_style["stroke-miterlimit"] = str(round(obj.style[1][6], 4))
def set_fill(self, svg_style, obj):
svg_style["fill"] = "none"
if not obj.style[0]:
return
if obj.style[0][1] == sk2const.FILL_SOLID:
if obj.style[0][0] == sk2const.FILL_EVENODD:
svg_style["fill-rule"] = "evenodd"
clr = self.sk2_doc.cms.get_rgb_color(obj.style[0][2])
svg_style["fill"] = cms.rgb_to_hexcolor(clr[1])
if clr[2] < 1.0:
svg_style["fill-opacity"] = str(clr[2])
elif obj.style[0][1] == sk2const.FILL_GRADIENT:
if obj.style[0][0] == sk2const.FILL_EVENODD:
svg_style["fill-rule"] = "evenodd"
grad_id = self.translate_gradient(obj.style[0][2], obj)
svg_style["fill"] = "url(#%s)" % grad_id
def translate_gradient(self, gradient, obj):
grad_id = "grad" + str(self.defs_count + 1)
self.defs_count += 1
trafo = libgeom.multiply_trafo(obj.fill_trafo, self.trafo)
vector = libgeom.apply_trafo_to_points(gradient[1], trafo)
spread = "pad"
if len(gradient) > 3:
spread = SVG_GRAD_EXTEND[gradient[3]]
attrs = {"gradientUnits": "userSpaceOnUse"}
if gradient[0] == sk2const.GRADIENT_RADIAL:
tag = "radialGradient"
attrs["id"] = grad_id
attrs["spreadMethod"] = spread
cx, cy = gradient[1][0]
r = libgeom.distance(*gradient[1])
attrs["cx"] = str(cx)
attrs["cy"] = str(cy)
attrs["r"] = str(r)
tr = trafo.__str__()[1:-1]
attrs["gradientTransform"] = "matrix(%s)" % tr
else:
tag = "linearGradient"
x1, y1 = vector[0]
x2, y2 = vector[1]
attrs["id"] = grad_id
attrs["spreadMethod"] = spread
attrs["x1"] = str(x1)
attrs["y1"] = str(y1)
attrs["x2"] = str(x2)
attrs["y2"] = str(y2)
grad_obj = svg_utils.create_xmlobj(tag, attrs)
lvl = self.indent_level
self.indent_level = 1
self.append_obj(self.defs, grad_obj)
self.indent_level += 1
self.translate_stops(grad_obj, gradient[2])
self.indent_level = lvl
return grad_id
def translate_stops(self, parent, stops):
for stop in stops:
attrs = {}
offset, color = stop
attrs["offset"] = str(offset)
clr = self.sk2_doc.cms.get_rgb_color(color)
clr = cms.rgb_to_hexcolor(clr[1])
alpha = str(color[2])
attrs["style"] = "stop-color:%s;stop-opacity:%s;" % (clr, alpha)
stop_obj = svg_utils.create_xmlobj("stop", attrs)
self.append_obj(parent, stop_obj)
self.indent_level -= 1
self.add_spacer(parent)
|
import networkx as nx
import pandas as pd
from community import community_louvain  # python-louvain; required by partition_df()
from random import choice
from multiprocessing import Pool
import itertools
import os
def import_graph(path):
"""
Import and read a networkx graph to do some calculations
Args:
path (string): path of the networkx graph
Returns:
object: networkx graph object
"""
try:
G = nx.read_gexf(path)
print(f"Graph imported succesfully: {path}")
return G
except Exception as e:
print(f"Impossible to read the Networkx Graph, please check the path: {e}")
return None
# Degree centrality
def get_in_degree_centrality(G):
return pd.DataFrame.from_dict(nx.in_degree_centrality(G), orient="Index")
# Betweenness
def get_betweenness(G):
return nx.betweenness_centrality(G)
# Eigenvector centrality
def get_eigenvector_centrality(G):
return pd.DataFrame.from_dict(nx.eigenvector_centrality(G), orient="Index")
""" START Communities detection """
# Communities
def get_communities(G_undirect):
communities = partition_df(G_undirect)
communities.rename(columns={0: "community"}, inplace=True)
return communities
# Communities & degree
def append_communities_degree(communities):
degree = []
for u in communities["user"]:
degree.append(G.in_degree[u])
communities["degree"] = degree
return communities
def partition_df(G):
partition = community_louvain.best_partition(G, random_state=42)
partition_df = pd.DataFrame()
partition_df = partition_df.from_dict(partition, orient="index")
user = []
for node in G:
user.append(node)
partition_df["user"] = user
return partition_df
def max_degree_community(df):
n_comm = max(df["community"])
communities_leader = pd.DataFrame(columns=["community", "user", "degree"])
i = 0
while i < n_comm:
community_df = df[df["community"] == i]
community_length = len(community_df)
max_degree = max(community_df["degree"])
user_max_degree = community_df[community_df["degree"] == max_degree][
"user"
].reset_index(drop=True)
communities_leader = communities_leader.append(
{
"community": i,
"communitu_length": community_length,
"user": user_max_degree[0],
"degree": max_degree,
},
ignore_index=True,
)
i = i + 1
return communities_leader
""" END Communities detection """
""" START Betweenness multiprocessing calculation """
def chunks(l, n):
"""Divide a list of nodes `l` in `n` chunks"""
l_c = iter(l)
while 1:
x = tuple(itertools.islice(l_c, n))
if not x:
return
yield x
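# Hedged usage sketch (not part of the original module): chunks() yields
# tuples of up to `n` nodes; the last chunk may be shorter.
def _demo_chunks():
    assert list(chunks(range(10), 3)) == [(0, 1, 2), (3, 4, 5), (6, 7, 8), (9,)]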
def betweenness_centrality_parallel(G, processes=None):
"""Parallel betweenness centrality function"""
    p = Pool(processes=processes)  # if processes is None, use all available cores
node_divisor = len(p._pool) * 4
node_chunks = list(chunks(G.nodes(), int(G.order() / node_divisor)))
num_chunks = len(node_chunks)
bt_sc = p.starmap(
nx.betweenness_centrality_subset,
zip(
[G] * num_chunks,
node_chunks,
[list(G)] * num_chunks,
[True] * num_chunks,
[None] * num_chunks,
),
)
# Reduce the partial solutions
bt_c = bt_sc[0]
for bt in bt_sc[1:]:
for n in bt:
bt_c[n] += bt[n]
return pd.DataFrame.from_dict(bt_c, orient="Index")
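# Hedged usage sketch: runs the parallel computation on a small random graph
# and checks only the result's shape. Call it under `if __name__ == "__main__":`
# when multiprocessing uses the spawn start method (Windows, macOS).
def _demo_betweenness_parallel():
    G_demo = nx.gnp_random_graph(60, 0.1, seed=42, directed=True)
    df = betweenness_centrality_parallel(G_demo, processes=2)
    assert len(df) == G_demo.number_of_nodes()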
""" END Betweenness multiprocessing """
"""Launch Function"""
def launch_calc_info(output_path, filename="500"):
"""
    Calculate some useful graph info and properties for the module-level graph G.
The results are written in csv files into a given folder.
Args:
output_path (string): output files path where you want to save the results
filename (str, optional): name of output file to save. Defaults to "500".
Returns:
        bool: True if the execution succeeded, False otherwise
"""
# Do some calculations
try:
btw = betweenness_centrality_parallel(G)
# print(btw)
degree_centrality = get_in_degree_centrality(G)
eigenvector_centrality = get_eigenvector_centrality(G)
# communities = get_communities(G_undirect)
# print(communities)
# communities = append_communities_degree(communities)
# print(communities)
# User with max degree for each community
        # communities_leader = max_degree_community(communities)
# print(communities_leader)
except Exception as message:
        print(
            f"Impossible to calc some info about the graph, please check the code: {message}"
        )
return False
# Export the results to csv
try:
# Define the paths
btw_path = os.path.join(output_path, "betweenness_" + filename + ".csv")
dc_path = os.path.join(output_path, "degree_centrality_" + filename + ".csv")
ec_path = os.path.join(
output_path, "eigenvector_centrality_" + filename + ".csv"
)
btw.to_csv(btw_path)
degree_centrality.to_csv(dc_path)
eigenvector_centrality.to_csv(ec_path)
except Exception as message:
        print(
            f"Impossible to export the csv for the calculation infos, please check the path: {message}"
        )
return False
try:
# Random nodes
random_nodes = []
for i in range(10):
random_nodes.append(choice(list(G.nodes)))
print(random_nodes)
return True
except Exception as message:
        print(
            f"Impossible to check random nodes infos, please check the code: {message}"
        )
return False
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.db.models import Q
def leave_only_last_security_scan(apps, schema_editor):
SecurityScan = apps.get_model("security", "SecurityScan")
total_scans = SecurityScan.objects.count()
base_object_ids = set(
SecurityScan.objects.values_list('base_object', flat=True)
)
for base_object_id in base_object_ids:
last_scan_id = SecurityScan.objects.filter(
base_object_id=base_object_id
).latest("last_scan_date").id
SecurityScan.objects.filter(
Q(base_object_id=base_object_id) &
~Q(id=last_scan_id)
).delete()
print()
print("Total scans: {}".format(total_scans))
print("Deleted scans: {}".format(
total_scans - SecurityScan.objects.count()
))
def reverse_func(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('security', '0003_auto_20170110_1352'),
]
operations = [
migrations.RunPython(leave_only_last_security_scan, reverse_func),
]
|
from a10sdk.common.A10BaseClass import A10BaseClass
class SessionList(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param protocol: {"type": "string", "format": "string"}
:param inside_v6_address: {"type": "string", "format": "ipv6-address"}
:param inbound: {"type": "number", "format": "number"}
:param age: {"type": "string", "format": "string"}
:param cpu: {"type": "number", "format": "number"}
:param nat_address: {"type": "string", "format": "ipv4-address"}
:param nat_port: {"type": "number", "format": "number"}
:param flags: {"type": "string", "format": "string"}
:param nat_pool_name: {"type": "string", "format": "string"}
:param inside_port: {"type": "number", "format": "number"}
:param outbound: {"type": "number", "format": "number"}
:param inside_address: {"type": "string", "format": "ipv4-address"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "session-list"
self.DeviceProxy = ""
self.protocol = ""
self.inside_v6_address = ""
self.inbound = ""
self.age = ""
self.cpu = ""
self.nat_address = ""
self.nat_port = ""
self.flags = ""
self.nat_pool_name = ""
self.inside_port = ""
self.outbound = ""
self.inside_address = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class Oper(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param all_paritions: {"enum": ["true"], "type": "string", "format": "enum"}
:param partition: {"type": "string", "format": "string"}
:param session_list: {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"protocol": {"type": "string", "format": "string"}, "inside-v6-address": {"type": "string", "format": "ipv6-address"}, "inbound": {"type": "number", "format": "number"}, "age": {"type": "string", "format": "string"}, "cpu": {"type": "number", "format": "number"}, "nat-address": {"type": "string", "format": "ipv4-address"}, "nat-port": {"type": "number", "format": "number"}, "flags": {"type": "string", "format": "string"}, "nat-pool-name": {"type": "string", "format": "string"}, "inside-port": {"type": "number", "format": "number"}, "outbound": {"type": "number", "format": "number"}, "optional": true, "inside-address": {"type": "string", "format": "ipv4-address"}}}]}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "oper"
self.DeviceProxy = ""
self.all_paritions = ""
self.partition = ""
self.session_list = []
for keys, value in kwargs.items():
setattr(self,keys, value)
class FullConeSession(A10BaseClass):
"""Class Description::
Operational Status for the object full-cone-session.
Class full-cone-session supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/cgnv6/ds-lite/full-cone-session/oper`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "full-cone-session"
self.a10_url="/axapi/v3/cgnv6/ds-lite/full-cone-session/oper"
self.DeviceProxy = ""
self.oper = {}
for keys, value in kwargs.items():
setattr(self,keys, value)
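# Hedged usage sketch (no live device needed): keyword arguments become
# attributes, while the URL and key metadata are fixed by the class.
def _demo_full_cone_session():
    session = FullConeSession(oper={"partition": "shared"})
    assert session.a10_url == "/axapi/v3/cgnv6/ds-lite/full-cone-session/oper"
    assert session.oper == {"partition": "shared"}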
|
#-*- coding: utf-8 -*-
# For Flask-SQLAlchemy
# Use SqlAutocode to generate Flask-SQLAlchemy model definitions from the
# tables that already exist in the database
import os.path
import re
from sqlautocode.declarative import name2label
from flask.ext.sqlalchemy import SQLAlchemy
from .flask_factory import FlaskModelFactory
def no_prefix_wrapper(f, prefix=None):
def _name2label(name, schema=None):
if schema:
if name.startswith(schema+'.'):
name = '.'.join(name.split('.')[1:])
if prefix and name.startswith(prefix):
name = name[ len(prefix):]
label = str(''.join([s.capitalize() for s in
re.findall(r'([A-Z][a-z0-9]+|[a-z0-9]+|[A-Z0-9]+)', name)]))
return label
return _name2label
def gen_models_dir(app, models_dir):
    # Locate or create the models directory and its __init__.py file
if not models_dir:
app_root = app.config.get('APPLICATION_ROOT', '')
if not app_root:
app_root = os.path.dirname( os.path.dirname( os.path.realpath(__file__) ) )
models_dir = os.path.join(app_root, 'models')
if not os.path.exists(models_dir):
os.mkdir(models_dir)
init_file = os.path.join(models_dir, '__init__.py')
with open(init_file, 'wb') as fh:
fh.write('#-*- coding: utf-8 -*-\n')
return models_dir
def write_db_file(db_file):
    # Create the shared db definition file
with open(db_file, 'wb') as fh:
fh.write('#-*- coding: utf-8 -*-\n')
fh.write('\n')
fh.write('from flask.ext.sqlalchemy import SQLAlchemy\n')
fh.write('\n\n')
fh.write('db = SQLAlchemy()\n')
def write_schema_file(factory, schema_file, name='default'):
    # Create the schema definition file for one bind
with open(schema_file, 'wb') as fh:
fh.write("#-*- coding: utf-8 -*-\n")
fh.write('\n')
fh.write( repr(factory) )
fh.write('\n')
fh.write("if __name__ == '__main__':\n")
if name == 'default':
fh.write(" db.create_all(bind=None)\n")
else:
fh.write(" db.create_all(bind=['%s'])\n" % name)
def generate_models(app, models_dir=None):
db = SQLAlchemy(app)
conns = {
'default': app.config.get('SQLALCHEMY_DATABASE_URI') or {},
}
conns.update( app.config.get('SQLALCHEMY_BINDS') or {} )
models_dir = gen_models_dir(app, models_dir)
db_file = os.path.join(models_dir, 'db.py')
if not os.path.exists(db_file):
write_db_file(db_file)
for name, conn in conns.items():
if not conn:
continue
schema_file = os.path.join(models_dir, '%s.py' % name)
if not os.path.exists(schema_file):
factory = FlaskModelFactory(name, conn)
write_schema_file(factory, schema_file, name)
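# Hedged usage sketch (the app and connection URI are hypothetical; the
# flask.ext.* imports above imply a legacy Flask/SQLAlchemy stack):
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://user:pass@localhost/mydb'
#   generate_models(app)   # writes models/db.py plus one module per bind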
|
import asyncio
import functools
import threading
import time
def ttl_lru_cache(seconds: int, maxsize: int = 128, typed: bool = False):
def wrapper_cache(func):
wrapped_cache_func = functools.lru_cache(maxsize=maxsize, typed=typed)(func)
wrapped_cache_func.delta = seconds * 10 ** 9
wrapped_cache_func.expiration = time.monotonic_ns() + wrapped_cache_func.delta
@functools.wraps(wrapped_cache_func)
def wrapped_func(*args, **kwargs):
if not kwargs.pop('cache', True) or time.monotonic_ns() >= wrapped_cache_func.expiration:
wrapped_cache_func.cache_clear()
wrapped_cache_func.expiration = time.monotonic_ns() + wrapped_cache_func.delta
return wrapped_cache_func(*args, **kwargs)
wrapped_func.cache_info = wrapped_cache_func.cache_info
wrapped_func.cache_clear = wrapped_cache_func.cache_clear
return wrapped_func
return wrapper_cache
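# Hedged usage sketch (not part of the original module): ttl_lru_cache
# combines functools.lru_cache with a monotonic-clock expiry. Results are
# cached until `seconds` elapse, after which the whole cache is cleared;
# passing cache=False forces a refresh immediately.
def _demo_ttl_lru_cache():
    calls = []

    @ttl_lru_cache(seconds=60, maxsize=32)
    def slow_square(x):
        calls.append(x)
        return x * x

    assert slow_square(3) == 9
    assert slow_square(3) == 9      # served from the cache, no new call
    assert calls == [3]
    slow_square(3, cache=False)     # clears the cache and recomputes
    assert calls == [3, 3]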
class Cacheable:
def __init__(self, co):
self.co = co
self.done = False
self.result = None
self.lock = asyncio.Lock()
    def __await__(self):
        # 'with (yield from lock)' was removed in Python 3.9;
        # acquire and release the asyncio.Lock explicitly instead
        yield from self.lock.acquire().__await__()
        try:
            if self.done:
                return self.result
            self.result = yield from self.co.__await__()
            self.done = True
            return self.result
        finally:
            self.lock.release()
def async_cache(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
r = f(*args, **kwargs)
return Cacheable(r)
return wrapped
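# Hedged usage sketch: the Cacheable returned by an async_cache-decorated
# coroutine function can be awaited repeatedly; the coroutine body runs once.
def _demo_async_cache():
    calls = []

    @async_cache
    async def fetch_value():
        calls.append(1)
        return 42

    async def main():
        cached = fetch_value()       # a single Cacheable instance
        assert await cached == 42
        assert await cached == 42    # memoized, body not re-run
        assert calls == [1]

    asyncio.run(main())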
class ThreadSafeCacheable:
def __init__(self, co):
self.co = co
self.done = False
self.result = None
self.lock = threading.Lock()
def __await__(self):
while True:
if self.done:
return self.result
if self.lock.acquire(blocking=False):
self.result = yield from self.co.__await__()
self.done = True
return self.result
else:
yield from asyncio.sleep(0.005)
def thread_safe_async_cache(f):
    @functools.wraps(f)
    def wrapped(*args, **kwargs):
        r = f(*args, **kwargs)
        return ThreadSafeCacheable(r)  # was Cacheable: use the thread-safe variant
return wrapped
|
"""Utilities related to logging."""
import os
import sys
import logging
import datetime
import pathlib
import appdirs
logger = logging.getLogger(__name__)
class WarningLoggedError(Exception):
"""Special exception to raise when a warning message is logged.
Satpy has the tendency to log warnings when things are wrong, I'd like to
raise an exception when this happens.
"""
pass
def setup_main_handler(
mods=("fogtools", "typhon", "fogpy", "sattools", "fcitools"),
level=logging.DEBUG,
stderr=True,
filename=None):
"""Set up the main handlers.
By default, setups a stderr StreamHandler. Optionally also sets up a
FileHandler.
    Args:
        mods (Collection[str]): Modules to log for.
        level (logging level): At what level to log to stderr.
        stderr (bool): Whether to log to stderr at all.
        filename (str or None): If given, also log to this file.
    """
handlers = []
if stderr:
handlers.append(logging.StreamHandler(sys.stderr))
if filename:
handlers.append(logging.FileHandler(filename, encoding="utf-8"))
formatter = logging.Formatter(
"{asctime:s} {levelname:<8s} {name:s} "
"{module:s}.{funcName:s}:{lineno:d}: {message:s}",
style="{")
for handler in handlers:
handler.setFormatter(formatter)
for m in mods:
log = logging.getLogger(m)
log.setLevel(level)
for handler in handlers:
log.addHandler(handler)
# this class is based on
# https://docs.python.org/3.10/howto/logging-cookbook.html#using-a-context-manager-for-selective-logging # noqa: E501
class LoggingContext:
"""Context manager to temporarily log differently."""
def __init__(self, logger, level=None, handler=None, close=True):
"""Initiate logging context manager.
Pass the logger, log level, handler, and whether it should be closed at
the end or not.
"""
self.logger = logger
self.level = level
self.handler = handler
self.close = close
def __enter__(self):
"""Enter the context manager."""
if self.level is not None:
self.old_level = self.logger.level
self.logger.setLevel(self.level)
if self.handler:
self.logger.addHandler(self.handler)
def __exit__(self, et, ev, tb):
"""Exit the context manager."""
if self.level is not None:
self.logger.setLevel(self.old_level)
if self.handler:
self.logger.removeHandler(self.handler)
if self.handler and self.close:
self.handler.close()
# implicit return of None => don't swallow exceptions
class LogToTimeFile(LoggingContext):
"""Log to file within context manager.
This is intended to be used when files are processed, and a corresponding
logfile shall be written.
Example::
with log.LogToTimeFile(logfile):
...
"""
def __init__(self, logfile):
"""Initiate the logging context manager."""
logger = logging.getLogger() # root logger
self.logfile = logfile
handler = logging.FileHandler(logfile, encoding="utf-8")
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter(
"{asctime:s} {name:s} {levelname:s} "
"{processName:s}-{process:d} {threadName:s}-{thread:d} "
"{pathname:s}:{lineno:d} {funcName:s}: {message:s}",
style="{")
handler.setFormatter(formatter)
super().__init__(logger, level=logging.DEBUG, handler=handler,
close=True)
def __enter__(self):
"""Enter the logging to time file context manager."""
super().__enter__()
logger.info(f"Opening logfile at {self.logfile!s}")
return self
def __exit__(self, et, ev, tb):
"""Exit the logging to time file context manager."""
logger.info(f"Closing logfile at {self.logfile!s}")
super().__exit__(et, ev, tb)
def logfile(name, label, create_dir=True):
"""Return filename to log to.
I don't agree with appdirs.user_log_dir() which puts it in cache.
Logging is permanent, caching is not. Instead uses the NAS_DATA
environment variable as a base.
"""
now = datetime.datetime.now()
basedir = pathlib.Path(
os.environ.get(
"NAS_DATA",
appdirs.user_log_dir(opinion=False))
)
logfile = (basedir / "log" / name / f"{now:%Y-%m-%d}" /
f"{label:s}-{now:%Y%m%dT%H%M%S}.log")
if create_dir:
logfile.parent.mkdir(exist_ok=True, parents=True)
return logfile
class RaiseOnWarnHandler(logging.Handler):
"""Logging handler to raise exception when warning message logged."""
def emit(self, record):
"""Raise a warning if record level warning or worse."""
if record.levelno >= logging.WARNING:
raise WarningLoggedError(
"A warning was logged with message " +
record.getMessage())
def setup_error_handler(mods=["satpy"]):
"""Set up a handler that turns log warnings into exceptions.
By default only covers warnings issued by satpy.
"""
rowh = RaiseOnWarnHandler()
for m in mods:
log = logging.getLogger(m)
log.setLevel(logging.DEBUG)
log.addHandler(rowh)
class RaiseOnWarnContext(LoggingContext):
"""Context manager to turn logged warnings into exceptions."""
def __init__(self, logger):
"""Initiate the context manager."""
rowh = RaiseOnWarnHandler()
super().__init__(logger, handler=rowh)
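# A short usage sketch: the "demo"/"run1" names are illustrative. Every log
# record emitted inside the context ends up in a timestamped file under the
# directory chosen by logfile().
def _demo_log_to_file():
    target = logfile("demo", "run1")
    with LogToTimeFile(target):
        logging.getLogger("demo").warning("something noteworthy happened")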
|
from typing import Optional
from interface.iapp import IApp
from interface.igossipmanager import IGossipManager
class GossipManager(IGossipManager):
def __init__(self):
self.app: Optional[IApp] = None
def set_app(self, app: IApp):
self.app = app
def start(self):
"""Starts the runnable object."""
pass
def stop(self):
"""Stops the runnable object."""
pass
|
from typing import List
from boa3.builtin import public
@public
def Main(operation: str, args: List[int]) -> int:
if len(args) < 2:
return 0
a: int = args[0]
b: int = args[1]
c: int
if a < b:
c = Add(a, b)
elif a <= 0 and b <= 0:
c = Sub(a, b)
else:
c = 0
for x in [a, b, Add(a, b), Sub(a, b)]:
c += a + b
return c
def Add(a: int, b: int) -> int:
return a + b
@public
def Sub(a: int, b: int) -> int:
return a - b
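# Tracing Main in plain Python (outside the Neo VM), with illustrative inputs:
# for a=2, b=3 the a < b branch sets c = Add(2, 3) = 5, then the loop adds
# a + b = 5 once per element of the four-element list, so Main returns 25.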
|
'''
The Robot Model package.
A set of classes designed to represent the main aspects of the rigid-body-dynamics
model of an articulated robot, such as connectivity, numbering scheme of the
links, attached frames, geometrical measurements.
''' |
from typing import List

class Solution:
def numMagicSquaresInside(self, grid: List[List[int]]) -> int:
def all_vals_valid(row, col):
numbers = set()
for i in range(row, row + 3):
for j in range(col, col + 3):
if 1 > grid[i][j] or grid[i][j] > 9 or grid[i][j] in numbers:
return False
numbers.add(grid[i][j])
return True
def rows_check(row, col):
return sum(grid[row][col:col + 3]) == sum(grid[row + 1][col:col + 3]) == sum(grid[row + 2][col:col + 3])
        def cols_check(row, col):
            # Sum each of the three columns in a single pass.
            col_sums = [0, 0, 0]
            for i in range(row, row + 3):
                for d in range(3):
                    col_sums[d] += grid[i][col + d]
            return col_sums[0] == col_sums[1] == col_sums[2]
def diags_check(row, col):
diag_sum = grid[row][col] + grid[row + 1][col + 1] + grid[row + 2][col + 2]
antidiag_sum = grid[row][col + 2] + grid[row + 1][col + 1] + grid[row + 2][col]
return diag_sum == antidiag_sum
rows = len(grid)
cols = len(grid[0])
ans = 0
if rows < 3 or cols < 3:
return 0
for i in range(rows - 2):
for j in range(cols - 2):
if all_vals_valid(i, j):
if rows_check(i, j) and cols_check(i, j) and diags_check(i, j):
ans += 1
return ans
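# Quick sanity check on the classic example: only the left 3x3 window forms a
# magic square, so the expected answer is 1.
if __name__ == "__main__":
    demo_grid = [[4, 3, 8, 4], [9, 5, 1, 9], [2, 7, 6, 2]]
    print(Solution().numMagicSquaresInside(demo_grid))  # -> 1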
|
# coding: utf-8
from Crypto.Cipher import AES
import base64
import requests
import json
headers = {
'Cookie': 'appver=1.5.0.75771;',
'Referer': 'http://music.163.com/',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'
}
# first_param = "{rid:\"\", offset:\"0\", total:\"true\", limit:\"20\", csrf_token:\"\"}"
first_param = "{\"ids\":\"[151619]\",\"br\":128000,\"csrf_token\":\"\"}"
second_param = "010001"
third_param = "00e0b509f6259df8642dbc35662901477df22677ec152b5ff68ace615bb7b725152b3ab17a876aea8a5aa76d2e417629ec4ee341f56135fccf695280104e0312ecbda92557c93870114af6c9d05c4f7f0c3685b7a46bee255932575cce10b424d813cfe4875d3e82047b97ddef52741d546b8e289dc6935b3ece0462db0a22b8e7"
fourth_param = "0CoJUm6Qyw8W8jud"
def get_params():
iv = "0102030405060708"
    first_key = fourth_param
second_key = 16 * 'F'
h_encText = AES_encrypt(first_param, first_key, iv)
h_encText = AES_encrypt(h_encText, second_key, iv)
return h_encText
def get_encSecKey():
encSecKey = "257348aecb5e556c066de214e531faadd1c55d814f9be95fd06d6bff9f4c7a41f831f6394d5a3fd2e3881736d94a02ca919d952872e7d0a50ebfa1769a7a62d512f5f1ca21aec60bc3819a9c3ffca5eca9a0dba6d6f7249b06f5965ecfff3695b54e1c28f3f624750ed39e7de08fc8493242e26dbc4484a01c76f739e135637c"
return encSecKey
def AES_encrypt(text, key, iv):
    # PKCS#7-style padding to a 16-byte block boundary.
    pad = 16 - len(text) % 16
    text = text + pad * chr(pad)
    # PyCryptodome expects bytes for key, IV, and plaintext.
    encryptor = AES.new(key.encode('utf-8'), AES.MODE_CBC, iv.encode('utf-8'))
    encrypt_text = encryptor.encrypt(text.encode('utf-8'))
    return base64.b64encode(encrypt_text).decode('utf-8')
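# A small sanity sketch (key and IV below are illustrative): the same
# plaintext under the same key/IV always produces the same ciphertext.
def _demo_aes_encrypt():
    ct1 = AES_encrypt('{"demo": true}', 16 * 'F', "0102030405060708")
    ct2 = AES_encrypt('{"demo": true}', 16 * 'F', "0102030405060708")
    assert ct1 == ct2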
def get_json(url, params, encSecKey):
data = {
"params": params,
"encSecKey": encSecKey
}
response = requests.post(url, headers=headers, data=data)
return response.content
url = 'https://music.163.com/weapi/song/enhance/player/url?csrf_token='
params = get_params()
encSecKey = get_encSecKey()
json_text = get_json(url, params, encSecKey)
json_dict = json.loads(json_text)
print(json_dict)
|
import math
import multiprocessing
import os
import queue
import time
from collections import defaultdict
from multiprocessing import shared_memory
import copy
import numpy as np
from scipy.signal import fftconvolve
from Implementations.helpers.Helper import toNumbers, ListToPolynomial
from Implementations.FasterSubsetSum.RandomizedBase import NearLinearBase
def partitionSetIntoKGenerator(Z, k):
k = math.ceil(k)
partition = np.zeros((k, len(Z)), dtype=np.dtype('u1')) # Otherwise we use too much memory.
listUsed = set()
for i in np.nonzero(Z)[0][1:]: # Ignore 0 component with 1:
goesTo = np.random.randint(0, k)
partition[goesTo][i] = 1
partition[goesTo][0] = 1
listUsed.add(goesTo)
for x in listUsed:
yield partition[x][:max(np.nonzero(partition[x])[0]) + 1]
def partitionSetIntoKRegularNumbers(Z, k):
k = math.ceil(k)
partition = defaultdict(list)
listUsed = set()
    for i in Z:  # distribute each element into one of k buckets at random
goesTo = np.random.randint(0, k)
partition[goesTo].append(i)
listUsed.add(goesTo)
return [partition[x] for x in listUsed]
def sumSet(A, B, threshold):
eps = 0.0001 # account for floating error
AsumsetB = fftconvolve(A, B)
return np.array(np.select([AsumsetB[:int(threshold + 1)] > eps], [1]), dtype=np.dtype('u1'))
def roundToPowerOf2(m):
return pow(2, math.ceil(math.log2(m)))
class ColorCodingWorker(multiprocessing.Process):
def __init__(self, task_queue, result_queue, threads):
multiprocessing.Process.__init__(self)
self.task_queue = task_queue
self.result_queue = result_queue
self.threads = threads
def run(self):
proc_name = self.name
tasksRun = 0
while True:
next_task = self.task_queue.get()
if next_task is None:
# Poison pill means shutdown
# print('%s: Exiting' % proc_name)
# print(combineTasksDone)
self.task_queue.task_done()
print(tasksRun)
break
# print('%s: %s' % (proc_name, next_task))
if isinstance(next_task, ColorCodingTask):
next_task(self.task_queue)
self.task_queue.task_done()
else:
start = time.time()
result = next_task()
end = time.time()
tasksRun += 1
self.result_queue.put(result)
self.task_queue.task_done()
return
class ColorCodingLayerWorker(multiprocessing.Process):
def __init__(self, task_queue, color_queue, result_que, shr_name, dim):
multiprocessing.Process.__init__(self)
self.task_queue = task_queue
self.color_queue = color_queue
self.results_que = result_que
self.shr_name = shr_name
self.dim = dim
def run(self):
proc_name = self.name
existing_shm = shared_memory.SharedMemory(name=self.shr_name)
np_array = np.ndarray(self.dim, dtype=np.int64, buffer=existing_shm.buf)
while True:
next_task = self.task_queue.get()
if next_task is None:
# Poison pill means shutdown
# print('%s: Exiting' % proc_name)
existing_shm.close()
existing_shm.unlink()
self.task_queue.task_done()
break
# mp_array, np_array = self.shared_memory
            # Take this task's slice of the shared-memory array (a view, not a copy)
vals = np_array[next_task.start:next_task.end]
# print('%s: %s' % (proc_name, next_task))
next_task(vals, self.color_queue)
# print('%s: solved %s in %d' % (proc_name, next_task, end - start))
self.task_queue.task_done()
return
class CombineTask(object):
def __init__(self, Z, t, layer, m, j):
self.Z = Z
self.t = t
self.layer = layer
self.m = m
self.j = j
def __call__(self):
start = time.time()
if len(self.Z) == 0:
return Result(self.layer, self.j, self.m, [0])
ans = ListToPolynomial(self.Z[0])
for i in range(1, len(self.Z)):
if len(self.Z[i]) == 0:
continue
ans = sumSet(ans, ListToPolynomial(self.Z[i]), self.t)
end = time.time()
if self.layer == 5:
print('Solved %s in %f' % (self, end - start))
return Result(self.layer, self.j, self.m, toNumbers(ans))
def __str__(self):
return 'CombineTask %d' % self.layer
class ColorCodingTask(object):
def __init__(self, repetitions, Z, t, k, delta, threads, layer, j=None, m=None):
self.repetitions = repetitions
self.Z = Z
self.t = t
self.k = k
self.delta = delta
self.threads = threads
self.layer = layer
self.j = j
self.m = m
def __call__(self, combine_que):
repetitions = self.repetitions
for j in range(0, math.ceil(repetitions)):
partition = partitionSetIntoKRegularNumbers(self.Z, self.k * self.k) # max(int(k*k//2), 2))
if len(partition) < 20: # Then do the work ourselves.
combine_que.put(CombineTask(partition, self.t, self.layer, self.m, self.j))
else: # Distribute the workload
partitionInto = 2
threadPerWork = math.ceil(len(partition) / partitionInto)
for threadPartition in range(0, partitionInto):
combine_que.put(CombineTask(partition[threadPartition * threadPerWork: min(
(threadPartition + 1) * threadPerWork, len(partition))], self.t, self.layer, self.m, self.j))
def __str__(self):
return 'ColorCoding %d' % self.layer
class Result(object):
def __init__(self, layer, j, m, result):
self.layer = layer
self.j = j
self.m = m
self.result = result
class ColorCodingLayerTask(object):
def __init__(self, start, end, i, t, l, delta, threads):
self.start = start
self.end = end
self.i = i
self.t = t
self.l = l
self.delta = delta
self.threads = threads
def __call__(self, Z, color_coding_queue):
divisor = math.log2(self.l / self.delta)
if self.l < divisor:
# color_coding_queue.put
# TODO: Add data to identify this solution
color_coding_queue.put(ColorCodingTask(1, Z, self.t, self.l, self.delta, self.threads, self.i))
return
# return color_coding(1, Z, self.t, self.l, self.delta)
m = roundToPowerOf2(self.l / divisor)
partition = partitionSetIntoKRegularNumbers(Z, m)
m = roundToPowerOf2(len(partition))
while len(partition) < m:
partition.append([0])
gamma = 6 * divisor
if gamma > self.l:
gamma = self.l
t = self.t
if 2*gamma*t/self.l <= t:
t = 2 * gamma * t / self.l
# Put color coding jobs available on the queue
for j in range(m):
# TODO: Add data to identify this solution
color_coding_queue.put(
ColorCodingTask(1, partition[j], t, round(gamma), self.delta / self.l, self.threads, self.i, j, m)
)
return
def __str__(self):
return 'ColorCodingLayer %d' % self.i
def create_shared_block(data):
a = copy.deepcopy(data) # Start with an existing NumPy array
shm = shared_memory.SharedMemory(create=True, size=a.nbytes)
# # Now create a NumPy array backed by shared memory
np_array = np.ndarray(a.shape, dtype=np.int64, buffer=shm.buf)
np_array[:] = a[:] # Copy the original data into shared memory
return shm, np_array
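# A minimal usage sketch for create_shared_block: a consumer process would
# re-attach by shm.name exactly as ColorCodingLayerWorker.run() does above.
def _demo_shared_block():
    data = np.arange(8, dtype=np.int64)
    shm, arr = create_shared_block(data)
    view = np.ndarray(arr.shape, dtype=np.int64, buffer=shm.buf)
    assert (view == data).all()
    shm.close()
    shm.unlink()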
class RandomizedMultiThreadedVer2(NearLinearBase):
def __init__(self, debug, repetitions, threads):
super().__init__(debug, repetitions)
self.threads = threads
self.label = '%d threads' % threads
def prioritize(self, Z, l, delta):
divisor = math.log2(l / delta)
if l < divisor:
return 0
if len(Z) <= 10:
return 0
return len(Z) * math.log2(len(Z)) * divisor
def partitionIntoLayers(self, Z, n, t):
Zi = [Z[(t / pow(2, i) <= Z) & (Z < t / pow(2, i - 1))] for i in
range(1, math.ceil(math.log2(n)))]
Zi.append(Z[(0 <= Z) & (Z < t / pow(2, math.ceil(math.log2(n)) - 1))])
if self.debug:
self.layerInformation = list()
for i in range(len(Zi)):
self.layerInformation.append((len(Zi[i]), t / pow(2, i)))
self.layerInformation.append((len(Zi[len(Zi) - 1]), 0))
for i in range(len(Zi)):
if len(Zi[i]) == 0:
Zi[i] = np.array([0])
return Zi
def fasterSubsetSum(self, Z, t, delta):
n = len(Z)
self.n = n
Z = np.array(Z)
Zi = self.partitionIntoLayers(Z, n, t)
# partition_with_index = [(index, value) for index, value in enumerate(Zi)]
# partition_with_index.sort(key=lambda x: self.prioritize(x[1], math.pow(2, x[0] + 1) - 1,
# delta / (math.ceil(math.log2(n)))), reverse=True)
# partition_with_index = list(map(itemgetter(0), partition_with_index))
# partition_with_index.remove(0)
# Zi = np.array(list(map(ListToPolynomial, Zi)))
S = ListToPolynomial(Zi[0])
S[0] = 1
if len(Zi) == 1:
S = self.ColorCodingLayer(S, t, len(Z), delta / (math.ceil(math.log2(n))))
return toNumbers(S)
# Each process will get 'chunksize' nums and a queue to put his out
# dict into
color_coding_results = multiprocessing.Queue()
layer_queue = multiprocessing.JoinableQueue()
color_queue = multiprocessing.JoinableQueue()
# Align all partitions into a single layer (to reduce overhead of copying)
# Make all layers shared across memory
layerToInterval = []
nextIndex = 0
allVals = []
for value in Zi:
layerToInterval.append((nextIndex, nextIndex + len(value)))
nextIndex = nextIndex + len(value)
# Compose all partitions into one big list
allVals = allVals + list(value)
allVals = np.array(allVals, dtype=np.int64)
shr, np_array = create_shared_block(allVals)
color_workers = [ColorCodingWorker(color_queue, color_coding_results, self.threads)
for process in range(self.threads)]
layer_worker = ColorCodingLayerWorker(layer_queue, color_queue, color_coding_results, shr.name, allVals.shape)
for w in color_workers:
w.start()
layer_worker.start()
numJobs = 0
        solve_start = time.time()
for i in range(1, len(Zi) // 2): # We take the strongest layers, and then solve the easy layers.
numJobs += 1
interval = layerToInterval[i]
start = interval[0]
end = interval[1]
layer_queue.put(
ColorCodingLayerTask(start, end, i + 1, t, pow(2, i + 1) - 1, delta / (math.ceil(math.log2(n))),
self.threads))
for i in range(len(Zi) // 2, len(Zi)):
z = ListToPolynomial(Zi[i])
if len(z) > 1:
Si = self.ColorCodingLayer(z, t, pow(2, i + 1) - 1, delta / (math.ceil(math.log2(n))),
high=pow(2, i) if i != len(Zi) - 1 else (2 ** i, "Last is zero"))
S = self.sumSet(Si, S, t)
# Wait for all layer codings and color codings to complete
layer_queue.join()
color_queue.join()
layer_queue.put(None)
layer_queue.join()
for process in range(self.threads):
color_queue.put(None)
color_queue.join()
        solve_end = time.time()
        print('Time to compute all solutions:', solve_end - solve_start)
results = list()
start = time.time()
while True:
try:
results.append(color_coding_results.get(timeout=2))
except queue.Empty:
break
print('result length:', len(results))
combineAndAppendToS = defaultdict()
binaryTreeSumWay = defaultdict(lambda: defaultdict(list))
for result in results:
# Either, it belongs to a sumset from color coding? So should be combined with existing sumsets.
if result.m is None:
if result.layer not in combineAndAppendToS:
combineAndAppendToS[result.layer] = ListToPolynomial(result.result)
else:
combineAndAppendToS[result.layer] = self.sumSet(ListToPolynomial(result.result),
combineAndAppendToS[result.layer], t)
else:
                if result.j not in binaryTreeSumWay[result.layer]:
binaryTreeSumWay[result.layer][result.j] = ListToPolynomial(result.result)
else:
binaryTreeSumWay[result.layer][result.j] = self.sumSet(binaryTreeSumWay[result.layer][result.j],
ListToPolynomial(result.result), t)
for binaryTreeComputation in binaryTreeSumWay.values():
m = len(binaryTreeComputation)
for h in range(1, int(math.log2(m))):
threshold = t
for j in range(1, int(m / pow(2, h)) + 1):
binaryTreeComputation[j - 1] = self.sumSet(binaryTreeComputation[2 * j - 1 - 1],
binaryTreeComputation[2 * j - 1], threshold)
            S = self.sumSet(S, binaryTreeComputation[0], t)  # fold the tree root into S
for color_coding_list in combineAndAppendToS.values():
S = self.sumSet(S, color_coding_list, t)
end = time.time()
print('Time to combine all solutions:', end - start)
del layer_queue
del color_queue
for worker in color_workers:
del worker
del color_workers
del layer_worker
del color_coding_results
# while numJobs:
# S = sumSet(S, results.get(), t)
# numJobs -= 1
# for p in procs:
# S = sumSet(S, out_q.get(), t)
return toNumbers(S)
|
#
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import concurrent.futures
import glob
import os
from distutils.spawn import find_executable
import cudf
import pytest
from common.parsers.benchmark_parsers import create_bench_result
from common.utils import _run_query
import nvtabular as nvt
import tests.conftest as test_utils
TEST_N_ROWS = 1024
MODEL_DIR = "/model/models/"
DATA_DIR = "/raid/data/"
DATA_DIR_MOVIELENS = "/raid/data/movielens/data/"
TRITON_SERVER_PATH = find_executable("tritonserver")
TRITON_DEVICE_ID = "1"
# Update TEST_N_ROWS param in test_nvt_tf_training.py to test larger sizes
@pytest.mark.parametrize("n_rows", [1024, 1000, 64, 35, 16, 5])
@pytest.mark.parametrize("err_tol", [0.00001])
def test_nvt_tf_movielens_inference_triton(asv_db, bench_info, n_rows, err_tol):
with test_utils.run_triton_server(
os.path.expanduser(MODEL_DIR),
"movielens",
TRITON_SERVER_PATH,
TRITON_DEVICE_ID,
"tensorflow",
) as client:
diff, run_time = _run_movielens_query(client, n_rows)
assert (diff < err_tol).all()
benchmark_results = []
result = create_bench_result(
"test_nvt_tf_movielens_inference_triton", [("n_rows", n_rows)], run_time, "datetime"
)
benchmark_results.append(result)
# send_results(asv_db, bench_info, benchmark_results)
@pytest.mark.parametrize("n_rows", [[1024, 1000, 35, 16]])
@pytest.mark.parametrize("err_tol", [0.00001])
def test_nvt_tf_movielens_inference_triton_mt(asv_db, bench_info, n_rows, err_tol):
futures = []
with test_utils.run_triton_server(
os.path.expanduser(MODEL_DIR),
"movielens",
TRITON_SERVER_PATH,
TRITON_DEVICE_ID,
"tensorflow",
) as client:
with concurrent.futures.ThreadPoolExecutor() as executor:
for n_row in n_rows:
futures.append(executor.submit(_run_movielens_query, client, n_row))
for future in concurrent.futures.as_completed(futures):
diff, run_time = future.result()
assert (diff < err_tol).all()
benchmark_results = []
result = create_bench_result(
"test_nvt_tf_movielens_inference_triton_mt", [("n_rows", n_rows)], run_time, "datetime"
)
benchmark_results.append(result)
# send_results(asv_db, bench_info, benchmark_results)
@pytest.mark.skipif(TEST_N_ROWS is None, reason="Requires TEST_N_ROWS")
@pytest.mark.skipif(MODEL_DIR is None, reason="Requires MODEL_DIR")
@pytest.mark.skipif(DATA_DIR is None, reason="Requires DATA_DIR")
def test_nvt_tf_movielens_inference():
from tensorflow import keras
from nvtabular.loader.tensorflow import KerasSequenceLoader
workflow_path = os.path.join(os.path.expanduser(MODEL_DIR), "movielens_nvt/1/workflow")
model_path = os.path.join(os.path.expanduser(MODEL_DIR), "movielens_tf/1/model.savedmodel")
data_path = os.path.join(os.path.expanduser(DATA_DIR), "movielens/data/valid.parquet")
output_dir = os.path.join(os.path.expanduser(DATA_DIR), "movielens/")
workflow_output_test_file_name = "test_inference_movielens_data.csv"
workflow_output_test_trans_file_name = "test_inference_movielens_data_trans.parquet"
prediction_file_name = "movielens_predictions.csv"
workflow = nvt.Workflow.load(workflow_path)
sample_data = cudf.read_parquet(data_path, nrows=TEST_N_ROWS)
sample_data.to_csv(os.path.join(output_dir, workflow_output_test_file_name))
sample_data_trans = nvt.workflow.workflow._transform_partition(
sample_data, [workflow.output_node]
)
sample_data_trans.to_parquet(os.path.join(output_dir, workflow_output_test_trans_file_name))
CATEGORICAL_COLUMNS = ["movieId", "userId"] # Single-hot
CATEGORICAL_MH_COLUMNS = ["genres"] # Multi-hot
NUMERIC_COLUMNS = []
test_data_trans_path = glob.glob(os.path.join(output_dir, workflow_output_test_trans_file_name))
train_dataset = KerasSequenceLoader(
test_data_trans_path, # you could also use a glob pattern
batch_size=TEST_N_ROWS,
label_names=[],
cat_names=CATEGORICAL_COLUMNS + CATEGORICAL_MH_COLUMNS,
cont_names=NUMERIC_COLUMNS,
engine="parquet",
shuffle=False,
buffer_size=0.06, # how many batches to load at once
parts_per_chunk=1,
)
tf_model = keras.models.load_model(model_path)
pred = tf_model.predict(train_dataset)
cudf_pred = cudf.DataFrame(pred)
cudf_pred.to_csv(os.path.join(output_dir, prediction_file_name))
os.remove(os.path.join(output_dir, workflow_output_test_trans_file_name))
@pytest.mark.parametrize("n_rows", [1024, 1000, 64, 35, 16, 5])
@pytest.mark.parametrize("err_tol", [0.00001])
def test_nvt_tf_rossmann_inference_triton(asv_db, bench_info, n_rows, err_tol):
with test_utils.run_triton_server(
os.path.expanduser(MODEL_DIR),
"rossmann",
TRITON_SERVER_PATH,
TRITON_DEVICE_ID,
"tensorflow",
) as client:
diff, run_time = _run_rossmann_query(client, n_rows)
assert (diff < err_tol).all()
benchmark_results = []
result = create_bench_result(
"test_nvt_tf_rossmann_inference_triton", [("n_rows", n_rows)], run_time, "datetime"
)
benchmark_results.append(result)
# send_results(asv_db, bench_info, benchmark_results)
@pytest.mark.parametrize("n_rows", [[1024, 1000, 35, 16, 5]])
@pytest.mark.parametrize("err_tol", [0.00001])
def test_nvt_tf_rossmann_inference_triton_mt(asv_db, bench_info, n_rows, err_tol):
futures = []
with test_utils.run_triton_server(
os.path.expanduser(MODEL_DIR),
"rossmann",
TRITON_SERVER_PATH,
TRITON_DEVICE_ID,
"tensorflow",
) as client:
with concurrent.futures.ThreadPoolExecutor() as executor:
for n_row in n_rows:
futures.append(executor.submit(_run_rossmann_query, client, n_row))
for future in concurrent.futures.as_completed(futures):
diff, run_time = future.result()
assert (diff < err_tol).all()
benchmark_results = []
result = create_bench_result(
"test_nvt_tf_rossmann_inference_triton_mt", [("n_rows", n_rows)], run_time, "datetime"
)
benchmark_results.append(result)
# send_results(asv_db, bench_info, benchmark_results)
@pytest.mark.skipif(TEST_N_ROWS is None, reason="Requires TEST_N_ROWS")
@pytest.mark.skipif(MODEL_DIR is None, reason="Requires MODEL_DIR")
@pytest.mark.skipif(DATA_DIR is None, reason="Requires DATA_DIR")
def test_nvt_tf_rossmann_inference():
import tensorflow as tf
from tensorflow import keras
from nvtabular.loader.tensorflow import KerasSequenceLoader
workflow_path = os.path.join(os.path.expanduser(MODEL_DIR), "rossmann_nvt/1/workflow")
model_path = os.path.join(os.path.expanduser(MODEL_DIR), "rossmann_tf/1/model.savedmodel")
data_path = os.path.join(os.path.expanduser(DATA_DIR), "rossman/input/valid.csv")
output_dir = os.path.join(os.path.expanduser(DATA_DIR), "rossman/")
workflow_output_test_file_name = "test_inference_rossmann_data.csv"
workflow_output_test_trans_file_name = "test_inference_rossmann_data_trans.parquet"
prediction_file_name = "rossmann_predictions.csv"
workflow = nvt.Workflow.load(workflow_path)
sample_data = cudf.read_csv(data_path, nrows=TEST_N_ROWS)
sample_data.to_csv(os.path.join(output_dir, workflow_output_test_file_name))
sample_data_trans = nvt.workflow.workflow._transform_partition(
sample_data, [workflow.output_node]
)
sample_data_trans.to_parquet(os.path.join(output_dir, workflow_output_test_trans_file_name))
CATEGORICAL_COLUMNS = [
"Store",
"DayOfWeek",
"Year",
"Month",
"Day",
"StateHoliday",
"CompetitionMonthsOpen",
"Promo2Weeks",
"StoreType",
"Assortment",
"PromoInterval",
"CompetitionOpenSinceYear",
"Promo2SinceYear",
"State",
"Week",
"Events",
"Promo_fw",
"Promo_bw",
"StateHoliday_fw",
"StateHoliday_bw",
"SchoolHoliday_fw",
"SchoolHoliday_bw",
]
CONTINUOUS_COLUMNS = [
"CompetitionDistance",
"Max_TemperatureC",
"Mean_TemperatureC",
"Min_TemperatureC",
"Max_Humidity",
"Mean_Humidity",
"Min_Humidity",
"Max_Wind_SpeedKm_h",
"Mean_Wind_SpeedKm_h",
"CloudCover",
"trend",
"trend_DE",
"AfterStateHoliday",
"BeforeStateHoliday",
"Promo",
"SchoolHoliday",
]
test_data_trans_path = glob.glob(os.path.join(output_dir, workflow_output_test_trans_file_name))
EMBEDDING_TABLE_SHAPES = nvt.ops.get_embedding_sizes(workflow)
categorical_columns = [
_make_categorical_embedding_column(name, *EMBEDDING_TABLE_SHAPES[name])
for name in CATEGORICAL_COLUMNS
]
continuous_columns = [
tf.feature_column.numeric_column(name, (1,)) for name in CONTINUOUS_COLUMNS
]
train_dataset = KerasSequenceLoader(
test_data_trans_path, # you could also use a glob pattern
feature_columns=categorical_columns + continuous_columns,
batch_size=TEST_N_ROWS,
label_names=[],
shuffle=False,
buffer_size=0.06, # amount of data, as a fraction of GPU memory, to load at once
)
tf_model = keras.models.load_model(model_path, custom_objects={"rmspe_tf": rmspe_tf})
pred = tf_model.predict(train_dataset)
cudf_pred = cudf.DataFrame(pred)
cudf_pred.to_csv(os.path.join(output_dir, prediction_file_name))
os.remove(os.path.join(output_dir, workflow_output_test_trans_file_name))
def _run_movielens_query(client, n_rows):
workflow_path = os.path.join(os.path.expanduser(MODEL_DIR), "movielens_nvt/1/workflow")
data_path = os.path.join(
os.path.expanduser(DATA_DIR), "movielens/test_inference_movielens_data.csv"
)
actual_output_filename = os.path.join(
os.path.expanduser(DATA_DIR), "movielens/movielens_predictions.csv"
)
input_col_names = ["movieId", "userId"]
return _run_query(
client,
n_rows,
"movielens",
workflow_path,
data_path,
actual_output_filename,
"output",
input_col_names,
)
def _run_rossmann_query(client, n_rows):
workflow_path = os.path.join(os.path.expanduser(MODEL_DIR), "rossmann_nvt/1/workflow")
data_path = os.path.join(
os.path.expanduser(DATA_DIR), "rossman/test_inference_rossmann_data.csv"
)
actual_output_filename = os.path.join(
os.path.expanduser(DATA_DIR), "rossman/rossmann_predictions.csv"
)
return _run_query(
client,
n_rows,
"rossmann",
workflow_path,
data_path,
actual_output_filename,
"tf.math.multiply_1",
)
def rmspe_tf(y_true, y_pred):
import tensorflow as tf
y_true = tf.exp(y_true) - 1
y_pred = tf.exp(y_pred) - 1
percent_error = (y_true - y_pred) / y_true
return tf.sqrt(tf.reduce_mean(percent_error ** 2))
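# A quick numeric sketch of rmspe_tf (values illustrative): identical
# predictions give zero error as long as exp(y) - 1 stays nonzero.
def _demo_rmspe():
    import tensorflow as tf
    y = tf.constant([1.0, 2.0])
    return float(rmspe_tf(y, y))  # expected 0.0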
def _make_categorical_embedding_column(name, dictionary_size, embedding_dim):
import tensorflow as tf
return tf.feature_column.embedding_column(
tf.feature_column.categorical_column_with_identity(name, dictionary_size), embedding_dim
)
|
# python example 2
# http://www.steves-internet-guide.com/python-mqtt-publish-subscribe/
import time

import paho.mqtt.client as paho

# broker = "broker.hivemq.com"  # alternative public broker
broker = "iot.eclipse.org"
#define callback
def on_message(client, userdata, message):
time.sleep(1)
print("received message =",str(message.payload.decode("utf-8")))
client= paho.Client("client-001") #create client object client1.on_publish = on_publish #assign function to callback client1.connect(broker,port) #establish connection client1.publish("house/bulb1","on")
# bind function to callback
client.on_message = on_message
print("connecting to broker ",broker)
client.connect(broker)#connect
client.loop_start() #start loop to process received messages
print("subscribing ")
client.subscribe("house/bulb1")#subscribe
time.sleep(2)
print("publishing ")
client.publish("house/bulb1","on")#publish
time.sleep(4)
client.disconnect() #disconnect
client.loop_stop() #stop loop
|
'''OpenGL extension APPLE.texture_max_level
This module customises the behaviour of the
OpenGL.raw.GLES2.APPLE.texture_max_level to provide a more
Python-friendly API
Overview (from the spec)
This extension allows an application to specify the maximum (coarsest)
mipmap level that may be selected for the specified texture. This maximum
level is also used to determine which mip levels are considered when
determining texture completeness.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/APPLE/texture_max_level.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.APPLE.texture_max_level import *
from OpenGL.raw.GLES2.APPLE.texture_max_level import _EXTENSION_NAME
def glInitTextureMaxLevelAPPLE():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION |
# coding: utf-8
# flake8: noqa
from __future__ import absolute_import
# import models into model package
from openapi_server.models.create_docker_submission_request import CreateDockerSubmissionRequest
from openapi_server.models.create_file_submission_request import CreateFileSubmissionRequest
from openapi_server.models.create_queue_request import CreateQueueRequest
from openapi_server.models.create_queue_response import CreateQueueResponse
from openapi_server.models.create_submission_request import CreateSubmissionRequest
from openapi_server.models.create_submission_response import CreateSubmissionResponse
from openapi_server.models.create_workflow_submission_request import CreateWorkflowSubmissionRequest
from openapi_server.models.docker_submission import DockerSubmission
from openapi_server.models.docker_submission_docker import DockerSubmissionDocker
from openapi_server.models.error import Error
from openapi_server.models.file_submission import FileSubmission
from openapi_server.models.health_check import HealthCheck
from openapi_server.models.list_queue_response import ListQueueResponse
from openapi_server.models.list_queue_response_all_of import ListQueueResponseAllOf
from openapi_server.models.list_response_metadata import ListResponseMetadata
from openapi_server.models.list_response_metadata_links import ListResponseMetadataLinks
from openapi_server.models.list_submission_response import ListSubmissionResponse
from openapi_server.models.list_submission_response_all_of import ListSubmissionResponseAllOf
from openapi_server.models.queue import Queue
from openapi_server.models.submission import Submission
from openapi_server.models.submission_status import SubmissionStatus
from openapi_server.models.workflow_submission import WorkflowSubmission
|
"""
Obtains data for a side pairing.
"""
from click import Choice, command, option
from logging import DEBUG, basicConfig, debug, info

import pandas as pd
@command()
@option("--input", required=True, help="the Feather file to read input data from")
@option("--output", required=True, help="the Feather file to write output to")
@option(
"--sides",
type=Choice(["same", "opposite"]),
required=True,
help="the sides to consider",
)
def main(input, output, sides):
basicConfig(level=DEBUG)
# Load data.
info("Loading data")
X = pd.read_feather(input)
debug(f"Result: {X.shape}")
# Filter the data.
info("Filtering data")
if sides == "same":
X = X.loc[X["reference_side"] == X["co_occurring_side"]]
else:
X = X.loc[X["reference_side"] != X["co_occurring_side"]]
debug(f"Result: {X.shape}")
# Write output.
info("Writing output")
X.reset_index(drop=True).to_feather(output)
if __name__ == "__main__":
main()
|