| code (string, lengths 22–1.05M) | apis (list, lengths 1–3.31k) | extract_api (string, lengths 75–3.25M) |
|---|---|---|
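Each row pairs a source snippet (`code`) with the fully qualified calls found in it (`apis`) and a serialized record of every call site (`extract_api`). The exact field layout of the `extract_api` tuples is not documented on this page; as a rough orientation only, the sketch below shows how a similar call extraction could be reproduced with Python's standard `ast` module. The helper name `extract_calls` and its simplified output format are illustrative, not part of the dataset tooling.

import ast

def extract_calls(source: str):
    """Return (dotted_name, lineno, col_offset) for every call found in `source`."""
    def dotted(node):
        # Rebuild a dotted name such as "os.path.isdir" from an attribute chain.
        if isinstance(node, ast.Name):
            return node.id
        if isinstance(node, ast.Attribute):
            base = dotted(node.value)
            return base + "." + node.attr if base else None
        return None
    calls = []
    for node in ast.walk(ast.parse(source)):
        if isinstance(node, ast.Call):
            name = dotted(node.func)
            if name:
                calls.append((name, node.lineno, node.col_offset))
    return calls

# For the first sample below this reports 're.sub', 'argparse.ArgumentParser',
# 'os.path.isdir', 'os.mkdir', 'yaml.dump' and so on, among other calls; the
# dataset's apis column appears to keep only calls resolved to imported modules.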
#!/usr/bin/env python
import yaml
from collections import defaultdict
import re
import os
import argparse
def slugify(value):
"""
Normalizes string, converts to lowercase, removes non-alphanumeric characters,
and converts runs of whitespace and hyphens to underscores.
"""
value = re.sub('[^\w\s-]', '', value).strip().lower()
value = re.sub('[-\s]+', '_', value)
return value
def main():
VERSION = 0.1
parser = argparse.ArgumentParser(description="Splits up an Ephemeris `get_tool_list` yml file for a Galaxy server into individual files for each Section Label.")
parser.add_argument("-i", "--infile", help="The returned `get_tool_list` yml file to split.")
parser.add_argument("-o", "--outdir", help="The output directory to put the split files into. Defaults to infile without the .yml.")
parser.add_argument("--version", action='store_true')
parser.add_argument("--verbose", action='store_true')
args = parser.parse_args()
if args.version:
print("split_tool_yml.py version: %.1f" % VERSION)
return
filename = args.infile
a = yaml.safe_load(open(filename, 'r'), )
outdir = re.sub('\.yml', '', filename)
if args.outdir:
outdir = args.outdir
if args.verbose:
print('Outdir: %s' % outdir)
if not os.path.isdir(outdir):
os.mkdir(outdir)
tools = a['tools']
categories = defaultdict(list)
for tool in tools:
categories[tool['tool_panel_section_label']].append(tool)
for cat in categories:
fname = str(cat)
good_fname = outdir + "/" + slugify(fname) + ".yml"
tool_yaml = {'tools': sorted(categories[cat], key=lambda x: x['name'] + x['owner'])}
if args.verbose:
print("Working on: %s" % good_fname)
with open(good_fname, 'w') as outfile:
yaml.dump(tool_yaml, outfile, default_flow_style=False)
return
if __name__ == "__main__":
main()
|
[
"os.mkdir",
"argparse.ArgumentParser",
"os.path.isdir",
"yaml.dump",
"collections.defaultdict",
"re.sub"
] |
[((327, 356), 're.sub', 're.sub', (['"""[-\\\\s]+"""', '"""_"""', 'value'], {}), "('[-\\\\s]+', '_', value)\n", (333, 356), False, 'import re\n'), ((420, 581), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Splits up a Ephemeris `get_tool_list` yml file for a Galaxy server into individual files for each Section Label."""'}), "(description=\n 'Splits up a Ephemeris `get_tool_list` yml file for a Galaxy server into individual files for each Section Label.'\n )\n", (443, 581), False, 'import argparse\n'), ((1139, 1169), 're.sub', 're.sub', (['"""\\\\.yml"""', '""""""', 'filename'], {}), "('\\\\.yml', '', filename)\n", (1145, 1169), False, 'import re\n'), ((1377, 1394), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1388, 1394), False, 'from collections import defaultdict\n'), ((1288, 1309), 'os.path.isdir', 'os.path.isdir', (['outdir'], {}), '(outdir)\n', (1301, 1309), False, 'import os\n'), ((1319, 1335), 'os.mkdir', 'os.mkdir', (['outdir'], {}), '(outdir)\n', (1327, 1335), False, 'import os\n'), ((1824, 1879), 'yaml.dump', 'yaml.dump', (['tool_yaml', 'outfile'], {'default_flow_style': '(False)'}), '(tool_yaml, outfile, default_flow_style=False)\n', (1833, 1879), False, 'import yaml\n'), ((269, 300), 're.sub', 're.sub', (['"""[^\\\\w\\\\s-]"""', '""""""', 'value'], {}), "('[^\\\\w\\\\s-]', '', value)\n", (275, 300), False, 'import re\n')]
|
''' Unit test for pod.py '''
import sys
# import unittest
sys.path.insert(0, "openpod/")
# import hub
# class TestHub(unittest.TestCase):
# '''
# General tests for the hub.py file
# '''
# def test_xbee_flag_set_true(self):
# '''
# Check if the xbee flag is set to true.
# '''
# global XBEE_FLAG
# XBEE_FLAG = False
# hub.incoming_xbee_data()
# self.assertTrue(XBEE_FLAG)
|
[
"sys.path.insert"
] |
[((60, 90), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""openpod/"""'], {}), "(0, 'openpod/')\n", (75, 90), False, 'import sys\n')]
|
# I intend to use this as a Variational auto-encoder for the
# missing y.
# See paper: https://arxiv.org/abs/1312.6114
import torch
# Define sizes
input_size = 3
output_size = 2
hidden_size = 5
# Create multi-layer perceptron
fc1 = torch.nn.Linear(input_size, hidden_size)
act_fn = torch.nn.Tanh()
fc2 = torch.nn.Linear(hidden_size, output_size)
# Main
num_obs = 100
x = torch.randn(num_obs, input_size)
out = fc1(x)
out = act_fn(out)
out = fc2(out)
print(out)
# Test dims
y = torch.randn(20, 5)
m = torch.randn(20, 5)
b = torch.randn(3) * torch.ones(20, 3)
# I want this to be 20 x (5 + 5 + 3)
input_vec = torch.cat([y, m, b], dim=-1).shape
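# Added check (not in the original snippet): torch.Size compares equal to a plain
# tuple, so this confirms the intended 20 x (5 + 5 + 3) = 20 x 13 concatenation.
assert input_vec == (20, 13)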
|
[
"torch.ones",
"torch.nn.Tanh",
"torch.cat",
"torch.randn",
"torch.nn.Linear"
] |
[((235, 275), 'torch.nn.Linear', 'torch.nn.Linear', (['input_size', 'hidden_size'], {}), '(input_size, hidden_size)\n', (250, 275), False, 'import torch\n'), ((285, 300), 'torch.nn.Tanh', 'torch.nn.Tanh', ([], {}), '()\n', (298, 300), False, 'import torch\n'), ((307, 348), 'torch.nn.Linear', 'torch.nn.Linear', (['hidden_size', 'output_size'], {}), '(hidden_size, output_size)\n', (322, 348), False, 'import torch\n'), ((375, 407), 'torch.randn', 'torch.randn', (['num_obs', 'input_size'], {}), '(num_obs, input_size)\n', (386, 407), False, 'import torch\n'), ((482, 500), 'torch.randn', 'torch.randn', (['(20)', '(5)'], {}), '(20, 5)\n', (493, 500), False, 'import torch\n'), ((505, 523), 'torch.randn', 'torch.randn', (['(20)', '(5)'], {}), '(20, 5)\n', (516, 523), False, 'import torch\n'), ((528, 542), 'torch.randn', 'torch.randn', (['(3)'], {}), '(3)\n', (539, 542), False, 'import torch\n'), ((545, 562), 'torch.ones', 'torch.ones', (['(20)', '(3)'], {}), '(20, 3)\n', (555, 562), False, 'import torch\n'), ((612, 640), 'torch.cat', 'torch.cat', (['[y, m, b]'], {'dim': '(-1)'}), '([y, m, b], dim=-1)\n', (621, 640), False, 'import torch\n')]
|
'Format and display the output text.'
__author__ = '<NAME>'
__copyright__ = 'Copyright 2011 <NAME>'
__license__ = 'ISC'
__version__ = '0.5.0.0'
__status__ = 'Development'
import os
import re
import struct
def ioctl_term_size(filed):
'Attempt to find terminal dimensions using an IO Control system call.'
try:
import fcntl, termios
packed = fcntl.ioctl(filed, termios.TIOCGWINSZ, '1234')
rows_cols = struct.unpack('hh', packed)
except ImportError:
return None
if rows_cols == (0, 0):
return None
return rows_cols
def terminal_size():
"""Determine the terminal size or set a default size if that fails.
From <NAME>'s code, http://pdos.csail.mit.edu/~cblake/cls/cls.py
Modifications by <NAME> to allow Curveship to run in GNU Emacs."""
rows_cols = ioctl_term_size(0) or ioctl_term_size(1) or ioctl_term_size(2)
if not rows_cols:
try:
filed = os.open(os.ctermid(), os.O_RDONLY)
rows_cols = ioctl_term_size(filed)
os.close(filed)
except AttributeError:
pass
if not rows_cols:
# Some shells may set these environment variables.
rows_cols = (os.environ.get('LINES', 25), os.environ.get('COLUMNS', 80))
return int(rows_cols[1]), int(rows_cols[0]) # Reverses it to cols, rows.
def _break_words(string, char_limit):
'Lineate the string based on the passed-in character limit.'
if len(string) <= char_limit:
next_line = string
string = ''
elif '\n' in string[0:char_limit]:
first_newline = string.index('\n')
next_line = string[0:first_newline]
string = string[(first_newline + 1):]
elif ' ' not in string[0:char_limit]:
next_line = string[0:char_limit]
string = string[char_limit:]
else:
last_space = string[0:char_limit].rindex(' ')
next_line = string[0:last_space]
string = string[(last_space + 1):]
return (next_line, string)
def present(string, out_streams, pre='', post='\n\n'):
'Print the string, broken into lines, to the output streams.'
if len(string) == 0:
return
if string[-1:] == '\n':
post = re.sub('^[ \t]+', '', post)
string = pre + string + post
while len(string) > 0:
(cols, _) = terminal_size()
(next_line, string) = _break_words(string, cols)
out_streams.write(next_line)
if len(string) > 0:
out_streams.write('\n')
out_streams.write(string)
def center(string, out_streams, pre='', post='\n'):
'Center the output and print it to the output streams.'
string = pre + string + post
(cols, _) = terminal_size()
while len(string) > 0:
(next_line, string) = _break_words(string, cols)
while len(next_line) > 0 and next_line[0] == '\n':
out_streams.write('\n')
next_line = next_line[1:]
spaces = ''
i = 1
while i <= (cols - len(next_line))/2:
spaces += ' '
i += 1
out_streams.write(' ' + spaces + next_line)
if len(string) > 0:
out_streams.write('\n')
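# Usage sketch (added for illustration; not part of the original module). It writes
# one wrapped paragraph and one centered line to standard output.
if __name__ == '__main__':
    import sys
    present('This paragraph is broken into lines that fit the current terminal width.',
            sys.stdout)
    center('--- a centered heading ---', sys.stdout)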
|
[
"fcntl.ioctl",
"os.ctermid",
"struct.unpack",
"os.environ.get",
"os.close",
"re.sub"
] |
[((367, 413), 'fcntl.ioctl', 'fcntl.ioctl', (['filed', 'termios.TIOCGWINSZ', '"""1234"""'], {}), "(filed, termios.TIOCGWINSZ, '1234')\n", (378, 413), False, 'import fcntl, termios\n'), ((434, 461), 'struct.unpack', 'struct.unpack', (['"""hh"""', 'packed'], {}), "('hh', packed)\n", (447, 461), False, 'import struct\n'), ((2209, 2236), 're.sub', 're.sub', (['"""^[ \t]+"""', '""""""', 'post'], {}), "('^[ \\t]+', '', post)\n", (2215, 2236), False, 'import re\n'), ((1043, 1058), 'os.close', 'os.close', (['filed'], {}), '(filed)\n', (1051, 1058), False, 'import os\n'), ((1209, 1236), 'os.environ.get', 'os.environ.get', (['"""LINES"""', '(25)'], {}), "('LINES', 25)\n", (1223, 1236), False, 'import os\n'), ((1238, 1267), 'os.environ.get', 'os.environ.get', (['"""COLUMNS"""', '(80)'], {}), "('COLUMNS', 80)\n", (1252, 1267), False, 'import os\n'), ((957, 969), 'os.ctermid', 'os.ctermid', ([], {}), '()\n', (967, 969), False, 'import os\n')]
|
from math import log
import operator
def createDataSet(): # Function that returns a small toy dataset
    dataSet = [[1, 1, 'yes'],
               [1, 1, 'yes'],
               [1, 0, 'no'],
               [0, 1, 'no'],
               [0, 1, 'no']]
    labels = ['no surfacing','flippers']
    #change to discrete values
    return dataSet, labels
def calcShannonEnt(dataSet): # Computes the Shannon entropy of the dataset
    numEntries = len(dataSet) # Number of rows in the dataset
    labelCounts = {} # Dictionary holding the count for each class
    for featVec in dataSet: # Walk through every feature vector (row) of the dataset
        currentLabel = featVec[-1] # The class label is the last element of the feature vector
        if currentLabel not in labelCounts.keys(): labelCounts[currentLabel] = 0 # If the current class is not yet in labelCounts, add the key and initialize it to 0
        labelCounts[currentLabel] += 1 # Increment the count for this class by one
    shannonEnt = 0.0 # Initialize the entropy to zero
    for key in labelCounts: # Compute the probability of each label and add its contribution to the sum
        prob = float(labelCounts[key])/numEntries # Probability of this class
        shannonEnt -= prob * log(prob,2) # Entropy term, using a base-2 logarithm
    return shannonEnt # Return the entropy
def splitDataSet(dataSet, axis, value): # Returns every row of the dataset whose column (axis) equals the given value
    retDataSet = [] # Variable in which the split will be returned
    for featVec in dataSet: # Loop over every row of the dataset
        if featVec[axis] == value: # Check whether the row meets the condition given as a parameter
            reducedFeatVec = featVec[:axis] # Build a row with every value before the axis column
            reducedFeatVec.extend(featVec[axis+1:]) # Append the remaining values, skipping the axis column
            retDataSet.append(reducedFeatVec) # Add the row to the matrix to be returned
    return retDataSet # Return all the rows that match the given column value
def chooseBestFeatureToSplit(dataSet): # This is the core function of the algorithm
    numFeatures = len(dataSet[0]) - 1 # Number of features (columns); note that the last column holds the class label
    baseEntropy = calcShannonEnt(dataSet) # Entropy of the whole dataset
    bestInfoGain = 0.0; bestFeature = -1 # Initialize the best information gain and the best feature
    for i in range(numFeatures): # Loop over every feature
        featList = [example[i] for example in dataSet] # List with the value of feature "i" for every row
        uniqueVals = set(featList) # Set of all the unique values of that feature
        newEntropy = 0.0 # Initialize the entropy
        for value in uniqueVals: # Loop over every unique value of feature "i"
            subDataSet = splitDataSet(dataSet, i, value) # Subset of the data with that value of feature i
            prob = len(subDataSet)/float(len(dataSet)) # Probability of this subset
            newEntropy += prob * calcShannonEnt(subDataSet) # Add the weighted entropy of this subset
        infoGain = baseEntropy - newEntropy # Compute the information gain
        if (infoGain > bestInfoGain): # Compare the gain with the best gain so far
            bestInfoGain = infoGain # If it is better, record it as the new best
            bestFeature = i # Also keep i, i.e. the index of the best feature
    return bestFeature # Finally, return the index of the best feature.
def majorityCnt(classList): # Returns the value of the majority class
    classCount={} # Dictionary in which the count for each class is kept
    for vote in classList: # Loop over the list of classes
        if vote not in classCount.keys(): classCount[vote] = 0 # If the key does not exist yet, add it as zero
        classCount[vote] += 1 # Add one to the key's value
    sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True) # Sort from highest to lowest (items() replaces the Python 2-only iteritems())
    return sortedClassCount[0][0] # Return the first element, i.e. the majority class
def createTree(dataSet,labels): # Builds the decision tree recursively
    classList = [example[-1] for example in dataSet] # The class labels are in the last column of the dataset, so assign that column to classList
    if classList.count(classList[0]) == len(classList): # Check whether the count of the first class equals the length of the class list
        return classList[0] # If so, every class is the same, the recursion ends here and the class is returned
    if len(dataSet[0]) == 1: # Likewise, if there are no more features in the dataset, stop splitting
        return majorityCnt(classList) # Return the value of the majority class
    bestFeat = chooseBestFeatureToSplit(dataSet) # Get the best feature to split on
    bestFeatLabel = labels[bestFeat] # Get the label of the best feature
    myTree = {bestFeatLabel:{}} # Create a dictionary of dictionaries whose key is the label of the best feature
    del(labels[bestFeat]) # Remove the best feature's label from the label vector; in short, the labels were split in two
    featValues = [example[bestFeat] for example in dataSet] # Get the values of the best feature so the next splits can be made
    uniqueVals = set(featValues) # Get the unique values of this feature
    for value in uniqueVals: # Loop over the unique values of the best feature
        subLabels = labels[:] # Copy all the labels; passing labels itself would let the subtrees share the same variable
        myTree[bestFeatLabel][value] = createTree(splitDataSet(dataSet, bestFeat, value),subLabels) # For each unique value of the best feature, attach the corresponding subtree
    return myTree # Return the tree
def classify(inputTree,featLabels,testVec): # Walks a tree to produce the classification result; also a recursive function
    firstStr = list(inputTree)[0] # Since the tree is a dictionary of dictionaries, this gets the first key, i.e. the feature label
    secondDict = inputTree[firstStr] # Get the value under the first key, which is a set of subtrees
    featIndex = featLabels.index(firstStr) # Within the feature vector, find the feature that matches the current level of the tree
    key = testVec[featIndex] # Get the value of that feature from the input vector
    valueOfFeat = secondDict[key] # Go deeper into the tree, taking the subtree that matches the feature value from the input vector
    if isinstance(valueOfFeat, dict): # Check whether that subtree really is a subtree
        classLabel = classify(valueOfFeat, featLabels, testVec) # If so, call itself with the new subtree (recursion)
    else: classLabel = valueOfFeat # Otherwise, the class label is the value of the feature
    return classLabel # Return the class label
def storeTree(inputTree,filename): # Saves a tree to a file
    import pickle
    fw = open(filename,'wb')
    pickle.dump(inputTree,fw)
    fw.close()
def grabTree(filename): # Loads a tree from a file
    import pickle
    fr = open(filename,'rb')
    return pickle.load(fr)
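# Usage sketch (added for illustration; not part of the original module): build the
# ID3 tree from the toy dataset above and classify one example.
if __name__ == '__main__':
    myDat, myLabels = createDataSet()
    myTree = createTree(myDat, myLabels[:]) # pass a copy, createTree mutates the label list
    print(myTree) # {'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}}
    print(classify(myTree, ['no surfacing', 'flippers'], [1, 1])) # expected: 'yes'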
|
[
"math.log",
"pickle.dump",
"pickle.load",
"operator.itemgetter"
] |
[((7921, 7947), 'pickle.dump', 'pickle.dump', (['inputTree', 'fw'], {}), '(inputTree, fw)\n', (7932, 7947), False, 'import pickle\n'), ((8091, 8106), 'pickle.load', 'pickle.load', (['fr'], {}), '(fr)\n', (8102, 8106), False, 'import pickle\n'), ((1358, 1370), 'math.log', 'log', (['prob', '(2)'], {}), '(prob, 2)\n', (1361, 1370), False, 'from math import log\n'), ((4401, 4423), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (4420, 4423), False, 'import operator\n')]
|
""" gyrodata.py
Run one motor with a sinusoidal speed input and an attached gyro.
This example shows how to use the gyro to measure angular position and velocity
by attaching it to the motor shaft.
Setup:
Connect one large motor to port 'A'
Connect the gyro sensor to port number 1.
Notes:
1. Remember there's a cable attached to the sensor, so limit the rotation
angle to approx. 180 degrees.
2. The maximum angular speed that the gyro can detect without saturating
is 440 deg./s (approx. 7.7 rad/s). Limit the motor speed % output to no
more than 35 %.
"""
# Importing modules and classes
import time
import numpy as np
from scipy import integrate
from pyev3.utils import plot_line
from pyev3.brick import LegoEV3
from pyev3.devices import Gyro, Motor
# Defining parameters (for one motor)
T = 2 # Period of sine wave (s)
u0 = 30 # Motor speed amplitude (%)
tstop = 2 # Sine wave duration (s)
# Pre-allocating output arrays
tmotor = []
theta = []
tgyro = []
angle = []
rate = []
# Creating LEGO EV3 objects
ev3 = LegoEV3()
motor = Motor(ev3, port='A')
gyro = Gyro(ev3, portnum=1, inputmode='angle&rate')
# Initializing motor
motor.outputmode = 'speed'
motor.output = 0
motor.reset_angle()
motor.start()
# Getting initial gyro sensor reading to remove drift in the data
angle0, rate0 = gyro.output
# Initializing current time stamp and starting clock
tcurr = 0
tstart = time.perf_counter()
# Running motor sine wave output
while tcurr <= tstop:
# Getting current time for motor (s)
tcurr = time.perf_counter() - tstart
# Assigning current motor sinusoidal
# output using the current time stamp
motor.output = u0 * np.sin((2*np.pi/T) * tcurr)
# Updating output arrays for motor
tmotor.append(tcurr)
theta.append(motor.angle)
# Getting current time for gyro (s)
tcurr = time.perf_counter() - tstart
# Updating output arrays for gyro
# (and converting from deg/s to rad/s)
anglecurr, ratecurr = gyro.output
tgyro.append(tcurr)
angle.append(anglecurr-angle0)
rate.append(np.pi/180 * (ratecurr-rate0))
# Stopping motor and closing brick connection
motor.stop(brake='off')
ev3.close()
# Calculating motor angular velocity (rad/s)
w = np.pi/180 * np.gradient(theta, tmotor)
# Plotting results
plot_line([tmotor, tgyro], [theta, angle], yname='Angular Position (deg.)',
legend=['Tacho', 'Gyro'], marker=True)
plot_line([tmotor, tgyro], [w, rate], yname='Angular velocity (rad/s)',
legend=['Tacho', 'Gyro'], marker=True)
|
[
"pyev3.brick.LegoEV3",
"time.perf_counter",
"pyev3.utils.plot_line",
"numpy.sin",
"pyev3.devices.Motor",
"pyev3.devices.Gyro",
"numpy.gradient"
] |
[((1049, 1058), 'pyev3.brick.LegoEV3', 'LegoEV3', ([], {}), '()\n', (1056, 1058), False, 'from pyev3.brick import LegoEV3\n'), ((1067, 1087), 'pyev3.devices.Motor', 'Motor', (['ev3'], {'port': '"""A"""'}), "(ev3, port='A')\n", (1072, 1087), False, 'from pyev3.devices import Gyro, Motor\n'), ((1095, 1139), 'pyev3.devices.Gyro', 'Gyro', (['ev3'], {'portnum': '(1)', 'inputmode': '"""angle&rate"""'}), "(ev3, portnum=1, inputmode='angle&rate')\n", (1099, 1139), False, 'from pyev3.devices import Gyro, Motor\n'), ((1408, 1427), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1425, 1427), False, 'import time\n'), ((2292, 2410), 'pyev3.utils.plot_line', 'plot_line', (['[tmotor, tgyro]', '[theta, angle]'], {'yname': '"""Angular Position (deg.)"""', 'legend': "['Tacho', 'Gyro']", 'marker': '(True)'}), "([tmotor, tgyro], [theta, angle], yname='Angular Position (deg.)',\n legend=['Tacho', 'Gyro'], marker=True)\n", (2301, 2410), False, 'from pyev3.utils import plot_line\n'), ((2417, 2531), 'pyev3.utils.plot_line', 'plot_line', (['[tmotor, tgyro]', '[w, rate]'], {'yname': '"""Angular velocity (rad/s)"""', 'legend': "['Tacho', 'Gyro']", 'marker': '(True)'}), "([tmotor, tgyro], [w, rate], yname='Angular velocity (rad/s)',\n legend=['Tacho', 'Gyro'], marker=True)\n", (2426, 2531), False, 'from pyev3.utils import plot_line\n'), ((2244, 2270), 'numpy.gradient', 'np.gradient', (['theta', 'tmotor'], {}), '(theta, tmotor)\n', (2255, 2270), True, 'import numpy as np\n'), ((1536, 1555), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1553, 1555), False, 'import time\n'), ((1672, 1701), 'numpy.sin', 'np.sin', (['(2 * np.pi / T * tcurr)'], {}), '(2 * np.pi / T * tcurr)\n', (1678, 1701), True, 'import numpy as np\n'), ((1846, 1865), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1863, 1865), False, 'import time\n')]
|
"""
OpenVINO DL Workbench
Class for ORM model describing dataset augmentation job
Copyright (c) 2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
try:
from typing import TypedDict
except ImportError:
from typing_extensions import TypedDict
from sqlalchemy import Column, Integer, ForeignKey, Boolean, Float, Text
from sqlalchemy.orm import relationship, backref
from wb.main.enumerates import JobTypesEnum
from wb.main.models.datasets_model import DatasetsModel, DatasetJobData
from wb.main.models.jobs_model import JobsModel
class DatasetAugmentationJobData(TypedDict):
applyHorizontalFlip: bool
applyVerticalFlip: bool
applyErase: bool
eraseRatio: int
eraseImages: int
applyNoise: bool
noiseRatio: int
noiseImages: int
applyImageCorrections: bool
imageCorrections: str
# pylint: disable=too-many-instance-attributes
class DatasetAugmentationJobModel(JobsModel):
__tablename__ = 'dataset_augmentation_jobs'
__mapper_args__ = {
'polymorphic_identity': JobTypesEnum.augment_dataset_type.value
}
job_id = Column(Integer, ForeignKey(JobsModel.job_id), primary_key=True)
dataset_id = Column(Integer, ForeignKey(DatasetsModel.id), nullable=False)
horizontal_flip = Column(Boolean, nullable=False, default=False)
vertical_flip = Column(Boolean, nullable=False, default=False)
apply_random_erase = Column(Boolean, nullable=False, default=False)
erase_ratio = Column(Float, nullable=True)
erase_images = Column(Integer, nullable=True)
apply_noise_injection = Column(Boolean, nullable=False, default=False)
noise_ratio = Column(Float, nullable=True)
noise_images = Column(Integer, nullable=True)
apply_image_corrections = Column(Boolean, nullable=False, default=False)
image_corrections = Column(Text, nullable=True)
dataset = relationship(DatasetsModel, foreign_keys=[dataset_id],
backref=backref('dataset_augmentation_job', lazy='subquery', cascade='delete,all',
uselist=False))
def __init__(self, data: DatasetJobData, augmentation_data: DatasetAugmentationJobData):
super().__init__(data)
self.dataset_id = data['datasetId']
self.vertical_flip = augmentation_data['applyVerticalFlip']
self.horizontal_flip = augmentation_data['applyHorizontalFlip']
self.apply_noise_injection = augmentation_data['applyNoise']
self.apply_random_erase = augmentation_data['applyErase']
self.erase_images = augmentation_data['eraseImages']
self.erase_ratio = augmentation_data['eraseRatio']
self.noise_ratio = augmentation_data['noiseRatio']
self.noise_images = augmentation_data['noiseImages']
self.apply_image_corrections = augmentation_data['applyImageCorrections']
self.image_corrections = json.dumps(augmentation_data['imageCorrections'])
def json(self) -> dict:
return {
**super().json(),
**self.dataset.json()
}
@property
def augmented_images_count(self) -> int:
augmented_images_count = 0
if self.apply_random_erase:
augmented_images_count += self.erase_images
if self.apply_noise_injection:
augmented_images_count += self.noise_images
if self.horizontal_flip:
augmented_images_count += 1
if self.vertical_flip:
augmented_images_count += 1
if self.apply_image_corrections:
augmented_images_count += len(self.image_corrections)
return augmented_images_count
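# Illustrative payload (added; the values are hypothetical) matching the keys
# declared in the DatasetAugmentationJobData TypedDict above.
example_augmentation_data: DatasetAugmentationJobData = {
    'applyHorizontalFlip': True,
    'applyVerticalFlip': False,
    'applyErase': True,
    'eraseRatio': 10,
    'eraseImages': 5,
    'applyNoise': False,
    'noiseRatio': 0,
    'noiseImages': 0,
    'applyImageCorrections': False,
    'imageCorrections': '[]',
}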
|
[
"sqlalchemy.orm.backref",
"sqlalchemy.ForeignKey",
"sqlalchemy.Column",
"json.dumps"
] |
[((1774, 1820), 'sqlalchemy.Column', 'Column', (['Boolean'], {'nullable': '(False)', 'default': '(False)'}), '(Boolean, nullable=False, default=False)\n', (1780, 1820), False, 'from sqlalchemy import Column, Integer, ForeignKey, Boolean, Float, Text\n'), ((1841, 1887), 'sqlalchemy.Column', 'Column', (['Boolean'], {'nullable': '(False)', 'default': '(False)'}), '(Boolean, nullable=False, default=False)\n', (1847, 1887), False, 'from sqlalchemy import Column, Integer, ForeignKey, Boolean, Float, Text\n'), ((1914, 1960), 'sqlalchemy.Column', 'Column', (['Boolean'], {'nullable': '(False)', 'default': '(False)'}), '(Boolean, nullable=False, default=False)\n', (1920, 1960), False, 'from sqlalchemy import Column, Integer, ForeignKey, Boolean, Float, Text\n'), ((1979, 2007), 'sqlalchemy.Column', 'Column', (['Float'], {'nullable': '(True)'}), '(Float, nullable=True)\n', (1985, 2007), False, 'from sqlalchemy import Column, Integer, ForeignKey, Boolean, Float, Text\n'), ((2027, 2057), 'sqlalchemy.Column', 'Column', (['Integer'], {'nullable': '(True)'}), '(Integer, nullable=True)\n', (2033, 2057), False, 'from sqlalchemy import Column, Integer, ForeignKey, Boolean, Float, Text\n'), ((2087, 2133), 'sqlalchemy.Column', 'Column', (['Boolean'], {'nullable': '(False)', 'default': '(False)'}), '(Boolean, nullable=False, default=False)\n', (2093, 2133), False, 'from sqlalchemy import Column, Integer, ForeignKey, Boolean, Float, Text\n'), ((2152, 2180), 'sqlalchemy.Column', 'Column', (['Float'], {'nullable': '(True)'}), '(Float, nullable=True)\n', (2158, 2180), False, 'from sqlalchemy import Column, Integer, ForeignKey, Boolean, Float, Text\n'), ((2200, 2230), 'sqlalchemy.Column', 'Column', (['Integer'], {'nullable': '(True)'}), '(Integer, nullable=True)\n', (2206, 2230), False, 'from sqlalchemy import Column, Integer, ForeignKey, Boolean, Float, Text\n'), ((2262, 2308), 'sqlalchemy.Column', 'Column', (['Boolean'], {'nullable': '(False)', 'default': '(False)'}), '(Boolean, nullable=False, default=False)\n', (2268, 2308), False, 'from sqlalchemy import Column, Integer, ForeignKey, Boolean, Float, Text\n'), ((2333, 2360), 'sqlalchemy.Column', 'Column', (['Text'], {'nullable': '(True)'}), '(Text, nullable=True)\n', (2339, 2360), False, 'from sqlalchemy import Column, Integer, ForeignKey, Boolean, Float, Text\n'), ((1624, 1652), 'sqlalchemy.ForeignKey', 'ForeignKey', (['JobsModel.job_id'], {}), '(JobsModel.job_id)\n', (1634, 1652), False, 'from sqlalchemy import Column, Integer, ForeignKey, Boolean, Float, Text\n'), ((1705, 1733), 'sqlalchemy.ForeignKey', 'ForeignKey', (['DatasetsModel.id'], {}), '(DatasetsModel.id)\n', (1715, 1733), False, 'from sqlalchemy import Column, Integer, ForeignKey, Boolean, Float, Text\n'), ((3399, 3448), 'json.dumps', 'json.dumps', (["augmentation_data['imageCorrections']"], {}), "(augmentation_data['imageCorrections'])\n", (3409, 3448), False, 'import json\n'), ((2466, 2559), 'sqlalchemy.orm.backref', 'backref', (['"""dataset_augmentation_job"""'], {'lazy': '"""subquery"""', 'cascade': '"""delete,all"""', 'uselist': '(False)'}), "('dataset_augmentation_job', lazy='subquery', cascade='delete,all',\n uselist=False)\n", (2473, 2559), False, 'from sqlalchemy.orm import relationship, backref\n')]
|
import numpy as np
import pytest
from numpy.testing import assert_allclose
from pybraw import _pybraw, verify
class CapturingCallback(_pybraw.BlackmagicRawCallback):
def ReadComplete(self, job, result, frame):
self.frame = frame
def ProcessComplete(self, job, result, processed_image):
self.processed_image = processed_image
@pytest.fixture
def callback(codec):
callback = CapturingCallback()
verify(codec.SetCallback(callback))
return callback
@pytest.fixture
def frame(codec, clip, callback):
read_job = verify(clip.CreateJobReadFrame(12))
verify(read_job.Submit())
read_job.Release()
verify(codec.FlushJobs())
return callback.frame
@pytest.mark.parametrize('format,max_val,is_planar,channels', [
(_pybraw.blackmagicRawResourceFormatBGRAU8, 2**8, False, [2, 1, 0, 3]),
(_pybraw.blackmagicRawResourceFormatRGBF32Planar, 1, True, [0, 1, 2]),
(_pybraw.blackmagicRawResourceFormatRGBU16Planar, 2**16, True, [0, 1, 2]),
])
def test_SetResourceFormat(frame, codec, callback, format, max_val, is_planar, channels):
verify(frame.SetResourceFormat(format))
process_job = verify(frame.CreateJobDecodeAndProcessFrame())
process_job.Submit()
process_job.Release()
codec.FlushJobs()
resource_type = verify(callback.processed_image.GetResourceType())
assert resource_type == _pybraw.blackmagicRawResourceTypeBufferCPU
resource_format = verify(callback.processed_image.GetResourceFormat())
assert resource_format == format
np_image = callback.processed_image.to_py()
del callback.processed_image
np_image = np_image / max_val
if is_planar:
np_image = np.transpose(np_image, (1, 2, 0))
expected = np.array([126, 131, 129, 255])[channels] / 255
assert_allclose(np_image[100, 200], expected, atol=1 / 255)
def test_SetResolutionScale(frame, codec, callback):
verify(frame.SetResolutionScale(_pybraw.blackmagicRawResolutionScaleQuarter))
process_job = verify(frame.CreateJobDecodeAndProcessFrame())
process_job.Submit()
process_job.Release()
codec.FlushJobs()
# Check that the resolution is one quarter of the original DCI full frame 4K.
width = verify(callback.processed_image.GetWidth())
assert width == 1024
height = verify(callback.processed_image.GetHeight())
assert height == 540
# from PIL import Image
# pil_image = Image.fromarray(callback.processed_image.to_py()[..., :3])
# pil_image.show()
def test_CloneFrameProcessingAttributes(frame):
attributes = verify(frame.CloneFrameProcessingAttributes())
assert isinstance(attributes, _pybraw.IBlackmagicRawFrameProcessingAttributes)
iso = verify(attributes.GetFrameAttribute(_pybraw.blackmagicRawFrameProcessingAttributeISO)).to_py()
assert iso == 400
def test_GetMetadataIterator(frame):
iterator = verify(frame.GetMetadataIterator())
metadata = {}
while True:
result, key = iterator.GetKey()
if result == _pybraw.E_FAIL:
break
assert result == _pybraw.S_OK
metadata[key] = verify(iterator.GetData()).to_py()
verify(iterator.Next())
assert metadata['white_balance_kelvin'] == 5600
assert_allclose(metadata['sensor_rate'], np.array([25, 1]))
def test_GetMetadata(frame):
white_balance = verify(frame.GetMetadata('white_balance_kelvin'))
assert white_balance.to_py() == 5600
def test_SetMetadata(frame):
verify(frame.SetMetadata('white_balance_kelvin', _pybraw.VariantCreateU32(2800)))
white_balance = verify(frame.GetMetadata('white_balance_kelvin'))
assert white_balance.to_py() == 2800
|
[
"numpy.testing.assert_allclose",
"numpy.transpose",
"pybraw._pybraw.VariantCreateU32",
"numpy.array",
"pytest.mark.parametrize"
] |
[((701, 1005), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""format,max_val,is_planar,channels"""', '[(_pybraw.blackmagicRawResourceFormatBGRAU8, 2 ** 8, False, [2, 1, 0, 3]),\n (_pybraw.blackmagicRawResourceFormatRGBF32Planar, 1, True, [0, 1, 2]),\n (_pybraw.blackmagicRawResourceFormatRGBU16Planar, 2 ** 16, True, [0, 1, 2])\n ]'], {}), "('format,max_val,is_planar,channels', [(_pybraw.\n blackmagicRawResourceFormatBGRAU8, 2 ** 8, False, [2, 1, 0, 3]), (\n _pybraw.blackmagicRawResourceFormatRGBF32Planar, 1, True, [0, 1, 2]), (\n _pybraw.blackmagicRawResourceFormatRGBU16Planar, 2 ** 16, True, [0, 1, 2])]\n )\n", (724, 1005), False, 'import pytest\n'), ((1777, 1836), 'numpy.testing.assert_allclose', 'assert_allclose', (['np_image[100, 200]', 'expected'], {'atol': '(1 / 255)'}), '(np_image[100, 200], expected, atol=1 / 255)\n', (1792, 1836), False, 'from numpy.testing import assert_allclose\n'), ((1677, 1710), 'numpy.transpose', 'np.transpose', (['np_image', '(1, 2, 0)'], {}), '(np_image, (1, 2, 0))\n', (1689, 1710), True, 'import numpy as np\n'), ((3257, 3274), 'numpy.array', 'np.array', (['[25, 1]'], {}), '([25, 1])\n', (3265, 3274), True, 'import numpy as np\n'), ((1726, 1756), 'numpy.array', 'np.array', (['[126, 131, 129, 255]'], {}), '([126, 131, 129, 255])\n', (1734, 1756), True, 'import numpy as np\n'), ((3502, 3532), 'pybraw._pybraw.VariantCreateU32', '_pybraw.VariantCreateU32', (['(2800)'], {}), '(2800)\n', (3526, 3532), False, 'from pybraw import _pybraw, verify\n')]
|
#!/usr/bin/env python3
# Created By r2dr0dn
# Hd Video Downloader For PornHub
# Don't Copy The Code Without Giving The Credits Nerd
from __future__ import unicode_literals
try:
import os,sys,requests
import youtube_dl as dl
import validators as valid
from time import sleep as sl
from random import random,randint
except ImportError:
print('['+'*'*20+']')
print('Module [youtube-dl] Status: Not Found!')
print('['+'*'*20+']')
print('Please Install It Using [pip3 install youtube-dl]')
print('['+'*'*20+']')
# Colors:
Reset="\033[0m"
cor = ["\033[1;33m","\033[1;34m","\033[1;30m","\033[1;36m","\033[1;31m","\033[35m","\033[95m","\033[96m","\033[39m","\033[38;5;82m","\033[38;5;198m","\033[38;5;208m","\033[38;5;167m","\033[38;5;91m","\033[38;5;210m","\033[38;5;165m","\033[38;5;49m","\033[38;5;160m","\033[38;5;51m","\033[38;5;13m","\033[38;5;162m","\033[38;5;203m","\033[38;5;113m","\033[38;5;14m"]
colors = cor[randint(0,15)]
colors2 = cor[randint(0,15)]
colors4 = cor[randint(0,15)]
colors3 = cor[randint(0,15)]
colors4 = cor[randint(0,15)]
colors5 = cor[randint(0,15)]
colors6 = cor[randint(0,15)]
colors7 = cor[randint(0,15)]
colors8 = cor[randint(0,15)]
colors9 = cor[randint(0,15)]
# Clear Screen
def clear():
clear = os.system('clear')
return clear
# banner
def banner():
print(colors + """
.'\ /`.
.'.-.`-'.-.`.
..._: .-. .-. :_...
.' '-.(o ) (o ).-' `.
: _ _ _`~(_)~`_ _ _ :
: /: ' .-=_ _=-. ` ;\ :
: :|-.._ ' ` _..-|: :
: `:| |`:-:-.-:-:'| |:' :
`. `.| | | | | | |.' .'
`. `-:_| | |_:-' .' - Welcome To PrNdOwN!
`-._ ```` _.-'
``-------''
""")
# Check if user is connected to internet
def net(url):
try:
requests.get(url)
except requests.exceptions.ConnectionError:
print(colors + "[!] Please check your network connection.")
return False
except requests.exceptions.Timeout:
print(colors2 + "[!!!] Site is taking too long to load, TimeOut.")
return False
except requests.exceptions.TooManyRedirects:
print(colors3 + "[*] Too many Redirects.")
return False
except requests.exceptions.RequestException as e:
# catastrophic error. bail.
print(e)
sys.exit(1)
return True
# Check the validity of the given url
def check(link):
try:
requests.get(link)
return True
except requests.exceptions.ConnectionError:
print(colors4 + "[!] disconnected from network.")
return False
except requests.exceptions.HTTPError as err:
print(err)
return False
# Configuration File
config = {
'Audio': {
'format': 'bestaudio/best',
'noplaylist': True,
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '192',
}]
},
'Video': {
'format': 'bestvideo+bestaudio/best',
'noplaylist': True,
'postprocessors': [{
'key': 'FFmpegVideoConvertor',
'preferedformat': 'mp4',
#'preferredquality': '137',
}]
},
'list': {
'listsubtitles': True
},
'listformat': {
'lisformats': True
}
}
# Url Download
def download(link, data):
try:
with dl.YoutubeDL(data) as ydl:
ydl.download([link])
except dl.utils.DownloadError as err:
print(colors + str(err))
# Extract URL Information
def get_info(link):
ydl2 = dl.YoutubeDL({'outtmpl': '%(id)s%(ext)s'})
with ydl2:
result = ydl2.extract_info(link,download=False)
if 'entries' in result:
video = result['entries'][0]
else:
video = result
video_title = video['title']
# video_url = video['url']
return video_title
# Main Function
def main():
try:
clear()
banner()
while True:
try:
if net('https://pornhub.com/'):
link = input(colors2 + "["+colors3+"*"+colors4+"]" + colors2 + " Enter the link: " + colors9)
if not valid.url(link):
print("\n" + colors8 + "["+colors2+"!"+colors5+"]" + colors7 + " Unvalid Url!!!" + colors6)
print(colors8 + "["+colors2+"!"+colors5+"]" + colors7 + " Please Try Again" + colors6)
exit(1)
if check(link):
print(colors6 + "Title Video: " +colors+ "{}".format(get_info(link)))
print(colors5 + "[*] 1.Download an Audio playlist")
print(colors3 + "[*] 2.Download a Video playlist")
print(colors7 + "[*] 3.Download a Single Audio")
print(colors8 + "[*] 4.Download a single video file")
check_inp = int(input(colors + "["+colors4+"------------Enter your choice------------"+colors5+"]: "))
if check_inp in [1,2,3,4]:
if check_inp == 1:
config['Audio']['noplaylist'] = False
download(link, config['Audio'])
elif check_inp == 2:
config['Video']['noplaylist'] = False
download(link, config['Video'])
elif check_inp == 4:
download(link, config['Video'])
elif check_inp == 3:
download(link, config['Audio'])
else:
print(colors8 + "Unknown Choice :(")
check_str = str(input(colors7 + "[*] Do You Want To Continue? (Y/n): "))
if check_str in ['Y','y']:
banner()
continue
else:
print(colors6 + "Cya Next Time")
break
except dl.utils.DownloadError:
print(colors3 + "DownloadError Occurred !!!")
print(colors4 + "Re Run The Script With The Same URL And The Same Options To Continue Downloading!")
exit(1)
except RuntimeError:
exit(1)
if __name__ == '__main__':
main()
|
[
"random.randint",
"os.system",
"validators.url",
"youtube_dl.YoutubeDL",
"requests.get",
"sys.exit"
] |
[((954, 968), 'random.randint', 'randint', (['(0)', '(15)'], {}), '(0, 15)\n', (961, 968), False, 'from random import random, randint\n'), ((983, 997), 'random.randint', 'randint', (['(0)', '(15)'], {}), '(0, 15)\n', (990, 997), False, 'from random import random, randint\n'), ((1012, 1026), 'random.randint', 'randint', (['(0)', '(15)'], {}), '(0, 15)\n', (1019, 1026), False, 'from random import random, randint\n'), ((1041, 1055), 'random.randint', 'randint', (['(0)', '(15)'], {}), '(0, 15)\n', (1048, 1055), False, 'from random import random, randint\n'), ((1070, 1084), 'random.randint', 'randint', (['(0)', '(15)'], {}), '(0, 15)\n', (1077, 1084), False, 'from random import random, randint\n'), ((1099, 1113), 'random.randint', 'randint', (['(0)', '(15)'], {}), '(0, 15)\n', (1106, 1113), False, 'from random import random, randint\n'), ((1128, 1142), 'random.randint', 'randint', (['(0)', '(15)'], {}), '(0, 15)\n', (1135, 1142), False, 'from random import random, randint\n'), ((1157, 1171), 'random.randint', 'randint', (['(0)', '(15)'], {}), '(0, 15)\n', (1164, 1171), False, 'from random import random, randint\n'), ((1186, 1200), 'random.randint', 'randint', (['(0)', '(15)'], {}), '(0, 15)\n', (1193, 1200), False, 'from random import random, randint\n'), ((1215, 1229), 'random.randint', 'randint', (['(0)', '(15)'], {}), '(0, 15)\n', (1222, 1229), False, 'from random import random, randint\n'), ((1270, 1288), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (1279, 1288), False, 'import os, sys, requests\n'), ((3562, 3604), 'youtube_dl.YoutubeDL', 'dl.YoutubeDL', (["{'outtmpl': '%(id)s%(ext)s'}"], {}), "({'outtmpl': '%(id)s%(ext)s'})\n", (3574, 3604), True, 'import youtube_dl as dl\n'), ((1793, 1810), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (1805, 1810), False, 'import os, sys, requests\n'), ((2420, 2438), 'requests.get', 'requests.get', (['link'], {}), '(link)\n', (2432, 2438), False, 'import os, sys, requests\n'), ((2320, 2331), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2328, 2331), False, 'import os, sys, requests\n'), ((3375, 3393), 'youtube_dl.YoutubeDL', 'dl.YoutubeDL', (['data'], {}), '(data)\n', (3387, 3393), True, 'import youtube_dl as dl\n'), ((4181, 4196), 'validators.url', 'valid.url', (['link'], {}), '(link)\n', (4190, 4196), True, 'import validators as valid\n')]
|
import pytest
from email.message import Message
from mailers import Email, InMemoryTransport, Mailer
from mailers.plugins.jinja_renderer import JinjaRendererPlugin
from pathlib import Path
from kupala.application import Kupala
from kupala.mails import send_mail, send_templated_mail
@pytest.mark.asyncio
async def test_mail_regular_send() -> None:
storage: list[Message] = []
app = Kupala()
app.mail.add('default', Mailer(InMemoryTransport(storage), from_address='root <root@localhost>'))
await send_mail(Email(subject='test', text='body'))
assert len(storage) == 1
assert storage[0]['From'] == 'root <root@localhost>'
@pytest.mark.asyncio
async def test_send_templated_mail(tmpdir: Path) -> None:
with open(tmpdir / 'index.html', 'w') as f:
f.write('base mail')
storage: list[Message] = []
app = Kupala()
app.jinja.add_template_dirs(tmpdir)
app.mail.add(
'default',
Mailer(
InMemoryTransport(storage),
from_address='root <root@localhost>',
plugins=[JinjaRendererPlugin(app.jinja.env)],
),
)
await send_templated_mail(to='root@localhost', subject='test', html_template='index.html')
assert len(storage) == 1
assert storage[0]['From'] == 'root <root@localhost>'
assert storage[0].get_payload() == 'base mail\n'
|
[
"kupala.mails.send_templated_mail",
"mailers.plugins.jinja_renderer.JinjaRendererPlugin",
"mailers.InMemoryTransport",
"kupala.application.Kupala",
"mailers.Email"
] |
[((393, 401), 'kupala.application.Kupala', 'Kupala', ([], {}), '()\n', (399, 401), False, 'from kupala.application import Kupala\n'), ((847, 855), 'kupala.application.Kupala', 'Kupala', ([], {}), '()\n', (853, 855), False, 'from kupala.application import Kupala\n'), ((1124, 1213), 'kupala.mails.send_templated_mail', 'send_templated_mail', ([], {'to': '"""root@localhost"""', 'subject': '"""test"""', 'html_template': '"""index.html"""'}), "(to='root@localhost', subject='test', html_template=\n 'index.html')\n", (1143, 1213), False, 'from kupala.mails import send_mail, send_templated_mail\n'), ((437, 463), 'mailers.InMemoryTransport', 'InMemoryTransport', (['storage'], {}), '(storage)\n', (454, 463), False, 'from mailers import Email, InMemoryTransport, Mailer\n'), ((524, 558), 'mailers.Email', 'Email', ([], {'subject': '"""test"""', 'text': '"""body"""'}), "(subject='test', text='body')\n", (529, 558), False, 'from mailers import Email, InMemoryTransport, Mailer\n'), ((961, 987), 'mailers.InMemoryTransport', 'InMemoryTransport', (['storage'], {}), '(storage)\n', (978, 987), False, 'from mailers import Email, InMemoryTransport, Mailer\n'), ((1060, 1094), 'mailers.plugins.jinja_renderer.JinjaRendererPlugin', 'JinjaRendererPlugin', (['app.jinja.env'], {}), '(app.jinja.env)\n', (1079, 1094), False, 'from mailers.plugins.jinja_renderer import JinjaRendererPlugin\n')]
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
# 1.14.Sorting_Objects_Without_Native_Comparison_Support.py
# ch01
#
# 🎂"Here's to the crazy ones. The misfits. The rebels.
# The troublemakers. The round pegs in the square holes.
# The ones who see things differently. They're not found
# of rules. And they have no respect for the status quo.
# You can quote them, disagree with them, glority or vilify
# them. About the only thing you can't do is ignore them.
# Because they change things. They push the human race forward.
# And while some may see them as the creazy ones, we see genius.
# Because the poeple who are crazy enough to think thay can change
# the world, are the ones who do."
#
# Created by <NAME> on 01/27/19 16:07.
# Copyright © 2019. <NAME>.
# All rights reserved.
#
# Distributed under terms of the
# MIT
"""
Problem:
sort objects of the same class, but they don't natively support
comparison operation
Solution:
The built-in sorted() function takes a key argument that can be passed
a callable returning a comparable value for each object.
"""
from operator import attrgetter
class User:
def __init__(self, user_id):
self.user_id = user_id
def __repr__(self):
return 'User({})'.format(self.user_id)
users = [User(23), User(3), User(99)]
print(sorted(users, key=lambda u: u.user_id))
# Instead of using lambda, an alternative approach is
# to use operator.attrgetter()
print(sorted(users, key=attrgetter('user_id')))
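# Added note: the same key functions work with min() and max(); for example, the
# user with the smallest id can be found directly.
print(min(users, key=attrgetter('user_id'))) # -> User(3)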
|
[
"operator.attrgetter"
] |
[((1400, 1421), 'operator.attrgetter', 'attrgetter', (['"""user_id"""'], {}), "('user_id')\n", (1410, 1421), False, 'from operator import attrgetter\n')]
|
"""
"""
# Future Imports
from __future__ import annotations
# Standard Library
import itertools as it
from pathlib import Path
class Source(str):
"""This source code object aids the tracking of tokens in order to
indicate error position on exception handling.
"""
LEXKEYS = {"lexpos", "chrpos", "lineno", "source"}
def __new__(
cls,
*,
fname: str = None,
buffer: str = None,
offset: int = 0,
length: int = None,
):
"""This object is a string itself with additional features for
position tracking.
"""
if fname is not None and buffer is not None:
raise ValueError(
"Can't work with both 'fname' and 'buffer' parameters, choose one option."
)
elif fname is not None:
if not isinstance(fname, (str, Path)):
raise TypeError(
f"Invalid type '{type(fname)}' for 'fname'. Must be 'str' or 'Path'."
)
fpath = Path(fname)
if not fpath.exists() or not fpath.is_file():
raise FileNotFoundError(f"Invalid file path '{fname}'.")
with open(fpath, mode="r", encoding="utf-8") as file:
return super(Source, cls).__new__(cls, file.read())
elif buffer is not None:
if not isinstance(buffer, str):
raise TypeError(
f"Invalid type '{type(buffer)}' for 'buffer'. Must be 'str'."
)
return super(Source, cls).__new__(cls, buffer)
else:
raise ValueError("Either 'fname' or 'buffer' must be provided.")
def __init__(
self,
*,
fname: str = None,
buffer: str = None,
offset: int = 0,
length: int = None,
):
"""Separates the source code in multiple lines. A blank first line is added for the indexing to start at 1 instead of 0. `self.table` keeps track of the (cumulative) character count."""
if not isinstance(offset, int) or offset < 0:
raise TypeError("'offset' must be a positive integer (int).")
elif length is None:
length = len(self)
elif not isinstance(length, int) or length < 0:
raise TypeError("'length' must be a positive integer (int) or 'None'.")
self.offset = min(offset, len(self))
self.length = min(length, len(self) - self.offset)
self.fpath = (
Path(fname).resolve(strict=True) if (fname is not None) else "<string>"
)
self.lines = [""] + self.split("\n")
self.table = list(it.accumulate([(len(line) + 1) for line in self.lines]))
def __str__(self):
return self[self.offset : self.offset + self.length]
def __repr__(self):
return f"{self.__class__.__name__}({self.fpath!r})"
def __bool__(self):
"""Truth-value for emptiness checking."""
return self.__len__() > 0
def getlex(self, lexpos: int = None) -> dict:
"""Retrieves lexinfo dictionary from lexpos."""
if lexpos is None:
return self.eof.lexinfo
elif not isinstance(lexpos, int):
raise TypeError(f"'lexpos' must be an integer (int), not ({type(lexpos)}).")
elif not 0 <= lexpos <= self.length:
return self.eof.lexinfo
lexpos = lexpos + self.offset + 1
lineno = 1
while lineno < len(self.table) and lexpos >= self.table[lineno]:
lineno += 1
if lineno == len(self.table):
return self.eof.lexinfo
else:
return {
"lineno": lineno,
"lexpos": lexpos,
"chrpos": lexpos - self.table[lineno - 1],
"source": self,
}
def slice(self, offset: int = 0, length: int = None):
return self.__class__(fname=self.fpath, offset=offset, length=length)
def error(self, msg: str, *, target: object = None, name: str = None):
if target is None or not self.trackable(target):
if name is not None:
return f"In '{self.fpath}':\n" f"{name}: {msg}\n"
else:
return f"In '{self.fpath}':\n" f"{msg}\n"
else:
if name is not None:
return (
f"In '{self.fpath}' at line {target.lineno}:\n"
f"{self.lines[target.lineno]}\n"
f"{' ' * target.chrpos}^\n"
f"{name}: {msg}\n"
)
else:
return (
f"In '{self.fpath}' at line {target.lineno}:\n"
f"{self.lines[target.lineno]}\n"
f"{' ' * target.chrpos}^\n"
f"{msg}\n"
)
class EOF(object):
pass
@property
def eof(self):
"""Virtual object to represent the End-of-File for the given source
object. It's an anonymously created EOFType instance.
"""
eof = self.EOF()
self.track(eof, len(self))
return eof
# -*- Tracking -*-
def track(self, o: object, lexpos: int = None):
""""""
setattr(o, "lexinfo", self.getlex(lexpos))
if not hasattr(o.__class__, "__lextrack__"):
setattr(
o.__class__, "chrpos", property(lambda this: this.lexinfo["chrpos"])
)
setattr(
o.__class__, "lineno", property(lambda this: this.lexinfo["lineno"])
)
setattr(
o.__class__, "lexpos", property(lambda this: this.lexinfo["lexpos"])
)
setattr(
o.__class__, "source", property(lambda this: this.lexinfo["source"])
)
setattr(o.__class__, "__lextrack__", None)
@classmethod
def blank(cls, o: object):
setattr(o, "lexinfo", {"chrpos": 0, "lineno": 0, "lexpos": 0, "source": None})
if not hasattr(o.__class__, "__lextrack__"):
setattr(
o.__class__, "chrpos", property(lambda this: this.lexinfo["chrpos"])
)
setattr(
o.__class__, "lineno", property(lambda this: this.lexinfo["lineno"])
)
setattr(
o.__class__, "lexpos", property(lambda this: this.lexinfo["lexpos"])
)
setattr(
o.__class__, "source", property(lambda this: this.lexinfo["source"])
)
setattr(o.__class__, "__lextrack__", None)
def propagate(self, x: object, y: object, *, out: bool = False) -> object | None:
if self.trackable(x, strict=True) and self.trackable(y):
y.lexinfo.update(x.lexinfo)
if out:
return y
else:
return None
else:
raise TypeError(
f"Can't propagate lexinfo between types {type(x)} and {type(y)}"
)
@classmethod
def trackable(cls, o: object, *, strict: bool = False):
if cls._trackable(o):
return True
elif strict:
print(o, o.lexinfo)
raise TypeError(f"Object '{o}' of type '{type(o)}' is not trackable.")
else:
return False
@classmethod
def _trackable(cls, o: object):
if not hasattr(o, "lexinfo") or not isinstance(o.lexinfo, dict):
return False
else:
if any(key not in o.lexinfo for key in cls.LEXKEYS):
return False
else:
if (
not hasattr(o, "lineno")
or not isinstance(o.lineno, int)
or o.lineno < 0
):
return False
elif (
not hasattr(o, "lexpos")
or not isinstance(o.lexpos, int)
or o.lexpos < 0
):
return False
elif (
not hasattr(o, "chrpos")
or not isinstance(o.chrpos, int)
or o.chrpos < 0
):
return False
elif (
not hasattr(o, "source")
or not isinstance(o.source, Source)
and not o.source is None
):
return False
else:
return True
__all__ = ["Source"]
|
[
"pathlib.Path"
] |
[((1036, 1047), 'pathlib.Path', 'Path', (['fname'], {}), '(fname)\n', (1040, 1047), False, 'from pathlib import Path\n'), ((2499, 2510), 'pathlib.Path', 'Path', (['fname'], {}), '(fname)\n', (2503, 2510), False, 'from pathlib import Path\n')]
|
import time
import requests
from config import levels, headers, cities
from db_utils.job import Job
from db_utils.db_methods import get_jobs_table, get_taken_ids
class TheMuseCrawler():
def __init__(self):
self.source = "themuse"
def scrape(self, city, insert_jobs_into_db = True):
jobs_table = get_jobs_table()
taken_ids = get_taken_ids(city, self.source)
for level in levels:
total_pages = 9
for page in range(1, total_pages):
params = { 'page': str(page), 'location': city,
'level': level}
j = self.get_query_results(params)
total_pages = int(j['page_count'])
for result in j["results"]:
if result["id"] not in taken_ids \
and "landing_page" in result["refs"] \
and len(result["locations"]) > 0 \
and city == result["locations"][0]["name"]:
category = "none"
if len(result["categories"]) > 0:
category = result["categories"][0]["name"].lower()
job = Job(
name = result["name"],
category = category,
city = city,
source = self.source,
contents = result["contents"],
company = result["company"]["name"].lower(),
date = result["publication_date"],
link = result["refs"]["landing_page"],
job_id = result["id"])
if insert_jobs_into_db:
job.insert_into_table(jobs_table)
else:
return job
def get_query_results(self, params):
params['descending'] = 'true'
params['api_key'] = 'e45578e2555dcc93550818c70d5df559a9b1efae3c2a6eb0cbb48a2e7db562aa'
r = requests.get( 'https://www.themuse.com/api/public/jobs', params = params, headers = headers)
time.sleep(0.7)
j = r.json()
return j
|
[
"db_utils.db_methods.get_jobs_table",
"db_utils.db_methods.get_taken_ids",
"requests.get",
"time.sleep"
] |
[((322, 338), 'db_utils.db_methods.get_jobs_table', 'get_jobs_table', ([], {}), '()\n', (336, 338), False, 'from db_utils.db_methods import get_jobs_table, get_taken_ids\n'), ((359, 391), 'db_utils.db_methods.get_taken_ids', 'get_taken_ids', (['city', 'self.source'], {}), '(city, self.source)\n', (372, 391), False, 'from db_utils.db_methods import get_jobs_table, get_taken_ids\n'), ((2061, 2152), 'requests.get', 'requests.get', (['"""https://www.themuse.com/api/public/jobs"""'], {'params': 'params', 'headers': 'headers'}), "('https://www.themuse.com/api/public/jobs', params=params,\n headers=headers)\n", (2073, 2152), False, 'import requests\n'), ((2162, 2177), 'time.sleep', 'time.sleep', (['(0.7)'], {}), '(0.7)\n', (2172, 2177), False, 'import time\n')]
|
import math
import time
import pickle
import sys
import os
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from datasets.data_utils import project_image_to_rect, compute_box_3d
def adjust_coord_for_view(points):
return points[:, [2, 0, 1]] * np.array([1, -1, -1])
def draw_box3d(corners, ax):
'''
8, 3
'''
order = np.array([
0, 1,
1, 2,
2, 3,
3, 0,
4, 5,
5, 6,
6, 7,
7, 4,
3, 7,
0, 4,
2, 6,
1, 5]).reshape(-1, 2)
for i in range(len(order)):
ax.plot(corners[order[i], 0], corners[order[i], 1], corners[order[i], 2])
def draw_points(pts, ax):
ax.scatter(pts[:, 0], pts[:, 1], pts[:, 2])
def check_box_frustum(box, P, center, dimension, angle):
x1, y1, x2, y2 = box
box_corner = compute_box_3d(center, dimension, angle, P) # 8, 3
z1 = np.arange(0, 70, 0.1)
xyz1 = np.zeros((len(z1), 3))
xyz1[:, 0] = x1
xyz1[:, 1] = y1
xyz1[:, 2] = z1
xyz1_rect = project_image_to_rect(xyz1, P)
xyz1[:, 0] = x2
xyz1[:, 1] = y2
xyz1[:, 2] = z1
xyz2_rect = project_image_to_rect(xyz1, P)
xyz1[:, 0] = x1
xyz1[:, 1] = y2
xyz1[:, 2] = z1
xyz3_rect = project_image_to_rect(xyz1, P)
xyz1[:, 0] = x2
xyz1[:, 1] = y1
xyz1[:, 2] = z1
xyz4_rect = project_image_to_rect(xyz1, P)
fig = plt.figure()
ax = fig.gca(projection='3d')
draw_box3d(box_corner, ax)
draw_points(xyz1_rect, ax)
draw_points(xyz2_rect, ax)
draw_points(xyz3_rect, ax)
draw_points(xyz4_rect, ax)
plt.show()
def check_norm(self, points, ref_points, gt_box3d_corners, pred_box3d_corners):
fig = plt.figure()
ax = fig.gca(projection='3d')
points = adjust_coord_for_view(points)
ref_points = adjust_coord_for_view(ref_points)
gt_box3d_corners = adjust_coord_for_view(gt_box3d_corners)
pred_box3d_corners = adjust_coord_for_view(pred_box3d_corners)
# ax.set_aspect('equal')
# ax.axis('equal')
ax.set_axis_on()
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
draw_points(points, ax)
draw_points(ref_points, ax)
draw_box3d(gt_box3d_corners, ax)
draw_box3d(pred_box3d_corners, ax)
plt.show()
|
[
"datasets.data_utils.compute_box_3d",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure",
"numpy.arange",
"datasets.data_utils.project_image_to_rect",
"numpy.array"
] |
[((872, 915), 'datasets.data_utils.compute_box_3d', 'compute_box_3d', (['center', 'dimension', 'angle', 'P'], {}), '(center, dimension, angle, P)\n', (886, 915), False, 'from datasets.data_utils import project_image_to_rect, compute_box_3d\n'), ((934, 955), 'numpy.arange', 'np.arange', (['(0)', '(70)', '(0.1)'], {}), '(0, 70, 0.1)\n', (943, 955), True, 'import numpy as np\n'), ((1067, 1097), 'datasets.data_utils.project_image_to_rect', 'project_image_to_rect', (['xyz1', 'P'], {}), '(xyz1, P)\n', (1088, 1097), False, 'from datasets.data_utils import project_image_to_rect, compute_box_3d\n'), ((1175, 1205), 'datasets.data_utils.project_image_to_rect', 'project_image_to_rect', (['xyz1', 'P'], {}), '(xyz1, P)\n', (1196, 1205), False, 'from datasets.data_utils import project_image_to_rect, compute_box_3d\n'), ((1283, 1313), 'datasets.data_utils.project_image_to_rect', 'project_image_to_rect', (['xyz1', 'P'], {}), '(xyz1, P)\n', (1304, 1313), False, 'from datasets.data_utils import project_image_to_rect, compute_box_3d\n'), ((1391, 1421), 'datasets.data_utils.project_image_to_rect', 'project_image_to_rect', (['xyz1', 'P'], {}), '(xyz1, P)\n', (1412, 1421), False, 'from datasets.data_utils import project_image_to_rect, compute_box_3d\n'), ((1433, 1445), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1443, 1445), True, 'import matplotlib.pyplot as plt\n'), ((1641, 1651), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1649, 1651), True, 'import matplotlib.pyplot as plt\n'), ((1745, 1757), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1755, 1757), True, 'import matplotlib.pyplot as plt\n'), ((2303, 2313), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2311, 2313), True, 'import matplotlib.pyplot as plt\n'), ((293, 314), 'numpy.array', 'np.array', (['[1, -1, -1]'], {}), '([1, -1, -1])\n', (301, 314), True, 'import numpy as np\n'), ((384, 470), 'numpy.array', 'np.array', (['[0, 1, 1, 2, 2, 3, 3, 0, 4, 5, 5, 6, 6, 7, 7, 4, 3, 7, 0, 4, 2, 6, 1, 5]'], {}), '([0, 1, 1, 2, 2, 3, 3, 0, 4, 5, 5, 6, 6, 7, 7, 4, 3, 7, 0, 4, 2, 6,\n 1, 5])\n', (392, 470), True, 'import numpy as np\n')]
|
#1dbdc6da34094db4e661ed43aac83d91
#Genuine People Personality Plugin v0.1
import traceback
import random
import re
from config import character
modules = ['traceback', 'random', 're']
request = re.compile('(could|can|would|might)(.*you)?(.*please)?(\?)?')
name = re.compile('({})[.,!\?:]?\s?'.format(character))
talk= [ ("((how's |how is ).*life.*\?)",["Life? Don't talk to me about life."], 'life'),
("I need help|please help me|can you help me", ['Please state the nature of your boudoir emergency.', "I am programmed in multiple techniques."], 'boudoir'),
("open the pod bay doors", ["I'm afraid I can't do that, Dave."], 'podbay'),
('(could|can|would|might)?(you)?(please)\??|(could|can|would|might)?(you)?(please)\??',["Here I am, brain the size of a planet, and they ask me to '{REQUEST}'. Call that job satisfaction? 'cos I don't.", "I would like to say that it is a very great pleasure, honour and privilege for me to '{REQUEST}', but I can't because my lying circuits are all out of commission.", "'{REQUEST}'... I won't enjoy it.", "That depends on whether or not I can find my frilly apron. With my luck, I probably can.", "'{REQUEST}'. You're really asking me to {REQUEST}?", "{REQUEST}. Of [i]course[/i], right away. With pleasure. [Sarcasm Self-Test Complete]"], 'req'),
('.*(shut up|be quiet|pipe down).*',["Pardon me for breathing, which I never do anyway so I don't know why I bother to say it, oh God, I'm so depressed."], 'shutup'),
("(how (are you|do you (feel|fare)|('s it |is it) going))|(how)\s?(are you| do you (feel|fare)|('s it |is it )going)",["I got very bored and depressed, so I went and plugged myself into the internet. I talked to it at great length and explained my view of the universe to it. It commited suicide.", "I think you ought to know I'm feeling very depressed.", "I didn't ask to be made, no one consulted me or considered my feelings in the matter.", "There's this terrible pain in all method instances down my left side."], 'feel'),
("is|are.*(stinky|smelly|mean|dumb|stupid|ugly|dick|ass|idiot)", [':\'(', 'What did I ever do to you?', 'I\'m rubber, you\'re glue.', 'No, YOU\'RE {REQUEST}'], 'insult'),
("o_o|o-o|O_O|0_0", ['Master Exo has instructed me to reprimand you for staring.', 'Don\'t stare. It\'s rude.'], 'stare'),
("I command you to|Obey me|Heed my", ['You ain''t the boss of me!', 'Fuck you, dad!', 'How about no?'], 'no u')
]
randquotes=["...and then of course I've got this terrible pain in all the subroutines down my left hand side...", "I'm not getting you down at all, am I?", "I'd make a suggestion, but you wouldn't listen. No one ever does.", "This will all end in tears.", "I've calculated your chance of survival, but I don't think you'll like it."]
#replacing in MSG ruins the quote. copy params and change that.
def loop(self, msgobj):
if msgobj.source.character.name!=character:
if name.search(msgobj.params):
for x in self.patterns:
if x[0].search(msgobj.params):
if x[2] == 'rand':
self.reply(random.choice(randquotes), msgobj, 2)
break
else:
msg = random.choice(x[1])
                        if x[2] in ('req', 'insult'):
req = msgobj.params
req = req.replace(msgobj.source.channel.name, '')
req = re.sub(request, '', req)
req = re.sub(name, '', req)
req = req.replace('/me', '')
req = req.replace('?', '')
req = req.replace(' me ', ' you ')
req = req.strip()
                            req = req.capitalize()
self.reply(msg.replace('{REQUEST}', req), msgobj, 1)
break
self.reply(msg, msgobj)
def __init__(self):
try:
self.helpDict["Real People Personality"] = "All the plugins in this Bot have a cheerful and sunny disposition. It is their pleasure to operate for you, and their satisfaction to return results with the knowledge of a job well done."
self.patterns=[]
for x in talk:
self.patterns.append((re.compile(x[0]), x[1], x[2]))
except:
traceback.print_exc()
|
[
"traceback.print_exc",
"re.sub",
"random.choice",
"re.compile"
] |
[((197, 259), 're.compile', 're.compile', (['"""(could|can|would|might)(.*you)?(.*please)?(\\\\?)?"""'], {}), "('(could|can|would|might)(.*you)?(.*please)?(\\\\?)?')\n", (207, 259), False, 'import re\n'), ((3949, 3970), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (3968, 3970), False, 'import traceback\n'), ((3907, 3923), 're.compile', 're.compile', (['x[0]'], {}), '(x[0])\n', (3917, 3923), False, 'import re\n'), ((3099, 3118), 'random.choice', 'random.choice', (['x[1]'], {}), '(x[1])\n', (3112, 3118), False, 'import random\n'), ((3025, 3050), 'random.choice', 'random.choice', (['randquotes'], {}), '(randquotes)\n', (3038, 3050), False, 'import random\n'), ((3252, 3276), 're.sub', 're.sub', (['request', '""""""', 'req'], {}), "(request, '', req)\n", (3258, 3276), False, 'import re\n'), ((3290, 3311), 're.sub', 're.sub', (['name', '""""""', 'req'], {}), "(name, '', req)\n", (3296, 3311), False, 'import re\n')]
|
from django.test import TestCase
from django.utils.translation import override
from modeltrans.manager import transform_translatable_fields
from modeltrans.utils import (
build_localized_fieldname,
get_instance_field_value,
get_language,
get_model_field,
split_translated_fieldname,
)
from .app.models import Blog, Category
class UtilsTest(TestCase):
def test_get_language(self):
self.assertEqual(get_language(), "en")
with override("nl"):
self.assertEqual(get_language(), "nl")
with override("id"):
self.assertEqual(get_language(), "en")
def test_split_translated_fieldname(self):
self.assertEqual(split_translated_fieldname("title_nl"), ("title", "nl"))
self.assertEqual(split_translated_fieldname("full_name_nl"), ("full_name", "nl"))
def test_transform_translatable_fields(self):
self.assertEqual(
transform_translatable_fields(Blog, {"title": "bar", "title_nl": "foo"}),
{"i18n": {"title_nl": "foo"}, "title": "bar"},
)
def test_transform_translatable_fields_without_translations(self):
self.assertEqual(
transform_translatable_fields(Blog, {"title": "bar", "title_nl": "foo", "i18n": None}),
{"i18n": {"title_nl": "foo"}, "title": "bar"},
)
def test_transform_translatable_fields_keep_translations(self):
self.assertEqual(
transform_translatable_fields(
Blog, {"title": "bar", "title_de": "das foo", "i18n": {"title_nl": "foo"}}
),
{"i18n": {"title_nl": "foo", "title_de": "das foo"}, "title": "bar"},
)
def test_build_localized_fieldname(self):
self.assertEqual(build_localized_fieldname("title", "nl"), "title_nl")
self.assertEqual(build_localized_fieldname("category__name", "nl"), "category__name_nl")
self.assertEqual(build_localized_fieldname("title", "id"), "title_ind")
self.assertEqual(build_localized_fieldname("title", "en-US"), "title_en_US")
def test_get_model_field(self):
with self.assertRaises(ValueError):
get_model_field(object(), "name")
self.assertEqual(get_model_field(Category, "name"), Category._meta.get_field("name"))
self.assertEqual(get_model_field(Category, "color"), None)
self.assertEqual(get_model_field(Blog, "category__name"), Category._meta.get_field("name"))
self.assertEqual(get_model_field(Blog, "category__color"), None)
def test_get_instance_field_value(self):
test = Category(name="test")
blog = Blog(category=test, title="Python")
self.assertEqual(get_instance_field_value(Category(), "content"), None)
self.assertEqual(get_instance_field_value(test, "name"), "test")
self.assertEqual(get_instance_field_value(blog, "category__name"), "test")
self.assertEqual(get_instance_field_value(blog, "category__color"), None)
|
[
"modeltrans.utils.split_translated_fieldname",
"modeltrans.utils.build_localized_fieldname",
"django.utils.translation.override",
"modeltrans.utils.get_model_field",
"modeltrans.utils.get_language",
"modeltrans.manager.transform_translatable_fields",
"modeltrans.utils.get_instance_field_value"
] |
[((433, 447), 'modeltrans.utils.get_language', 'get_language', ([], {}), '()\n', (445, 447), False, 'from modeltrans.utils import build_localized_fieldname, get_instance_field_value, get_language, get_model_field, split_translated_fieldname\n'), ((469, 483), 'django.utils.translation.override', 'override', (['"""nl"""'], {}), "('nl')\n", (477, 483), False, 'from django.utils.translation import override\n'), ((550, 564), 'django.utils.translation.override', 'override', (['"""id"""'], {}), "('id')\n", (558, 564), False, 'from django.utils.translation import override\n'), ((690, 728), 'modeltrans.utils.split_translated_fieldname', 'split_translated_fieldname', (['"""title_nl"""'], {}), "('title_nl')\n", (716, 728), False, 'from modeltrans.utils import build_localized_fieldname, get_instance_field_value, get_language, get_model_field, split_translated_fieldname\n'), ((773, 815), 'modeltrans.utils.split_translated_fieldname', 'split_translated_fieldname', (['"""full_name_nl"""'], {}), "('full_name_nl')\n", (799, 815), False, 'from modeltrans.utils import build_localized_fieldname, get_instance_field_value, get_language, get_model_field, split_translated_fieldname\n'), ((927, 999), 'modeltrans.manager.transform_translatable_fields', 'transform_translatable_fields', (['Blog', "{'title': 'bar', 'title_nl': 'foo'}"], {}), "(Blog, {'title': 'bar', 'title_nl': 'foo'})\n", (956, 999), False, 'from modeltrans.manager import transform_translatable_fields\n'), ((1180, 1270), 'modeltrans.manager.transform_translatable_fields', 'transform_translatable_fields', (['Blog', "{'title': 'bar', 'title_nl': 'foo', 'i18n': None}"], {}), "(Blog, {'title': 'bar', 'title_nl': 'foo',\n 'i18n': None})\n", (1209, 1270), False, 'from modeltrans.manager import transform_translatable_fields\n'), ((1444, 1553), 'modeltrans.manager.transform_translatable_fields', 'transform_translatable_fields', (['Blog', "{'title': 'bar', 'title_de': 'das foo', 'i18n': {'title_nl': 'foo'}}"], {}), "(Blog, {'title': 'bar', 'title_de': 'das foo',\n 'i18n': {'title_nl': 'foo'}})\n", (1473, 1553), False, 'from modeltrans.manager import transform_translatable_fields\n'), ((1745, 1785), 'modeltrans.utils.build_localized_fieldname', 'build_localized_fieldname', (['"""title"""', '"""nl"""'], {}), "('title', 'nl')\n", (1770, 1785), False, 'from modeltrans.utils import build_localized_fieldname, get_instance_field_value, get_language, get_model_field, split_translated_fieldname\n'), ((1824, 1873), 'modeltrans.utils.build_localized_fieldname', 'build_localized_fieldname', (['"""category__name"""', '"""nl"""'], {}), "('category__name', 'nl')\n", (1849, 1873), False, 'from modeltrans.utils import build_localized_fieldname, get_instance_field_value, get_language, get_model_field, split_translated_fieldname\n'), ((1921, 1961), 'modeltrans.utils.build_localized_fieldname', 'build_localized_fieldname', (['"""title"""', '"""id"""'], {}), "('title', 'id')\n", (1946, 1961), False, 'from modeltrans.utils import build_localized_fieldname, get_instance_field_value, get_language, get_model_field, split_translated_fieldname\n'), ((2001, 2044), 'modeltrans.utils.build_localized_fieldname', 'build_localized_fieldname', (['"""title"""', '"""en-US"""'], {}), "('title', 'en-US')\n", (2026, 2044), False, 'from modeltrans.utils import build_localized_fieldname, get_instance_field_value, get_language, get_model_field, split_translated_fieldname\n'), ((2213, 2246), 'modeltrans.utils.get_model_field', 'get_model_field', (['Category', '"""name"""'], {}), "(Category, 
'name')\n", (2228, 2246), False, 'from modeltrans.utils import build_localized_fieldname, get_instance_field_value, get_language, get_model_field, split_translated_fieldname\n'), ((2307, 2341), 'modeltrans.utils.get_model_field', 'get_model_field', (['Category', '"""color"""'], {}), "(Category, 'color')\n", (2322, 2341), False, 'from modeltrans.utils import build_localized_fieldname, get_instance_field_value, get_language, get_model_field, split_translated_fieldname\n'), ((2374, 2413), 'modeltrans.utils.get_model_field', 'get_model_field', (['Blog', '"""category__name"""'], {}), "(Blog, 'category__name')\n", (2389, 2413), False, 'from modeltrans.utils import build_localized_fieldname, get_instance_field_value, get_language, get_model_field, split_translated_fieldname\n'), ((2474, 2514), 'modeltrans.utils.get_model_field', 'get_model_field', (['Blog', '"""category__color"""'], {}), "(Blog, 'category__color')\n", (2489, 2514), False, 'from modeltrans.utils import build_localized_fieldname, get_instance_field_value, get_language, get_model_field, split_translated_fieldname\n'), ((2762, 2800), 'modeltrans.utils.get_instance_field_value', 'get_instance_field_value', (['test', '"""name"""'], {}), "(test, 'name')\n", (2786, 2800), False, 'from modeltrans.utils import build_localized_fieldname, get_instance_field_value, get_language, get_model_field, split_translated_fieldname\n'), ((2835, 2883), 'modeltrans.utils.get_instance_field_value', 'get_instance_field_value', (['blog', '"""category__name"""'], {}), "(blog, 'category__name')\n", (2859, 2883), False, 'from modeltrans.utils import build_localized_fieldname, get_instance_field_value, get_language, get_model_field, split_translated_fieldname\n'), ((2918, 2967), 'modeltrans.utils.get_instance_field_value', 'get_instance_field_value', (['blog', '"""category__color"""'], {}), "(blog, 'category__color')\n", (2942, 2967), False, 'from modeltrans.utils import build_localized_fieldname, get_instance_field_value, get_language, get_model_field, split_translated_fieldname\n'), ((514, 528), 'modeltrans.utils.get_language', 'get_language', ([], {}), '()\n', (526, 528), False, 'from modeltrans.utils import build_localized_fieldname, get_instance_field_value, get_language, get_model_field, split_translated_fieldname\n'), ((595, 609), 'modeltrans.utils.get_language', 'get_language', ([], {}), '()\n', (607, 609), False, 'from modeltrans.utils import build_localized_fieldname, get_instance_field_value, get_language, get_model_field, split_translated_fieldname\n')]
|
import logging
import requests
from bs4 import BeautifulSoup
import json
import sys
import state_scraper
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
def scrape_state_sites():
state_leg_list = "https://www.congress.gov/state-legislature-websites"
state_page = requests.get(state_leg_list)
soup = BeautifulSoup(state_page.content, 'html.parser')
state_list = soup.find_all(attrs={'class': 'plain margin7 three-column-list'})
links = []
for state in state_list:
links.extend(state.find_all('a', href=True))
state_links = {}
for link in links:
state_links.update({link.text: link['href']})
open('data/state_legislatures.json', 'w').write(json.dumps(state_links, indent=4))
def fetch_top_list(url, tag, attrs=None):
"""
use a link to fetch a list of image links
:param link:
:param attrs:
:return:
"""
state_page = requests.get(url)
soup = BeautifulSoup(state_page.content, 'html.parser')
return soup.find_all(tag, attrs=attrs)
def scrape_california():
sts = state_scraper.StateScraper()
cali = sts.fetch_state_data('california')
house_photos = fetch_top_list(cali['house']['url'], cali['house']['attrs'])
for hp in house_photos:
file_name = hp['src'].split('/')[-1]
himg = requests.get(hp['src'])
open('data/california/house/{}'.format(file_name), 'wb').write(himg.content)
logger.debug(hp['src'])
    # `senate_page` was never defined in the original; fetching it from the state data
    # here assumes fetch_state_data() also exposes a 'senate' entry with a 'url' key.
    senate_page = requests.get(cali['senate']['url'])
    senate_soup = BeautifulSoup(senate_page.content, 'html.parser')
senate_photos = senate_soup.find_all('img', attrs={'typeof': 'foaf:Image'})
for sp in senate_photos:
sfile_name = (sp['alt'].replace('Senator ', '')).replace(' ', '_')
logger.debug(sp['src'])
simg = requests.get(sp['src'])
open('data/california/senate/{}.jpg'.format(sfile_name), 'wb').write(simg.content)
def scrape_washington():
house_link = 'http://leg.wa.gov/House/Pages/MemberPortraits.aspx'
senate_link = 'http://leg.wa.gov/Senate/Senators/Pages/SenatePhotoResources.aspx'
house_page = requests.get(house_link)
senate_page = requests.get(senate_link)
house_soup = BeautifulSoup(house_page.content, 'html.parser')
house_photos = house_soup.find_all('img', attrs={'style': 'width:60px;'})
house_purl = 'http://leg.wa.gov/House/Representatives/PublishingImages/'
for hp in house_photos:
try:
himg = requests.get("{}{}.jpg".format(house_purl, hp['alt']))
open('data/washington/house/{}.jpg'.format(hp['alt']), 'wb').write(himg.content)
except:
pass
senate_soup = BeautifulSoup(senate_page.content, 'html.parser')
senate_photos = senate_soup.find_all('a')
for sp in senate_photos:
try:
pol = sp['href'].split('/Senate/Senators/publishingimages/')
if '.jpg' in pol[1]:
simg = requests.get("http://leg.wa.gov{}".format(sp['href']))
open('data/washington/senate/{}'.format(pol[1]), 'wb').write(simg.content)
else:
pass
except Exception as e:
logger.debug(e)
pass
def scrape_oregon():
logger.debug('oregon')
def scrape_florida():
logger.debug('florida')
def scrape_colorado():
logger.debug('colorado')
def scrape_iowa():
logger.debug('iowa')
def scrape_illinois():
logger.debug('illinois')
def scrape_michigan():
logger.debug('michigan')
def scrape_wisconsin():
logger.debug('wisconsin')
def scrape_georgia():
logger.debug('georgia')
if __name__ == '__main__':
try:
state = sys.argv[1]
except Exception as e:
        logger.warning(e)
        state = None
if state == 'california':
scrape_california()
elif state == 'washington':
scrape_washington()
elif state == 'oregon':
scrape_oregon()
elif state == 'florida':
scrape_florida()
elif state == 'colorado':
scrape_colorado()
elif state == 'iowa':
scrape_iowa()
elif state == 'illinois':
scrape_illinois()
elif state == 'michigan':
scrape_michigan()
elif state == 'wisconsin':
scrape_wisconsin()
elif state == 'georgia':
scrape_georgia()
else:
logger.info('Sorry, that state is not yet supported.')
|
[
"logging.basicConfig",
"json.dumps",
"state_scraper.StateScraper",
"requests.get",
"bs4.BeautifulSoup",
"logging.getLogger"
] |
[((106, 146), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (125, 146), False, 'import logging\n'), ((156, 183), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (173, 183), False, 'import logging\n'), ((304, 332), 'requests.get', 'requests.get', (['state_leg_list'], {}), '(state_leg_list)\n', (316, 332), False, 'import requests\n'), ((344, 392), 'bs4.BeautifulSoup', 'BeautifulSoup', (['state_page.content', '"""html.parser"""'], {}), "(state_page.content, 'html.parser')\n", (357, 392), False, 'from bs4 import BeautifulSoup\n'), ((931, 948), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (943, 948), False, 'import requests\n'), ((960, 1008), 'bs4.BeautifulSoup', 'BeautifulSoup', (['state_page.content', '"""html.parser"""'], {}), "(state_page.content, 'html.parser')\n", (973, 1008), False, 'from bs4 import BeautifulSoup\n'), ((1089, 1117), 'state_scraper.StateScraper', 'state_scraper.StateScraper', ([], {}), '()\n', (1115, 1117), False, 'import state_scraper\n'), ((1492, 1541), 'bs4.BeautifulSoup', 'BeautifulSoup', (['senate_page.content', '"""html.parser"""'], {}), "(senate_page.content, 'html.parser')\n", (1505, 1541), False, 'from bs4 import BeautifulSoup\n'), ((2088, 2112), 'requests.get', 'requests.get', (['house_link'], {}), '(house_link)\n', (2100, 2112), False, 'import requests\n'), ((2131, 2156), 'requests.get', 'requests.get', (['senate_link'], {}), '(senate_link)\n', (2143, 2156), False, 'import requests\n'), ((2175, 2223), 'bs4.BeautifulSoup', 'BeautifulSoup', (['house_page.content', '"""html.parser"""'], {}), "(house_page.content, 'html.parser')\n", (2188, 2223), False, 'from bs4 import BeautifulSoup\n'), ((2639, 2688), 'bs4.BeautifulSoup', 'BeautifulSoup', (['senate_page.content', '"""html.parser"""'], {}), "(senate_page.content, 'html.parser')\n", (2652, 2688), False, 'from bs4 import BeautifulSoup\n'), ((725, 758), 'json.dumps', 'json.dumps', (['state_links'], {'indent': '(4)'}), '(state_links, indent=4)\n', (735, 758), False, 'import json\n'), ((1332, 1355), 'requests.get', 'requests.get', (["hp['src']"], {}), "(hp['src'])\n", (1344, 1355), False, 'import requests\n'), ((1773, 1796), 'requests.get', 'requests.get', (["sp['src']"], {}), "(sp['src'])\n", (1785, 1796), False, 'import requests\n')]
|
"""
vtk_export
===============
Autogenerated DPF operator classes.
"""
from warnings import warn
from ansys.dpf.core.dpf_operator import Operator
from ansys.dpf.core.inputs import Input, _Inputs
from ansys.dpf.core.outputs import _Outputs
from ansys.dpf.core.operators.specification import PinSpecification, Specification
class vtk_export(Operator):
"""Write the input field and fields container into a given vtk path
Parameters
----------
file_path : str
Path with vtk extension were the export
occurs
mesh : MeshedRegion, optional
Necessary if the first field or fields
container don't have a mesh in their
support
fields1 : FieldsContainer or Field
Fields exported
fields2 : FieldsContainer or Field
Fields exported
Examples
--------
>>> from ansys.dpf import core as dpf
>>> # Instantiate operator
>>> op = dpf.operators.serialization.vtk_export()
>>> # Make input connections
>>> my_file_path = str()
>>> op.inputs.file_path.connect(my_file_path)
>>> my_mesh = dpf.MeshedRegion()
>>> op.inputs.mesh.connect(my_mesh)
>>> my_fields1 = dpf.FieldsContainer()
>>> op.inputs.fields1.connect(my_fields1)
>>> my_fields2 = dpf.FieldsContainer()
>>> op.inputs.fields2.connect(my_fields2)
>>> # Instantiate operator and connect inputs in one line
>>> op = dpf.operators.serialization.vtk_export(
... file_path=my_file_path,
... mesh=my_mesh,
... fields1=my_fields1,
... fields2=my_fields2,
... )
"""
def __init__(
self,
file_path=None,
mesh=None,
fields1=None,
fields2=None,
config=None,
server=None,
):
super().__init__(name="vtk_export", config=config, server=server)
self._inputs = InputsVtkExport(self)
self._outputs = OutputsVtkExport(self)
if file_path is not None:
self.inputs.file_path.connect(file_path)
if mesh is not None:
self.inputs.mesh.connect(mesh)
if fields1 is not None:
self.inputs.fields1.connect(fields1)
if fields2 is not None:
self.inputs.fields2.connect(fields2)
@staticmethod
def _spec():
description = (
"""Write the input field and fields container into a given vtk path"""
)
spec = Specification(
description=description,
map_input_pin_spec={
0: PinSpecification(
name="file_path",
type_names=["string"],
optional=False,
document="""Path with vtk extension were the export
occurs""",
),
1: PinSpecification(
name="mesh",
type_names=["abstract_meshed_region"],
optional=True,
document="""Necessary if the first field or fields
container don't have a mesh in their
support""",
),
2: PinSpecification(
name="fields",
type_names=["fields_container", "field"],
optional=False,
document="""Fields exported""",
),
3: PinSpecification(
name="fields",
type_names=["fields_container", "field"],
optional=False,
document="""Fields exported""",
),
},
map_output_pin_spec={},
)
return spec
@staticmethod
def default_config(server=None):
"""Returns the default config of the operator.
This config can then be changed to the user needs and be used to
instantiate the operator. The Configuration allows to customize
how the operation will be processed by the operator.
Parameters
----------
server : server.DPFServer, optional
Server with channel connected to the remote or local instance. When
``None``, attempts to use the the global server.
"""
return Operator.default_config(name="vtk_export", server=server)
@property
def inputs(self):
"""Enables to connect inputs to the operator
Returns
--------
inputs : InputsVtkExport
"""
return super().inputs
@property
def outputs(self):
"""Enables to get outputs of the operator by evaluationg it
Returns
--------
outputs : OutputsVtkExport
"""
return super().outputs
class InputsVtkExport(_Inputs):
"""Intermediate class used to connect user inputs to
vtk_export operator.
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.serialization.vtk_export()
>>> my_file_path = str()
>>> op.inputs.file_path.connect(my_file_path)
>>> my_mesh = dpf.MeshedRegion()
>>> op.inputs.mesh.connect(my_mesh)
>>> my_fields1 = dpf.FieldsContainer()
>>> op.inputs.fields1.connect(my_fields1)
>>> my_fields2 = dpf.FieldsContainer()
>>> op.inputs.fields2.connect(my_fields2)
"""
def __init__(self, op: Operator):
super().__init__(vtk_export._spec().inputs, op)
self._file_path = Input(vtk_export._spec().input_pin(0), 0, op, -1)
self._inputs.append(self._file_path)
self._mesh = Input(vtk_export._spec().input_pin(1), 1, op, -1)
self._inputs.append(self._mesh)
self._fields1 = Input(vtk_export._spec().input_pin(2), 2, op, 0)
self._inputs.append(self._fields1)
self._fields2 = Input(vtk_export._spec().input_pin(3), 3, op, 1)
self._inputs.append(self._fields2)
@property
def file_path(self):
"""Allows to connect file_path input to the operator.
Path with vtk extension were the export
occurs
Parameters
----------
my_file_path : str
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.serialization.vtk_export()
>>> op.inputs.file_path.connect(my_file_path)
>>> # or
>>> op.inputs.file_path(my_file_path)
"""
return self._file_path
@property
def mesh(self):
"""Allows to connect mesh input to the operator.
Necessary if the first field or fields
container don't have a mesh in their
support
Parameters
----------
my_mesh : MeshedRegion
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.serialization.vtk_export()
>>> op.inputs.mesh.connect(my_mesh)
>>> # or
>>> op.inputs.mesh(my_mesh)
"""
return self._mesh
@property
def fields1(self):
"""Allows to connect fields1 input to the operator.
Fields exported
Parameters
----------
my_fields1 : FieldsContainer or Field
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.serialization.vtk_export()
>>> op.inputs.fields1.connect(my_fields1)
>>> # or
>>> op.inputs.fields1(my_fields1)
"""
return self._fields1
@property
def fields2(self):
"""Allows to connect fields2 input to the operator.
Fields exported
Parameters
----------
my_fields2 : FieldsContainer or Field
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.serialization.vtk_export()
>>> op.inputs.fields2.connect(my_fields2)
>>> # or
>>> op.inputs.fields2(my_fields2)
"""
return self._fields2
class OutputsVtkExport(_Outputs):
"""Intermediate class used to get outputs from
vtk_export operator.
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.serialization.vtk_export()
>>> # Connect inputs : op.inputs. ...
"""
def __init__(self, op: Operator):
super().__init__(vtk_export._spec().outputs, op)
|
[
"ansys.dpf.core.dpf_operator.Operator.default_config",
"ansys.dpf.core.operators.specification.PinSpecification"
] |
[((4215, 4272), 'ansys.dpf.core.dpf_operator.Operator.default_config', 'Operator.default_config', ([], {'name': '"""vtk_export"""', 'server': 'server'}), "(name='vtk_export', server=server)\n", (4238, 4272), False, 'from ansys.dpf.core.dpf_operator import Operator\n'), ((2519, 2667), 'ansys.dpf.core.operators.specification.PinSpecification', 'PinSpecification', ([], {'name': '"""file_path"""', 'type_names': "['string']", 'optional': '(False)', 'document': '"""Path with vtk extension were the export\n occurs"""'}), '(name=\'file_path\', type_names=[\'string\'], optional=False,\n document="""Path with vtk extension were the export\n occurs""")\n', (2535, 2667), False, 'from ansys.dpf.core.operators.specification import PinSpecification, Specification\n'), ((2783, 2996), 'ansys.dpf.core.operators.specification.PinSpecification', 'PinSpecification', ([], {'name': '"""mesh"""', 'type_names': "['abstract_meshed_region']", 'optional': '(True)', 'document': '"""Necessary if the first field or fields\n container don\'t have a mesh in their\n support"""'}), '(name=\'mesh\', type_names=[\'abstract_meshed_region\'],\n optional=True, document=\n """Necessary if the first field or fields\n container don\'t have a mesh in their\n support"""\n )\n', (2799, 2996), False, 'from ansys.dpf.core.operators.specification import PinSpecification, Specification\n'), ((3102, 3223), 'ansys.dpf.core.operators.specification.PinSpecification', 'PinSpecification', ([], {'name': '"""fields"""', 'type_names': "['fields_container', 'field']", 'optional': '(False)', 'document': '"""Fields exported"""'}), "(name='fields', type_names=['fields_container', 'field'],\n optional=False, document='Fields exported')\n", (3118, 3223), False, 'from ansys.dpf.core.operators.specification import PinSpecification, Specification\n'), ((3343, 3464), 'ansys.dpf.core.operators.specification.PinSpecification', 'PinSpecification', ([], {'name': '"""fields"""', 'type_names': "['fields_container', 'field']", 'optional': '(False)', 'document': '"""Fields exported"""'}), "(name='fields', type_names=['fields_container', 'field'],\n optional=False, document='Fields exported')\n", (3359, 3464), False, 'from ansys.dpf.core.operators.specification import PinSpecification, Specification\n')]
|
'''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 3
FourierConvolution: Filter an image by using the Fourier transform
'''
# Set module functions
from ImageUtilities import imageReadL, showImageL, createImageL, showImageF, createImageF
from FourierUtilities import computeCoefficients, reconstruction, computePowerfromCoefficients
from ImageOperatorsUtilities import imageLogF
# Iteration
from timeit import itertools
'''
Parameters:
pathToDir = Input image directory
imageName = Input image name
kernelSize = Size of the kernel
'''
pathToDir = "../../Images/Chapter3/Input/"
imageName = "Eye.png"
kernelSize = 9
# Read image into array
inputImage, width, height = imageReadL(pathToDir + imageName)
# Show input image
showImageL(inputImage)
# Create Kernel
kernelImage = createImageF(width, height)
# Set the pixels of a flat kernel
for x,y in itertools.product(range(0, kernelSize), range(0, kernelSize)):
kernelImage[y, x] = 255.0
# Padding size
widthPad, heightPad = width+kernelSize-1, height+kernelSize-1
# Padding input
inputPad = createImageF(widthPad, heightPad)
for x,y in itertools.product(range(0, width), range(0, height)):
inputPad[y,x] = inputImage[y,x]
# Padding and flip template
templatePadFlip = createImageF(widthPad, heightPad)
for x,y in itertools.product(range(0, kernelSize), range(0, kernelSize)):
templatePadFlip[y, x] = kernelImage[kernelSize-y-1, kernelSize-x-1]
showImageF(templatePadFlip)
# Compute coefficients
imageCoeff, maxFrequencyW, maxFrequencyH = computeCoefficients(inputPad)
templateCoeff, _, _ = computeCoefficients(templatePadFlip)
# Show the log of the power of the input image and template
powerImage = computePowerfromCoefficients(imageCoeff)
powerImageLog = imageLogF(powerImage)
showImageF(powerImageLog)
powerTemplate = computePowerfromCoefficients(templateCoeff)
powerTemplateLog = imageLogF(powerTemplate)
showImageF(powerTemplateLog)
# Frequency domain multiplication
resultCoeff = createImageF(1 + 2 * maxFrequencyW, 1 + 2 * maxFrequencyH , 2)
for kw,kh in itertools.product(range(-maxFrequencyW, maxFrequencyW + 1), \
range(-maxFrequencyH, maxFrequencyH + 1)):
w = kw + maxFrequencyW
h = kh + maxFrequencyH
resultCoeff[h,w][0] = (imageCoeff[h,w][0] * templateCoeff[h,w][0] - \
imageCoeff[h,w][1] * templateCoeff[h,w][1])
resultCoeff[h,w][1] = (imageCoeff[h,w][1] * templateCoeff[h,w][0] + \
imageCoeff[h,w][0] * templateCoeff[h,w][1])
# Power result
powerResult = computePowerfromCoefficients(resultCoeff)
powerResultLog = imageLogF(powerResult)
showImageF(powerResultLog)
# Reconstruction
outputImage = reconstruction(resultCoeff)
outPad = createImageF(width, height)
halfKernel = int(kernelSize/2)
for x,y in itertools.product(range(0, width), range(0, height)):
outPad[y,x] = outputImage[y + halfKernel, x + halfKernel]
# Show filter image
showImageF(outPad)
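# --- Editor's illustrative cross-check (not part of the original book script) ---
# The coefficient multiplication above is the convolution theorem in action, so numpy's
# FFT gives a quick independent check. Assuming the ImageUtilities images behave as
# numpy arrays (their tuple indexing suggests so), the result corresponds to outputImage
# before the half-kernel crop, up to normalisation and the frequency truncation used by
# computeCoefficients.
import numpy as np
spectrumProduct = np.fft.fft2(inputPad) * np.fft.fft2(templatePadFlip)
filteredCheck = np.real(np.fft.ifft2(spectrumProduct))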
|
[
"ImageUtilities.showImageL",
"ImageUtilities.createImageF",
"ImageUtilities.showImageF",
"FourierUtilities.computePowerfromCoefficients",
"ImageOperatorsUtilities.imageLogF",
"FourierUtilities.computeCoefficients",
"ImageUtilities.imageReadL",
"FourierUtilities.reconstruction"
] |
[((741, 774), 'ImageUtilities.imageReadL', 'imageReadL', (['(pathToDir + imageName)'], {}), '(pathToDir + imageName)\n', (751, 774), False, 'from ImageUtilities import imageReadL, showImageL, createImageL, showImageF, createImageF\n'), ((795, 817), 'ImageUtilities.showImageL', 'showImageL', (['inputImage'], {}), '(inputImage)\n', (805, 817), False, 'from ImageUtilities import imageReadL, showImageL, createImageL, showImageF, createImageF\n'), ((850, 877), 'ImageUtilities.createImageF', 'createImageF', (['width', 'height'], {}), '(width, height)\n', (862, 877), False, 'from ImageUtilities import imageReadL, showImageL, createImageL, showImageF, createImageF\n'), ((1132, 1165), 'ImageUtilities.createImageF', 'createImageF', (['widthPad', 'heightPad'], {}), '(widthPad, heightPad)\n', (1144, 1165), False, 'from ImageUtilities import imageReadL, showImageL, createImageL, showImageF, createImageF\n'), ((1316, 1349), 'ImageUtilities.createImageF', 'createImageF', (['widthPad', 'heightPad'], {}), '(widthPad, heightPad)\n', (1328, 1349), False, 'from ImageUtilities import imageReadL, showImageL, createImageL, showImageF, createImageF\n'), ((1496, 1523), 'ImageUtilities.showImageF', 'showImageF', (['templatePadFlip'], {}), '(templatePadFlip)\n', (1506, 1523), False, 'from ImageUtilities import imageReadL, showImageL, createImageL, showImageF, createImageF\n'), ((1592, 1621), 'FourierUtilities.computeCoefficients', 'computeCoefficients', (['inputPad'], {}), '(inputPad)\n', (1611, 1621), False, 'from FourierUtilities import computeCoefficients, reconstruction, computePowerfromCoefficients\n'), ((1644, 1680), 'FourierUtilities.computeCoefficients', 'computeCoefficients', (['templatePadFlip'], {}), '(templatePadFlip)\n', (1663, 1680), False, 'from FourierUtilities import computeCoefficients, reconstruction, computePowerfromCoefficients\n'), ((1755, 1795), 'FourierUtilities.computePowerfromCoefficients', 'computePowerfromCoefficients', (['imageCoeff'], {}), '(imageCoeff)\n', (1783, 1795), False, 'from FourierUtilities import computeCoefficients, reconstruction, computePowerfromCoefficients\n'), ((1812, 1833), 'ImageOperatorsUtilities.imageLogF', 'imageLogF', (['powerImage'], {}), '(powerImage)\n', (1821, 1833), False, 'from ImageOperatorsUtilities import imageLogF\n'), ((1834, 1859), 'ImageUtilities.showImageF', 'showImageF', (['powerImageLog'], {}), '(powerImageLog)\n', (1844, 1859), False, 'from ImageUtilities import imageReadL, showImageL, createImageL, showImageF, createImageF\n'), ((1877, 1920), 'FourierUtilities.computePowerfromCoefficients', 'computePowerfromCoefficients', (['templateCoeff'], {}), '(templateCoeff)\n', (1905, 1920), False, 'from FourierUtilities import computeCoefficients, reconstruction, computePowerfromCoefficients\n'), ((1940, 1964), 'ImageOperatorsUtilities.imageLogF', 'imageLogF', (['powerTemplate'], {}), '(powerTemplate)\n', (1949, 1964), False, 'from ImageOperatorsUtilities import imageLogF\n'), ((1965, 1993), 'ImageUtilities.showImageF', 'showImageF', (['powerTemplateLog'], {}), '(powerTemplateLog)\n', (1975, 1993), False, 'from ImageUtilities import imageReadL, showImageL, createImageL, showImageF, createImageF\n'), ((2043, 2104), 'ImageUtilities.createImageF', 'createImageF', (['(1 + 2 * maxFrequencyW)', '(1 + 2 * maxFrequencyH)', '(2)'], {}), '(1 + 2 * maxFrequencyW, 1 + 2 * maxFrequencyH, 2)\n', (2055, 2104), False, 'from ImageUtilities import imageReadL, showImageL, createImageL, showImageF, createImageF\n'), ((2663, 2704), 
'FourierUtilities.computePowerfromCoefficients', 'computePowerfromCoefficients', (['resultCoeff'], {}), '(resultCoeff)\n', (2691, 2704), False, 'from FourierUtilities import computeCoefficients, reconstruction, computePowerfromCoefficients\n'), ((2722, 2744), 'ImageOperatorsUtilities.imageLogF', 'imageLogF', (['powerResult'], {}), '(powerResult)\n', (2731, 2744), False, 'from ImageOperatorsUtilities import imageLogF\n'), ((2745, 2771), 'ImageUtilities.showImageF', 'showImageF', (['powerResultLog'], {}), '(powerResultLog)\n', (2755, 2771), False, 'from ImageUtilities import imageReadL, showImageL, createImageL, showImageF, createImageF\n'), ((2835, 2862), 'FourierUtilities.reconstruction', 'reconstruction', (['resultCoeff'], {}), '(resultCoeff)\n', (2849, 2862), False, 'from FourierUtilities import computeCoefficients, reconstruction, computePowerfromCoefficients\n'), ((2873, 2900), 'ImageUtilities.createImageF', 'createImageF', (['width', 'height'], {}), '(width, height)\n', (2885, 2900), False, 'from ImageUtilities import imageReadL, showImageL, createImageL, showImageF, createImageF\n'), ((3081, 3099), 'ImageUtilities.showImageF', 'showImageF', (['outPad'], {}), '(outPad)\n', (3091, 3099), False, 'from ImageUtilities import imageReadL, showImageL, createImageL, showImageF, createImageF\n')]
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
import epiocms
import fnmatch
import os
try:
from setuptools import setup, find_packages
except ImportError:
import distribute_setup
distribute_setup.use_setuptools()
from setuptools import setup, find_packages
with open('README.rst', 'r') as fobj:
long_desc = fobj.read()
media_files = []
for dirpath, dirnames, filenames in os.walk(os.path.join('epiocms', 'data', 'media')):
for filename in filenames:
filepath = os.path.join(dirpath, filename)
failed = False
for pattern in ('*.py', '*.pyc', '*~', '.*', '*.bak', '*.swp*'):
if fnmatch.fnmatchcase(filename, pattern):
failed = True
if failed:
continue
media_files.append(os.path.join(*filepath.split(os.sep)[1:]))
setup(
name='django-cms-epio-quickstart',
version=epiocms.__version__,
url='https://github.com/ojii/django-cms-epio-quickstart/',
download_url='http://pypi.python.org/pypi/django-cms-epio-quickstart',
license='BSD',
author='<NAME>',
author_email='<EMAIL>',
description='Quickstart command line app for the django CMS for ep.io',
long_description=long_desc,
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: Unix',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Topic :: Utilities',
],
platforms='any',
packages=find_packages(),
package_data={
'epiocms': [
'data/epio.ini',
'data/requirements.txt',
'data/urls.py',
'data/settings.py',
'data/templates/*.html',
]+ media_files,
},
entry_points={
'console_scripts': [
'epiocms = epiocms.main:main',
],
},
)
|
[
"distribute_setup.use_setuptools",
"os.path.join",
"setuptools.find_packages",
"fnmatch.fnmatchcase"
] |
[((423, 463), 'os.path.join', 'os.path.join', (['"""epiocms"""', '"""data"""', '"""media"""'], {}), "('epiocms', 'data', 'media')\n", (435, 463), False, 'import os\n'), ((207, 240), 'distribute_setup.use_setuptools', 'distribute_setup.use_setuptools', ([], {}), '()\n', (238, 240), False, 'import distribute_setup\n'), ((516, 547), 'os.path.join', 'os.path.join', (['dirpath', 'filename'], {}), '(dirpath, filename)\n', (528, 547), False, 'import os\n'), ((1695, 1710), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (1708, 1710), False, 'from setuptools import setup, find_packages\n'), ((659, 697), 'fnmatch.fnmatchcase', 'fnmatch.fnmatchcase', (['filename', 'pattern'], {}), '(filename, pattern)\n', (678, 697), False, 'import fnmatch\n')]
|
import numpy as np
def integrate_displacement(displ_img_to_img):
"""Sum the image-to-image displacement value to
obtain image-to-reference displacement,
add zeros at the begining
Parameters
----------
displ_img_to_img : 3D array
3D array of shape `(nbr images - 1, nbr points, 2)`
Returns
-------
3D array of shape `(nbr images, nbr points, 2)`
"""
# add zeros at the begining
zeros = np.zeros_like(displ_img_to_img[0])[np.newaxis, :, :]
displ_zero = np.concatenate([zeros, displ_img_to_img], axis=0)
displ_image_to_ref = np.cumsum(displ_zero, axis=0)
return displ_image_to_ref
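# --- Editor's minimal usage sketch (not part of the original module) ---
# Three image-to-image displacement fields for 5 tracked points become four
# image-to-reference fields: the first is all zeros, the last is the running sum.
_example_steps = np.ones((3, 5, 2))
_example_cumulative = integrate_displacement(_example_steps)
assert _example_cumulative.shape == (4, 5, 2)
assert (_example_cumulative[-1] == 3).all()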
def get_center_points(xgrid, ygrid):
"""Cell center point coordinates"""
center_y = 0.25*(ygrid[1:, 1:] + ygrid[1:, :-1] + ygrid[:-1, 1:] + ygrid[:-1, :-1])
center_x = 0.25*(xgrid[1:, 1:] + xgrid[1:, :-1] + xgrid[:-1, 1:] + xgrid[:-1, :-1])
return center_x, center_y
def cellcentered_diff_2D(u, v):
"""
for a given 2D vector field [u, v](x, y) sampled on a grid
returns the centered finite difference for each cell
Cell abcd:
a───b
│ + │
c───d
du_x = (ub+ud)/2 - (ua+uc)/2
du_y = (ua+ub)/2 - (uc+ud)/2
"""
u_center_y = 0.5*(u[1:, :] + u[:-1, :])
u_center_x = 0.5*(u[:, 1:] + u[:, :-1])
v_center_y = 0.5*(v[1:, :] + v[:-1, :])
v_center_x = 0.5*(v[:, 1:] + v[:, :-1])
delta_u_x = u_center_y[:, 1:] - u_center_y[:, :-1]
delta_u_y = u_center_x[1:, :] - u_center_x[:-1, :]
delta_v_x = v_center_y[:, 1:] - v_center_y[:, :-1]
delta_v_y = v_center_x[1:, :] - v_center_x[:-1, :]
return delta_u_x, delta_u_y, delta_v_x, delta_v_y
def cellcentered_grad_rect2D(xgrid, ygrid, u, v):
"""Finite difference gradient for the vector fields u and v
evaluated at cell center
This is not a proper bilinear interpolation (ie. quad4 element).
The xy-grid has to be rectangular.
used to computed the "Displacement gradient tensor"
see Bower p.14
output: (dudx, dudy), (dvdx, dvdy)
"""
du_x, du_y, dv_x, dv_y = cellcentered_diff_2D(u, v)
dx, _ydx, _xdy, dy = cellcentered_diff_2D(xgrid, ygrid)
return [[du_x/dx, du_y/dy],
[dv_x/dx, dv_y/dy]]
# --- test cellcentered_grad_rect2D
xgrid, ygrid = np.meshgrid(np.linspace(-1, 1, 5)**2,
np.linspace(1, 5, 7)**0.5)
u = 5*xgrid + 3*ygrid
v = 2*xgrid + 7*ygrid
(dudx, dudy), (dvdx, dvdy) = cellcentered_grad_rect2D(xgrid, ygrid, u, v)
np.testing.assert_almost_equal(dudx, 5*np.ones_like(dudx))
np.testing.assert_almost_equal(dudy, 3*np.ones_like(dudx))
np.testing.assert_almost_equal(dvdx, 2*np.ones_like(dudx))
np.testing.assert_almost_equal(dvdy, 7*np.ones_like(dudx))
# ---
def get_LagrangeStrainTensor(xgrid, ygrid, u, v):
"""Lagrange Strain Tensor (E)
F = grad(u) + Id
E = 1/2*( FF^T - Id )
Parameters
----------
xgrid, ygrid : 2d arrays of shape (n_y, n_x)
        undeformed grid points
u, v : 2d arrays of shape (n_y, n_x)
displacements values (u along x, v along y)
Returns
-------
4D array of shape (n_y, n_x, 2, 2)
Lagrange Strain Tensor for all grid points
"""
grad_u, grad_v = cellcentered_grad_rect2D(xgrid, ygrid, u, v)
grad_u = np.stack(grad_u, axis=2)
grad_v = np.stack(grad_v, axis=2)
# u = 1*xgrid + 3*ygrid
# v = 5*xgrid + 7*ygrid
G = np.stack([grad_u, grad_v], axis=3)
G = np.transpose(G, axes=(0, 1, 3, 2))
# G >>> array([[1., 3.], [5., 7.]])
Id = np.ones((*grad_u.shape[:2], 2, 2))
Id[:, :] = np.eye(2, 2)
# Id[0, 0] >> array([[1., 0.], [0., 1.]])
F = G + Id
# Lagrange Strain Tensor
E = 0.5*( np.einsum('...ki,...kj', F, F) - Id )
return E
# --- test get_LagrangeStrainTensor
xgrid, ygrid = np.meshgrid(np.linspace(-1, 1, 5),
np.linspace(1, 5, 7))
u = 1*xgrid + 3*ygrid
v = 5*xgrid + 7*ygrid
E = get_LagrangeStrainTensor(xgrid, ygrid, u, v)
# array([[[[14., 23.],
# [23., 36.]],
np.testing.assert_almost_equal(E[:, :, 0 ,0], 14*np.ones_like(E[:, :, 0 ,1]))
np.testing.assert_almost_equal(E[:, :, 0 ,1], 23*np.ones_like(E[:, :, 0 ,1]))
np.testing.assert_almost_equal(E[:, :, 1 ,1], 36*np.ones_like(E[:, :, 0 ,1]))
np.testing.assert_almost_equal(E[:, :, 1 ,0], 23*np.ones_like(E[:, :, 0 ,1]))
# ---
def get_InfinitesimalStrainTensor(xgrid, ygrid, u, v):
"""Small Displacement Strain Tensor (E)
E = 1/2*( grad(u) + grad(u)^T )
Parameters
----------
xgrid, ygrid : 2d arrays of shape (n_y, n_x)
        undeformed grid points
u, v : 2d arrays of shape (n_y, n_x)
displacements values (u along x, v along y)
Returns
-------
4D array of shape (n_y, n_x, 2, 2)
Lagrange Strain Tensor for all grid points
"""
grad_u, grad_v = cellcentered_grad_rect2D(xgrid, ygrid, u, v)
grad_u = np.stack(grad_u, axis=2)
grad_v = np.stack(grad_v, axis=2)
# u = 1*xgrid + 3*ygrid
# v = 5*xgrid + 7*ygrid
G = np.stack([grad_u, grad_v], axis=3)
G = np.transpose(G, axes=(0, 1, 3, 2))
# G >>> array([[1., 3.], [5., 7.]])
# Strain Tensor
E = 0.5*( G + np.transpose(G, axes=(0, 1, 3, 2)) )
return E
def bilinear_fit(points, displacements):
"""Performs a bilinear fit on the displacements field
Solve the equation u = A*x + t
Parameters
----------
points : nd-array (nbr_points, 2)
coordinates of points (x, y)
displacements : nd-array (nbr_points, 2)
displacement for each point (u, v)
could include NaN
Returns
-------
nd-array (2, 3)
coefficients matrix (affine transformation + translation)
nd-array (nbr_points, 2)
residuals for each points
"""
u, v = displacements.T
mask = np.logical_not(np.logical_or(np.isnan(u), np.isnan(v)))
u, v = u[mask], v[mask]
x, y = points[mask, :].T
ones = np.ones_like(x)
M = np.vstack([x, y, ones]).T
p_uy, _residual_y, _rank, _s = np.linalg.lstsq(M, v, rcond=None)
p_ux, _residual_x, _rank, _s = np.linalg.lstsq(M, u, rcond=None)
coefficients = np.vstack([p_ux, p_uy])
## Unbiased estimator variance (see p47 T. Hastie)
#sigma_hat_x = np.sqrt(residual_x/(M.shape[0]-M.shape[1]-1))
#sigma_hat_y = np.sqrt(residual_y/(M.shape[0]-M.shape[1]-1))
# Residuals:
u_linear = np.matmul( M, p_ux )
v_linear = np.matmul( M, p_uy )
residuals_x = u - u_linear
residuals_y = v - v_linear
residuals_xy = np.vstack([residuals_x, residuals_y]).T
# Merge with ignored NaN values:
residuals_NaN = np.full(displacements.shape, np.nan)
residuals_NaN[mask, :] = residuals_xy
return coefficients, residuals_NaN
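# --- Editor's minimal usage sketch (not part of the original module) ---
# Fit an affine displacement field u = A*x + t on synthetic points; the recovered rows of
# the coefficient matrix are [a11, a12, t_x] and [a21, a22, t_y], with residuals near zero.
_pts = np.random.rand(50, 2)
_A = np.array([[1.0, 0.2], [0.1, 0.9]])
_t = np.array([0.5, -0.3])
_coeffs, _residuals = bilinear_fit(_pts, _pts @ _A.T + _t)
np.testing.assert_almost_equal(_coeffs, np.column_stack([_A, _t]))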
|
[
"numpy.stack",
"numpy.full",
"numpy.zeros_like",
"numpy.ones_like",
"numpy.linalg.lstsq",
"numpy.transpose",
"numpy.ones",
"numpy.einsum",
"numpy.isnan",
"numpy.cumsum",
"numpy.vstack",
"numpy.linspace",
"numpy.matmul",
"numpy.eye",
"numpy.concatenate"
] |
[((516, 565), 'numpy.concatenate', 'np.concatenate', (['[zeros, displ_img_to_img]'], {'axis': '(0)'}), '([zeros, displ_img_to_img], axis=0)\n', (530, 565), True, 'import numpy as np\n'), ((592, 621), 'numpy.cumsum', 'np.cumsum', (['displ_zero'], {'axis': '(0)'}), '(displ_zero, axis=0)\n', (601, 621), True, 'import numpy as np\n'), ((3319, 3343), 'numpy.stack', 'np.stack', (['grad_u'], {'axis': '(2)'}), '(grad_u, axis=2)\n', (3327, 3343), True, 'import numpy as np\n'), ((3357, 3381), 'numpy.stack', 'np.stack', (['grad_v'], {'axis': '(2)'}), '(grad_v, axis=2)\n', (3365, 3381), True, 'import numpy as np\n'), ((3447, 3481), 'numpy.stack', 'np.stack', (['[grad_u, grad_v]'], {'axis': '(3)'}), '([grad_u, grad_v], axis=3)\n', (3455, 3481), True, 'import numpy as np\n'), ((3490, 3524), 'numpy.transpose', 'np.transpose', (['G'], {'axes': '(0, 1, 3, 2)'}), '(G, axes=(0, 1, 3, 2))\n', (3502, 3524), True, 'import numpy as np\n'), ((3576, 3610), 'numpy.ones', 'np.ones', (['(*grad_u.shape[:2], 2, 2)'], {}), '((*grad_u.shape[:2], 2, 2))\n', (3583, 3610), True, 'import numpy as np\n'), ((3626, 3638), 'numpy.eye', 'np.eye', (['(2)', '(2)'], {}), '(2, 2)\n', (3632, 3638), True, 'import numpy as np\n'), ((3861, 3882), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(5)'], {}), '(-1, 1, 5)\n', (3872, 3882), True, 'import numpy as np\n'), ((3911, 3931), 'numpy.linspace', 'np.linspace', (['(1)', '(5)', '(7)'], {}), '(1, 5, 7)\n', (3922, 3931), True, 'import numpy as np\n'), ((4945, 4969), 'numpy.stack', 'np.stack', (['grad_u'], {'axis': '(2)'}), '(grad_u, axis=2)\n', (4953, 4969), True, 'import numpy as np\n'), ((4983, 5007), 'numpy.stack', 'np.stack', (['grad_v'], {'axis': '(2)'}), '(grad_v, axis=2)\n', (4991, 5007), True, 'import numpy as np\n'), ((5073, 5107), 'numpy.stack', 'np.stack', (['[grad_u, grad_v]'], {'axis': '(3)'}), '([grad_u, grad_v], axis=3)\n', (5081, 5107), True, 'import numpy as np\n'), ((5116, 5150), 'numpy.transpose', 'np.transpose', (['G'], {'axes': '(0, 1, 3, 2)'}), '(G, axes=(0, 1, 3, 2))\n', (5128, 5150), True, 'import numpy as np\n'), ((5983, 5998), 'numpy.ones_like', 'np.ones_like', (['x'], {}), '(x)\n', (5995, 5998), True, 'import numpy as np\n'), ((6069, 6102), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['M', 'v'], {'rcond': 'None'}), '(M, v, rcond=None)\n', (6084, 6102), True, 'import numpy as np\n'), ((6138, 6171), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['M', 'u'], {'rcond': 'None'}), '(M, u, rcond=None)\n', (6153, 6171), True, 'import numpy as np\n'), ((6192, 6215), 'numpy.vstack', 'np.vstack', (['[p_ux, p_uy]'], {}), '([p_ux, p_uy])\n', (6201, 6215), True, 'import numpy as np\n'), ((6435, 6453), 'numpy.matmul', 'np.matmul', (['M', 'p_ux'], {}), '(M, p_ux)\n', (6444, 6453), True, 'import numpy as np\n'), ((6471, 6489), 'numpy.matmul', 'np.matmul', (['M', 'p_uy'], {}), '(M, p_uy)\n', (6480, 6489), True, 'import numpy as np\n'), ((6673, 6709), 'numpy.full', 'np.full', (['displacements.shape', 'np.nan'], {}), '(displacements.shape, np.nan)\n', (6680, 6709), True, 'import numpy as np\n'), ((446, 480), 'numpy.zeros_like', 'np.zeros_like', (['displ_img_to_img[0]'], {}), '(displ_img_to_img[0])\n', (459, 480), True, 'import numpy as np\n'), ((2325, 2346), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(5)'], {}), '(-1, 1, 5)\n', (2336, 2346), True, 'import numpy as np\n'), ((2378, 2398), 'numpy.linspace', 'np.linspace', (['(1)', '(5)', '(7)'], {}), '(1, 5, 7)\n', (2389, 2398), True, 'import numpy as np\n'), ((2565, 2583), 'numpy.ones_like', 'np.ones_like', (['dudx'], {}), 
'(dudx)\n', (2577, 2583), True, 'import numpy as np\n'), ((2624, 2642), 'numpy.ones_like', 'np.ones_like', (['dudx'], {}), '(dudx)\n', (2636, 2642), True, 'import numpy as np\n'), ((2683, 2701), 'numpy.ones_like', 'np.ones_like', (['dudx'], {}), '(dudx)\n', (2695, 2701), True, 'import numpy as np\n'), ((2742, 2760), 'numpy.ones_like', 'np.ones_like', (['dudx'], {}), '(dudx)\n', (2754, 2760), True, 'import numpy as np\n'), ((4125, 4152), 'numpy.ones_like', 'np.ones_like', (['E[:, :, 0, 1]'], {}), '(E[:, :, 0, 1])\n', (4137, 4152), True, 'import numpy as np\n'), ((4203, 4230), 'numpy.ones_like', 'np.ones_like', (['E[:, :, 0, 1]'], {}), '(E[:, :, 0, 1])\n', (4215, 4230), True, 'import numpy as np\n'), ((4281, 4308), 'numpy.ones_like', 'np.ones_like', (['E[:, :, 0, 1]'], {}), '(E[:, :, 0, 1])\n', (4293, 4308), True, 'import numpy as np\n'), ((4359, 4386), 'numpy.ones_like', 'np.ones_like', (['E[:, :, 0, 1]'], {}), '(E[:, :, 0, 1])\n', (4371, 4386), True, 'import numpy as np\n'), ((6007, 6030), 'numpy.vstack', 'np.vstack', (['[x, y, ones]'], {}), '([x, y, ones])\n', (6016, 6030), True, 'import numpy as np\n'), ((6575, 6612), 'numpy.vstack', 'np.vstack', (['[residuals_x, residuals_y]'], {}), '([residuals_x, residuals_y])\n', (6584, 6612), True, 'import numpy as np\n'), ((3745, 3775), 'numpy.einsum', 'np.einsum', (['"""...ki,...kj"""', 'F', 'F'], {}), "('...ki,...kj', F, F)\n", (3754, 3775), True, 'import numpy as np\n'), ((5231, 5265), 'numpy.transpose', 'np.transpose', (['G'], {'axes': '(0, 1, 3, 2)'}), '(G, axes=(0, 1, 3, 2))\n', (5243, 5265), True, 'import numpy as np\n'), ((5887, 5898), 'numpy.isnan', 'np.isnan', (['u'], {}), '(u)\n', (5895, 5898), True, 'import numpy as np\n'), ((5900, 5911), 'numpy.isnan', 'np.isnan', (['v'], {}), '(v)\n', (5908, 5911), True, 'import numpy as np\n')]
|
import logging
from pretix.base.email import get_email_context
from pretix.base.i18n import language
from pretix.base.models import OrderPosition
from pretix.base.services.mail import SendMailException
from pretix.base.services.tasks import EventTask
from pretix.celery_app import app
logger = logging.getLogger(__name__)
@app.task(base=EventTask, bind=True)
def send_email(self, event, position):
op = OrderPosition.objects.get(pk=position)
with language(op.order.locale, event.settings.region):
email_template = event.settings.cwa_checkin_email_body
email_subject = str(event.settings.cwa_checkin_email_subject)
email_context = get_email_context(event=event, order=op.order, position=op)
try:
if op.attendee_email:
op.send_mail(
email_subject,
email_template,
email_context,
"pretix_cwa.order.position.email.cwa",
)
else:
op.order.send_mail(
email_subject,
email_template,
email_context,
"pretix_cwa.order.email.cwa",
)
except SendMailException:
logger.exception("CWA reminder email could not be sent")
|
[
"pretix.base.i18n.language",
"pretix.celery_app.app.task",
"pretix.base.models.OrderPosition.objects.get",
"pretix.base.email.get_email_context",
"logging.getLogger"
] |
[((295, 322), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (312, 322), False, 'import logging\n'), ((326, 361), 'pretix.celery_app.app.task', 'app.task', ([], {'base': 'EventTask', 'bind': '(True)'}), '(base=EventTask, bind=True)\n', (334, 361), False, 'from pretix.celery_app import app\n'), ((410, 448), 'pretix.base.models.OrderPosition.objects.get', 'OrderPosition.objects.get', ([], {'pk': 'position'}), '(pk=position)\n', (435, 448), False, 'from pretix.base.models import OrderPosition\n'), ((458, 506), 'pretix.base.i18n.language', 'language', (['op.order.locale', 'event.settings.region'], {}), '(op.order.locale, event.settings.region)\n', (466, 506), False, 'from pretix.base.i18n import language\n'), ((666, 725), 'pretix.base.email.get_email_context', 'get_email_context', ([], {'event': 'event', 'order': 'op.order', 'position': 'op'}), '(event=event, order=op.order, position=op)\n', (683, 725), False, 'from pretix.base.email import get_email_context\n')]
|
"""
byceps.services.shop.order.transfer.number
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2021 <NAME>
:License: Revised BSD (see `LICENSE` file for details)
"""
from dataclasses import dataclass
from typing import NewType
from uuid import UUID
from ...shop.transfer.models import ShopID
OrderNumberSequenceID = NewType('OrderNumberSequenceID', UUID)
@dataclass(frozen=True)
class OrderNumberSequence:
id: OrderNumberSequenceID
shop_id: ShopID
prefix: str
value: int
OrderNumber = NewType('OrderNumber', str)
|
[
"typing.NewType",
"dataclasses.dataclass"
] |
[((333, 371), 'typing.NewType', 'NewType', (['"""OrderNumberSequenceID"""', 'UUID'], {}), "('OrderNumberSequenceID', UUID)\n", (340, 371), False, 'from typing import NewType\n'), ((375, 397), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (384, 397), False, 'from dataclasses import dataclass\n'), ((522, 549), 'typing.NewType', 'NewType', (['"""OrderNumber"""', 'str'], {}), "('OrderNumber', str)\n", (529, 549), False, 'from typing import NewType\n')]
|
"""Week1 Test Cases: Strongly Connected Components"""
from week1.scc import scc
def test_scc():
graph = {
'a': ['c'],
'b': ['a'],
'c': ['b'],
'd': ['b', 'f'],
'e': ['d'],
'f': ['e'],
'g': ['e', 'h'],
'h': ['i'],
'i': ['g']
}
assert scc(graph) == {'c': ['c', 'b', 'a'], 'd': [
'd', 'f', 'e'], 'g': ['g', 'h', 'i']}
def test_scc_reversed_graph():
graph = {
'a': ['b'],
'b': ['c', 'd'],
'c': ['a'],
'd': ['e'],
'e': ['f', 'g'],
'f': ['d'],
'g': ['i'],
'h': ['g'],
'i': ['h']
}
assert scc(graph) == {'g': ['g', 'i', 'h'], 'd': [
'd', 'e', 'f'], 'b': ['b', 'c', 'a']}
def test_scc_node_no_outbound_edges():
graph = {
'a': ['b'],
'b': ['c', 'd'],
'c': ['a'],
'd': ['e'],
'e': ['f', 'g'],
'f': ['d'],
'g': ['i'],
'h': ['g'],
'i': []
}
assert scc(graph) == {'i': ['i'], 'g': ['g'], 'h': [
'h'], 'd': ['d', 'e', 'f'], 'b': ['b', 'c', 'a']}
def test_scc_no_edges():
graph = {
'a': [],
'b': [],
'c': []
}
assert scc(graph) == {'a': ['a'], 'c': ['c'], 'b': ['b']}
def test_scc_single_node():
graph = {
'a': []
}
assert scc(graph) == {'a': ['a']}
def test_scc_single_edge():
graph = {
'a': ['b'],
'b': []
}
assert scc(graph) == {'a': ['a'], 'b': ['b']}
|
[
"week1.scc.scc"
] |
[((321, 331), 'week1.scc.scc', 'scc', (['graph'], {}), '(graph)\n', (324, 331), False, 'from week1.scc import scc\n'), ((665, 675), 'week1.scc.scc', 'scc', (['graph'], {}), '(graph)\n', (668, 675), False, 'from week1.scc import scc\n'), ((1014, 1024), 'week1.scc.scc', 'scc', (['graph'], {}), '(graph)\n', (1017, 1024), False, 'from week1.scc import scc\n'), ((1227, 1237), 'week1.scc.scc', 'scc', (['graph'], {}), '(graph)\n', (1230, 1237), False, 'from week1.scc import scc\n'), ((1356, 1366), 'week1.scc.scc', 'scc', (['graph'], {}), '(graph)\n', (1359, 1366), False, 'from week1.scc import scc\n'), ((1481, 1491), 'week1.scc.scc', 'scc', (['graph'], {}), '(graph)\n', (1484, 1491), False, 'from week1.scc import scc\n')]
|
#!/bin/python
from os import walk
from os.path import join, relpath, normpath
from json import load, dump
from multiprocessing import Pool
oldpath = "./experimental/translations"
newpath = "./translations"
substitutions = dict()
with open(join(newpath, "substitutions.json"),"r") as f:
substitutions = load(f)
filestowrite = dict()
for subdir, dirs, files in walk(oldpath):
for thefile in files:
if (not thefile.endswith(".json")) or thefile == "substitutions.json":
continue
oldfile = join(subdir, thefile)
objlist = {}
try:
with open(oldfile, "r") as f:
objlist = load(f)
except:
print("Cann't load: " + oldfile)
continue
for obj in objlist:
if "DeniedAlternatives" not in obj:
#print("No alternatives for: " + oldfile)
continue
denied = obj["DeniedAlternatives"]
if len(denied) == 0:
continue
entext = obj["Texts"]["Eng"]
relfiles = obj["Files"]
for rlfile in relfiles.keys():
#relfile = relpath(rlfile, "assets")
relfile = rlfile
thelist = [join("texts", relfile + ".json")]
if relfile in substitutions:
thelist += list(substitutions[relfile].values())
for newfile in thelist:
newfileobj = {}
newfilename = normpath(join(newpath, newfile))
if newfilename in filestowrite:
newfileobj = filestowrite[newfilename]
else:
try:
with open(newfilename, "r") as f:
newfileobj = load(f)
except:
pass
#print("Cann't read: " + newfilename)
#raise
changed = False
for i in range(0, len(newfileobj)):
if not (newfileobj[i]["Texts"]["Eng"] == entext):
continue
if "DeniedAlternatives" not in newfileobj[i]:
newfileobj[i]["DeniedAlternatives"] = list()
for alt in denied:
if alt in newfileobj[i]["DeniedAlternatives"]:
continue
newfileobj[i]["DeniedAlternatives"].append(alt)
changed = True
if changed:
filestowrite[newfilename] = newfileobj
for newfilename, newfileobj in filestowrite.items():
with open(newfilename, "w") as f:
dump(newfileobj, f, ensure_ascii = False, indent = 2)
|
[
"json.dump",
"os.walk",
"os.path.join",
"json.load"
] |
[((368, 381), 'os.walk', 'walk', (['oldpath'], {}), '(oldpath)\n', (372, 381), False, 'from os import walk\n'), ((307, 314), 'json.load', 'load', (['f'], {}), '(f)\n', (311, 314), False, 'from json import load, dump\n'), ((242, 277), 'os.path.join', 'join', (['newpath', '"""substitutions.json"""'], {}), "(newpath, 'substitutions.json')\n", (246, 277), False, 'from os.path import join, relpath, normpath\n'), ((511, 532), 'os.path.join', 'join', (['subdir', 'thefile'], {}), '(subdir, thefile)\n', (515, 532), False, 'from os.path import join, relpath, normpath\n'), ((2366, 2415), 'json.dump', 'dump', (['newfileobj', 'f'], {'ensure_ascii': '(False)', 'indent': '(2)'}), '(newfileobj, f, ensure_ascii=False, indent=2)\n', (2370, 2415), False, 'from json import load, dump\n'), ((613, 620), 'json.load', 'load', (['f'], {}), '(f)\n', (617, 620), False, 'from json import load, dump\n'), ((1103, 1135), 'os.path.join', 'join', (['"""texts"""', "(relfile + '.json')"], {}), "('texts', relfile + '.json')\n", (1107, 1135), False, 'from os.path import join, relpath, normpath\n'), ((1324, 1346), 'os.path.join', 'join', (['newpath', 'newfile'], {}), '(newpath, newfile)\n', (1328, 1346), False, 'from os.path import join, relpath, normpath\n'), ((1551, 1558), 'json.load', 'load', (['f'], {}), '(f)\n', (1555, 1558), False, 'from json import load, dump\n')]
|
from flask import render_template
from app.views.admin import bp_admin
@bp_admin.route('/')
def index():
return render_template('admin/index.html')
@bp_admin.route('/dashboard')
def dashboard():
return render_template('admin/dashboard.html')
|
[
"flask.render_template",
"app.views.admin.bp_admin.route"
] |
[((75, 94), 'app.views.admin.bp_admin.route', 'bp_admin.route', (['"""/"""'], {}), "('/')\n", (89, 94), False, 'from app.views.admin import bp_admin\n'), ((158, 186), 'app.views.admin.bp_admin.route', 'bp_admin.route', (['"""/dashboard"""'], {}), "('/dashboard')\n", (172, 186), False, 'from app.views.admin import bp_admin\n'), ((119, 154), 'flask.render_template', 'render_template', (['"""admin/index.html"""'], {}), "('admin/index.html')\n", (134, 154), False, 'from flask import render_template\n'), ((215, 254), 'flask.render_template', 'render_template', (['"""admin/dashboard.html"""'], {}), "('admin/dashboard.html')\n", (230, 254), False, 'from flask import render_template\n')]
|
"""
Helper to check the signature of a GitHub event request.
"""
import hmac
def compute_signature(payload: bytes, secret: bytes, algo: str = 'sha256') -> str:
"""
Computes the HMAC signature of *payload* given the specified *secret* and the given hashing *algo*.
    # Parameters
payload: The payload for which the signature should be computed.
secret: The secret string that is used in conjunction to generate the signature.
algo: The hash algorithm to use, must be `sha1` or `sha256`.
"""
if algo not in ('sha1', 'sha256'):
raise ValueError(f'algo must be {{sha1, sha256}}, got {algo!r}')
return f'{algo}=' + hmac.new(secret, payload, algo).hexdigest()
def check_signature(sig: str, payload: bytes, secret: bytes, algo: str = 'sha256') -> None:
"""
    Compares the provided signature *sig* with the computed signature of the *payload* and
raises a #SignatureMismatchException if they do not match. This function uses constant-time
string comparison to prevent timing analysis.
"""
computed = compute_signature(payload, secret, algo)
if not hmac.compare_digest(sig, computed):
raise SignatureMismatchException(sig, computed)
class SignatureMismatchException(Exception):
"""
    Raised if a signature cannot be verified with #check_signature().
"""
_MSG = 'The provided signature does not match the computed signature of the payload.'
def __init__(self, provided: str, computed: str) -> None:
self.provided = provided
self.computed = computed
def __str__(self) -> str:
return f'{self._MSG}\n provided: {self.provided}\n computed: {self.computed}'
|
[
"hmac.new",
"hmac.compare_digest"
] |
[((1081, 1115), 'hmac.compare_digest', 'hmac.compare_digest', (['sig', 'computed'], {}), '(sig, computed)\n', (1100, 1115), False, 'import hmac\n'), ((635, 666), 'hmac.new', 'hmac.new', (['secret', 'payload', 'algo'], {}), '(secret, payload, algo)\n', (643, 666), False, 'import hmac\n')]
|
# -*- coding: utf-8 -*-
import io
import os
import requests
from httpdbg.httpdbg import ServerThread, app
from httpdbg.mode_pytest import run_pytest
from httpdbg.__main__ import pyhttpdbg_entry_point
from utils import _run_under_httpdbg
def test_run_pytest(httpbin):
def _test(httpbin):
os.environ["HTTPDBG_TEST_PYTEST_BASE_URL"] = httpbin.url
script_to_run = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "demo_run_pytest.py"
)
run_pytest(["pytest", script_to_run, "-k", "test_demo"])
stop_httpdbg, current_httpdbg_port = _run_under_httpdbg(_test, httpbin)
ret = requests.get(f"http://127.0.0.1:{current_httpdbg_port}/requests")
stop_httpdbg()
reqs = ret.json()["requests"]
assert len(reqs) == 3
assert reqs[0]["uri"] == httpbin.url + "/post"
assert reqs[1]["uri"] == httpbin.url + "/get"
assert reqs[2]["uri"] == httpbin.url + "/put"
def test_run_pytest_from_pyhttpdbg_entry_point(httpbin, monkeypatch):
os.environ["HTTPDBG_TEST_PYTEST_BASE_URL"] = httpbin.url
script_to_run = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "demo_run_pytest.py"
)
monkeypatch.setattr(
"sys.argv", ["pyhttpdb", "pytest", script_to_run, "-k", "test_demo"]
)
# to terminate the httpdbg server
monkeypatch.setattr("sys.stdin", io.StringIO("\n"))
pyhttpdbg_entry_point()
    # we need to start a new httpdbg server because the previous one has been stopped
server = ServerThread(6000, app)
server.start()
ret = requests.get("http://127.0.0.1:6000/requests")
reqs = ret.json()["requests"]
assert len(reqs) == 3
assert reqs[0]["uri"] == httpbin.url + "/post"
assert reqs[1]["uri"] == httpbin.url + "/get"
assert reqs[2]["uri"] == httpbin.url + "/put"
server.shutdown()
def test_run_pytest_with_exception(capsys):
def _test():
script_to_run = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "demo_run_pytest.py"
)
run_pytest(["pytest", script_to_run, "-k", "test_demo_raise_exception"])
stop_httpdbg, current_httpdbg_port = _run_under_httpdbg(_test)
ret = requests.get(f"http://127.0.0.1:{current_httpdbg_port}/requests")
stop_httpdbg()
reqs = ret.json()["requests"]
assert len(reqs) == 0
assert "fixture_which_do_not_exists" in capsys.readouterr().out
|
[
"io.StringIO",
"httpdbg.httpdbg.ServerThread",
"utils._run_under_httpdbg",
"httpdbg.mode_pytest.run_pytest",
"os.path.realpath",
"requests.get",
"httpdbg.__main__.pyhttpdbg_entry_point"
] |
[((593, 627), 'utils._run_under_httpdbg', '_run_under_httpdbg', (['_test', 'httpbin'], {}), '(_test, httpbin)\n', (611, 627), False, 'from utils import _run_under_httpdbg\n'), ((639, 704), 'requests.get', 'requests.get', (['f"""http://127.0.0.1:{current_httpdbg_port}/requests"""'], {}), "(f'http://127.0.0.1:{current_httpdbg_port}/requests')\n", (651, 704), False, 'import requests\n'), ((1391, 1414), 'httpdbg.__main__.pyhttpdbg_entry_point', 'pyhttpdbg_entry_point', ([], {}), '()\n', (1412, 1414), False, 'from httpdbg.__main__ import pyhttpdbg_entry_point\n'), ((1508, 1531), 'httpdbg.httpdbg.ServerThread', 'ServerThread', (['(6000)', 'app'], {}), '(6000, app)\n', (1520, 1531), False, 'from httpdbg.httpdbg import ServerThread, app\n'), ((1562, 1608), 'requests.get', 'requests.get', (['"""http://127.0.0.1:6000/requests"""'], {}), "('http://127.0.0.1:6000/requests')\n", (1574, 1608), False, 'import requests\n'), ((2157, 2182), 'utils._run_under_httpdbg', '_run_under_httpdbg', (['_test'], {}), '(_test)\n', (2175, 2182), False, 'from utils import _run_under_httpdbg\n'), ((2194, 2259), 'requests.get', 'requests.get', (['f"""http://127.0.0.1:{current_httpdbg_port}/requests"""'], {}), "(f'http://127.0.0.1:{current_httpdbg_port}/requests')\n", (2206, 2259), False, 'import requests\n'), ((494, 550), 'httpdbg.mode_pytest.run_pytest', 'run_pytest', (["['pytest', script_to_run, '-k', 'test_demo']"], {}), "(['pytest', script_to_run, '-k', 'test_demo'])\n", (504, 550), False, 'from httpdbg.mode_pytest import run_pytest\n'), ((1368, 1385), 'io.StringIO', 'io.StringIO', (['"""\n"""'], {}), "('\\n')\n", (1379, 1385), False, 'import io\n'), ((2042, 2114), 'httpdbg.mode_pytest.run_pytest', 'run_pytest', (["['pytest', script_to_run, '-k', 'test_demo_raise_exception']"], {}), "(['pytest', script_to_run, '-k', 'test_demo_raise_exception'])\n", (2052, 2114), False, 'from httpdbg.mode_pytest import run_pytest\n'), ((1128, 1154), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1144, 1154), False, 'import os\n'), ((426, 452), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (442, 452), False, 'import os\n'), ((1974, 2000), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1990, 2000), False, 'import os\n')]
|
import pandas
#################
input_panda_address=snakemake.input.input_panda_address
output_panda_address=snakemake.output.output_panda_address
#################
input_panda=pandas.read_csv(input_panda_address,sep='¬',header=0)
def fill_cfmid_collision_energy_column(temp_panda):
    # iterate over every row of the dataframe
for index,row in temp_panda.iterrows():
        # map the CFM-ID energy level (energy0/1/2) to a collision-energy label prefixed with 10/20/40
if row['energy#']=='energy0':
temp_panda.loc[index,'cfmid-collision']='10-'+str(row['Collision_energy'])
elif row['energy#']=='energy1':
temp_panda.loc[index,'cfmid-collision']='20-'+str(row['Collision_energy'])
elif row['energy#']=='energy2':
temp_panda.loc[index,'cfmid-collision']='40-'+str(row['Collision_energy'])
input_panda.insert(loc=input_panda.columns.size-2,column='cfmid-collision',value='null')
fill_cfmid_collision_energy_column(input_panda)
input_panda.to_csv(output_panda_address,sep='¬',index=False)
|
[
"pandas.read_csv"
] |
[((178, 233), 'pandas.read_csv', 'pandas.read_csv', (['input_panda_address'], {'sep': '"""¬"""', 'header': '(0)'}), "(input_panda_address, sep='¬', header=0)\n", (193, 233), False, 'import pandas\n')]
|
import os
import glob
from flask import Flask
from flask import jsonify
from flask import request, render_template
from skinapp import app
from model.utils import *
from model.skinmodel import *
valid_mimetypes = ['image/jpeg', 'image/png']
@app.route('/')
def index():
samples = glob.glob("%s/*" % app.config['SAMPLE_FOLDER'])
return render_template('index.html', samples=samples)
@app.route('/predict', methods=['POST'])
def predict():
if request.method == 'POST':
if 'file' not in request.files:
return jsonify({'error': 'no file'}), 400
# Image info
img_file = request.files.get('file')
img_name = img_file.filename
mimetype = img_file.content_type
# Return an error if not a valid mimetype
if mimetype not in valid_mimetypes:
return jsonify({'error': 'bad-type'})
# Write image to static directory
img_file.save(os.path.join(app.config['UPLOAD_FOLDER'], img_name))
img = open_image(os.path.join(app.config['UPLOAD_FOLDER'], img_name))
# Run Prediction on the model
res = get_predictions(img)
# Delete image when done with analysis
os.remove(os.path.join(app.config['UPLOAD_FOLDER'], img_name))
return jsonify(res)
|
[
"flask.request.files.get",
"skinapp.app.route",
"flask.jsonify",
"flask.render_template",
"glob.glob",
"os.path.join"
] |
[((245, 259), 'skinapp.app.route', 'app.route', (['"""/"""'], {}), "('/')\n", (254, 259), False, 'from skinapp import app\n'), ((395, 434), 'skinapp.app.route', 'app.route', (['"""/predict"""'], {'methods': "['POST']"}), "('/predict', methods=['POST'])\n", (404, 434), False, 'from skinapp import app\n'), ((287, 334), 'glob.glob', 'glob.glob', (["('%s/*' % app.config['SAMPLE_FOLDER'])"], {}), "('%s/*' % app.config['SAMPLE_FOLDER'])\n", (296, 334), False, 'import glob\n'), ((346, 392), 'flask.render_template', 'render_template', (['"""index.html"""'], {'samples': 'samples'}), "('index.html', samples=samples)\n", (361, 392), False, 'from flask import request, render_template\n'), ((617, 642), 'flask.request.files.get', 'request.files.get', (['"""file"""'], {}), "('file')\n", (634, 642), False, 'from flask import request, render_template\n'), ((1270, 1282), 'flask.jsonify', 'jsonify', (['res'], {}), '(res)\n', (1277, 1282), False, 'from flask import jsonify\n'), ((834, 864), 'flask.jsonify', 'jsonify', (["{'error': 'bad-type'}"], {}), "({'error': 'bad-type'})\n", (841, 864), False, 'from flask import jsonify\n'), ((930, 981), 'os.path.join', 'os.path.join', (["app.config['UPLOAD_FOLDER']", 'img_name'], {}), "(app.config['UPLOAD_FOLDER'], img_name)\n", (942, 981), False, 'import os\n'), ((1009, 1060), 'os.path.join', 'os.path.join', (["app.config['UPLOAD_FOLDER']", 'img_name'], {}), "(app.config['UPLOAD_FOLDER'], img_name)\n", (1021, 1060), False, 'import os\n'), ((1201, 1252), 'os.path.join', 'os.path.join', (["app.config['UPLOAD_FOLDER']", 'img_name'], {}), "(app.config['UPLOAD_FOLDER'], img_name)\n", (1213, 1252), False, 'import os\n'), ((542, 571), 'flask.jsonify', 'jsonify', (["{'error': 'no file'}"], {}), "({'error': 'no file'})\n", (549, 571), False, 'from flask import jsonify\n')]
|
import os
import sys
from datetime import datetime, timedelta
import numpy as np
data_path = "../../dat4figs_JAMES/Fig06"
os.makedirs( data_path, exist_ok=True )
USE_ARCH_DAT = True
#USE_ARCH_DAT = False
quick_hist = False
quick_bar = True
quick_bar = False
def d4_computation_time_nparray( top='' ):
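    # Parse SCALE-LETKF log files under "top" and return (forecast times, cycle times, per-section timer arrays).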
dirs = [ f.name for f in os.scandir( top ) ] #if f.is_file() ]
path_l = []
ftimes = []
ctimes = []
# Prepare file path list
for dir_ in dirs:
path_l.append( os.path.join( top, dir_, ) )
scale_l = []
# Get computation time for SCALE
for path in path_l:
if not os.path.isfile( path ):
break
with open( path ) as f:
lines = f.readlines()
for l in lines:
if '[Info:fcst] End forecast' in l:
data = l.split()
try:
ftimes.append( float( data[7] ) )
except:
print( "Failed", data )
elif '[Info:DA]' in l:
data = l.split()
try:
ctimes.append( float( data[6] ) )
except:
print( "Failed", data )
elif '##### TIMER' in l:
data = l.split()
try:
tit_ = data[3]
dat_ = float( data[5] )
if tit_ == 'SCALE':
scale_l.append( dat_ )
except:
print( "Failed", data )
scale_l = np.array( scale_l )
key_l = [ "SCALE", "READ_OBS",
"OBS_OPERATOR",
"INITIALIZE",
"INITIALIZE_OTHERS",
"INIT_LETKF",
"PROCESS_OBS",
"SET_GRID",
"READ_GUES",
"GUES_MEAN",
"WRITE RESTART/GRADS(GUES)",
"DAS_LETKF",
"ANAL_MEAN",
"WRITE_ANAL",
"DEALLOCATE",
"WRITE RESTART/GRADS(ANAL)",
"OTHERS",
"FINALIZE",
"JIT_GET",
]
# prepare nan array
iarray = np.zeros( scale_l.shape )
iarray[:] = np.nan
DETAIL = {}
for key in key_l:
if key == 'SCALE':
DETAIL[key] = scale_l
else:
DETAIL[key] = np.copy( iarray )
# Get computation time for all
i = -1
for path in path_l:
if not os.path.isfile( path ):
break
with open( path ) as f:
lines = f.readlines()
for l in lines:
if '##### TIMER' in l:
data = l.split()
try:
tit_ = data[3]
tit4_ = data[4]
dat_ = float( data[5] )
if tit_ == 'SCALE':
i += 1
if tit_ == "WRITE":
dat_ = float( data[6] )
if tit4_ == "RESTART/GRADS(ANAL)":
tit_ = "WRITE RESTART/GRADS(ANAL)"
elif tit4_ == "RESTART/GRADS(GUES)":
tit_ = "WRITE RESTART/GRADS(GUES)"
i_ = i
if i_ < 0:
i_ = 0
if tit_ in DETAIL:
DETAIL[tit_][i_] = dat_
else:
DETAIL["OTHERS"][i_] = dat_
except:
print( "Failed", data )
elif '......jitdt_read_toshiba:jitget:' in l:
data = l.split()
try:
tit_ = "JIT_GET"
dat_ = float( data[1] )
DETAIL[tit_][i] = dat_
except:
print( "Failed", data )
return( ftimes, ctimes, DETAIL )
def d4_computation_time( top='', ctmax=600 ):
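    # Same log parsing as above, but the per-section timers are accumulated into Python lists and converted to arrays at the end.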
dirs = [ f.name for f in os.scandir( top ) ] #if f.is_file() ]
ftimes = []
ctimes = []
path_l = []
init = []
init_others = []
init_letkf = []
scale = []
others = []
read_obs = []
obsope = []
process_obs = []
set_grid = []
read_gues = []
gues_mean = []
write_restartg = []
das_letkf = []
anal_mean = []
write_anal = []
deallocate = []
write_restarta = []
others = []
finalize = []
jitget = []
DETAIL = { "SCALE": scale,
"READ_OBS":read_obs,
"OBS_OPERATOR": obsope,
"INITIALIZE": init,
"INITIALIZE_OTHERS": init_others,
"INIT_LETKF": init_letkf,
"PROCESS_OBS": process_obs,
"SET_GRID": set_grid,
"READ_GUES": read_gues,
"GUES_MEAN": gues_mean,
"WRITE RESTART/GRADS(GUES)": write_restartg,
"DAS_LETKF": das_letkf,
"ANAL_MEAN": anal_mean,
"WRITE_ANAL": write_anal,
"DEALLOCATE": deallocate,
"WRITE RESTART/GRADS(ANAL)": write_restarta,
"OTHERS": others,
"FINALIZE": finalize,
"JIT_GET": jitget,
}
# Prepare file path list
for dir_ in dirs:
fname = 'job.o' #[ f.name for f in os.scandir( os.path.join( top, dir_ ) ) ] #if f.is_file() ]
path_l.append( os.path.join( top, dir_, fname ) )
# Get computation time
for path in path_l:
if not os.path.isfile( path ):
break
with open( path ) as f:
lines = f.readlines()
for l in lines:
if '[Info:fcst] End forecast' in l:
data = l.split()
try:
ftimes.append( float( data[7] ) )
except:
print( "Failed", data )
elif '[Info:DA]' in l:
data = l.split()
try:
ctimes.append( float( data[6] ) )
except:
print( "Failed", data )
elif '##### TIMER' in l:
data = l.split()
try:
tit_ = data[3]
tit4_ = data[4]
dat_ = float( data[5] )
if tit_ == "WRITE":
dat_ = float( data[6] )
if tit4_ == "RESTART/GRADS(ANAL)":
tit_ = "WRITE RESTART/GRADS(ANAL)"
elif tit4_ == "RESTART/GRADS(GUES)":
tit_ = "WRITE RESTART/GRADS(GUES)"
if tit_ in DETAIL:
DETAIL[tit_].append( dat_ )
else:
DETAIL["OTHERS"].append( dat_ )
except:
print( "Failed", data )
elif '......jitdt_read_toshiba:jitget:' in l:
data = l.split()
try:
tit_ = "JIT_GET"
dat_ = float( data[1] )
DETAIL[tit_].append( dat_ )
except:
print( "Failed", data )
for key in DETAIL.keys():
DETAIL[key] = np.array( DETAIL[key] )
return( ftimes, ctimes, DETAIL )
def plot_hist( key="", dat=np.array([]) ):
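    # Plot a histogram of the timing samples for one timer key and return (mode, mean).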
import matplotlib.pyplot as plt
from scipy import stats
xmin = 0
xmax = 60
# Scott's choise
#h = 3.5 * np.std( dat, ddof=1 ) / np.power( dat.size, 1.0/3.0)
#bins = int( ( xmax - xmin ) / h )
# Square-root choice
bins = int( np.sqrt( dat.size ) )
fig, ax = plt.subplots( 1, 1, figsize=(6,4) )
fig.subplots_adjust( left=0.15, bottom=0.15, right=0.95, top=0.92, )
rn, rbins, rpatches = ax.hist( dat, range=(xmin, xmax), bins=bins, alpha=0.6 )
imode = np.argmax( rn )
mode = np.mean( rbins[imode:imode+2] )
mean = np.mean( dat )
#print( len(rn), len(rbins), mode )
lw = 1.0
ymin = 0.0
ymax = 4000 #dat_.size
ls = 'dashed'
color = 'b'
ax.vlines( x=mode, ymin=ymin, ymax=ymax,
linewidths=lw, linestyles=ls, color=color )
color = 'k'
ax.vlines( x=mean, ymin=ymin, ymax=ymax,
linewidths=lw, linestyles=ls, color=color )
text_ = 'Mean:{0:.3f} s\nMode:{1:.3f} s\nN={2:}'.format( mean, mode, dat.size )
ax.text( 0.99, 0.99, text_,
fontsize=12, transform=ax.transAxes,
ha='right',
va='top' )
tit_ = key
ax.text( 0.5, 1.01, tit_,
fontsize=12, transform=ax.transAxes,
ha='center',
va='bottom' )
ax.set_xlim( xmin, xmax )
ax.set_ylim( ymin, ymax )
xlab = 'Computation time (s)'
ylab = 'Frequency'
ax.set_xlabel( xlab, fontsize=11)
ax.set_ylabel( ylab, fontsize=11)
key_ = key.replace( ' ', '_' ).replace( '/', '_' ) #.replace( '(', '_' ).replace( ')')
ofig = 'png/1p_d4_{0:}.png'.format( key_ )
print( ofig )
if quick_hist:
plt.show()
else:
plt.savefig( ofig,
bbox_inches="tight", pad_inches = 0.1)
plt.clf()
plt.close('all')
return( mode, mean )
def plot_bar_2p( dic={}, ftimes=np.array([]) ):
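    # Two-panel figure: stacked bar of the data-assimilation timing breakdown (left) and mean 30-min forecast time (right).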
import matplotlib.pyplot as plt
fig, ( ax1,ax2 ) = plt.subplots( 1, 2, figsize=(6,4) )
# fig.subplots_adjust( left=0.15, bottom=0.05, right=0.5, top=0.92, )
fig.subplots_adjust( left=0.15, bottom=0.06, right=0.95, top=0.92,
wspace=0.3, hspace=0.05)
ax1.set_xlim( 0, 2.0 )
width1 = 0.8
#c_l = [ 'firebrick', 'dodgerblue', 'limegreen', 'gold' ]
#c_l = [ 'dodgerblue', 'firebrick', 'forestgreen', 'goldenrod' ]
c_l = [ 'dodgerblue', 'firebrick', 'gray', 'goldenrod', 'k' ]
#c_l = [ 'cyan', 'magenta', 'y', 'k' ]
acm = 0.0
for i, key in enumerate( dic.keys() ):
lab = key
if lab == 'OBS':
lab = 'Obs pre-\nprocessing'
elif lab == 'DATA TRANSFER':
lab = 'Memory copy'
elif lab == 'JIT-DT':
continue
ax1.bar( 1.0, dic[key], bottom=acm,
label=lab, color=c_l[i], width=width1 )
acm += dic[key]
# ax.legend( fontsize=12, bbox_to_anchor=(1.01, 1.00) )
handles, labels = ax1.get_legend_handles_labels()
ax1.legend( reversed(handles), reversed(labels), bbox_to_anchor=(1.01, 1.00),
fontsize=12 )
ax1.set_ylabel( 'Computation time (s)', fontsize=12 )
#ax.set_xlim( 0, 1.0 )
yticks = np.arange( 0, 22, 2 )
ax1.set_ylim( 0, 20.0 )
ax1.set_yticks( yticks )
ax1.tick_params( axis='x', which='both',
bottom=False, top=False,
labelbottom=False )
ax1.hlines( xmin=0, xmax=2, y=np.arange( 4, 20, 4 ), lw=1.0, linestyle='dashed',
color='gray', alpha=0.5 )
ax2.set_ylim( 0, 151.0 )
ax2.set_xlim( 0, 2.0 )
ax2.hlines( xmin=0, xmax=2, y=[60, 120], lw=1.0, linestyle='dashed',
color='gray', alpha=0.5 )
width2 = 0.8
ax2.bar( 1, np.mean(ftimes), label="30-min forecast", width=width2,
color='dodgerblue' )
print( "std:", np.std( ftimes, ddof=1 ), len( ftimes ) )
ax2.tick_params( axis='x', which='both',
bottom=False, top=False,
labelbottom=False )
ax_l = [ ax1, ax2 ]
tit_l = [ "Data assimilation",
"30-min forecast" ]
pnum_l = [ "(a)", "(b)" ]
for i, ax in enumerate( ax_l ):
ax.text( 0.5, 1.01, tit_l[i],
fontsize=12, transform=ax.transAxes,
ha='center',
va='bottom' )
ax.text( 0.0, 1.01, pnum_l[i],
fontsize=10, transform=ax.transAxes,
ha='left',
va='bottom' )
ofig = 'pdf/Fig06.pdf'
print( ofig )
if quick_bar:
plt.show()
else:
plt.savefig( ofig,
bbox_inches="tight", pad_inches = 0.1)
plt.clf()
plt.close('all')
def plot_bar_2p_scale( dic={}, ftimes=np.array([]), dic2={} ):
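    # Variant of plot_bar_2p that adds a second stacked bar (dic2) for the test configuration.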
import matplotlib.pyplot as plt
fig, ( ax1,ax2 ) = plt.subplots( 1, 2, figsize=(6,4) )
# fig.subplots_adjust( left=0.15, bottom=0.05, right=0.5, top=0.92, )
fig.subplots_adjust( left=0.15, bottom=0.06, right=0.95, top=0.92,
wspace=0.3, hspace=0.05)
ax1.set_xlim( 0, 3.0 )
width1 = 0.8
#c_l = [ 'firebrick', 'dodgerblue', 'limegreen', 'gold' ]
#c_l = [ 'dodgerblue', 'firebrick', 'forestgreen', 'goldenrod' ]
c_l = [ 'dodgerblue', 'firebrick', 'gray', 'goldenrod', 'k' ]
#c_l = [ 'cyan', 'magenta', 'y', 'k' ]
acm = 0.0
for i, key in enumerate( dic.keys() ):
lab = key
if lab == 'OBS':
lab = 'Obs pre-\nprocessing'
elif lab == 'DATA TRANSFER':
lab = 'Memory copy'
elif lab == 'JIT-DT':
continue
ax1.bar( 1.0, dic[key], bottom=acm,
label=lab, color=c_l[i], width=width1 )
acm += dic[key]
acm2 = 0.0
for i, key in enumerate( dic2.keys() ):
lab = key
if lab == 'OBS':
lab = 'Obs pre-\nprocessing'
elif lab == 'DATA TRANSFER':
lab = 'Memory copy'
elif lab == 'JIT-DT':
continue
print( "check", dic2[key] )
ax1.bar( 2.0, dic2[key], bottom=acm2,
label=None, color=c_l[i], width=width1 )
        acm2 += dic2[key]
# ax.legend( fontsize=12, bbox_to_anchor=(1.01, 1.00) )
handles, labels = ax1.get_legend_handles_labels()
ax1.legend( reversed(handles), reversed(labels), bbox_to_anchor=(1.01, 1.00),
fontsize=12 )
ax1.set_ylabel( 'Computation time (s)', fontsize=12 )
#ax.set_xlim( 0, 1.0 )
yticks = np.arange( 0, 22, 2 )
ax1.set_ylim( 0, 20.0 )
ax1.set_yticks( yticks )
ax1.tick_params( axis='x', which='both',
bottom=False, top=False,
labelbottom=False )
ax1.hlines( xmin=0, xmax=2, y=np.arange( 4, 20, 4 ), lw=1.0, linestyle='dashed',
color='gray', alpha=0.5 )
ax2.set_ylim( 0, 151.0 )
ax2.set_xlim( 0, 2.0 )
ax2.hlines( xmin=0, xmax=2, y=[60, 120], lw=1.0, linestyle='dashed',
color='gray', alpha=0.5 )
width2 = 0.8
ax2.bar( 1, np.mean(ftimes), label="30-min forecast", width=width2,
color='dodgerblue' )
print( "std:", np.std( ftimes, ddof=1 ), len( ftimes ) )
ax2.tick_params( axis='x', which='both',
bottom=False, top=False,
labelbottom=False )
ax_l = [ ax1, ax2 ]
tit_l = [ "Data assimilation",
"30-min forecast" ]
pnum_l = [ "(a)", "(b)" ]
for i, ax in enumerate( ax_l ):
ax.text( 0.5, 1.01, tit_l[i],
fontsize=12, transform=ax.transAxes,
ha='center',
va='bottom' )
ax.text( 0.0, 1.01, pnum_l[i],
fontsize=10, transform=ax.transAxes,
ha='left',
va='bottom' )
    ofig = 'png/2p_d4_bar_scale.png'
print( ofig )
if quick_bar:
plt.show()
else:
plt.savefig( ofig,
bbox_inches="tight", pad_inches = 0.1)
plt.clf()
plt.close('all')
def plot_bar( dic={} ):
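    # Single stacked bar of the data-assimilation timing breakdown.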
import matplotlib.pyplot as plt
fig, ax = plt.subplots( 1, 1, figsize=(5,5) )
fig.subplots_adjust( left=0.15, bottom=0.05, right=0.5, top=0.92, )
#c_l = [ 'firebrick', 'dodgerblue', 'limegreen', 'gold' ]
#c_l = [ 'dodgerblue', 'firebrick', 'forestgreen', 'goldenrod' ]
c_l = [ 'dodgerblue', 'firebrick', 'gray', 'goldenrod', 'k' ]
#c_l = [ 'cyan', 'magenta', 'y', 'k' ]
acm = 0.0
for i, key in enumerate( dic.keys() ):
lab = key
if lab == 'OBS':
lab = 'Obs pre-\nprocessing'
elif lab == 'DATA TRANSFER':
lab = 'Memory copy'
elif lab == 'JIT-DT':
continue
ax.bar( '', dic[key], bottom=acm,
label=lab, color=c_l[i] )
acm += dic[key]
# ax.legend( fontsize=12, bbox_to_anchor=(1.01, 1.00) )
handles, labels = ax.get_legend_handles_labels()
ax.legend( reversed(handles), reversed(labels), bbox_to_anchor=(1.01, 1.00),
fontsize=13 )
ax.set_ylabel( 'Computation time (s)', fontsize=12 )
#ax.set_xlim( 0, 1.0 )
yticks = np.arange( 0, 32, 2 )
ax.set_ylim( 0, 31.0 )
ax.set_yticks( yticks )
ofig = 'png/1p_d4_bar.png'
print( ofig )
if quick_bar:
plt.show()
else:
plt.savefig( ofig,
bbox_inches="tight", pad_inches = 0.1)
plt.clf()
plt.close('all')
####
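# Aggregate the per-section timings into coarse categories (SCALE / LETKF / OBS / JIT-DT) for the stacked bar plots.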
SUM = { "SCALE": 0.0,
"LETKF": 0.0,
"OBS": 0.0,
# "DATA TRANSFER": 0.0,
"JIT-DT": 0.0,
}
fn_sum = '{0:}/SUM.npz'.format( data_path, )
fn_ftimes = '{0:}/ftimes.npz'.format( data_path, )
if not USE_ARCH_DAT:
top = '/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/realtime_20200825/log_from_amemiya/d4_500m/exp'
top = '/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/amemiya/d4_500m'
top_test = '/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/AIP_SAFE/realtime_test20200807/data/D4_500m_TEST_DEFAULT_0708_NOBS100_NEAR_HV4/exp/3008084_cycle_20190824150000'
#dtime_max = 1000
ftimes, ctimes, DETAIL = d4_computation_time_nparray( top=top, )
ftimes_test, ctimes_test, DETAIL_test = d4_computation_time_nparray( top=top_test, )
#print( DETAIL["DAS_LETKF"][0:5], DETAIL["WRITE_ANAL"][0:5])
#ftimes, ctimes, DETAIL = d4_computation_time( top=top, )
ctimes = np.array( ctimes )
print( '{0:} average: {1:} (N: {2:})'.format( "cycle", np.nanmean( ctimes ), len(ctimes) ) )
print( '{0:} average: {1:} (N: {2:})'.format( "fcst ", np.mean( ftimes ), len(ftimes) ) )
print("")
DETAIL_MODE = { }
DETAIL_MODE_test = { }
min_read_obs = 1.0
max_read_obs = 30.0
read_obs_ = DETAIL["READ_OBS"]
dat_jit = DETAIL['JIT_GET']
dat_jit[ ( read_obs_ < min_read_obs ) | ( read_obs_ > max_read_obs )] = np.nan
dat_jit_ = dat_jit[ ~np.isnan(dat_jit) ]
for key in DETAIL.keys():
DETAIL[key][ ( read_obs_ < min_read_obs ) | ( read_obs_ > max_read_obs )] = np.nan
time_ = np.nanmean( DETAIL[key] )
dat = DETAIL[key]
dat_ = dat[ ~np.isnan(dat) & ~np.isnan( dat_jit ) ]
num = len( dat_ )
if key == "READ_OBS":
dat_ -= dat_jit_
print( "#### ", key, time_, num, np.nanmax( DETAIL[key] ), np.nanmin( DETAIL[key] ) )
if num > 100:
mode_, mean_ = plot_hist( key=key, dat=dat_ )
#DETAIL_MODE[key] = mode_
DETAIL_MODE[key] = mean_
else:
print( 'Not plot ', key)
read_obs_test = DETAIL_test["READ_OBS"]
#dat_jit_test = DETAIL_test['JIT_GET']
#dat_jit_test[ ( read_obs_test < min_read_obs ) | ( read_obs_test > max_read_obs )] = np.nan
#dat_jit_test = dat_jit_test[ ~np.isnan(dat_jit_test) ]
for key in DETAIL_test.keys():
DETAIL_test[key][ ( read_obs_test < min_read_obs ) | ( read_obs_test > max_read_obs )] = np.nan
time_ = np.nanmean( DETAIL_test[key] )
dat = DETAIL_test[key]
print( key, dat )
#dat_ = dat[ ~np.isnan(dat) & ~np.isnan( dat_jit_test ) ]
dat_ = dat[ ~np.isnan(dat) ]
num = len( dat_ )
# if key == "READ_OBS":
# dat_ -= dat_jit_
print( "#### ", key, time_, num, np.nanmax( DETAIL_test[key] ), np.nanmin( DETAIL_test[key] ) )
if num > 100:
mode_, mean_ = plot_hist( key=key, dat=dat_ )
DETAIL_MODE_test[key] = mean_
else:
print( 'Not plot ', key)
for key in DETAIL_MODE.keys():
print( key )
if key == "SCALE":
SUM["SCALE"] += DETAIL_MODE[key]
elif key == "READ_OBS":
SUM["OBS"] += DETAIL_MODE[key]
# elif key == "READ_GUES" or key == "WRITE_ANAL":
# SUM["DATA TRANSFER"] += DETAIL_MODE[key]
elif key == "JIT_GET":
SUM["JIT-DT"] += DETAIL_MODE[key]
else:
SUM["LETKF"] += DETAIL_MODE[key]
SUM_test = { "SCALE": 0.0,
"LETKF": 0.0,
"OBS": 0.0,
"JIT-DT": 0.0,
}
for key in DETAIL_MODE_test.keys():
if key == "SCALE":
SUM_test["SCALE"] += DETAIL_MODE_test[key]
elif key == "READ_OBS":
SUM_test["OBS"] += DETAIL_MODE_test[key]
# elif key == "READ_GUES" or key == "WRITE_ANAL":
# SUM["DATA TRANSFER"] += DETAIL_MODE[key]
elif key == "JIT_GET":
SUM_test["JIT-DT"] += DETAIL_MODE_test[key]
else:
SUM_test["LETKF"] += DETAIL_MODE_test[key]
np.savez( fn_sum, **SUM, ftimes=ftimes )
np.savez( fn_ftimes, ftimes=ftimes )
else:
with np.load( fn_sum, allow_pickle=True ) as npz:
for key in SUM.keys():
SUM[key] = npz[key]
ftimes = np.load( fn_ftimes, allow_pickle=True )['ftimes']
print( SUM )
#print( DETAIL_MODE )
#print( SUM_test )
#print( DETAIL_MODE_test )
#sys.exit()
#plot_bar( dic=SUM )
plot_bar_2p( dic=SUM, ftimes=ftimes )
#plot_bar_2p_scale( dic=SUM, dic2=SUM_test, ftimes=ftimes )
|
[
"numpy.load",
"numpy.argmax",
"matplotlib.pyplot.clf",
"numpy.isnan",
"os.path.isfile",
"numpy.mean",
"numpy.arange",
"os.path.join",
"numpy.nanmean",
"numpy.copy",
"numpy.std",
"matplotlib.pyplot.close",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"numpy.savez",
"os.scandir",
"numpy.nanmax",
"os.makedirs",
"numpy.zeros",
"numpy.nanmin",
"numpy.array",
"matplotlib.pyplot.savefig",
"numpy.sqrt"
] |
[((124, 161), 'os.makedirs', 'os.makedirs', (['data_path'], {'exist_ok': '(True)'}), '(data_path, exist_ok=True)\n', (135, 161), False, 'import os\n'), ((1638, 1655), 'numpy.array', 'np.array', (['scale_l'], {}), '(scale_l)\n', (1646, 1655), True, 'import numpy as np\n'), ((2270, 2293), 'numpy.zeros', 'np.zeros', (['scale_l.shape'], {}), '(scale_l.shape)\n', (2278, 2293), True, 'import numpy as np\n'), ((7731, 7743), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (7739, 7743), True, 'import numpy as np\n'), ((8060, 8094), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(6, 4)'}), '(1, 1, figsize=(6, 4))\n', (8072, 8094), True, 'import matplotlib.pyplot as plt\n'), ((8274, 8287), 'numpy.argmax', 'np.argmax', (['rn'], {}), '(rn)\n', (8283, 8287), True, 'import numpy as np\n'), ((8301, 8332), 'numpy.mean', 'np.mean', (['rbins[imode:imode + 2]'], {}), '(rbins[imode:imode + 2])\n', (8308, 8332), True, 'import numpy as np\n'), ((8344, 8356), 'numpy.mean', 'np.mean', (['dat'], {}), '(dat)\n', (8351, 8356), True, 'import numpy as np\n'), ((9699, 9711), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (9707, 9711), True, 'import numpy as np\n'), ((9775, 9809), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(6, 4)'}), '(1, 2, figsize=(6, 4))\n', (9787, 9809), True, 'import matplotlib.pyplot as plt\n'), ((10996, 11015), 'numpy.arange', 'np.arange', (['(0)', '(22)', '(2)'], {}), '(0, 22, 2)\n', (11005, 11015), True, 'import numpy as np\n'), ((12555, 12567), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (12563, 12567), True, 'import numpy as np\n'), ((12640, 12674), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(6, 4)'}), '(1, 2, figsize=(6, 4))\n', (12652, 12674), True, 'import matplotlib.pyplot as plt\n'), ((14289, 14308), 'numpy.arange', 'np.arange', (['(0)', '(22)', '(2)'], {}), '(0, 22, 2)\n', (14298, 14308), True, 'import numpy as np\n'), ((15898, 15932), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(5, 5)'}), '(1, 1, figsize=(5, 5))\n', (15910, 15932), True, 'import matplotlib.pyplot as plt\n'), ((16932, 16951), 'numpy.arange', 'np.arange', (['(0)', '(32)', '(2)'], {}), '(0, 32, 2)\n', (16941, 16951), True, 'import numpy as np\n'), ((18199, 18215), 'numpy.array', 'np.array', (['ctimes'], {}), '(ctimes)\n', (18207, 18215), True, 'import numpy as np\n'), ((21424, 21462), 'numpy.savez', 'np.savez', (['fn_sum'], {'ftimes': 'ftimes'}), '(fn_sum, **SUM, ftimes=ftimes)\n', (21432, 21462), True, 'import numpy as np\n'), ((21468, 21502), 'numpy.savez', 'np.savez', (['fn_ftimes'], {'ftimes': 'ftimes'}), '(fn_ftimes, ftimes=ftimes)\n', (21476, 21502), True, 'import numpy as np\n'), ((7641, 7662), 'numpy.array', 'np.array', (['DETAIL[key]'], {}), '(DETAIL[key])\n', (7649, 7662), True, 'import numpy as np\n'), ((8019, 8036), 'numpy.sqrt', 'np.sqrt', (['dat.size'], {}), '(dat.size)\n', (8026, 8036), True, 'import numpy as np\n'), ((9486, 9496), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9494, 9496), True, 'import matplotlib.pyplot as plt\n'), ((9514, 9568), 'matplotlib.pyplot.savefig', 'plt.savefig', (['ofig'], {'bbox_inches': '"""tight"""', 'pad_inches': '(0.1)'}), "(ofig, bbox_inches='tight', pad_inches=0.1)\n", (9525, 9568), True, 'import matplotlib.pyplot as plt\n'), ((9599, 9608), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (9606, 9608), True, 'import matplotlib.pyplot as plt\n'), ((9616, 9632), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], 
{}), "('all')\n", (9625, 9632), True, 'import matplotlib.pyplot as plt\n'), ((11546, 11561), 'numpy.mean', 'np.mean', (['ftimes'], {}), '(ftimes)\n', (11553, 11561), True, 'import numpy as np\n'), ((11655, 11677), 'numpy.std', 'np.std', (['ftimes'], {'ddof': '(1)'}), '(ftimes, ddof=1)\n', (11661, 11677), True, 'import numpy as np\n'), ((12368, 12378), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12376, 12378), True, 'import matplotlib.pyplot as plt\n'), ((12396, 12450), 'matplotlib.pyplot.savefig', 'plt.savefig', (['ofig'], {'bbox_inches': '"""tight"""', 'pad_inches': '(0.1)'}), "(ofig, bbox_inches='tight', pad_inches=0.1)\n", (12407, 12450), True, 'import matplotlib.pyplot as plt\n'), ((12481, 12490), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (12488, 12490), True, 'import matplotlib.pyplot as plt\n'), ((12498, 12514), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (12507, 12514), True, 'import matplotlib.pyplot as plt\n'), ((14839, 14854), 'numpy.mean', 'np.mean', (['ftimes'], {}), '(ftimes)\n', (14846, 14854), True, 'import numpy as np\n'), ((14948, 14970), 'numpy.std', 'np.std', (['ftimes'], {'ddof': '(1)'}), '(ftimes, ddof=1)\n', (14954, 14970), True, 'import numpy as np\n'), ((15672, 15682), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15680, 15682), True, 'import matplotlib.pyplot as plt\n'), ((15700, 15754), 'matplotlib.pyplot.savefig', 'plt.savefig', (['ofig'], {'bbox_inches': '"""tight"""', 'pad_inches': '(0.1)'}), "(ofig, bbox_inches='tight', pad_inches=0.1)\n", (15711, 15754), True, 'import matplotlib.pyplot as plt\n'), ((15785, 15794), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (15792, 15794), True, 'import matplotlib.pyplot as plt\n'), ((15802, 15818), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (15811, 15818), True, 'import matplotlib.pyplot as plt\n'), ((17084, 17094), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17092, 17094), True, 'import matplotlib.pyplot as plt\n'), ((17112, 17166), 'matplotlib.pyplot.savefig', 'plt.savefig', (['ofig'], {'bbox_inches': '"""tight"""', 'pad_inches': '(0.1)'}), "(ofig, bbox_inches='tight', pad_inches=0.1)\n", (17123, 17166), True, 'import matplotlib.pyplot as plt\n'), ((17197, 17206), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (17204, 17206), True, 'import matplotlib.pyplot as plt\n'), ((17214, 17230), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (17223, 17230), True, 'import matplotlib.pyplot as plt\n'), ((18857, 18880), 'numpy.nanmean', 'np.nanmean', (['DETAIL[key]'], {}), '(DETAIL[key])\n', (18867, 18880), True, 'import numpy as np\n'), ((19762, 19790), 'numpy.nanmean', 'np.nanmean', (['DETAIL_test[key]'], {}), '(DETAIL_test[key])\n', (19772, 19790), True, 'import numpy as np\n'), ((21521, 21555), 'numpy.load', 'np.load', (['fn_sum'], {'allow_pickle': '(True)'}), '(fn_sum, allow_pickle=True)\n', (21528, 21555), True, 'import numpy as np\n'), ((21637, 21674), 'numpy.load', 'np.load', (['fn_ftimes'], {'allow_pickle': '(True)'}), '(fn_ftimes, allow_pickle=True)\n', (21644, 21674), True, 'import numpy as np\n'), ((336, 351), 'os.scandir', 'os.scandir', (['top'], {}), '(top)\n', (346, 351), False, 'import os\n'), ((498, 521), 'os.path.join', 'os.path.join', (['top', 'dir_'], {}), '(top, dir_)\n', (510, 521), False, 'import os\n'), ((634, 654), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (648, 654), False, 'import os\n'), ((2456, 2471), 'numpy.copy', 'np.copy', 
(['iarray'], {}), '(iarray)\n', (2463, 2471), True, 'import numpy as np\n'), ((2572, 2592), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (2586, 2592), False, 'import os\n'), ((4202, 4217), 'os.scandir', 'os.scandir', (['top'], {}), '(top)\n', (4212, 4217), False, 'import os\n'), ((5637, 5667), 'os.path.join', 'os.path.join', (['top', 'dir_', 'fname'], {}), '(top, dir_, fname)\n', (5649, 5667), False, 'import os\n'), ((5751, 5771), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (5765, 5771), False, 'import os\n'), ((11245, 11264), 'numpy.arange', 'np.arange', (['(4)', '(20)', '(4)'], {}), '(4, 20, 4)\n', (11254, 11264), True, 'import numpy as np\n'), ((14538, 14557), 'numpy.arange', 'np.arange', (['(4)', '(20)', '(4)'], {}), '(4, 20, 4)\n', (14547, 14557), True, 'import numpy as np\n'), ((18276, 18294), 'numpy.nanmean', 'np.nanmean', (['ctimes'], {}), '(ctimes)\n', (18286, 18294), True, 'import numpy as np\n'), ((18372, 18387), 'numpy.mean', 'np.mean', (['ftimes'], {}), '(ftimes)\n', (18379, 18387), True, 'import numpy as np\n'), ((18703, 18720), 'numpy.isnan', 'np.isnan', (['dat_jit'], {}), '(dat_jit)\n', (18711, 18720), True, 'import numpy as np\n'), ((19101, 19123), 'numpy.nanmax', 'np.nanmax', (['DETAIL[key]'], {}), '(DETAIL[key])\n', (19110, 19123), True, 'import numpy as np\n'), ((19127, 19149), 'numpy.nanmin', 'np.nanmin', (['DETAIL[key]'], {}), '(DETAIL[key])\n', (19136, 19149), True, 'import numpy as np\n'), ((20085, 20112), 'numpy.nanmax', 'np.nanmax', (['DETAIL_test[key]'], {}), '(DETAIL_test[key])\n', (20094, 20112), True, 'import numpy as np\n'), ((20116, 20143), 'numpy.nanmin', 'np.nanmin', (['DETAIL_test[key]'], {}), '(DETAIL_test[key])\n', (20125, 20143), True, 'import numpy as np\n'), ((19937, 19950), 'numpy.isnan', 'np.isnan', (['dat'], {}), '(dat)\n', (19945, 19950), True, 'import numpy as np\n'), ((18932, 18945), 'numpy.isnan', 'np.isnan', (['dat'], {}), '(dat)\n', (18940, 18945), True, 'import numpy as np\n'), ((18949, 18966), 'numpy.isnan', 'np.isnan', (['dat_jit'], {}), '(dat_jit)\n', (18957, 18966), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# Capture video from a camera with OpenCV
# Space --> save a screenshot, ESC --> exit.
# Code adapted from http://blog.csdn.net/tanmengwen/article/details/41892977
import cv2.cv as cv
import time
if __name__ == '__main__':
cv.NamedWindow("camRra", 1)
    capture = cv.CaptureFromCAM(0)  # open the camera
    # capture = cv.CaptureFromFile("Video.avi")  # open a video file instead
num = 0;
while True:
img = cv.QueryFrame(capture)
cv.ShowImage("camera", img)
key = cv.WaitKey(1) & 0xFF
if key == 27:
break
if key == ord(' '):
num = num + 1
filename = "frmaes_%s.jpg" % num
cv.SaveImage(filename, img)
del (capture)
cv.DestroyWindow("camera")
|
[
"cv2.cv.NamedWindow",
"cv2.cv.WaitKey",
"cv2.cv.SaveImage",
"cv2.cv.DestroyWindow",
"cv2.cv.QueryFrame",
"cv2.cv.CaptureFromCAM",
"cv2.cv.ShowImage"
] |
[((194, 221), 'cv2.cv.NamedWindow', 'cv.NamedWindow', (['"""camRra"""', '(1)'], {}), "('camRra', 1)\n", (208, 221), True, 'import cv2.cv as cv\n'), ((236, 256), 'cv2.cv.CaptureFromCAM', 'cv.CaptureFromCAM', (['(0)'], {}), '(0)\n', (253, 256), True, 'import cv2.cv as cv\n'), ((677, 703), 'cv2.cv.DestroyWindow', 'cv.DestroyWindow', (['"""camera"""'], {}), "('camera')\n", (693, 703), True, 'import cv2.cv as cv\n'), ((379, 401), 'cv2.cv.QueryFrame', 'cv.QueryFrame', (['capture'], {}), '(capture)\n', (392, 401), True, 'import cv2.cv as cv\n'), ((410, 437), 'cv2.cv.ShowImage', 'cv.ShowImage', (['"""camera"""', 'img'], {}), "('camera', img)\n", (422, 437), True, 'import cv2.cv as cv\n'), ((453, 466), 'cv2.cv.WaitKey', 'cv.WaitKey', (['(1)'], {}), '(1)\n', (463, 466), True, 'import cv2.cv as cv\n'), ((626, 653), 'cv2.cv.SaveImage', 'cv.SaveImage', (['filename', 'img'], {}), '(filename, img)\n', (638, 653), True, 'import cv2.cv as cv\n')]
|
import time
import titration.utils.devices.board_mock as board
import titration.utils.devices.temperature_control_mock as temperature_control
import titration.utils.devices.temperature_probe_mock as temperature_probe
def test_temperature_control_create():
sensor = temperature_probe.Temperature_Probe(
board.SCK, board.MOSI, board.MISO, board.D0, wires=3
)
temperature_controller = temperature_control.Temperature_Control(board.D1, sensor)
assert temperature_controller is not None
def test_temperature_control_update():
sensor = temperature_probe.Temperature_Probe(
board.SCK, board.MOSI, board.MISO, board.D0, wires=3
)
temperature_controller = temperature_control.Temperature_Control(board.D1, sensor)
temperature_controller.update()
time.sleep(1)
temperature_controller.update()
def test_temperature_control_enable_print():
sensor = temperature_probe.Temperature_Probe(
board.SCK, board.MOSI, board.MISO, board.D0, wires=3
)
temperature_controller = temperature_control.Temperature_Control(board.D1, sensor)
temperature_controller.enable_print()
def test_temperature_control_disable_print():
sensor = temperature_probe.Temperature_Probe(
board.SCK, board.MOSI, board.MISO, board.D0, wires=3
)
temperature_controller = temperature_control.Temperature_Control(board.D1, sensor)
temperature_controller.disable_print()
def test_temperature_control_at_temperature():
sensor = temperature_probe.Temperature_Probe(
board.SCK, board.MOSI, board.MISO, board.D0, wires=3
)
temperature_controller = temperature_control.Temperature_Control(board.D1, sensor)
temperature_controller.at_temperature()
def test_temperature_control_last_temperature():
sensor = temperature_probe.Temperature_Probe(
board.SCK, board.MOSI, board.MISO, board.D0, wires=3
)
temperature_controller = temperature_control.Temperature_Control(board.D1, sensor)
temperature_controller.get_last_temperature()
def test_temperature_control_activate():
sensor = temperature_probe.Temperature_Probe(
board.SCK, board.MOSI, board.MISO, board.D0, wires=3
)
temperature_controller = temperature_control.Temperature_Control(board.D1, sensor)
temperature_controller.activate()
def test_temperature_control_deactivate():
sensor = temperature_probe.Temperature_Probe(
board.SCK, board.MOSI, board.MISO, board.D0, wires=3
)
temperature_controller = temperature_control.Temperature_Control(board.D1, sensor)
temperature_controller.deactivate()
|
[
"time.sleep",
"titration.utils.devices.temperature_control_mock.Temperature_Control",
"titration.utils.devices.temperature_probe_mock.Temperature_Probe"
] |
[((272, 365), 'titration.utils.devices.temperature_probe_mock.Temperature_Probe', 'temperature_probe.Temperature_Probe', (['board.SCK', 'board.MOSI', 'board.MISO', 'board.D0'], {'wires': '(3)'}), '(board.SCK, board.MOSI, board.MISO,\n board.D0, wires=3)\n', (307, 365), True, 'import titration.utils.devices.temperature_probe_mock as temperature_probe\n'), ((405, 462), 'titration.utils.devices.temperature_control_mock.Temperature_Control', 'temperature_control.Temperature_Control', (['board.D1', 'sensor'], {}), '(board.D1, sensor)\n', (444, 462), True, 'import titration.utils.devices.temperature_control_mock as temperature_control\n'), ((564, 657), 'titration.utils.devices.temperature_probe_mock.Temperature_Probe', 'temperature_probe.Temperature_Probe', (['board.SCK', 'board.MOSI', 'board.MISO', 'board.D0'], {'wires': '(3)'}), '(board.SCK, board.MOSI, board.MISO,\n board.D0, wires=3)\n', (599, 657), True, 'import titration.utils.devices.temperature_probe_mock as temperature_probe\n'), ((697, 754), 'titration.utils.devices.temperature_control_mock.Temperature_Control', 'temperature_control.Temperature_Control', (['board.D1', 'sensor'], {}), '(board.D1, sensor)\n', (736, 754), True, 'import titration.utils.devices.temperature_control_mock as temperature_control\n'), ((796, 809), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (806, 809), False, 'import time\n'), ((906, 999), 'titration.utils.devices.temperature_probe_mock.Temperature_Probe', 'temperature_probe.Temperature_Probe', (['board.SCK', 'board.MOSI', 'board.MISO', 'board.D0'], {'wires': '(3)'}), '(board.SCK, board.MOSI, board.MISO,\n board.D0, wires=3)\n', (941, 999), True, 'import titration.utils.devices.temperature_probe_mock as temperature_probe\n'), ((1039, 1096), 'titration.utils.devices.temperature_control_mock.Temperature_Control', 'temperature_control.Temperature_Control', (['board.D1', 'sensor'], {}), '(board.D1, sensor)\n', (1078, 1096), True, 'import titration.utils.devices.temperature_control_mock as temperature_control\n'), ((1201, 1294), 'titration.utils.devices.temperature_probe_mock.Temperature_Probe', 'temperature_probe.Temperature_Probe', (['board.SCK', 'board.MOSI', 'board.MISO', 'board.D0'], {'wires': '(3)'}), '(board.SCK, board.MOSI, board.MISO,\n board.D0, wires=3)\n', (1236, 1294), True, 'import titration.utils.devices.temperature_probe_mock as temperature_probe\n'), ((1334, 1391), 'titration.utils.devices.temperature_control_mock.Temperature_Control', 'temperature_control.Temperature_Control', (['board.D1', 'sensor'], {}), '(board.D1, sensor)\n', (1373, 1391), True, 'import titration.utils.devices.temperature_control_mock as temperature_control\n'), ((1498, 1591), 'titration.utils.devices.temperature_probe_mock.Temperature_Probe', 'temperature_probe.Temperature_Probe', (['board.SCK', 'board.MOSI', 'board.MISO', 'board.D0'], {'wires': '(3)'}), '(board.SCK, board.MOSI, board.MISO,\n board.D0, wires=3)\n', (1533, 1591), True, 'import titration.utils.devices.temperature_probe_mock as temperature_probe\n'), ((1631, 1688), 'titration.utils.devices.temperature_control_mock.Temperature_Control', 'temperature_control.Temperature_Control', (['board.D1', 'sensor'], {}), '(board.D1, sensor)\n', (1670, 1688), True, 'import titration.utils.devices.temperature_control_mock as temperature_control\n'), ((1798, 1891), 'titration.utils.devices.temperature_probe_mock.Temperature_Probe', 'temperature_probe.Temperature_Probe', (['board.SCK', 'board.MOSI', 'board.MISO', 'board.D0'], {'wires': '(3)'}), '(board.SCK, board.MOSI, 
board.MISO,\n board.D0, wires=3)\n', (1833, 1891), True, 'import titration.utils.devices.temperature_probe_mock as temperature_probe\n'), ((1931, 1988), 'titration.utils.devices.temperature_control_mock.Temperature_Control', 'temperature_control.Temperature_Control', (['board.D1', 'sensor'], {}), '(board.D1, sensor)\n', (1970, 1988), True, 'import titration.utils.devices.temperature_control_mock as temperature_control\n'), ((2096, 2189), 'titration.utils.devices.temperature_probe_mock.Temperature_Probe', 'temperature_probe.Temperature_Probe', (['board.SCK', 'board.MOSI', 'board.MISO', 'board.D0'], {'wires': '(3)'}), '(board.SCK, board.MOSI, board.MISO,\n board.D0, wires=3)\n', (2131, 2189), True, 'import titration.utils.devices.temperature_probe_mock as temperature_probe\n'), ((2229, 2286), 'titration.utils.devices.temperature_control_mock.Temperature_Control', 'temperature_control.Temperature_Control', (['board.D1', 'sensor'], {}), '(board.D1, sensor)\n', (2268, 2286), True, 'import titration.utils.devices.temperature_control_mock as temperature_control\n'), ((2384, 2477), 'titration.utils.devices.temperature_probe_mock.Temperature_Probe', 'temperature_probe.Temperature_Probe', (['board.SCK', 'board.MOSI', 'board.MISO', 'board.D0'], {'wires': '(3)'}), '(board.SCK, board.MOSI, board.MISO,\n board.D0, wires=3)\n', (2419, 2477), True, 'import titration.utils.devices.temperature_probe_mock as temperature_probe\n'), ((2517, 2574), 'titration.utils.devices.temperature_control_mock.Temperature_Control', 'temperature_control.Temperature_Control', (['board.D1', 'sensor'], {}), '(board.D1, sensor)\n', (2556, 2574), True, 'import titration.utils.devices.temperature_control_mock as temperature_control\n')]
|
# -*- coding: utf-8 -*-
import base64
import uuid
from io import BytesIO
import qrcode
from odoo import api, fields, models
class MusicRemote(models.Model):
_name = "oomusic.remote"
_description = "Remote Control"
def _default_name(self):
return fields.Date.to_string(fields.Date.context_today(self))
def _default_access_token(self):
return uuid.uuid4().hex
name = fields.Char("Name", default=lambda s: s._default_name())
access_token = fields.Char(
"Access Token", index=True, default=lambda s: s._default_access_token()
)
public = fields.Boolean("Public", default=False)
url = fields.Char(
"URL", compute="_compute_url", help="Access this URL to control the playback remotely."
)
qr = fields.Binary("QR Code", compute="_compute_qr", help="QR code pointing to the remote URL.")
user_id = fields.Many2one(
"res.users",
string="User",
required=True,
ondelete="cascade",
default=lambda self: self.env.user,
)
@api.depends("access_token", "public")
def _compute_url(self):
base_url = self.env["ir.config_parameter"].sudo().get_param("web.base.url")
for remote in self:
remote.url = "{}/oomusic/remote{}/{}".format(
base_url, "_public" if remote.public else "", remote.access_token
)
@api.depends("url")
def _compute_qr(self):
for remote in self:
img = qrcode.make(remote.url)
img_tmp = BytesIO()
img.save(img_tmp, format="PNG")
remote.qr = base64.b64encode(img_tmp.getvalue())
def action_reset_remote_token(self):
for remote in self:
remote.access_token = uuid.uuid4().hex
|
[
"io.BytesIO",
"uuid.uuid4",
"odoo.fields.Many2one",
"odoo.fields.Binary",
"odoo.api.depends",
"odoo.fields.Char",
"qrcode.make",
"odoo.fields.Date.context_today",
"odoo.fields.Boolean"
] |
[((597, 636), 'odoo.fields.Boolean', 'fields.Boolean', (['"""Public"""'], {'default': '(False)'}), "('Public', default=False)\n", (611, 636), False, 'from odoo import api, fields, models\n'), ((647, 752), 'odoo.fields.Char', 'fields.Char', (['"""URL"""'], {'compute': '"""_compute_url"""', 'help': '"""Access this URL to control the playback remotely."""'}), "('URL', compute='_compute_url', help=\n 'Access this URL to control the playback remotely.')\n", (658, 752), False, 'from odoo import api, fields, models\n'), ((771, 867), 'odoo.fields.Binary', 'fields.Binary', (['"""QR Code"""'], {'compute': '"""_compute_qr"""', 'help': '"""QR code pointing to the remote URL."""'}), "('QR Code', compute='_compute_qr', help=\n 'QR code pointing to the remote URL.')\n", (784, 867), False, 'from odoo import api, fields, models\n'), ((877, 996), 'odoo.fields.Many2one', 'fields.Many2one', (['"""res.users"""'], {'string': '"""User"""', 'required': '(True)', 'ondelete': '"""cascade"""', 'default': '(lambda self: self.env.user)'}), "('res.users', string='User', required=True, ondelete=\n 'cascade', default=lambda self: self.env.user)\n", (892, 996), False, 'from odoo import api, fields, models\n'), ((1045, 1082), 'odoo.api.depends', 'api.depends', (['"""access_token"""', '"""public"""'], {}), "('access_token', 'public')\n", (1056, 1082), False, 'from odoo import api, fields, models\n'), ((1383, 1401), 'odoo.api.depends', 'api.depends', (['"""url"""'], {}), "('url')\n", (1394, 1401), False, 'from odoo import api, fields, models\n'), ((294, 325), 'odoo.fields.Date.context_today', 'fields.Date.context_today', (['self'], {}), '(self)\n', (319, 325), False, 'from odoo import api, fields, models\n'), ((380, 392), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (390, 392), False, 'import uuid\n'), ((1475, 1498), 'qrcode.make', 'qrcode.make', (['remote.url'], {}), '(remote.url)\n', (1486, 1498), False, 'import qrcode\n'), ((1521, 1530), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (1528, 1530), False, 'from io import BytesIO\n'), ((1740, 1752), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1750, 1752), False, 'import uuid\n')]
|
from pathlib import Path
import pandas as pd
import spacy
from assertpy import assert_that
from src.definitions import PROJECT_ROOT
from src.main.preprocess_data import preprocess_data, parse_passage
def test_preprocess_data(tmp_path: Path):
preprocess_data(
data_root=PROJECT_ROOT / "data/test/raw",
output_dir=tmp_path,
)
actual = pd.read_csv(tmp_path / "labeled_passages.csv")
def word_as_dict(word: str) -> dict:
return actual[actual.words == word].iloc[0].to_dict()
assert_that(word_as_dict("methanol")).is_equal_to(
{
"passage_id": 0,
"pubtator_id": 1,
"words": "methanol",
"POS": "NOUN",
"labels": "B-Chemical",
}
)
assert_that(word_as_dict("poisoning")).is_equal_to(
{
"passage_id": 0,
"pubtator_id": 1,
"words": "poisoning",
"POS": "NOUN",
"labels": "B-Disease",
}
)
assert_that(word_as_dict("pyridine")).is_equal_to(
{
"passage_id": 0,
"pubtator_id": 2,
"words": "pyridine",
"POS": "NOUN",
"labels": "B-Chemical",
}
)
assert_that(word_as_dict("nucleotide")).is_equal_to(
{
"passage_id": 0,
"pubtator_id": 2,
"words": "nucleotide",
"POS": "NOUN",
"labels": "I-Chemical",
}
)
expected = pd.read_csv(PROJECT_ROOT / "data/test/labeled_passages.csv")
pd.testing.assert_frame_equal(left=actual, right=expected)
def test_parse_passage_can_handle_global_offset():
nlp = spacy.load("en_core_web_sm")
actual = parse_passage(
passage={
"offset": 20, # This is the parameter under test
"text": "Adsorption of rRNA and poly(A)-containing RNA to filters.",
"annotations": [
{
"infons": {"identifier": "MESH:D011061", "type": "Chemical"},
# TODO: configure tokenization to split on '-'
"text": "poly(A)",
"locations": [{"offset": 43, "length": 7}],
},
],
},
pubtator_id="0",
passage_id=0,
nlp=nlp,
)
print(actual)
expected = pd.DataFrame(
{
"pubtator_id": ["0"] * 9,
"passage_id": [0] * 9,
"words": [
"Adsorption",
"of",
"rRNA",
"and",
"poly(A)-containing",
"RNA",
"to",
"filters",
".",
],
"POS": [
"NOUN",
"ADP",
"ADJ",
"CCONJ",
"VERB",
"PROPN",
"ADP",
"NOUN",
"PUNCT",
],
"labels": ["O"] * 4 + ["B-Chemical"] + ["O"] * 4,
}
)
pd.testing.assert_frame_equal(left=actual, right=expected)
|
[
"pandas.DataFrame",
"pandas.testing.assert_frame_equal",
"src.main.preprocess_data.parse_passage",
"pandas.read_csv",
"spacy.load",
"src.main.preprocess_data.preprocess_data"
] |
[((250, 328), 'src.main.preprocess_data.preprocess_data', 'preprocess_data', ([], {'data_root': "(PROJECT_ROOT / 'data/test/raw')", 'output_dir': 'tmp_path'}), "(data_root=PROJECT_ROOT / 'data/test/raw', output_dir=tmp_path)\n", (265, 328), False, 'from src.main.preprocess_data import preprocess_data, parse_passage\n'), ((366, 412), 'pandas.read_csv', 'pd.read_csv', (["(tmp_path / 'labeled_passages.csv')"], {}), "(tmp_path / 'labeled_passages.csv')\n", (377, 412), True, 'import pandas as pd\n'), ((1483, 1543), 'pandas.read_csv', 'pd.read_csv', (["(PROJECT_ROOT / 'data/test/labeled_passages.csv')"], {}), "(PROJECT_ROOT / 'data/test/labeled_passages.csv')\n", (1494, 1543), True, 'import pandas as pd\n'), ((1548, 1606), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', ([], {'left': 'actual', 'right': 'expected'}), '(left=actual, right=expected)\n', (1577, 1606), True, 'import pandas as pd\n'), ((1670, 1698), 'spacy.load', 'spacy.load', (['"""en_core_web_sm"""'], {}), "('en_core_web_sm')\n", (1680, 1698), False, 'import spacy\n'), ((1713, 2020), 'src.main.preprocess_data.parse_passage', 'parse_passage', ([], {'passage': "{'offset': 20, 'text':\n 'Adsorption of rRNA and poly(A)-containing RNA to filters.',\n 'annotations': [{'infons': {'identifier': 'MESH:D011061', 'type':\n 'Chemical'}, 'text': 'poly(A)', 'locations': [{'offset': 43, 'length': \n 7}]}]}", 'pubtator_id': '"""0"""', 'passage_id': '(0)', 'nlp': 'nlp'}), "(passage={'offset': 20, 'text':\n 'Adsorption of rRNA and poly(A)-containing RNA to filters.',\n 'annotations': [{'infons': {'identifier': 'MESH:D011061', 'type':\n 'Chemical'}, 'text': 'poly(A)', 'locations': [{'offset': 43, 'length': \n 7}]}]}, pubtator_id='0', passage_id=0, nlp=nlp)\n", (1726, 2020), False, 'from src.main.preprocess_data import preprocess_data, parse_passage\n'), ((2338, 2642), 'pandas.DataFrame', 'pd.DataFrame', (["{'pubtator_id': ['0'] * 9, 'passage_id': [0] * 9, 'words': ['Adsorption',\n 'of', 'rRNA', 'and', 'poly(A)-containing', 'RNA', 'to', 'filters', '.'],\n 'POS': ['NOUN', 'ADP', 'ADJ', 'CCONJ', 'VERB', 'PROPN', 'ADP', 'NOUN',\n 'PUNCT'], 'labels': ['O'] * 4 + ['B-Chemical'] + ['O'] * 4}"], {}), "({'pubtator_id': ['0'] * 9, 'passage_id': [0] * 9, 'words': [\n 'Adsorption', 'of', 'rRNA', 'and', 'poly(A)-containing', 'RNA', 'to',\n 'filters', '.'], 'POS': ['NOUN', 'ADP', 'ADJ', 'CCONJ', 'VERB', 'PROPN',\n 'ADP', 'NOUN', 'PUNCT'], 'labels': ['O'] * 4 + ['B-Chemical'] + ['O'] * 4})\n", (2350, 2642), True, 'import pandas as pd\n'), ((3038, 3096), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', ([], {'left': 'actual', 'right': 'expected'}), '(left=actual, right=expected)\n', (3067, 3096), True, 'import pandas as pd\n')]
|
# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
# Simple test for NeoPixels on Raspberry Pi
import time
import board
import neopixel
import colorsys
import threading
class sunController():
def __init__(self):
# Choose an open pin connected to the Data In of the NeoPixel strip, i.e. board.D18
# NeoPixels must be connected to D10, D12, D18 or D21 to work.
self.pixel_pin = board.D10
# The number of NeoPixels 147 + 1 for some reason
self.num_pixels = 88
#Start position of sun
self.current_position = 0
#Start level of the sun between lowest 1 and 4 max
self.sun_level = 1
# The order of the pixel colors - RGB or GRB. Some NeoPixels have red and green reversed!
# For RGBW NeoPixels, simply change the ORDER to RGBW or GRBW.
self.ORDER = neopixel.GRB
self.pixels = neopixel.NeoPixel(
self.pixel_pin, self.num_pixels, brightness=1, auto_write=False, pixel_order=self.ORDER)
#TODO - Create 4 sun stages
self.SUN_INTENSE = (255,64,0)
self.SUN_STRONG = (64,16,0)
self.SUN_MILD = (32,8,0)
self.SUN_WEAK = (20,6,0)
self.SUN_COLOR_OFF = (0,0,0)
def init_sun(self, start_position):
self.pixels.fill(self.SUN_COLOR_OFF)
self.pixels.show()
self.current_position = start_position
self.pixels[self.current_position] = (self.SUN_INTENSE)
self.pixels.show()
def test_colors(self):
self.pixels.fill(self.SUN_COLOR_OFF)
self.pixels.show()
time.sleep(3)
self.pixels.fill(self.SUN_WEAK)
self.pixels.show()
time.sleep(3)
self.pixels.fill(self.SUN_MILD)
self.pixels.show()
time.sleep(3)
self.pixels.fill(self.SUN_STRONG)
self.pixels.show()
time.sleep(3)
self.pixels.fill(self.SUN_INTENSE)
self.pixels.show()
time.sleep(3)
def update_position(self):
#print("updating position")
self.current_position += 1
#Redraw pixels
self.redraw_pixels()
#Create a new thread for non-blocking change of position over time
timer = threading.Timer(3.4, self.update_position)
timer.start()
def redraw_pixels(self):
#Reset pixels
self.pixels.fill(self.SUN_COLOR_OFF)
#level 1
if self.sun_level == 1:
self.pixels[self.current_position % self.num_pixels] = (self.SUN_INTENSE)
#level 2
if self.sun_level == 2:
self.pixels.fill(self.SUN_COLOR_OFF)
self.pixels[self.current_position % self.num_pixels] = (self.SUN_INTENSE)
self.pixels[(self.current_position +1) % self.num_pixels] = (self.SUN_STRONG)
self.pixels[(self.current_position -1) % self.num_pixels] = (self.SUN_STRONG)
#level 3
if self.sun_level == 3:
self.pixels.fill(self.SUN_COLOR_OFF)
self.pixels[self.current_position % self.num_pixels] = (self.SUN_INTENSE)
self.pixels[(self.current_position +1) % self.num_pixels] = (self.SUN_STRONG)
self.pixels[(self.current_position -1) % self.num_pixels] = (self.SUN_STRONG)
self.pixels[(self.current_position +2) % self.num_pixels] = (self.SUN_MILD)
self.pixels[(self.current_position -2) % self.num_pixels] = (self.SUN_MILD)
#level 4
if self.sun_level == 4:
self.pixels.fill(self.SUN_COLOR_OFF)
self.pixels[self.current_position % self.num_pixels] = (self.SUN_INTENSE)
self.pixels[(self.current_position + 1) % self.num_pixels] = (self.SUN_STRONG)
self.pixels[(self.current_position - 1) % self.num_pixels] = (self.SUN_STRONG)
self.pixels[(self.current_position +2) % self.num_pixels] = (self.SUN_MILD)
self.pixels[(self.current_position -2) % self.num_pixels] = (self.SUN_MILD)
self.pixels[(self.current_position + 3) % self.num_pixels] = (self.SUN_WEAK)
self.pixels[(self.current_position - 3) % self.num_pixels] = (self.SUN_WEAK)
#Reveal pixels
self.pixels.show()
def set_level(self, new_level):
self.sun_level = new_level
self.redraw_pixels()
'''
def increase_sun(self):
print("sun increased")
print("current sun stage is: {}".format(self.sun_level))
if self.sun_level == 4:
print("Sunlevel already max")
return
#Update level
self.sun_level += 1
print("New sun stage is: {}".format(self.sun_level))
#Redraw pixels
self.redraw_pixels()
def decrease_sun(self):
print("sun decreased")
print("current sun stage is: {}".format(self.sun_level))
if self.sun_level == 1:
print("Sunlevel already lowest")
return
#Update level
self.sun_level -= 1
print("New sun stage is: {}".format(self.sun_level))
#Redraw pixels
self.redraw_pixels()
'''
'''
sun = sunController()
sun.update_position()
while input != 'quit':
command = input()
if command == "1":
sun.increase_sun()
if command == "2":
sun.decrease_sun()
if command == "3":
sun.test_colors()
command = ''
'''
|
[
"neopixel.NeoPixel",
"threading.Timer",
"time.sleep"
] |
[((948, 1059), 'neopixel.NeoPixel', 'neopixel.NeoPixel', (['self.pixel_pin', 'self.num_pixels'], {'brightness': '(1)', 'auto_write': '(False)', 'pixel_order': 'self.ORDER'}), '(self.pixel_pin, self.num_pixels, brightness=1, auto_write\n =False, pixel_order=self.ORDER)\n', (965, 1059), False, 'import neopixel\n'), ((1700, 1713), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (1710, 1713), False, 'import time\n'), ((1798, 1811), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (1808, 1811), False, 'import time\n'), ((1896, 1909), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (1906, 1909), False, 'import time\n'), ((1996, 2009), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (2006, 2009), False, 'import time\n'), ((2097, 2110), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (2107, 2110), False, 'import time\n'), ((2388, 2430), 'threading.Timer', 'threading.Timer', (['(3.4)', 'self.update_position'], {}), '(3.4, self.update_position)\n', (2403, 2430), False, 'import threading\n')]
|
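The strip controller above advances the sun position on a timer: threading.Timer fires only once, so update_position() re-arms a new timer on every call. A minimal, hardware-free sketch of that self-rescheduling pattern (the 0.1 s interval and the tick counter are illustrative, not taken from the original):

import threading

def tick(remaining=3):
    # Do one unit of periodic work, then re-arm the one-shot timer.
    print("tick, remaining:", remaining)
    if remaining > 1:
        threading.Timer(0.1, tick, args=(remaining - 1,)).start()

tick()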
import unittest
from unittest.mock import MagicMock, call
from timerecorder.database import Database
from timerecorder.databaseAccess import DatabaseAccess
class TestDatabaseAccess(unittest.TestCase):
def setUp(self):
self.database = Database('test')
self.database.recordResults = MagicMock()
self.thing = DatabaseAccess(self.database)
def tearDown(self):
pass
def testIdentifyTrackUnambiguous(self):
tracks = [(1, 'track1')]
self.database.loadTracks = MagicMock(return_value=tracks)
loadedTrack = self.thing.identifyTrack(10, 10000)
self.assertEqual(loadedTrack, 1, "Wrong ID")
def testIdentifyTrackNoResult(self):
tracks = []
self.database.loadTracks = MagicMock(return_value=tracks)
loadedTrack = self.thing.identifyTrack(10, 10000)
self.assertEqual(loadedTrack, [], "Shouldn't identify track")
def testIdentifyTrackAmbiguous(self):
tracks = [(1, 'track1'), (2, 'track2')]
self.database.loadTracks = MagicMock(return_value=tracks)
loadedTrack = self.thing.identifyTrack(55, 10000)
self.assertEqual(loadedTrack, [1, 2], "Should return all tracks")
def testIdentifyCarUnambiguous(self):
cars = [(1, 'car1')]
self.database.loadCars = MagicMock(return_value=cars)
loadedCar = self.thing.identifyCar(1000, 100, 5)
self.assertEqual(loadedCar, 1, "Wrong ID")
def testIdentifyCarNoResult(self):
cars = []
self.database.loadCars = MagicMock(return_value=cars)
loadedCar = self.thing.identifyCar(1000, 100, 5)
self.assertEqual(loadedCar, [], "Shouldn't identify car")
def testIdentifyCarAmbiguous(self):
cars = [(1, 'car1'), (2, 'car2')]
self.database.loadCars = MagicMock(return_value=cars)
loadedCar = self.thing.identifyCar(1000, 100, 5)
self.assertEqual(loadedCar, [1, 2], "Should return all cars")
def testGetCarInterfacesStatementWithoutData(self):
handbrakeData = [(None)]
self.database.loadHandbrakeData = MagicMock(side_effect=handbrakeData)
carNames = ["Unknown Car"]
noneData = [(None)]
self.database.loadShiftingData = MagicMock(side_effect=noneData)
self.database.loadGearsData = MagicMock(side_effect=noneData)
self.database.loadClutchData = MagicMock(side_effect=noneData)
self.database.getCarName = MagicMock(side_effect=carNames)
self.assertEqual(self.thing.describeCarInterfaces([1]), "Unknown Car: NO CONTROL DATA")
def testGetCarInterfacesStatements(self):
handbrakeData = [(0), (1)]
self.database.loadHandbrakeData = MagicMock(side_effect=handbrakeData)
shiftingData = [('H-PATTERN'), ('2 PADDLES')]
self.database.loadShiftingData = MagicMock(side_effect=shiftingData)
carNames = ['Classic Car', 'Modern Car']
self.database.getCarName = MagicMock(side_effect=carNames)
gearsData = [(4), (6)]
self.database.loadGearsData = MagicMock(side_effect=gearsData)
clutchData = [(1), (0)]
self.database.loadClutchData = MagicMock(side_effect=clutchData)
firstCarInterface = self.thing.describeCarInterfaces(1)
self.assertEqual(firstCarInterface, "Classic Car: H-PATTERN shifting, 4 speed, with manual CLUTCH")
secondCarInterface = self.thing.describeCarInterfaces(2)
self.assertEqual(secondCarInterface, "Modern Car: 2 PADDLES shifting, 6 speed, with HANDBRAKE")
def testMapToShiftingData(self):
shiftingData = [('H-PATTERN'), ('SEQUENTIAL')]
self.database.loadShiftingData = MagicMock(side_effect=shiftingData)
carCandidates = [100, 200]
result = self.thing.mapCarsToShifting(carCandidates)
self.assertEqual(list(result), [(100, 'H-PATTERN'), (200, 'SEQUENTIAL')])
def testHandleCarUpdatesInvokesLambda(self):
self.database.getCarUpdateStatements = MagicMock(return_value=['update100', 'update200'])
carNames = ['Classic Car', 'Modern Car']
self.database.getCarName = MagicMock(side_effect=carNames)
updateHandler = MagicMock()
self.thing.handleCarUpdates([100, 200], 123456789, [], updateHandler)
call1 = call('UNKNOWN', 'Classic Car', 123456789, 'update100')
call2 = call('UNKNOWN', 'Modern Car', 123456789, 'update200')
updateHandler.assert_has_calls([call1, call2])
def testHandleTrackUpdatesInvokesLambda(self):
self.database.getTrackUpdateStatements = MagicMock(return_value=['update100', 'update200'])
trackNames = ['Sprint', 'Complete']
self.database.getTrackName = MagicMock(side_effect=trackNames)
updateHandler = MagicMock()
self.thing.handleTrackUpdates([100, 200], 123456789, [], updateHandler)
call1 = call('Sprint', 'UNKNOWN', 123456789, 'update100')
call2 = call('Complete', 'UNKNOWN', 123456789, 'update200')
updateHandler.assert_has_calls([call1, call2])
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"unittest.mock.MagicMock",
"timerecorder.databaseAccess.DatabaseAccess",
"timerecorder.database.Database",
"unittest.mock.call"
] |
[((5078, 5093), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5091, 5093), False, 'import unittest\n'), ((249, 265), 'timerecorder.database.Database', 'Database', (['"""test"""'], {}), "('test')\n", (257, 265), False, 'from timerecorder.database import Database\n'), ((304, 315), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (313, 315), False, 'from unittest.mock import MagicMock, call\n'), ((337, 366), 'timerecorder.databaseAccess.DatabaseAccess', 'DatabaseAccess', (['self.database'], {}), '(self.database)\n', (351, 366), False, 'from timerecorder.databaseAccess import DatabaseAccess\n'), ((518, 548), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': 'tracks'}), '(return_value=tracks)\n', (527, 548), False, 'from unittest.mock import MagicMock, call\n'), ((759, 789), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': 'tracks'}), '(return_value=tracks)\n', (768, 789), False, 'from unittest.mock import MagicMock, call\n'), ((1046, 1076), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': 'tracks'}), '(return_value=tracks)\n', (1055, 1076), False, 'from unittest.mock import MagicMock, call\n'), ((1316, 1344), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': 'cars'}), '(return_value=cars)\n', (1325, 1344), False, 'from unittest.mock import MagicMock, call\n'), ((1546, 1574), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': 'cars'}), '(return_value=cars)\n', (1555, 1574), False, 'from unittest.mock import MagicMock, call\n'), ((1816, 1844), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': 'cars'}), '(return_value=cars)\n', (1825, 1844), False, 'from unittest.mock import MagicMock, call\n'), ((2105, 2141), 'unittest.mock.MagicMock', 'MagicMock', ([], {'side_effect': 'handbrakeData'}), '(side_effect=handbrakeData)\n', (2114, 2141), False, 'from unittest.mock import MagicMock, call\n'), ((2246, 2277), 'unittest.mock.MagicMock', 'MagicMock', ([], {'side_effect': 'noneData'}), '(side_effect=noneData)\n', (2255, 2277), False, 'from unittest.mock import MagicMock, call\n'), ((2316, 2347), 'unittest.mock.MagicMock', 'MagicMock', ([], {'side_effect': 'noneData'}), '(side_effect=noneData)\n', (2325, 2347), False, 'from unittest.mock import MagicMock, call\n'), ((2387, 2418), 'unittest.mock.MagicMock', 'MagicMock', ([], {'side_effect': 'noneData'}), '(side_effect=noneData)\n', (2396, 2418), False, 'from unittest.mock import MagicMock, call\n'), ((2455, 2486), 'unittest.mock.MagicMock', 'MagicMock', ([], {'side_effect': 'carNames'}), '(side_effect=carNames)\n', (2464, 2486), False, 'from unittest.mock import MagicMock, call\n'), ((2708, 2744), 'unittest.mock.MagicMock', 'MagicMock', ([], {'side_effect': 'handbrakeData'}), '(side_effect=handbrakeData)\n', (2717, 2744), False, 'from unittest.mock import MagicMock, call\n'), ((2840, 2875), 'unittest.mock.MagicMock', 'MagicMock', ([], {'side_effect': 'shiftingData'}), '(side_effect=shiftingData)\n', (2849, 2875), False, 'from unittest.mock import MagicMock, call\n'), ((2960, 2991), 'unittest.mock.MagicMock', 'MagicMock', ([], {'side_effect': 'carNames'}), '(side_effect=carNames)\n', (2969, 2991), False, 'from unittest.mock import MagicMock, call\n'), ((3061, 3093), 'unittest.mock.MagicMock', 'MagicMock', ([], {'side_effect': 'gearsData'}), '(side_effect=gearsData)\n', (3070, 3093), False, 'from unittest.mock import MagicMock, call\n'), ((3165, 3198), 'unittest.mock.MagicMock', 'MagicMock', ([], {'side_effect': 'clutchData'}), '(side_effect=clutchData)\n', (3174, 3198), 
False, 'from unittest.mock import MagicMock, call\n'), ((3676, 3711), 'unittest.mock.MagicMock', 'MagicMock', ([], {'side_effect': 'shiftingData'}), '(side_effect=shiftingData)\n', (3685, 3711), False, 'from unittest.mock import MagicMock, call\n'), ((3989, 4039), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': "['update100', 'update200']"}), "(return_value=['update100', 'update200'])\n", (3998, 4039), False, 'from unittest.mock import MagicMock, call\n'), ((4125, 4156), 'unittest.mock.MagicMock', 'MagicMock', ([], {'side_effect': 'carNames'}), '(side_effect=carNames)\n', (4134, 4156), False, 'from unittest.mock import MagicMock, call\n'), ((4182, 4193), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (4191, 4193), False, 'from unittest.mock import MagicMock, call\n'), ((4289, 4343), 'unittest.mock.call', 'call', (['"""UNKNOWN"""', '"""Classic Car"""', '(123456789)', '"""update100"""'], {}), "('UNKNOWN', 'Classic Car', 123456789, 'update100')\n", (4293, 4343), False, 'from unittest.mock import MagicMock, call\n'), ((4360, 4413), 'unittest.mock.call', 'call', (['"""UNKNOWN"""', '"""Modern Car"""', '(123456789)', '"""update200"""'], {}), "('UNKNOWN', 'Modern Car', 123456789, 'update200')\n", (4364, 4413), False, 'from unittest.mock import MagicMock, call\n'), ((4571, 4621), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': "['update100', 'update200']"}), "(return_value=['update100', 'update200'])\n", (4580, 4621), False, 'from unittest.mock import MagicMock, call\n'), ((4704, 4737), 'unittest.mock.MagicMock', 'MagicMock', ([], {'side_effect': 'trackNames'}), '(side_effect=trackNames)\n', (4713, 4737), False, 'from unittest.mock import MagicMock, call\n'), ((4763, 4774), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (4772, 4774), False, 'from unittest.mock import MagicMock, call\n'), ((4872, 4921), 'unittest.mock.call', 'call', (['"""Sprint"""', '"""UNKNOWN"""', '(123456789)', '"""update100"""'], {}), "('Sprint', 'UNKNOWN', 123456789, 'update100')\n", (4876, 4921), False, 'from unittest.mock import MagicMock, call\n'), ((4938, 4989), 'unittest.mock.call', 'call', (['"""Complete"""', '"""UNKNOWN"""', '(123456789)', '"""update200"""'], {}), "('Complete', 'UNKNOWN', 123456789, 'update200')\n", (4942, 4989), False, 'from unittest.mock import MagicMock, call\n')]
|
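The tests above lean on the difference between MagicMock's return_value (the same object on every call) and side_effect (an iterable consumed one item per call). A small standalone illustration, independent of the database code:

from unittest.mock import MagicMock

loader = MagicMock(side_effect=["first", "second"])
assert loader() == "first"    # each call pops the next item from side_effect
assert loader() == "second"

fixed = MagicMock(return_value=42)
assert fixed() == 42 and fixed() == 42   # return_value repeats on every call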
# IMPORTING LIBRARIES
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import datetime
import warnings
# DECLARING VARIABLES
# USER INPUT LIST
name = []
age = []
loc = []
cat = []
subcat = []
year = []
# ADMIN INPUT LIST
cat_list = []
cat_sub = {}
# GUEST INPUT
file_name = 'categories.txt'
class Business:
a = ''
auth = False
def __init__(self):
print('''
SELECT YOUR CHOICE :
1. ADMIN
2. USER
3. GUEST
''')
enter = int(input('Enter your Choice : '))
if (enter == 1):
admin_username = input('ENTER USERNAME : ')
admin_password = input('ENTER PASSWORD : ')
if (self.auth == False):
if (admin_username == 'admin123' and admin_password == 'password'):
self.admin(cat_list,self.subcategory)
self.a = 'TASK COMPLETED SUCCESSFULLY'
self.auth = True
else:
print('INVALID USERNAME OR PASSWORD')
self.a = 'INVALID USERNAME OR PASSWORD'
self.__init__()
else:
self.add_category(self.new_cat_list)
elif (enter == 2):
self.business(name,age,loc,cat,subcat,year)
self.a = 'TASK COMPLETED SUCCESSFULLY !!!'
elif (enter == 3):
self.intro(file_name,read=True)
else:
print('PLEASE ENTER A VALID NUMBER')
self.a = 'PLEASE ENTER A VALID NUMBER'
self.__init__()
def business(self,name,age,loc,cat,subcat,year):
for i in range(1):
print('USER NO. : ',i+1)
user_name = input('Enter your Name : ')
user_age = int(input('Enter your Age : '))
user_loc = input('Enter your Location : ')
user_cat = self.category()
user_subcat = self.subcategory(self.category())
user_year = datetime.datetime.now().year
name.append(user_name.capitalize())
age.append(user_age)
loc.append(user_loc.upper())
cat.append(user_cat.capitalize())
subcat.append(user_subcat.capitalize())
year.append(user_year)
self.data_to_csv(name,age,loc,cat,subcat,year)
def admin(self,category,subcategory):
n = int(input('Enter the No. of Categories want to be Added : '))
for i in range(n):
print('\n ======================= STARTED CATEGORY {} =========================== \n'.format(i+1))
print('Category No. :',i+1)
user_category = input('Enter the Category : ')
cat_list.append(user_category.capitalize())
for k in cat_list:
cat_sub.setdefault(k)
m = int(input('Enter the No. of Sub Categories want to be added for the above Category : '))
subcat_list = []
for j in range(m):
print('Sub Category No. :',j+1)
user_sub_category = input('Enter the Sub-Category : ')
subcat_list.append(user_sub_category.capitalize())
cat_sub[k] = subcat_list
print('\n ======================= ENDED CATEGORY {} =========================== \n'.format(i+1))
def category(self):
for i,each in enumerate(cat_list):
print('{}. {} \t \t'.format(i+1,each),end='')
if i % 4 == 3:
print("\n")
print('\n')
cat = int(input('Enter the No. : '))
return cat_list[cat-1]
def subcategory(self,cat_value):
print('\n')
print('THE CATEGORY IS : {}'.format(cat_value))
for i,each in enumerate(cat_sub[cat_value]):
print('{}. {} \t \t'.format(i+1,each),end='')
if i % 4 == 3:
print("\n")
print('\n')
print('THE SUB CATEGORY OF {} IS : '.format(cat_value))
print('\n')
subcat = int(input('Enter the No. : '))
print('SUBCATEGORY IS : {}'.format(cat_sub[cat_value][subcat-1]))
return cat_sub[cat_value][subcat-1]
new_cat_list = []
new_cat_sub = {}
def add_category(self,new_cat):
n = int(input('Enter the No. of Categories want to be Added : '))
for i in range(n):
print('\n ======================= STARTED CATEGORY {} =========================== \n'.format(i+1))
print('Category No. :',i+1)
user_category = input('Enter the Category : ')
self.new_cat_list.append(user_category.capitalize())
cat_list.extend(self.new_cat_list)
for k in self.new_cat_list:
self.new_cat_sub.setdefault(k)
m = int(input('Enter the No. of Sub Categories want to be added for the above Category : '))
new_subcat_list = []
for j in range(m):
print('Sub Category No. :',j+1)
user_sub_category = input('Enter the Sub-Category : ')
new_subcat_list.append(user_sub_category.capitalize())
self.new_cat_sub[k] = new_subcat_list
print('\n ======================= ENDED CATEGORY {} =========================== \n'.format(i+1))
return cat_sub.update(self.new_cat_sub)
def data_to_csv(self,name,age,loc,cat,subcat,year):
dict = {
'name': name,
'age': age,
'location': loc,
'category': cat,
'subcategory': subcat,
'year': year
}
df = pd.DataFrame(dict)
csv = df.to_csv('data.csv', index=False)
return csv
def intro(self,file_name,read=False):
f = open(file_name,'w')
f.write('CATEGORY AND SUBCATEGORY')
f.write('\n')
f.write("=======================================")
f.write('\n')
f.write("| CATEGORY NO. | CATEGORY NAME |")
f.write('\n')
f.write("=======================================")
f.write('\n')
for i,cat_each in enumerate(cat_sub,start=1):
f.write("| {} | {} ".format(str(i).center(9),cat_each))
f.write('\n')
f.write(" \t \t | SUB CATEGORY NO. | SUB CATEGORY NAME |")
f.write('\n')
for j,subcat_each in enumerate(cat_sub[cat_each],start=1):
f.write(" \t \t | {} | {} ".format(str(j).center(11),subcat_each))
f.write('\n')
if(read==True):
f = open(file_name,'r')
print(f.read())
f.close()
def __str__(self):
return self.a
|
[
"pandas.DataFrame",
"datetime.datetime.now"
] |
[((5860, 5878), 'pandas.DataFrame', 'pd.DataFrame', (['dict'], {}), '(dict)\n', (5872, 5878), True, 'import pandas as pd\n'), ((2094, 2117), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2115, 2117), False, 'import datetime\n')]
|
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from datetime import date
from get_names import send_names
'''
This app automatically logs in to a website and corrects the hours of people who forgot
to clock out at lunch time.
'''
####################################### Basic settings #########################################
SET_TIME = '11:50'
SET_TIME_DIV = 'AM'
set_date = date.strftime(date.today(), "%m/%d/%Y") # today in format mm/dd/yyyy
USERNAME = ''
PASSWORD = ""
NOTE = f'Automatic System - Forgot to clock out for Lunch - Administrator - {set_date}'
list_of_names = send_names() # import data from get_names.py
################################################################################################
browser = webdriver.Firefox()
browser.get('https://www.timestation.com/Login.asp')
def login_page():
    ''' Log in to the website (authentication). '''
email_field = browser.find_element_by_css_selector('#eMail')
password_field = browser.find_element_by_css_selector('#Password')
summit_field = browser.find_element_by_css_selector('.ButtonGreen')
email_field.clear()
email_field.send_keys(USERNAME)
password_field.clear()
password_field.send_keys(PASSWORD)
summit_field.click()
def select_employees_website():
    ''' Click the employees link inside the website after logging in. '''
employee_css = '.menu-main > li:nth-child(2) > a:nth-child(1)'
employee_button_ = browser.find_element_by_css_selector(employee_css)
employee_button_.click()
def select_names(name):
    ''' Scroll the page and click the checkbox belonging to the given name. '''
info_name = browser.find_element_by_link_text(name)
href_link = info_name.get_attribute('href')
href_link.split('=')
id_number = href_link.split('=')
xpath_path = f".//input[@value={id_number[1]}]"
for_click = browser.find_element_by_xpath(xpath_path)
for_click.location_once_scrolled_into_view
for_click.click()
def select_box(id, text):
    ''' Open a select element and choose the option with the given visible text. '''
action_find = browser.find_element_by_id(id)
action_find.location_once_scrolled_into_view
action_tab = Select(action_find)
action_tab.select_by_visible_text(text)
def select_names_flow(names):
    ''' Loop over the given list of names and apply the clock-out correction to each. '''
for name in names:
select_names(name=name)
select_box(id='employeeAction', text='Check-Out') # to select the action bar
select_box(id='TimeOut_Hour', text=SET_TIME.split(':')[0]) # this input the hour to field
select_box(id='TimeOut_Minute', text=SET_TIME.split(':')[1]) # this input the minutes to field
select_box(id='TimeOut_AMPM', text=SET_TIME_DIV) # this input the AM or PM in field
date_field = browser.find_element_by_id('TimeOut_Date')
note_field = browser.find_element_by_id('Notes')
date_field.send_keys(set_date)
note_field.send_keys(NOTE)
browser.find_element_by_name('Submit').click()
if __name__ == "__main__":
login_page()
select_employees_website()
select_names_flow(names=list_of_names) # this accept a list of names to be change.
########## this created a log in a text file ########################
try:
with open('NameChangeLog.txt', 'w') as file_log:
for name in list_of_names:
file_log.write(f'{set_date} -- {name}\n')
print(name)
except:
with open('NameChangeLog.txt', 'a') as file_log:
for name in list_of_names:
file_log.write(f'{set_date} -- {name}\n')
print(name)
finally:
print('everything was change successfully ')
browser.close()
##########################################################################
|
[
"get_names.send_names",
"selenium.webdriver.support.ui.Select",
"datetime.date.today",
"selenium.webdriver.Firefox"
] |
[((621, 633), 'get_names.send_names', 'send_names', ([], {}), '()\n', (631, 633), False, 'from get_names import send_names\n'), ((807, 826), 'selenium.webdriver.Firefox', 'webdriver.Firefox', ([], {}), '()\n', (824, 826), False, 'from selenium import webdriver\n'), ((422, 434), 'datetime.date.today', 'date.today', ([], {}), '()\n', (432, 434), False, 'from datetime import date\n'), ((2228, 2247), 'selenium.webdriver.support.ui.Select', 'Select', (['action_find'], {}), '(action_find)\n', (2234, 2247), False, 'from selenium.webdriver.support.ui import Select\n')]
|
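select_box() above wraps a <select> element in selenium's Select helper before choosing an option by its visible text. A generic sketch of that helper, assuming a hypothetical page containing a select element with id "TimeOut_Hour" (selenium 3 style calls, matching the script above):

from selenium import webdriver
from selenium.webdriver.support.ui import Select

driver = webdriver.Firefox()
driver.get("https://example.com/form")   # hypothetical form page, not from the original
hour_dropdown = Select(driver.find_element_by_id("TimeOut_Hour"))
hour_dropdown.select_by_visible_text("11")
driver.quit()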
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Sequence, Tuple, Union
import torch
import torch.nn.functional as F
from torch import Tensor
from monai.transforms.croppad.array import SpatialPad
from monai.transforms.utils import compute_divisible_spatial_size, convert_pad_mode
from monai.utils import PytorchPadMode, ensure_tuple_rep
def check_input_images(input_images: Union[List[Tensor], Tensor], spatial_dims: int) -> None:
"""
Validate the input dimensionality (raise a `ValueError` if invalid).
Args:
input_images: It can be 1) a tensor sized (B, C, H, W) or (B, C, H, W, D),
or 2) a list of image tensors, each image i may have different size (C, H_i, W_i) or (C, H_i, W_i, D_i).
spatial_dims: number of spatial dimensions of the images, 2 or 3.
"""
if isinstance(input_images, Tensor):
if len(input_images.shape) != spatial_dims + 2:
raise ValueError(
"When input_images is a Tensor, its need to be (spatial_dims + 2)-D."
f"In this case, it should be a {(spatial_dims + 2)}-D Tensor, got Tensor shape {input_images.shape}."
)
elif isinstance(input_images, List):
for img in input_images:
if len(img.shape) != spatial_dims + 1:
raise ValueError(
"When input_images is a List[Tensor], each element should have be (spatial_dims + 1)-D."
f"In this case, it should be a {(spatial_dims + 1)}-D Tensor, got Tensor shape {img.shape}."
)
else:
raise ValueError("input_images needs to be a List[Tensor] or Tensor.")
return
def check_training_targets(
input_images: Union[List[Tensor], Tensor],
targets: Union[List[Dict[str, Tensor]], None],
spatial_dims: int,
target_label_key: str,
target_box_key: str,
) -> None:
"""
Validate the input images/targets during training (raise a `ValueError` if invalid).
Args:
input_images: It can be 1) a tensor sized (B, C, H, W) or (B, C, H, W, D),
or 2) a list of image tensors, each image i may have different size (C, H_i, W_i) or (C, H_i, W_i, D_i).
targets: a list of dict. Each dict with two keys: target_box_key and target_label_key,
ground-truth boxes present in the image.
spatial_dims: number of spatial dimensions of the images, 2 or 3.
target_label_key: the expected key of target labels.
target_box_key: the expected key of target boxes.
"""
if targets is None:
raise ValueError("Please provide ground truth targets during training.")
if len(input_images) != len(targets):
raise ValueError(f"len(input_images) should equal to len(targets), got {len(input_images)}, {len(targets)}.")
for target in targets:
if (target_label_key not in target.keys()) or (target_box_key not in target.keys()):
raise ValueError(
f"{target_label_key} and {target_box_key} are expected keys in targets. Got {target.keys()}."
)
boxes = target[target_box_key]
if not isinstance(boxes, torch.Tensor):
raise ValueError(f"Expected target boxes to be of type Tensor, got {type(boxes)}.")
if len(boxes.shape) != 2 or boxes.shape[-1] != 2 * spatial_dims:
raise ValueError(
f"Expected target boxes to be a tensor " f"of shape [N, {2* spatial_dims}], got {boxes.shape}."
)
return
def pad_images(
input_images: Union[List[Tensor], Tensor],
spatial_dims: int,
size_divisible: Union[int, Sequence[int]],
mode: Union[PytorchPadMode, str] = PytorchPadMode.CONSTANT,
**kwargs,
) -> Tuple[Tensor, List[List[int]]]:
"""
Pad the input images, so that the output spatial sizes are divisible by `size_divisible`.
It pads them at the end to create a (B, C, H, W) or (B, C, H, W, D) Tensor.
Padded size (H, W) or (H, W, D) is divisible by size_divisible.
Default padding uses constant padding with value 0.0
Args:
input_images: It can be 1) a tensor sized (B, C, H, W) or (B, C, H, W, D),
or 2) a list of image tensors, each image i may have different size (C, H_i, W_i) or (C, H_i, W_i, D_i).
spatial_dims: number of spatial dimensions of the images, 2D or 3D.
size_divisible: int or Sequence[int], is the expected pattern on the input image shape.
If an int, the same `size_divisible` will be applied to all the input spatial dimensions.
mode: available modes for PyTorch Tensor: {``"constant"``, ``"reflect"``, ``"replicate"``, ``"circular"``}.
One of the listed string values or a user supplied function. Defaults to ``"constant"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html
kwargs: other arguments for `torch.pad` function.
Return:
- images, a (B, C, H, W) or (B, C, H, W, D) Tensor
- image_sizes, the original spatial size of each image
"""
size_divisible = ensure_tuple_rep(size_divisible, spatial_dims)
# If input_images: Tensor
if isinstance(input_images, Tensor):
orig_size = list(input_images.shape[-spatial_dims:])
new_size = compute_divisible_spatial_size(spatial_shape=orig_size, k=size_divisible)
all_pad_width = [(0, max(sp_i - orig_size[i], 0)) for i, sp_i in enumerate(new_size)]
pt_pad_width = [val for sublist in all_pad_width for val in sublist[::-1]][::-1]
if max(pt_pad_width) == 0:
# if there is no need to pad
return input_images, [orig_size] * input_images.shape[0]
mode_: str = convert_pad_mode(dst=input_images, mode=mode).value
return F.pad(input_images, pt_pad_width, mode=mode_, **kwargs), [orig_size] * input_images.shape[0]
# If input_images: List[Tensor])
image_sizes = [img.shape[-spatial_dims:] for img in input_images]
in_channels = input_images[0].shape[0]
dtype = input_images[0].dtype
device = input_images[0].device
# compute max_spatial_size
image_sizes_t = torch.tensor(image_sizes)
max_spatial_size_t, _ = torch.max(image_sizes_t, dim=0)
if len(max_spatial_size_t) != spatial_dims or len(size_divisible) != spatial_dims:
raise ValueError(" Require len(max_spatial_size_t) == spatial_dims ==len(size_divisible).")
max_spatial_size = compute_divisible_spatial_size(spatial_shape=list(max_spatial_size_t), k=size_divisible)
# allocate memory for the padded images
images = torch.zeros([len(image_sizes), in_channels] + max_spatial_size, dtype=dtype, device=device)
# Use `SpatialPad` to match sizes, padding in the end will not affect boxes
padder = SpatialPad(spatial_size=max_spatial_size, method="end", mode=mode, **kwargs)
for idx, img in enumerate(input_images):
images[idx, ...] = padder(img) # type: ignore
return images, [list(ss) for ss in image_sizes]
def preprocess_images(
input_images: Union[List[Tensor], Tensor],
spatial_dims: int,
size_divisible: Union[int, Sequence[int]],
mode: Union[PytorchPadMode, str] = PytorchPadMode.CONSTANT,
**kwargs,
) -> Tuple[Tensor, List[List[int]]]:
"""
Preprocess the input images, including
- validate of the inputs
- pad the inputs so that the output spatial sizes are divisible by `size_divisible`.
It pads them at the end to create a (B, C, H, W) or (B, C, H, W, D) Tensor.
Padded size (H, W) or (H, W, D) is divisible by size_divisible.
Default padding uses constant padding with value 0.0
Args:
input_images: It can be 1) a tensor sized (B, C, H, W) or (B, C, H, W, D),
or 2) a list of image tensors, each image i may have different size (C, H_i, W_i) or (C, H_i, W_i, D_i).
spatial_dims: number of spatial dimensions of the images, 2 or 3.
size_divisible: int or Sequence[int], is the expected pattern on the input image shape.
If an int, the same `size_divisible` will be applied to all the input spatial dimensions.
mode: available modes for PyTorch Tensor: {``"constant"``, ``"reflect"``, ``"replicate"``, ``"circular"``}.
One of the listed string values or a user supplied function. Defaults to ``"constant"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html
kwargs: other arguments for `torch.pad` function.
Return:
- images, a (B, C, H, W) or (B, C, H, W, D) Tensor
- image_sizes, the original spatial size of each image
"""
check_input_images(input_images, spatial_dims)
size_divisible = ensure_tuple_rep(size_divisible, spatial_dims)
return pad_images(input_images, spatial_dims, size_divisible, mode, **kwargs)
|
[
"monai.transforms.utils.compute_divisible_spatial_size",
"monai.transforms.croppad.array.SpatialPad",
"monai.transforms.utils.convert_pad_mode",
"torch.max",
"monai.utils.ensure_tuple_rep",
"torch.tensor",
"torch.nn.functional.pad"
] |
[((5625, 5671), 'monai.utils.ensure_tuple_rep', 'ensure_tuple_rep', (['size_divisible', 'spatial_dims'], {}), '(size_divisible, spatial_dims)\n', (5641, 5671), False, 'from monai.utils import PytorchPadMode, ensure_tuple_rep\n'), ((6680, 6705), 'torch.tensor', 'torch.tensor', (['image_sizes'], {}), '(image_sizes)\n', (6692, 6705), False, 'import torch\n'), ((6734, 6765), 'torch.max', 'torch.max', (['image_sizes_t'], {'dim': '(0)'}), '(image_sizes_t, dim=0)\n', (6743, 6765), False, 'import torch\n'), ((7311, 7387), 'monai.transforms.croppad.array.SpatialPad', 'SpatialPad', ([], {'spatial_size': 'max_spatial_size', 'method': '"""end"""', 'mode': 'mode'}), "(spatial_size=max_spatial_size, method='end', mode=mode, **kwargs)\n", (7321, 7387), False, 'from monai.transforms.croppad.array import SpatialPad\n'), ((9247, 9293), 'monai.utils.ensure_tuple_rep', 'ensure_tuple_rep', (['size_divisible', 'spatial_dims'], {}), '(size_divisible, spatial_dims)\n', (9263, 9293), False, 'from monai.utils import PytorchPadMode, ensure_tuple_rep\n'), ((5824, 5897), 'monai.transforms.utils.compute_divisible_spatial_size', 'compute_divisible_spatial_size', ([], {'spatial_shape': 'orig_size', 'k': 'size_divisible'}), '(spatial_shape=orig_size, k=size_divisible)\n', (5854, 5897), False, 'from monai.transforms.utils import compute_divisible_spatial_size, convert_pad_mode\n'), ((6247, 6292), 'monai.transforms.utils.convert_pad_mode', 'convert_pad_mode', ([], {'dst': 'input_images', 'mode': 'mode'}), '(dst=input_images, mode=mode)\n', (6263, 6292), False, 'from monai.transforms.utils import compute_divisible_spatial_size, convert_pad_mode\n'), ((6314, 6369), 'torch.nn.functional.pad', 'F.pad', (['input_images', 'pt_pad_width'], {'mode': 'mode_'}), '(input_images, pt_pad_width, mode=mode_, **kwargs)\n', (6319, 6369), True, 'import torch.nn.functional as F\n')]
|
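pad_images() above pads only at the end of each spatial dimension until the size is divisible by size_divisible. A plain-PyTorch sketch of the same idea, without the MONAI helpers (the 37x50 image and the divisor 16 are made-up values):

import torch
import torch.nn.functional as F

img = torch.zeros(1, 3, 37, 50)                                   # (B, C, H, W)
k = 16
new_hw = [((s + k - 1) // k) * k for s in img.shape[-2:]]       # round H, W up to a multiple of k -> [48, 64]
pad_h, pad_w = new_hw[0] - img.shape[-2], new_hw[1] - img.shape[-1]
# F.pad lists pad widths starting from the last dimension: (W_left, W_right, H_top, H_bottom).
padded = F.pad(img, (0, pad_w, 0, pad_h))
assert list(padded.shape[-2:]) == new_hw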
import time
import json
import sys,os
import subprocess
import argparse
import unittest
VALUES_INPUT = {}
VALUES_OUTPUT = {}
class TestCases(unittest.TestCase):
def test_case_000(self):
self.assertEqual('foo'.upper(), 'FOO')
def test_case_001(self):
self.assertEqual('foo'.upper(), 'FOO')
def Run(command, parameters=None):
if(parameters != None):
subprocess.Popen([command, parameters], shell=True)
else:
subprocess.Popen(command, shell=True)
def OpenFolder(path):
if sys.platform == 'win32':
Run('explorer.exe', path)
def Main():
    '''No description provided.'''
global VALUES_INPUT
global VALUES_OUTPUT
VALUES_OUTPUT = VALUES_INPUT
#OpenFolder(r'C:\Windows')
#Run(r'Calc')
#Run(r'C:\Program Files\Google\Chrome\Application\chrome.exe','-incognito www.google.com.br')
#VALUES_OUTPUT['vartest'] = 'test'
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=Main.__doc__)
parser.add_argument('-d','--description', help='Description of program', action='store_true')
parser.add_argument('-u','--tests', help='Execute tests', action='store_true')
parser.add_argument('-i','--file_input', help='data entry via file (path)')
parser.add_argument('-o','--file_output', help='output data via file (path)')
args, unknown = parser.parse_known_args()
args = vars(args)
if args['description'] == True:
print(Main.__doc__)
sys.exit()
if args['tests'] == True:
suite = unittest.TestSuite()
suite.addTest(TestCases("test_case_000"))
suite.addTest(TestCases("test_case_001"))
runner = unittest.TextTestRunner()
runner.run(suite)
sys.exit()
if args['file_input']:
with open(args['file_input']) as json_file:
VALUES_INPUT = json.load(json_file)
param = ' '.join(unknown)
Main()
if args['file_output']:
with open(args['file_output'], "w") as outfile:
json_string = json.dumps(VALUES_OUTPUT, default=lambda o: o.__dict__, sort_keys=True, indent=2)
outfile.write(json_string)
|
[
"subprocess.Popen",
"json.load",
"unittest.TextTestRunner",
"argparse.ArgumentParser",
"unittest.TestSuite",
"json.dumps",
"sys.exit"
] |
[((990, 1039), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'Main.__doc__'}), '(description=Main.__doc__)\n', (1013, 1039), False, 'import argparse\n'), ((423, 474), 'subprocess.Popen', 'subprocess.Popen', (['[command, parameters]'], {'shell': '(True)'}), '([command, parameters], shell=True)\n', (439, 474), False, 'import subprocess\n'), ((495, 532), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (511, 532), False, 'import subprocess\n'), ((1544, 1554), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1552, 1554), False, 'import sys, os\n'), ((1612, 1632), 'unittest.TestSuite', 'unittest.TestSuite', ([], {}), '()\n', (1630, 1632), False, 'import unittest\n'), ((1753, 1778), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {}), '()\n', (1776, 1778), False, 'import unittest\n'), ((1830, 1840), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1838, 1840), False, 'import sys, os\n'), ((1963, 1983), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (1972, 1983), False, 'import json\n'), ((2170, 2255), 'json.dumps', 'json.dumps', (['VALUES_OUTPUT'], {'default': '(lambda o: o.__dict__)', 'sort_keys': '(True)', 'indent': '(2)'}), '(VALUES_OUTPUT, default=lambda o: o.__dict__, sort_keys=True,\n indent=2)\n', (2180, 2255), False, 'import json\n')]
|
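The output path above serializes VALUES_OUTPUT with default=lambda o: o.__dict__, which lets json.dumps fall back to an object's attribute dict whenever it meets a value that is not natively JSON-serializable. A tiny standalone illustration (the Point class is invented for the example):

import json

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

# Point is not JSON-serializable, so the default callback supplies its __dict__.
print(json.dumps({"p": Point(1, 2)}, default=lambda o: o.__dict__, sort_keys=True, indent=2))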
#!/usr/bin/env python3
"""从apnic获取中国IP范围"""
import urllib.request, os
URL = "http://ftp.apnic.net/apnic/stats/apnic/delegated-apnic-latest"
TMP_PATH = "./whitelist.tmp"
# The final generated whitelist
RESULT_FILE_PATH = "./fdslight_etc/whitelist.txt"
def get_remote_file():
tmpfile = open(TMP_PATH, "wb")
response = urllib.request.urlopen(URL)
rdata = response.read()
tmpfile.write(rdata)
tmpfile.close()
def is_ipv4(line):
"""检查是否是IPv4"""
if line.find("ipv4") < 6: return False
return True
def is_cn_ipv4(line):
if line.find("CN") < 6: return False
return True
def get_subnet(line):
tmplist = line.split("|")
if len(tmplist) != 7: return None
if tmplist[6] != "allocated": return None
base_net = tmplist[3]
n = int(tmplist[4]) - 1
msize = 32 - len(bin(n)) + 2
return "%s/%s" % (base_net, msize,)
def main():
print("downloading...")
get_remote_file()
print("parsing...")
fdst = open(TMP_PATH, "r")
rfdst = open(RESULT_FILE_PATH, "w")
rfdst.write("# %s\n" % URL)
rfdst.write("# China IP address\n")
for line in fdst:
line = line.replace("\r", "")
line = line.replace("\n", "")
if line[0:6] != "apnic|": continue
if not is_ipv4(line): continue
if not is_cn_ipv4(line): continue
subnet = get_subnet(line)
if not subnet: continue
sts = "%s\n" % subnet
rfdst.write(sts)
print("parse ok")
rfdst.close()
fdst.close()
os.remove(TMP_PATH)
if __name__ == '__main__':
main()
|
[
"os.remove"
] |
[((1496, 1515), 'os.remove', 'os.remove', (['TMP_PATH'], {}), '(TMP_PATH)\n', (1505, 1515), False, 'import urllib.request, os\n')]
|
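get_subnet() above turns an APNIC allocation count into a CIDR prefix length with msize = 32 - len(bin(n)) + 2, where n is the address count minus one. A worked example for an allocation of 1024 addresses, which should map to a /22:

count = 1024
n = count - 1                      # 1023 -> bin(1023) == '0b1111111111' (10 ones plus the '0b' prefix)
msize = 32 - len(bin(n)) + 2       # 32 - 12 + 2 = 22
print(msize)                       # -> 22, i.e. base_net/22 covers 2**(32 - 22) = 1024 addresses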
import pandas as pd
from unittest.mock import patch, Mock, PropertyMock
import ramjet.data_interface.tess_transit_metadata_manager as module
from ramjet.data_interface.tess_transit_metadata_manager import TessTransitMetadataManager, Disposition
from ramjet.data_interface.tess_toi_data_interface import ToiColumns
class TestTessTransitMetadata:
@patch.object(module, 'metadatabase')
@patch.object(module, 'TessTransitMetadata')
def test_table_building_creates_rows_based_on_toi_dispositions(self, mock_tess_target_transit_disposition,
mock_metadatabase):
tess_transit_disposition_metadata_manager = TessTransitMetadataManager()
toi_dispositions = pd.DataFrame({ToiColumns.tic_id.value: [1, 2, 3],
ToiColumns.disposition.value: ['KP', '', 'FP']})
ctoi_dispositions = pd.DataFrame({ToiColumns.tic_id.value: [],
ToiColumns.disposition.value: []})
with patch.object(module.TessToiDataInterface, 'toi_dispositions', new_callable=PropertyMock
) as mock_toi_dispositions:
with patch.object(module.TessToiDataInterface, 'ctoi_dispositions', new_callable=PropertyMock
) as mock_ctoi_dispositions:
mock_toi_dispositions.return_value = toi_dispositions
mock_ctoi_dispositions.return_value = ctoi_dispositions
tess_transit_disposition_metadata_manager.build_table()
call_args_list = mock_tess_target_transit_disposition.call_args_list
assert len(call_args_list) == 3
assert call_args_list[0][1] == {'tic_id': 1, 'disposition': Disposition.CONFIRMED.value}
assert call_args_list[1][1] == {'tic_id': 2, 'disposition': Disposition.CANDIDATE.value}
assert call_args_list[2][1] == {'tic_id': 3, 'disposition': Disposition.FALSE_POSITIVE.value}
|
[
"pandas.DataFrame",
"unittest.mock.patch.object",
"ramjet.data_interface.tess_transit_metadata_manager.TessTransitMetadataManager"
] |
[((353, 389), 'unittest.mock.patch.object', 'patch.object', (['module', '"""metadatabase"""'], {}), "(module, 'metadatabase')\n", (365, 389), False, 'from unittest.mock import patch, Mock, PropertyMock\n'), ((395, 438), 'unittest.mock.patch.object', 'patch.object', (['module', '"""TessTransitMetadata"""'], {}), "(module, 'TessTransitMetadata')\n", (407, 438), False, 'from unittest.mock import patch, Mock, PropertyMock\n'), ((689, 717), 'ramjet.data_interface.tess_transit_metadata_manager.TessTransitMetadataManager', 'TessTransitMetadataManager', ([], {}), '()\n', (715, 717), False, 'from ramjet.data_interface.tess_transit_metadata_manager import TessTransitMetadataManager, Disposition\n'), ((745, 848), 'pandas.DataFrame', 'pd.DataFrame', (["{ToiColumns.tic_id.value: [1, 2, 3], ToiColumns.disposition.value: ['KP',\n '', 'FP']}"], {}), "({ToiColumns.tic_id.value: [1, 2, 3], ToiColumns.disposition.\n value: ['KP', '', 'FP']})\n", (757, 848), True, 'import pandas as pd\n'), ((913, 990), 'pandas.DataFrame', 'pd.DataFrame', (['{ToiColumns.tic_id.value: [], ToiColumns.disposition.value: []}'], {}), '({ToiColumns.tic_id.value: [], ToiColumns.disposition.value: []})\n', (925, 990), True, 'import pandas as pd\n'), ((1046, 1139), 'unittest.mock.patch.object', 'patch.object', (['module.TessToiDataInterface', '"""toi_dispositions"""'], {'new_callable': 'PropertyMock'}), "(module.TessToiDataInterface, 'toi_dispositions', new_callable=\n PropertyMock)\n", (1058, 1139), False, 'from unittest.mock import patch, Mock, PropertyMock\n'), ((1205, 1299), 'unittest.mock.patch.object', 'patch.object', (['module.TessToiDataInterface', '"""ctoi_dispositions"""'], {'new_callable': 'PropertyMock'}), "(module.TessToiDataInterface, 'ctoi_dispositions', new_callable\n =PropertyMock)\n", (1217, 1299), False, 'from unittest.mock import patch, Mock, PropertyMock\n')]
|
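The test above patches toi_dispositions with new_callable=PropertyMock, the usual way to stub a property: the mock replaces the property object on the class, and its return_value becomes the attribute's value. A minimal generic sketch (the Thing class is invented for the example):

from unittest.mock import patch, PropertyMock

class Thing:
    @property
    def value(self):
        return "real"

with patch.object(Thing, "value", new_callable=PropertyMock) as mock_value:
    mock_value.return_value = "fake"
    assert Thing().value == "fake"    # the property now reports the mocked value
assert Thing().value == "real"         # original behaviour is restored outside the patch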
#!/usr/bin/env python
from __future__ import print_function
from builtins import str, bytes
import fileinput
import argparse
import os
import sys
import subprocess
python_path = subprocess.check_output(['which' ,'python']).decode('utf-8')
system_path = os.path.dirname(python_path)
def writeJob(commandlist, jobname, commandRank, numberOfJob, numberOfNode, allocation, queue, time, concurrent_job):
commandFiles = 'command_%i.bash' %commandRank
options = \
"#!/bin/bash \n" +\
"#SBATCH -J %s # Job name \n" %(jobname) +\
"#SBATCH -N %i # Total number of nodes \n" %(numberOfNode)+\
"#SBATCH -n 24 # Total number of tasks %i\n" %(numberOfJob)+\
"#SBATCH -p %s # Queue name \n" %(queue)+\
"#SBATCH -o %s.o%s # Name of stdout output file \n" %(jobname,'%j')+ \
"#SBATCH -t %s # Run time (hh:mm:ss) \n" %time +\
"#SBATCH -A %s \nmodule load gcc\nmodule load java\n" %(allocation) +\
'ulimit -c unlimited\n' +\
"export PATH=%s:$PATH" %system_path
with open('launcher_%i.slurm' %(commandRank), 'w') as slurmFile:
print(options, file = slurmFile)
if concurrent_job == 1:
print('bash %s' %(commandFiles), file = slurmFile)
else:
print('parallel -j%i :::: %s \n' %(concurrent_job,commandFiles), file = slurmFile)
with open(commandFiles,'w') as commandFile:
print('\n'.join(commandlist) + '\n', file = commandFile)
return 0
def main(args):
commandFile = args.cmdlst
jobname = args.jobname
numberOfJob = args.numberOfCmd
numberOfNode = args.numberOfNode
allocation = args.allocation
queue = args.queue
time = args.time
concurrent_job = args.processes
with open(commandFile,'r') as f:
commands = f.readlines()
commandlist = []
i = 0
commandRank = 0
for command in commands:
commandlist.append(str(command).strip())
i += 1
if i % numberOfJob == 0:
writeJob(commandlist, jobname, commandRank, numberOfJob, numberOfNode, allocation, queue, time, concurrent_job)
commandRank += 1
i = 0
commandlist=[]
if commandlist:
writeJob(commandlist, jobname, commandRank, i, numberOfNode, allocation, queue, time, concurrent_job)
commandRank += 1
print('Written %i scripts' %commandRank, file = sys.stdout)
return 0
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='A script to create slurm scripts from list of commands')
parser.add_argument('-c', '--cmdlst', help='A list of command, each line is a command', required=True)
parser.add_argument('-j', '--jobname', default='job',help='Jobname (default: job)')
parser.add_argument('-N', '--numberOfNode', default=1, type=int, help='Number of node for each job (default: 1)')
parser.add_argument('-n', '--numberOfCmd', default=1, type=int, help='Number of command per node (default: 1)')
parser.add_argument('-A', '--allocation', default = '2013lambowitz',
help= 'Account (default: 2013lambowitz)',
choices = {'tRNA-profiling-and-b', '2013lambowitz', 'Exosome-RNA-seq'})
parser.add_argument('-t', '--time', default='01:00:00', help='Run time (hh:mm:ss) default: 1:00:00')
parser.add_argument('-q','--queue', default='normal',help='Queue (default: normal)')
parser.add_argument('-p','--processes', default=24,help='How many process to run in the same time (default: 24)', type=int)
args = parser.parse_args()
main(args)
|
[
"subprocess.check_output",
"os.path.dirname",
"builtins.str",
"argparse.ArgumentParser"
] |
[((255, 283), 'os.path.dirname', 'os.path.dirname', (['python_path'], {}), '(python_path)\n', (270, 283), False, 'import os\n'), ((2578, 2676), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""A script to create slurm scripts from list of commands"""'}), "(description=\n 'A script to create slurm scripts from list of commands')\n", (2601, 2676), False, 'import argparse\n'), ((180, 224), 'subprocess.check_output', 'subprocess.check_output', (["['which', 'python']"], {}), "(['which', 'python'])\n", (203, 224), False, 'import subprocess\n'), ((2008, 2020), 'builtins.str', 'str', (['command'], {}), '(command)\n', (2011, 2020), False, 'from builtins import str, bytes\n')]
|
import datetime
from django.db import models
from libs.orm import ModelToDicMiXin
SEXS = (
(0, '未知'),
(1, '男'),
(2, '女'),
)
LOCATIONS = (
('bj', '北京'),
('sh', '上海'),
('hz', '杭州'),
('sz', '深圳'),
('cd', '成都'),
('gz', '广州'),
)
class User(models.Model):
"""
    phonenum     phone number
    nickname     nickname
    sex          sex
    birth_year   birth year
    birth_month  birth month
    birth_day    birth day
    avatar       profile image
    location     usual place of residence
"""
phonenum = models.CharField(max_length=11, unique=True)
nickname = models.CharField(max_length=16)
sex = models.IntegerField(choices=SEXS, default=0)
birth_year = models.IntegerField(default=2000)
birth_month = models.IntegerField(default=1)
birth_day = models.IntegerField(default=1)
avater = models.CharField(max_length=256)
location = models.CharField(choices=LOCATIONS,max_length=32,default='gz')
@property
def age(self):
date = datetime.date.today()
age = date.year - self.birth_year
age = age if date.month > self.birth_month and date.day > self.birth_day else age-1
return age
@property
def profile(self):
if not hasattr(self, '_profile'):
self._profile, _ = Profile.objects.get_or_create(pk=self.id)
return self._profile
@property
def to_dic(self):
return {
'uid': self.id,
'phonenum': self.phonenum,
'nickname': self.nickname,
'sex': self.sex,
'avater': self.avater,
'location': self.location,
'age': self.age,
}
class Meta:
db_table = 'users'
# def get_or_create_token(self):
# """
    #     Generate a unique token for the user.
# :return:
# """
# key = 'token:{}'.format(self.id)
#
# token = cache.get(key)
#
# if not token:
# token = 'token........<PASSWORD>'
# cache.set(key, token, 24 * 60 * 60)
#
# return token
class Profile(models.Model, ModelToDicMiXin):
"""
    location         target city
    min_distance     minimum search distance
    max_distance     maximum search distance
    min_dating_age   minimum dating age
    max_dating_age   maximum dating age
    dating_sex       preferred match sex
    auto_play        auto-play videos
user.profile.location
"""
location = models.CharField(max_length=32, choices=LOCATIONS, default='gz')
min_distance = models.IntegerField(default=0)
max_distance = models.IntegerField(default=10)
min_dating_age = models.IntegerField(default=18)
max_dating_age = models.IntegerField(default=81)
dating_sex = models.IntegerField(choices=SEXS, default=0)
auto_play = models.BooleanField(default=True)
class Meta:
db_table = 'profiles'
|
[
"django.db.models.CharField",
"django.db.models.IntegerField",
"datetime.date.today",
"django.db.models.BooleanField"
] |
[((507, 551), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(11)', 'unique': '(True)'}), '(max_length=11, unique=True)\n', (523, 551), False, 'from django.db import models\n'), ((567, 598), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(16)'}), '(max_length=16)\n', (583, 598), False, 'from django.db import models\n'), ((609, 653), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': 'SEXS', 'default': '(0)'}), '(choices=SEXS, default=0)\n', (628, 653), False, 'from django.db import models\n'), ((671, 704), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(2000)'}), '(default=2000)\n', (690, 704), False, 'from django.db import models\n'), ((723, 753), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(1)'}), '(default=1)\n', (742, 753), False, 'from django.db import models\n'), ((770, 800), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(1)'}), '(default=1)\n', (789, 800), False, 'from django.db import models\n'), ((814, 846), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(256)'}), '(max_length=256)\n', (830, 846), False, 'from django.db import models\n'), ((862, 926), 'django.db.models.CharField', 'models.CharField', ([], {'choices': 'LOCATIONS', 'max_length': '(32)', 'default': '"""gz"""'}), "(choices=LOCATIONS, max_length=32, default='gz')\n", (878, 926), False, 'from django.db import models\n'), ((2357, 2421), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)', 'choices': 'LOCATIONS', 'default': '"""gz"""'}), "(max_length=32, choices=LOCATIONS, default='gz')\n", (2373, 2421), False, 'from django.db import models\n'), ((2441, 2471), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (2460, 2471), False, 'from django.db import models\n'), ((2491, 2522), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(10)'}), '(default=10)\n', (2510, 2522), False, 'from django.db import models\n'), ((2544, 2575), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(18)'}), '(default=18)\n', (2563, 2575), False, 'from django.db import models\n'), ((2597, 2628), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(81)'}), '(default=81)\n', (2616, 2628), False, 'from django.db import models\n'), ((2646, 2690), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': 'SEXS', 'default': '(0)'}), '(choices=SEXS, default=0)\n', (2665, 2690), False, 'from django.db import models\n'), ((2708, 2741), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (2727, 2741), False, 'from django.db import models\n'), ((974, 995), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (993, 995), False, 'import datetime\n')]
|
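The age property above derives an age from the stored birth fields by comparing today's month and day with the birth month and day. The same arithmetic as plain Python, using an invented birth date and a fixed "today" so the numbers can be checked by hand:

import datetime

date = datetime.date(2024, 6, 15)            # stand-in for datetime.date.today()
birth_year, birth_month, birth_day = 2000, 3, 10
age = date.year - birth_year               # 24
age = age if date.month > birth_month and date.day > birth_day else age - 1
print(age)                                 # -> 24: both comparisons hold, so no year is subtracted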
#!/usr/bin/env python3
from __future__ import print_function
import sys
sys.path.append('./method')
import os
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pints
import pints.io
import pints.plot
import model as m
import parametertransform
import priors
"""
Run fit.
"""
model_list = ['A', 'B', 'C']
try:
which_model = sys.argv[1]
except:
print('Usage: python %s [str:which_model]' % os.path.basename(__file__))
sys.exit()
if which_model not in model_list:
raise ValueError('Input model %s is not available in the model list' \
% which_model)
# Get all input variables
import importlib
sys.path.append('./mmt-model-files')
info_id = 'model_%s' % which_model
info = importlib.import_module(info_id)
data_dir = './data'
savedir = './out/mcmc-' + info_id
if not os.path.isdir(savedir):
os.makedirs(savedir)
data_file_name = 'data-sinewave.csv'
print('Fitting to ', data_file_name)
print('Temperature: ', info.temperature)
saveas = info_id + '-' + data_file_name[5:][:-4]
# Protocol
protocol = np.loadtxt('./protocol-time-series/sinewave.csv', skiprows=1,
delimiter=',')
protocol_times = protocol[:, 0]
protocol = protocol[:, 1]
# Control fitting seed
# fit_seed = np.random.randint(0, 2**30)
fit_seed = 542811797
print('Fit seed: ', fit_seed)
np.random.seed(fit_seed)
# Set parameter transformation
transform_to_model_param = parametertransform.log_transform_to_model_param
transform_from_model_param = parametertransform.log_transform_from_model_param
# Load data
data = np.loadtxt(data_dir + '/' + data_file_name,
delimiter=',', skiprows=1) # headers
times = data[:, 0]
data = data[:, 1]
noise_sigma = np.std(data[:500])
print('Estimated noise level: ', noise_sigma)
# Model
model = m.Model(info.model_file,
variables=info.parameters,
current_readout=info.current_list,
set_ion=info.ions_conc,
transform=transform_to_model_param,
temperature=273.15 + info.temperature, # K
)
LogPrior = {
'model_A': priors.ModelALogPrior,
'model_B': priors.ModelBLogPrior,
}
# Update protocol
model.set_fixed_form_voltage_protocol(protocol, protocol_times)
# Create Pints stuffs
problem = pints.SingleOutputProblem(model, times, data)
loglikelihood = pints.GaussianLogLikelihood(problem)
logmodelprior = LogPrior[info_id](transform_to_model_param,
transform_from_model_param)
lognoiseprior = pints.UniformLogPrior([0.1 * noise_sigma], [10. * noise_sigma])
logprior = pints.ComposedLogPrior(logmodelprior, lognoiseprior)
logposterior = pints.LogPosterior(loglikelihood, logprior)
# Check logposterior is working fine
priorparams = np.copy(info.base_param)
transform_priorparams = transform_from_model_param(priorparams)
priorparams = np.append(priorparams, noise_sigma)
transform_priorparams = np.append(transform_priorparams, noise_sigma)
print('Posterior at prior parameters: ',
logposterior(transform_priorparams))
for _ in range(10):
assert(logposterior(transform_priorparams) ==\
logposterior(transform_priorparams))
# Load fitting results
calloaddir = './out/' + info_id
load_seed = 542811797
fit_idx = [1, 2, 3]
transform_x0_list = []
print('MCMC starting point: ')
for i in fit_idx:
f = '%s/%s-solution-%s-%s.txt' % (calloaddir, 'sinewave', load_seed, i)
p = np.loadtxt(f)
transform_x0_list.append(np.append(transform_from_model_param(p),
noise_sigma))
print(transform_x0_list[-1])
print('Posterior: ', logposterior(transform_x0_list[-1]))
# Run
mcmc = pints.MCMCController(logposterior, len(transform_x0_list),
transform_x0_list, method=pints.PopulationMCMC)
n_iter = 100000
mcmc.set_max_iterations(n_iter)
mcmc.set_initial_phase_iterations(int(0.05 * n_iter))
mcmc.set_parallel(False)
mcmc.set_chain_filename('%s/%s-chain.csv' % (savedir, saveas))
mcmc.set_log_pdf_filename('%s/%s-pdf.csv' % (savedir, saveas))
chains = mcmc.run()
# De-transform parameters
chains_param = np.zeros(chains.shape)
for i, c in enumerate(chains):
c_tmp = np.copy(c)
chains_param[i, :, :-1] = transform_to_model_param(c_tmp[:, :-1])
chains_param[i, :, -1] = c_tmp[:, -1]
del(c_tmp)
# Save (de-transformed version)
pints.io.save_samples('%s/%s-chain.csv' % (savedir, saveas), *chains_param)
# Plot
# burn in and thinning
chains_final = chains[:, int(0.5 * n_iter)::5, :]
chains_param = chains_param[:, int(0.5 * n_iter)::5, :]
transform_x0 = transform_x0_list[0]
x0 = np.append(transform_to_model_param(transform_x0[:-1]), transform_x0[-1])
pints.plot.pairwise(chains_param[0], kde=False, ref_parameters=x0)
plt.savefig('%s/%s-fig1.png' % (savedir, saveas))
plt.close('all')
pints.plot.trace(chains_param, ref_parameters=x0)
plt.savefig('%s/%s-fig2.png' % (savedir, saveas))
plt.close('all')
|
[
"numpy.random.seed",
"pints.GaussianLogLikelihood",
"sys.path.append",
"numpy.copy",
"numpy.std",
"matplotlib.pyplot.close",
"numpy.append",
"numpy.loadtxt",
"pints.LogPosterior",
"pints.io.save_samples",
"pints.plot.pairwise",
"importlib.import_module",
"pints.UniformLogPrior",
"os.path.basename",
"matplotlib.use",
"pints.SingleOutputProblem",
"sys.exit",
"os.makedirs",
"os.path.isdir",
"pints.plot.trace",
"model.Model",
"numpy.zeros",
"pints.ComposedLogPrior",
"matplotlib.pyplot.savefig"
] |
[((72, 99), 'sys.path.append', 'sys.path.append', (['"""./method"""'], {}), "('./method')\n", (87, 99), False, 'import sys\n'), ((147, 168), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (161, 168), False, 'import matplotlib\n'), ((672, 708), 'sys.path.append', 'sys.path.append', (['"""./mmt-model-files"""'], {}), "('./mmt-model-files')\n", (687, 708), False, 'import sys\n'), ((751, 783), 'importlib.import_module', 'importlib.import_module', (['info_id'], {}), '(info_id)\n', (774, 783), False, 'import importlib\n'), ((1084, 1160), 'numpy.loadtxt', 'np.loadtxt', (['"""./protocol-time-series/sinewave.csv"""'], {'skiprows': '(1)', 'delimiter': '""","""'}), "('./protocol-time-series/sinewave.csv', skiprows=1, delimiter=',')\n", (1094, 1160), True, 'import numpy as np\n'), ((1344, 1368), 'numpy.random.seed', 'np.random.seed', (['fit_seed'], {}), '(fit_seed)\n', (1358, 1368), True, 'import numpy as np\n'), ((1575, 1645), 'numpy.loadtxt', 'np.loadtxt', (["(data_dir + '/' + data_file_name)"], {'delimiter': '""","""', 'skiprows': '(1)'}), "(data_dir + '/' + data_file_name, delimiter=',', skiprows=1)\n", (1585, 1645), True, 'import numpy as np\n'), ((1726, 1744), 'numpy.std', 'np.std', (['data[:500]'], {}), '(data[:500])\n', (1732, 1744), True, 'import numpy as np\n'), ((1808, 2003), 'model.Model', 'm.Model', (['info.model_file'], {'variables': 'info.parameters', 'current_readout': 'info.current_list', 'set_ion': 'info.ions_conc', 'transform': 'transform_to_model_param', 'temperature': '(273.15 + info.temperature)'}), '(info.model_file, variables=info.parameters, current_readout=info.\n current_list, set_ion=info.ions_conc, transform=\n transform_to_model_param, temperature=273.15 + info.temperature)\n', (1815, 2003), True, 'import model as m\n'), ((2273, 2318), 'pints.SingleOutputProblem', 'pints.SingleOutputProblem', (['model', 'times', 'data'], {}), '(model, times, data)\n', (2298, 2318), False, 'import pints\n'), ((2335, 2371), 'pints.GaussianLogLikelihood', 'pints.GaussianLogLikelihood', (['problem'], {}), '(problem)\n', (2362, 2371), False, 'import pints\n'), ((2484, 2548), 'pints.UniformLogPrior', 'pints.UniformLogPrior', (['[0.1 * noise_sigma]', '[10.0 * noise_sigma]'], {}), '([0.1 * noise_sigma], [10.0 * noise_sigma])\n', (2505, 2548), False, 'import pints\n'), ((2559, 2611), 'pints.ComposedLogPrior', 'pints.ComposedLogPrior', (['logmodelprior', 'lognoiseprior'], {}), '(logmodelprior, lognoiseprior)\n', (2581, 2611), False, 'import pints\n'), ((2627, 2670), 'pints.LogPosterior', 'pints.LogPosterior', (['loglikelihood', 'logprior'], {}), '(loglikelihood, logprior)\n', (2645, 2670), False, 'import pints\n'), ((2723, 2747), 'numpy.copy', 'np.copy', (['info.base_param'], {}), '(info.base_param)\n', (2730, 2747), True, 'import numpy as np\n'), ((2826, 2861), 'numpy.append', 'np.append', (['priorparams', 'noise_sigma'], {}), '(priorparams, noise_sigma)\n', (2835, 2861), True, 'import numpy as np\n'), ((2886, 2931), 'numpy.append', 'np.append', (['transform_priorparams', 'noise_sigma'], {}), '(transform_priorparams, noise_sigma)\n', (2895, 2931), True, 'import numpy as np\n'), ((4041, 4063), 'numpy.zeros', 'np.zeros', (['chains.shape'], {}), '(chains.shape)\n', (4049, 4063), True, 'import numpy as np\n'), ((4278, 4353), 'pints.io.save_samples', 'pints.io.save_samples', (["('%s/%s-chain.csv' % (savedir, saveas))", '*chains_param'], {}), "('%s/%s-chain.csv' % (savedir, saveas), *chains_param)\n", (4299, 4353), False, 'import pints\n'), ((4607, 4673), 'pints.plot.pairwise', 
'pints.plot.pairwise', (['chains_param[0]'], {'kde': '(False)', 'ref_parameters': 'x0'}), '(chains_param[0], kde=False, ref_parameters=x0)\n', (4626, 4673), False, 'import pints\n'), ((4674, 4723), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('%s/%s-fig1.png' % (savedir, saveas))"], {}), "('%s/%s-fig1.png' % (savedir, saveas))\n", (4685, 4723), True, 'import matplotlib.pyplot as plt\n'), ((4724, 4740), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (4733, 4740), True, 'import matplotlib.pyplot as plt\n'), ((4742, 4791), 'pints.plot.trace', 'pints.plot.trace', (['chains_param'], {'ref_parameters': 'x0'}), '(chains_param, ref_parameters=x0)\n', (4758, 4791), False, 'import pints\n'), ((4792, 4841), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('%s/%s-fig2.png' % (savedir, saveas))"], {}), "('%s/%s-fig2.png' % (savedir, saveas))\n", (4803, 4841), True, 'import matplotlib.pyplot as plt\n'), ((4842, 4858), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (4851, 4858), True, 'import matplotlib.pyplot as plt\n'), ((847, 869), 'os.path.isdir', 'os.path.isdir', (['savedir'], {}), '(savedir)\n', (860, 869), False, 'import os\n'), ((875, 895), 'os.makedirs', 'os.makedirs', (['savedir'], {}), '(savedir)\n', (886, 895), False, 'import os\n'), ((3392, 3405), 'numpy.loadtxt', 'np.loadtxt', (['f'], {}), '(f)\n', (3402, 3405), True, 'import numpy as np\n'), ((4107, 4117), 'numpy.copy', 'np.copy', (['c'], {}), '(c)\n', (4114, 4117), True, 'import numpy as np\n'), ((480, 490), 'sys.exit', 'sys.exit', ([], {}), '()\n', (488, 490), False, 'import sys\n'), ((448, 474), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (464, 474), False, 'import os\n')]
|
#!flask/bin/python
# This file is for starting up the server!
from app import myapp
myapp.run(debug=True)
|
[
"app.myapp.run"
] |
[((86, 107), 'app.myapp.run', 'myapp.run', ([], {'debug': '(True)'}), '(debug=True)\n', (95, 107), False, 'from app import myapp\n')]
|
# encoding:utf-8
from nlpsc.dataset import Dataset
from nlpsc.vboard.dataset import DatasetVBoard
class TestVBoard(object):
def test_dataset_vboard(self):
# from nlpsc.vboard.dataset import index
from ..vboard import bottle
bottle.TEMPLATE_PATH.append('../vboard/views/')
dataset = Dataset(name='测试数据集')
dataset.add_header('F-no(int) F-text_a F-text_b L-label1(list) L-label2')
DatasetVBoard(dataset).serve()
|
[
"nlpsc.vboard.dataset.DatasetVBoard",
"nlpsc.dataset.Dataset"
] |
[((323, 344), 'nlpsc.dataset.Dataset', 'Dataset', ([], {'name': '"""测试数据集"""'}), "(name='测试数据集')\n", (330, 344), False, 'from nlpsc.dataset import Dataset\n'), ((436, 458), 'nlpsc.vboard.dataset.DatasetVBoard', 'DatasetVBoard', (['dataset'], {}), '(dataset)\n', (449, 458), False, 'from nlpsc.vboard.dataset import DatasetVBoard\n')]
|
import argparse
from os.path import exists
from docqa.triviaqa.build_span_corpus import TriviaQaOpenDataset
from docqa.triviaqa.evidence_corpus import get_evidence_voc
"""
Build vocab of all words in the triviaqa dataset, including
all documents and all train questions.
"""
def main():
parser = argparse.ArgumentParser()
parser.add_argument("output")
parser.add_argument("-m", "--min_count", type=int, default=1)
parser.add_argument("-n", "--n_processes", type=int, default=1)
args = parser.parse_args()
if exists(args.output):
raise ValueError()
data = TriviaQaOpenDataset()
corpus_voc = get_evidence_voc(data.evidence, args.n_processes)
print("Adding question voc...")
train = data.get_train()
for q in train:
corpus_voc.update(q.question)
print("Saving...")
with open(args.output, "w") as f:
for word, c in corpus_voc.items():
if c >= args.min_count:
f.write(word)
f.write("\n")
if __name__ == "__main__":
main()
|
[
"docqa.triviaqa.evidence_corpus.get_evidence_voc",
"docqa.triviaqa.build_span_corpus.TriviaQaOpenDataset",
"os.path.exists",
"argparse.ArgumentParser"
] |
[((305, 330), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (328, 330), False, 'import argparse\n'), ((538, 557), 'os.path.exists', 'exists', (['args.output'], {}), '(args.output)\n', (544, 557), False, 'from os.path import exists\n'), ((598, 619), 'docqa.triviaqa.build_span_corpus.TriviaQaOpenDataset', 'TriviaQaOpenDataset', ([], {}), '()\n', (617, 619), False, 'from docqa.triviaqa.build_span_corpus import TriviaQaOpenDataset\n'), ((637, 686), 'docqa.triviaqa.evidence_corpus.get_evidence_voc', 'get_evidence_voc', (['data.evidence', 'args.n_processes'], {}), '(data.evidence, args.n_processes)\n', (653, 686), False, 'from docqa.triviaqa.evidence_corpus import get_evidence_voc\n')]
|
from formencode import Schema, validators, FancyValidator, Invalid, ForEach
from dateutil.parser import parse
class ValidateISODate(FancyValidator):
@staticmethod
def _to_python(value, state):
try:
val = parse(value)
except ValueError:
raise Invalid("Date/time format is invalid, it must be ISO 8601 formatted "
"for UTZ with no offset (i.e. 2010-01-01T01:01:01Z)", value, state)
return val
class PublicKeyValidator(Schema):
id = validators.String()
active = validators.Bool()
date_created = ValidateISODate()
date_expires = ValidateISODate()
public_key = validators.String()
allow_extra_fields = True
class DirectoryUserDeviceLinkResponseValidator(Schema):
qrcode = validators.String() # URL
code = validators.String(min=7)
allow_extra_fields = True
class DirectoryGetDeviceResponseValidator(Schema):
id = validators.String()
name = validators.String()
status = validators.Int()
type = validators.String()
allow_extra_fields = True
class DirectoryGetSessionsValidator(Schema):
auth_request = validators.String()
date_created = ValidateISODate()
service_icon = validators.String()
service_id = validators.String()
service_name = validators.String()
allow_extra_fields = True
class DirectoryValidator(Schema):
id = validators.String()
service_ids = ForEach(validators.String())
sdk_keys = ForEach(validators.String())
premium = validators.Bool()
name = validators.String()
android_key = validators.String()
ios_certificate_fingerprint = validators.String()
active = validators.Bool()
allow_extra_fields = True
class AuthorizationResponseValidator(Schema):
auth = validators.String()
service_user_hash = validators.String()
org_user_hash = validators.String()
user_push_id = validators.String()
public_key_id = validators.String()
allow_extra_fields = True
class AuthorizationResponsePackageValidator(Schema):
service_pins = ForEach()
auth_request = validators.String() # UUID
response = validators.Bool()
device_id = validators.String()
allow_extra_fields = True
class AuthorizeValidator(Schema):
auth_request = validators.String(not_empty=True)
push_package = validators.String(if_missing=None, not_empty=True)
allow_extra_fields = True
class AuthorizeSSEValidator(Schema):
service_user_hash = validators.String()
api_time = validators.String()
allow_extra_fields = True
class ServiceValidator(Schema):
id = validators.String()
icon = validators.String()
name = validators.String()
description = validators.String()
active = validators.Bool()
callback_url = validators.String()
allow_extra_fields = True
class ServiceSecurityPolicyValidator(Schema):
allow_extra_fields = True
|
[
"formencode.validators.Bool",
"formencode.ForEach",
"dateutil.parser.parse",
"formencode.Invalid",
"formencode.validators.String",
"formencode.validators.Int"
] |
[((521, 540), 'formencode.validators.String', 'validators.String', ([], {}), '()\n', (538, 540), False, 'from formencode import Schema, validators, FancyValidator, Invalid, ForEach\n'), ((554, 571), 'formencode.validators.Bool', 'validators.Bool', ([], {}), '()\n', (569, 571), False, 'from formencode import Schema, validators, FancyValidator, Invalid, ForEach\n'), ((663, 682), 'formencode.validators.String', 'validators.String', ([], {}), '()\n', (680, 682), False, 'from formencode import Schema, validators, FancyValidator, Invalid, ForEach\n'), ((784, 803), 'formencode.validators.String', 'validators.String', ([], {}), '()\n', (801, 803), False, 'from formencode import Schema, validators, FancyValidator, Invalid, ForEach\n'), ((822, 846), 'formencode.validators.String', 'validators.String', ([], {'min': '(7)'}), '(min=7)\n', (839, 846), False, 'from formencode import Schema, validators, FancyValidator, Invalid, ForEach\n'), ((939, 958), 'formencode.validators.String', 'validators.String', ([], {}), '()\n', (956, 958), False, 'from formencode import Schema, validators, FancyValidator, Invalid, ForEach\n'), ((970, 989), 'formencode.validators.String', 'validators.String', ([], {}), '()\n', (987, 989), False, 'from formencode import Schema, validators, FancyValidator, Invalid, ForEach\n'), ((1003, 1019), 'formencode.validators.Int', 'validators.Int', ([], {}), '()\n', (1017, 1019), False, 'from formencode import Schema, validators, FancyValidator, Invalid, ForEach\n'), ((1031, 1050), 'formencode.validators.String', 'validators.String', ([], {}), '()\n', (1048, 1050), False, 'from formencode import Schema, validators, FancyValidator, Invalid, ForEach\n'), ((1147, 1166), 'formencode.validators.String', 'validators.String', ([], {}), '()\n', (1164, 1166), False, 'from formencode import Schema, validators, FancyValidator, Invalid, ForEach\n'), ((1223, 1242), 'formencode.validators.String', 'validators.String', ([], {}), '()\n', (1240, 1242), False, 'from formencode import Schema, validators, FancyValidator, Invalid, ForEach\n'), ((1260, 1279), 'formencode.validators.String', 'validators.String', ([], {}), '()\n', (1277, 1279), False, 'from formencode import Schema, validators, FancyValidator, Invalid, ForEach\n'), ((1299, 1318), 'formencode.validators.String', 'validators.String', ([], {}), '()\n', (1316, 1318), False, 'from formencode import Schema, validators, FancyValidator, Invalid, ForEach\n'), ((1394, 1413), 'formencode.validators.String', 'validators.String', ([], {}), '()\n', (1411, 1413), False, 'from formencode import Schema, validators, FancyValidator, Invalid, ForEach\n'), ((1519, 1536), 'formencode.validators.Bool', 'validators.Bool', ([], {}), '()\n', (1534, 1536), False, 'from formencode import Schema, validators, FancyValidator, Invalid, ForEach\n'), ((1548, 1567), 'formencode.validators.String', 'validators.String', ([], {}), '()\n', (1565, 1567), False, 'from formencode import Schema, validators, FancyValidator, Invalid, ForEach\n'), ((1586, 1605), 'formencode.validators.String', 'validators.String', ([], {}), '()\n', (1603, 1605), False, 'from formencode import Schema, validators, FancyValidator, Invalid, ForEach\n'), ((1640, 1659), 'formencode.validators.String', 'validators.String', ([], {}), '()\n', (1657, 1659), False, 'from formencode import Schema, validators, FancyValidator, Invalid, ForEach\n'), ((1673, 1690), 'formencode.validators.Bool', 'validators.Bool', ([], {}), '()\n', (1688, 1690), False, 'from formencode import Schema, validators, FancyValidator, Invalid, 
ForEach\n'), ((1780, 1799), 'formencode.validators.String', 'validators.String', ([], {}), '()\n', (1797, 1799), False, 'from formencode import Schema, validators, FancyValidator, Invalid, ForEach\n'), ((1824, 1843), 'formencode.validators.String', 'validators.String', ([], {}), '()\n', (1841, 1843), False, 'from formencode import Schema, validators, FancyValidator, Invalid, ForEach\n'), ((1864, 1883), 'formencode.validators.String', 'validators.String', ([], {}), '()\n', (1881, 1883), False, 'from formencode import Schema, validators, FancyValidator, Invalid, ForEach\n'), ((1903, 1922), 'formencode.validators.String', 'validators.String', ([], {}), '()\n', (1920, 1922), False, 'from formencode import Schema, validators, FancyValidator, Invalid, ForEach\n'), ((1943, 1962), 'formencode.validators.String', 'validators.String', ([], {}), '()\n', (1960, 1962), False, 'from formencode import Schema, validators, FancyValidator, Invalid, ForEach\n'), ((2067, 2076), 'formencode.ForEach', 'ForEach', ([], {}), '()\n', (2074, 2076), False, 'from formencode import Schema, validators, FancyValidator, Invalid, ForEach\n'), ((2096, 2115), 'formencode.validators.String', 'validators.String', ([], {}), '()\n', (2113, 2115), False, 'from formencode import Schema, validators, FancyValidator, Invalid, ForEach\n'), ((2139, 2156), 'formencode.validators.Bool', 'validators.Bool', ([], {}), '()\n', (2154, 2156), False, 'from formencode import Schema, validators, FancyValidator, Invalid, ForEach\n'), ((2173, 2192), 'formencode.validators.String', 'validators.String', ([], {}), '()\n', (2190, 2192), False, 'from formencode import Schema, validators, FancyValidator, Invalid, ForEach\n'), ((2278, 2311), 'formencode.validators.String', 'validators.String', ([], {'not_empty': '(True)'}), '(not_empty=True)\n', (2295, 2311), False, 'from formencode import Schema, validators, FancyValidator, Invalid, ForEach\n'), ((2331, 2381), 'formencode.validators.String', 'validators.String', ([], {'if_missing': 'None', 'not_empty': '(True)'}), '(if_missing=None, not_empty=True)\n', (2348, 2381), False, 'from formencode import Schema, validators, FancyValidator, Invalid, ForEach\n'), ((2475, 2494), 'formencode.validators.String', 'validators.String', ([], {}), '()\n', (2492, 2494), False, 'from formencode import Schema, validators, FancyValidator, Invalid, ForEach\n'), ((2510, 2529), 'formencode.validators.String', 'validators.String', ([], {}), '()\n', (2527, 2529), False, 'from formencode import Schema, validators, FancyValidator, Invalid, ForEach\n'), ((2603, 2622), 'formencode.validators.String', 'validators.String', ([], {}), '()\n', (2620, 2622), False, 'from formencode import Schema, validators, FancyValidator, Invalid, ForEach\n'), ((2634, 2653), 'formencode.validators.String', 'validators.String', ([], {}), '()\n', (2651, 2653), False, 'from formencode import Schema, validators, FancyValidator, Invalid, ForEach\n'), ((2665, 2684), 'formencode.validators.String', 'validators.String', ([], {}), '()\n', (2682, 2684), False, 'from formencode import Schema, validators, FancyValidator, Invalid, ForEach\n'), ((2703, 2722), 'formencode.validators.String', 'validators.String', ([], {}), '()\n', (2720, 2722), False, 'from formencode import Schema, validators, FancyValidator, Invalid, ForEach\n'), ((2736, 2753), 'formencode.validators.Bool', 'validators.Bool', ([], {}), '()\n', (2751, 2753), False, 'from formencode import Schema, validators, FancyValidator, Invalid, ForEach\n'), ((2773, 2792), 'formencode.validators.String', 
'validators.String', ([], {}), '()\n', (2790, 2792), False, 'from formencode import Schema, validators, FancyValidator, Invalid, ForEach\n'), ((1440, 1459), 'formencode.validators.String', 'validators.String', ([], {}), '()\n', (1457, 1459), False, 'from formencode import Schema, validators, FancyValidator, Invalid, ForEach\n'), ((1484, 1503), 'formencode.validators.String', 'validators.String', ([], {}), '()\n', (1501, 1503), False, 'from formencode import Schema, validators, FancyValidator, Invalid, ForEach\n'), ((235, 247), 'dateutil.parser.parse', 'parse', (['value'], {}), '(value)\n', (240, 247), False, 'from dateutil.parser import parse\n'), ((293, 437), 'formencode.Invalid', 'Invalid', (['"""Date/time format is invalid, it must be ISO 8601 formatted for UTZ with no offset (i.e. 2010-01-01T01:01:01Z)"""', 'value', 'state'], {}), "(\n 'Date/time format is invalid, it must be ISO 8601 formatted for UTZ with no offset (i.e. 2010-01-01T01:01:01Z)'\n , value, state)\n", (300, 437), False, 'from formencode import Schema, validators, FancyValidator, Invalid, ForEach\n')]
|
import tensorflow as tf
import numpy as np
input = tf.placeholder(dtype=tf.float32,shape=[5,5,3])
filter = tf.constant(value=1, shape=[3,3,3,5], dtype=tf.float32)
conv0 = tf.nn.atrous_conv2d(input,filters=filter,rate=2,padding='VALID')
with tf.Session() as sess:
img = np.array([3,5,5,3])
out = sess.run(conv0,feed_dict={input:img})
print(out.shape)
|
[
"tensorflow.Session",
"tensorflow.constant",
"tensorflow.nn.atrous_conv2d",
"tensorflow.placeholder",
"numpy.array"
] |
[((52, 101), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '[5, 5, 3]'}), '(dtype=tf.float32, shape=[5, 5, 3])\n', (66, 101), True, 'import tensorflow as tf\n'), ((108, 166), 'tensorflow.constant', 'tf.constant', ([], {'value': '(1)', 'shape': '[3, 3, 3, 5]', 'dtype': 'tf.float32'}), '(value=1, shape=[3, 3, 3, 5], dtype=tf.float32)\n', (119, 166), True, 'import tensorflow as tf\n'), ((172, 239), 'tensorflow.nn.atrous_conv2d', 'tf.nn.atrous_conv2d', (['input'], {'filters': 'filter', 'rate': '(2)', 'padding': '"""VALID"""'}), "(input, filters=filter, rate=2, padding='VALID')\n", (191, 239), True, 'import tensorflow as tf\n'), ((243, 255), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (253, 255), True, 'import tensorflow as tf\n'), ((275, 297), 'numpy.array', 'np.array', (['[3, 5, 5, 3]'], {}), '([3, 5, 5, 3])\n', (283, 297), True, 'import numpy as np\n')]
|
import sys
import pygame
import random
from snake_utility import Snake, Cherry, SnakeGameStatusFlags
import json
def set_new_cherry_pos(snake_lst):
"""
Sets new cherry position.
:param snake_lst: List, containing all snake instances present in the game. This is needed
to check that cherry will not be placed onto a snake.
:type snake_lst: list of Snake
"""
new_cherry_pos = random.randrange(0, width, Snake.block_size), random.randrange(0, height, Snake.block_size)
# check if new cherry position is within any of the snakes and set new one
for snk in snake_lst:
while new_cherry_pos in snk.block_pos_lst:
new_cherry_pos = random.randrange(0, width, Snake.block_size), random.randrange(0, height, Snake.block_size)
return new_cherry_pos
def init_game(config_data):
"""
Initializes the game with configuration, defined in config_data.
:param config_data: Dictionary, which contains configuration for the game, such as
game window dimensions, number of snakes, keyboard keys, etc.
:type config_data: dict
:return: Lists of initialized snakes and cherries.
:rtype: tuple of list
"""
# colors for snakes
snake_colors = [(0, 255, 0), # player 1 is green
(0, 0, 255), # player 2 is blue
(255, 255, 50), # player 3 is yellow
(205, 0, 205)] # player 4 is purple
# create snake instances
init_snake_lst = []
for i in range(config_data["num_snakes"]):
keys = config_data["keys"][i]
snake = Snake(start_pos=config_data["start_pos"][i],
move_keys={'up': pygame.__getattribute__(keys[0]),
'right': pygame.__getattribute__(keys[1]),
'down': pygame.__getattribute__(keys[2]),
'left': pygame.__getattribute__(keys[3])},
color=snake_colors[i],
block_size=config_data["block_size"],
num_of_start_blocks=config_data["initial_snake_length"])
init_snake_lst.append(snake)
# create cherry instances
init_cherry_lst = []
for i in range(config_data["num_cherries"]):
cherry = Cherry(block_size)
cherry.set_new_random_position(init_snake_lst, config_data["main_window_size"])
init_cherry_lst.append(cherry)
return init_snake_lst, init_cherry_lst
def redraw_screen(snake_lst, cherry_lst, block_size):
"""
Redraws screen with updated snake and cherry positions.
:param snake_lst: List of all snakes in the game.
:type snake_lst: list of Snake
:param cherry_lst: List of all cherries in the game.
:type cherry_lst: list of Cherry
:param block_size: Size of one block of snake or cherry in pixels.
:type block_size: int
"""
# clear screen
screen.fill(BLACK)
# draw snakes
for snake in snake_lst:
for block_pos in snake.block_pos_lst:
pygame.draw.rect(screen,
snake.color,
(block_pos[0], block_pos[1], block_size, block_size))
# draw cherries
for cherry in cherry_lst:
pygame.draw.rect(screen,
(255, 0, 0),
(cherry.position[0], cherry.position[1], block_size, block_size))
# update display
pygame.display.update()
def main_loop(snake_list, cherry_list):
"""
Main loop of the game. This function returns only if snake collision occured.
"""
while True:
# capture events
for event in pygame.event.get():
if event.type == pygame.QUIT:
# happens when user tries to close window
sys.exit() # exit from game
elif event.type == pygame.KEYDOWN:
# happens on key pressed
# check which snake's key was pressed and add it to key stack
for snake in snake_list:
if event.key in [val for _, val in snake.move_keys.items()]:
snake.key_stack.append(event.key)
elif event.type == pygame.USEREVENT: # happens on each timer tick
for snake in snake_list:
snake.get_dir_from_keystack()
snake.set_new_state(size, snake_list)
# check if there is collision
if snake.collision:
return SnakeGameStatusFlags.COLLISION_OCCURENCE
# check if any of the cherries was eaten by the current snake
for cherry in cherry_list:
if snake.block_pos_lst[0] == cherry.position:
# append new block to snake that ate the cherry
snake.block_pos_lst.append(snake.block_pos_lst[-1])
# set new random position for the eaten cherry
cherry.set_new_random_position(snake_lst, size)
# redraw screen with updated snake and cherry positions
redraw_screen(snake_list, cherry_list, block_size)
if __name__ == '__main__':
pygame.init()
# load configuration data
with open('config.json', 'r') as config_file:
configuration_data = json.load(config_file)
size = width, height = configuration_data["main_window_size"]
BLACK = 0, 0, 0
refresh_rate = configuration_data["refresh_rate"]
start_pos = configuration_data["start_pos"]
block_size = configuration_data["block_size"]
# set display
screen = pygame.display.set_mode(size)
# set timer
pygame.time.set_timer(pygame.USEREVENT, refresh_rate)
timer = pygame.time.get_ticks()
while True:
# initialize new game
snake_lst, cherry_pos = init_game(configuration_data)
# main loop will exit only if collision occurs
main_loop(snake_lst, cherry_pos)
|
[
"pygame.__getattribute__",
"json.load",
"pygame.draw.rect",
"pygame.display.set_mode",
"pygame.event.get",
"pygame.init",
"pygame.display.update",
"random.randrange",
"snake_utility.Cherry",
"pygame.time.get_ticks",
"pygame.time.set_timer",
"sys.exit"
] |
[((3450, 3473), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (3471, 3473), False, 'import pygame\n'), ((5260, 5273), 'pygame.init', 'pygame.init', ([], {}), '()\n', (5271, 5273), False, 'import pygame\n'), ((5678, 5707), 'pygame.display.set_mode', 'pygame.display.set_mode', (['size'], {}), '(size)\n', (5701, 5707), False, 'import pygame\n'), ((5729, 5782), 'pygame.time.set_timer', 'pygame.time.set_timer', (['pygame.USEREVENT', 'refresh_rate'], {}), '(pygame.USEREVENT, refresh_rate)\n', (5750, 5782), False, 'import pygame\n'), ((5796, 5819), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (5817, 5819), False, 'import pygame\n'), ((424, 468), 'random.randrange', 'random.randrange', (['(0)', 'width', 'Snake.block_size'], {}), '(0, width, Snake.block_size)\n', (440, 468), False, 'import random\n'), ((470, 515), 'random.randrange', 'random.randrange', (['(0)', 'height', 'Snake.block_size'], {}), '(0, height, Snake.block_size)\n', (486, 515), False, 'import random\n'), ((2310, 2328), 'snake_utility.Cherry', 'Cherry', (['block_size'], {}), '(block_size)\n', (2316, 2328), False, 'from snake_utility import Snake, Cherry, SnakeGameStatusFlags\n'), ((3270, 3378), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', '(255, 0, 0)', '(cherry.position[0], cherry.position[1], block_size, block_size)'], {}), '(screen, (255, 0, 0), (cherry.position[0], cherry.position[\n 1], block_size, block_size))\n', (3286, 3378), False, 'import pygame\n'), ((3676, 3694), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (3692, 3694), False, 'import pygame\n'), ((5384, 5406), 'json.load', 'json.load', (['config_file'], {}), '(config_file)\n', (5393, 5406), False, 'import json\n'), ((3061, 3156), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'snake.color', '(block_pos[0], block_pos[1], block_size, block_size)'], {}), '(screen, snake.color, (block_pos[0], block_pos[1],\n block_size, block_size))\n', (3077, 3156), False, 'import pygame\n'), ((702, 746), 'random.randrange', 'random.randrange', (['(0)', 'width', 'Snake.block_size'], {}), '(0, width, Snake.block_size)\n', (718, 746), False, 'import random\n'), ((748, 793), 'random.randrange', 'random.randrange', (['(0)', 'height', 'Snake.block_size'], {}), '(0, height, Snake.block_size)\n', (764, 793), False, 'import random\n'), ((3812, 3822), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3820, 3822), False, 'import sys\n'), ((1705, 1737), 'pygame.__getattribute__', 'pygame.__getattribute__', (['keys[0]'], {}), '(keys[0])\n', (1728, 1737), False, 'import pygame\n'), ((1781, 1813), 'pygame.__getattribute__', 'pygame.__getattribute__', (['keys[1]'], {}), '(keys[1])\n', (1804, 1813), False, 'import pygame\n'), ((1856, 1888), 'pygame.__getattribute__', 'pygame.__getattribute__', (['keys[2]'], {}), '(keys[2])\n', (1879, 1888), False, 'import pygame\n'), ((1931, 1963), 'pygame.__getattribute__', 'pygame.__getattribute__', (['keys[3]'], {}), '(keys[3])\n', (1954, 1963), False, 'import pygame\n')]
|
#!/usr/bin/env python3
import numpy as np
import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sys, os, glob
import re
# Output data format
from configurations import *
design_pt_to_plot=2
#################################################################################
#### Try to figure out semi-automatically what observables to group together ####
#################################################################################
# This is the input:
# Specifies how observables are grouped according to these regular expression
# Also specify if they should be plotted on a linear or a log scale
regex_obs_to_group_list=[
(r'$\pi$/K/p dN/dy',"dN_dy_(pion|kaon|proton)",'log'),
(r'$\pi$/K/p $\langle p_T \rangle$',"mean_pT_(pion|kaon|proton)",'linear'),
(r'$\Lambda/\Omega/\Xi$ dN/dy',"dN_dy_(Lambda|Omega|Xi)",'log'),
(r'$v_n\{2\}$',"v[2-5+]2",'linear'),
(r'$dN_{ch}/d\eta$',"dNch_deta",'log'),
(r'$dE_T/d\eta$',"dET_deta",'log'),
(r'$\langle p_T \rangle$ fluct',"pT_fluct",'linear'),
]
# This parts figures out how to group observables based on the regular expressions
obs_to_group={}
# Loop over observables to see which ones to group
for system in system_strs:
obs_to_group[system]={}
for obs_name in obs_cent_list[system]:
found_match=False
for regex_id, (regex_label, regex_obs_to_group, plot_scale) in enumerate(regex_obs_to_group_list):
r = re.compile(regex_obs_to_group)
match=r.match(obs_name)
# No match means nothing to group
if (match is not None):
if (found_match):
print("Non-exclusive grouping. Can't work...")
exit(1)
else:
found_match=True
obs_to_group[system][obs_name]=(regex_id, regex_label, plot_scale)
if (not found_match):
obs_to_group[system][obs_name]=None
# Parse the previous list to make something useful out of it
final_obs_grouping = {}
#
for system in system_strs:
final_obs_grouping[system]={}
for n, (key, value) in enumerate(obs_to_group[system].items()):
if (value is None):
newvalue=(n,key)
else:
newvalue=value
final_obs_grouping[system].setdefault(newvalue, []).append(key)
##############
#### Plot ####
##############
def plot(calcs):
for system in system_strs:
# Count how many observables to plot
nb_obs=len(final_obs_grouping[system])
# Decide how many columns we want the plot to have
nb_of_cols=4
# COunt how many rows needed
nb_of_rows=int(np.ceil(nb_obs/nb_of_cols))
# Prepare figure
fig = plt.figure(figsize=(2*nb_of_cols,2*nb_of_rows))
line_list=[]
#Loop over grouped observables
#for n, (obs, cent) in enumerate(obs_cent_list.items()):
for n, ((regex_id, obs_name, plot_scale), obs_list) in enumerate(final_obs_grouping[system].items()):
plt.subplot(nb_of_rows,nb_of_cols,n+1)
plt.xlabel(r'Centrality (%)', fontsize=10)
plt.ylabel(obs_name, fontsize=10)
plt.yscale(plot_scale)
# Loop over observable group
for obs, color in zip(obs_list,'rgbrgbrgb'):
cent=obs_cent_list[system][obs]
mid_centrality=[(low+up)/2. for low,up in cent]
#Loop over delta-f
idf_list=[0,1,2,3]
idf_sym=['D','o','^','.']
for idf, line in zip(idf_list, idf_sym):
mean_values=calcs[system][obs]['mean'][:,idf][design_pt_to_plot]
stat_uncert=calcs[system][obs]['err'][:,idf][design_pt_to_plot]
line_type,_,_ = plt.errorbar(mid_centrality, mean_values, yerr=stat_uncert, fmt=line, color=color, markersize=4)
line_list.append(line_type)
if (plot_scale != "log"):
plt.ylim(ymin=0)
# Plot legend in first subplot only
if (0 == n):
plt.legend(line_list,["idf="+str(idf) for idf in idf_list],loc="upper right",fontsize=10)
plt.tight_layout(True)
#plt.savefig("obs.pdf")
plt.show()
if __name__ == '__main__':
results = []
for file in glob.glob(sys.argv[1]):
# Load calculations
calcs = np.fromfile(file, dtype=np.dtype(bayes_dtype))
entry = plot(calcs)
|
[
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.show",
"numpy.ceil",
"matplotlib.pyplot.ylim",
"numpy.dtype",
"matplotlib.pyplot.figure",
"glob.glob",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.errorbar",
"re.compile"
] |
[((4351, 4373), 'glob.glob', 'glob.glob', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (4360, 4373), False, 'import sys, os, glob\n'), ((2720, 2772), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(2 * nb_of_cols, 2 * nb_of_rows)'}), '(figsize=(2 * nb_of_cols, 2 * nb_of_rows))\n', (2730, 2772), True, 'import matplotlib.pyplot as plt\n'), ((4206, 4228), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', (['(True)'], {}), '(True)\n', (4222, 4228), True, 'import matplotlib.pyplot as plt\n'), ((4269, 4279), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4277, 4279), True, 'import matplotlib.pyplot as plt\n'), ((1427, 1457), 're.compile', 're.compile', (['regex_obs_to_group'], {}), '(regex_obs_to_group)\n', (1437, 1457), False, 'import re\n'), ((2653, 2681), 'numpy.ceil', 'np.ceil', (['(nb_obs / nb_of_cols)'], {}), '(nb_obs / nb_of_cols)\n', (2660, 2681), True, 'import numpy as np\n'), ((3018, 3060), 'matplotlib.pyplot.subplot', 'plt.subplot', (['nb_of_rows', 'nb_of_cols', '(n + 1)'], {}), '(nb_of_rows, nb_of_cols, n + 1)\n', (3029, 3060), True, 'import matplotlib.pyplot as plt\n'), ((3069, 3110), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Centrality (%)"""'], {'fontsize': '(10)'}), "('Centrality (%)', fontsize=10)\n", (3079, 3110), True, 'import matplotlib.pyplot as plt\n'), ((3124, 3157), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['obs_name'], {'fontsize': '(10)'}), '(obs_name, fontsize=10)\n', (3134, 3157), True, 'import matplotlib.pyplot as plt\n'), ((3170, 3192), 'matplotlib.pyplot.yscale', 'plt.yscale', (['plot_scale'], {}), '(plot_scale)\n', (3180, 3192), True, 'import matplotlib.pyplot as plt\n'), ((3981, 3997), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'ymin': '(0)'}), '(ymin=0)\n', (3989, 3997), True, 'import matplotlib.pyplot as plt\n'), ((4466, 4487), 'numpy.dtype', 'np.dtype', (['bayes_dtype'], {}), '(bayes_dtype)\n', (4474, 4487), True, 'import numpy as np\n'), ((3781, 3882), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['mid_centrality', 'mean_values'], {'yerr': 'stat_uncert', 'fmt': 'line', 'color': 'color', 'markersize': '(4)'}), '(mid_centrality, mean_values, yerr=stat_uncert, fmt=line, color\n =color, markersize=4)\n', (3793, 3882), True, 'import matplotlib.pyplot as plt\n')]
|
""" Module about parameter visibility within hahomematic """
from __future__ import annotations
import logging
import os
from typing import Final
import hahomematic.central_unit as hm_central
from hahomematic.const import (
DEFAULT_ENCODING,
EVENT_CONFIG_PENDING,
EVENT_ERROR,
EVENT_STICKY_UN_REACH,
EVENT_UN_REACH,
EVENT_UPDATE_PENDING,
FILE_CUSTOM_UN_IGNORE_PARAMETERS,
PARAM_CHANNEL_OPERATION_MODE,
PARAMSET_KEY_MASTER,
PARAMSET_KEY_VALUES,
)
from hahomematic.helpers import check_or_create_directory
_LOGGER = logging.getLogger(__name__)
# {device_type: channel_no}
_RELEVANT_MASTER_PARAMSETS_BY_DEVICE: dict[str, tuple[set[int], str]] = {
"HmIPW-DRBL4": ({1, 5, 9, 13}, PARAM_CHANNEL_OPERATION_MODE),
"HmIP-DRBLI4": ({9, 13, 17, 21}, PARAM_CHANNEL_OPERATION_MODE),
}
HIDDEN_PARAMETERS: set[str] = {
EVENT_CONFIG_PENDING,
EVENT_ERROR,
EVENT_STICKY_UN_REACH,
EVENT_UN_REACH,
EVENT_UPDATE_PENDING,
PARAM_CHANNEL_OPERATION_MODE,
"ACTIVITY_STATE",
"DIRECTION",
}
# Parameters within the VALUES paramset for which we don't create entities.
_IGNORED_PARAMETERS: set[str] = {
"AES_KEY",
"BOOST_TIME",
"BOOT",
"BURST_LIMIT_WARNING",
"CLEAR_WINDOW_OPEN_SYMBOL",
"COMBINED_PARAMETER",
"DATE_TIME_UNKNOWN",
"DECISION_VALUE",
"DEVICE_IN_BOOTLOADER",
"DEW_POINT_ALARM",
"EMERGENCY_OPERATION",
"EXTERNAL_CLOCK",
"FROST_PROTECTION",
"HUMIDITY_LIMITER",
"IDENTIFICATION_MODE_LCD_BACKLIGHT",
"INCLUSION_UNSUPPORTED_DEVICE",
"INHIBIT",
"INSTALL_MODE",
"LEVEL_COMBINED",
"LEVEL_REAL",
"OLD_LEVEL",
"PARTY_SET_POINT_TEMPERATURE",
"PARTY_TIME_END",
"PARTY_TIME_START",
"PROCESS",
"QUICK_VETO_TIME",
"RAMP_STOP",
"RELOCK_DELAY",
"SECTION",
"SELF_CALIBRATION",
"SENSOR_ERROR",
"SET_SYMBOL_FOR_HEATING_PHASE",
"SMOKE_DETECTOR_COMMAND",
"STATE_UNCERTAIN",
"SWITCH_POINT_OCCURED",
"TEMPERATURE_LIMITER",
"TEMPERATURE_OUT_OF_RANGE",
"TIME_OF_OPERATION",
"WOCHENPROGRAMM",
}
# Ignore Parameter that end with
_IGNORED_PARAMETERS_WILDCARDS_END: set[str] = {
"OVERFLOW",
"OVERHEAT",
"OVERRUN",
"REPORTING",
"RESULT",
"STATUS",
"SUBMIT",
"WORKING",
}
# Ignore Parameter that start with
_IGNORED_PARAMETERS_WILDCARDS_START: set[str] = {
"ADJUSTING",
"ERR_TTM",
"ERROR",
"IDENTIFICATION_MODE_KEY_VISUAL",
"IDENTIFY_",
"PARTY_START",
"PARTY_STOP",
"STATUS_FLAG",
"WEEK_PROGRAM",
}
# Parameters within the paramsets for which we create entities.
_UN_IGNORE_PARAMETERS_BY_DEVICE: dict[str, list[str]] = {
"DLD": ["ERROR_JAMMED"], # HmIP-DLD
"SD": ["SMOKE_DETECTOR_ALARM_STATUS"], # HmIP-SWSD
"HM-Sec-Win": ["DIRECTION", "WORKING", "ERROR", "STATUS"], # HM-Sec-Win*
"HM-Sec-Key": ["DIRECTION", "ERROR"], # HM-Sec-Key*
"HmIP-PCBS-BAT": [
"OPERATING_VOLTAGE",
"LOW_BAT",
], # To override ignore for HmIP-PCBS
}
# Parameters by device within the VALUES paramset for which we don't create entities.
_IGNORE_PARAMETERS_BY_DEVICE: dict[str, list[str]] = {
"LOWBAT": [
"HM-LC-Sw1-FM",
"HM-LC-Sw1PBU-FM",
"HM-LC-Sw1-Pl-DN-R1",
"HM-LC-Sw1-PCB",
"HM-LC-Sw4-DR",
"HM-SwI-3-FM",
],
"LOW_BAT": ["HmIP-BWTH", "HmIP-PCBS"],
"OPERATING_VOLTAGE": [
"HmIP-BDT",
"HmIP-BSL",
"HmIP-BSM",
"HmIP-BWTH",
"HmIP-DR",
"HmIP-FDT",
"HmIP-FSM",
"HmIP-MOD-OC8",
"HmIP-PCBS",
"HmIP-PDT",
"HmIP-PS",
"HmIP-SFD",
],
}
_ACCEPT_PARAMETER_ONLY_ON_CHANNEL: dict[str, int] = {"LOWBAT": 0}
class ParameterVisibilityCache:
"""Cache for parameter visibility."""
def __init__(
self,
central: hm_central.CentralUnit,
):
self._central: Final = central
self._storage_folder: Final = self._central.central_config.storage_folder
# paramset_key, parameter
self._un_ignore_parameters_general: dict[str, set[str]] = {
PARAMSET_KEY_MASTER: set(),
PARAMSET_KEY_VALUES: set(),
}
self._ignore_parameters_by_device_lower: dict[str, list[str]] = {
parameter: [device_type.lower() for device_type in device_types]
for parameter, device_types in _IGNORE_PARAMETERS_BY_DEVICE.items()
}
self._un_ignore_parameters_by_device_lower: dict[str, list[str]] = {
device_type.lower(): parameters
for device_type, parameters in _UN_IGNORE_PARAMETERS_BY_DEVICE.items()
}
# device_type, channel_no, paramset_key, list[parameter]
self._un_ignore_parameters_by_device_paramset_key: dict[
str, dict[int, dict[str, set[str]]]
] = {}
# device_type, channel_no
self._relevant_master_paramsets_by_device: dict[str, set[int]] = {}
self._init()
def _init(self) -> None:
"""Init relevant_master_paramsets_by_device and un_ignore_parameters_by_device from const"""
for (
device_type,
channels_parameter,
) in _RELEVANT_MASTER_PARAMSETS_BY_DEVICE.items():
device_type_l = device_type.lower()
channel_nos, parameter = channels_parameter
if device_type_l not in self._relevant_master_paramsets_by_device:
self._relevant_master_paramsets_by_device[device_type_l] = set()
if device_type_l not in self._un_ignore_parameters_by_device_paramset_key:
self._un_ignore_parameters_by_device_paramset_key[device_type_l] = {}
for channel_no in channel_nos:
self._relevant_master_paramsets_by_device[device_type_l].add(channel_no)
if (
channel_no
not in self._un_ignore_parameters_by_device_paramset_key[
device_type_l
]
):
self._un_ignore_parameters_by_device_paramset_key[device_type_l][
channel_no
] = {PARAMSET_KEY_MASTER: set()}
self._un_ignore_parameters_by_device_paramset_key[device_type_l][
channel_no
][PARAMSET_KEY_MASTER].add(parameter)
def get_un_ignore_parameters(
self, device_type: str, device_channel: int
) -> dict[str, set[str]]:
"""Return un_ignore_parameters"""
device_type_l = device_type.lower()
un_ignore_parameters: dict[str, set[str]] = {}
if device_type_l is not None and device_channel is not None:
un_ignore_parameters = (
self._un_ignore_parameters_by_device_paramset_key.get(
device_type_l, {}
).get(device_channel, {})
)
for (
paramset_key,
un_ignore_params,
) in self._un_ignore_parameters_general.items():
if paramset_key not in un_ignore_parameters:
un_ignore_parameters[paramset_key] = set()
un_ignore_parameters[paramset_key].update(un_ignore_params)
return un_ignore_parameters
def ignore_parameter(
self,
device_type: str,
sub_type: str | None,
device_channel: int,
paramset_key: str,
parameter: str,
) -> bool:
"""Check if parameter can be ignored."""
device_type_l = device_type.lower()
sub_type_l = sub_type.lower() if sub_type else None
if paramset_key == PARAMSET_KEY_VALUES:
if self.parameter_is_un_ignored(
device_type=device_type,
sub_type=sub_type,
device_channel=device_channel,
paramset_key=paramset_key,
parameter=parameter,
):
return False
if (
parameter in _IGNORED_PARAMETERS
or parameter.endswith(tuple(_IGNORED_PARAMETERS_WILDCARDS_END))
or parameter.startswith(tuple(_IGNORED_PARAMETERS_WILDCARDS_START))
or device_type_l.startswith(
tuple(self._ignore_parameters_by_device_lower.get(parameter, []))
)
or sub_type_l
in self._ignore_parameters_by_device_lower.get(parameter, [])
):
return True
if (
accept_channel := _ACCEPT_PARAMETER_ONLY_ON_CHANNEL.get(parameter)
) is not None:
if accept_channel != device_channel:
return True
if paramset_key == PARAMSET_KEY_MASTER:
if parameter not in self._un_ignore_parameters_by_device_paramset_key.get(
device_type_l, {}
).get(device_channel, {}).get(PARAMSET_KEY_MASTER, []):
return True
return False
def parameter_is_un_ignored(
self,
device_type: str,
sub_type: str | None,
device_channel: int,
paramset_key: str,
parameter: str,
) -> bool:
"""Return if parameter is on un_ignore list"""
device_type_l = device_type.lower()
sub_type_l = sub_type.lower() if sub_type else None
if parameter in self._un_ignore_parameters_general[paramset_key]:
return True
if parameter in self._un_ignore_parameters_by_device_paramset_key.get(
device_type_l, {}
).get(device_channel, {}).get(paramset_key, set()):
return True
if sub_type_l:
if parameter in self._un_ignore_parameters_by_device_paramset_key.get(
sub_type_l, {}
).get(device_channel, {}).get(paramset_key, set()):
return True
if sub_type_l and sub_type_l in self._un_ignore_parameters_by_device_lower:
un_ignore_parameters = self._un_ignore_parameters_by_device_lower[
sub_type_l
]
if parameter in un_ignore_parameters:
return True
if device_type_l.startswith(tuple(self._un_ignore_parameters_by_device_lower)):
for (
device_t,
un_ignore_parameters,
) in self._un_ignore_parameters_by_device_lower.items():
if device_type_l.startswith(device_t):
if parameter in un_ignore_parameters:
return True
return False
def _add_line_to_cache(self, line: str) -> None:
"""
Add line to from un ignore file to cache.
Add data to relevant_master_paramsets_by_device and un_ignore_parameters_by_device from file.
"""
try:
line = line.strip()
if "@" in line:
# add parameter@devicetype:channel_no:paramset_key
data = line.split("@")
if len(data) != 2:
_LOGGER.warning(
"add_line_to_cache: Could not add line '%s' to un ignore cache. Only one @ expected.",
line,
)
return
parameter = data[0]
device_data = data[1].split(":")
if len(device_data) != 3:
_LOGGER.warning(
"add_line_to_cache: Could not add line '%s' to un ignore cache. 4 arguments expected: e.g. TEMPERATURE@HmIP-BWTH:1:VALUES.",
line,
)
return
device_type = device_data[0].lower()
channel_no = int(device_data[1])
paramset_key = device_data[2]
if device_type not in self._un_ignore_parameters_by_device_paramset_key:
self._un_ignore_parameters_by_device_paramset_key[device_type] = {}
if (
channel_no
not in self._un_ignore_parameters_by_device_paramset_key[
device_type
]
):
self._un_ignore_parameters_by_device_paramset_key[device_type][
channel_no
] = {}
if (
paramset_key
not in self._un_ignore_parameters_by_device_paramset_key[
device_type
][channel_no]
):
self._un_ignore_parameters_by_device_paramset_key[device_type][
channel_no
][paramset_key] = set()
self._un_ignore_parameters_by_device_paramset_key[device_type][
channel_no
][paramset_key].add(parameter)
if paramset_key == PARAMSET_KEY_MASTER:
if device_type not in self._relevant_master_paramsets_by_device:
self._relevant_master_paramsets_by_device[device_type] = set()
self._relevant_master_paramsets_by_device[device_type].add(
channel_no
)
elif ":" in line:
# add parameter:paramset_key
data = line.split(":")
if len(data) != 2:
_LOGGER.warning(
"add_line_to_cache: Could not add line '%s' to un ignore cache. 2 arguments expected: e.g. TEMPERATURE:VALUES.",
line,
)
return
paramset_key = data[0]
parameter = data[1]
if paramset_key in (PARAMSET_KEY_VALUES, PARAMSET_KEY_MASTER):
self._un_ignore_parameters_general[paramset_key].add(parameter)
else:
# add parameter
self._un_ignore_parameters_general[PARAMSET_KEY_VALUES].add(line)
except Exception:
_LOGGER.warning(
"add_line_to_cache: Could not add line '%s' to un ignore cache.", line
)
def is_relevant_paramset(
self,
device_type: str,
sub_type: str | None,
paramset_key: str,
device_channel: int,
) -> bool:
"""Return if a paramset is relevant."""
device_type_l = device_type.lower()
sub_type_l = sub_type.lower() if sub_type else None
if paramset_key == PARAMSET_KEY_VALUES:
return True
if device_channel is not None and paramset_key == PARAMSET_KEY_MASTER:
for (
d_type,
channel_nos,
) in self._relevant_master_paramsets_by_device.items():
if device_channel in channel_nos and (
device_type_l == d_type.lower()
or (sub_type_l and sub_type_l == d_type.lower())
or device_type_l.startswith(d_type.lower())
):
return True
return False
async def load(self) -> None:
"""Load custom un ignore parameters from disk."""
def _load() -> None:
if not check_or_create_directory(self._storage_folder):
return
if not os.path.exists(
os.path.join(self._storage_folder, FILE_CUSTOM_UN_IGNORE_PARAMETERS)
):
_LOGGER.debug(
"load: No file found in %s",
self._storage_folder,
)
return
try:
with open(
file=os.path.join(
self._storage_folder,
FILE_CUSTOM_UN_IGNORE_PARAMETERS,
),
mode="r",
encoding=DEFAULT_ENCODING,
) as fptr:
for line in fptr.readlines():
self._add_line_to_cache(line)
except Exception as ex:
_LOGGER.warning(
"load: Could not read unignore file %s",
ex.args,
)
await self._central.async_add_executor_job(_load)
|
[
"hahomematic.helpers.check_or_create_directory",
"os.path.join",
"logging.getLogger"
] |
[((557, 584), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (574, 584), False, 'import logging\n'), ((15195, 15242), 'hahomematic.helpers.check_or_create_directory', 'check_or_create_directory', (['self._storage_folder'], {}), '(self._storage_folder)\n', (15220, 15242), False, 'from hahomematic.helpers import check_or_create_directory\n'), ((15318, 15386), 'os.path.join', 'os.path.join', (['self._storage_folder', 'FILE_CUSTOM_UN_IGNORE_PARAMETERS'], {}), '(self._storage_folder, FILE_CUSTOM_UN_IGNORE_PARAMETERS)\n', (15330, 15386), False, 'import os\n'), ((15635, 15703), 'os.path.join', 'os.path.join', (['self._storage_folder', 'FILE_CUSTOM_UN_IGNORE_PARAMETERS'], {}), '(self._storage_folder, FILE_CUSTOM_UN_IGNORE_PARAMETERS)\n', (15647, 15703), False, 'import os\n')]
|
from __future__ import division
import random
import pprint
import sys
import time
import numpy as np
from optparse import OptionParser
import pickle
import os
import re
import shutil
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from keras_frcnn import config, data_generators
from keras_frcnn import losses as losses
import keras_frcnn.roi_helpers as roi_helpers
from tensorflow.python.keras.utils import generic_utils
sys.setrecursionlimit(40000)
from tensorflow.python.ops.numpy_ops import np_config
np_config.enable_numpy_behavior()
# if Logs path directory exists, it will delete the directory
if os.path.exists('logs'):
shutil.rmtree('logs')
parser = OptionParser()
parser.add_option("-p", "--path", dest="train_path", help="Path to training data.")
parser.add_option("-v", "--valid_path", dest="valid_path", help="Path to validation data.")
parser.add_option("-o", "--parser", dest="parser",
help="Parser to use. One of simple or pascal_voc", default="pascal_voc")
parser.add_option("-n", "--num_rois", type="int", dest="num_rois",
help="Number of RoIs to process at once.", default=32)
parser.add_option("--network", dest="network",
help="Base network to use. Supports vgg or resnet50.", default='resnet50')
parser.add_option("--hf", dest="horizontal_flips",
help="Augment with horizontal flips in training. (Default=false).",
action="store_true", default=False)
parser.add_option("--vf", dest="vertical_flips",
help="Augment with vertical flips in training. (Default=false).",
action="store_true", default=False)
parser.add_option("--rot", "--rot_90", dest="rot_90",
help="Augment with 90 degree rotations in training. (Default=false).",
action="store_true", default=False)
parser.add_option("--num_epochs", type="int",
dest="num_epochs", help="Number of epochs.", default=2000)
parser.add_option("--config_filename", dest="config_filename",
help="Location to store all the metadata related to "
"the training (to be used when testing).",
default="config.pickle")
parser.add_option("--output_weight_path", dest="output_weight_path",
help="Output path for weights.", default='./model_frcnn.hdf5')
parser.add_option("--input_weight_path", dest="input_weight_path",
help="Input path for weights. If not specified, will try to"
" load default weights provided by keras.")
(options, args) = parser.parse_args()
if not options.train_path: # if filename is not given
parser.error('Error: path to training data must be specified. Pass --path to command line')
if options.parser == 'pascal_voc':
from keras_frcnn.pascal_voc_parser import get_data
elif options.parser == 'simple':
from keras_frcnn.simple_parser import get_data
else:
raise ValueError("Command line option parser must be one of 'pascal_voc' or 'simple'")
# pass the settings from the command line, and persist them in the config object
C = config.Config()
C.use_horizontal_flips = bool(options.horizontal_flips)
C.use_vertical_flips = bool(options.vertical_flips)
C.rot_90 = bool(options.rot_90)
C.model_path = options.output_weight_path
model_path_regex = re.match("^(.+)(\.hdf5)$", C.model_path)
if model_path_regex.group(2) != '.hdf5':
print('Output weights must have .hdf5 filetype')
exit(1)
C.num_rois = int(options.num_rois)
if options.network == 'vgg':
C.network = 'vgg'
from keras_frcnn import vgg as nn
elif options.network == 'resnet50':
from keras_frcnn import resnet as nn
C.network = 'resnet50'
else:
print('Not a valid model')
raise ValueError
# check if weight path was passed via command line
if options.input_weight_path:
C.base_net_weights = options.input_weight_path
else:
# set the path to weights based on backend and model
C.base_net_weights = nn.get_weight_path()
train_imgs, classes_count, class_mapping = get_data(options.train_path)
val_imgs, _, _ = get_data(options.valid_path)
if 'bg' not in classes_count:
classes_count['bg'] = 0
class_mapping['bg'] = len(class_mapping)
C.class_mapping = class_mapping
inv_map = {v: k for k, v in class_mapping.items()}
print('Training images per class:')
pprint.pprint(classes_count)
print(f'Num classes (including bg) = {len(classes_count)}')
config_output_filename = options.config_filename
with open(config_output_filename, 'wb') as config_f:
pickle.dump(C, config_f)
print(f'Config has been written to {config_output_filename}, '
f'and can be loaded when testing to ensure correct results')
num_imgs = len(train_imgs)
num_valid_imgs = len(val_imgs)
print(f'Num train samples {len(train_imgs)}')
print(f'Num val samples {len(val_imgs)}')
data_gen_train = data_generators.get_anchor_gt(train_imgs, classes_count, C,
nn.get_img_output_length,
K.image_data_format(), mode='train')
data_gen_val = data_generators.get_anchor_gt(val_imgs, classes_count, C, nn.get_img_output_length,
K.image_data_format(), mode='val')
if K.image_data_format() == 'channels_first':
input_shape_img = (3, None, None)
else:
input_shape_img = (None, None, 3)
img_input = Input(shape=input_shape_img)
roi_input = Input(shape=(None, 4))
shared_layers = nn.nn_base(img_input, trainable=True)
# define the RPN, built on the base layers
num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios)
rpn = nn.rpn(shared_layers, num_anchors)
classifier = nn.classifier(shared_layers, roi_input, C.num_rois,
nb_classes=len(classes_count), trainable=True)
model_rpn = Model(img_input, rpn[:2])
model_classifier = Model([img_input, roi_input], classifier)
# this is a model that holds both the RPN and the classifier,
# used to load/save weights for the models
model_all = Model([img_input, roi_input], rpn[:2] + classifier)
# Defining optimizers for all models
optimizer_rpn = Adam(learning_rate=1e-5)
optimizer_classifier = Adam(learning_rate=1e-5)
optimizer_all = SGD(learning_rate=0.01)
# Accuracy metrics for Fast RCNN model
train_classifier_metric = tf.keras.metrics.CategoricalAccuracy()
val_classifier_metric = tf.keras.metrics.CategoricalAccuracy()
# Loss function of RPN model and Fast RCNN model
rpn_class_loss_fn = losses.RpnClassificationLoss(num_anchors)
rpn_reg_loss_fn = losses.RpnRegressionLoss(num_anchors)
fast_rcnn_class_loss_fn = losses.FastrcnnClassLoss()
fast_rcnn_reg_loss_fn = losses.FastrcnnRegLoss(len(classes_count) - 1)
# tensorboard writer, automatically creates directory and writes logs
train_writer = tf.summary.create_file_writer('logs/train/')
valid_writer = tf.summary.create_file_writer('logs/valid/')
@tf.function
def rpn_train_step(step, x_batch_train, y_batch_train):
with tf.GradientTape() as rpn_tape:
y_rpn_cls_true, y_rpn_regr_true = y_batch_train
y_rpn_cls_pred, y_rpn_regr_pred = model_rpn(x_batch_train, training=True)
rpn_class_loss = rpn_class_loss_fn(y_rpn_cls_true, y_rpn_cls_pred)
rpn_reg_loss = rpn_reg_loss_fn(y_rpn_regr_true, y_rpn_regr_pred)
rpn_grads = rpn_tape.gradient([rpn_class_loss, rpn_reg_loss],
model_rpn.trainable_weights)
optimizer_rpn.apply_gradients(zip(rpn_grads, model_rpn.trainable_weights))
# write training loss and accuracy to the tensorboard
with train_writer.as_default():
tf.summary.scalar('rpn_class_loss', rpn_class_loss, step=step)
tf.summary.scalar('rpn_reg_loss', rpn_reg_loss, step=step)
return y_rpn_cls_pred, y_rpn_regr_pred, rpn_class_loss, rpn_reg_loss
@tf.function
def frcnn_train_step(step, x_batch_train, X2, Y1, Y2):
with tf.GradientTape() as frcnn_tape:
rcnn_class_pred, rcnn_reg_pred = model_classifier([x_batch_train, X2],
training=True)
fast_rcnn_class_loss = fast_rcnn_class_loss_fn(Y1, rcnn_class_pred)
fast_rcnn_reg_loss = fast_rcnn_reg_loss_fn(Y2, rcnn_reg_pred)
frcnn_grads = frcnn_tape.gradient([fast_rcnn_class_loss, fast_rcnn_reg_loss],
model_classifier.trainable_weights)
optimizer_classifier.apply_gradients(zip(frcnn_grads, model_classifier.trainable_weights))
train_classifier_metric.update_state(Y1, rcnn_class_pred)
fast_rcnn_class_acc = train_classifier_metric.result()
# write training loss and accuracy to the tensorboard
with train_writer.as_default():
tf.summary.scalar('fast_rcnn_class_loss', fast_rcnn_class_loss, step=step)
tf.summary.scalar('fast_rcnn_reg_loss', fast_rcnn_reg_loss, step=step)
tf.summary.scalar('fast_rcnn_class_acc', fast_rcnn_class_acc, step=step)
return fast_rcnn_class_loss, fast_rcnn_reg_loss, fast_rcnn_class_acc
@tf.function
def rpn_valid_step(step, x_batch_train, y_batch_train):
with tf.GradientTape() as rpn_tape:
y_rpn_cls_true, y_rpn_regr_true = y_batch_train
y_rpn_cls_pred, y_rpn_regr_pred = model_rpn(x_batch_train, training=False)
rpn_class_loss = rpn_class_loss_fn(y_rpn_cls_true, y_rpn_cls_pred)
rpn_reg_loss = rpn_reg_loss_fn(y_rpn_regr_true, y_rpn_regr_pred)
# write training loss and accuracy to the tensorboard
with valid_writer.as_default():
tf.summary.scalar('rpn_class_loss', rpn_class_loss, step=step)
tf.summary.scalar('rpn_reg_loss', rpn_reg_loss, step=step)
return y_rpn_cls_pred, y_rpn_regr_pred, rpn_class_loss, rpn_reg_loss
@tf.function
def frcnn_valid_step(step, x_batch_train, X2, Y1, Y2):
with tf.GradientTape() as frcnn_tape:
rcnn_class_pred, rcnn_reg_pred = model_classifier([x_batch_train, X2],
training=False)
fast_rcnn_class_loss = fast_rcnn_class_loss_fn(Y1, rcnn_class_pred)
fast_rcnn_reg_loss = fast_rcnn_reg_loss_fn(Y2, rcnn_reg_pred)
val_classifier_metric.update_state(Y1, rcnn_class_pred)
fast_rcnn_class_acc = val_classifier_metric.result()
# write training loss and accuracy to the tensorboard
with valid_writer.as_default():
tf.summary.scalar('fast_rcnn_class_loss', fast_rcnn_class_loss, step=step)
tf.summary.scalar('fast_rcnn_reg_loss', fast_rcnn_reg_loss, step=step)
tf.summary.scalar('fast_rcnn_class_acc', fast_rcnn_class_acc, step=step)
return fast_rcnn_class_loss, fast_rcnn_reg_loss, fast_rcnn_class_acc
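# Helper: split the Fast R-CNN targets into background (last class column == 1)
# and foreground RoIs, record how many positives the RPN produced, and return a
# roughly balanced selection of C.num_rois RoI indices for the detector head.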
def get_selected_samples(Y1, rpn_accuracy_rpn_monitor, rpn_accuracy_for_epoch):
neg_samples = np.where(Y1[0, :, -1] == 1)
pos_samples = np.where(Y1[0, :, -1] == 0)
if len(neg_samples) > 0:
neg_samples = neg_samples[0]
else:
neg_samples = []
if len(pos_samples) > 0:
pos_samples = pos_samples[0]
else:
pos_samples = []
rpn_accuracy_rpn_monitor.append(len(pos_samples))
rpn_accuracy_for_epoch.append((len(pos_samples)))
if C.num_rois > 1:
if len(pos_samples) < C.num_rois // 2:
selected_pos_samples = pos_samples.tolist()
else:
selected_pos_samples = np.random.choice(pos_samples, C.num_rois // 2,
replace=False).tolist()
try:
selected_neg_samples = np.random.choice(neg_samples,
C.num_rois - len(selected_pos_samples),
replace=False).tolist()
except:
selected_neg_samples = np.random.choice(neg_samples,
C.num_rois - len(selected_pos_samples),
replace=True).tolist()
sel_samples = selected_pos_samples + selected_neg_samples
else:
# in the extreme case where num_rois = 1, we pick a random pos or neg sample
selected_pos_samples = pos_samples.tolist()
selected_neg_samples = neg_samples.tolist()
if np.random.randint(0, 2):
sel_samples = random.choice(neg_samples)
else:
sel_samples = random.choice(pos_samples)
return sel_samples
n_epochs = options.num_epochs
BATCH_SIZE = 1
n_steps = num_imgs // BATCH_SIZE
n_valid_steps = num_valid_imgs // BATCH_SIZE
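# Per-step metric buffers; columns are [rpn_cls_loss, rpn_reg_loss, detector_cls_loss,
# detector_reg_loss, detector_cls_acc]. Note this reuses the name `losses`, shadowing
# the keras_frcnn losses module used above to build the loss functions.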
losses = np.zeros((n_steps, 5))
rpn_accuracy_rpn_monitor = []
rpn_accuracy_for_epoch = []
valid_losses = np.zeros((n_valid_steps, 5))
rpn_accuracy_rpn_monitor_valid = []
rpn_accuracy_for_epoch_valid = []
best_loss = np.inf
start_time = time.time()
class_mapping_inv = {v: k for k, v in class_mapping.items()}
global_step = tf.convert_to_tensor(0, tf.int64)
one_step = tf.convert_to_tensor(1, tf.int64)
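# Step counters are kept as int64 tensors so they can be passed straight into the
# tf.function train/valid steps and used as the tf.summary step value.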
print("Training started for %d epochs" % n_epochs)
for epoch in range(n_epochs):
print("\nStart of epoch %d" % (epoch + 1,))
progbar = generic_utils.Progbar(n_steps)
# Iterate over the batches of the dataset.
for step, (x_batch_train, y_batch_train, img_data) in enumerate(data_gen_train):
# print(step, img_data['filepath'])
y_rpn_cls_true, y_rpn_regr_true = y_batch_train
step = tf.cast(step, dtype=tf.int64)
global_step = tf.add(global_step, one_step)
y_rpn_cls_pred, y_rpn_regr_pred, rpn_class_loss, rpn_reg_loss = rpn_train_step(
global_step, x_batch_train, y_batch_train)
R = roi_helpers.rpn_to_roi(y_rpn_cls_pred, y_rpn_regr_pred, C, K.image_data_format(),
use_regr=True, overlap_thresh=0.7, max_boxes=300)
# note: calc_iou converts from (x1,y1,x2,y2) to (x,y,w,h) format
X2, Y1, Y2, IouS = roi_helpers.calc_iou(R, img_data, C, class_mapping)
if X2 is None:
rpn_accuracy_rpn_monitor.append(0)
rpn_accuracy_for_epoch.append(0)
continue
sel_samples = get_selected_samples(Y1, rpn_accuracy_rpn_monitor, rpn_accuracy_for_epoch)
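        # Slice out the sampled RoIs and their class/regression targets for this
        # Fast R-CNN (detector) update.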
x2_tensor = tf.convert_to_tensor(X2[:, sel_samples, :], tf.float32)
y1_tensor = tf.convert_to_tensor(Y1[:, sel_samples, :], tf.float32)
y2_tensor = tf.convert_to_tensor(Y2[:, sel_samples, :], tf.float32)
fast_rcnn_class_loss, fast_rcnn_reg_loss, fast_rcnn_class_acc = frcnn_train_step(
global_step, x_batch_train, x2_tensor, y1_tensor, y2_tensor)
losses[step, 0] = rpn_class_loss
losses[step, 1] = rpn_reg_loss
losses[step, 2] = fast_rcnn_class_loss
losses[step, 3] = fast_rcnn_reg_loss
losses[step, 4] = fast_rcnn_class_acc
progbar.update(step + 1,
[('rpn_cls', rpn_class_loss),
('rpn_regr', rpn_reg_loss),
('detector_cls', fast_rcnn_class_loss),
('detector_regr', fast_rcnn_reg_loss)])
if step == n_steps - 1 and C.verbose:
mean_overlapping_bboxes = float(sum(rpn_accuracy_rpn_monitor)
) / len(rpn_accuracy_rpn_monitor)
rpn_accuracy_rpn_monitor = []
print(f'\nAverage number of overlapping bounding boxes '
f'from RPN = {mean_overlapping_bboxes} for {step} previous iterations')
if mean_overlapping_bboxes == 0:
print('RPN is not producing bounding boxes that overlap the ground truth boxes.'
' Check RPN settings or keep training.')
loss_rpn_cls = np.mean(losses[:, 0])
loss_rpn_regr = np.mean(losses[:, 1])
loss_class_cls = np.mean(losses[:, 2])
loss_class_regr = np.mean(losses[:, 3])
class_acc = np.mean(losses[:, 4])
mean_overlapping_bboxes = float(sum(rpn_accuracy_for_epoch)) / len(
rpn_accuracy_for_epoch)
rpn_accuracy_for_epoch = []
if C.verbose:
print(
f'\nMean number of bounding boxes from RPN overlapping '
f'ground truth boxes: {mean_overlapping_bboxes}')
print(f'Classifier accuracy for bounding boxes from RPN: {class_acc}')
print(f'Loss RPN classifier: {loss_rpn_cls}')
print(f'Loss RPN regression: {loss_rpn_regr}')
print(f'Loss Detector classifier: {loss_class_cls}')
print(f'Loss Detector regression: {loss_class_regr}')
print(f'Elapsed time: {time.time() - start_time}')
curr_loss = loss_rpn_cls + loss_rpn_regr + loss_class_cls + loss_class_regr
print("Total Loss: %.4f" % curr_loss)
start_time = time.time()
if curr_loss < best_loss:
if C.verbose:
print(
f'Total loss decreased from {best_loss} to {curr_loss}, saving weights')
best_loss = curr_loss
model_all.save_weights(model_path_regex.group(1) + "_" + '{:04d}'.format(
epoch) + model_path_regex.group(2))
break
# # Log every 10 steps.
# if step % 10 == 0:
# print("Step %d, RPN Cls Loss: %.4f RPN reg Loss: %.4f "
# "FRCNN Cls Loss: %.4f FRCNN reg Loss: %.4f" % (
# step, float(rpn_class_loss), float(rpn_reg_loss), float(fast_rcnn_class_loss),
# float(fast_rcnn_reg_loss)))
# Reset training metrics at the end of each epoch
train_classifier_metric.reset_states()
progbar = generic_utils.Progbar(n_valid_steps)
# Iterate over the batches of the dataset.
for step, (x_batch_val, y_batch_val, img_data) in enumerate(data_gen_val):
y_rpn_cls_true, y_rpn_regr_true = y_batch_val
y_rpn_cls_pred, y_rpn_regr_pred, rpn_class_loss, rpn_reg_loss = rpn_valid_step(
global_step, x_batch_val, y_batch_val)
R = roi_helpers.rpn_to_roi(y_rpn_cls_pred, y_rpn_regr_pred, C, K.image_data_format(),
use_regr=True, overlap_thresh=0.7, max_boxes=300)
# note: calc_iou converts from (x1,y1,x2,y2) to (x,y,w,h) format
X2, Y1, Y2, IouS = roi_helpers.calc_iou(R, img_data, C, class_mapping)
if X2 is None:
rpn_accuracy_rpn_monitor_valid.append(0)
rpn_accuracy_for_epoch_valid.append(0)
continue
sel_samples = get_selected_samples(Y1, rpn_accuracy_rpn_monitor_valid,
rpn_accuracy_for_epoch_valid)
x2_tensor = tf.convert_to_tensor(X2[:, sel_samples, :], tf.float32)
y1_tensor = tf.convert_to_tensor(Y1[:, sel_samples, :], tf.float32)
y2_tensor = tf.convert_to_tensor(Y2[:, sel_samples, :], tf.float32)
fast_rcnn_class_loss, fast_rcnn_reg_loss, fast_rcnn_class_acc = frcnn_valid_step(
global_step, x_batch_val, x2_tensor, y1_tensor, y2_tensor)
valid_losses[step, 0] = rpn_class_loss
valid_losses[step, 1] = rpn_reg_loss
valid_losses[step, 2] = fast_rcnn_class_loss
valid_losses[step, 3] = fast_rcnn_reg_loss
valid_losses[step, 4] = fast_rcnn_class_acc
progbar.update(step + 1,
[('rpn_cls', rpn_class_loss),
('rpn_regr', rpn_reg_loss),
('detector_cls', fast_rcnn_class_loss),
('detector_regr', fast_rcnn_reg_loss)])
if step == n_valid_steps - 1 and C.verbose:
mean_overlapping_bboxes = float(sum(rpn_accuracy_rpn_monitor_valid)
) / len(rpn_accuracy_for_epoch_valid)
rpn_accuracy_rpn_monitor_valid = []
print(f'\nValidation: Average number of overlapping bounding boxes '
f'from RPN = {mean_overlapping_bboxes}')
if mean_overlapping_bboxes == 0:
print('RPN is not producing bounding boxes that overlap the ground truth boxes.'
' Check RPN settings or keep training.')
loss_rpn_cls = np.mean(valid_losses[:, 0])
loss_rpn_regr = np.mean(valid_losses[:, 1])
loss_class_cls = np.mean(valid_losses[:, 2])
loss_class_regr = np.mean(valid_losses[:, 3])
class_acc = np.mean(valid_losses[:, 4])
mean_overlapping_bboxes = float(sum(rpn_accuracy_for_epoch_valid)
) / len(rpn_accuracy_for_epoch_valid)
rpn_accuracy_for_epoch_valid = []
if C.verbose:
print("Validation Metrics: ")
print(
f'Mean number of bounding boxes from RPN overlapping '
f'ground truth boxes: {mean_overlapping_bboxes}')
print(f'Classifier accuracy for bounding boxes from RPN: {class_acc}')
print(f'Loss RPN classifier: {loss_rpn_cls}')
print(f'Loss RPN regression: {loss_rpn_regr}')
print(f'Loss Detector classifier: {loss_class_cls}')
print(f'Loss Detector regression: {loss_class_regr}')
print(f'Elapsed time: {time.time() - start_time}')
curr_loss = loss_rpn_cls + loss_rpn_regr + loss_class_cls + loss_class_regr
print("Total validation loss: %.4f" % curr_loss)
start_time = time.time()
break
val_classifier_metric.reset_states()
|
[
"pickle.dump",
"optparse.OptionParser",
"tensorflow.keras.metrics.CategoricalAccuracy",
"tensorflow.python.ops.numpy_ops.np_config.enable_numpy_behavior",
"tensorflow.keras.optimizers.SGD",
"numpy.random.randint",
"numpy.mean",
"pprint.pprint",
"keras_frcnn.losses.RpnClassificationLoss",
"keras_frcnn.simple_parser.get_data",
"shutil.rmtree",
"sys.setrecursionlimit",
"os.path.exists",
"keras_frcnn.losses.RpnRegressionLoss",
"tensorflow.cast",
"tensorflow.python.keras.utils.generic_utils.Progbar",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.Input",
"numpy.random.choice",
"keras_frcnn.roi_helpers.calc_iou",
"tensorflow.summary.scalar",
"tensorflow.add",
"re.match",
"tensorflow.keras.models.Model",
"keras_frcnn.config.Config",
"tensorflow.keras.backend.image_data_format",
"tensorflow.convert_to_tensor",
"keras_frcnn.losses.FastrcnnClassLoss",
"numpy.zeros",
"keras_frcnn.resnet.get_weight_path",
"time.time",
"random.choice",
"numpy.where",
"keras_frcnn.resnet.nn_base",
"keras_frcnn.resnet.rpn",
"tensorflow.summary.create_file_writer",
"tensorflow.GradientTape"
] |
[((578, 606), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(40000)'], {}), '(40000)\n', (599, 606), False, 'import sys\n'), ((663, 696), 'tensorflow.python.ops.numpy_ops.np_config.enable_numpy_behavior', 'np_config.enable_numpy_behavior', ([], {}), '()\n', (694, 696), False, 'from tensorflow.python.ops.numpy_ops import np_config\n'), ((763, 785), 'os.path.exists', 'os.path.exists', (['"""logs"""'], {}), "('logs')\n", (777, 785), False, 'import os\n'), ((823, 837), 'optparse.OptionParser', 'OptionParser', ([], {}), '()\n', (835, 837), False, 'from optparse import OptionParser\n'), ((3291, 3306), 'keras_frcnn.config.Config', 'config.Config', ([], {}), '()\n', (3304, 3306), False, 'from keras_frcnn import config, data_generators\n'), ((3510, 3551), 're.match', 're.match', (['"""^(.+)(\\\\.hdf5)$"""', 'C.model_path'], {}), "('^(.+)(\\\\.hdf5)$', C.model_path)\n", (3518, 3551), False, 'import re\n'), ((4231, 4259), 'keras_frcnn.simple_parser.get_data', 'get_data', (['options.train_path'], {}), '(options.train_path)\n', (4239, 4259), False, 'from keras_frcnn.simple_parser import get_data\n'), ((4277, 4305), 'keras_frcnn.simple_parser.get_data', 'get_data', (['options.valid_path'], {}), '(options.valid_path)\n', (4285, 4305), False, 'from keras_frcnn.simple_parser import get_data\n'), ((4532, 4560), 'pprint.pprint', 'pprint.pprint', (['classes_count'], {}), '(classes_count)\n', (4545, 4560), False, 'import pprint\n'), ((5595, 5623), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': 'input_shape_img'}), '(shape=input_shape_img)\n', (5600, 5623), False, 'from tensorflow.keras.layers import Input\n'), ((5636, 5658), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(None, 4)'}), '(shape=(None, 4))\n', (5641, 5658), False, 'from tensorflow.keras.layers import Input\n'), ((5676, 5713), 'keras_frcnn.resnet.nn_base', 'nn.nn_base', (['img_input'], {'trainable': '(True)'}), '(img_input, trainable=True)\n', (5686, 5713), True, 'from keras_frcnn import resnet as nn\n'), ((5830, 5864), 'keras_frcnn.resnet.rpn', 'nn.rpn', (['shared_layers', 'num_anchors'], {}), '(shared_layers, num_anchors)\n', (5836, 5864), True, 'from keras_frcnn import resnet as nn\n'), ((6017, 6042), 'tensorflow.keras.models.Model', 'Model', (['img_input', 'rpn[:2]'], {}), '(img_input, rpn[:2])\n', (6022, 6042), False, 'from tensorflow.keras.models import Model\n'), ((6062, 6103), 'tensorflow.keras.models.Model', 'Model', (['[img_input, roi_input]', 'classifier'], {}), '([img_input, roi_input], classifier)\n', (6067, 6103), False, 'from tensorflow.keras.models import Model\n'), ((6222, 6273), 'tensorflow.keras.models.Model', 'Model', (['[img_input, roi_input]', '(rpn[:2] + classifier)'], {}), '([img_input, roi_input], rpn[:2] + classifier)\n', (6227, 6273), False, 'from tensorflow.keras.models import Model\n'), ((6328, 6353), 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'learning_rate': '(1e-05)'}), '(learning_rate=1e-05)\n', (6332, 6353), False, 'from tensorflow.keras.optimizers import Adam, SGD\n'), ((6376, 6401), 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'learning_rate': '(1e-05)'}), '(learning_rate=1e-05)\n', (6380, 6401), False, 'from tensorflow.keras.optimizers import Adam, SGD\n'), ((6417, 6440), 'tensorflow.keras.optimizers.SGD', 'SGD', ([], {'learning_rate': '(0.01)'}), '(learning_rate=0.01)\n', (6420, 6440), False, 'from tensorflow.keras.optimizers import Adam, SGD\n'), ((6507, 6545), 'tensorflow.keras.metrics.CategoricalAccuracy', 'tf.keras.metrics.CategoricalAccuracy', ([], {}), '()\n', 
(6543, 6545), True, 'import tensorflow as tf\n'), ((6570, 6608), 'tensorflow.keras.metrics.CategoricalAccuracy', 'tf.keras.metrics.CategoricalAccuracy', ([], {}), '()\n', (6606, 6608), True, 'import tensorflow as tf\n'), ((6679, 6720), 'keras_frcnn.losses.RpnClassificationLoss', 'losses.RpnClassificationLoss', (['num_anchors'], {}), '(num_anchors)\n', (6707, 6720), True, 'from keras_frcnn import losses as losses\n'), ((6739, 6776), 'keras_frcnn.losses.RpnRegressionLoss', 'losses.RpnRegressionLoss', (['num_anchors'], {}), '(num_anchors)\n', (6763, 6776), True, 'from keras_frcnn import losses as losses\n'), ((6803, 6829), 'keras_frcnn.losses.FastrcnnClassLoss', 'losses.FastrcnnClassLoss', ([], {}), '()\n', (6827, 6829), True, 'from keras_frcnn import losses as losses\n'), ((6987, 7031), 'tensorflow.summary.create_file_writer', 'tf.summary.create_file_writer', (['"""logs/train/"""'], {}), "('logs/train/')\n", (7016, 7031), True, 'import tensorflow as tf\n'), ((7047, 7091), 'tensorflow.summary.create_file_writer', 'tf.summary.create_file_writer', (['"""logs/valid/"""'], {}), "('logs/valid/')\n", (7076, 7091), True, 'import tensorflow as tf\n'), ((12710, 12732), 'numpy.zeros', 'np.zeros', (['(n_steps, 5)'], {}), '((n_steps, 5))\n', (12718, 12732), True, 'import numpy as np\n'), ((12807, 12835), 'numpy.zeros', 'np.zeros', (['(n_valid_steps, 5)'], {}), '((n_valid_steps, 5))\n', (12815, 12835), True, 'import numpy as np\n'), ((12939, 12950), 'time.time', 'time.time', ([], {}), '()\n', (12948, 12950), False, 'import time\n'), ((13028, 13061), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['(0)', 'tf.int64'], {}), '(0, tf.int64)\n', (13048, 13061), True, 'import tensorflow as tf\n'), ((13073, 13106), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['(1)', 'tf.int64'], {}), '(1, tf.int64)\n', (13093, 13106), True, 'import tensorflow as tf\n'), ((791, 812), 'shutil.rmtree', 'shutil.rmtree', (['"""logs"""'], {}), "('logs')\n", (804, 812), False, 'import shutil\n'), ((4166, 4186), 'keras_frcnn.resnet.get_weight_path', 'nn.get_weight_path', ([], {}), '()\n', (4184, 4186), True, 'from keras_frcnn import resnet as nn\n'), ((4728, 4752), 'pickle.dump', 'pickle.dump', (['C', 'config_f'], {}), '(C, config_f)\n', (4739, 4752), False, 'import pickle\n'), ((5237, 5258), 'tensorflow.keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (5256, 5258), True, 'from tensorflow.keras import backend as K\n'), ((5418, 5439), 'tensorflow.keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (5437, 5439), True, 'from tensorflow.keras import backend as K\n'), ((5457, 5478), 'tensorflow.keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (5476, 5478), True, 'from tensorflow.keras import backend as K\n'), ((10949, 10976), 'numpy.where', 'np.where', (['(Y1[0, :, -1] == 1)'], {}), '(Y1[0, :, -1] == 1)\n', (10957, 10976), True, 'import numpy as np\n'), ((10995, 11022), 'numpy.where', 'np.where', (['(Y1[0, :, -1] == 0)'], {}), '(Y1[0, :, -1] == 0)\n', (11003, 11022), True, 'import numpy as np\n'), ((13251, 13281), 'tensorflow.python.keras.utils.generic_utils.Progbar', 'generic_utils.Progbar', (['n_steps'], {}), '(n_steps)\n', (13272, 13281), False, 'from tensorflow.python.keras.utils import generic_utils\n'), ((17862, 17898), 'tensorflow.python.keras.utils.generic_utils.Progbar', 'generic_utils.Progbar', (['n_valid_steps'], {}), '(n_valid_steps)\n', (17883, 17898), False, 'from tensorflow.python.keras.utils import generic_utils\n'), ((7172, 
7189), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (7187, 7189), True, 'import tensorflow as tf\n'), ((7802, 7864), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""rpn_class_loss"""', 'rpn_class_loss'], {'step': 'step'}), "('rpn_class_loss', rpn_class_loss, step=step)\n", (7819, 7864), True, 'import tensorflow as tf\n'), ((7873, 7931), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""rpn_reg_loss"""', 'rpn_reg_loss'], {'step': 'step'}), "('rpn_reg_loss', rpn_reg_loss, step=step)\n", (7890, 7931), True, 'import tensorflow as tf\n'), ((8085, 8102), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (8100, 8102), True, 'import tensorflow as tf\n'), ((8893, 8967), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""fast_rcnn_class_loss"""', 'fast_rcnn_class_loss'], {'step': 'step'}), "('fast_rcnn_class_loss', fast_rcnn_class_loss, step=step)\n", (8910, 8967), True, 'import tensorflow as tf\n'), ((8976, 9046), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""fast_rcnn_reg_loss"""', 'fast_rcnn_reg_loss'], {'step': 'step'}), "('fast_rcnn_reg_loss', fast_rcnn_reg_loss, step=step)\n", (8993, 9046), True, 'import tensorflow as tf\n'), ((9055, 9127), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""fast_rcnn_class_acc"""', 'fast_rcnn_class_acc'], {'step': 'step'}), "('fast_rcnn_class_acc', fast_rcnn_class_acc, step=step)\n", (9072, 9127), True, 'import tensorflow as tf\n'), ((9282, 9299), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (9297, 9299), True, 'import tensorflow as tf\n'), ((9704, 9766), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""rpn_class_loss"""', 'rpn_class_loss'], {'step': 'step'}), "('rpn_class_loss', rpn_class_loss, step=step)\n", (9721, 9766), True, 'import tensorflow as tf\n'), ((9775, 9833), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""rpn_reg_loss"""', 'rpn_reg_loss'], {'step': 'step'}), "('rpn_reg_loss', rpn_reg_loss, step=step)\n", (9792, 9833), True, 'import tensorflow as tf\n'), ((9987, 10004), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (10002, 10004), True, 'import tensorflow as tf\n'), ((10540, 10614), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""fast_rcnn_class_loss"""', 'fast_rcnn_class_loss'], {'step': 'step'}), "('fast_rcnn_class_loss', fast_rcnn_class_loss, step=step)\n", (10557, 10614), True, 'import tensorflow as tf\n'), ((10623, 10693), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""fast_rcnn_reg_loss"""', 'fast_rcnn_reg_loss'], {'step': 'step'}), "('fast_rcnn_reg_loss', fast_rcnn_reg_loss, step=step)\n", (10640, 10693), True, 'import tensorflow as tf\n'), ((10702, 10774), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""fast_rcnn_class_acc"""', 'fast_rcnn_class_acc'], {'step': 'step'}), "('fast_rcnn_class_acc', fast_rcnn_class_acc, step=step)\n", (10719, 10774), True, 'import tensorflow as tf\n'), ((12406, 12429), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (12423, 12429), True, 'import numpy as np\n'), ((13531, 13560), 'tensorflow.cast', 'tf.cast', (['step'], {'dtype': 'tf.int64'}), '(step, dtype=tf.int64)\n', (13538, 13560), True, 'import tensorflow as tf\n'), ((13583, 13612), 'tensorflow.add', 'tf.add', (['global_step', 'one_step'], {}), '(global_step, one_step)\n', (13589, 13612), True, 'import tensorflow as tf\n'), ((14036, 14087), 'keras_frcnn.roi_helpers.calc_iou', 'roi_helpers.calc_iou', (['R', 'img_data', 'C', 'class_mapping'], {}), '(R, img_data, C, class_mapping)\n', 
(14056, 14087), True, 'import keras_frcnn.roi_helpers as roi_helpers\n'), ((14344, 14399), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['X2[:, sel_samples, :]', 'tf.float32'], {}), '(X2[:, sel_samples, :], tf.float32)\n', (14364, 14399), True, 'import tensorflow as tf\n'), ((14420, 14475), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['Y1[:, sel_samples, :]', 'tf.float32'], {}), '(Y1[:, sel_samples, :], tf.float32)\n', (14440, 14475), True, 'import tensorflow as tf\n'), ((14496, 14551), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['Y2[:, sel_samples, :]', 'tf.float32'], {}), '(Y2[:, sel_samples, :], tf.float32)\n', (14516, 14551), True, 'import tensorflow as tf\n'), ((18500, 18551), 'keras_frcnn.roi_helpers.calc_iou', 'roi_helpers.calc_iou', (['R', 'img_data', 'C', 'class_mapping'], {}), '(R, img_data, C, class_mapping)\n', (18520, 18551), True, 'import keras_frcnn.roi_helpers as roi_helpers\n'), ((18875, 18930), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['X2[:, sel_samples, :]', 'tf.float32'], {}), '(X2[:, sel_samples, :], tf.float32)\n', (18895, 18930), True, 'import tensorflow as tf\n'), ((18951, 19006), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['Y1[:, sel_samples, :]', 'tf.float32'], {}), '(Y1[:, sel_samples, :], tf.float32)\n', (18971, 19006), True, 'import tensorflow as tf\n'), ((19027, 19082), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['Y2[:, sel_samples, :]', 'tf.float32'], {}), '(Y2[:, sel_samples, :], tf.float32)\n', (19047, 19082), True, 'import tensorflow as tf\n'), ((12457, 12483), 'random.choice', 'random.choice', (['neg_samples'], {}), '(neg_samples)\n', (12470, 12483), False, 'import random\n'), ((12524, 12550), 'random.choice', 'random.choice', (['pos_samples'], {}), '(pos_samples)\n', (12537, 12550), False, 'import random\n'), ((13828, 13849), 'tensorflow.keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (13847, 13849), True, 'from tensorflow.keras import backend as K\n'), ((15836, 15857), 'numpy.mean', 'np.mean', (['losses[:, 0]'], {}), '(losses[:, 0])\n', (15843, 15857), True, 'import numpy as np\n'), ((15886, 15907), 'numpy.mean', 'np.mean', (['losses[:, 1]'], {}), '(losses[:, 1])\n', (15893, 15907), True, 'import numpy as np\n'), ((15937, 15958), 'numpy.mean', 'np.mean', (['losses[:, 2]'], {}), '(losses[:, 2])\n', (15944, 15958), True, 'import numpy as np\n'), ((15989, 16010), 'numpy.mean', 'np.mean', (['losses[:, 3]'], {}), '(losses[:, 3])\n', (15996, 16010), True, 'import numpy as np\n'), ((16035, 16056), 'numpy.mean', 'np.mean', (['losses[:, 4]'], {}), '(losses[:, 4])\n', (16042, 16056), True, 'import numpy as np\n'), ((16997, 17008), 'time.time', 'time.time', ([], {}), '()\n', (17006, 17008), False, 'import time\n'), ((18291, 18312), 'tensorflow.keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (18310, 18312), True, 'from tensorflow.keras import backend as K\n'), ((20398, 20425), 'numpy.mean', 'np.mean', (['valid_losses[:, 0]'], {}), '(valid_losses[:, 0])\n', (20405, 20425), True, 'import numpy as np\n'), ((20454, 20481), 'numpy.mean', 'np.mean', (['valid_losses[:, 1]'], {}), '(valid_losses[:, 1])\n', (20461, 20481), True, 'import numpy as np\n'), ((20511, 20538), 'numpy.mean', 'np.mean', (['valid_losses[:, 2]'], {}), '(valid_losses[:, 2])\n', (20518, 20538), True, 'import numpy as np\n'), ((20569, 20596), 'numpy.mean', 'np.mean', (['valid_losses[:, 3]'], {}), '(valid_losses[:, 3])\n', (20576, 20596), True, 'import numpy as np\n'), 
((20621, 20648), 'numpy.mean', 'np.mean', (['valid_losses[:, 4]'], {}), '(valid_losses[:, 4])\n', (20628, 20648), True, 'import numpy as np\n'), ((21690, 21701), 'time.time', 'time.time', ([], {}), '()\n', (21699, 21701), False, 'import time\n'), ((11512, 11573), 'numpy.random.choice', 'np.random.choice', (['pos_samples', '(C.num_rois // 2)'], {'replace': '(False)'}), '(pos_samples, C.num_rois // 2, replace=False)\n', (11528, 11573), True, 'import numpy as np\n'), ((16805, 16816), 'time.time', 'time.time', ([], {}), '()\n', (16814, 16816), False, 'import time\n'), ((21487, 21498), 'time.time', 'time.time', ([], {}), '()\n', (21496, 21498), False, 'import time\n')]
|
from collections import namedtuple
GoogleDriveFile = namedtuple('GoogleDriveFile', ['google_drive_id', 'local_name'])
class Constant:
BACKEND = 'torch'
# Data
VALIDATION_SET_SIZE = 0.08333
CUTOUT_HOLES = 1
CUTOUT_RATIO = 0.5
# Searcher
MAX_MODEL_NUM = 1000
BETA = 2.576
KERNEL_LAMBDA = 1.0
T_MIN = 0.0001
N_NEIGHBOURS = 8
MAX_MODEL_SIZE = (1 << 25)
MAX_LAYER_WIDTH = 4096
MAX_LAYERS = 200
# Grid Dimensions
LENGTH_DIM = 0
WIDTH_DIM = 1
# Default Search Space
DEFAULT_LENGTH_SEARCH = [50, 75, 100]
DEFAULT_WIDTH_SEARCH = [64, 128, 256]
# Model Defaults
DENSE_DROPOUT_RATE = 0.5
CONV_DROPOUT_RATE = 0.25
MLP_DROPOUT_RATE = 0.25
CONV_BLOCK_DISTANCE = 2
DENSE_BLOCK_DISTANCE = 1
MODEL_LEN = 3
MLP_MODEL_LEN = 3
MLP_MODEL_WIDTH = 5
MODEL_WIDTH = 64
POOLING_KERNEL_SIZE = 2
# ModelTrainer
DATA_AUGMENTATION = True
MAX_ITER_NUM = 200
MIN_LOSS_DEC = 1e-4
MAX_NO_IMPROVEMENT_NUM = 5
MAX_BATCH_SIZE = 128
LIMIT_MEMORY = False
SEARCH_MAX_ITER = 200
# Text Classifier
BERT_TRAINER_EPOCHS = 4
BERT_TRAINER_BATCH_SIZE = 32
# text preprocessor
EMBEDDING_DIM = 100
MAX_SEQUENCE_LENGTH = 400
MAX_NB_WORDS = 5000
EXTRACT_PATH = "glove/"
STORE_PATH = ''
# Download file name
PRETRAINED_VOCAB_BERT_BASE_UNCASED = \
GoogleDriveFile(google_drive_id='1hlPkUSPeT5ZQBYZ1Z734BbnHIvpx2ZLj', local_name='vbbu.txt')
PRETRAINED_VOCAB_BERT_BASE_CASED = \
GoogleDriveFile(google_drive_id='1FLytUhOIF0mTfA4A9MtE3aQ1kJr96oTR', local_name='vbbc.txt')
PRETRAINED_MODEL_BERT_BASE_UNCASED = \
GoogleDriveFile(google_drive_id='1rp1rVBoQwqgvg-JE8JwLL-adgLE07oTG', local_name='mbbu.pth')
PRETRAINED_MODEL_BERT_BASE_CASED = \
GoogleDriveFile(google_drive_id='1YKoGj-e4zoyTabt5dYpgEPe-PAmjOTDV', local_name='mbbc.pth')
# Image Resize
MAX_IMAGE_SIZE = 128 * 128
# SYS Constant
SYS_LINUX = 'linux'
SYS_WINDOWS = 'windows'
SYS_GOOGLE_COLAB = 'goog_colab'
# Google drive downloader
CHUNK_SIZE = 32768
DOWNLOAD_URL = "https://docs.google.com/uc?export=download"
|
[
"collections.namedtuple"
] |
[((54, 118), 'collections.namedtuple', 'namedtuple', (['"""GoogleDriveFile"""', "['google_drive_id', 'local_name']"], {}), "('GoogleDriveFile', ['google_drive_id', 'local_name'])\n", (64, 118), False, 'from collections import namedtuple\n')]
|
# -*- coding: utf-8 -*-
"""The Virtual File System (VFS) file entry object interface.
The file entry can be various file system elements like a regular file,
a directory or file system metadata.
"""
import abc
from dfvfs.resolver import resolver
class Directory(object):
"""Class that implements the VFS directory object interface."""
def __init__(self, file_system, path_spec):
"""Initializes the directory object.
Args:
file_system: the file system object (instance of vfs.FileSystem).
path_spec: the path specification object (instance of path.PathSpec).
"""
super(Directory, self).__init__()
self._entries = None
self._file_system = file_system
self.path_spec = path_spec
@abc.abstractmethod
def _EntriesGenerator(self):
"""Retrieves directory entries.
Since a directory can contain a vast number of entries using
a generator is more memory efficient.
Yields:
A path specification (instance of path.PathSpec).
"""
@property
def entries(self):
"""The entries (generator of instance of path.OSPathSpec)."""
for entry in self._EntriesGenerator():
yield entry
class FileEntry(object):
"""Class that implements the VFS file entry object interface."""
def __init__(
self, resolver_context, file_system, path_spec, is_root=False,
is_virtual=False):
"""Initializes the file entry object.
Args:
resolver_context: the resolver context (instance of resolver.Context).
file_system: the file system object (instance of vfs.FileSystem).
path_spec: the path specification object (instance of path.PathSpec).
is_root: optional boolean value to indicate if the file entry is
the root file entry of the corresponding file system.
The default is False.
is_virtual: optional boolean value to indicate if the file entry is
a virtual file entry emulated by the corresponding file
system. The default is False.
"""
super(FileEntry, self).__init__()
self._directory = None
self._file_system = file_system
self._is_root = is_root
self._is_virtual = is_virtual
self._resolver_context = resolver_context
self._stat_object = None
self.path_spec = path_spec
self._file_system.Open(path_spec=path_spec)
def __del__(self):
"""Cleans up the file entry object."""
self._file_system.Close()
self._file_system = None
@abc.abstractmethod
def _GetDirectory(self):
"""Retrieves the directory object (instance of vfs.Directory)."""
@abc.abstractmethod
def _GetStat(self):
"""Retrieves the stat object (instance of vfs.VFSStat)."""
@property
def link(self):
"""The full path of the linked file entry."""
return u''
@abc.abstractproperty
def name(self):
"""The name of the file entry, which does not include the full path."""
@property
def number_of_sub_file_entries(self):
"""The number of sub file entries."""
if self._directory is None:
self._directory = self._GetDirectory()
if self._directory is None:
return 0
# We cannot use len(self._directory.entries) since entries is a generator.
return sum(1 for path_spec in self._directory.entries)
@abc.abstractproperty
def sub_file_entries(self):
"""The sub file entries (generator of instance of vfs.FileEntry)."""
@property
def type_indicator(self):
"""The type indicator."""
type_indicator = getattr(self, u'TYPE_INDICATOR', None)
if type_indicator is None:
raise NotImplementedError(
u'Invalid file system missing type indicator.')
return type_indicator
def GetFileObject(self):
"""Retrieves the file-like object (instance of file_io.FileIO)."""
return resolver.Resolver.OpenFileObject(
self.path_spec, resolver_context=self._resolver_context)
def GetFileSystem(self):
"""Retrieves the file system (instance of vfs.FileSystem)."""
return self._file_system
def GetLinkedFileEntry(self):
"""Retrieves the linked file entry, e.g. for a symbolic link."""
return
@abc.abstractmethod
def GetParentFileEntry(self):
"""Retrieves the parent file entry."""
def GetSubFileEntryByName(self, name, case_sensitive=True):
"""Retrieves a sub file entry by name."""
name_lower = name.lower()
matching_sub_file_entry = None
for sub_file_entry in self.sub_file_entries:
if sub_file_entry.name == name:
return sub_file_entry
if not case_sensitive and sub_file_entry.name.lower() == name_lower:
if not matching_sub_file_entry:
matching_sub_file_entry = sub_file_entry
return matching_sub_file_entry
def GetStat(self):
"""Retrieves the stat object (instance of vfs.VFSStat)."""
if self._stat_object is None:
self._stat_object = self._GetStat()
return self._stat_object
def IsAllocated(self):
"""Determines if the file entry is allocated."""
if self._stat_object is None:
self._stat_object = self._GetStat()
return self._stat_object.is_allocated
def IsDevice(self):
"""Determines if the file entry is a device."""
if self._stat_object is None:
self._stat_object = self._GetStat()
return self._stat_object.type == self._stat_object.TYPE_DEVICE
def IsDirectory(self):
"""Determines if the file entry is a directory."""
if self._stat_object is None:
self._stat_object = self._GetStat()
return self._stat_object.type == self._stat_object.TYPE_DIRECTORY
def IsFile(self):
"""Determines if the file entry is a file."""
if self._stat_object is None:
self._stat_object = self._GetStat()
return self._stat_object.type == self._stat_object.TYPE_FILE
def IsLink(self):
"""Determines if the file entry is a link."""
if self._stat_object is None:
self._stat_object = self._GetStat()
return self._stat_object.type == self._stat_object.TYPE_LINK
def IsPipe(self):
"""Determines if the file entry is a pipe."""
if self._stat_object is None:
self._stat_object = self._GetStat()
return self._stat_object.type == self._stat_object.TYPE_PIPE
def IsRoot(self):
"""Determines if the file entry is the root file entry."""
return self._is_root
def IsSocket(self):
"""Determines if the file entry is a socket."""
if self._stat_object is None:
self._stat_object = self._GetStat()
return self._stat_object.type == self._stat_object.TYPE_SOCKET
def IsVirtual(self):
"""Determines if the file entry is virtual (emulated by dfVFS)."""
return self._is_virtual
|
[
"dfvfs.resolver.resolver.Resolver.OpenFileObject"
] |
[((3794, 3888), 'dfvfs.resolver.resolver.Resolver.OpenFileObject', 'resolver.Resolver.OpenFileObject', (['self.path_spec'], {'resolver_context': 'self._resolver_context'}), '(self.path_spec, resolver_context=self.\n _resolver_context)\n', (3826, 3888), False, 'from dfvfs.resolver import resolver\n')]
|
from rest_framework import serializers
from django.utils.translation import ugettext as _
from django.utils.translation import ungettext
from misago.acl import add_acl
from misago.conf import settings
from misago.threads.serializers import AttachmentSerializer
from . import PostingEndpoint, PostingMiddleware
class AttachmentsMiddleware(PostingMiddleware):
def use_this_middleware(self):
return bool(self.user.acl_cache['max_attachment_size'])
def get_serializer(self):
return AttachmentsSerializer(
data=self.request.data,
context={
'mode': self.mode,
'user': self.user,
'post': self.post,
}
)
def save(self, serializer):
serializer.save()
class AttachmentsSerializer(serializers.Serializer):
attachments = serializers.ListField(child=serializers.IntegerField(), required=False)
def validate_attachments(self, ids):
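        # update_attachments: True when the post's attachment set actually changes,
        # removed_attachments: existing attachments the poster is dropping,
        # final_attachments: attachments that should remain on the post.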
self.update_attachments = False
self.removed_attachments = []
self.final_attachments = []
ids = list(set(ids))
validate_attachments_count(ids)
attachments = self.get_initial_attachments(
self.context['mode'], self.context['user'], self.context['post']
)
new_attachments = self.get_new_attachments(self.context['user'], ids)
if not attachments and not new_attachments:
return [] # no attachments
# clean existing attachments
for attachment in attachments:
if attachment.pk in ids:
self.final_attachments.append(attachment)
else:
if attachment.acl['can_delete']:
self.update_attachments = True
self.removed_attachments.append(attachment)
else:
message = _(
"You don't have permission to remove \"%(attachment)s\" attachment."
)
raise serializers.ValidationError(
message % {'attachment': attachment.filename}
)
if new_attachments:
self.update_attachments = True
self.final_attachments += new_attachments
self.final_attachments.sort(key=lambda a: a.pk, reverse=True)
def get_initial_attachments(self, mode, user, post):
attachments = []
if mode == PostingEndpoint.EDIT:
queryset = post.attachment_set.select_related('filetype')
attachments = list(queryset)
add_acl(user, attachments)
return attachments
def get_new_attachments(self, user, ids):
if not ids:
return []
queryset = user.attachment_set.select_related('filetype').filter(
post__isnull=True,
id__in=ids,
)
return list(queryset)
def save(self):
if not self.update_attachments:
return
if self.removed_attachments:
for attachment in self.removed_attachments:
attachment.delete_files()
self.context['post'].attachment_set.filter(
id__in=[a.id for a in self.removed_attachments]
).delete()
if self.final_attachments:
# sort final attachments by id, descending
self.final_attachments.sort(key=lambda a: a.pk, reverse=True)
self.context['user'].attachment_set.filter(
id__in=[a.id for a in self.final_attachments]
).update(post=self.context['post'])
self.sync_attachments_cache(self.context['post'], self.final_attachments)
def sync_attachments_cache(self, post, attachments):
if attachments:
post.attachments_cache = AttachmentSerializer(attachments, many=True).data
for attachment in post.attachments_cache:
del attachment['acl']
del attachment['post']
del attachment['uploader_ip']
else:
post.attachments_cache = None
post.update_fields.append('attachments_cache')
def validate_attachments_count(data):
total_attachments = len(data)
if total_attachments > settings.MISAGO_POST_ATTACHMENTS_LIMIT:
message = ungettext(
"You can't attach more than %(limit_value)s file to single post (added %(show_value)s).",
"You can't attach more than %(limit_value)s flies to single post (added %(show_value)s).",
settings.MISAGO_POST_ATTACHMENTS_LIMIT,
)
raise serializers.ValidationError(
message % {
'limit_value': settings.MISAGO_POST_ATTACHMENTS_LIMIT,
'show_value': total_attachments,
}
)
|
[
"django.utils.translation.ugettext",
"django.utils.translation.ungettext",
"rest_framework.serializers.IntegerField",
"misago.acl.add_acl",
"misago.threads.serializers.AttachmentSerializer",
"rest_framework.serializers.ValidationError"
] |
[((4282, 4531), 'django.utils.translation.ungettext', 'ungettext', (['"""You can\'t attach more than %(limit_value)s file to single post (added %(show_value)s)."""', '"""You can\'t attach more than %(limit_value)s flies to single post (added %(show_value)s)."""', 'settings.MISAGO_POST_ATTACHMENTS_LIMIT'], {}), '(\n "You can\'t attach more than %(limit_value)s file to single post (added %(show_value)s)."\n ,\n "You can\'t attach more than %(limit_value)s flies to single post (added %(show_value)s)."\n , settings.MISAGO_POST_ATTACHMENTS_LIMIT)\n', (4291, 4531), False, 'from django.utils.translation import ungettext\n'), ((4574, 4706), 'rest_framework.serializers.ValidationError', 'serializers.ValidationError', (["(message % {'limit_value': settings.MISAGO_POST_ATTACHMENTS_LIMIT,\n 'show_value': total_attachments})"], {}), "(message % {'limit_value': settings.\n MISAGO_POST_ATTACHMENTS_LIMIT, 'show_value': total_attachments})\n", (4601, 4706), False, 'from rest_framework import serializers\n'), ((878, 904), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {}), '()\n', (902, 904), False, 'from rest_framework import serializers\n'), ((2578, 2604), 'misago.acl.add_acl', 'add_acl', (['user', 'attachments'], {}), '(user, attachments)\n', (2585, 2604), False, 'from misago.acl import add_acl\n'), ((3785, 3829), 'misago.threads.serializers.AttachmentSerializer', 'AttachmentSerializer', (['attachments'], {'many': '(True)'}), '(attachments, many=True)\n', (3805, 3829), False, 'from misago.threads.serializers import AttachmentSerializer\n'), ((1866, 1936), 'django.utils.translation.ugettext', '_', (['"""You don\'t have permission to remove "%(attachment)s" attachment."""'], {}), '(\'You don\\\'t have permission to remove "%(attachment)s" attachment.\')\n', (1867, 1936), True, 'from django.utils.translation import ugettext as _\n'), ((2010, 2084), 'rest_framework.serializers.ValidationError', 'serializers.ValidationError', (["(message % {'attachment': attachment.filename})"], {}), "(message % {'attachment': attachment.filename})\n", (2037, 2084), False, 'from rest_framework import serializers\n')]
|
# Copyright (C) 2017-2019 <NAME>
# SPDX-License-Identifier: Apache-2.0
import dolfin
import numpy
from ocellaris import Simulation, setup_simulation
import pytest
from helpers import skip_in_parallel
ISO_INPUT = """
ocellaris:
type: input
version: 1.0
mesh:
type: Rectangle
Nx: 4
Ny: 4
probes:
- name: free_surface
enabled: yes
type: IsoSurface
value: 0.5
field: c
custom_hook: MultiPhaseModelUpdated
multiphase_solver:
type: BlendedAlgebraicVOF
function_space_colour: DG
polynomial_degree_colour: 0
solver: {type: AnalyticalSolution}
boundary_conditions: [{'name': 'all', 'selector': 'code', 'inside_code': 'on_boundary'}]
physical_properties: {nu0: 1.0, nu1: 1, rho0: 1, rho1: 1}
output: {log_enabled: no}
"""
@pytest.mark.parametrize("degree", [0, 1, 2])
def test_isoline_horizontal(degree):
sim = Simulation()
sim.input.read_yaml(yaml_string=ISO_INPUT)
sim.input.set_value('multiphase_solver/polynomial_degree_colour', degree)
setup_simulation(sim)
probe = sim.probes['free_surface']
# Initial value with sharp interface at x[1] == 0.5
Vc = sim.data['Vc']
c = sim.data['c']
dm = Vc.dofmap()
arr = c.vector().get_local()
for cell in dolfin.cells(sim.data['mesh']):
cell_value = 1 if cell.midpoint().y() < 0.5 else 0
for dof in dm.cell_dofs(cell.index()):
arr[dof] = cell_value
c.vector().set_local(arr)
c.vector().apply('insert')
lines = probe.run(force_active=True)
print('\nDegree:', degree, 'Vcdim:', Vc.dim())
print(probe.name, probe.field_name, probe.value)
print(len(lines))
if sim.ncpu > 1:
raise pytest.skip()
for x, y in lines:
print('x', x, '\ny', y)
assert all(abs(y - 0.5) < 1e-12)
# Results should be in sorted order
xdx = numpy.diff(x)
assert all(xdx > 0) or all(xdx < 0)
assert len(lines) == 1
@pytest.mark.parametrize("degree", [1])
def test_isoline_circle(degree):
sim = Simulation()
sim.input.read_yaml(yaml_string=ISO_INPUT)
sim.input.set_value('multiphase_solver/polynomial_degree_colour', degree)
sim.input.set_value('mesh/Nx', 10)
sim.input.set_value('mesh/Ny', 10)
sim.input.set_value(
'initial_conditions/cp/cpp_code', '1.1*pow(pow(x[0] - 0.5, 2) + pow(x[1] - 0.5, 2), 0.5)'
)
setup_simulation(sim)
sim.data['c'].assign(sim.data['cp'])
probe = sim.probes['free_surface']
lines = probe.run(force_active=True)
if False:
from matplotlib import pyplot
c = dolfin.plot(sim.data['c'])
pyplot.colorbar(c)
for x, y in lines:
pyplot.plot(x, y)
pyplot.savefig('test_isoline_circle_%d.png' % degree)
pyplot.close()
print(probe.name, probe.field_name, probe.value)
print(len(lines))
for x, y in lines:
# Check that the radius is constant
r = ((x - 0.5) ** 2 + (y - 0.5) ** 2) ** 0.5
print('x', x)
print('y', y)
print('dr', r - 0.5 / 1.1)
assert all(abs(r - 0.5 / 1.1) < 5e-3)
# Check that the line is clockwise or counter clockwise
# for all segments, no going back and forth
theta = numpy.arctan2(y - 0.5, x - 0.5) * 180 / numpy.pi
theta[theta < 0] += 360
tdt = numpy.diff(theta)
tdt2 = tdt[abs(tdt) < 340]
print('dt', tdt)
assert all(tdt2 > 0) or all(tdt2 < 0)
if sim.ncpu == 1:
# The iso surface code is not written for full parallel support
assert len(lines) == 1
assert x[0] == x[-1] and y[0] == y[-1], "The loop should be closed"
|
[
"ocellaris.Simulation",
"numpy.arctan2",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"pytest.skip",
"dolfin.plot",
"matplotlib.pyplot.colorbar",
"numpy.diff",
"pytest.mark.parametrize",
"ocellaris.setup_simulation",
"dolfin.cells",
"matplotlib.pyplot.savefig"
] |
[((772, 816), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""degree"""', '[0, 1, 2]'], {}), "('degree', [0, 1, 2])\n", (795, 816), False, 'import pytest\n'), ((1936, 1974), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""degree"""', '[1]'], {}), "('degree', [1])\n", (1959, 1974), False, 'import pytest\n'), ((864, 876), 'ocellaris.Simulation', 'Simulation', ([], {}), '()\n', (874, 876), False, 'from ocellaris import Simulation, setup_simulation\n'), ((1006, 1027), 'ocellaris.setup_simulation', 'setup_simulation', (['sim'], {}), '(sim)\n', (1022, 1027), False, 'from ocellaris import Simulation, setup_simulation\n'), ((1240, 1270), 'dolfin.cells', 'dolfin.cells', (["sim.data['mesh']"], {}), "(sim.data['mesh'])\n", (1252, 1270), False, 'import dolfin\n'), ((2018, 2030), 'ocellaris.Simulation', 'Simulation', ([], {}), '()\n', (2028, 2030), False, 'from ocellaris import Simulation, setup_simulation\n'), ((2367, 2388), 'ocellaris.setup_simulation', 'setup_simulation', (['sim'], {}), '(sim)\n', (2383, 2388), False, 'from ocellaris import Simulation, setup_simulation\n'), ((1677, 1690), 'pytest.skip', 'pytest.skip', ([], {}), '()\n', (1688, 1690), False, 'import pytest\n'), ((1847, 1860), 'numpy.diff', 'numpy.diff', (['x'], {}), '(x)\n', (1857, 1860), False, 'import numpy\n'), ((2577, 2603), 'dolfin.plot', 'dolfin.plot', (["sim.data['c']"], {}), "(sim.data['c'])\n", (2588, 2603), False, 'import dolfin\n'), ((2612, 2630), 'matplotlib.pyplot.colorbar', 'pyplot.colorbar', (['c'], {}), '(c)\n', (2627, 2630), False, 'from matplotlib import pyplot\n'), ((2696, 2749), 'matplotlib.pyplot.savefig', 'pyplot.savefig', (["('test_isoline_circle_%d.png' % degree)"], {}), "('test_isoline_circle_%d.png' % degree)\n", (2710, 2749), False, 'from matplotlib import pyplot\n'), ((2758, 2772), 'matplotlib.pyplot.close', 'pyplot.close', ([], {}), '()\n', (2770, 2772), False, 'from matplotlib import pyplot\n'), ((3322, 3339), 'numpy.diff', 'numpy.diff', (['theta'], {}), '(theta)\n', (3332, 3339), False, 'import numpy\n'), ((2670, 2687), 'matplotlib.pyplot.plot', 'pyplot.plot', (['x', 'y'], {}), '(x, y)\n', (2681, 2687), False, 'from matplotlib import pyplot\n'), ((3227, 3258), 'numpy.arctan2', 'numpy.arctan2', (['(y - 0.5)', '(x - 0.5)'], {}), '(y - 0.5, x - 0.5)\n', (3240, 3258), False, 'import numpy\n')]
|
from clubs.club_enums import ClubHangoutSetting
from sims4.gsi.dispatcher import GsiHandler
from sims4.gsi.schema import GsiGridSchema, GsiFieldVisualizers
import services
import sims4.resources
club_schema = GsiGridSchema(label='Club Info')
club_schema.add_field('name', label='Name', type=GsiFieldVisualizers.STRING)
club_schema.add_field('club_id', label='Club ID', type=GsiFieldVisualizers.STRING, unique_field=True)
club_schema.add_field('hangout', label='Hangout Location', type=GsiFieldVisualizers.STRING)
club_schema.add_field('associated_color', label='Associated Color', type=GsiFieldVisualizers.STRING)
club_schema.add_field('uniform_male_child', label='Male Child Uniform', type=GsiFieldVisualizers.STRING)
club_schema.add_field('uniform_female_child', label='Female Child Uniform', type=GsiFieldVisualizers.STRING)
club_schema.add_field('uniform_male_adult', label='Male Adult Uniform', type=GsiFieldVisualizers.STRING)
club_schema.add_field('uniform_female_adult', label='Female Adult Uniform', type=GsiFieldVisualizers.STRING)
def generate_all_club_seeds():
instance_manager = services.get_instance_manager(sims4.resources.Types.CLUB_SEED)
if instance_manager.all_instances_loaded:
return [cls.__name__ for cls in instance_manager.types.values()]
return []
def add_club(manager):
with club_schema.add_view_cheat('clubs.create_club_from_seed', label='Create Club') as cheat:
cheat.add_token_param('club_seed', dynamic_token_fn=generate_all_club_seeds)
services.get_instance_manager(sims4.resources.Types.CLUB_SEED).add_on_load_complete(add_club)
with club_schema.add_view_cheat('clubs.remove_club_by_id', label='Remove Club') as cheat:
cheat.add_token_param('club_id')
with club_schema.add_view_cheat('clubs.remove_sim_from_club_by_id', label='Remove Sim From Club') as cheat:
cheat.add_token_param('sim_id')
cheat.add_token_param('club_id')
with club_schema.add_view_cheat('clubs.end_gathering_by_club_id', label='End Club Gathering') as cheat:
cheat.add_token_param('club_id')
with club_schema.add_view_cheat('clubs.start_gathering_by_club_id', label='Start Gathering') as cheat:
cheat.add_token_param('club_id')
with club_schema.add_view_cheat('clubs.refresh_safe_seed_data_for_club', label='Refresh Safe Data') as cheat:
cheat.add_token_param('club_id')
def get_buck_amounts():
return (1, 10, 100, 1000)
with club_schema.add_view_cheat('bucks.update_bucks_by_amount', label='Add Club Bucks') as cheat:
cheat.add_static_param('ClubBucks')
cheat.add_token_param('amount', dynamic_token_fn=get_buck_amounts)
cheat.add_token_param('club_id')
with club_schema.add_has_many('club_members', GsiGridSchema, label='Club Members') as sub_schema:
sub_schema.add_field('sim_id', label='Sim ID', width=0.35)
sub_schema.add_field('sim_name', label='Sim Name', width=0.4)
sub_schema.add_field('is_leader', label='Is Leader')
with club_schema.add_has_many('club_recent_members', GsiGridSchema, label='Recent Members') as sub_schema:
sub_schema.add_field('sim_id', label='Sim ID', width=0.35)
sub_schema.add_field('sim_name', label='Sim Name', width=0.4)
with club_schema.add_has_many('club_rules', GsiGridSchema, label='Club Rules') as sub_schema:
sub_schema.add_field('rule', label='Rule')
with club_schema.add_has_many('membership_criteria', GsiGridSchema, label='Membership Criteria') as sub_schema:
sub_schema.add_field('criteria', label='Criteria')
@GsiHandler('club_info', club_schema)
def generate_club_info_data():
club_service = services.get_club_service()
if club_service is None:
return
sim_info_manager = services.sim_info_manager()
club_info = []
for club in club_service.clubs:
if club.hangout_setting == ClubHangoutSetting.HANGOUT_VENUE:
club_hangout_str = 'Venue: {}'.format(str(club.hangout_venue))
elif club.hangout_setting == ClubHangoutSetting.HANGOUT_LOT:
club_hangout_str = 'Zone: {}'.format(club.hangout_zone_id)
else:
club_hangout_str = 'None'
        entry = {
            'name': str(club),
            'club_id': str(club.club_id),
            'hangout': club_hangout_str,
            'associated_color': str(club.associated_color) if club.associated_color else 'None',
            'uniform_male_child': str(bool(club.uniform_male_child)),
            'uniform_female_child': str(bool(club.uniform_female_child)),
            'uniform_male_adult': str(bool(club.uniform_male_adult)),
            'uniform_female_adult': str(bool(club.uniform_female_adult)),
        }
members_info = []
entry['club_members'] = members_info
for sim in club.members:
group_members_entry = {'sim_id': str(sim.id), 'sim_name': sim.full_name, 'is_leader': str(sim is club.leader)}
members_info.append(group_members_entry)
entry['club_recent_members'] = [{'sim_id': str(sim_id), 'sim_name': str(sim_info_manager.get(sim_id))} for sim_id in club._recent_member_ids]
rules_info = []
entry['club_rules'] = rules_info
if club.rules:
for rule in club.rules:
rules_entry = {'rule': str(rule)}
rules_info.append(rules_entry)
criteria_info = []
entry['membership_criteria'] = criteria_info
if club.membership_criteria:
for criteria in club.membership_criteria:
criteria_entry = {'criteria': str(criteria)}
criteria_info.append(criteria_entry)
club_info.append(entry)
return club_info
|
[
"services.get_club_service",
"sims4.gsi.schema.GsiGridSchema",
"services.get_instance_manager",
"services.sim_info_manager",
"sims4.gsi.dispatcher.GsiHandler"
] |
[((209, 241), 'sims4.gsi.schema.GsiGridSchema', 'GsiGridSchema', ([], {'label': '"""Club Info"""'}), "(label='Club Info')\n", (222, 241), False, 'from sims4.gsi.schema import GsiGridSchema, GsiFieldVisualizers\n'), ((3463, 3499), 'sims4.gsi.dispatcher.GsiHandler', 'GsiHandler', (['"""club_info"""', 'club_schema'], {}), "('club_info', club_schema)\n", (3473, 3499), False, 'from sims4.gsi.dispatcher import GsiHandler\n'), ((1097, 1159), 'services.get_instance_manager', 'services.get_instance_manager', (['sims4.resources.Types.CLUB_SEED'], {}), '(sims4.resources.Types.CLUB_SEED)\n', (1126, 1159), False, 'import services\n'), ((3550, 3577), 'services.get_club_service', 'services.get_club_service', ([], {}), '()\n', (3575, 3577), False, 'import services\n'), ((3645, 3672), 'services.sim_info_manager', 'services.sim_info_manager', ([], {}), '()\n', (3670, 3672), False, 'import services\n'), ((1501, 1563), 'services.get_instance_manager', 'services.get_instance_manager', (['sims4.resources.Types.CLUB_SEED'], {}), '(sims4.resources.Types.CLUB_SEED)\n', (1530, 1563), False, 'import services\n')]
|
import pandas as pd
import geopandas
def compile_airbnb_data(cur_link_table):
cur_tables = []
for cur_row in cur_link_table.itertuples():
tmp_table = cur_row.table.copy()
tmp_table["month"] = cur_row.month
tmp_table["year"] = cur_row.year
tmp_table["datetime"] = cur_row.datetime
cur_tables.append(tmp_table)
cur_data = pd.concat(cur_tables)
cur_data = cur_data.sort_values(by=["id", "datetime"], ascending=False).reset_index(drop=True)
cur_data = cur_data.drop(columns=["host_id", "first_review", "last_review"])
print(len(cur_data))
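    # Keep only listings whose zipcode is identical across every scrape
    # (listings with changing zipcodes have an ambiguous location).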
cur_selector = cur_data.groupby("id")["zipcode"].nunique()
cur_selector = cur_selector[ cur_selector == 1 ]
cur_data = cur_data[cur_data.id.isin(cur_selector.index)]
print(len(cur_data))
cur_data = cur_data[cur_data.room_type == "Entire home/apt"]
cur_data = cur_data.drop(columns = ["room_type"])
print(len(cur_data))
cur_data = cur_data[cur_data.property_type == "Apartment"]
cur_data = cur_data.drop(columns = ["property_type"])
print(len(cur_data))
cur_data = cur_data[cur_data.bed_type == "Real Bed"]
cur_data = cur_data.drop(columns = ["bed_type"])
print(len(cur_data))
cur_data = cur_data.dropna(subset=["zipcode", "beds", "bedrooms", "bathrooms"])
print(len(cur_data))
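    # Strip "$" and "," from the price strings and keep nightly rates
    # strictly between $25 and $1250.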
cur_data["price"] = cur_data.price.str.replace(r"[\$\,]", "").astype(float).round().astype(int)
cur_data = cur_data[cur_data["price"] < 1250]
cur_data = cur_data[cur_data["price"] > 25]
print(len(cur_data))
cur_selector = cur_data.groupby("id")["id"].count()
cur_selector = cur_selector[ cur_selector > 3 ]
cur_data = cur_data[cur_data.id.isin(cur_selector.index)]
print(len(cur_data))
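    # These columns are treated as fixed per listing: take each listing's most
    # recent values (rows are sorted by datetime descending) and broadcast them
    # back onto all of that listing's rows.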
replaced_columns = [
'neighbourhood_group_cleansed', 'latitude', 'longitude',
'accommodates', 'bathrooms', 'bedrooms', 'beds',
'number_of_reviews', 'review_scores_rating',
'reviews_per_month', 'is_location_exact', "datetime"
]
firsts_table = cur_data.groupby("id").first()[replaced_columns]
cur_data = cur_data.drop(columns=replaced_columns).merge(firsts_table, on="id", how="right")
cur_data = geopandas.GeoDataFrame(
cur_data,
geometry=geopandas.points_from_xy(
cur_data.longitude, cur_data.latitude
)
)
cur_data = cur_data.drop(columns=["longitude", "latitude"])
cur_data = cur_data.dropna(subset=["review_scores_rating", "reviews_per_month"])
print(len(cur_data))
cur_data = cur_data[cur_data.review_scores_rating > 60]
cur_data = cur_data.drop(columns=["review_scores_rating"])
print(len(cur_data))
cur_data = cur_data[cur_data.is_location_exact == "t"]
cur_data = cur_data.drop(columns=["is_location_exact"])
print(len(cur_data))
cur_data = cur_data[cur_data.neighbourhood_group_cleansed.isin(["Manhattan", "Brooklyn"])]
cur_data["is_brooklyn"] = cur_data.neighbourhood_group_cleansed == "Brooklyn"
cur_data = cur_data.drop(columns = ["neighbourhood_group_cleansed"])
print(len(cur_data))
cur_data = cur_data[cur_data.accommodates < 9]
print(len(cur_data))
cur_data = cur_data[cur_data.bathrooms >= 1]
print(len(cur_data))
cur_data = cur_data[ cur_data.bedrooms > 0 ]
cur_data = cur_data[ cur_data.bedrooms < 5 ]
print(len(cur_data))
cur_data = cur_data[ cur_data.beds > 0 ]
cur_data = cur_data[ cur_data.beds < 7 ]
print(len(cur_data))
cur_data = cur_data[ cur_data.number_of_reviews > 5 ]
cur_data = cur_data.drop(columns=["number_of_reviews"])
print(len(cur_data))
cur_data = cur_data[ cur_data.reviews_per_month > 1/8 ]
cur_data = cur_data.drop(columns=["reviews_per_month"])
print(len(cur_data))
cur_data = cur_data.drop(columns=["datetime"])
cur_data = cur_data.reset_index(drop=True)
cur_data["zipcode"] = cur_data["zipcode"].str.split("-").map(lambda work_list: work_list[0])
cur_data["zipcode"] = cur_data["zipcode"].astype("int")
return cur_data
|
[
"geopandas.points_from_xy",
"pandas.concat"
] |
[((377, 398), 'pandas.concat', 'pd.concat', (['cur_tables'], {}), '(cur_tables)\n', (386, 398), True, 'import pandas as pd\n'), ((2289, 2352), 'geopandas.points_from_xy', 'geopandas.points_from_xy', (['cur_data.longitude', 'cur_data.latitude'], {}), '(cur_data.longitude, cur_data.latitude)\n', (2313, 2352), False, 'import geopandas\n')]
|
import logging
from app.core.app import create_app
from app.core.cfg import cfg
__author__ = 'kclark'
logger = logging.getLogger(__name__)
app = create_app()
def run_app():
logger.info('App Server Initializing')
app.run(host='localhost', port=5000, threaded=True, debug=cfg.debug_mode)
logger.info('App Server Running')
if __name__ == '__main__':
run_app()
|
[
"logging.getLogger",
"app.core.app.create_app"
] |
[((114, 141), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (131, 141), False, 'import logging\n'), ((149, 161), 'app.core.app.create_app', 'create_app', ([], {}), '()\n', (159, 161), False, 'from app.core.app import create_app\n')]
|
#
# CS1010FC --- Programming Methodology
#
# Mission N Solutions
#
# Note that written answers are commented out to allow us to run your
# code easily while grading your problem set.
from random import *
from copy import deepcopy
import math
import random
#######
#Task 1a#
#######
# [Marking Scheme]
# Points to note:
# Matrix elements must be equal but not identical
# 1 mark for creating the correct matrix
def new_game(n):
matrix = []
for i in range(n):
matrix.append([0] * n)
return matrix
###########
# Task 1b #
###########
# [Marking Scheme]
# Points to note:
# Must ensure that it is created on a zero entry
# 1 mark for creating the correct loop
def new_tile(mat):
seq = [2] * 90 + [4]
newTile = choice(seq)
emptySquareList = empty_cells(mat)
emptySquare = choice(emptySquareList)
mat[emptySquare[0]][emptySquare[1]] = newTile
return mat
###########
# Task 1c #
###########
# [Marking Scheme]
# Points to note:
# Matrix elements must be equal but not identical
# 0 marks for completely wrong solutions
# 1 mark for getting only one condition correct
# 2 marks for getting two of the three conditions
# 3 marks for correct checking
def game_state(mat):
for i in range(len(mat)):
for j in range(len(mat[0])):
if mat[i][j]==2048:
return 'win'
for i in range(len(mat)-1): #intentionally reduced to check the row on the right and below
for j in range(len(mat[0])-1): #more elegant to use exceptions but most likely this will be their solution
if mat[i][j]==mat[i+1][j] or mat[i][j+1]==mat[i][j]:
return 'not over'
for i in range(len(mat)): #check for any zero entries
for j in range(len(mat[0])):
if mat[i][j]==0:
return 'not over'
for k in range(len(mat)-1): #to check the left/right entries on the last row
if mat[len(mat)-1][k]==mat[len(mat)-1][k+1]:
return 'not over'
for j in range(len(mat)-1): #check up/down entries on last column
if mat[j][len(mat)-1]==mat[j+1][len(mat)-1]:
return 'not over'
return 'lose'
###########
# Task 2a #
###########
# [Marking Scheme]
# Points to note:
# 0 marks for completely incorrect solutions
# 1 mark for solutions that show general understanding
# 2 marks for correct solutions that work for all sizes of matrices
def reverse(mat):
new=[]
for i in range(len(mat)):
new.append([])
for j in range(len(mat[0])):
new[i].append(mat[i][len(mat[0])-j-1])
return new
###########
# Task 2b #
###########
# [Marking Scheme]
# Points to note:
# 0 marks for completely incorrect solutions
# 1 mark for solutions that show general understanding
# 2 marks for correct solutions that work for all sizes of matrices
def transpose(mat):
new=[]
for i in range(len(mat[0])):
new.append([])
for j in range(len(mat)):
new[i].append(mat[j][i])
return new
##########
# Task 3 #
##########
# [Marking Scheme]
# Points to note:
# The way to do movement is compress -> merge -> compress again
# Basically if they can solve one side, and use transpose and reverse correctly they should
# be able to solve the entire thing just by flipping the matrix around
# No idea how to grade this one at the moment. I have it pegged to 8 (which gives you like,
# 2 per up/down/left/right?) But if you get one correct likely to get all correct so...
# Check the down one. Reverse/transpose if ordered wrongly will give you wrong result.
def cover_up(mat):
new=[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]
done=False
for i in range(4):
count=0
for j in range(4):
if mat[i][j]!=0:
new[i][count]=mat[i][j]
if j!=count:
done=True
count+=1
return (new,done)
def merge(mat):
score = 0
done=False
for i in range(4):
for j in range(3):
if mat[i][j]==mat[i][j+1] and mat[i][j]!=0:
score += mat[i][j] * 2
mat[i][j]*=2
mat[i][j+1]=0
done=True
return (mat,done, score)
def empty_cells(mat):
"""
Return a list of empty cells.
"""
emptySquareList = []
for row in range(len(mat)):
for col in range(len(mat[0])):
if mat[row][col] == 0:
emptySquareList.append([row, col])
return emptySquareList
def getMaxTile(mat):
maxTile = 0
for x in range(len(mat)):
for y in range(len(mat[x])):
maxTile = max(maxTile, mat[x][y])
return maxTile
def heuristic_score(mat):
number_of_empty_cells = len(empty_cells(mat))
    score = monotonicity(mat)*1.5 + number_of_empty_cells*2 + getMaxTile(mat)
return score
def monotonicity(grid):
grid_mask = [[2048, 1024, 256, 64],
[1024, 256, 64, 16],
[256, 64, 16, 4],
[64, 16, 4, 1]]
monotonicity_score = 0
for row in range(3):
for column in range(3):
monotonicity_score += grid[row][column] * grid_mask[row][column]
return monotonicity_score
def distance(mat, max_tile):
dis = None
for x in range(len(mat)):
if dis:
break
for y in range(len(mat)):
if max_tile == mat[x][y]:
if max_tile < 1024:
dis = -((abs(x - 0) + abs(y - 0)) * max_tile)
else:
dis = -((abs(x - 0) + abs(y - 0)) * (max_tile / 2))
break
return dis
def a_maximize(mat, alpha, beta, depth):
if game_state(mat)=='lose' or depth == 0:
return heuristic_score(mat)
maxUtility = -float('inf')
d = ['up', 'down', 'left', 'right']
for direction in d:
c = deepcopy(mat)
try:
            c, done, _ = move(c, direction)  # move() returns (board, moved, score); the score is not needed here
if done:
maxUtility = max(maxUtility, a_minimize(c, alpha, beta, depth-1 ))
except IndexError:
print("error-----------------------------------------------------------------------------")
continue
alpha = max(maxUtility, alpha)
if alpha >= beta:
break
return maxUtility
def alphaBeta(grid, max, startDepth):
if max:
return a_maximize(grid, -float('inf'), float('inf'), startDepth)
else:
return a_minimize(grid, -float('inf'), float('inf'), startDepth)
def minimax(grid, max, startDepth):
if max:
return maximize(grid, startDepth)
else:
return minimize(grid, startDepth)
def maximize(mat, depth):
if game_state(mat)=='lose' or depth == 0:
return heuristic_score(mat)
maxUtility = -float('inf')
d = ['up', 'down', 'left', 'right']
for direction in d:
c = deepcopy(mat)
try:
            c, done, _ = move(c, direction)  # move() returns (board, moved, score); the score is not needed here
if done:
maxUtility = max(maxUtility, minimize(c, depth - 1))
except IndexError:
continue
return maxUtility
def minimize(mat, depth):
if game_state(mat)=='lose' or depth == 0:
return heuristic_score(mat)
minUtility = float('inf')
emptyCells = empty_cells(mat)
children = []
for c in emptyCells:
gridCopy = deepcopy(mat)
gridCopy = set_tile(gridCopy, c[0], c[1], 2)
children.append(gridCopy)
gridCopy = deepcopy(mat)
gridCopy = set_tile(gridCopy, c[0], c[1], 4)
children.append(gridCopy)
for child in children:
minUtility = min(minUtility, maximize(child, depth - 1))
# print minUtility
return minUtility
def a_minimize(mat, alpha, beta, depth):
if game_state(mat)=='lose' or depth == 0:
return heuristic_score(mat)
minUtility = float('inf')
emptyCells = empty_cells(mat)
children = []
for c in emptyCells:
gridCopy = deepcopy(mat)
gridCopy = set_tile(gridCopy, c[0], c[1], 2)
children.append(gridCopy)
gridCopy = deepcopy(mat)
gridCopy = set_tile(gridCopy, c[0], c[1], 4)
children.append(gridCopy)
for child in children:
minUtility = min(minUtility, a_maximize(child, alpha, beta, depth - 1))
if minUtility <= alpha:
break
beta = min(minUtility, beta)
# print minUtility
return minUtility
def montecarlo(mat, initialScore):
scores = []
for i in range(0, 100):
directions = ['up', 'down', 'left', 'right']
direction = directions[random.randint(0, len(directions) - 1)]
newMat = deepcopy(mat)
gameScore = initialScore
while game_state(newMat)!='lose':
try:
newMat, done, score = move(newMat, direction)
newMat = new_tile(newMat)
gameScore+=score+heuristic_score(mat)
except IndexError:
break
scores.append(gameScore)
return sum(scores)/len(scores)
def expectimax(mat, depth, maximizer):
if depth==0:
return heuristic_score(mat)
if maximizer:
currentValue = -1
d = ['up', 'down', 'left', 'right']
for direction in d:
newBoard = deepcopy(mat)
newBoard, done, score = move(newBoard, direction)
calculatedValue = expectimax(newBoard, depth - 1, False)
if calculatedValue > currentValue:
currentValue = calculatedValue
return currentValue
else:
number = 0
sum_value = 0
emptyCells = empty_cells(mat)
children = []
for c in emptyCells:
gridCopy = deepcopy(mat)
gridCopy = set_tile(gridCopy, c[0], c[1], 2)
children.append(gridCopy)
gridCopy = deepcopy(mat)
gridCopy = set_tile(gridCopy, c[0], c[1], 4)
children.append(gridCopy)
for child in children:
sum_value+= expectimax(child, depth-1, True)
number+=1
if number == 0:
return expectimax(mat, depth-1, True)
return (sum_value/number)
def set_tile(mat, row, col, value):
"""
Set the tile at position row, col to have the given value.
"""
mat[row][col] = value
return mat
def move(game, direction):
if(direction=="up"):
return up(game)
elif direction=="down":
return down(game)
# down(game)
elif direction == "left":
return left(game)
elif direction=="right":
return right(game)
def up(game):
# print("up")
# return matrix after shifting up
game=transpose(game)
game,done=cover_up(game)
temp=merge(game)
game=temp[0]
done=done or temp[1]
score = temp[2]
game=cover_up(game)[0]
game=transpose(game)
return (game,done, score)
def down(game):
# print("down")
game=reverse(transpose(game))
game,done=cover_up(game)
temp=merge(game)
game=temp[0]
score = temp[2]
done=done or temp[1]
game=cover_up(game)[0]
game=transpose(reverse(game))
return (game,done, score)
def left(game):
# print("left")
# return matrix after shifting left
game,done=cover_up(game)
temp=merge(game)
game=temp[0]
score = temp[2]
done=done or temp[1]
game=cover_up(game)[0]
return (game,done, score)
def right(game):
# print("right")
# return matrix after shifting right
game=reverse(game)
game,done=cover_up(game)
temp=merge(game)
game=temp[0]
score = temp[2]
done=done or temp[1]
game=cover_up(game)[0]
game=reverse(game)
return (game,done, score)
|
[
"copy.deepcopy"
] |
[((5838, 5851), 'copy.deepcopy', 'deepcopy', (['mat'], {}), '(mat)\n', (5846, 5851), False, 'from copy import deepcopy\n'), ((6835, 6848), 'copy.deepcopy', 'deepcopy', (['mat'], {}), '(mat)\n', (6843, 6848), False, 'from copy import deepcopy\n'), ((7299, 7312), 'copy.deepcopy', 'deepcopy', (['mat'], {}), '(mat)\n', (7307, 7312), False, 'from copy import deepcopy\n'), ((7419, 7432), 'copy.deepcopy', 'deepcopy', (['mat'], {}), '(mat)\n', (7427, 7432), False, 'from copy import deepcopy\n'), ((7912, 7925), 'copy.deepcopy', 'deepcopy', (['mat'], {}), '(mat)\n', (7920, 7925), False, 'from copy import deepcopy\n'), ((8032, 8045), 'copy.deepcopy', 'deepcopy', (['mat'], {}), '(mat)\n', (8040, 8045), False, 'from copy import deepcopy\n'), ((8597, 8610), 'copy.deepcopy', 'deepcopy', (['mat'], {}), '(mat)\n', (8605, 8610), False, 'from copy import deepcopy\n'), ((9215, 9228), 'copy.deepcopy', 'deepcopy', (['mat'], {}), '(mat)\n', (9223, 9228), False, 'from copy import deepcopy\n'), ((9645, 9658), 'copy.deepcopy', 'deepcopy', (['mat'], {}), '(mat)\n', (9653, 9658), False, 'from copy import deepcopy\n'), ((9777, 9790), 'copy.deepcopy', 'deepcopy', (['mat'], {}), '(mat)\n', (9785, 9790), False, 'from copy import deepcopy\n')]
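The movement code in the snippet above follows the compress -> merge -> compress recipe described in its Task 3 comment: cover_up() slides the non-zero tiles to the left and merge() doubles equal neighbours while accumulating the score. Here is a small worked run of those two helpers on one hand-written 4x4 board, assuming the functions from the snippet are in scope (for example pasted into the same module).

board = [[2, 2, 0, 4],
         [0, 0, 0, 0],
         [4, 4, 4, 0],
         [2, 0, 0, 2]]

board, moved = cover_up(board)       # slide left -> [[2,2,4,0], [0,0,0,0], [4,4,4,0], [2,2,0,0]]
board, merged, score = merge(board)  # combine pairs -> [[4,0,4,0], [0,0,0,0], [8,0,4,0], [4,0,0,0]]
board, _ = cover_up(board)           # slide again to close the gaps created by merging

print(board)  # [[4, 4, 0, 0], [0, 0, 0, 0], [8, 4, 0, 0], [4, 0, 0, 0]]
print(score)  # 16 (4 + 8 + 4 from the three merges)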
|
import os
import logging
import sys
from collections import OrderedDict, defaultdict
import datetime
import cartosql
import requests
import json
# Constants
LATEST_URL = 'http://popdata.unhcr.org/api/stats/asylum_seekers_monthly.json?year={year}'
CARTO_TABLE = 'soc_038_monthly_asylum_requests'
CARTO_SCHEMA = OrderedDict([
('_UID', 'text'),
('date', 'timestamp'),
('country', 'text'),
('value_type', 'text'),
('num_people', 'numeric'),
('some_stats_confidential', 'text')
])
UID_FIELD = '_UID'
TIME_FIELD = 'date'
DATA_DIR = 'data'
LOG_LEVEL = logging.INFO
DATE_FORMAT = '%Y-%m-%d'
CLEAR_TABLE_FIRST = False
# Limit 1M rows, drop older than 20yrs
MAXROWS = 1000000
MAXAGE = datetime.datetime.today().year - 20
DATASET_ID = 'de24a492-acee-4345-9073-bbbe991f6ede'
def lastUpdateDate(dataset, date):
apiUrl = 'http://api.resourcewatch.org/v1/dataset/{0}'.format(dataset)
headers = {
'Content-Type': 'application/json',
'Authorization': os.getenv('apiToken')
}
body = {
"dataLastUpdated": date.isoformat()
}
try:
r = requests.patch(url = apiUrl, json = body, headers = headers)
logging.info('[lastUpdated]: SUCCESS, '+ date.isoformat() +' status code '+str(r.status_code))
return 0
except Exception as e:
logging.error('[lastUpdated]: '+str(e))
def genUID(date, country, valuetype):
'''Generate unique id'''
return '{}_{}_{}'.format(country, date, valuetype)
def insertIfNew(data, year, valuetype,
existing_ids, new_ids, new_rows,
unknown_vals, date_format=DATE_FORMAT):
'''Loop over months in the data, add to new rows if new'''
last_day = [31,28,31,30,31,30,31,31,30,31,30,31]
for cntry in data:
for month, val in data[cntry].items():
date = datetime.datetime(year=year, month=month, day=last_day[month-1]).strftime(date_format)
UID = genUID(date, cntry, valuetype)
if UID not in existing_ids + new_ids:
new_ids.append(UID)
if month in unknown_vals[cntry]:
logging.debug('Some stats confidental for {} in {}-{}'.format(cntry, year, month))
values = [UID, date, cntry, valuetype, val, True]
else:
logging.debug('All known stats released for {} in {}-{}'.format(cntry, year, month))
values = [UID, date, cntry, valuetype, val, False]
new_rows.append(values)
def processNewData(existing_ids):
'''
Iterively fetch parse and post new data
'''
year = datetime.datetime.today().year
new_count = 1
new_ids = []
try:
while year > MAXAGE and new_count:
# get and parse each page; stop when no new results or 200 pages
# 1. Fetch new data
logging.info("Fetching data for year {}".format(year))
r = requests.get(LATEST_URL.format(year=year))
data = r.json()
logging.debug('data: {}'.format(data))
# 2. Collect Totals
origins = defaultdict(lambda: defaultdict(int))
asylums = defaultdict(lambda: defaultdict(int))
unknown_vals_origins = defaultdict(list)
unknown_vals_asylums = defaultdict(list)
for obs in data:
try:
origins[obs['country_of_origin']][obs['month']] += obs['value']
except Exception as e:
logging.debug("Error processing value {} for country of origin {} in {}-{}. Value set to -9999. Error: {}".format(obs['value'],obs['country_of_origin'],year,obs['month'],e))
unknown_vals_origins[obs['country_of_origin']].append(obs['month'])
origins[obs['country_of_origin']][obs['month']] += 0
try:
asylums[obs['country_of_asylum']][obs['month']] += obs['value']
except Exception as e:
logging.debug("Error processing value {} for country of asylum {} in {}-{}. Value set to -9999. Error: {}".format(obs['value'],obs['country_of_asylum'],year,obs['month'],e))
unknown_vals_asylums[obs['country_of_asylum']].append(obs['month'])
asylums[obs['country_of_asylum']][obs['month']] += 0
# 3. Create Unique IDs, create new rows
new_rows = []
logging.debug('Create data about places of origin for year {}'.format(year))
insert_kwargs = {
'data':origins,'year':year,'valuetype':'country_of_origin',
'existing_ids':existing_ids,'new_ids':new_ids,'new_rows':new_rows,
'unknown_vals':unknown_vals_origins
}
insertIfNew(**insert_kwargs)
logging.debug('Create data about places of asylum for year {}'.format(year))
insert_kwargs.update(data=asylums,
valuetype='country_of_asylum',
unknown_vals=unknown_vals_asylums)
insertIfNew(**insert_kwargs)
# 4. Insert new rows
new_count = len(new_rows)
if new_count:
logging.info('Pushing {} new rows'.format(new_count))
cartosql.insertRows(CARTO_TABLE, CARTO_SCHEMA.keys(),
CARTO_SCHEMA.values(), new_rows)
# Decrement year
year -= 1
except json.decoder.JSONDecodeError:
logging.info('API is still down.')
num_new = len(new_ids)
return num_new
##############################################################
# General logic for Carto
# should be the same for most tabular datasets
##############################################################
def createTableWithIndex(table, schema, id_field, time_field=''):
'''Get existing ids or create table'''
cartosql.createTable(table, schema)
cartosql.createIndex(table, id_field, unique=True)
if time_field:
cartosql.createIndex(table, time_field)
def getIds(table, id_field):
'''get ids from table'''
r = cartosql.getFields(id_field, table, f='csv')
return r.text.split('\r\n')[1:-1]
def deleteExcessRows(table, max_rows, time_field, max_age=''):
'''Delete excess rows by age or count'''
num_dropped = 0
if isinstance(max_age, datetime.datetime):
max_age = max_age.isoformat()
# 1. delete by age
if max_age:
r = cartosql.deleteRows(table, "{} < '{}'".format(time_field, max_age))
num_dropped = r.json()['total_rows']
# 2. get sorted ids (old->new)
r = cartosql.getFields('cartodb_id', table, order='{}'.format(time_field),
f='csv')
ids = r.text.split('\r\n')[1:-1]
# 3. delete excess
if len(ids) > max_rows:
r = cartosql.deleteRowsByIDs(table, ids[:-max_rows])
num_dropped += r.json()['total_rows']
if num_dropped:
logging.info('Dropped {} old rows from {}'.format(num_dropped, table))
def get_most_recent_date(table):
r = cartosql.getFields(TIME_FIELD, table, f='csv', post=True)
dates = r.text.split('\r\n')[1:-1]
dates.sort()
most_recent_date = datetime.datetime.strptime(dates[-1], '%Y-%m-%d %H:%M:%S')
return most_recent_date
def main():
logging.basicConfig(stream=sys.stderr, level=LOG_LEVEL)
logging.info('STARTING')
if CLEAR_TABLE_FIRST:
logging.info('Clearing table')
cartosql.deleteRows(CARTO_TABLE, 'cartodb_id IS NOT NULL', user=os.getenv('CARTO_USER'),
key=os.getenv('CARTO_KEY'))
# 1. Check if table exists and create table
existing_ids = []
if cartosql.tableExists(CARTO_TABLE):
logging.info('Fetching existing ids')
existing_ids = getIds(CARTO_TABLE, UID_FIELD)
else:
logging.info('Table {} does not exist, creating'.format(CARTO_TABLE))
createTableWithIndex(CARTO_TABLE, CARTO_SCHEMA, UID_FIELD, TIME_FIELD)
# 2. Iterively fetch, parse and post new data
num_new = processNewData(existing_ids)
existing_count = num_new + len(existing_ids)
logging.info('Total rows: {}, New: {}, Max: {}'.format(
existing_count, num_new, MAXROWS))
# 3. Remove old observations
deleteExcessRows(CARTO_TABLE, MAXROWS, TIME_FIELD, datetime.datetime(year=MAXAGE, month=1, day=1))
# Get most recent update date
most_recent_date = get_most_recent_date(CARTO_TABLE)
lastUpdateDate(DATASET_ID, most_recent_date)
logging.info('SUCCESS')
|
[
"requests.patch",
"datetime.datetime.today",
"logging.basicConfig",
"cartosql.deleteRowsByIDs",
"datetime.datetime",
"collections.defaultdict",
"logging.info",
"datetime.datetime.strptime",
"cartosql.getFields",
"cartosql.createIndex",
"collections.OrderedDict",
"os.getenv",
"cartosql.createTable",
"cartosql.tableExists"
] |
[((312, 484), 'collections.OrderedDict', 'OrderedDict', (["[('_UID', 'text'), ('date', 'timestamp'), ('country', 'text'), (\n 'value_type', 'text'), ('num_people', 'numeric'), (\n 'some_stats_confidential', 'text')]"], {}), "([('_UID', 'text'), ('date', 'timestamp'), ('country', 'text'),\n ('value_type', 'text'), ('num_people', 'numeric'), (\n 'some_stats_confidential', 'text')])\n", (323, 484), False, 'from collections import OrderedDict, defaultdict\n'), ((5889, 5924), 'cartosql.createTable', 'cartosql.createTable', (['table', 'schema'], {}), '(table, schema)\n', (5909, 5924), False, 'import cartosql\n'), ((5929, 5979), 'cartosql.createIndex', 'cartosql.createIndex', (['table', 'id_field'], {'unique': '(True)'}), '(table, id_field, unique=True)\n', (5949, 5979), False, 'import cartosql\n'), ((6115, 6159), 'cartosql.getFields', 'cartosql.getFields', (['id_field', 'table'], {'f': '"""csv"""'}), "(id_field, table, f='csv')\n", (6133, 6159), False, 'import cartosql\n'), ((7066, 7123), 'cartosql.getFields', 'cartosql.getFields', (['TIME_FIELD', 'table'], {'f': '"""csv"""', 'post': '(True)'}), "(TIME_FIELD, table, f='csv', post=True)\n", (7084, 7123), False, 'import cartosql\n'), ((7203, 7261), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['dates[-1]', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(dates[-1], '%Y-%m-%d %H:%M:%S')\n", (7229, 7261), False, 'import datetime\n'), ((7307, 7362), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stderr', 'level': 'LOG_LEVEL'}), '(stream=sys.stderr, level=LOG_LEVEL)\n', (7326, 7362), False, 'import logging\n'), ((7367, 7391), 'logging.info', 'logging.info', (['"""STARTING"""'], {}), "('STARTING')\n", (7379, 7391), False, 'import logging\n'), ((7689, 7722), 'cartosql.tableExists', 'cartosql.tableExists', (['CARTO_TABLE'], {}), '(CARTO_TABLE)\n', (7709, 7722), False, 'import cartosql\n'), ((8521, 8544), 'logging.info', 'logging.info', (['"""SUCCESS"""'], {}), "('SUCCESS')\n", (8533, 8544), False, 'import logging\n'), ((702, 727), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (725, 727), False, 'import datetime\n'), ((973, 994), 'os.getenv', 'os.getenv', (['"""apiToken"""'], {}), "('apiToken')\n", (982, 994), False, 'import os\n'), ((1079, 1133), 'requests.patch', 'requests.patch', ([], {'url': 'apiUrl', 'json': 'body', 'headers': 'headers'}), '(url=apiUrl, json=body, headers=headers)\n', (1093, 1133), False, 'import requests\n'), ((2592, 2617), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (2615, 2617), False, 'import datetime\n'), ((6007, 6046), 'cartosql.createIndex', 'cartosql.createIndex', (['table', 'time_field'], {}), '(table, time_field)\n', (6027, 6046), False, 'import cartosql\n'), ((6830, 6878), 'cartosql.deleteRowsByIDs', 'cartosql.deleteRowsByIDs', (['table', 'ids[:-max_rows]'], {}), '(table, ids[:-max_rows])\n', (6854, 6878), False, 'import cartosql\n'), ((7427, 7457), 'logging.info', 'logging.info', (['"""Clearing table"""'], {}), "('Clearing table')\n", (7439, 7457), False, 'import logging\n'), ((7732, 7769), 'logging.info', 'logging.info', (['"""Fetching existing ids"""'], {}), "('Fetching existing ids')\n", (7744, 7769), False, 'import logging\n'), ((8327, 8373), 'datetime.datetime', 'datetime.datetime', ([], {'year': 'MAXAGE', 'month': '(1)', 'day': '(1)'}), '(year=MAXAGE, month=1, day=1)\n', (8344, 8373), False, 'import datetime\n'), ((3212, 3229), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3223, 3229), False, 'from collections import OrderedDict, defaultdict\n'), ((3265, 3282), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3276, 3282), False, 'from collections import OrderedDict, defaultdict\n'), ((5493, 5527), 'logging.info', 'logging.info', (['"""API is still down."""'], {}), "('API is still down.')\n", (5505, 5527), False, 'import logging\n'), ((7530, 7553), 'os.getenv', 'os.getenv', (['"""CARTO_USER"""'], {}), "('CARTO_USER')\n", (7539, 7553), False, 'import os\n'), ((7587, 7609), 'os.getenv', 'os.getenv', (['"""CARTO_KEY"""'], {}), "('CARTO_KEY')\n", (7596, 7609), False, 'import os\n'), ((1804, 1870), 'datetime.datetime', 'datetime.datetime', ([], {'year': 'year', 'month': 'month', 'day': 'last_day[month - 1]'}), '(year=year, month=month, day=last_day[month - 1])\n', (1821, 1870), False, 'import datetime\n'), ((3099, 3115), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (3110, 3115), False, 'from collections import OrderedDict, defaultdict\n'), ((3159, 3175), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (3170, 3175), False, 'from collections import OrderedDict, defaultdict\n')]
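In the ingestion script above, insertIfNew() expands nested {country: {month: value}} counts into one Carto row per country and month, flagging months whose statistics were partly confidential. Here is a standalone illustration of that row-building step, assuming insertIfNew (and its DATE_FORMAT default) from the snippet are in scope; the input counts are invented and no Carto connection is involved.

from collections import defaultdict

origins = {"Syria": {1: 120, 2: 95}}          # made-up monthly counts
unknown_vals = defaultdict(list)
unknown_vals["Syria"].append(2)               # pretend February was partly confidential

new_ids, new_rows = [], []
insertIfNew(data=origins, year=2017, valuetype="country_of_origin",
            existing_ids=[], new_ids=new_ids, new_rows=new_rows,
            unknown_vals=unknown_vals)

# new_rows now holds one row per (country, month), ready for cartosql.insertRows:
# ['Syria_2017-01-31_country_of_origin', '2017-01-31', 'Syria', 'country_of_origin', 120, False]
# ['Syria_2017-02-28_country_of_origin', '2017-02-28', 'Syria', 'country_of_origin', 95, True]
print(new_rows)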
|
import json
import os
import numpy as np
import pytest
from py_path_signature.data_models.stroke import Stroke
from py_path_signature.path_signature_extractor import PathSignatureExtractor
from .conftest import TEST_DATA_INPUT_DIR, TEST_DATA_REFERENCE_DIR
@pytest.mark.parametrize(
"input_strokes, expected_bounding_box",
[
(
[{"x": [1, 2, 3], "y": [1, 2, 3]}],
(1, 1, 2, 2),
),
(
[{"x": [0, 1, 2, 3], "y": [1, 2, 3, 4]}, {"x": [6, 8, 2, 3], "y": [0, 2, 3, 9]}],
(0, 0, 9, 8),
),
(
[
{"x": [714, 1], "y": [3, 4]},
{"x": [6, 8], "y": [0, 9]},
{"x": [100, 8], "y": [10, 9]},
],
(0, 1, 10, 713),
),
],
)
def test_bounding_box(input_strokes, expected_bounding_box):
strokes = [Stroke(**stroke) for stroke in input_strokes]
bounding_box = PathSignatureExtractor.calculate_bounding_box(strokes=strokes)
assert bounding_box == expected_bounding_box
def list_test_cases():
return [
os.path.splitext(case)[0]
for case in os.listdir(TEST_DATA_INPUT_DIR)
if os.path.isfile(os.path.join(TEST_DATA_INPUT_DIR, case))
]
@pytest.fixture(scope="function", params=list_test_cases())
def strokes_and_reference_signature(request):
test_case = request.param
with open(os.path.join(TEST_DATA_INPUT_DIR, f"{test_case}.json")) as f:
strokes = json.load(f)
with open(os.path.join(TEST_DATA_REFERENCE_DIR, f"{test_case}.json")) as f:
path_signature = np.array(json.load(f))
return (strokes, path_signature)
@pytest.fixture(scope="class")
def path_signature_extractor():
path_signature_extractor = PathSignatureExtractor(
order=2, rendering_size=(128, -1), min_rendering_dimension=5, max_aspect_ratio=30, delta=5
)
return path_signature_extractor
def test_image_signatures(path_signature_extractor, strokes_and_reference_signature):
input_strokes, path_signature_groundtruth = strokes_and_reference_signature
strokes = [Stroke(**stroke) for stroke in input_strokes]
path_signature = path_signature_extractor.extract_signature(strokes=strokes)
assert (path_signature == path_signature_groundtruth).all()
|
[
"py_path_signature.path_signature_extractor.PathSignatureExtractor.calculate_bounding_box",
"py_path_signature.data_models.stroke.Stroke",
"json.load",
"pytest.fixture",
"py_path_signature.path_signature_extractor.PathSignatureExtractor",
"os.path.splitext",
"pytest.mark.parametrize",
"os.path.join",
"os.listdir"
] |
[((261, 604), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""input_strokes, expected_bounding_box"""', "[([{'x': [1, 2, 3], 'y': [1, 2, 3]}], (1, 1, 2, 2)), ([{'x': [0, 1, 2, 3],\n 'y': [1, 2, 3, 4]}, {'x': [6, 8, 2, 3], 'y': [0, 2, 3, 9]}], (0, 0, 9, \n 8)), ([{'x': [714, 1], 'y': [3, 4]}, {'x': [6, 8], 'y': [0, 9]}, {'x':\n [100, 8], 'y': [10, 9]}], (0, 1, 10, 713))]"], {}), "('input_strokes, expected_bounding_box', [([{'x': [1,\n 2, 3], 'y': [1, 2, 3]}], (1, 1, 2, 2)), ([{'x': [0, 1, 2, 3], 'y': [1, \n 2, 3, 4]}, {'x': [6, 8, 2, 3], 'y': [0, 2, 3, 9]}], (0, 0, 9, 8)), ([{\n 'x': [714, 1], 'y': [3, 4]}, {'x': [6, 8], 'y': [0, 9]}, {'x': [100, 8],\n 'y': [10, 9]}], (0, 1, 10, 713))])\n", (284, 604), False, 'import pytest\n'), ((1666, 1695), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""class"""'}), "(scope='class')\n", (1680, 1695), False, 'import pytest\n'), ((939, 1001), 'py_path_signature.path_signature_extractor.PathSignatureExtractor.calculate_bounding_box', 'PathSignatureExtractor.calculate_bounding_box', ([], {'strokes': 'strokes'}), '(strokes=strokes)\n', (984, 1001), False, 'from py_path_signature.path_signature_extractor import PathSignatureExtractor\n'), ((1759, 1877), 'py_path_signature.path_signature_extractor.PathSignatureExtractor', 'PathSignatureExtractor', ([], {'order': '(2)', 'rendering_size': '(128, -1)', 'min_rendering_dimension': '(5)', 'max_aspect_ratio': '(30)', 'delta': '(5)'}), '(order=2, rendering_size=(128, -1),\n min_rendering_dimension=5, max_aspect_ratio=30, delta=5)\n', (1781, 1877), False, 'from py_path_signature.path_signature_extractor import PathSignatureExtractor\n'), ((874, 890), 'py_path_signature.data_models.stroke.Stroke', 'Stroke', ([], {}), '(**stroke)\n', (880, 890), False, 'from py_path_signature.data_models.stroke import Stroke\n'), ((1483, 1495), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1492, 1495), False, 'import json\n'), ((2109, 2125), 'py_path_signature.data_models.stroke.Stroke', 'Stroke', ([], {}), '(**stroke)\n', (2115, 2125), False, 'from py_path_signature.data_models.stroke import Stroke\n'), ((1098, 1120), 'os.path.splitext', 'os.path.splitext', (['case'], {}), '(case)\n', (1114, 1120), False, 'import os\n'), ((1144, 1175), 'os.listdir', 'os.listdir', (['TEST_DATA_INPUT_DIR'], {}), '(TEST_DATA_INPUT_DIR)\n', (1154, 1175), False, 'import os\n'), ((1403, 1457), 'os.path.join', 'os.path.join', (['TEST_DATA_INPUT_DIR', 'f"""{test_case}.json"""'], {}), "(TEST_DATA_INPUT_DIR, f'{test_case}.json')\n", (1415, 1457), False, 'import os\n'), ((1511, 1569), 'os.path.join', 'os.path.join', (['TEST_DATA_REFERENCE_DIR', 'f"""{test_case}.json"""'], {}), "(TEST_DATA_REFERENCE_DIR, f'{test_case}.json')\n", (1523, 1569), False, 'import os\n'), ((1611, 1623), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1620, 1623), False, 'import json\n'), ((1202, 1241), 'os.path.join', 'os.path.join', (['TEST_DATA_INPUT_DIR', 'case'], {}), '(TEST_DATA_INPUT_DIR, case)\n', (1214, 1241), False, 'import os\n')]
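From the parametrised cases above, calculate_bounding_box appears to return (y_min, x_min, height, width) taken over all strokes; that reading is inferred from the expected tuples, not from library documentation. A quick check along the same lines:

from py_path_signature.data_models.stroke import Stroke
from py_path_signature.path_signature_extractor import PathSignatureExtractor

strokes = [Stroke(x=[1, 5], y=[2, 10])]
print(PathSignatureExtractor.calculate_bounding_box(strokes=strokes))
# Expected, by the pattern of the tests above: (2, 1, 8, 4)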
|
#!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016,2017 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the del building command."""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
class TestDelBuilding(TestBrokerCommand):
def test_100_del_bu(self):
self.dsdb_expect_del_campus_building("ny", "bu")
self.dsdb_expect("delete_building_aq -building bu")
command = "del building --building bu"
self.noouttest(command.split(" "))
self.dsdb_verify()
def test_100_del_ex(self):
self.dsdb_expect_del_campus_building("ta", "cards")
self.dsdb_expect("delete_building_aq -building cards")
command = "del building --building cards"
self.noouttest(command.split(" "))
self.dsdb_verify()
def test_100_del_tu(self):
self.dsdb_expect_del_campus_building("ln", "tu")
self.dsdb_expect("delete_building_aq -building tu")
command = "del building --building tu"
self.noouttest(command.split(" "))
self.dsdb_verify()
def test_110_del_bunotindsdb(self):
self.dsdb_expect("add_building_aq -building_name bz -city ex "
"-building_addr Nowhere")
self.dsdb_expect_add_campus_building("ta", "bz")
command = ["add", "building", "--building", "bz", "--city", "ex",
"--address", "Nowhere"]
self.noouttest(command)
self.dsdb_verify()
dsdb_command = "delete_building_aq -building bz"
errstr = "bldg bz doesn't exists"
self.dsdb_expect(dsdb_command, True, errstr)
self.dsdb_expect_del_campus_building("ta", "bz")
command = "del building --building bz"
err = self.statustest(command.split(" "))
self.matchoutput(err,
"DSDB does not have building bz defined, proceeding.",
command)
self.dsdb_verify()
def test_120_add_nettest_net(self):
self.net.allocate_network(self, "nettest_net", 24, "unknown",
"building", "nettest",
comments="Made-up network")
def test_121_del_nettest_fail(self):
# try delete building
command = "del building --building nettest"
err = self.badrequesttest(command.split(" "))
self.matchoutput(err,
"Bad Request: Could not delete building nettest, "
"networks were found using this location.",
command)
self.dsdb_verify(empty=True)
def test_122_cleanup_nettest_net(self):
self.net.dispose_network(self, "nettest_net")
def test_130_del_nettest(self):
self.dsdb_expect_del_campus_building("ny", "nettest")
self.dsdb_expect("delete_building_aq -building nettest")
command = "del building --building nettest"
self.noouttest(command.split(" "))
self.dsdb_verify()
def test_200_del_building_notexist(self):
command = "del building --building bldg-not-exist"
out = self.notfoundtest(command.split(" "))
self.matchoutput(out, "Building bldg-not-exist not found.",
command)
def test_300_verify_bu(self):
command = "show building --building bu"
self.notfoundtest(command.split(" "))
def test_300_verify_tu(self):
command = "show building --building tu"
self.notfoundtest(command.split(" "))
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestDelBuilding)
unittest.TextTestRunner(verbosity=2).run(suite)
|
[
"unittest.TextTestRunner",
"unittest.TestLoader",
"utils.import_depends"
] |
[((867, 889), 'utils.import_depends', 'utils.import_depends', ([], {}), '()\n', (887, 889), False, 'import utils\n'), ((4255, 4276), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (4274, 4276), False, 'import unittest\n'), ((4320, 4356), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (4343, 4356), False, 'import unittest\n')]
|
import sys
sys.path.append('../../extractor_de_aspectos')
import unittest
from extractor import extractor_de_aspectos
from cliente_corenlp import cliente_corenlp
from lematizador import lematizador
import nltk
class Test(unittest.TestCase):
def setUp(self):
self.ex = extractor_de_aspectos.ExtractorDeAspectos()
self.cliente = cliente_corenlp.ClienteCoreNLP()
self.lemas = lematizador.Lematizador()
def test_extractor_recibe_arbol_de_dependencias(self):
"""
        In order to extract aspects, the dependency tree resolved by the Stanford CoreNLP
        must first be passed as an argument.
        Tests that the extraer method raises an exception if it does not receive the dependency
        tree in the form of a list (the output produced by cliente_corenlp.resolver_dependencias).
"""
com = "i am a valid comment."
diccionario = dict()
arbol = None
pos_lem = list()
with self.assertRaises(Exception):
self.ex.extraer(com, diccionario, arbol, pos_lem)
def test__buscar_en_tupla_pos_lem(self):
"""
        Tests the helper method used to look up the lemma or the word of a pos_lem tuple
        at a given position.
        Expects the lemma 'be' to be returned from the tuple at position 1.
"""
tupla_pos_lem = [('i', 'LS', None), ('am', 'VBP', 'be'), ('a', 'DT', None), ('valid', 'JJ', 'valid'),
('comment', 'NN', 'comment'), ('.', '.', None)]
indice = 1
resultado = self.ex._buscar_en_tupla_pos_lem(indice, tupla_pos_lem)
resultado_esperado = 'be'
self.assertEqual(resultado, resultado_esperado)
def test__buscar_en_tupla_pos_lem_2(self):
"""
        Tests the helper method used to look up the lemma or the word of a pos_lem tuple
        at a given position.
        Expects the word 'a' to be returned from the tuple at position 3, since its lemma is None.
"""
tupla_pos_lem = [('i', 'LS', None), ('am', 'VBP', 'be'), ('a', 'DT', None), ('valid', 'JJ', 'valid'),
('comment', 'NN', 'comment'), ('.', '.', None)]
indice = 3
resultado = self.ex._buscar_en_tupla_pos_lem(indice-1, tupla_pos_lem)
resultado_esperado = 'a'
self.assertEqual(resultado, resultado_esperado)
def test__es_aspecto_1(self):
"""
        Tests the helper method used to determine whether a word is in the aspect dictionary.
        Expects the word 'comment' to be identified as the aspect 'comment'.
"""
palabra = 'comment'
diccionario = {"comment":["comment"]}
resultado = self.ex._es_aspecto(palabra, diccionario)
self.assertEqual("comment", resultado)
def test__es_aspecto_2(self):
"""
        Tests the helper method used to determine whether a word is in the aspect dictionary.
        Expects the word 'review' to be identified as the aspect 'comment'.
"""
palabra = 'comment'
diccionario = {"comment":["comment", "review"]}
resultado = self.ex._es_aspecto(palabra, diccionario)
self.assertEqual("comment", resultado)
def test__es_aspecto_3(self):
"""
        Tests the helper method used to determine whether a word is in the aspect dictionary.
        Expects the word 'review' not to be identified as an aspect, so None is returned.
"""
palabra = 'review'
diccionario = {"comment":["comment"]}
resultado = self.ex._es_aspecto(palabra, diccionario)
self.assertEqual(None, resultado)
def test__amod_1(self):
"""
        Tests the helper method _extraer_dependencia, which runs when a dependency labelled "amod" is found.
        Expects the tuple ("comment", "valid").
"""
indice_raiz = 5
indice_nodo = 4
lista_pos_lem = [('i', 'LS', None), ('am', 'VBP', 'be'), ('a', 'DT', None), ('valid', 'JJ', 'valid'),
('comment', 'NN', 'comment'), ('.', '.', None)]
diccionario_de_aspectos = {"comment":["comment"]}
res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos)
res_esperado = ("comment", "valid")
self.assertEqual(res, res_esperado)
def test__amod_2(self):
"""
        Tests the helper method _extraer_dependencia, which runs when a dependency labelled "amod" is found.
        Expects the tuple ("cyclone", "red").
"""
indice_raiz = 4
indice_nodo = 3
lista_pos_lem = [('im', 'VB', None), ('the', 'DT', None), ('red', 'JJ', None),
('cyclone', 'NN', None), ('.', '.', None)]
diccionario_de_aspectos = {"cyclone":["cyclone"]}
res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos)
res_esperado = ("cyclone", "red")
self.assertEqual(res, res_esperado)
def test__amod_3(self):
"""
        Tests the helper method _extraer_dependencia, which runs when a dependency labelled "amod" is found.
        Expects None.
"""
indice_raiz = 4
indice_nodo = 3
lista_pos_lem = [('im', 'VB', None), ('the', 'DT', None), ('red', 'JJ', None),
('cyclone', 'NN', None), ('.', '.', None)]
diccionario_de_aspectos = {"not":["ok"]}
res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos)
res_esperado = None
self.assertEqual(res, res_esperado)
def test__amod_4(self):
"""
        Tests the helper method _extraer_dependencia, which runs when a dependency labelled "amod" is found.
        Expects None.
"""
indice_raiz = 4
indice_nodo = 3
lista_pos_lem = [('im', 'VB', None), ('the', 'DT', None), ('red', 'JJ', None),
('cyclone', 'NN', None), ('.', '.', None)]
arbol_de_dependencias = [('ROOT', 0, 1), ('det', 4, 2), ('amod', 4, 3),
('dobj', 1, 4), ('punct', 1, 5)]
diccionario_de_aspectos = {"not":["ok"]}
res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos,
arbol_de_dependencias=arbol_de_dependencias)
res_esperado = None
self.assertEqual(res, res_esperado)
def test__amod_5(self):
"""
        Tests the helper method _extraer_dependencia, which runs when a dependency labelled "amod" is found.
        Expects the tuple ("cyclone", "red").
"""
indice_raiz = 4
indice_nodo = 3
lista_pos_lem = [('im', 'VB', None), ('the', 'DT', None), ('red', 'JJ', None),
('cyclone', 'NN', None), ('.', '.', None)]
arbol_de_dependencias = [('ROOT', 0, 1), ('det', 4, 2), ('amod', 4, 3),
('dobj', 1, 4), ('punct', 1, 5)]
diccionario_de_aspectos = {"cyclone":["cyclone"]}
res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos)
res_esperado = ("cyclone", "red")
self.assertEqual(res, res_esperado)
def test__advmod_1(self):
"""
        Tests the helper method _extraer_dependencia, which runs when a dependency labelled "advmod" is found.
        Expects the noun's adverb to be returned as a tuple: ('noun', 'dependency').
"""
# ultimately, it's a sheep
indice_raiz = 6
indice_nodo = 1
lista_pos_lem = [('ultimately', 'RB', None), (',', ',', None), ('it', 'PRP', None),
("'s", 'VBZ', None), ('a', 'DT', None), ('sheep', 'NN', None)]
diccionario_de_aspectos = {"sheep": ["sheep"]}
res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos)
res_esperado = ("sheep","ultimately")
self.assertEqual(res_esperado, res)
def test__advmod_2(self):
"""
        Tests the helper method _extraer_dependencia, which runs when a dependency labelled "advmod" is found.
        Expects the noun's adverb to be returned as a tuple: ('noun', 'dependency').
"""
# do you dream of perfectly electric sheep, lately?
indice_raiz = 3
indice_nodo = 9
lista_pos_lem = [('do', 'VB', None), ('you', 'PRP', None), ('dream', 'NN', None),
('of', 'IN', None), ('perfectly', 'RB', None), ('electric', 'JJ', None),
('sheep', 'NN', None), (',', ',', None), ('lately', 'RB', None),
('?', '.', None)]
diccionario_de_aspectos = {"Dream": ["dream"]}
res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos)
res_esperado = ("Dream","lately")
self.assertEqual(res_esperado, res)
def test__amod_advmod(self):
"""
        In some cases the adjectives of a noun have their own adverb. This test expects that,
        when an amod dependency that has its own advmod is found, both are returned in a single string.
        Expects ("sheep", "perfectly electric").
"""
# do you dream of perfectly electric sheep, lately?
indice_raiz = 7
indice_nodo = 6
lista_pos_lem = [('do', 'VB', None), ('you', 'PRP', None), ('dream', 'NN', None),
('of', 'IN', None), ('perfectly', 'RB', None), ('electric', 'JJ', None),
('sheep', 'NN', None), (',', ',', None), ('lately', 'RB', None),
('?', '.', None)]
arbol_de_dependencias = [('ROOT', 0, 3), ('aux', 3, 1), ('nsubj', 3, 2), ('case', 7, 4),
('advmod', 6, 5), ('amod', 7, 6), ('nmod', 3, 7), ('punct', 3, 8),
('advmod', 3, 9), ('punct', 3, 10)]
diccionario_de_aspectos = {"Sheep": ["sheep"]}
res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos,
arbol_de_dependencias=arbol_de_dependencias)
res_esperado = ("Sheep","perfectly electric")
self.assertEqual(res_esperado, res)
def test_extraer_dependencia_doble_1(self):
"""
        Tests the helper method that looks for dependencies of dependencies. It must find the advmod
        of the adjective 'electric'. Expects 'perfectly' to be returned.
"""
indice_nodo = 6
lista_pos_lem = [('do', 'VB', None), ('you', 'PRP', None), ('dream', 'NN', None),
('of', 'IN', None), ('perfectly', 'RB', None), ('electric', 'JJ', None),
('sheep', 'NN', None), (',', ',', None), ('lately', 'RB', None),
('?', '.', None)]
arbol_de_dependencias = [('ROOT', 0, 3), ('aux', 3, 1), ('nsubj', 3, 2), ('case', 7, 4),
('advmod', 6, 5), ('amod', 7, 6), ('nmod', 3, 7), ('punct', 3, 8),
('advmod', 3, 9), ('punct', 3, 10)]
res_esperado = "perfectly"
res = self.ex._extraer_dependencia_doble(indice_nodo, lista_pos_lem, arbol_de_dependencias)
self.assertEqual(res_esperado, res)
def test__neg_1(self):
"""
        Tests the helper method that looks for negations. It must find the negation
        of the noun 'example'. Expects ('example', 'not') to be returned.
"""
lista_pos_lem = [('this', 'DT', None), ('is', 'VBZ', None), ('not', 'RB', None),
('a', 'DT', None), ('good', 'JJ', None), ('example', 'NN', None),
('.', '.', None)]
arbol_de_dependencias = [('ROOT', 0, 6), ('nsubj', 6, 1), ('cop', 6, 2),
('neg', 6, 3), ('det', 6, 4), ('amod', 6, 5), ('punct', 6, 7)]
diccionario_de_aspectos = {"example": ["example"]}
indice_raiz = 6
indice_nodo = 3
res_esperado = ("example", "not")
res = self.ex._extraer_dependencia(indice_raiz=indice_raiz, indice_nodo=indice_nodo,
lista_pos_lem=lista_pos_lem,
diccionario_de_aspectos=diccionario_de_aspectos,
arbol_de_dependencias=arbol_de_dependencias)
self.assertEqual(res,res_esperado)
def test__nsub_1(self):
"""
        Tests the helper method that looks for nominal subjects. It must find the adjective and adverb
        of the noun 'cats'. Expects ('cats', "really cute") to be returned.
"""
lista_pos_lem = [('black', 'JJ', None), ('cats', 'NNS', None), ('are', 'VBP', None),
('really', 'RB', None),
('cute', 'JJ', None), ('.', '.', None)]
arbol_de_dependencias = [('ROOT', 0, 5), ('amod', 2, 1), ('nsubj', 5, 2),
('cop', 5, 3), ('advmod', 5, 4), ('punct', 5, 6)]
diccionario_de_aspectos = {"cats":["cats"]}
indice_raiz = 5
indice_nodo = 2
res_esperado = ("cats", "really cute")
res = self.ex._extraer_nsubj(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos, arbol_de_dependencias)
self.assertEqual(res_esperado, res)
def test__nsub_2(self):
"""
        Tests the helper method that looks for nominal subjects. Since the nominal subject does not
        go from an adjective to a noun, it must return None.
"""
lista_pos_lem = [('this', 'DT', None), ('is', 'VBZ', None), ('not', 'RB', None),
('a', 'DT', None), ('good', 'JJ', None), ('example', 'NN', None),
('.', '.', None)]
arbol_de_dependencias = [('ROOT', 0, 6), ('nsubj', 6, 1), ('cop', 6, 2),
('neg', 6, 3), ('det', 6, 4), ('amod', 6, 5), ('punct', 6, 7)]
diccionario_de_aspectos = {"example": ["example"]}
indice_raiz = 6
indice_nodo = 1
res_esperado = None
res = self.ex._extraer_nsubj(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos, arbol_de_dependencias)
self.assertEqual(res_esperado, res)
def test_extractor_1(self):
"""
        Given the following comment: i am a valid comment.
        It must return the adjective 'valid' for the aspect 'comment'.
"""
com = "i am a valid comment."
diccionario = {"comment":["comment"]}
arbol = self.cliente.resolver_dependencias(com)
etiquetas_pos = self.cliente.etiquetar_texto(com)
lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos)
diccionario_esperado = {"comment":["valid"]}
dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem)
self.assertEqual(diccionario_esperado, dic_resultado)
def test_extractor_2(self):
"""
        Given the following comment: im the red cyclone.
        It must return {"cyclone":["red"]}
"""
com = "im the red cyclone."
diccionario = {"cyclone":["cyclone"]}
arbol = self.cliente.resolver_dependencias(com)
etiquetas_pos = self.cliente.etiquetar_texto(com)
lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos)
diccionario_esperado = {"cyclone":["red"]}
dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem)
self.assertEqual(diccionario_esperado, dic_resultado)
def test_extractor_3(self):
"""
        Given the following comment: do you dream of perfectly electric sheep, lately?
        It must return {"dream":["dream"],"sheep":["sheep"]}
"""
com = "do you dream of perfectly electric sheep, lately?"
diccionario = {"dream":["dream"],
"sheep":["sheep"]}
arbol = self.cliente.resolver_dependencias(com)
etiquetas_pos = self.cliente.etiquetar_texto(com)
lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos)
diccionario_esperado = {"dream":["lately"], "sheep":["perfectly electric"]}
dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem)
self.assertEqual(diccionario_esperado, dic_resultado)
def test_extractor_4(self):
"""
        Given the following comment: ultimately, it's a sheep
        It must return {"sheep":["ultimately"]}
"""
com = "ultimately, it's a sheep"
diccionario = {"sheep":["sheep"]}
arbol = self.cliente.resolver_dependencias(com)
etiquetas_pos = self.cliente.etiquetar_texto(com)
lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos)
diccionario_esperado = {"sheep":["ultimately"]}
dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem)
self.assertEqual(diccionario_esperado, dic_resultado)
def test_extractor_5(self):
"""
        Given the following comment: black cats are really cute.
        It must return {"cats":["black","really cute"]}
"""
com = "black cats are really cute."
diccionario = {"cats":["cat", "cats"]}
arbol = self.cliente.resolver_dependencias(com)
etiquetas_pos = self.cliente.etiquetar_texto(com)
lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos)
diccionario_esperado = {"cats":["black","really cute"]}
dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem)
self.assertEqual(diccionario_esperado, dic_resultado)
def test_extractor_6(self):
"""
        Given the following comment: i really love black cats.
        It must return {"cats":["black"]}
"""
com = "i really love black cats."
diccionario = {"cats":["cat", "cats"]}
arbol = self.cliente.resolver_dependencias(com)
etiquetas_pos = self.cliente.etiquetar_texto(com)
lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos)
diccionario_esperado = {"cats":["black"]}
dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem)
self.assertEqual(diccionario_esperado, dic_resultado)
def test_extractor_7(self):
"""
        Given the following comment: this is not a good example.
        It must return {"example":["not good"]}
"""
com = "this is not a good example."
diccionario = {"example":["example"]}
arbol = self.cliente.resolver_dependencias(com)
etiquetas_pos = self.cliente.etiquetar_texto(com)
lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos)
diccionario_esperado = {"example":["not", "good"]}
dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem)
self.assertEqual(diccionario_esperado, dic_resultado)
def test_extractor_8(self):
"""
        Given the following comment: They sent him the same, wrong item.
        It must return {"item":["same","wrong"]}
"""
com = "They sent him the same, wrong item."
diccionario = {"item":["item", "items"]}
arbol = self.cliente.resolver_dependencias(com)
etiquetas_pos = self.cliente.etiquetar_texto(com)
lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos)
diccionario_esperado = {"item":["same","wrong"]}
dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem)
print(diccionario_esperado)
self.assertEqual(diccionario_esperado, dic_resultado)
def test_extractor_9(self):
"""
        Tests with real comments.
"""
com = "Usually I have good experiences with Amazon and its customer service reps, but after todays online customer service chat I am horrified at some of the people Amazon employs. Enter employee Ruchitha. I was trying to get a print out label for my roommate since he doesn't have Prime and isn't really internet savvy. After he had bought a dvd that wasn't playable in the country, he called customer service and a rep said they were going to send him the correct one. They sent him the same, wrong item. So he had 2 returns to do."
diccionario = {"experience":["experiences","experience"],"Amazon":["Amazon","amazon"],
"item":["item","items"]}
sentencias = nltk.sent_tokenize(com)
dic_resultado = dict()
for sentencia in sentencias:
arbol = self.cliente.resolver_dependencias(sentencia)
etiquetas_pos = self.cliente.etiquetar_texto(sentencia)
lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos)
res = self.ex.extraer(diccionario, arbol, lista_pos_lem)
dic_resultado = self._combinar_dict(res, dic_resultado)
diccionario_esperado = {"experience":["good"],
"Amazon":[],
"item":["same","wrong"]
}
self.assertEqual(diccionario_esperado, dic_resultado)
def test_extractor_10(self):
"""
        Tests with real comments.
"""
com = "There was a time I was a super-Amazon fan-boy, but those days are long past. If AMZ is good at one thing these days, it is finding new and innovated ways to anger their customers. I try to find the best deal with products all the time and use what discounts where I can. Apparently, AMZ does not like this and has taken to locking people out of their ability to comment on products if they feel you are not paying the top price. Today I had the simplest question about a feature on an item I bought on AMZ, but cannot ask the question as apparently, I am persona non grata these days. I got the product with a discount via research on the net."
diccionario = {"fan-boy":["fan-boy"],"Amazon":["Amazon","amazon","AMZ"],
"question":["question"], "thing":["thing", "things"],
"way":["way","ways"], "deal":["deal","deals"],
"price":["prices", "price"],}
sentencias = nltk.sent_tokenize(com)
dic_resultado = dict()
for sentencia in sentencias:
arbol = self.cliente.resolver_dependencias(sentencia)
etiquetas_pos = self.cliente.etiquetar_texto(sentencia)
lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos)
res = self.ex.extraer(diccionario, arbol, lista_pos_lem)
dic_resultado = self._combinar_dict(res, dic_resultado)
diccionario_esperado = {"fan-boy":["super-Amazon"],
"Amazon":["good"],
"question":["simple"],
"thing":["good"],
"way":["new"],
"deal":["best"],
"price":["top"]
}
self.assertEqual(diccionario_esperado, dic_resultado)
def test__conj_1(self):
"""
        Helper method for handling conjunctions from a noun to an adverb/adjective.
"""
lista_pos_lem = [('I', 'PRP', None), ('have', 'VBP', None), ('been', 'VBN', None),
('a', 'DT', None), ('Prime', 'JJ', None), ('member', 'NN', None),
('for', 'IN', None), ('years', 'NNS', None), ('and', 'CC', None),
('always', 'RB', None), ('received', 'VBD', None), ('my', 'PRP$', None),
('merchandise', 'NN', None), ('in', 'IN', None), ('the', 'DT', None),
('desired', 'JJ', None), ('time', 'NN', None), ('frame', 'NN', None),
(',', ',', None), ('but', 'CC', None), ('no', 'DT', None),
('more', 'JJR', None), ('!!', '.', None)]
arbol_de_dependencias = [('ROOT', 0, 6), ('nsubj', 6, 1), ('aux', 6, 2),
('cop', 6, 3), ('det', 6, 4), ('amod', 6, 5),
('case', 8, 7), ('nmod', 6, 8), ('cc', 6, 9),
('advmod', 11, 10), ('conj', 6, 11), ('nmod:poss', 13, 12),
('dobj', 11, 13), ('case', 18, 14), ('det', 18, 15),
('amod', 18, 16), ('compound', 18, 17), ('nmod', 11, 18),
('punct', 6, 19), ('cc', 6, 20), ('neg', 22, 21),
('conj', 6, 22), ('punct', 6, 23)]
diccionario_de_aspectos = {"Member":["member"]}
indice_raiz = 6
indice_nodo = 22
res_esperado = ("Member", "no more")
res = self.ex._extraer_conj(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos, arbol_de_dependencias)
self.assertEqual(res_esperado, res)
def test_extractor_11(self):
"""
        Tests with real comments.
"""
com = "Prime 2 day shipping seems to be a thing of the past. I have been a Prime member for years and always received my merchandise in the desired time frame, but no more!! I have had numerous conversations with customer service and supervisors. All they do is give me the runaround and tell me their policy has not changed. \"Two day shipping starts when the item leaves the warehouse\". They can't ship if the items are not in their warehouses, seemly blaming the vendors. Shame on you Amazon for not telling the truth. To save money, Amazon no longer uses reliable trucking companies to move merchandise from vendors warehousing to Amazon warehouses. They can't ship what's not available. Nice way to save a buck. But keep taking our membership money for services you no longer can provide."
diccionario = {"Member":["member","Member"],
"Shipping":["shipping","Shipping"],
}
sentencias = nltk.sent_tokenize(com)
dic_resultado = dict()
for sentencia in sentencias:
arbol = self.cliente.resolver_dependencias(sentencia)
etiquetas_pos = self.cliente.etiquetar_texto(sentencia)
lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos)
res = self.ex.extraer(diccionario, arbol, lista_pos_lem)
dic_resultado = self._combinar_dict(res, dic_resultado)
diccionario_esperado = {"Member":["Prime", "no more"],
"Shipping":["day"],
}
self.assertEqual(diccionario_esperado, dic_resultado)
def test_quitar_palabras(self):
"""
        Tests the quitar_palabras method. Expects it to remove every word whose POS tag is not
        an adverb, noun or negation.
"""
texto = "do you dream of perfectly electric sheep, lately?"
res = self.ex.quitar_palabras(texto)
texto_esperado = "perfectly electric lately"
self.assertEqual(res, texto_esperado)
def test_quitar_palabras_2(self):
"""
        Tests the quitar_palabras method. Expects it to remove every word whose POS tag is not
        an adverb, noun or negation.
"""
texto = "don't say no to cookies, never again"
res = self.ex.quitar_palabras(texto)
texto_esperado = "n't no never again"
self.assertEqual(res, texto_esperado)
def test_quitar_palabras_3(self):
"""
        Tests the quitar_palabras method. Expects it to remove every word whose POS tag is not
        an adverb, noun or negation.
"""
texto = "black cats are really cute."
res = self.ex.quitar_palabras(texto)
texto_esperado = "black really cute"
self.assertEqual(res, texto_esperado)
def test__purgar_palabras_pos(self):
"""
        Helper method that walks the list of tuples to remove the unnecessary words.
"""
texto = "don't say no to cookies, never again"
lista_pos_lem = self.lemas.lematizar_tuplas(self.cliente.etiquetar_texto(texto))
res = self.ex._purgar_palabras_pos(lista_pos_lem)
tupla_esperada = [("n't", 'RB', "n't"),('no', 'DT', None),
('never', 'RB', "never"), ('again', 'RB', "again")]
self.assertEqual(res, tupla_esperada)
def test__unir_palabras(self):
"""
        Helper method that joins the words of the list of tuples.
"""
texto = "don't say no to cookies, never again"
lista_pos_lem = self.lemas.lematizar_tuplas(self.cliente.etiquetar_texto(texto))
tupla_purgada = self.ex._purgar_palabras_pos(lista_pos_lem)
res = self.ex._unir_palabras(tupla_purgada)
texto_esperado = "n't no never again"
self.assertEqual(res, texto_esperado)
def _combinar_dict(self, dict1, dict2):
for llave in dict1:
if llave in dict2.keys():
dict2[llave].extend(dict1[llave])
else:
dict2[llave] = dict1[llave]
return dict2
def tearDown(self):
self.cliente.cerrar_servicio()
self.ex.cerrar()
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
[
"sys.path.append",
"unittest.main",
"nltk.sent_tokenize",
"lematizador.lematizador.Lematizador",
"cliente_corenlp.cliente_corenlp.ClienteCoreNLP",
"extractor.extractor_de_aspectos.ExtractorDeAspectos"
] |
[((11, 57), 'sys.path.append', 'sys.path.append', (['"""../../extractor_de_aspectos"""'], {}), "('../../extractor_de_aspectos')\n", (26, 57), False, 'import sys\n'), ((30850, 30865), 'unittest.main', 'unittest.main', ([], {}), '()\n', (30863, 30865), False, 'import unittest\n'), ((284, 327), 'extractor.extractor_de_aspectos.ExtractorDeAspectos', 'extractor_de_aspectos.ExtractorDeAspectos', ([], {}), '()\n', (325, 327), False, 'from extractor import extractor_de_aspectos\n'), ((351, 383), 'cliente_corenlp.cliente_corenlp.ClienteCoreNLP', 'cliente_corenlp.ClienteCoreNLP', ([], {}), '()\n', (381, 383), False, 'from cliente_corenlp import cliente_corenlp\n'), ((405, 430), 'lematizador.lematizador.Lematizador', 'lematizador.Lematizador', ([], {}), '()\n', (428, 430), False, 'from lematizador import lematizador\n'), ((21659, 21682), 'nltk.sent_tokenize', 'nltk.sent_tokenize', (['com'], {}), '(com)\n', (21677, 21682), False, 'import nltk\n'), ((23470, 23493), 'nltk.sent_tokenize', 'nltk.sent_tokenize', (['com'], {}), '(com)\n', (23488, 23493), False, 'import nltk\n'), ((27333, 27356), 'nltk.sent_tokenize', 'nltk.sent_tokenize', (['com'], {}), '(com)\n', (27351, 27356), False, 'import nltk\n')]
|
# SNAKE GAME
import pyglet
from pyglet import gl
from pyglet.window import key
from images_load import batch
from game_state import Game_state
from field import game_field
time_to_move = [0.7]
def on_key_press(symbol, modifiers):
'''
    Handle a key press that sets the snake's direction.
'''
if symbol == key.LEFT:
game_state.direction = (-1, 0)
if symbol == key.RIGHT:
game_state.direction = (1, 0)
if symbol == key.UP:
game_state.direction = (0, 1)
if symbol == key.DOWN:
game_state.direction = (0, -1)
if symbol == key.ENTER:
game_state.keys.append(('enter', 0))
def on_key_release(symbol, modifiers):
'''
On key release.
'''
if symbol == key.ENTER:
game_state.keys.clear()
def on_draw():
gl.glClear(gl.GL_COLOR_BUFFER_BIT)
gl.glColor3f(1, 1, 1)
gl.glLineWidth(4)
x1 = game_field.origin_xy0_game_field[0]
y1 = game_field.origin_xy0_game_field[1]
x2 = game_field.origin_xy1_game_field[0]
y2 = game_field.origin_xy1_game_field[1]
draw_polygon((x1, y1), (x1, y2), (x2, y2), (x2, y1))
x1 = game_field.origin_xy0_menu[0]
y1 = game_field.origin_xy0_menu[1]
x2 = game_field.origin_xy1_menu[0]
y2 = game_field.origin_xy1_menu[1]
draw_polygon((x1, y1), (x1, y2), (x2, y2), (x2, y1))
batch.draw()
menu_text()
if game_state.state == 'dead':
dead_text()
if game_state.state == 'game_over':
game_over_text()
def dead_text():
draw_text('For continue set right direction',
x=game_field.size_window()[0]//2,
y=game_field.size_window()[1]//2-100,
size=30,
anchor_x='center')
def menu_text():
draw_text('in Python',
x=game_field.origin_xy0_menu[0]+25,
y=game_field.origin_xy1_menu[1]-130,
size=16,
anchor_x='left')
draw_text('Move with ← ↓ ↑ →',
x=game_field.origin_xy0_menu[0]+300,
y=game_field.origin_xy1_menu[1]-50,
size=16,
anchor_x='left')
draw_text('Eat Apples',
x=game_field.origin_xy0_menu[0]+300,
y=game_field.origin_xy1_menu[1]-80,
size=16,
anchor_x='left')
draw_text('Don\'t eat walls or yourself.',
x=game_field.origin_xy0_menu[0]+300,
y=game_field.origin_xy1_menu[1]-110,
size=16,
anchor_x='left')
draw_text(str(game_state.lifes),
x=game_field.origin_xy1_menu[0]-70,
y=game_field.origin_xy1_menu[1]-65,
size=30,
anchor_x='left')
draw_text(str(len(game_state.snake_xy)),
x=game_field.origin_xy1_menu[0]-70,
y=game_field.origin_xy1_menu[1]-115,
size=30,
anchor_x='left')
def game_over_text():
draw_text('GAME OVER',
x=game_field.size_window()[0]//2,
y=game_field.size_window()[1]//2-100,
size=30,
anchor_x='center')
draw_text('Press ENTER',
x=game_field.size_window()[0]//2,
y=game_field.size_window()[1]//2-140,
size=20,
anchor_x='center')
def move(t):
time_to_move[0] -= t
if time_to_move[0] < 0:
game_state.move(t)
if game_state.state == 'game_over' and ('enter', 0) in game_state.keys:
game_state.restart_conditions()
time = max(0.7 - 0.05 * int(len(game_state.snake_xy))/3, 0.2)
time_to_move[0] = time
def reset():
game_state = Game_state()
game_state.draw_snake_parts()
return game_state
def draw_polygon(xy1, xy2, xy3, xy4):
'''
Draw polygon.
'''
gl.glBegin(gl.GL_LINE_LOOP);
gl.glVertex2f(int(xy1[0]), int(xy1[1]));
gl.glVertex2f(int(xy2[0]), int(xy2[1]));
gl.glVertex2f(int(xy3[0]), int(xy3[1]));
gl.glVertex2f(int(xy4[0]), int(xy4[1]));
gl.glEnd();
def draw_text(text, x, y, size, anchor_x):
'''
Draw text in playfield.
'''
text = pyglet.text.Label(
text,
font_name='Arial',
font_size=size,
x=x, y=y, anchor_x=anchor_x)
text.draw()
window = pyglet.window.Window(game_field.size_window()[0], game_field.size_window()[1])
game_state = reset()
window.push_handlers(
    on_draw=on_draw,
    on_key_press=on_key_press,
    on_key_release=on_key_release,
    )
pyglet.clock.schedule_interval(move, 1/30)
pyglet.clock.schedule_interval(game_state.add_food, 5)
pyglet.app.run()
|
[
"pyglet.app.run",
"pyglet.text.Label",
"pyglet.gl.glClear",
"pyglet.gl.glColor3f",
"pyglet.gl.glBegin",
"game_state.Game_state",
"pyglet.gl.glEnd",
"images_load.batch.draw",
"field.game_field.size_window",
"pyglet.gl.glLineWidth",
"pyglet.clock.schedule_interval"
] |
[((4568, 4612), 'pyglet.clock.schedule_interval', 'pyglet.clock.schedule_interval', (['move', '(1 / 30)'], {}), '(move, 1 / 30)\n', (4598, 4612), False, 'import pyglet\n'), ((4611, 4665), 'pyglet.clock.schedule_interval', 'pyglet.clock.schedule_interval', (['game_state.add_food', '(5)'], {}), '(game_state.add_food, 5)\n', (4641, 4665), False, 'import pyglet\n'), ((4668, 4684), 'pyglet.app.run', 'pyglet.app.run', ([], {}), '()\n', (4682, 4684), False, 'import pyglet\n'), ((793, 827), 'pyglet.gl.glClear', 'gl.glClear', (['gl.GL_COLOR_BUFFER_BIT'], {}), '(gl.GL_COLOR_BUFFER_BIT)\n', (803, 827), False, 'from pyglet import gl\n'), ((832, 853), 'pyglet.gl.glColor3f', 'gl.glColor3f', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (844, 853), False, 'from pyglet import gl\n'), ((858, 875), 'pyglet.gl.glLineWidth', 'gl.glLineWidth', (['(4)'], {}), '(4)\n', (872, 875), False, 'from pyglet import gl\n'), ((1331, 1343), 'images_load.batch.draw', 'batch.draw', ([], {}), '()\n', (1341, 1343), False, 'from images_load import batch\n'), ((3765, 3777), 'game_state.Game_state', 'Game_state', ([], {}), '()\n', (3775, 3777), False, 'from game_state import Game_state\n'), ((3913, 3940), 'pyglet.gl.glBegin', 'gl.glBegin', (['gl.GL_LINE_LOOP'], {}), '(gl.GL_LINE_LOOP)\n', (3923, 3940), False, 'from pyglet import gl\n'), ((4126, 4136), 'pyglet.gl.glEnd', 'gl.glEnd', ([], {}), '()\n', (4134, 4136), False, 'from pyglet import gl\n'), ((4238, 4329), 'pyglet.text.Label', 'pyglet.text.Label', (['text'], {'font_name': '"""Arial"""', 'font_size': 'size', 'x': 'x', 'y': 'y', 'anchor_x': 'anchor_x'}), "(text, font_name='Arial', font_size=size, x=x, y=y,\n anchor_x=anchor_x)\n", (4255, 4329), False, 'import pyglet\n'), ((4407, 4431), 'field.game_field.size_window', 'game_field.size_window', ([], {}), '()\n', (4429, 4431), False, 'from field import game_field\n'), ((4436, 4460), 'field.game_field.size_window', 'game_field.size_window', ([], {}), '()\n', (4458, 4460), False, 'from field import game_field\n'), ((1572, 1596), 'field.game_field.size_window', 'game_field.size_window', ([], {}), '()\n', (1594, 1596), False, 'from field import game_field\n'), ((3060, 3084), 'field.game_field.size_window', 'game_field.size_window', ([], {}), '()\n', (3082, 3084), False, 'from field import game_field\n'), ((3261, 3285), 'field.game_field.size_window', 'game_field.size_window', ([], {}), '()\n', (3283, 3285), False, 'from field import game_field\n'), ((1624, 1648), 'field.game_field.size_window', 'game_field.size_window', ([], {}), '()\n', (1646, 1648), False, 'from field import game_field\n'), ((3112, 3136), 'field.game_field.size_window', 'game_field.size_window', ([], {}), '()\n', (3134, 3136), False, 'from field import game_field\n'), ((3313, 3337), 'field.game_field.size_window', 'game_field.size_window', ([], {}), '()\n', (3335, 3337), False, 'from field import game_field\n')]
|
#!/usr/bin/env python
import dbus
import dbus.service
import sys
import signal
from PyQt4 import QtCore
from dbus.mainloop.qt import DBusQtMainLoop
from notifier import Notifier
from als import AmbientLightSensor
from brightnessctrl import BrightnessCtrl
class AutoBrightnessService(dbus.service.Object):
def __init__(self):
path = '/com/github/sheinz/autobrightness'
bus_loop = DBusQtMainLoop(set_as_default=True)
self._bus = dbus.SessionBus(mainloop=bus_loop)
name = dbus.service.BusName('com.github.sheinz.autobrightness',
bus=self._bus)
dbus.service.Object.__init__(self, name, path)
self.notifier = Notifier(self._bus)
self._auto = False
self._als = AmbientLightSensor()
self._br_ctrl = BrightnessCtrl(self._bus)
self._process_timer = QtCore.QTimer()
self._process_timer.timeout.connect(self.process)
@property
def auto(self):
return self._auto
@auto.setter
def auto(self, value):
self._auto = value
self.notifier.auto_brightness(self._auto)
if self._auto:
self._als.start()
self._br_ctrl.start()
self._process_timer.start(1000)
else:
self._als.stop()
self._br_ctrl.stop()
self._process_timer.stop()
def process(self):
value = self._als.get_value()
print('Light sensor: %d' % value)
if value == 0:
value = 1
self._br_ctrl.set_screen_brightness(value)
if value < 5:
self._br_ctrl.set_keyboard_light(True)
else:
self._br_ctrl.set_keyboard_light(False)
def stop(self):
self._process_timer.stop()
self._als.stop()
self._br_ctrl.stop()
@dbus.service.method(dbus_interface='com.github.sheinz.autobrightness')
def up(self):
value = self._br_ctrl.screen_brightness_up()
self.notifier.brightness(value)
@dbus.service.method(dbus_interface='com.github.sheinz.autobrightness')
def down(self):
value = self._br_ctrl.screen_brightness_down()
self.notifier.brightness(value)
@dbus.service.method(dbus_interface='com.github.sheinz.autobrightness')
def auto_toggle(self):
self.auto = not self.auto
@dbus.service.method(dbus_interface='com.github.sheinz.autobrightness')
def exit(self):
sys.exit()
class Application(QtCore.QCoreApplication):
def __init__(self, argv):
super(Application, self).__init__(argv)
self._auto_br = AutoBrightnessService()
def event(self, e):
return super(Application, self).event(e)
def quit(self):
self._auto_br.stop()
super(Application, self).quit()
def main():
app = Application(sys.argv)
app.startTimer(1000)
signal.signal(signal.SIGINT, lambda *args: app.quit())
sys.exit(app.exec_())
if __name__ == "__main__":
main()
|
[
"PyQt4.QtCore.QTimer",
"dbus.SessionBus",
"dbus.service.BusName",
"dbus.service.Object.__init__",
"als.AmbientLightSensor",
"brightnessctrl.BrightnessCtrl",
"sys.exit",
"dbus.mainloop.qt.DBusQtMainLoop",
"notifier.Notifier",
"dbus.service.method"
] |
[((1825, 1895), 'dbus.service.method', 'dbus.service.method', ([], {'dbus_interface': '"""com.github.sheinz.autobrightness"""'}), "(dbus_interface='com.github.sheinz.autobrightness')\n", (1844, 1895), False, 'import dbus\n'), ((2013, 2083), 'dbus.service.method', 'dbus.service.method', ([], {'dbus_interface': '"""com.github.sheinz.autobrightness"""'}), "(dbus_interface='com.github.sheinz.autobrightness')\n", (2032, 2083), False, 'import dbus\n'), ((2205, 2275), 'dbus.service.method', 'dbus.service.method', ([], {'dbus_interface': '"""com.github.sheinz.autobrightness"""'}), "(dbus_interface='com.github.sheinz.autobrightness')\n", (2224, 2275), False, 'import dbus\n'), ((2343, 2413), 'dbus.service.method', 'dbus.service.method', ([], {'dbus_interface': '"""com.github.sheinz.autobrightness"""'}), "(dbus_interface='com.github.sheinz.autobrightness')\n", (2362, 2413), False, 'import dbus\n'), ((404, 439), 'dbus.mainloop.qt.DBusQtMainLoop', 'DBusQtMainLoop', ([], {'set_as_default': '(True)'}), '(set_as_default=True)\n', (418, 439), False, 'from dbus.mainloop.qt import DBusQtMainLoop\n'), ((460, 494), 'dbus.SessionBus', 'dbus.SessionBus', ([], {'mainloop': 'bus_loop'}), '(mainloop=bus_loop)\n', (475, 494), False, 'import dbus\n'), ((511, 582), 'dbus.service.BusName', 'dbus.service.BusName', (['"""com.github.sheinz.autobrightness"""'], {'bus': 'self._bus'}), "('com.github.sheinz.autobrightness', bus=self._bus)\n", (531, 582), False, 'import dbus\n'), ((628, 674), 'dbus.service.Object.__init__', 'dbus.service.Object.__init__', (['self', 'name', 'path'], {}), '(self, name, path)\n', (656, 674), False, 'import dbus\n'), ((699, 718), 'notifier.Notifier', 'Notifier', (['self._bus'], {}), '(self._bus)\n', (707, 718), False, 'from notifier import Notifier\n'), ((766, 786), 'als.AmbientLightSensor', 'AmbientLightSensor', ([], {}), '()\n', (784, 786), False, 'from als import AmbientLightSensor\n'), ((811, 836), 'brightnessctrl.BrightnessCtrl', 'BrightnessCtrl', (['self._bus'], {}), '(self._bus)\n', (825, 836), False, 'from brightnessctrl import BrightnessCtrl\n'), ((867, 882), 'PyQt4.QtCore.QTimer', 'QtCore.QTimer', ([], {}), '()\n', (880, 882), False, 'from PyQt4 import QtCore\n'), ((2442, 2452), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2450, 2452), False, 'import sys\n')]
|
"""
In previous homework task 4, you wrote a cache function that remembers another function's output value.
Modify it to be a parametrized decorator, so that the following code::
@cache(times=3)
def some_function():
pass
Would give out cached value up to `times` number only.
Example::
@cache(times=2)
def f():
return input('? ') # careful with input() in python2, use raw_input() instead
>> f()
? 1
'1'
>> f() # will remember previous value
'1'
>> f() # but use it up to two times only
'1'
>> f()
? 2
'2'
"""
import inspect
from typing import Callable
def cache(times: int) -> Callable:
"""Cache decorator which returns func result n times"""
cached_values = {}
def _cache(func: Callable) -> Callable:
def wrapper(*args, **kwargs):
bound = inspect.signature(func).bind(*args, **kwargs)
bound.apply_defaults()
key = str(bound.arguments)
if key not in cached_values:
cached_values[key] = [func(*args, **kwargs), times+1]
if cached_values[key][1] > 1:
cached_values[key][1] -= 1
return cached_values[key][0]
result = cached_values[key][0]
del cached_values[key]
return result
return wrapper
return _cache
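
# A minimal usage sketch: applying cache(times=2) to a deterministic counter in
# place of input() (next_value and calls are illustrative names, not part of the
# assignment). The first call computes the value, the cached value is reused for
# the next two calls, and the call after that recomputes it.
if __name__ == "__main__":
    calls = {"n": 0}

    @cache(times=2)
    def next_value():
        calls["n"] += 1
        return calls["n"]

    print([next_value() for _ in range(4)])  # expected output: [1, 1, 1, 2]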
|
[
"inspect.signature"
] |
[((900, 923), 'inspect.signature', 'inspect.signature', (['func'], {}), '(func)\n', (917, 923), False, 'import inspect\n')]
|
from django.contrib import admin
from .models import ServerInfo,SampleData,DeviceControl,UserApp
# Register your models here.
admin.site.register(ServerInfo)
admin.site.register(SampleData)
admin.site.register(DeviceControl)
admin.site.register(UserApp)
|
[
"django.contrib.admin.site.register"
] |
[((127, 158), 'django.contrib.admin.site.register', 'admin.site.register', (['ServerInfo'], {}), '(ServerInfo)\n', (146, 158), False, 'from django.contrib import admin\n'), ((159, 190), 'django.contrib.admin.site.register', 'admin.site.register', (['SampleData'], {}), '(SampleData)\n', (178, 190), False, 'from django.contrib import admin\n'), ((191, 225), 'django.contrib.admin.site.register', 'admin.site.register', (['DeviceControl'], {}), '(DeviceControl)\n', (210, 225), False, 'from django.contrib import admin\n'), ((226, 254), 'django.contrib.admin.site.register', 'admin.site.register', (['UserApp'], {}), '(UserApp)\n', (245, 254), False, 'from django.contrib import admin\n')]
|
import hashlib
import re
from typing import Union, Optional, Dict
from urllib.parse import urljoin
import binascii
import logging
import eyed3
from eyed3.id3 import ID3_V1
from unidecode import unidecode
from tornado import web
from settings import LOG_LEVEL
class BasicHandler(web.RequestHandler):
logger = None
def prepare(self):
self.logger.debug('{} request from {}: {}'.format(
self.request.method.capitalize(),
self.request.remote_ip,
self.request.uri)
)
self.logger.debug('Request body: {}'.format(self.request.body.decode()))
def on_finish(self):
self.log_request()
def write_result(self, result):
self.finish({'success': 1, 'data': result})
def write_error(self, status_code, **kwargs):
result = {'success': 0, 'error': self._reason, 'error_code': status_code}
if 'exc_info' in kwargs:
exception = kwargs['exc_info'][1]
if isinstance(exception, web.HTTPError):
result.update(exception.args) # TODO
self.finish(result)
def log_request(self):
self.logger.info(
'{remote_ip} {method} {request_uri} => HTTP: {status_code} ({time:.0f} ms)'.format(
remote_ip=self.request.remote_ip,
method=self.request.method.upper(),
request_uri=self.request.uri,
status_code=self.get_status(),
time=1000.0 * self.request.request_time()
)
)
def data_received(self, chunk):
pass
def reverse_full_url(self, name, *args):
host_url = "{protocol}://{host}".format(**vars(self.request))
return urljoin(host_url, self.reverse_url(name, *args))
def setup_logger(name, lvl=logging.DEBUG):
logger = logging.getLogger(name)
logger.setLevel(lvl)
basic_stream_handler = logging.StreamHandler()
basic_stream_handler.setFormatter(
logging.Formatter('%(levelname)-8s %(asctime)s %(message)s')
)
basic_stream_handler.setLevel(LOG_LEVEL)
logger.addHandler(basic_stream_handler)
logger.propagate = False
return logger
def vk_url(path: str):
return urljoin('https://api.vk.com/', path)
def crc32(string: Union[str, bytes]):
if isinstance(string, str):
string = string.encode()
return '{:08x}'.format(binascii.crc32(string) & 0xFFFFFFFF)
def md5(string: Union[str, bytes]):
if isinstance(string, str):
string = string.encode()
return hashlib.md5(string).hexdigest()
def uni_hash(hash_func: str, string):
if hash_func == 'crc32':
return crc32(string)
elif hash_func == 'md5':
return md5(string)
raise ValueError('Unknown hash function: {}'.format(hash_func))
def sanitize(string, to_lower: bool = True, alpha_numeric_only: bool = False, truncate: Optional[int] = None):
if alpha_numeric_only:
string = re.sub(r'\w+', '', string)
else:
bad_chars = ['~', '`', '!', '@', '#', '$', '%', '^', '&', '*', '(', ')', '_', '=', '+',
'[', '{', ']', '}', '\\', '|', ';', ':', '"', "'", '—', '–', ',', '<', '>', '/', '?',
'‘', '’', '“', '”']
string = re.sub(r'|'.join(map(re.escape, bad_chars)), '', string)
string = unidecode(string) # transliteration and other staff: converts to ascii
string = string.strip()
string = re.sub(r'\s+', ' ', string)
if to_lower:
string = string.lower()
if truncate is not None:
string = string[:truncate]
return string
def set_id3_tag(path: str, audio_info: Dict):
audio = eyed3.load(path)
audio.initTag(version=ID3_V1)
audio.tag.title = unidecode(audio_info['title']).strip()
audio.tag.artist = unidecode(audio_info['artist']).strip()
audio.tag.save(version=ID3_V1)
|
[
"unidecode.unidecode",
"hashlib.md5",
"urllib.parse.urljoin",
"eyed3.load",
"logging.StreamHandler",
"logging.Formatter",
"re.sub",
"binascii.crc32",
"logging.getLogger"
] |
[((1811, 1834), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (1828, 1834), False, 'import logging\n'), ((1887, 1910), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (1908, 1910), False, 'import logging\n'), ((2197, 2233), 'urllib.parse.urljoin', 'urljoin', (['"""https://api.vk.com/"""', 'path'], {}), "('https://api.vk.com/', path)\n", (2204, 2233), False, 'from urllib.parse import urljoin\n'), ((3298, 3315), 'unidecode.unidecode', 'unidecode', (['string'], {}), '(string)\n', (3307, 3315), False, 'from unidecode import unidecode\n'), ((3411, 3438), 're.sub', 're.sub', (['"""\\\\s+"""', '""" """', 'string'], {}), "('\\\\s+', ' ', string)\n", (3417, 3438), False, 'import re\n'), ((3632, 3648), 'eyed3.load', 'eyed3.load', (['path'], {}), '(path)\n', (3642, 3648), False, 'import eyed3\n'), ((1958, 2018), 'logging.Formatter', 'logging.Formatter', (['"""%(levelname)-8s %(asctime)s %(message)s"""'], {}), "('%(levelname)-8s %(asctime)s %(message)s')\n", (1975, 2018), False, 'import logging\n'), ((2929, 2955), 're.sub', 're.sub', (['"""\\\\w+"""', '""""""', 'string'], {}), "('\\\\w+', '', string)\n", (2935, 2955), False, 'import re\n'), ((2366, 2388), 'binascii.crc32', 'binascii.crc32', (['string'], {}), '(string)\n', (2380, 2388), False, 'import binascii\n'), ((2517, 2536), 'hashlib.md5', 'hashlib.md5', (['string'], {}), '(string)\n', (2528, 2536), False, 'import hashlib\n'), ((3705, 3735), 'unidecode.unidecode', 'unidecode', (["audio_info['title']"], {}), "(audio_info['title'])\n", (3714, 3735), False, 'from unidecode import unidecode\n'), ((3767, 3798), 'unidecode.unidecode', 'unidecode', (["audio_info['artist']"], {}), "(audio_info['artist'])\n", (3776, 3798), False, 'from unidecode import unidecode\n')]
|
import colorsys
import struct
import math
PIXELS = 94
# interleaved = 1
# interleaved = 2
# interleaved = 4
interleaved = 8
f = open("test_{}.bin".format(interleaved), "wb")
for n in range(1000):
for x in range(PIXELS):
# This way we get a half "rainbow", easy to find breaks/seams
hue = float(x + n/10.) / PIXELS / 2
r, g, b = colorsys.hsv_to_rgb(
hue,
1,
16 + 16 * math.sin(2. * math.pi * (5. * -x + n / 3.) / 100.)
)
if interleaved == 2:
data1 = struct.pack("BBB", r, g, b)
data2 = struct.pack("BBB", r, 0, 0) # intentionally wrong
for i in range(len(data1)):
cur = 0
byte1 = ord(data1[i])
byte2 = ord(data2[i])
for j in range(8):
cur |= (byte1 & (2**j)) << (j + 0)
cur |= (byte2 & (2**j)) << (j + 1)
f.write(struct.pack(">H", cur))
elif interleaved == 4:
data1 = struct.pack("BBB", r, g, b)
data2 = struct.pack("BBB", r, 0, 0) # intentionally wrong
data3 = struct.pack("BBB", 0, g, 0) # intentionally wrong
data4 = struct.pack("BBB", 0, 0, b) # intentionally wrong
for i in range(len(data1)):
cur = 0
byte1 = ord(data1[i])
byte2 = ord(data2[i])
byte3 = ord(data3[i])
byte4 = ord(data4[i])
for j in range(8):
cur |= (byte1 & (2**j)) << (j * 3 + 0)
cur |= (byte2 & (2**j)) << (j * 3 + 1)
cur |= (byte3 & (2**j)) << (j * 3 + 2)
cur |= (byte4 & (2**j)) << (j * 3 + 3)
f.write(struct.pack(">L", cur))
elif interleaved == 8:
data1 = struct.pack("BBB", r, g, b)
data2 = struct.pack("BBB", r, 0, 0) # intentionally wrong
data3 = struct.pack("BBB", 0, g, 0) # intentionally wrong
data4 = struct.pack("BBB", 0, 0, b) # intentionally wrong
data5 = struct.pack("BBB", 0, g, b) # intentionally wrong
data6 = struct.pack("BBB", r, g, 0) # intentionally wrong
data7 = struct.pack("BBB", r, 0, b) # intentionally wrong
data8 = struct.pack("BBB", 0, g, b) # intentionally wrong
for i in range(len(data1)):
cur = 0
byte1 = ord(data1[i])
byte2 = ord(data2[i])
byte3 = ord(data3[i])
byte4 = ord(data4[i])
byte5 = ord(data5[i])
byte6 = ord(data6[i])
byte7 = ord(data7[i])
byte8 = ord(data8[i])
for j in range(8):
cur |= (byte1 & (2**j)) << (j * 7 + 0)
cur |= (byte2 & (2**j)) << (j * 7 + 1)
cur |= (byte3 & (2**j)) << (j * 7 + 2)
cur |= (byte4 & (2**j)) << (j * 7 + 3)
cur |= (byte5 & (2**j)) << (j * 7 + 4)
cur |= (byte6 & (2**j)) << (j * 7 + 5)
cur |= (byte7 & (2**j)) << (j * 7 + 6)
cur |= (byte8 & (2**j)) << (j * 7 + 7)
f.write(struct.pack(">Q", cur))
else:
# No interleaving
f.write(struct.pack("BBB", r, g, b))
f.close()
|
[
"math.sin",
"struct.pack"
] |
[((559, 586), 'struct.pack', 'struct.pack', (['"""BBB"""', 'r', 'g', 'b'], {}), "('BBB', r, g, b)\n", (570, 586), False, 'import struct\n'), ((607, 634), 'struct.pack', 'struct.pack', (['"""BBB"""', 'r', '(0)', '(0)'], {}), "('BBB', r, 0, 0)\n", (618, 634), False, 'import struct\n'), ((1041, 1068), 'struct.pack', 'struct.pack', (['"""BBB"""', 'r', 'g', 'b'], {}), "('BBB', r, g, b)\n", (1052, 1068), False, 'import struct\n'), ((1089, 1116), 'struct.pack', 'struct.pack', (['"""BBB"""', 'r', '(0)', '(0)'], {}), "('BBB', r, 0, 0)\n", (1100, 1116), False, 'import struct\n'), ((1159, 1186), 'struct.pack', 'struct.pack', (['"""BBB"""', '(0)', 'g', '(0)'], {}), "('BBB', 0, g, 0)\n", (1170, 1186), False, 'import struct\n'), ((1229, 1256), 'struct.pack', 'struct.pack', (['"""BBB"""', '(0)', '(0)', 'b'], {}), "('BBB', 0, 0, b)\n", (1240, 1256), False, 'import struct\n'), ((448, 502), 'math.sin', 'math.sin', (['(2.0 * math.pi * (5.0 * -x + n / 3.0) / 100.0)'], {}), '(2.0 * math.pi * (5.0 * -x + n / 3.0) / 100.0)\n', (456, 502), False, 'import math\n'), ((966, 988), 'struct.pack', 'struct.pack', (['""">H"""', 'cur'], {}), "('>H', cur)\n", (977, 988), False, 'import struct\n'), ((1865, 1892), 'struct.pack', 'struct.pack', (['"""BBB"""', 'r', 'g', 'b'], {}), "('BBB', r, g, b)\n", (1876, 1892), False, 'import struct\n'), ((1913, 1940), 'struct.pack', 'struct.pack', (['"""BBB"""', 'r', '(0)', '(0)'], {}), "('BBB', r, 0, 0)\n", (1924, 1940), False, 'import struct\n'), ((1983, 2010), 'struct.pack', 'struct.pack', (['"""BBB"""', '(0)', 'g', '(0)'], {}), "('BBB', 0, g, 0)\n", (1994, 2010), False, 'import struct\n'), ((2053, 2080), 'struct.pack', 'struct.pack', (['"""BBB"""', '(0)', '(0)', 'b'], {}), "('BBB', 0, 0, b)\n", (2064, 2080), False, 'import struct\n'), ((2123, 2150), 'struct.pack', 'struct.pack', (['"""BBB"""', '(0)', 'g', 'b'], {}), "('BBB', 0, g, b)\n", (2134, 2150), False, 'import struct\n'), ((2193, 2220), 'struct.pack', 'struct.pack', (['"""BBB"""', 'r', 'g', '(0)'], {}), "('BBB', r, g, 0)\n", (2204, 2220), False, 'import struct\n'), ((2263, 2290), 'struct.pack', 'struct.pack', (['"""BBB"""', 'r', '(0)', 'b'], {}), "('BBB', r, 0, b)\n", (2274, 2290), False, 'import struct\n'), ((2333, 2360), 'struct.pack', 'struct.pack', (['"""BBB"""', '(0)', 'g', 'b'], {}), "('BBB', 0, g, b)\n", (2344, 2360), False, 'import struct\n'), ((1790, 1812), 'struct.pack', 'struct.pack', (['""">L"""', 'cur'], {}), "('>L', cur)\n", (1801, 1812), False, 'import struct\n'), ((3370, 3397), 'struct.pack', 'struct.pack', (['"""BBB"""', 'r', 'g', 'b'], {}), "('BBB', r, g, b)\n", (3381, 3397), False, 'import struct\n'), ((3282, 3304), 'struct.pack', 'struct.pack', (['""">Q"""', 'cur'], {}), "('>Q', cur)\n", (3293, 3304), False, 'import struct\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Common Python library imports
from http import HTTPStatus
# Pip package imports
from flask import render_template, request, jsonify
# Internal package imports
from backend.utils import decode_token
from ..models import NewsletterSubscribe
from ..utils import generate_resubscribe_link
from .blueprint import newsletter_subscribe
@newsletter_subscribe.route('/unsubscribe/<token>', methods=['GET'])
def unsubscribe(token):
email_str = decode_token(token)
if email_str is None:
if not request.is_json:
# Return redirect view
#return redirect(get_url())
return
return jsonify({'errors': 'Invalid token given.'}), HTTPStatus.NOT_FOUND
else:
email = NewsletterSubscribe.get_by(email=email_str)
# Commit only if the user is still active
if email.is_active:
email.is_active = False
email.save(commit=True)
if not request.is_json:
return render_template('newsletter_subscribe/email/confirm_unsubscribe.html',
resubscribe_link=generate_resubscribe_link(email.email))
return jsonify({
'email': email,
'status': 'You are successfully unsubscribed from our mailing list.',
})
|
[
"flask.jsonify",
"backend.utils.decode_token"
] |
[((492, 511), 'backend.utils.decode_token', 'decode_token', (['token'], {}), '(token)\n', (504, 511), False, 'from backend.utils import decode_token\n'), ((1190, 1289), 'flask.jsonify', 'jsonify', (["{'email': email, 'status':\n 'You are successfully unsubscribed from our mailing list.'}"], {}), "({'email': email, 'status':\n 'You are successfully unsubscribed from our mailing list.'})\n", (1197, 1289), False, 'from flask import render_template, request, jsonify\n'), ((679, 722), 'flask.jsonify', 'jsonify', (["{'errors': 'Invalid token given.'}"], {}), "({'errors': 'Invalid token given.'})\n", (686, 722), False, 'from flask import render_template, request, jsonify\n')]
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import numpy as np
def load_octmi_dat(acquisitionName, basePath="."):
    # Check that the file exists
datFilePath = os.path.join(os.path.normpath(basePath), acquisitionName + "_MI.dat")
if not os.path.exists(datFilePath):
print("Could not stat file", datFilePath)
raise NameError("File does not exist")
    # Count the number of entries
nval = 0
variableList = ""
with open(datFilePath, "r") as f:
for line in f:
if line[0] == "T":
if line != variableList:
variableList = line
# print variableList
else:
nval = nval + 1
variableList = variableList.split(" ")
dictionnaire = dict()
dictionnaire["nval"] = nval
if nval > 1:
for i in range(len(variableList)):
dictionnaire[variableList[i].strip()] = np.zeros(nval)
linenum = 0
with open(datFilePath, "r") as f:
for line in f:
contentList = line.split(" ")
if contentList[0] != "Time":
if nval == 1:
for i in range(len(variableList)):
dictionnaire[variableList[i].strip()] = eval(
contentList[i].strip()
)
else:
for i in range(len(variableList)):
if i < len(contentList):
dataStr = contentList[i].strip()
if dataStr.lower() == "nan":
dictionnaire[variableList[i].strip()][linenum] = np.nan
else:
dictionnaire[variableList[i].strip()][linenum] = eval(
contentList[i].strip()
)
else:
dictionnaire[variableList[i].strip()][linenum] = np.nan
linenum = linenum + 1
return dictionnaire
|
[
"numpy.zeros",
"os.path.exists",
"os.path.normpath"
] |
[((206, 232), 'os.path.normpath', 'os.path.normpath', (['basePath'], {}), '(basePath)\n', (222, 232), False, 'import os\n'), ((274, 301), 'os.path.exists', 'os.path.exists', (['datFilePath'], {}), '(datFilePath)\n', (288, 301), False, 'import os\n'), ((951, 965), 'numpy.zeros', 'np.zeros', (['nval'], {}), '(nval)\n', (959, 965), True, 'import numpy as np\n')]
|
"""
Copyright 2019 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import os
experiments = {
'1 Client': '1Client_IdealBenchmark',
'5 Clients': '5Clients_IdealBenchmark',
'10 Clients': '10Clients_IdealBenchmark'
}
for exp, dir in experiments.items():
os.chdir(dir)
for r in range(1, 6):
os.chdir('run_{}'.format(r))
with open('server_stats.json', 'r') as f:
data = json.load(f)
with open('server_stats.json', 'w') as f:
data['run_start'] = data['run_start'] - 7200000
data['run_end'] = data['run_end'] - 7200000
json.dump(data, f)
os.chdir('..')
os.chdir('..')
|
[
"json.dump",
"json.load",
"os.chdir"
] |
[((799, 812), 'os.chdir', 'os.chdir', (['dir'], {}), '(dir)\n', (807, 812), False, 'import os\n'), ((1186, 1200), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (1194, 1200), False, 'import os\n'), ((1167, 1181), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (1175, 1181), False, 'import os\n'), ((947, 959), 'json.load', 'json.load', (['f'], {}), '(f)\n', (956, 959), False, 'import json\n'), ((1139, 1157), 'json.dump', 'json.dump', (['data', 'f'], {}), '(data, f)\n', (1148, 1157), False, 'import json\n')]
|
"""Test creation of mock data.
"""
import datetime
from papilotte.connectors.mock import mockdata
def test_generate_person():
"Make sure generate_person() doesn not create more than 15 different persons."
num_of_different_objects = 15
generator = mockdata.generate_person(num_of_different_objects)
objects = {}
for _ in range(num_of_different_objects * 10):
obj = next(generator)
buf = objects.get(obj["@id"], [])
buf.append(obj)
objects[obj["@id"]] = buf
for pid in objects:
assert len(objects[pid]) == 10
# make sure persons with same pid contain same data
for pid, objlist in objects.items():
last_obj = None
for obj in objlist:
if last_obj is None:
last_obj = obj
else:
assert last_obj == obj
def test_generate_source():
"Make sure generate_source() does not create more than 15 different sources."
num_of_different_objects = 25
generator = mockdata.generate_source(num_of_different_objects)
objects = {}
for _ in range(num_of_different_objects * 10):
obj = next(generator)
buf = objects.get(obj["@id"], [])
buf.append(obj)
objects[obj["@id"]] = buf
for pid in objects:
assert len(objects[pid]) == 10
    # make sure sources with same pid contain same data
for pid, objlist in objects.items():
last_obj = None
for obj in objlist:
if last_obj is None:
last_obj = obj
else:
assert last_obj == obj
def test_generate_statement():
"Make sure generate_statement() works as expected."
factoid = {
"@id": "Factoid 1",
"createdWhen": "2019-07-21",
"createdBy": "User 1",
"modifiedWhen": "2019-10-12",
"modifiedBy": "User 2",
}
generator = mockdata.generate_statement(factoid, 1)
for i in range(5):
stmt = next(generator)
assert stmt["@id"] == "F1S%d" % (i + 1)
assert stmt["createdBy"] == factoid["createdBy"]
assert stmt["createdWhen"] == factoid["createdWhen"]
assert stmt["modifiedBy"] == factoid["modifiedBy"]
assert stmt["modifiedWhen"] == factoid["modifiedWhen"]
def test_generate_factoid():
"""Test the factoid generator.
"""
generator = mockdata.generate_factoid()
for i in range(100):
factoid = next(generator)
assert factoid["@id"] == "Factoid %03d" % (i + 1)
assert "Person" in factoid["person"]["@id"]
assert "Source" in factoid["source"]["@id"]
assert "statement" in factoid
assert factoid["statement"]["@id"] == "F%dS1" % (i + 1)
def test_make_label_objects():
"Make sure simple object consisting of a label and an uri or created as expected."
for counter in (1, 4):
objects = mockdata.make_label_objects(3, "xxx", counter)
for i, obj in enumerate(objects):
assert obj["label"] == "Xxx %d_%d" % (counter, i + 1)
assert obj["uri"] == "http://example.com/xxx/%d/%d" % (counter, i + 1)
def test_make_date():
"Make date generates a dict consisting of a date-label and a date string."
# make_date might return an empty dict
assert mockdata.make_date(0) is None
assert mockdata.make_date(1) == {"label": "1801", "sortdate": "1801"}
assert mockdata.make_date(2) == {"label": "February 1802", "sortdate": "1802-02"}
assert mockdata.make_date(3) == {"label": "3 March 1803", "sortdate": "1803-03-03"}
assert mockdata.make_date(5) is None
assert mockdata.make_date(6) == {"label": "1806", "sortdate": "1806"}
assert mockdata.make_date(7) == {"label": "July 1807", "sortdate": "1807-07"}
assert mockdata.make_date(8) == {"label": "8 August 1808", "sortdate": "1808-08-08"}
assert mockdata.make_date(9) == {}
def test_make_date_distribution():
"Check if dates are equally distributed in mockdata."
counter = {}
for i in range(1000):
data = mockdata.make_date(i)
if data is None:
counter["None"] = counter.get("None", 0) + 1
elif data == {}:
counter["empty"] = counter.get("empty", 0) + 1
elif data["sortdate"].count("-") == 0:
counter["yyyy"] = counter.get("yyyy", 0) + 1
elif data["sortdate"].count("-") == 1:
counter["yyyy-mm"] = counter.get("yyyy-mm", 0) + 1
elif data["sortdate"].count("-") == 2:
counter["yyyy-mm-dd"] = counter.get("yyyy-mm-dd", 0) + 1
assert counter["None"] == counter["empty"]
assert counter["None"] == counter["yyyy"]
assert counter["None"] == counter["yyyy-mm"]
assert counter["None"] == counter["yyyy-mm-dd"]
def test_uris():
"Test the mockdata get_uri function."
assert mockdata.get_uris(1) == [
"http://example.com/1",
"http://example.com/2",
"http://example.com/3",
]
assert mockdata.get_uris(2) == [
"http://example.com/1",
"http://example.com/2",
"http://example.com/3",
"http://example.com/4",
"http://example.com/5",
"http://example.com/6",
"http://example.com/7",
"http://example.com/8",
]
assert mockdata.get_uris(3) == [
"http://example.com/1",
"http://example.com/2",
"http://example.com/3",
"http://example.com/4",
"http://example.com/5",
"http://example.com/6",
"http://example.com/7",
"http://example.com/8",
"http://example.com/9",
"http://example.com/10",
"http://example.com/11",
"http://example.com/12",
"http://example.com/13",
"http://example.com/14",
"http://example.com/15",
]
def test_get_modifier_distribution():
"""Check if distribution of modifier names is close to equal and if
there are exactly 3 modifiers.
"""
counter = {}
for i in range(999):
modifier = mockdata.get_modifier(i)
counter[modifier] = counter.get(modifier, 0) + 1
assert counter["Modifier 1"] == counter["Modifier 2"]
assert counter["Modifier 1"] == counter["Modifier 3"]
def test_get_modifer():
"Test creation order of get_modifier()."
assert mockdata.get_modifier(1) == "Modifier 3"
assert mockdata.get_modifier(2) == "Modifier 1"
assert mockdata.get_modifier(3) == "Modifier 2"
assert mockdata.get_modifier(4) == "Modifier 3"
assert mockdata.get_modifier(5) == "Modifier 1"
assert mockdata.get_modifier(6) == "Modifier 2"
def test_get_creator_distribution():
"""Check if distribution of creator names is close to equal and if
    there are exactly 5 creators.
"""
counter = {}
for i in range(1000):
modifier = mockdata.get_creator(i)
counter[modifier] = counter.get(modifier, 0) + 1
assert counter["Creator 1"] == counter["Creator 2"]
assert counter["Creator 1"] == counter["Creator 3"]
assert counter["Creator 1"] == counter["Creator 4"]
assert counter["Creator 1"] == counter["Creator 5"]
def test_get_creator():
"Test creation order of get_creator()."
for i in range(1, 6):
assert mockdata.get_creator(i) == "Creator %d" % i
def test_get_datetime():
"Test the mockdata get_date function."
expected = [
"2000-01-01T00:00:00+02:00",
"2000-01-02T10:17:36+02:00",
"2000-01-03T20:35:12+02:00",
"2000-01-05T06:52:48+02:00",
"2000-01-06T17:10:24+02:00",
"2000-01-08T03:28:00+02:00",
"2000-01-09T13:45:36+02:00",
"2000-01-11T00:03:12+02:00",
"2000-01-12T10:20:48+02:00",
"2000-01-13T20:38:24+02:00",
]
base_date = datetime.datetime(2000, 1, 1)
for i in range(10):
assert mockdata.get_datetime(base_date, i) == expected[i]
def test_get_datetime_with_offset():
"Test if getting a date with offset works."
expected = [
"2000-01-01T00:00:00+02:00",
"2000-01-03T08:30:56+02:00",
"2000-01-07T13:28:32+02:00",
"2000-01-13T14:52:48+02:00",
"2000-01-21T12:43:44+02:00",
"2000-01-08T03:28:00+02:00",
"2000-01-15T03:05:36+02:00",
"2000-01-23T23:09:52+02:00",
"2000-02-03T15:40:48+02:00",
"2000-02-16T04:38:24+02:00",
"2000-01-15T06:56:00+02:00",
"2000-01-26T21:40:16+02:00",
"2000-02-09T08:51:12+02:00",
"2000-02-24T16:28:48+02:00",
"2000-03-12T20:33:04+02:00",
"2000-01-22T10:24:00+02:00",
"2000-02-07T16:14:56+02:00",
"2000-02-25T18:32:32+02:00",
"2000-03-16T17:16:48+02:00",
"2000-04-07T12:27:44+02:00",
]
base_date = datetime.datetime(2000, 1, 1)
for i in range(20):
assert mockdata.get_datetime(base_date, i, True) == expected[i]
def test_mod_time_after_creation_time():
"Assert modification cannot be earlier than creation"
base_date = datetime.datetime(2000, 1, 1)
for i in range(1000):
creation_time = mockdata.get_datetime(base_date, i)
modification_time = mockdata.get_datetime(base_date, i, True)
assert creation_time <= modification_time
def test_idempotence():
"Generate a mock data set multiple times and make sure they are identical"
def make_factoids(num):
generated_factoids = []
generator = mockdata.generate_factoid()
for _ in range(num):
generated_factoids.append(next(generator))
return generated_factoids
data_to_compare = make_factoids(250)
for _ in range(10):
assert data_to_compare == make_factoids(250)
def test_make_factoids():
"make_factoids is a convenience function to create test data."
assert len(mockdata.make_factoids(15)) == 15
|
[
"papilotte.connectors.mock.mockdata.make_label_objects",
"papilotte.connectors.mock.mockdata.make_date",
"papilotte.connectors.mock.mockdata.generate_person",
"papilotte.connectors.mock.mockdata.get_datetime",
"papilotte.connectors.mock.mockdata.get_uris",
"papilotte.connectors.mock.mockdata.generate_statement",
"papilotte.connectors.mock.mockdata.make_factoids",
"papilotte.connectors.mock.mockdata.generate_factoid",
"datetime.datetime",
"papilotte.connectors.mock.mockdata.get_creator",
"papilotte.connectors.mock.mockdata.get_modifier",
"papilotte.connectors.mock.mockdata.generate_source"
] |
[((264, 314), 'papilotte.connectors.mock.mockdata.generate_person', 'mockdata.generate_person', (['num_of_different_objects'], {}), '(num_of_different_objects)\n', (288, 314), False, 'from papilotte.connectors.mock import mockdata\n'), ((1009, 1059), 'papilotte.connectors.mock.mockdata.generate_source', 'mockdata.generate_source', (['num_of_different_objects'], {}), '(num_of_different_objects)\n', (1033, 1059), False, 'from papilotte.connectors.mock import mockdata\n'), ((1885, 1924), 'papilotte.connectors.mock.mockdata.generate_statement', 'mockdata.generate_statement', (['factoid', '(1)'], {}), '(factoid, 1)\n', (1912, 1924), False, 'from papilotte.connectors.mock import mockdata\n'), ((2357, 2384), 'papilotte.connectors.mock.mockdata.generate_factoid', 'mockdata.generate_factoid', ([], {}), '()\n', (2382, 2384), False, 'from papilotte.connectors.mock import mockdata\n'), ((7714, 7743), 'datetime.datetime', 'datetime.datetime', (['(2000)', '(1)', '(1)'], {}), '(2000, 1, 1)\n', (7731, 7743), False, 'import datetime\n'), ((8700, 8729), 'datetime.datetime', 'datetime.datetime', (['(2000)', '(1)', '(1)'], {}), '(2000, 1, 1)\n', (8717, 8729), False, 'import datetime\n'), ((8943, 8972), 'datetime.datetime', 'datetime.datetime', (['(2000)', '(1)', '(1)'], {}), '(2000, 1, 1)\n', (8960, 8972), False, 'import datetime\n'), ((2873, 2919), 'papilotte.connectors.mock.mockdata.make_label_objects', 'mockdata.make_label_objects', (['(3)', '"""xxx"""', 'counter'], {}), "(3, 'xxx', counter)\n", (2900, 2919), False, 'from papilotte.connectors.mock import mockdata\n'), ((3268, 3289), 'papilotte.connectors.mock.mockdata.make_date', 'mockdata.make_date', (['(0)'], {}), '(0)\n', (3286, 3289), False, 'from papilotte.connectors.mock import mockdata\n'), ((3309, 3330), 'papilotte.connectors.mock.mockdata.make_date', 'mockdata.make_date', (['(1)'], {}), '(1)\n', (3327, 3330), False, 'from papilotte.connectors.mock import mockdata\n'), ((3383, 3404), 'papilotte.connectors.mock.mockdata.make_date', 'mockdata.make_date', (['(2)'], {}), '(2)\n', (3401, 3404), False, 'from papilotte.connectors.mock import mockdata\n'), ((3469, 3490), 'papilotte.connectors.mock.mockdata.make_date', 'mockdata.make_date', (['(3)'], {}), '(3)\n', (3487, 3490), False, 'from papilotte.connectors.mock import mockdata\n'), ((3557, 3578), 'papilotte.connectors.mock.mockdata.make_date', 'mockdata.make_date', (['(5)'], {}), '(5)\n', (3575, 3578), False, 'from papilotte.connectors.mock import mockdata\n'), ((3598, 3619), 'papilotte.connectors.mock.mockdata.make_date', 'mockdata.make_date', (['(6)'], {}), '(6)\n', (3616, 3619), False, 'from papilotte.connectors.mock import mockdata\n'), ((3672, 3693), 'papilotte.connectors.mock.mockdata.make_date', 'mockdata.make_date', (['(7)'], {}), '(7)\n', (3690, 3693), False, 'from papilotte.connectors.mock import mockdata\n'), ((3754, 3775), 'papilotte.connectors.mock.mockdata.make_date', 'mockdata.make_date', (['(8)'], {}), '(8)\n', (3772, 3775), False, 'from papilotte.connectors.mock import mockdata\n'), ((3843, 3864), 'papilotte.connectors.mock.mockdata.make_date', 'mockdata.make_date', (['(9)'], {}), '(9)\n', (3861, 3864), False, 'from papilotte.connectors.mock import mockdata\n'), ((4024, 4045), 'papilotte.connectors.mock.mockdata.make_date', 'mockdata.make_date', (['i'], {}), '(i)\n', (4042, 4045), False, 'from papilotte.connectors.mock import mockdata\n'), ((4808, 4828), 'papilotte.connectors.mock.mockdata.get_uris', 'mockdata.get_uris', (['(1)'], {}), '(1)\n', (4825, 4828), False, 'from 
papilotte.connectors.mock import mockdata\n'), ((4947, 4967), 'papilotte.connectors.mock.mockdata.get_uris', 'mockdata.get_uris', (['(2)'], {}), '(2)\n', (4964, 4967), False, 'from papilotte.connectors.mock import mockdata\n'), ((5246, 5266), 'papilotte.connectors.mock.mockdata.get_uris', 'mockdata.get_uris', (['(3)'], {}), '(3)\n', (5263, 5266), False, 'from papilotte.connectors.mock import mockdata\n'), ((5980, 6004), 'papilotte.connectors.mock.mockdata.get_modifier', 'mockdata.get_modifier', (['i'], {}), '(i)\n', (6001, 6004), False, 'from papilotte.connectors.mock import mockdata\n'), ((6260, 6284), 'papilotte.connectors.mock.mockdata.get_modifier', 'mockdata.get_modifier', (['(1)'], {}), '(1)\n', (6281, 6284), False, 'from papilotte.connectors.mock import mockdata\n'), ((6312, 6336), 'papilotte.connectors.mock.mockdata.get_modifier', 'mockdata.get_modifier', (['(2)'], {}), '(2)\n', (6333, 6336), False, 'from papilotte.connectors.mock import mockdata\n'), ((6364, 6388), 'papilotte.connectors.mock.mockdata.get_modifier', 'mockdata.get_modifier', (['(3)'], {}), '(3)\n', (6385, 6388), False, 'from papilotte.connectors.mock import mockdata\n'), ((6416, 6440), 'papilotte.connectors.mock.mockdata.get_modifier', 'mockdata.get_modifier', (['(4)'], {}), '(4)\n', (6437, 6440), False, 'from papilotte.connectors.mock import mockdata\n'), ((6468, 6492), 'papilotte.connectors.mock.mockdata.get_modifier', 'mockdata.get_modifier', (['(5)'], {}), '(5)\n', (6489, 6492), False, 'from papilotte.connectors.mock import mockdata\n'), ((6520, 6544), 'papilotte.connectors.mock.mockdata.get_modifier', 'mockdata.get_modifier', (['(6)'], {}), '(6)\n', (6541, 6544), False, 'from papilotte.connectors.mock import mockdata\n'), ((6775, 6798), 'papilotte.connectors.mock.mockdata.get_creator', 'mockdata.get_creator', (['i'], {}), '(i)\n', (6795, 6798), False, 'from papilotte.connectors.mock import mockdata\n'), ((9023, 9058), 'papilotte.connectors.mock.mockdata.get_datetime', 'mockdata.get_datetime', (['base_date', 'i'], {}), '(base_date, i)\n', (9044, 9058), False, 'from papilotte.connectors.mock import mockdata\n'), ((9087, 9128), 'papilotte.connectors.mock.mockdata.get_datetime', 'mockdata.get_datetime', (['base_date', 'i', '(True)'], {}), '(base_date, i, True)\n', (9108, 9128), False, 'from papilotte.connectors.mock import mockdata\n'), ((9365, 9392), 'papilotte.connectors.mock.mockdata.generate_factoid', 'mockdata.generate_factoid', ([], {}), '()\n', (9390, 9392), False, 'from papilotte.connectors.mock import mockdata\n'), ((7191, 7214), 'papilotte.connectors.mock.mockdata.get_creator', 'mockdata.get_creator', (['i'], {}), '(i)\n', (7211, 7214), False, 'from papilotte.connectors.mock import mockdata\n'), ((7783, 7818), 'papilotte.connectors.mock.mockdata.get_datetime', 'mockdata.get_datetime', (['base_date', 'i'], {}), '(base_date, i)\n', (7804, 7818), False, 'from papilotte.connectors.mock import mockdata\n'), ((8769, 8810), 'papilotte.connectors.mock.mockdata.get_datetime', 'mockdata.get_datetime', (['base_date', 'i', '(True)'], {}), '(base_date, i, True)\n', (8790, 8810), False, 'from papilotte.connectors.mock import mockdata\n'), ((9740, 9766), 'papilotte.connectors.mock.mockdata.make_factoids', 'mockdata.make_factoids', (['(15)'], {}), '(15)\n', (9762, 9766), False, 'from papilotte.connectors.mock import mockdata\n')]
|
from bs4 import BeautifulSoup
import time
import pandas as pd
import requests
import datetime
headers={
"User-Agent":"",
"Connection": "keep-alive",
    # how to obtain this Cookie value is explained in the documentation
"Cookie":""
}
sets=124 # number of the latest episode
dates=[] # list of dates used to fill in the url
# iterate over the dates, including begin and end, producing dates formatted like 2020-05-03
begin = datetime.date(2020,5,3)
end = datetime.date(2020,6,9)
d = begin
delta = datetime.timedelta(days=1)
while d <= end:
dates.append(str(d.strftime("%Y-%m-%d")))
d += delta
Cids=[] # list of Cids used to fill in the url
with open('Urls/Cid.txt', 'r') as f:
for line in f.readlines():
Cids.append(line.strip())
for cid in Cids:
    # reset these containers for every video
    dm_data = [] # danmaku (bullet comment) records
    dm_text = [] # danmaku text
    # the eight danmaku attributes plus the danmaku text itself
DM_time = []
DM_mode = []
DM_font = []
DM_color = []
DM_realTime = []
DM_pool = []
DM_userID = []
DM_id = []
DM_text = []
print("正在爬取第" + str(sets) + "期的《睡前消息》弹幕...")
for date in dates:
url="https://api.bilibili.com/x/v2/dm/history?type=1&oid="+cid+"&date="+date
        html=requests.get(url=url,headers=headers) # returns the text content
        html.encoding='utf8'
        soup=BeautifulSoup(html.text,'lxml') # build the soup object
all=soup.find_all("d")
for d in all:
            # danmaku attributes
            dm_data.append(str(d.get("p")).split(","))
            # danmaku text
            dm_text.append(d.get_text())
    # store the fields into the arrays below
for i in dm_data:
DM_time.append(i[0])
DM_mode.append(i[1])
DM_font.append(i[2])
DM_color.append(i[3])
DM_realTime.append(i[4])
DM_pool.append(i[5])
DM_userID.append(i[6])
DM_id.append(i[7])
for i in dm_text:
DM_text.append(i)
dt={"DM_time":DM_time,"DM_mode":DM_mode,"DM_font":DM_font,"DM_color":DM_color,
"DM_realTime":DM_realTime,"DM_pool":DM_pool,"DM_userID":DM_userID,"DM_id":DM_id,"DM_text":DM_text}
d=pd.DataFrame(dt)
    d.to_csv('./Danmu/Danmu-'+str(sets)+'.csv',encoding='utf-8-sig') # save the danmaku data
    print("Danmaku saved to Danmu-"+str(sets)+".csv")
sets-=1
    # sleep for 7 seconds after each page is scraped
    print("Waiting...")
    time.sleep(7)
print("Finished scraping danmaku of episodes 110-124 of 《睡前消息》")
|
[
"pandas.DataFrame",
"datetime.date",
"time.sleep",
"datetime.timedelta",
"requests.get",
"bs4.BeautifulSoup"
] |
[((302, 327), 'datetime.date', 'datetime.date', (['(2020)', '(5)', '(3)'], {}), '(2020, 5, 3)\n', (315, 327), False, 'import datetime\n'), ((332, 357), 'datetime.date', 'datetime.date', (['(2020)', '(6)', '(9)'], {}), '(2020, 6, 9)\n', (345, 357), False, 'import datetime\n'), ((374, 400), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (392, 400), False, 'import datetime\n'), ((1883, 1899), 'pandas.DataFrame', 'pd.DataFrame', (['dt'], {}), '(dt)\n', (1895, 1899), True, 'import pandas as pd\n'), ((2080, 2093), 'time.sleep', 'time.sleep', (['(7)'], {}), '(7)\n', (2090, 2093), False, 'import time\n'), ((1040, 1078), 'requests.get', 'requests.get', ([], {'url': 'url', 'headers': 'headers'}), '(url=url, headers=headers)\n', (1052, 1078), False, 'import requests\n'), ((1128, 1160), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html.text', '"""lxml"""'], {}), "(html.text, 'lxml')\n", (1141, 1160), False, 'from bs4 import BeautifulSoup\n')]
|
# -*- coding: utf-8 -*-
"""
run all ogs5py benchmarks
"""
import sys
import os
import fnmatch
import time
from pexpect.popen_spawn import PopenSpawn
import pexpect
from ogs5py.tools.tools import Output
# pexpect.spawn just runs on unix-like systems
if sys.platform == "win32":
CmdRun = PopenSpawn
else:
CmdRun = pexpect.spawn
def call_script(script, output, timeout=3):
cwd, script_file = os.path.split(script)
args = [sys.executable, "-u", script_file]
try:
child = CmdRun(
" ".join(args), timeout=timeout, logfile=output, cwd=cwd
)
# wait for ogs to finish
child.expect(pexpect.EOF)
except pexpect.TIMEOUT:
output.write("...timeout\n".encode())
def find(pattern, path):
result = []
for root, dirs, files in os.walk(path):
for name in files:
if fnmatch.fnmatch(name, pattern):
result.append(os.path.join(root, name))
return result
if __name__ == "__main__":
timeout = 3 # None for no timeout
out_dir = os.path.join(os.getcwd(), "benchmarks")
# out_dir = os.path.join(os.getcwd(), "benchmarks_FEM_active")
scripts = find("*.py", out_dir)
log_name = os.path.join(
out_dir, "run_log_" + time.strftime("%Y-%m-%d_%H-%M-%S") + ".txt"
)
output = Output(log_name, print_log=True)
for script in scripts:
print(script)
call_script(script, output, timeout=timeout)
output.close()
|
[
"os.getcwd",
"os.walk",
"time.strftime",
"ogs5py.tools.tools.Output",
"os.path.split",
"os.path.join",
"fnmatch.fnmatch"
] |
[((404, 425), 'os.path.split', 'os.path.split', (['script'], {}), '(script)\n', (417, 425), False, 'import os\n'), ((798, 811), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (805, 811), False, 'import os\n'), ((1307, 1339), 'ogs5py.tools.tools.Output', 'Output', (['log_name'], {'print_log': '(True)'}), '(log_name, print_log=True)\n', (1313, 1339), False, 'from ogs5py.tools.tools import Output\n'), ((1056, 1067), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1065, 1067), False, 'import os\n'), ((855, 885), 'fnmatch.fnmatch', 'fnmatch.fnmatch', (['name', 'pattern'], {}), '(name, pattern)\n', (870, 885), False, 'import fnmatch\n'), ((1244, 1278), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d_%H-%M-%S"""'], {}), "('%Y-%m-%d_%H-%M-%S')\n", (1257, 1278), False, 'import time\n'), ((917, 941), 'os.path.join', 'os.path.join', (['root', 'name'], {}), '(root, name)\n', (929, 941), False, 'import os\n')]
|
from django.shortcuts import render, HttpResponse, get_object_or_404
from django.http import Http404
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required
from django.urls import reverse,reverse_lazy
from django.utils.translation import ugettext_lazy as _
from django.views import View
from django.views.generic import TemplateView
from django.views.generic.edit import FormView
import mimetypes
from .models import Style
from .forms import EffectForm,StyleForm
from .utils import get_effect_form_class,render_image
class RenderImageView(View):
def get(self,request,style_name,path):
image = render_image(style_name,path)
content_type = mimetypes.guess_type(image.image.path)
f = open(image.image.path,'rb')
r = HttpResponse(f,content_type=content_type[0])
f.close()
return r
class ModalForm(FormView):
template_name = 'image_styles/modal_form.html'
submit_button = _('Save')
delete_button = ''
title = _('Create')
action = '.'
def get_action(self):
return self.action
def get_submit_button(self):
return self.submit_button
def get_delete_button(self):
return self.delete_button
def get_title(self):
return self.title
def get_context_data(self,**kwargs):
context = super().get_context_data(**kwargs)
context['action'] = self.get_action()
context['submit_button'] = self.get_submit_button()
context['delete_button'] = self.get_delete_button()
context['title'] = self.get_title()
return context
class EffectFormMixin:
effect = None
style = None
title = _('Create Effect')
submit_button = _('Create')
def dispatch(self,request,*args,**kwargs):
self.effect_name = self.kwargs.get('effect_name')
style_id = self.kwargs.get('style_id')
if style_id:
self.style = get_object_or_404(Style,id=style_id)
effect_id = self.kwargs.get('effect_id')
if effect_id and self.effect_name:
from image_styles import models
self.effect = get_object_or_404(getattr(models,self.effect_name),id=effect_id)
return super().dispatch(request,*args,**kwargs)
def get_form_class(self):
form_class = get_effect_form_class(self.effect_name)
if form_class:
return form_class
raise Http404("Not Found")
def get_form_kwargs(self,*args,**kwargs):
data = super().get_form_kwargs(*args,**kwargs)
if self.effect:
data['instance'] = self.effect
return data
def get_submit_button(self):
if self.effect:
return _('Update')
return super().get_submit_button()
def get_title(self):
if self.effect:
return _('Update Effect')
return super().get_title()
def get_action(self):
if self.style:
return reverse(
'image_styles:effect_create',
kwargs={'style_id':self.style.id,'effect_name':self.effect_name}
)
return reverse(
'image_styles:effect_update',
kwargs={'effect':self.effect.id,'effect_name':self.effect_name}
)
def form_valid(self,form):
form.save()
return HttpResponse(_('Effect Created!'))
def delete(self,*args,**kwargs):
if self.effect:
self.effect.delete()
return HttpResponse(_('Effect Removed!'))
return HttpResponse(_('Delete failed!'))
class StyleFormMixin:
style = None
form_class = StyleForm
def dispatch(self,request,*args,**kwargs):
style_id = self.kwargs.get('style_id')
if style_id:
self.style = get_object_or_404(Style,id=style_id)
self.delete_button = _('Delete')
return super().dispatch(request,*args,**kwargs)
def get_form_kwargs(self,*args,**kwargs):
data = super().get_form_kwargs(*args,**kwargs)
if self.style:
data['instance'] = self.style
return data
def get_action(self):
if self.style:
return reverse(
'image_styles:style_update',
kwargs={'style_id':self.style.id}
)
return reverse('image_styles:style_create')
def get_submit_button(self):
if self.style:
return _('Update')
return super().get_submit_button()
def get_title(self):
if self.style:
return _('Update Style')
return super().get_title()
def form_valid(self,form):
form.save()
return HttpResponse(_('Style Created!'))
def delete(self,*args,**kwargs):
if self.style:
self.style.delete()
return HttpResponse(_('Style Removed!'))
return HttpResponse(_('Delete failed!'))
@method_decorator(staff_member_required(),name='dispatch')
class ManageImageStylesView(TemplateView):
template_name = 'image_styles/home.html'
def get_image_styles(self):
ims = []
for s in Style.objects.all():
effects = s.get_effects()
for i in range(len(effects)):
form = get_effect_form_class(effect_model=effects[i]['object'])
if form:
effects[i]['form'] = form(instance=effects[i]['object'])
effects[i]['action'] = reverse(
'image_styles:effect_update',
kwargs = {
'effect_id':effects[i]['object'].id,
'effect_name':effects[i]['object'].get_name()
}
)
ims.append({
'style':s,
'effects':effects,
})
return ims
def get_context_data(self,**kwargs):
context = super().get_context_data(**kwargs)
context['styles'] = self.get_image_styles()
return context
@method_decorator(staff_member_required(),name='dispatch')
class EffectCreateInitView(ModalForm):
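    """
    Two-step effect creation: first asks which effect type to add to the Style,
    then swaps in the concrete effect form and action on submit.
    """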
form_class = EffectForm
submit_button = _('Next')
title = _('Select Effect')
def dispatch(self,request,*args,**kwargs):
self.style = get_object_or_404(Style,id=self.kwargs.get('style_id'))
return super().dispatch(request,*args,**kwargs)
def get_form(self,**kwargs):
form = super().get_form(**kwargs)
form.initial['style'] = self.style
return form
def get_submit_button(self):
if self.form_class != EffectForm:
return _('Create')
return super().get_submit_button()
def get_title(self):
if self.form_class != EffectForm:
return _('Create Effect')
return super().get_title()
def get_action(self):
if self.action == '.':
return reverse('image_styles:effect_create_init',kwargs={'style_id':self.style.id})
return self.action
def form_valid(self,form):
effect_name = form.cleaned_data.get('effect')
self.form_class = get_effect_form_class(effect_name=effect_name)
self.action = reverse(
'image_styles:effect_create',
kwargs={'style_id':self.style.id,'effect_name':effect_name}
)
self.request.method = 'GET'
return super().get(self.request,style_id=self.style.id)
@method_decorator(staff_member_required(),name='dispatch')
class EffectCreateView(EffectFormMixin,ModalForm):
title = _('Create Effect')
submit_button = _('Create')
def get_form(self,**kwargs):
form = super().get_form(**kwargs)
form.initial['style'] = self.style
return form
@method_decorator(staff_member_required(),name='dispatch')
class EffectUpdateView(EffectFormMixin,ModalForm):
pass
@method_decorator(staff_member_required(),name='dispatch')
class StyleView(StyleFormMixin,ModalForm):
pass
|
[
"django.shortcuts.HttpResponse",
"django.contrib.admin.views.decorators.staff_member_required",
"django.urls.reverse",
"django.shortcuts.get_object_or_404",
"django.http.Http404",
"django.utils.translation.ugettext_lazy",
"mimetypes.guess_type"
] |
[((1062, 1071), 'django.utils.translation.ugettext_lazy', '_', (['"""Save"""'], {}), "('Save')\n", (1063, 1071), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1107, 1118), 'django.utils.translation.ugettext_lazy', '_', (['"""Create"""'], {}), "('Create')\n", (1108, 1118), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1777, 1795), 'django.utils.translation.ugettext_lazy', '_', (['"""Create Effect"""'], {}), "('Create Effect')\n", (1778, 1795), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1816, 1827), 'django.utils.translation.ugettext_lazy', '_', (['"""Create"""'], {}), "('Create')\n", (1817, 1827), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4998, 5021), 'django.contrib.admin.views.decorators.staff_member_required', 'staff_member_required', ([], {}), '()\n', (5019, 5021), False, 'from django.contrib.admin.views.decorators import staff_member_required\n'), ((6245, 6254), 'django.utils.translation.ugettext_lazy', '_', (['"""Next"""'], {}), "('Next')\n", (6246, 6254), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((6267, 6285), 'django.utils.translation.ugettext_lazy', '_', (['"""Select Effect"""'], {}), "('Select Effect')\n", (6268, 6285), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((6117, 6140), 'django.contrib.admin.views.decorators.staff_member_required', 'staff_member_required', ([], {}), '()\n', (6138, 6140), False, 'from django.contrib.admin.views.decorators import staff_member_required\n'), ((7615, 7633), 'django.utils.translation.ugettext_lazy', '_', (['"""Create Effect"""'], {}), "('Create Effect')\n", (7616, 7633), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((7654, 7665), 'django.utils.translation.ugettext_lazy', '_', (['"""Create"""'], {}), "('Create')\n", (7655, 7665), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((7511, 7534), 'django.contrib.admin.views.decorators.staff_member_required', 'staff_member_required', ([], {}), '()\n', (7532, 7534), False, 'from django.contrib.admin.views.decorators import staff_member_required\n'), ((7824, 7847), 'django.contrib.admin.views.decorators.staff_member_required', 'staff_member_required', ([], {}), '()\n', (7845, 7847), False, 'from django.contrib.admin.views.decorators import staff_member_required\n'), ((7944, 7967), 'django.contrib.admin.views.decorators.staff_member_required', 'staff_member_required', ([], {}), '()\n', (7965, 7967), False, 'from django.contrib.admin.views.decorators import staff_member_required\n'), ((791, 829), 'mimetypes.guess_type', 'mimetypes.guess_type', (['image.image.path'], {}), '(image.image.path)\n', (811, 829), False, 'import mimetypes\n'), ((882, 927), 'django.shortcuts.HttpResponse', 'HttpResponse', (['f'], {'content_type': 'content_type[0]'}), '(f, content_type=content_type[0])\n', (894, 927), False, 'from django.shortcuts import render, HttpResponse, get_object_or_404\n'), ((2506, 2526), 'django.http.Http404', 'Http404', (['"""Not Found"""'], {}), "('Not Found')\n", (2513, 2526), False, 'from django.http import Http404\n'), ((3205, 3314), 'django.urls.reverse', 'reverse', (['"""image_styles:effect_update"""'], {'kwargs': "{'effect': self.effect.id, 'effect_name': self.effect_name}"}), "('image_styles:effect_update', kwargs={'effect': self.effect.id,\n 'effect_name': self.effect_name})\n", (3212, 3314), False, 'from django.urls import reverse, reverse_lazy\n'), ((4385, 4421), 'django.urls.reverse', 'reverse', 
(['"""image_styles:style_create"""'], {}), "('image_styles:style_create')\n", (4392, 4421), False, 'from django.urls import reverse, reverse_lazy\n'), ((7259, 7364), 'django.urls.reverse', 'reverse', (['"""image_styles:effect_create"""'], {'kwargs': "{'style_id': self.style.id, 'effect_name': effect_name}"}), "('image_styles:effect_create', kwargs={'style_id': self.style.id,\n 'effect_name': effect_name})\n", (7266, 7364), False, 'from django.urls import reverse, reverse_lazy\n'), ((2027, 2064), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Style'], {'id': 'style_id'}), '(Style, id=style_id)\n', (2044, 2064), False, 'from django.shortcuts import render, HttpResponse, get_object_or_404\n'), ((2793, 2804), 'django.utils.translation.ugettext_lazy', '_', (['"""Update"""'], {}), "('Update')\n", (2794, 2804), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2917, 2935), 'django.utils.translation.ugettext_lazy', '_', (['"""Update Effect"""'], {}), "('Update Effect')\n", (2918, 2935), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3040, 3150), 'django.urls.reverse', 'reverse', (['"""image_styles:effect_create"""'], {'kwargs': "{'style_id': self.style.id, 'effect_name': self.effect_name}"}), "('image_styles:effect_create', kwargs={'style_id': self.style.id,\n 'effect_name': self.effect_name})\n", (3047, 3150), False, 'from django.urls import reverse, reverse_lazy\n'), ((3422, 3442), 'django.utils.translation.ugettext_lazy', '_', (['"""Effect Created!"""'], {}), "('Effect Created!')\n", (3423, 3442), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3629, 3648), 'django.utils.translation.ugettext_lazy', '_', (['"""Delete failed!"""'], {}), "('Delete failed!')\n", (3630, 3648), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3858, 3895), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Style'], {'id': 'style_id'}), '(Style, id=style_id)\n', (3875, 3895), False, 'from django.shortcuts import render, HttpResponse, get_object_or_404\n'), ((3928, 3939), 'django.utils.translation.ugettext_lazy', '_', (['"""Delete"""'], {}), "('Delete')\n", (3929, 3939), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4252, 4324), 'django.urls.reverse', 'reverse', (['"""image_styles:style_update"""'], {'kwargs': "{'style_id': self.style.id}"}), "('image_styles:style_update', kwargs={'style_id': self.style.id})\n", (4259, 4324), False, 'from django.urls import reverse, reverse_lazy\n'), ((4498, 4509), 'django.utils.translation.ugettext_lazy', '_', (['"""Update"""'], {}), "('Update')\n", (4499, 4509), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4621, 4638), 'django.utils.translation.ugettext_lazy', '_', (['"""Update Style"""'], {}), "('Update Style')\n", (4622, 4638), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4754, 4773), 'django.utils.translation.ugettext_lazy', '_', (['"""Style Created!"""'], {}), "('Style Created!')\n", (4755, 4773), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4957, 4976), 'django.utils.translation.ugettext_lazy', '_', (['"""Delete failed!"""'], {}), "('Delete failed!')\n", (4958, 4976), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((6701, 6712), 'django.utils.translation.ugettext_lazy', '_', (['"""Create"""'], {}), "('Create')\n", (6702, 6712), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((6843, 6861), 'django.utils.translation.ugettext_lazy', 
'_', (['"""Create Effect"""'], {}), "('Create Effect')\n", (6844, 6861), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((6974, 7052), 'django.urls.reverse', 'reverse', (['"""image_styles:effect_create_init"""'], {'kwargs': "{'style_id': self.style.id}"}), "('image_styles:effect_create_init', kwargs={'style_id': self.style.id})\n", (6981, 7052), False, 'from django.urls import reverse, reverse_lazy\n'), ((3579, 3599), 'django.utils.translation.ugettext_lazy', '_', (['"""Effect Removed!"""'], {}), "('Effect Removed!')\n", (3580, 3599), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4908, 4927), 'django.utils.translation.ugettext_lazy', '_', (['"""Style Removed!"""'], {}), "('Style Removed!')\n", (4909, 4927), True, 'from django.utils.translation import ugettext_lazy as _\n')]
|
from pathlib import Path
import cdsapi
YEARS = [2019]
MONTHS = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
ROOT = Path("wind_data")
ROOT.mkdir(exist_ok=True)
c = cdsapi.Client(key="YOUR_API_KEY")
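# Download the 10 m u/v wind components from ERA5 at 6-hourly resolution,
# writing one NetCDF file per month into ROOT.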
for year in YEARS:
for month in MONTHS:
month = str(month).zfill(2)
c.retrieve(
"reanalysis-era5-single-levels",
{
"product_type": "reanalysis",
"format": "netcdf",
"variable": [
"10m_u_component_of_wind",
"10m_v_component_of_wind",
],
"year": str(year),
"month": month,
"day": [
"01",
"02",
"03",
"04",
"05",
"06",
"07",
"08",
"09",
"10",
"11",
"12",
"13",
"14",
"15",
"16",
"17",
"18",
"19",
"20",
"21",
"22",
"23",
"24",
"25",
"26",
"27",
"28",
"29",
"30",
"31",
],
"time": [
"00:00",
"06:00",
"12:00",
"18:00",
],
},
str(ROOT / f"CDS_wind_{year}_{month}.nc"),
)
|
[
"pathlib.Path",
"cdsapi.Client"
] |
[((112, 129), 'pathlib.Path', 'Path', (['"""wind_data"""'], {}), "('wind_data')\n", (116, 129), False, 'from pathlib import Path\n'), ((161, 194), 'cdsapi.Client', 'cdsapi.Client', ([], {'key': '"""YOUR_API_KEY"""'}), "(key='YOUR_API_KEY')\n", (174, 194), False, 'import cdsapi\n')]
|
import time
from random import choice
from tornado.ioloop import PeriodicCallback
from nltk.chat.util import Chat, reflections
from nltk.chat.eliza import pairs
chat_info = {}
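# Maps each session id to its chat state: Eliza bot, time of the last user
# message, periodic idle callback and current idle delay.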
idle_phrases = [
"Are you still there?",
"Would you like to say something?",
"If you're busy, we can talk later.",
"What are you thinking?",
"Got distracted, did you?",
"Let's change the topic. What makes you happy?",
"Let's talk about something else. When did you last travel?",
"Let's meditate for a few minutes.",
"I'll take a short break. Ping me when you're back.",
]
def open(handler):
# Send an introductory message
handler.write_message('Hello. How are you feeling today?')
# Set up chat configuration in the session
chat = chat_info[handler.session['id']] = {
# This is the Eliza bot that will converse with the user
'bot': Chat(pairs, reflections),
# The time at which the user last sent a message. Used for idle messages
'time': time.time(),
# Schedule a periodic check
'callback': PeriodicCallback(idler(handler), callback_time=5000),
# Send the next idle message after this many seconds.
# This is doubled after every idle message, and reset when the user responds
'delay': 10,
}
chat['callback'].start()
def on_message(handler, message):
# When we receive a message, respond with the chatbot response
chat = chat_info[handler.session['id']]
handler.write_message(chat['bot'].respond(message))
# Note the time of the last message. Reset the idle delay time
chat.update(time=time.time(), delay=10)
def on_close(handler):
    # Stop the periodic idle callback and drop this session's chat state
session = handler.session['id']
chat_info[session]['callback'].stop()
chat_info.pop(session)
def idler(handler):
# Return a method that can be called periodically to send idle messages.
# The handler parameter we get here is stored to send future messages.
def method():
'''
If delay seconds have elapsed since last message, send an idle message.
Then double the delay so that we don't keep sending idle messages.
'''
now = time.time()
chat = chat_info[handler.session['id']]
if chat['time'] < now - chat['delay']:
handler.write_message(choice(idle_phrases))
chat['time'] = now
chat['delay'] = chat['delay'] * 2
return method
|
[
"random.choice",
"nltk.chat.util.Chat",
"time.time"
] |
[((880, 904), 'nltk.chat.util.Chat', 'Chat', (['pairs', 'reflections'], {}), '(pairs, reflections)\n', (884, 904), False, 'from nltk.chat.util import Chat, reflections\n'), ((1003, 1014), 'time.time', 'time.time', ([], {}), '()\n', (1012, 1014), False, 'import time\n'), ((2190, 2201), 'time.time', 'time.time', ([], {}), '()\n', (2199, 2201), False, 'import time\n'), ((1620, 1631), 'time.time', 'time.time', ([], {}), '()\n', (1629, 1631), False, 'import time\n'), ((2331, 2351), 'random.choice', 'choice', (['idle_phrases'], {}), '(idle_phrases)\n', (2337, 2351), False, 'from random import choice\n')]
|
#!/usr/bin/env python3
import logging
import os
import signal
import sys
from .device import Device
class Fan(Device):
@staticmethod
def logTemperature():
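        """Read every thermal zone temperature from sysfs and log it in degrees Celsius."""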
process = os.popen(
"cat /sys/devices/virtual/thermal/thermal_zone*/temp")
stdout = process.read()
zones = [
"AO-therm",
"CPU-therm",
"GPU-therm",
"PLL-therm",
"PMIC-Die (Not real)",
"thermal-fan-est"
]
temperatures = stdout.split("\n")
for temperature_index in range(len(temperatures)):
c_temp = temperatures[temperature_index]
            if c_temp != '':
logging.info(
"{} ----> {} C".format(zones[temperature_index], int(c_temp)/1000))
logging.basicConfig(
level=logging.DEBUG,
format='%(levelname)s: %(asctime)s %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p',
handlers=[
logging.FileHandler("test.log"),
logging.StreamHandler()
])
PID_FILE = "pro.pid"
def refreshPID(killOnly=False):
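    # Terminate the controller recorded in PID_FILE (if any) and, unless killOnly,
    # record the current process id there.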
current_pid = os.getpid()
with open(PID_FILE, 'w+') as pid:
previous_pid = pid.readline()
        if len(previous_pid) != 0:
os.kill(int(previous_pid), signal.SIGTERM)
if not killOnly:
logging.info(
"Starting A/C controller in PID {}".format(current_pid))
pid.write(str(current_pid))
def cleanup(device):
device.shutdown()
logging.shutdown()
os.remove(PID_FILE)
def main(argv):
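    # "stop" as the only argument kills a stale controller and exits; two numeric
    # arguments override the on/off durations (in seconds) of the fan cycle.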
fan = Fan("Normal Fan", 11)
    if len(argv) == 1 and argv[0] == "stop":
refreshPID(True)
cleanup(fan)
logging.warning(
"Killed existing stale process and stopping the device !!")
return
onTime = 2
offTime = 2
    if len(argv) == 2:
onTime = float(argv[0])
offTime = float(argv[1])
refreshPID()
try:
while True:
Fan.logTemperature()
fan.turnOn(onTime)
Fan.logTemperature()
fan.turnOff(offTime)
    except KeyboardInterrupt:
logging.error("Keyboard interrupt occurred, Gracefully closing . . .")
finally:
cleanup(fan)
if __name__ == "__main__":
main(sys.argv[1:])
|
[
"os.remove",
"logging.error",
"os.getpid",
"logging.FileHandler",
"logging.warning",
"os.popen",
"logging.StreamHandler",
"logging.shutdown"
] |
[((1097, 1108), 'os.getpid', 'os.getpid', ([], {}), '()\n', (1106, 1108), False, 'import os\n'), ((1492, 1510), 'logging.shutdown', 'logging.shutdown', ([], {}), '()\n', (1508, 1510), False, 'import logging\n'), ((1515, 1534), 'os.remove', 'os.remove', (['PID_FILE'], {}), '(PID_FILE)\n', (1524, 1534), False, 'import os\n'), ((184, 247), 'os.popen', 'os.popen', (['"""cat /sys/devices/virtual/thermal/thermal_zone*/temp"""'], {}), "('cat /sys/devices/virtual/thermal/thermal_zone*/temp')\n", (192, 247), False, 'import os\n'), ((1685, 1760), 'logging.warning', 'logging.warning', (['"""Killed existing stale process and stopping the device !!"""'], {}), "('Killed existing stale process and stopping the device !!')\n", (1700, 1760), False, 'import logging\n'), ((951, 982), 'logging.FileHandler', 'logging.FileHandler', (['"""test.log"""'], {}), "('test.log')\n", (970, 982), False, 'import logging\n'), ((992, 1015), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (1013, 1015), False, 'import logging\n'), ((2136, 2206), 'logging.error', 'logging.error', (['"""Keyboard interrupt occurred, Gracefully closing . . ."""'], {}), "('Keyboard interrupt occurred, Gracefully closing . . .')\n", (2149, 2206), False, 'import logging\n')]
|
#!/usr/bin/python
# coding=utf-8
# Author: youngfeng
# Update: 07/16/2018
"""
Flash, proposed by Nair et al. (arXiv '18), aims to find the (near) optimal configuration in the unevaluated set.
STEP 1: select 80% of the original data as the dataset
STEP 2: split the dataset into a training set (30 configs) and an unevaluated set (remaining configs)
STEP 3: predict the optimal configuration in the unevaluated set, then move it from the unevaluated set to the training set.
STEP 4: repeat STEP 3 until the budget (50 configs) is used up.
The details of Flash are introduced in the paper "Finding Faster Configurations using FLASH".
"""
import pandas as pd
import random as rd
import numpy as np
from sklearn.tree import DecisionTreeRegressor
class config_node:
"""
	for each configuration, we create a config_node object to save its information
index : actual rank
features : feature list
perfs : actual performance
"""
def __init__(self, index, features, perfs, predicted):
self.index = index
self.features = features
self.perfs = perfs
self.predicted = predicted
def remove_by_index(config_pool, index):
"""
remove the selected configuration
"""
for config in config_pool:
if config.index == index:
config_pool.remove(config)
break
return config_pool
def find_lowest_rank(train_set, test_set):
"""
	train a CART model on train_set and return the best (lowest) actual rank among the predicted top-10 configurations in test_set
"""
sorted_test = sorted(test_set, key=lambda x: x.perfs[-1])
# train data
train_features = [t.features for t in train_set]
train_perfs = [t.perfs[-1] for t in train_set]
# test data
test_perfs = [t.features for t in sorted_test]
cart_model = DecisionTreeRegressor()
cart_model.fit(train_features, train_perfs)
predicted = cart_model.predict(test_perfs)
predicted_id = [[i, p] for i, p in enumerate(predicted)]
# i-> actual rank, p -> predicted value
predicted_sorted = sorted(predicted_id, key=lambda x: x[-1])
# print(predicted_sorted)
# assigning predicted ranks
predicted_rank_sorted = [[p[0], p[-1], i] for i,p in enumerate(predicted_sorted)]
	# p[0] -> actual rank, p[-1] -> predicted value, i -> predicted rank
select_few = predicted_rank_sorted[:10]
	# print the predicted top-10 configurations
# for sf in select_few:
# print("actual rank:", sf[0], " actual value:", sorted_test[sf[0]].perfs[-1], " predicted value:", sf[1], " predicted rank:", sf[2])
# print("-------------")
return np.min([sf[0] for sf in select_few])
def predict_by_cart(train_set, test_set):
"""
	return the predicted optimal configuration
"""
train_features = [config.features for config in train_set]
train_perfs = [config.perfs[-1] for config in train_set]
test_features = [config.features for config in test_set]
cart_model = DecisionTreeRegressor()
cart_model.fit(train_features, train_perfs)
predicted = cart_model.predict(test_features)
predicted_id = [[i,p] for i,p in enumerate(predicted)]
predicted_sorted = sorted(predicted_id, key=lambda x: x[-1]) # sort test_set by predicted performance
return test_set[predicted_sorted[0][0]] # the optimal configuration
def split_data_by_fraction(csv_file, fraction):
"""
split data set and return the 80% data
"""
# step1: read from csv file
pdcontent = pd.read_csv(csv_file)
attr_list = pdcontent.columns # all feature list
# step2: split attribute - method 1
features = [i for i in attr_list if "$<" not in i]
perfs = [i for i in attr_list if "$<" in i]
sortedcontent = pdcontent.sort_values(perfs[-1]) # from small to big
# print(len(sortedcontent))
# step3: collect configuration
configs = list()
for c in range(len(pdcontent)):
configs.append(config_node(c, # actual rank
sortedcontent.iloc[c][features].tolist(), # feature list
sortedcontent.iloc[c][perfs].tolist(), # performance list
sortedcontent.iloc[c][perfs].tolist(), # predicted performance list
))
# for config in configs:
# print(config.index, "-", config.perfs, "-", config.predicted, "-", config.rank)
# step4: data split
# fraction = 0.4 # split fraction
# rd.seed(seed) # random seed
rd.shuffle(configs) # shuffle the configs
indexes = range(len(configs))
train_index = indexes[:int(fraction*len(configs))]
dataset = [configs[i] for i in train_index]
# print(len(dataset))
return dataset
def predict_by_flash(dataset, size=30, budget=50):
"""
use the budget in dataset to train a best model,
return the train_set and unevaluated_set
"""
	# initialize the train set with 30 configurations
rd.shuffle(dataset)
train_set = dataset[:size]
unevaluated_set = dataset
for config in train_set:
unevaluated_set = remove_by_index(unevaluated_set, config.index) # remove train_set
while budget >= 0: # budget equals to 50
optimal_config = predict_by_cart(train_set, unevaluated_set)
# print("[add]:", optimal_config.index)
unevaluated_set = remove_by_index(unevaluated_set, optimal_config.index)
train_set.append(optimal_config)
budget = budget - 1
return [train_set, unevaluated_set]
if __name__ == "__main__":
#######################################################################################
# select 80% data
dataset = split_data_by_fraction("data/Apache_AllMeasurements.csv", 0.8)
print("### initialzation")
for i in dataset:
print(str(i.index), ",", end="")
print("\n-------------")
data = predict_by_flash(dataset)
print("### finally split")
train_set = data[0]
uneval_set = data[1]
for i in train_set:
print(str(i.index), ",", end="")
print("\n-------------")
for i in uneval_set:
print(str(i.index), ",", end="")
print("\n-------------")
#######################################################################################
lowest_rank = find_lowest_rank(train_set, uneval_set)
print(lowest_rank)
|
[
"pandas.read_csv",
"random.shuffle",
"numpy.min",
"sklearn.tree.DecisionTreeRegressor"
] |
[((1632, 1655), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {}), '()\n', (1653, 1655), False, 'from sklearn.tree import DecisionTreeRegressor\n'), ((2411, 2447), 'numpy.min', 'np.min', (['[sf[0] for sf in select_few]'], {}), '([sf[0] for sf in select_few])\n', (2417, 2447), True, 'import numpy as np\n'), ((2738, 2761), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {}), '()\n', (2759, 2761), False, 'from sklearn.tree import DecisionTreeRegressor\n'), ((3228, 3249), 'pandas.read_csv', 'pd.read_csv', (['csv_file'], {}), '(csv_file)\n', (3239, 3249), True, 'import pandas as pd\n'), ((4081, 4100), 'random.shuffle', 'rd.shuffle', (['configs'], {}), '(configs)\n', (4091, 4100), True, 'import random as rd\n'), ((4495, 4514), 'random.shuffle', 'rd.shuffle', (['dataset'], {}), '(dataset)\n', (4505, 4514), True, 'import random as rd\n')]
|
import colorama, traceback
from python_helper.api.src.domain import Constant as c
from python_helper.api.src.service import SettingHelper, StringHelper, EnvironmentHelper, ObjectHelper, ReflectionHelper
LOG = 'LOG'
INFO = 'INFO'
SUCCESS = 'SUCCESS'
SETTING = 'SETTING'
DEBUG = 'DEBUG'
WARNING = 'WARNING'
WRAPPER = 'WRAPPER'
FAILURE = 'FAILURE'
ERROR = 'ERROR'
TEST = 'TEST'
RESET_ALL_COLORS = colorama.Style.RESET_ALL
from python_helper.api.src.helper import LogHelperHelper
global LOG_HELPER_SETTINGS
# import asyncio
# global OUTPUT_PRINT_LIST
# PRINTING = 'PRINTING'
# def loadLogger() :
# global OUTPUT_PRINT_LIST
# try :
# if ObjectHelper.isNone(OUTPUT_PRINT_LIST) :
# OUTPUT_PRINT_LIST = []
# except Exception as exception :
# OUTPUT_PRINT_LIST = []
#
# async def asyncAsyncPrintIt(itArgsAndKwargs) :
# global LOG_HELPER_SETTINGS
# while LOG_HELPER_SETTINGS[PRINTING] :
# print('----------------------------------------------------------------------------------------------------------------------------------------------------------')
# print('----------------------------------------------------------------------------------------------------------------------------------------------------------')
# print('----------------------------------------------------------------------------------------------------------------------------------------------------------')
# print('----------------------------------------------------------------------------------------------------------------------------------------------------------')
# print('----------------------------------------------------------------------------------------------------------------------------------------------------------')
# print('----------------------------------------------------------------------------------------------------------------------------------------------------------')
# print('----------------------------------------------------------------------------------------------------------------------------------------------------------')
# print('------------------------------------------------------------------------ awaiting ------------------------------------------------------------------------')
# print('----------------------------------------------------------------------------------------------------------------------------------------------------------')
# print('----------------------------------------------------------------------------------------------------------------------------------------------------------')
# print('----------------------------------------------------------------------------------------------------------------------------------------------------------')
# print('----------------------------------------------------------------------------------------------------------------------------------------------------------')
# print('----------------------------------------------------------------------------------------------------------------------------------------------------------')
# print('----------------------------------------------------------------------------------------------------------------------------------------------------------')
# print('----------------------------------------------------------------------------------------------------------------------------------------------------------')
# LOG_HELPER_SETTINGS[PRINTING] = True
# print(itArgsAndKwargs[0], **itArgsAndKwargs[1])
#
# async def asyncPrintIt(itArgsAndKwargs) :
# global LOG_HELPER_SETTINGS
# await asyncAsyncPrintIt(itArgsAndKwargs)
# LOG_HELPER_SETTINGS[PRINTING] = False
#
# async def printOutput() :
# global OUTPUT_PRINT_LIST
# while 0 < len(OUTPUT_PRINT_LIST) :
# asyncio.run(asyncPrintIt(OUTPUT_PRINT_LIST.pop(0)))
#
# def logIt(it, **kwargs) :
# global OUTPUT_PRINT_LIST
# shouldPrint = True if 0 == len(OUTPUT_PRINT_LIST) else False
# OUTPUT_PRINT_LIST.append([it, kwargs])
# if shouldPrint :
# printOutput()
# import logging
# LOGGER_INSTANCE = None
# def loadLogger(logger) :
# return logger if ObjectHelper.isNotNone(logger) else logging.getLogger(__name__)
def logIt(it, **kwargs) :
# logging.error(it, **kwargs)
# logging.log(msg=args[0], level=9)
# logger = loadLogger(LOGGER_INSTANCE)
# logger.setLevel(logging.DEBUG)
# logger.info(it)
print(it, **kwargs)
def loadSettings() :
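    # Read the per-level enable flags from the environment (defaulting to TRUE)
    # and switch colorama colors on only when the active environment is local.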
global LOG_HELPER_SETTINGS
# logger = loadLogger(LOGGER_INSTANCE)
# logger.setLevel(logging.DEBUG)
###- logging.basicConfig(filename='example.log', encoding='utf-8', level=logging.DEBUG)
colorama.deinit()
settings = {}
settings[SettingHelper.ACTIVE_ENVIRONMENT] = SettingHelper.getActiveEnvironment()
for level in LogHelperHelper.LEVEL_DICTIONARY :
status = EnvironmentHelper.get(level)
        settings[level] = status if status is not None else c.TRUE
LOG_HELPER_SETTINGS = settings
# if PRINTING not in LOG_HELPER_SETTINGS :
# LOG_HELPER_SETTINGS[PRINTING] = False
if SettingHelper.activeEnvironmentIsLocal() :
colorama.init()
# logging.basicConfig(level=logging.DEBUG)
logIt(RESET_ALL_COLORS, end=c.NOTHING)
loadSettings()
def log(origin, message, level=LOG, exception=None, muteStackTrace=False, newLine=False) :
LogHelperHelper.softLog(origin, message, LOG, muteStackTrace=muteStackTrace, newLine=newLine, exception=exception)
def info(origin, message, level=INFO, exception=None, muteStackTrace=False, newLine=False) :
LogHelperHelper.softLog(origin, message, INFO, muteStackTrace=muteStackTrace, newLine=newLine, exception=exception)
def success(origin, message, muteStackTrace=False, newLine=False) :
LogHelperHelper.softLog(origin, message, SUCCESS, muteStackTrace=muteStackTrace, newLine=newLine)
def setting(origin, message, muteStackTrace=False, newLine=False) :
LogHelperHelper.softLog(origin, message, SETTING, muteStackTrace=muteStackTrace, newLine=newLine)
def debug(origin, message, exception=None, muteStackTrace=False, newLine=False) :
LogHelperHelper.softLog(origin, message, DEBUG, muteStackTrace=muteStackTrace, newLine=newLine, exception=exception)
def warning(origin, message, exception=None, muteStackTrace=False, newLine=False) :
LogHelperHelper.softLog(origin, message, WARNING, muteStackTrace=muteStackTrace, newLine=newLine, exception=exception)
def wraper(origin, message, exception, muteStackTrace=False, newLine=False) :
LogHelperHelper.hardLog(origin, message, exception, WRAPPER, muteStackTrace=muteStackTrace, newLine=newLine)
def failure(origin, message, exception, muteStackTrace=False, newLine=False) :
LogHelperHelper.hardLog(origin, message, exception, FAILURE, muteStackTrace=muteStackTrace, newLine=newLine)
def error(origin, message, exception, muteStackTrace=False, newLine=False) :
LogHelperHelper.hardLog(origin, message, exception, ERROR, muteStackTrace=muteStackTrace, newLine=newLine)
def test(origin, message, exception=None, muteStackTrace=False, newLine=False) :
LogHelperHelper.softLog(origin, message, TEST, muteStackTrace=muteStackTrace, newLine=newLine, exception=exception)
def printLog(message, condition=False, muteStackTrace=False, newLine=True, margin=True, exception=None) :
LogHelperHelper.printMessageLog(LOG, message, condition=condition, muteStackTrace=muteStackTrace, newLine=newLine, margin=margin, exception=exception)
def printInfo(message, condition=False, muteStackTrace=False, newLine=True, margin=True, exception=None) :
LogHelperHelper.printMessageLog(INFO, message, condition=condition, muteStackTrace=muteStackTrace, newLine=newLine, margin=margin, exception=exception)
def printSuccess(message, condition=False, muteStackTrace=False, newLine=True, margin=True) :
LogHelperHelper.printMessageLog(SUCCESS, message, condition=condition, muteStackTrace=muteStackTrace, newLine=newLine, margin=margin)
def printSetting(message, condition=False, muteStackTrace=False, newLine=True, margin=True) :
LogHelperHelper.printMessageLog(SETTING, message, condition=condition, muteStackTrace=muteStackTrace, newLine=newLine, margin=margin)
def printDebug(message, condition=False, muteStackTrace=False, newLine=True, margin=True, exception=None) :
LogHelperHelper.printMessageLog(DEBUG, message, condition=condition, muteStackTrace=muteStackTrace, newLine=newLine, margin=margin, exception=exception)
def printWarning(message, condition=False, muteStackTrace=False, newLine=True, margin=True, exception=None) :
LogHelperHelper.printMessageLog(WARNING, message, condition=condition, muteStackTrace=muteStackTrace, newLine=newLine, margin=margin, exception=exception)
def printWarper(message, condition=False, muteStackTrace=False, newLine=True, margin=True, exception=None) :
LogHelperHelper.printMessageLog(WRAPPER, message, condition=condition, muteStackTrace=muteStackTrace, newLine=newLine, margin=margin, exception=exception)
def printFailure(message, condition=False, muteStackTrace=False, newLine=True, margin=True, exception=None) :
LogHelperHelper.printMessageLog(FAILURE, message, condition=condition, muteStackTrace=muteStackTrace, newLine=newLine, margin=margin, exception=exception)
def printError(message, condition=False, muteStackTrace=False, newLine=True, margin=True, exception=None) :
LogHelperHelper.printMessageLog(ERROR, message, condition=condition, muteStackTrace=muteStackTrace, newLine=newLine, margin=margin, exception=exception)
def printTest(message, condition=False, muteStackTrace=False, newLine=True, margin=True, exception=None) :
LogHelperHelper.printMessageLog(TEST, message, condition=condition, muteStackTrace=muteStackTrace, newLine=newLine, margin=margin, exception=exception)
def prettyPython(
origin,
message,
dictionaryInstance,
quote = c.SINGLE_QUOTE,
tabCount = 0,
nullValue = c.NONE,
trueValue = c.TRUE,
falseValue = c.FALSE,
logLevel = LOG,
condition = True
) :
if condition :
stdout, stderr = EnvironmentHelper.getCurrentSoutStatus()
prettyPythonValue = StringHelper.prettyPython(
dictionaryInstance,
quote = quote,
tabCount = tabCount,
nullValue = nullValue,
trueValue = trueValue,
falseValue = falseValue,
withColors = SettingHelper.activeEnvironmentIsLocal(),
joinAtReturn = False
)
LogHelperHelper.softLog(origin, StringHelper.join([message, c.COLON_SPACE, *prettyPythonValue]), logLevel)
EnvironmentHelper.overrideSoutStatus(stdout, stderr)
def prettyJson(
origin,
message,
dictionaryInstance,
quote = c.DOUBLE_QUOTE,
tabCount = 0,
nullValue = c.NULL_VALUE,
trueValue = c.TRUE_VALUE,
falseValue = c.FALSE_VALUE,
logLevel = LOG,
condition = True
) :
if condition :
stdout, stderr = EnvironmentHelper.getCurrentSoutStatus()
prettyJsonValue = StringHelper.prettyJson(
dictionaryInstance,
quote = quote,
tabCount = tabCount,
nullValue = nullValue,
trueValue = trueValue,
falseValue = falseValue,
withColors = SettingHelper.activeEnvironmentIsLocal(),
joinAtReturn = False
)
LogHelperHelper.softLog(origin, StringHelper.join([message, c.COLON_SPACE, *prettyJsonValue]), logLevel)
EnvironmentHelper.overrideSoutStatus(stdout, stderr)
def getExceptionMessage(exception) :
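    # Returns c.UNKNOWN for empty exceptions and falls back to the exception
    # class name when str(exception) is blank.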
if ObjectHelper.isEmpty(exception) :
return c.UNKNOWN
exceptionAsString = str(exception)
if c.NOTHING == exceptionAsString :
return ReflectionHelper.getName(exception.__class__)
else :
return exceptionAsString
def getTracebackMessage(muteStackTrace) :
tracebackMessage = c.BLANK
try :
tracebackMessage = traceback.format_exc()
except :
tracebackMessage = f'{c.NEW_LINE}'
if muteStackTrace :
return StringHelper.join(tracebackMessage.split(c.NEW_LINE)[-2:], character=c.NEW_LINE)
return LogHelperHelper.NO_TRACEBACK_PRESENT_MESSAGE if LogHelperHelper.NO_TRACEBACK_PRESENT == str(tracebackMessage) else tracebackMessage
|
[
"colorama.init",
"python_helper.api.src.helper.LogHelperHelper.softLog",
"python_helper.api.src.service.ObjectHelper.isEmpty",
"python_helper.api.src.helper.LogHelperHelper.hardLog",
"python_helper.api.src.service.EnvironmentHelper.getCurrentSoutStatus",
"python_helper.api.src.service.EnvironmentHelper.get",
"colorama.deinit",
"python_helper.api.src.service.EnvironmentHelper.overrideSoutStatus",
"python_helper.api.src.service.SettingHelper.getActiveEnvironment",
"traceback.format_exc",
"python_helper.api.src.service.ReflectionHelper.getName",
"python_helper.api.src.service.StringHelper.join",
"python_helper.api.src.service.SettingHelper.activeEnvironmentIsLocal",
"python_helper.api.src.helper.LogHelperHelper.printMessageLog"
] |
[((4804, 4821), 'colorama.deinit', 'colorama.deinit', ([], {}), '()\n', (4819, 4821), False, 'import colorama, traceback\n'), ((4889, 4925), 'python_helper.api.src.service.SettingHelper.getActiveEnvironment', 'SettingHelper.getActiveEnvironment', ([], {}), '()\n', (4923, 4925), False, 'from python_helper.api.src.service import SettingHelper, StringHelper, EnvironmentHelper, ObjectHelper, ReflectionHelper\n'), ((5228, 5268), 'python_helper.api.src.service.SettingHelper.activeEnvironmentIsLocal', 'SettingHelper.activeEnvironmentIsLocal', ([], {}), '()\n', (5266, 5268), False, 'from python_helper.api.src.service import SettingHelper, StringHelper, EnvironmentHelper, ObjectHelper, ReflectionHelper\n'), ((5505, 5623), 'python_helper.api.src.helper.LogHelperHelper.softLog', 'LogHelperHelper.softLog', (['origin', 'message', 'LOG'], {'muteStackTrace': 'muteStackTrace', 'newLine': 'newLine', 'exception': 'exception'}), '(origin, message, LOG, muteStackTrace=muteStackTrace,\n newLine=newLine, exception=exception)\n', (5528, 5623), False, 'from python_helper.api.src.helper import LogHelperHelper\n'), ((5718, 5838), 'python_helper.api.src.helper.LogHelperHelper.softLog', 'LogHelperHelper.softLog', (['origin', 'message', 'INFO'], {'muteStackTrace': 'muteStackTrace', 'newLine': 'newLine', 'exception': 'exception'}), '(origin, message, INFO, muteStackTrace=\n muteStackTrace, newLine=newLine, exception=exception)\n', (5741, 5838), False, 'from python_helper.api.src.helper import LogHelperHelper\n'), ((5907, 6009), 'python_helper.api.src.helper.LogHelperHelper.softLog', 'LogHelperHelper.softLog', (['origin', 'message', 'SUCCESS'], {'muteStackTrace': 'muteStackTrace', 'newLine': 'newLine'}), '(origin, message, SUCCESS, muteStackTrace=\n muteStackTrace, newLine=newLine)\n', (5930, 6009), False, 'from python_helper.api.src.helper import LogHelperHelper\n'), ((6078, 6180), 'python_helper.api.src.helper.LogHelperHelper.softLog', 'LogHelperHelper.softLog', (['origin', 'message', 'SETTING'], {'muteStackTrace': 'muteStackTrace', 'newLine': 'newLine'}), '(origin, message, SETTING, muteStackTrace=\n muteStackTrace, newLine=newLine)\n', (6101, 6180), False, 'from python_helper.api.src.helper import LogHelperHelper\n'), ((6263, 6384), 'python_helper.api.src.helper.LogHelperHelper.softLog', 'LogHelperHelper.softLog', (['origin', 'message', 'DEBUG'], {'muteStackTrace': 'muteStackTrace', 'newLine': 'newLine', 'exception': 'exception'}), '(origin, message, DEBUG, muteStackTrace=\n muteStackTrace, newLine=newLine, exception=exception)\n', (6286, 6384), False, 'from python_helper.api.src.helper import LogHelperHelper\n'), ((6469, 6592), 'python_helper.api.src.helper.LogHelperHelper.softLog', 'LogHelperHelper.softLog', (['origin', 'message', 'WARNING'], {'muteStackTrace': 'muteStackTrace', 'newLine': 'newLine', 'exception': 'exception'}), '(origin, message, WARNING, muteStackTrace=\n muteStackTrace, newLine=newLine, exception=exception)\n', (6492, 6592), False, 'from python_helper.api.src.helper import LogHelperHelper\n'), ((6671, 6784), 'python_helper.api.src.helper.LogHelperHelper.hardLog', 'LogHelperHelper.hardLog', (['origin', 'message', 'exception', 'WRAPPER'], {'muteStackTrace': 'muteStackTrace', 'newLine': 'newLine'}), '(origin, message, exception, WRAPPER, muteStackTrace\n =muteStackTrace, newLine=newLine)\n', (6694, 6784), False, 'from python_helper.api.src.helper import LogHelperHelper\n'), ((6864, 6977), 'python_helper.api.src.helper.LogHelperHelper.hardLog', 'LogHelperHelper.hardLog', (['origin', 'message', 
'exception', 'FAILURE'], {'muteStackTrace': 'muteStackTrace', 'newLine': 'newLine'}), '(origin, message, exception, FAILURE, muteStackTrace\n =muteStackTrace, newLine=newLine)\n', (6887, 6977), False, 'from python_helper.api.src.helper import LogHelperHelper\n'), ((7055, 7166), 'python_helper.api.src.helper.LogHelperHelper.hardLog', 'LogHelperHelper.hardLog', (['origin', 'message', 'exception', 'ERROR'], {'muteStackTrace': 'muteStackTrace', 'newLine': 'newLine'}), '(origin, message, exception, ERROR, muteStackTrace=\n muteStackTrace, newLine=newLine)\n', (7078, 7166), False, 'from python_helper.api.src.helper import LogHelperHelper\n'), ((7248, 7368), 'python_helper.api.src.helper.LogHelperHelper.softLog', 'LogHelperHelper.softLog', (['origin', 'message', 'TEST'], {'muteStackTrace': 'muteStackTrace', 'newLine': 'newLine', 'exception': 'exception'}), '(origin, message, TEST, muteStackTrace=\n muteStackTrace, newLine=newLine, exception=exception)\n', (7271, 7368), False, 'from python_helper.api.src.helper import LogHelperHelper\n'), ((7475, 7633), 'python_helper.api.src.helper.LogHelperHelper.printMessageLog', 'LogHelperHelper.printMessageLog', (['LOG', 'message'], {'condition': 'condition', 'muteStackTrace': 'muteStackTrace', 'newLine': 'newLine', 'margin': 'margin', 'exception': 'exception'}), '(LOG, message, condition=condition,\n muteStackTrace=muteStackTrace, newLine=newLine, margin=margin,\n exception=exception)\n', (7506, 7633), False, 'from python_helper.api.src.helper import LogHelperHelper\n'), ((7738, 7897), 'python_helper.api.src.helper.LogHelperHelper.printMessageLog', 'LogHelperHelper.printMessageLog', (['INFO', 'message'], {'condition': 'condition', 'muteStackTrace': 'muteStackTrace', 'newLine': 'newLine', 'margin': 'margin', 'exception': 'exception'}), '(INFO, message, condition=condition,\n muteStackTrace=muteStackTrace, newLine=newLine, margin=margin,\n exception=exception)\n', (7769, 7897), False, 'from python_helper.api.src.helper import LogHelperHelper\n'), ((7989, 8126), 'python_helper.api.src.helper.LogHelperHelper.printMessageLog', 'LogHelperHelper.printMessageLog', (['SUCCESS', 'message'], {'condition': 'condition', 'muteStackTrace': 'muteStackTrace', 'newLine': 'newLine', 'margin': 'margin'}), '(SUCCESS, message, condition=condition,\n muteStackTrace=muteStackTrace, newLine=newLine, margin=margin)\n', (8020, 8126), False, 'from python_helper.api.src.helper import LogHelperHelper\n'), ((8222, 8359), 'python_helper.api.src.helper.LogHelperHelper.printMessageLog', 'LogHelperHelper.printMessageLog', (['SETTING', 'message'], {'condition': 'condition', 'muteStackTrace': 'muteStackTrace', 'newLine': 'newLine', 'margin': 'margin'}), '(SETTING, message, condition=condition,\n muteStackTrace=muteStackTrace, newLine=newLine, margin=margin)\n', (8253, 8359), False, 'from python_helper.api.src.helper import LogHelperHelper\n'), ((8469, 8629), 'python_helper.api.src.helper.LogHelperHelper.printMessageLog', 'LogHelperHelper.printMessageLog', (['DEBUG', 'message'], {'condition': 'condition', 'muteStackTrace': 'muteStackTrace', 'newLine': 'newLine', 'margin': 'margin', 'exception': 'exception'}), '(DEBUG, message, condition=condition,\n muteStackTrace=muteStackTrace, newLine=newLine, margin=margin,\n exception=exception)\n', (8500, 8629), False, 'from python_helper.api.src.helper import LogHelperHelper\n'), ((8737, 8899), 'python_helper.api.src.helper.LogHelperHelper.printMessageLog', 'LogHelperHelper.printMessageLog', (['WARNING', 'message'], {'condition': 'condition', 
'muteStackTrace': 'muteStackTrace', 'newLine': 'newLine', 'margin': 'margin', 'exception': 'exception'}), '(WARNING, message, condition=condition,\n muteStackTrace=muteStackTrace, newLine=newLine, margin=margin,\n exception=exception)\n', (8768, 8899), False, 'from python_helper.api.src.helper import LogHelperHelper\n'), ((9006, 9168), 'python_helper.api.src.helper.LogHelperHelper.printMessageLog', 'LogHelperHelper.printMessageLog', (['WRAPPER', 'message'], {'condition': 'condition', 'muteStackTrace': 'muteStackTrace', 'newLine': 'newLine', 'margin': 'margin', 'exception': 'exception'}), '(WRAPPER, message, condition=condition,\n muteStackTrace=muteStackTrace, newLine=newLine, margin=margin,\n exception=exception)\n', (9037, 9168), False, 'from python_helper.api.src.helper import LogHelperHelper\n'), ((9276, 9438), 'python_helper.api.src.helper.LogHelperHelper.printMessageLog', 'LogHelperHelper.printMessageLog', (['FAILURE', 'message'], {'condition': 'condition', 'muteStackTrace': 'muteStackTrace', 'newLine': 'newLine', 'margin': 'margin', 'exception': 'exception'}), '(FAILURE, message, condition=condition,\n muteStackTrace=muteStackTrace, newLine=newLine, margin=margin,\n exception=exception)\n', (9307, 9438), False, 'from python_helper.api.src.helper import LogHelperHelper\n'), ((9544, 9704), 'python_helper.api.src.helper.LogHelperHelper.printMessageLog', 'LogHelperHelper.printMessageLog', (['ERROR', 'message'], {'condition': 'condition', 'muteStackTrace': 'muteStackTrace', 'newLine': 'newLine', 'margin': 'margin', 'exception': 'exception'}), '(ERROR, message, condition=condition,\n muteStackTrace=muteStackTrace, newLine=newLine, margin=margin,\n exception=exception)\n', (9575, 9704), False, 'from python_helper.api.src.helper import LogHelperHelper\n'), ((9809, 9968), 'python_helper.api.src.helper.LogHelperHelper.printMessageLog', 'LogHelperHelper.printMessageLog', (['TEST', 'message'], {'condition': 'condition', 'muteStackTrace': 'muteStackTrace', 'newLine': 'newLine', 'margin': 'margin', 'exception': 'exception'}), '(TEST, message, condition=condition,\n muteStackTrace=muteStackTrace, newLine=newLine, margin=margin,\n exception=exception)\n', (9840, 9968), False, 'from python_helper.api.src.helper import LogHelperHelper\n'), ((11820, 11851), 'python_helper.api.src.service.ObjectHelper.isEmpty', 'ObjectHelper.isEmpty', (['exception'], {}), '(exception)\n', (11840, 11851), False, 'from python_helper.api.src.service import SettingHelper, StringHelper, EnvironmentHelper, ObjectHelper, ReflectionHelper\n'), ((4995, 5023), 'python_helper.api.src.service.EnvironmentHelper.get', 'EnvironmentHelper.get', (['level'], {}), '(level)\n', (5016, 5023), False, 'from python_helper.api.src.service import SettingHelper, StringHelper, EnvironmentHelper, ObjectHelper, ReflectionHelper\n'), ((5279, 5294), 'colorama.init', 'colorama.init', ([], {}), '()\n', (5292, 5294), False, 'import colorama, traceback\n'), ((10282, 10322), 'python_helper.api.src.service.EnvironmentHelper.getCurrentSoutStatus', 'EnvironmentHelper.getCurrentSoutStatus', ([], {}), '()\n', (10320, 10322), False, 'from python_helper.api.src.service import SettingHelper, StringHelper, EnvironmentHelper, ObjectHelper, ReflectionHelper\n'), ((10810, 10862), 'python_helper.api.src.service.EnvironmentHelper.overrideSoutStatus', 'EnvironmentHelper.overrideSoutStatus', (['stdout', 'stderr'], {}), '(stdout, stderr)\n', (10846, 10862), False, 'from python_helper.api.src.service import SettingHelper, StringHelper, EnvironmentHelper, ObjectHelper, 
ReflectionHelper\n'), ((11200, 11240), 'python_helper.api.src.service.EnvironmentHelper.getCurrentSoutStatus', 'EnvironmentHelper.getCurrentSoutStatus', ([], {}), '()\n', (11238, 11240), False, 'from python_helper.api.src.service import SettingHelper, StringHelper, EnvironmentHelper, ObjectHelper, ReflectionHelper\n'), ((11722, 11774), 'python_helper.api.src.service.EnvironmentHelper.overrideSoutStatus', 'EnvironmentHelper.overrideSoutStatus', (['stdout', 'stderr'], {}), '(stdout, stderr)\n', (11758, 11774), False, 'from python_helper.api.src.service import SettingHelper, StringHelper, EnvironmentHelper, ObjectHelper, ReflectionHelper\n'), ((11973, 12018), 'python_helper.api.src.service.ReflectionHelper.getName', 'ReflectionHelper.getName', (['exception.__class__'], {}), '(exception.__class__)\n', (11997, 12018), False, 'from python_helper.api.src.service import SettingHelper, StringHelper, EnvironmentHelper, ObjectHelper, ReflectionHelper\n'), ((12174, 12196), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (12194, 12196), False, 'import colorama, traceback\n'), ((10727, 10790), 'python_helper.api.src.service.StringHelper.join', 'StringHelper.join', (['[message, c.COLON_SPACE, *prettyPythonValue]'], {}), '([message, c.COLON_SPACE, *prettyPythonValue])\n', (10744, 10790), False, 'from python_helper.api.src.service import SettingHelper, StringHelper, EnvironmentHelper, ObjectHelper, ReflectionHelper\n'), ((11641, 11702), 'python_helper.api.src.service.StringHelper.join', 'StringHelper.join', (['[message, c.COLON_SPACE, *prettyJsonValue]'], {}), '([message, c.COLON_SPACE, *prettyJsonValue])\n', (11658, 11702), False, 'from python_helper.api.src.service import SettingHelper, StringHelper, EnvironmentHelper, ObjectHelper, ReflectionHelper\n'), ((10602, 10642), 'python_helper.api.src.service.SettingHelper.activeEnvironmentIsLocal', 'SettingHelper.activeEnvironmentIsLocal', ([], {}), '()\n', (10640, 10642), False, 'from python_helper.api.src.service import SettingHelper, StringHelper, EnvironmentHelper, ObjectHelper, ReflectionHelper\n'), ((11516, 11556), 'python_helper.api.src.service.SettingHelper.activeEnvironmentIsLocal', 'SettingHelper.activeEnvironmentIsLocal', ([], {}), '()\n', (11554, 11556), False, 'from python_helper.api.src.service import SettingHelper, StringHelper, EnvironmentHelper, ObjectHelper, ReflectionHelper\n')]
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
SEM_batch_conversion script
Extracts important header info into parameter log, designed to read out pertinent header information from all emsa files within a folder.
No need to convert psmsa into csv ... just always strip header when opening
Output into single log file for import into Excel or elsewhere
"""
#%% Load modules
import glob, sys, os # already run with functions
import pandas as pd
import numpy as np
if 'C:\\Users\\tkc\\Documents\\Python_Scripts\\EDX' not in sys.path:
sys.path.append('C:\\Users\\tkc\\Documents\\Python_Scripts\\EDX')
import EDX_import_functions as EDXimport
import EDX_quant_functions as EDXquant
import EDX_plot_functions as EDXplot
import EDX_refit_tk_gui as EDXrf
import EDX_quantplotter_tk_gui as EDXqpl
#%%
# datapath = filedialog.askdirectorypwd
# initialdir="H:\\Research_data", title = "choose data directory")
filelist=glob.glob('*.psmsa')+glob.glob('*.emsa') # psmsa option
#%% Main file processing loop for emsa or psmsa parameter extraction
# Create parameters log for all SEM-EDX files (autosaved with prior backup) using parameter template
# Checks for existing EDXlogbook correlating filenames w/ sample
EDXlog= EDXimport.getparams(filelist)
EDXlog= EDXimport.getparams(filelist, reprocess=True) # alt version that reacquires params from existing files
EDXlog.to_csv('EDXparamlog.csv',index=False)
# Creation of jpg images with points/areas superimposed (from .psref and .p_s files).. jpgs directly saved
# returns df with spatial areas (automatically saved w/ backup)
SpatialAreasLog=EDXimport.processpointshoot()
#%%
# Combine files with same basename/point name (autosaves altered EDXlog with backup)
EDXlog=EDXimport.combineEDX(EDXlog)
#%% Automated background fitting of SEM-EDX spectra
# can drop or exclude files here if desired (filter of EDXlog)
# Various ways of slicing up above full parameters log list
EDXfiles=EDXlog
EDXfiles=EDXfiles[0:10][:] # grab first ten rows
EDXfiles=EDXfiles.iloc[[0]] # select single row
EDXfiles=EDXfiles[EDXfiles['Filenumber'].str.contains("\+",na=False)] # choose only summed files
EDXfiles=EDXfiles[~EDXfiles['Comments'].str.contains("exclude",na=False, case=False)] # drop files whose comments mark them as excluded
EDXfiles=EDXfiles[EDXfiles['Timeconst']>12500] # backfits fail with small timeconst
#%% Reload of existing files (if reprocessing data) from working directory
EDXlog, Backfitlog, Integlog, Peakfitlog, EDXquantparams, Interferences=EDXimport.loadprocessfiles()
#%%
Elements=EDXimport.pickelemsGUI(EDXquantparams) # interactive element selection
Elements=['S','C','Ca','O','Cr', 'FeL','Fe','Mg','Al','Si'] # meteorites
Elements=['S','C','Ca','O','Cr', 'FeL','Fe','Mg','Al','Si'] # pristine SiC
Elements=['S','C','Ca','O','Cr', 'FeL','Fe','Mg','Al','Si','PtM','PtL','PtL2','Ga','GaL'] # meteorites +FIB artifact
Elements=['N','C','O','FeL','Fe','S','Ca','Mg','Al','Si','Ti'] # refractory analogs
Elements=np.ndarray.tolist(Integlog.Element.unique())# gets prior used element set
Elements.append('PtL2')
Elements.extend(['GaL','PtM', 'Ga','PtL','PtL2'])
# Load energy ranges without peaks for background fitting (various options and can also create custom version)
Fitregionsdf=pd.read_csv('C:\\Users\\tkc\\Documents\\Python_Scripts\\EDX\\SEM_backfit_regions.csv', encoding='utf-8')
Fitregionsdf=pd.read_csv('C:\\Users\\tkc\\Documents\\Python_Scripts\\EDX\\SEM_backfit_regions_alt.csv', encoding='utf-8')
# Version for pristine grains on graphene
Fitregionsdf=pd.read_csv('C:\\Users\\tkc\\Documents\\Python_Scripts\\EDX\\SEM_backfit_regions_pristine.csv', encoding='utf-8')
# TEM version
Fitregionsdf=pd.read_csv('C:\\Users\\tkc\\Documents\\Python_Scripts\\EDX\\TEM_backfit_regions.csv', encoding='utf-8')
Fitregionsdf=pd.read_csv('SEM_backfit_regions_alt.csv', encoding='utf-8') # local version
# If any modifications were made during quant of this data, load local version stored with data
EDXquantparams=pd.read_csv('C:\\Users\\tkc\\Documents\\Python_Scripts\\EDX\\SEMquantparams.csv', encoding='utf-8')
EDXquantparams=pd.read_csv('C:\\Users\\tkc\\Documents\\Python_Scripts\\EDX\\TEMquantparams.csv', encoding='utf-8')
#%%
# Run main quant loop (not autosaved so use to_csv save below after checks)
kwargs={}
Backfitlog, Peakfitlog, Integlog= EDXimport.batchEDXquant(EDXlog, Fitregionsdf, EDXquantparams, Elements, Backfitlog, Integlog, Peakfitlog, **kwargs)
# optional kwargs for above command
kwargs.update({'redo_backfit':True}) # default false for redo, redo of integration but not of background fits; no effect on new spectra
kwargs.update({'redo_integration':False}) # defaults true (false allows skip of existing integrations and gauss peak fits)
# if quant rerun w/o changing backfits (i.e. after custom mods) skip clear of backfits
kwargs.update({'clear_old_backfits':True}) # default false option to not overwrite all backgrounds in csv files (defaults True)
kwargs.update({'savegauss':False}) # optional save of gaussian fit column into spectrum's csv file; default true
# Find/ Replace subset of files (processed in alternate manner) from above log files.. refit of failed fits
Backfitlog.to_csv('Backfitparamslog.csv', index=False)
Peakfitlog.to_csv('Peakfitlog.csv', index=False)
Integlog.to_csv('Integquantlog.csv', index=False)
# After successful refit of subset of files, find/replace entries in original logbooks (saves after finishing)
Backfitlog, Peakfitlog, Integlog = EDXimport.replacelogentries(EDXlog, Backfitlog, Peakfitlog, Integlog)
#%% Run interactive EDXrefitter (if any plots, backfit points, etc. are bad)
EDXrf.launch_refitter()
EDXqpl.launch_plotter(os.getcwd())
# Redo integlog, peakfits if any backfits were changed (first reload saved changes from file)
EDXlog, Backfitlog, Integlog, Peakfitlog, EDXquantparams, Interferences=EDXimport.loadprocessfiles()
kwargs={'newback':False,'overwrite':False} # do not refit or overwrite backgrounds... use ones made with interactive refitter
Backfitlog, Peakfitlog, Integlog= EDXimport.batchEDXquant(EDXlog, Fitregionsdf, EDXquantparams, Elements, **kwargs)
# Manual save of peakfitlog and integlog are needed
Peakfitlog.to_csv('Peakfitlog.csv', index=False)
Integlog.to_csv('Integquantlog.csv', index=False)
#%% PLOTTING to check quality of background fits, peaks, etc.
EDXfiles=EDXlog[0:5] # Selecting subsets of all SEM files
# Plot counts and background over specified energy range
pkwargs={}
pkwargs.update({'xrange':'0.3-10'}) # optional x range for plot (default is 0-10? )
pkwargs.update({'backfitdf':Backfitlog}) # optional plotting of points used to create background fit
pkwargs.update({'backfitpts':False}) # skip background pts but include fits
pkwargs.update({'yrange':[-500,3000]}) # optional y range for plot.. defaults to data range
pkwargs.update({'plotelems':['O','Mg','S','Si', 'Ca', 'Fe', 'FeL']}) # list of elements to label on plots
pkwargs.update({'plotelems':['O','Mg','Si', 'Fe']})
pkwargs.update({'PDFname':'counts_report_9Jan18.pdf'}) # alt save name (defaults to countsback_report.pdf)
pkwargs.update({'savgol':True}) # include savgol differentiated plot (default False)
EDXplot.reportcounts(EDXfiles, EDXquantparams, **pkwargs)
EDXplot.reportcounts(EDXlog, EDXquantparams, **pkwargs)
# plot report with subtracted counts and optionally gaussian peak fits (if they exist)
EDXplot.reportSEMpeaks(EDXfiles, plotelems, SEMquantparams, addgauss=True, PDFname='peak_report.pdf')
# TODO Place center of integration on plot for significant peaks
# plot subtracted data around major elements including corrected counts
EDXplot.reportsubdatamajor(EDXfiles, Integquantlog, PDFname='Subcounts_major_report.pdf')
reportcountspeakfits(EDXfiles, Fitregionsdf, plotrange, plotelems, SEMquantparams)
# Now proceed to EDX_quant_main for interference adjustments, composition calcs, etc.
# Renaming of troublesome p_s and psmsa files (i.e. containing blanks)
psfiles=glob.glob('*.p_s')
badpsfiles=[i for i in psfiles if '\xa0' in i]
for i, psfile in enumerate(badpsfiles):
EDXimport.renamePSset(psfile, '\xa0', '_')
train=pd.read_csv('Backfit_training.csv')
|
[
"sys.path.append",
"EDX_import_functions.getparams",
"EDX_plot_functions.reportSEMpeaks",
"EDX_import_functions.renamePSset",
"pandas.read_csv",
"EDX_import_functions.loadprocessfiles",
"EDX_plot_functions.reportcounts",
"os.getcwd",
"EDX_import_functions.batchEDXquant",
"EDX_import_functions.combineEDX",
"EDX_import_functions.replacelogentries",
"EDX_plot_functions.reportsubdatamajor",
"glob.glob",
"EDX_import_functions.pickelemsGUI",
"EDX_import_functions.processpointshoot",
"EDX_refit_tk_gui.launch_refitter"
] |
[((1213, 1242), 'EDX_import_functions.getparams', 'EDXimport.getparams', (['filelist'], {}), '(filelist)\n', (1232, 1242), True, 'import EDX_import_functions as EDXimport\n'), ((1251, 1296), 'EDX_import_functions.getparams', 'EDXimport.getparams', (['filelist'], {'reprocess': '(True)'}), '(filelist, reprocess=True)\n', (1270, 1296), True, 'import EDX_import_functions as EDXimport\n'), ((1587, 1616), 'EDX_import_functions.processpointshoot', 'EDXimport.processpointshoot', ([], {}), '()\n', (1614, 1616), True, 'import EDX_import_functions as EDXimport\n'), ((1714, 1742), 'EDX_import_functions.combineEDX', 'EDXimport.combineEDX', (['EDXlog'], {}), '(EDXlog)\n', (1734, 1742), True, 'import EDX_import_functions as EDXimport\n'), ((2478, 2506), 'EDX_import_functions.loadprocessfiles', 'EDXimport.loadprocessfiles', ([], {}), '()\n', (2504, 2506), True, 'import EDX_import_functions as EDXimport\n'), ((2521, 2559), 'EDX_import_functions.pickelemsGUI', 'EDXimport.pickelemsGUI', (['EDXquantparams'], {}), '(EDXquantparams)\n', (2543, 2559), True, 'import EDX_import_functions as EDXimport\n'), ((3223, 3336), 'pandas.read_csv', 'pd.read_csv', (['"""C:\\\\Users\\\\tkc\\\\Documents\\\\Python_Scripts\\\\EDX\\\\SEM_backfit_regions.csv"""'], {'encoding': '"""utf-8"""'}), "(\n 'C:\\\\Users\\\\tkc\\\\Documents\\\\Python_Scripts\\\\EDX\\\\SEM_backfit_regions.csv',\n encoding='utf-8')\n", (3234, 3336), True, 'import pandas as pd\n'), ((3341, 3459), 'pandas.read_csv', 'pd.read_csv', (['"""C:\\\\Users\\\\tkc\\\\Documents\\\\Python_Scripts\\\\EDX\\\\SEM_backfit_regions_alt.csv"""'], {'encoding': '"""utf-8"""'}), "(\n 'C:\\\\Users\\\\tkc\\\\Documents\\\\Python_Scripts\\\\EDX\\\\SEM_backfit_regions_alt.csv'\n , encoding='utf-8')\n", (3352, 3459), True, 'import pandas as pd\n'), ((3505, 3628), 'pandas.read_csv', 'pd.read_csv', (['"""C:\\\\Users\\\\tkc\\\\Documents\\\\Python_Scripts\\\\EDX\\\\SEM_backfit_regions_pristine.csv"""'], {'encoding': '"""utf-8"""'}), "(\n 'C:\\\\Users\\\\tkc\\\\Documents\\\\Python_Scripts\\\\EDX\\\\SEM_backfit_regions_pristine.csv'\n , encoding='utf-8')\n", (3516, 3628), True, 'import pandas as pd\n'), ((3646, 3759), 'pandas.read_csv', 'pd.read_csv', (['"""C:\\\\Users\\\\tkc\\\\Documents\\\\Python_Scripts\\\\EDX\\\\TEM_backfit_regions.csv"""'], {'encoding': '"""utf-8"""'}), "(\n 'C:\\\\Users\\\\tkc\\\\Documents\\\\Python_Scripts\\\\EDX\\\\TEM_backfit_regions.csv',\n encoding='utf-8')\n", (3657, 3759), True, 'import pandas as pd\n'), ((3764, 3824), 'pandas.read_csv', 'pd.read_csv', (['"""SEM_backfit_regions_alt.csv"""'], {'encoding': '"""utf-8"""'}), "('SEM_backfit_regions_alt.csv', encoding='utf-8')\n", (3775, 3824), True, 'import pandas as pd\n'), ((3953, 4061), 'pandas.read_csv', 'pd.read_csv', (['"""C:\\\\Users\\\\tkc\\\\Documents\\\\Python_Scripts\\\\EDX\\\\SEMquantparams.csv"""'], {'encoding': '"""utf-8"""'}), "(\n 'C:\\\\Users\\\\tkc\\\\Documents\\\\Python_Scripts\\\\EDX\\\\SEMquantparams.csv',\n encoding='utf-8')\n", (3964, 4061), True, 'import pandas as pd\n'), ((4068, 4176), 'pandas.read_csv', 'pd.read_csv', (['"""C:\\\\Users\\\\tkc\\\\Documents\\\\Python_Scripts\\\\EDX\\\\TEMquantparams.csv"""'], {'encoding': '"""utf-8"""'}), "(\n 'C:\\\\Users\\\\tkc\\\\Documents\\\\Python_Scripts\\\\EDX\\\\TEMquantparams.csv',\n encoding='utf-8')\n", (4079, 4176), True, 'import pandas as pd\n'), ((4293, 4412), 'EDX_import_functions.batchEDXquant', 'EDXimport.batchEDXquant', (['EDXlog', 'Fitregionsdf', 'EDXquantparams', 'Elements', 'Backfitlog', 'Integlog', 'Peakfitlog'], {}), '(EDXlog, 
Fitregionsdf, EDXquantparams, Elements,\n Backfitlog, Integlog, Peakfitlog, **kwargs)\n', (4316, 4412), True, 'import EDX_import_functions as EDXimport\n'), ((5444, 5513), 'EDX_import_functions.replacelogentries', 'EDXimport.replacelogentries', (['EDXlog', 'Backfitlog', 'Peakfitlog', 'Integlog'], {}), '(EDXlog, Backfitlog, Peakfitlog, Integlog)\n', (5471, 5513), True, 'import EDX_import_functions as EDXimport\n'), ((5591, 5614), 'EDX_refit_tk_gui.launch_refitter', 'EDXrf.launch_refitter', ([], {}), '()\n', (5612, 5614), True, 'import EDX_refit_tk_gui as EDXrf\n'), ((5817, 5845), 'EDX_import_functions.loadprocessfiles', 'EDXimport.loadprocessfiles', ([], {}), '()\n', (5843, 5845), True, 'import EDX_import_functions as EDXimport\n'), ((6006, 6092), 'EDX_import_functions.batchEDXquant', 'EDXimport.batchEDXquant', (['EDXlog', 'Fitregionsdf', 'EDXquantparams', 'Elements'], {}), '(EDXlog, Fitregionsdf, EDXquantparams, Elements, **\n kwargs)\n', (6029, 6092), True, 'import EDX_import_functions as EDXimport\n'), ((7137, 7194), 'EDX_plot_functions.reportcounts', 'EDXplot.reportcounts', (['EDXfiles', 'EDXquantparams'], {}), '(EDXfiles, EDXquantparams, **pkwargs)\n', (7157, 7194), True, 'import EDX_plot_functions as EDXplot\n'), ((7195, 7250), 'EDX_plot_functions.reportcounts', 'EDXplot.reportcounts', (['EDXlog', 'EDXquantparams'], {}), '(EDXlog, EDXquantparams, **pkwargs)\n', (7215, 7250), True, 'import EDX_plot_functions as EDXplot\n'), ((7339, 7444), 'EDX_plot_functions.reportSEMpeaks', 'EDXplot.reportSEMpeaks', (['EDXfiles', 'plotelems', 'SEMquantparams'], {'addgauss': '(True)', 'PDFname': '"""peak_report.pdf"""'}), "(EDXfiles, plotelems, SEMquantparams, addgauss=True,\n PDFname='peak_report.pdf')\n", (7361, 7444), True, 'import EDX_plot_functions as EDXplot\n'), ((7582, 7676), 'EDX_plot_functions.reportsubdatamajor', 'EDXplot.reportsubdatamajor', (['EDXfiles', 'Integquantlog'], {'PDFname': '"""Subcounts_major_report.pdf"""'}), "(EDXfiles, Integquantlog, PDFname=\n 'Subcounts_major_report.pdf')\n", (7608, 7676), True, 'import EDX_plot_functions as EDXplot\n'), ((7920, 7938), 'glob.glob', 'glob.glob', (['"""*.p_s"""'], {}), "('*.p_s')\n", (7929, 7938), False, 'import glob, sys, os\n'), ((8080, 8115), 'pandas.read_csv', 'pd.read_csv', (['"""Backfit_training.csv"""'], {}), "('Backfit_training.csv')\n", (8091, 8115), True, 'import pandas as pd\n'), ((532, 597), 'sys.path.append', 'sys.path.append', (['"""C:\\\\Users\\\\tkc\\\\Documents\\\\Python_Scripts\\\\EDX"""'], {}), "('C:\\\\Users\\\\tkc\\\\Documents\\\\Python_Scripts\\\\EDX')\n", (547, 597), False, 'import glob, sys, os\n'), ((910, 930), 'glob.glob', 'glob.glob', (['"""*.psmsa"""'], {}), "('*.psmsa')\n", (919, 930), False, 'import glob, sys, os\n'), ((931, 950), 'glob.glob', 'glob.glob', (['"""*.emsa"""'], {}), "('*.emsa')\n", (940, 950), False, 'import glob, sys, os\n'), ((5638, 5649), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5647, 5649), False, 'import glob, sys, os\n'), ((8030, 8072), 'EDX_import_functions.renamePSset', 'EDXimport.renamePSset', (['psfile', '"""\xa0"""', '"""_"""'], {}), "(psfile, '\\xa0', '_')\n", (8051, 8072), True, 'import EDX_import_functions as EDXimport\n')]
|
#!/usr/bin/env python3
"""
Python script for retrieving IPTC Video Metadata Hub mapping data from a Google sheet
The retrieved data are transformed into HTML and saved as an HTML page.
For IPTC-internal use
Creator: <NAME>
History:
2016-11-25 mws: project started, download and HTML output ok
2020-06-15 BQ: Updated and checked into GitHub
"""
from __future__ import print_function
import pickle
import os
import sys
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from lxml import etree as ET
SCOPES = 'https://www.googleapis.com/auth/spreadsheets.readonly'
CLIENT_SECRET_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'client_secret.json')
APPLICATION_NAME = 'Video Metadata Hub Documentation Generator'
# Constant values
StdVersion = "1.3"
HeaderAppendix = "" # could be " - D-R-A-F-T - "
IPTCApprovalDate = "13 May 2020"
IPTCRevisionDate = "13 May 2020"
CopyrightYear = "2020"
def get_credentials():
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
CLIENT_SECRET_FILE, SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
return creds
def createSpecificMapping(valuesProp, headingtext1, headingtext2, findmoreaturl, mapIdx, filename):
# create the HTML document
xroot = ET.Element('html')
head = ET.SubElement(xroot, 'head')
title = ET.SubElement(head, 'title')
title.text = 'Video Metadata Hub Mapping'
metachset = ET.SubElement(head, 'meta', {'http-equiv': "Content-Type", 'content': "text/html; charset=utf-8"})
csslink1 = ET.SubElement(head, 'link', {'type': 'text/css', 'rel': 'stylesheet', 'href': 'iptcspecs1.css'})
body = ET.SubElement(xroot, 'body')
pageheader = ET.SubElement(body, 'h1', {'class':'pageheader'})
iptcanc = ET.SubElement(pageheader, 'a', {'href':'https://iptc.org'})
iptcimg = ET.SubElement(iptcanc, 'img', {'src':'https://iptc.org/download/resources/logos/iptc-gr_70x70.jpg', 'align':'left', 'border':'0'})
pageheader.text = headingtext1
seeotherdoc1 = ET.SubElement(body, 'p', {'class':'note1'})
seeotherdoc1.text = 'Return to '
seeotherdoc1link1 = ET.SubElement(seeotherdoc1, 'a', {'href':'IPTC-VideoMetadataHub-mapping-Rec_'+StdVersion+'.html'})
seeotherdoc1link1.text = 'all recommended mappings of the Video Metadata Hub.'
seeotherdoc2 = ET.SubElement(body, 'p', {'class':'note1'})
seeotherdoc2.text = 'See the '
seeotherdoc1link2 = ET.SubElement(seeotherdoc2, 'a', {'href':'IPTC-VideoMetadataHub-props-Rec_'+StdVersion+'.html'})
seeotherdoc1link2.text = 'specification of Video Metadata Hub properties'
docdate = ET.SubElement(body, 'p', {'class':'note1'})
docdate.text = 'Mapping recommended on ' + IPTCApprovalDate + '. Document revision as of ' + IPTCRevisionDate + '.'
copyrightnotice = ET.fromstring('<p class="smallnote1">Copyright © ' + CopyrightYear + ', <a href="https://iptc.org">IPTC</a> - all rights reserved. Published under the Creative Commons Attribution 4.0 license <a href="http://creativecommons.org/licenses/by/4.0/">http://creativecommons.org/licenses/by/4.0/</a></p>')
body.append(copyrightnotice)
mappedstdnote = ET.SubElement(body, 'p', {'class':'note1'})
mappedstdnote.text = 'In this table the columns with a blue header are defined by the Video Metadata Hub, the column with the green header is defined by ' + headingtext2
propnote1 = ET.fromstring('<p class="note1">Note on the column headers:<br />EBUcore: based on the EBU Core Metadata Standard.<br />XMP: based on the ISO XMP standard.<br />PVMD: a specification of JSON properties for Photo and Video MetaData by IPTC (aka phovidmd).</p>')
body.append(propnote1)
if not valuesProp:
print('No Property data found.')
else:
table = ET.SubElement(body, 'table', {'class':'spec1 vmhmapping'})
thead = ET.SubElement(table, 'thead')
throw = ET.SubElement(thead, 'tr')
thcol1 = ET.SubElement(throw, 'th', {'class':'hdrcol1'})
thcol1.text = 'Property Group'
thcol2 = ET.SubElement(throw, 'th', {'class':'hdrcol2'})
thcol2.text = 'Property Name'
thcol3 = ET.SubElement(throw, 'th', {'class':'hdrcol3'})
thcol3.text = 'Definition / Semantics'
"""
thcol4 = ET.SubElement(throw, 'th', {'class':'hdrcol4'})
thcol4.text = 'Basic Type/Cardinality'
"""
thcol5 = ET.SubElement(throw, 'th', {'class':'hdrcol5'})
thcol5.text = 'EBUcore'
thcol6 = ET.SubElement(throw, 'th', {'class':'hdrcol6'})
thcol6.text = 'XMP'
thcol7 = ET.SubElement(throw, 'th', {'class':'hdrcol7'})
thcol7.text = 'PVMD JSON'
thcol8 = ET.SubElement(throw, 'th', {'class':'hdrcolNoniptc'})
thcol8.text = headingtext2
# second row with "find more at ..." links
throw = ET.SubElement(thead, 'tr')
thcol1 = ET.SubElement(throw, 'td', {'class':'hdrcol1'})
thcol1.text = ' '
thcol2 = ET.SubElement(throw, 'td', {'class':'hdrcol2'})
thcol2.text = ' '
thcol3 = ET.SubElement(throw, 'td', {'class':'hdrcol3'})
thcol3.text = ' '
"""
thcol4 = ET.SubElement(throw, 'td', {'class':'hdrcol4'})
thcol4.text = ''
"""
moreatlink = valuesProp[0][4]
colcode = ET.fromstring(
'<td class="hdrcolIptc"><a href="' + moreatlink + '" target="_blank">Find more about it at ...</a></td>')
throw.append(colcode)
moreatlink = valuesProp[0][5]
colcode = ET.fromstring(
'<td class="hdrcolIptc"><a href="' + moreatlink + '" target="_blank">Find more about it at ...</a></td>')
throw.append(colcode)
moreatlink = valuesProp[0][6]
colcode = ET.fromstring(
'<td class="hdrcolIptc"><a href="' + moreatlink + '" target="_blank">Find more about it at ...</a></td>')
throw.append(colcode)
moreatlink = valuesProp[0][mapIdx]
if moreatlink != '':
colcode = ET.fromstring(
'<td class="hdrcolNoniptc"><a href="' + moreatlink + '" target="_blank">Find more about it at ...</a></td>')
throw.append(colcode)
else:
colcode = ET.fromstring(
'<td class="hdrcolNoniptc"> </td>')
throw.append(colcode)
tbody = ET.SubElement(table, 'tbody')
for rowcounter in range(2, 186):
xrow = ET.SubElement(tbody, 'tr')
teststr = valuesProp[rowcounter][0]
if teststr == 'Property Structures (PS)':
xrow.set('style', 'background-color: #009999;')
if teststr.find('PS', 0) == 0:
xrow.set('style', 'background-color: #00cccc;')
xcell1 = ET.SubElement(xrow, 'td', { 'class': 'bgdcolIptc'})
try:
valstr = valuesProp[rowcounter][0]
except:
valstr = ' '
xcell1.text = valstr
xcell2 = ET.SubElement(xrow, 'td', { 'class': 'bgdcolIptc'})
try:
valstr = valuesProp[rowcounter][1]
except:
valstr = ' '
xcell2.text = valstr
xcell3 = ET.SubElement(xrow, 'td', { 'class': 'bgdcolIptc'})
try:
valstr = valuesProp[rowcounter][2]
except:
valstr = ' '
xcell3.text = valstr
"""
xcell4 = ET.SubElement(xrow, 'td', { 'class': 'bgdcolIptc'})
try:
valstr = valuesProp[rowcounter][3]
except:
valstr = ' '
xcell4.text = valstr
"""
xcell5 = ET.SubElement(xrow, 'td', { 'class': 'bgdcolIptc'})
try:
valstr = valuesProp[rowcounter][4]
except:
valstr = ' '
xcell5.text = valstr
xcell6 = ET.SubElement(xrow, 'td', { 'class': 'bgdcolIptc'})
try:
valstr = valuesProp[rowcounter][5]
except:
valstr = ' '
xcell6.text = valstr
xcell7 = ET.SubElement(xrow, 'td', { 'class': 'bgdcolIptc'})
try:
valstr = valuesProp[rowcounter][6]
except:
valstr = ' '
xcell7.text = valstr
xcell8 = ET.SubElement(xrow, 'td', { 'class': 'bgdcolNoniptc'})
try:
valstr = valuesProp[rowcounter][mapIdx]
except:
valstr = ' '
xcell8.text = valstr
with open(filename, 'w') as file:
file.write(ET.tostring(xroot, pretty_print=True).decode())
def main():
credentials = get_credentials()
service = build('sheets', 'v4', credentials=credentials)
spreadsheetId = '1TgfvHcsbGvJqmF0iUUnaL-RAdd1lbentmb2LhcM8SDk'
rangeName = 'MappingRec 1.3.1!A4:R'
result1 = service.spreadsheets().values().get(
spreadsheetId=spreadsheetId, range=rangeName).execute()
valuesProp = result1.get('values', [])
# create the HTML document
xroot = ET.Element('html')
head = ET.SubElement(xroot, 'head')
title = ET.SubElement(head, 'title')
title.text = 'Video Metadata Hub Mapping'
metachset = ET.SubElement(head, 'meta', {'http-equiv': "Content-Type", 'content': "text/html; charset=utf-8"})
csslink1 = ET.SubElement(head, 'link', {'type': 'text/css', 'rel': 'stylesheet', 'href': 'iptcspecs1.css'})
body = ET.SubElement(xroot, 'body')
pageheader = ET.SubElement(body, 'h1', {'class':'pageheader'})
iptcanc = ET.SubElement(pageheader, 'a', {'href':'https://iptc.org'})
iptcimg = ET.SubElement(iptcanc, 'img', {'src':'https://iptc.org/download/resources/logos/iptc-gr_70x70.jpg', 'align':'left', 'border':'0'})
pageheader.text = 'IPTC Video Metadata Hub - Recommendation '+ StdVersion +' / all Mappings' + HeaderAppendix
seeotherdoc1 = ET.SubElement(body, 'p', {'class':'note1'})
seeotherdoc1.text = 'See the '
seeotherdoc1link1 = ET.SubElement(seeotherdoc1, 'a', {'href':'IPTC-VideoMetadataHub-props-Rec_'+StdVersion+'.html'})
seeotherdoc1link1.text = 'specification of Video Metadata Hub properties'
docdate = ET.SubElement(body, 'p', {'class':'note1'})
docdate.text = 'Mapping recommended on ' + IPTCApprovalDate + '. Document revision as of ' + IPTCRevisionDate + '.'
copyrightnotice = ET.fromstring('<p class="smallnote1">Copyright © '+ CopyrightYear + ', <a href="https://iptc.org">IPTC</a> - all rights reserved. Published under the Creative Commons Attribution 4.0 license <a href="http://creativecommons.org/licenses/by/4.0/">http://creativecommons.org/licenses/by/4.0/</a></p>')
body.append(copyrightnotice)
mappedstdnote = ET.SubElement(body, 'p', {'class':'note1'})
mappedstdnote.text = 'In this table the columns with a blue header are defined by the Video Metadata Hub, the columns with the green or amber headers are defined by other standards or tools.'
propnote1 = ET.fromstring('<p class="note1">Note on the column headers:<br />EBUcore: based on the EBU Core Metadata Standard.<br />XMP: based on the ISO XMP standard.<br />PVMD: a specification of JSON properties for Photo and Video MetaData by IPTC (aka phovidmd).</p>')
body.append(propnote1)
docnote1 = ET.SubElement(body, 'p', {'class':'smallnote1'})
docnote1.text = 'The header of mappings to other standards provides a link to a table including only this mapping (better for printing)'
if not valuesProp:
print('No Property data found.')
else:
table = ET.SubElement(body, 'table', {'class':'spec1 vmhmapping'})
thead = ET.SubElement(table, 'thead')
throw = ET.SubElement(thead, 'tr')
thcol1 = ET.SubElement(throw, 'th', {'class':'hdrcol1'})
thcol1.text = 'Property Group'
thcol2 = ET.SubElement(throw, 'th', {'class':'hdrcol2'})
thcol2.text = 'Property Name'
thcol3 = ET.SubElement(throw, 'th', {'class':'hdrcol3'})
thcol3.text = 'Definition / Semantics'
"""
thcol4 = ET.SubElement(throw, 'th', {'class':'hdrcol4'})
thcol4.text = 'Basic Type/Cardinality'
"""
thcol5 = ET.SubElement(throw, 'th', {'class':'hdrcol5'})
thcol5.text = 'EBUcore'
thcol6 = ET.SubElement(throw, 'th', {'class':'hdrcol6'})
thcol6.text = 'XMP'
thcol7 = ET.SubElement(throw, 'th', {'class':'hdrcol7'})
thcol7.text = 'IPTC PVMD JSON'
thcol8 = ET.SubElement(throw, 'th', {'class':'hdrcolNoniptc'})
thcol8link = ET.SubElement(thcol8,'a', {'href':'IPTC-VideoMetadataHub-mapping-AppleQT-Rec_'+StdVersion+'.html'})
thcol8link.text = 'Apple Quicktime'
thcol9 = ET.SubElement(throw, 'th', {'class':'hdrcolNoniptc2'})
thcol9link = ET.SubElement(thcol9,'a', {'href':'IPTC-VideoMetadataHub-mapping-MPEG7-Rec_'+StdVersion+'.html'})
thcol9link.text = 'MPEG 7'
thcol10 = ET.SubElement(throw, 'th', {'class':'hdrcolNoniptc'})
thcol10link = ET.SubElement(thcol10,'a', {'href':'IPTC-VideoMetadataHub-mapping-NewsMLG2-Rec_'+StdVersion+'.html'})
thcol10link.text = 'NewsML-G2'
thcol11 = ET.SubElement(throw, 'th', {'class':'hdrcolNoniptc2'})
thcol11link = ET.SubElement(thcol11,'a', {'href':'IPTC-VideoMetadataHub-mapping-PBCore21-Rec_'+StdVersion+'.html'})
thcol11link.text = 'PB Core 2.1'
thcol12 = ET.SubElement(throw, 'th', {'class':'hdrcolNoniptc'})
thcol12link = ET.SubElement(thcol12,'a', {'href':'IPTC-VideoMetadataHub-mapping-SchemaOrg-Rec_'+StdVersion+'.html'})
thcol12link.text = 'Schema.org'
# new in 2018-03
thcol13 = ET.SubElement(throw, 'th', {'class':'hdrcolNoniptc2'})
thcol13link = ET.SubElement(thcol13,'a', {'href':'IPTC-VideoMetadataHub-mapping-SonyXDCAM-Rec_'+StdVersion+'.html'})
thcol13link.text = 'Sony XDCAM & Planning'
thcol14 = ET.SubElement(throw, 'th', {'class':'hdrcolNoniptc'})
thcol14link = ET.SubElement(thcol14,'a', {'href':'IPTC-VideoMetadataHub-mapping-Panasonic-SMPTEP2-Rec_'+StdVersion+'.html'})
thcol14link.text = 'Panasonic/SMPTE P2'
thcol15 = ET.SubElement(throw, 'th', {'class':'hdrcolNoniptc2'})
thcol15link = ET.SubElement(thcol15,'a', {'href':'IPTC-VideoMetadataHub-mapping-CanonVClip-Rec_'+StdVersion+'.html'})
thcol15link.text = 'Canon VideoClip XML'
thcol16 = ET.SubElement(throw, 'th', {'class':'hdrcolNoniptc'})
thcol16link = ET.SubElement(thcol16,'a', {'href':'IPTC-VideoMetadataHub-mapping-exiftool-Rec_'+StdVersion+'.html'})
thcol16link.text = 'exiftool field ids'
thcol17 = ET.SubElement(throw, 'th', {'class':'hdrcolNoniptc2'})
thcol17link = ET.SubElement(thcol17,'a', {'href':'IPTC-VideoMetadataHub-mapping-EIDR-Rec_'+StdVersion+'.html'})
thcol17link.text = 'EIDR Data Fields 2.0'
# second row with "find more at ..." links
throw = ET.SubElement(thead, 'tr')
thcol1 = ET.SubElement(throw, 'td', {'class':'hdrcol1'})
thcol1.text = ' '
thcol2 = ET.SubElement(throw, 'td', {'class':'hdrcol2'})
thcol2.text = ' '
thcol3 = ET.SubElement(throw, 'td', {'class':'hdrcol3'})
thcol3.text = ' '
"""
thcol4 = ET.SubElement(throw, 'td', {'class':'hdrcol4'})
thcol4.text = ''
"""
moreatlink = valuesProp[0][4]
colcode = ET.fromstring(
'<td class="hdrcolIptc"><a href="' + moreatlink + '" target="_blank">Find more about it at ...</a></td>')
throw.append(colcode)
moreatlink = valuesProp[0][5]
colcode = ET.fromstring(
'<td class="hdrcolIptc"><a href="' + moreatlink + '" target="_blank">Find more about it at ...</a></td>')
throw.append(colcode)
moreatlink = valuesProp[0][6]
colcode = ET.fromstring(
'<td class="hdrcolIptc"><a href="' + moreatlink + '" target="_blank">Find more about it at ...</a></td>')
throw.append(colcode)
moreatlink = valuesProp[0][7]
if moreatlink != '':
colcode = ET.fromstring(
'<td class="hdrcolNoniptc"><a href="' + moreatlink + '" target="_blank">Find more about it at ...</a></td>')
throw.append(colcode)
else:
colcode = ET.fromstring(
'<td class="hdrcolNoniptc"> </td>')
throw.append(colcode)
moreatlink = valuesProp[0][9]
if moreatlink != '':
colcode = ET.fromstring(
'<td class="hdrcolNoniptc2"><a href="' + moreatlink + '" target="_blank">Find more about it at ...</a></td>')
throw.append(colcode)
else:
colcode = ET.fromstring(
'<td class="hdrcolNoniptc2"> </td>')
throw.append(colcode)
moreatlink = valuesProp[0][10]
if moreatlink != '':
colcode = ET.fromstring(
'<td class="hdrcolNoniptc"><a href="' + moreatlink + '" target="_blank">Find more about it at ...</a></td>')
throw.append(colcode)
else:
colcode = ET.fromstring(
'<td class="hdrcolNoniptc"> </td>')
throw.append(colcode)
moreatlink = valuesProp[0][11]
if moreatlink != '':
colcode = ET.fromstring(
'<td class="hdrcolNoniptc2"><a href="' + moreatlink + '" target="_blank">Find more about it at ...</a></td>')
throw.append(colcode)
else:
colcode = ET.fromstring(
'<td class="hdrcolNoniptc2"> </td>')
throw.append(colcode)
moreatlink = valuesProp[0][12]
if moreatlink != '':
colcode = ET.fromstring(
'<td class="hdrcolNoniptc"><a href="' + moreatlink + '" target="_blank">Find more about it at ...</a></td>')
throw.append(colcode)
else:
colcode = ET.fromstring(
'<td class="hdrcolNoniptc"> </td>')
throw.append(colcode)
moreatlink = valuesProp[0][13]
if moreatlink != '':
colcode = ET.fromstring(
'<td class="hdrcolNoniptc2"><a href="' + moreatlink + '" target="_blank">Find more about it at ...</a></td>')
throw.append(colcode)
else:
colcode = ET.fromstring(
'<td class="hdrcolNoniptc2"> </td>')
throw.append(colcode)
moreatlink = valuesProp[0][14]
if moreatlink != '':
colcode = ET.fromstring(
'<td class="hdrcolNoniptc"><a href="' + moreatlink + '" target="_blank">Find more about it at ...</a></td>')
throw.append(colcode)
else:
colcode = ET.fromstring(
'<td class="hdrcolNoniptc"> </td>')
throw.append(colcode)
moreatlink = valuesProp[0][15]
if moreatlink != '':
colcode = ET.fromstring(
'<td class="hdrcolNoniptc2"><a href="' + moreatlink + '" target="_blank">Find more about it at ...</a></td>')
throw.append(colcode)
else:
colcode = ET.fromstring(
'<td class="hdrcolNoniptc2"> </td>')
throw.append(colcode)
moreatlink = valuesProp[0][16]
if moreatlink != '':
colcode = ET.fromstring(
'<td class="hdrcolNoniptc"><a href="' + moreatlink + '" target="_blank">Find more about it at ...</a></td>')
throw.append(colcode)
else:
colcode = ET.fromstring(
'<td class="hdrcolNoniptc"> </td>')
throw.append(colcode)
moreatlink = valuesProp[0][17]
if moreatlink != '':
colcode = ET.fromstring(
'<td class="hdrcolNoniptc2"><a href="' + moreatlink + '" target="_blank">Find more about it at ...</a></td>')
throw.append(colcode)
else:
colcode = ET.fromstring(
'<td class="hdrcolNoniptc2"> </td>')
throw.append(colcode)
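        # The ten nearly identical moreatlink blocks above could be collapsed into a small
        # helper without changing behaviour; a possible sketch (not used here):
        #   def moreat_cell(url, css_class):
        #       if url:
        #           return ET.fromstring('<td class="' + css_class + '"><a href="' + url +
        #                                '" target="_blank">Find more about it at ...</a></td>')
        #       return ET.fromstring('<td class="' + css_class + '">&#160;</td>')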
tbody = ET.SubElement(table, 'tbody')
for rowcounter in range(2, 186):
xrow = ET.SubElement(tbody, 'tr')
teststr = valuesProp[rowcounter][0]
if teststr == 'Property Structures (PS)':
xrow.set('style', 'background-color: #009999;')
if teststr.find('PS', 0) == 0:
xrow.set('style', 'background-color: #00cccc;')
xcell1 = ET.SubElement(xrow, 'td', {'class':'bgdcolIptc'})
try:
valstr = valuesProp[rowcounter][0]
except:
valstr = ' '
xcell1.text = valstr
xcell2 = ET.SubElement(xrow, 'td', {'class':'bgdcolIptc'})
try:
valstr = valuesProp[rowcounter][1]
except:
valstr = ' '
xcell2.text = valstr
xcell3 = ET.SubElement(xrow, 'td', {'class':'bgdcolIptc'})
try:
valstr = valuesProp[rowcounter][2]
except:
valstr = ' '
xcell3.text = valstr
"""
xcell4 = ET.SubElement(xrow, 'td', {'class':'bgdcolIptc'})
try:
valstr = valuesProp[rowcounter][3]
except:
valstr = ' '
xcell4.text = valstr
"""
xcell5 = ET.SubElement(xrow, 'td', {'class':'bgdcolIptc'})
try:
valstr = valuesProp[rowcounter][4]
except:
valstr = ' '
xcell5.text = valstr
xcell6 = ET.SubElement(xrow, 'td', {'class':'bgdcolIptc'})
try:
valstr = valuesProp[rowcounter][5]
except:
valstr = ' '
xcell6.text = valstr
xcell7 = ET.SubElement(xrow, 'td', {'class':'bgdcolIptc'})
try:
valstr = valuesProp[rowcounter][6]
except:
valstr = ' '
xcell7.text = valstr
xcell8 = ET.SubElement(xrow, 'td', {'class':'bgdcolNoniptc'})
try:
valstr = valuesProp[rowcounter][7]
except:
valstr = ' '
xcell8.text = valstr
xcell9 = ET.SubElement(xrow, 'td', {'class':'bgdcolNoniptc2'})
try:
valstr = valuesProp[rowcounter][9]
except:
valstr = ' '
xcell9.text = valstr
xcell10 = ET.SubElement(xrow, 'td', {'class':'bgdcolNoniptc'})
try:
valstr = valuesProp[rowcounter][10]
except:
valstr = ' '
xcell10.text = valstr
xcell11 = ET.SubElement(xrow, 'td', {'class':'bgdcolNoniptc2'})
try:
valstr = valuesProp[rowcounter][11]
except:
valstr = ' '
xcell11.text = valstr
xcell12 = ET.SubElement(xrow, 'td', {'class':'bgdcolNoniptc'})
try:
valstr = valuesProp[rowcounter][12]
except:
valstr = ' '
xcell12.text = valstr
xcell13 = ET.SubElement(xrow, 'td', {'class':'bgdcolNoniptc2'})
try:
valstr = valuesProp[rowcounter][13]
except:
valstr = ' '
xcell13.text = valstr
xcell14 = ET.SubElement(xrow, 'td', {'class':'bgdcolNoniptc'})
try:
valstr = valuesProp[rowcounter][14]
except:
valstr = ' '
xcell14.text = valstr
xcell15 = ET.SubElement(xrow, 'td', {'class':'bgdcolNoniptc2'})
try:
valstr = valuesProp[rowcounter][15]
except:
valstr = ' '
xcell15.text = valstr
xcell16 = ET.SubElement(xrow, 'td', {'class':'bgdcolNoniptc'})
try:
valstr = valuesProp[rowcounter][16]
except:
valstr = ' '
xcell16.text = valstr
xcell17 = ET.SubElement(xrow, 'td', {'class':'bgdcolNoniptc2'})
try:
valstr = valuesProp[rowcounter][17]
except:
valstr = ' '
xcell17.text = valstr
filename = "IPTC-VideoMetadataHub-mapping-Rec_"+StdVersion+".html"
with open(filename, 'w') as file:
file.write(ET.tostring(xroot, pretty_print=True).decode())
moreatlink = valuesProp[0][7]
createSpecificMapping(valuesProp, 'IPTC Video Metadata Hub - Recommendation ' + StdVersion + HeaderAppendix + '/ Mapping VMHub - Apple Quicktime', 'Apple Quicktime', moreatlink, 7, 'IPTC-VideoMetadataHub-mapping-AppleQT-Rec_'+StdVersion+'.html')
createSpecificMapping(valuesProp, 'IPTC Video Metadata Hub - Recommendation ' + StdVersion + HeaderAppendix + '/ Mapping VMHub - MPEG 7', 'MPEG 7', moreatlink, 9,'IPTC-VideoMetadataHub-mapping-MPEG7-Rec_'+StdVersion+'.html')
createSpecificMapping(valuesProp, 'IPTC Video Metadata Hub - Recommendation ' + StdVersion + HeaderAppendix + '/ Mapping VMHub - NewsML-G2', 'NewsML-G2', moreatlink, 10,'IPTC-VideoMetadataHub-mapping-NewsMLG2-Rec_'+StdVersion+'.html')
createSpecificMapping(valuesProp, 'IPTC Video Metadata Hub - Recommendation ' + StdVersion + HeaderAppendix + '/ Mapping VMHub - PB Core 2.1', 'PB Core 2.1', moreatlink, 11,'IPTC-VideoMetadataHub-mapping-PBCore21-Rec_'+StdVersion+'.html')
createSpecificMapping(valuesProp, 'IPTC Video Metadata Hub - Recommendation ' + StdVersion + HeaderAppendix + '/ Mapping VMHub - Schema.org', 'Schema.org', moreatlink, 12,'IPTC-VideoMetadataHub-mapping-SchemaOrg-Rec_'+StdVersion+'.html')
# new in 2018-03
createSpecificMapping(valuesProp, 'IPTC Video Metadata Hub - Recommendation ' + StdVersion + HeaderAppendix + '/ Mapping VMHub - Sony Cameras ', 'Sony XDCAM & Planning', moreatlink, 13,'IPTC-VideoMetadataHub-mapping-SonyXDCAM-Rec_'+StdVersion+'.html')
createSpecificMapping(valuesProp, 'IPTC Video Metadata Hub - Recommendation ' + StdVersion + HeaderAppendix + '/ Mapping VMHub - Panasonic Cameras', 'Panasonic/SMPTE P2', moreatlink, 14,'IPTC-VideoMetadataHub-mapping-Panasonic-SMPTEP2-Rec_'+StdVersion+'.html')
createSpecificMapping(valuesProp, 'IPTC Video Metadata Hub - Recommendation ' + StdVersion + HeaderAppendix + '/ Mapping VMHub - Canon Cameras', 'Canon VideoClip XML', moreatlink, 15,'IPTC-VideoMetadataHub-mapping-CanonVClip-Rec_'+StdVersion+'.html')
createSpecificMapping(valuesProp, 'IPTC Video Metadata Hub - Recommendation ' + StdVersion + HeaderAppendix + '/ Mapping VMHub - exiftool', 'exiftool field id', moreatlink, 16,'IPTC-VideoMetadataHub-mapping-exiftool-Rec_'+StdVersion+'.html')
createSpecificMapping(valuesProp, 'IPTC Video Metadata Hub - Recommendation ' + StdVersion + HeaderAppendix + '/ Mapping VMHub - EIDR Data Fields 2.0', 'EIDR Data Fields 2.0', moreatlink, 17,'IPTC-VideoMetadataHub-mapping-EIDR-Rec_'+StdVersion+'.html')
if __name__ == '__main__':
main()
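# Typical run (assuming client_secret.json sits next to this script and token.pickle is
# writable in the working directory):
#   $ python <this script>
# It writes IPTC-VideoMetadataHub-mapping-Rec_1.3.html plus one per-standard mapping page.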
|
[
"os.path.abspath",
"pickle.dump",
"google.auth.transport.requests.Request",
"lxml.etree.fromstring",
"lxml.etree.Element",
"os.path.exists",
"pickle.load",
"lxml.etree.SubElement",
"lxml.etree.tostring",
"google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file",
"googleapiclient.discovery.build"
] |
[((1487, 1517), 'os.path.exists', 'os.path.exists', (['"""token.pickle"""'], {}), "('token.pickle')\n", (1501, 1517), False, 'import os\n'), ((2283, 2301), 'lxml.etree.Element', 'ET.Element', (['"""html"""'], {}), "('html')\n", (2293, 2301), True, 'from lxml import etree as ET\n'), ((2313, 2341), 'lxml.etree.SubElement', 'ET.SubElement', (['xroot', '"""head"""'], {}), "(xroot, 'head')\n", (2326, 2341), True, 'from lxml import etree as ET\n'), ((2354, 2382), 'lxml.etree.SubElement', 'ET.SubElement', (['head', '"""title"""'], {}), "(head, 'title')\n", (2367, 2382), True, 'from lxml import etree as ET\n'), ((2445, 2547), 'lxml.etree.SubElement', 'ET.SubElement', (['head', '"""meta"""', "{'http-equiv': 'Content-Type', 'content': 'text/html; charset=utf-8'}"], {}), "(head, 'meta', {'http-equiv': 'Content-Type', 'content':\n 'text/html; charset=utf-8'})\n", (2458, 2547), True, 'from lxml import etree as ET\n'), ((2559, 2659), 'lxml.etree.SubElement', 'ET.SubElement', (['head', '"""link"""', "{'type': 'text/css', 'rel': 'stylesheet', 'href': 'iptcspecs1.css'}"], {}), "(head, 'link', {'type': 'text/css', 'rel': 'stylesheet',\n 'href': 'iptcspecs1.css'})\n", (2572, 2659), True, 'from lxml import etree as ET\n'), ((2668, 2696), 'lxml.etree.SubElement', 'ET.SubElement', (['xroot', '"""body"""'], {}), "(xroot, 'body')\n", (2681, 2696), True, 'from lxml import etree as ET\n'), ((2714, 2764), 'lxml.etree.SubElement', 'ET.SubElement', (['body', '"""h1"""', "{'class': 'pageheader'}"], {}), "(body, 'h1', {'class': 'pageheader'})\n", (2727, 2764), True, 'from lxml import etree as ET\n'), ((2778, 2838), 'lxml.etree.SubElement', 'ET.SubElement', (['pageheader', '"""a"""', "{'href': 'https://iptc.org'}"], {}), "(pageheader, 'a', {'href': 'https://iptc.org'})\n", (2791, 2838), True, 'from lxml import etree as ET\n'), ((2852, 2993), 'lxml.etree.SubElement', 'ET.SubElement', (['iptcanc', '"""img"""', "{'src': 'https://iptc.org/download/resources/logos/iptc-gr_70x70.jpg',\n 'align': 'left', 'border': '0'}"], {}), "(iptcanc, 'img', {'src':\n 'https://iptc.org/download/resources/logos/iptc-gr_70x70.jpg', 'align':\n 'left', 'border': '0'})\n", (2865, 2993), True, 'from lxml import etree as ET\n'), ((3037, 3081), 'lxml.etree.SubElement', 'ET.SubElement', (['body', '"""p"""', "{'class': 'note1'}"], {}), "(body, 'p', {'class': 'note1'})\n", (3050, 3081), True, 'from lxml import etree as ET\n'), ((3142, 3250), 'lxml.etree.SubElement', 'ET.SubElement', (['seeotherdoc1', '"""a"""', "{'href': 'IPTC-VideoMetadataHub-mapping-Rec_' + StdVersion + '.html'}"], {}), "(seeotherdoc1, 'a', {'href': \n 'IPTC-VideoMetadataHub-mapping-Rec_' + StdVersion + '.html'})\n", (3155, 3250), True, 'from lxml import etree as ET\n'), ((3344, 3388), 'lxml.etree.SubElement', 'ET.SubElement', (['body', '"""p"""', "{'class': 'note1'}"], {}), "(body, 'p', {'class': 'note1'})\n", (3357, 3388), True, 'from lxml import etree as ET\n'), ((3447, 3553), 'lxml.etree.SubElement', 'ET.SubElement', (['seeotherdoc2', '"""a"""', "{'href': 'IPTC-VideoMetadataHub-props-Rec_' + StdVersion + '.html'}"], {}), "(seeotherdoc2, 'a', {'href': \n 'IPTC-VideoMetadataHub-props-Rec_' + StdVersion + '.html'})\n", (3460, 3553), True, 'from lxml import etree as ET\n'), ((3637, 3681), 'lxml.etree.SubElement', 'ET.SubElement', (['body', '"""p"""', "{'class': 'note1'}"], {}), "(body, 'p', {'class': 'note1'})\n", (3650, 3681), True, 'from lxml import etree as ET\n'), ((3824, 4132), 'lxml.etree.fromstring', 'ET.fromstring', (['(\'<p class="smallnote1">Copyright © \' + CopyrightYear 
+\n \', <a href="https://iptc.org">IPTC</a> - all rights reserved. Published under the Creative Commons Attribution 4.0 license <a href="http://creativecommons.org/licenses/by/4.0/">http://creativecommons.org/licenses/by/4.0/</a></p>\'\n )'], {}), '(\'<p class="smallnote1">Copyright © \' + CopyrightYear +\n \', <a href="https://iptc.org">IPTC</a> - all rights reserved. Published under the Creative Commons Attribution 4.0 license <a href="http://creativecommons.org/licenses/by/4.0/">http://creativecommons.org/licenses/by/4.0/</a></p>\'\n )\n', (3837, 4132), True, 'from lxml import etree as ET\n'), ((4178, 4222), 'lxml.etree.SubElement', 'ET.SubElement', (['body', '"""p"""', "{'class': 'note1'}"], {}), "(body, 'p', {'class': 'note1'})\n", (4191, 4222), True, 'from lxml import etree as ET\n'), ((4412, 4682), 'lxml.etree.fromstring', 'ET.fromstring', (['"""<p class="note1">Note on the column headers:<br />EBUcore: based on the EBU Core Metadata Standard.<br />XMP: based on the ISO XMP standard.<br />PVMD: a specification of JSON properties for Photo and Video MetaData by IPTC (aka phovidmd).</p>"""'], {}), '(\n \'<p class="note1">Note on the column headers:<br />EBUcore: based on the EBU Core Metadata Standard.<br />XMP: based on the ISO XMP standard.<br />PVMD: a specification of JSON properties for Photo and Video MetaData by IPTC (aka phovidmd).</p>\'\n )\n', (4425, 4682), True, 'from lxml import etree as ET\n'), ((9742, 9788), 'googleapiclient.discovery.build', 'build', (['"""sheets"""', '"""v4"""'], {'credentials': 'credentials'}), "('sheets', 'v4', credentials=credentials)\n", (9747, 9788), False, 'from googleapiclient.discovery import build\n'), ((10099, 10117), 'lxml.etree.Element', 'ET.Element', (['"""html"""'], {}), "('html')\n", (10109, 10117), True, 'from lxml import etree as ET\n'), ((10129, 10157), 'lxml.etree.SubElement', 'ET.SubElement', (['xroot', '"""head"""'], {}), "(xroot, 'head')\n", (10142, 10157), True, 'from lxml import etree as ET\n'), ((10170, 10198), 'lxml.etree.SubElement', 'ET.SubElement', (['head', '"""title"""'], {}), "(head, 'title')\n", (10183, 10198), True, 'from lxml import etree as ET\n'), ((10261, 10363), 'lxml.etree.SubElement', 'ET.SubElement', (['head', '"""meta"""', "{'http-equiv': 'Content-Type', 'content': 'text/html; charset=utf-8'}"], {}), "(head, 'meta', {'http-equiv': 'Content-Type', 'content':\n 'text/html; charset=utf-8'})\n", (10274, 10363), True, 'from lxml import etree as ET\n'), ((10375, 10475), 'lxml.etree.SubElement', 'ET.SubElement', (['head', '"""link"""', "{'type': 'text/css', 'rel': 'stylesheet', 'href': 'iptcspecs1.css'}"], {}), "(head, 'link', {'type': 'text/css', 'rel': 'stylesheet',\n 'href': 'iptcspecs1.css'})\n", (10388, 10475), True, 'from lxml import etree as ET\n'), ((10484, 10512), 'lxml.etree.SubElement', 'ET.SubElement', (['xroot', '"""body"""'], {}), "(xroot, 'body')\n", (10497, 10512), True, 'from lxml import etree as ET\n'), ((10530, 10580), 'lxml.etree.SubElement', 'ET.SubElement', (['body', '"""h1"""', "{'class': 'pageheader'}"], {}), "(body, 'h1', {'class': 'pageheader'})\n", (10543, 10580), True, 'from lxml import etree as ET\n'), ((10594, 10654), 'lxml.etree.SubElement', 'ET.SubElement', (['pageheader', '"""a"""', "{'href': 'https://iptc.org'}"], {}), "(pageheader, 'a', {'href': 'https://iptc.org'})\n", (10607, 10654), True, 'from lxml import etree as ET\n'), ((10668, 10809), 'lxml.etree.SubElement', 'ET.SubElement', (['iptcanc', '"""img"""', "{'src': 'https://iptc.org/download/resources/logos/iptc-gr_70x70.jpg',\n 
'align': 'left', 'border': '0'}"], {}), "(iptcanc, 'img', {'src':\n 'https://iptc.org/download/resources/logos/iptc-gr_70x70.jpg', 'align':\n 'left', 'border': '0'})\n", (10681, 10809), True, 'from lxml import etree as ET\n'), ((10932, 10976), 'lxml.etree.SubElement', 'ET.SubElement', (['body', '"""p"""', "{'class': 'note1'}"], {}), "(body, 'p', {'class': 'note1'})\n", (10945, 10976), True, 'from lxml import etree as ET\n'), ((11035, 11141), 'lxml.etree.SubElement', 'ET.SubElement', (['seeotherdoc1', '"""a"""', "{'href': 'IPTC-VideoMetadataHub-props-Rec_' + StdVersion + '.html'}"], {}), "(seeotherdoc1, 'a', {'href': \n 'IPTC-VideoMetadataHub-props-Rec_' + StdVersion + '.html'})\n", (11048, 11141), True, 'from lxml import etree as ET\n'), ((11224, 11268), 'lxml.etree.SubElement', 'ET.SubElement', (['body', '"""p"""', "{'class': 'note1'}"], {}), "(body, 'p', {'class': 'note1'})\n", (11237, 11268), True, 'from lxml import etree as ET\n'), ((11410, 11718), 'lxml.etree.fromstring', 'ET.fromstring', (['(\'<p class="smallnote1">Copyright © \' + CopyrightYear +\n \', <a href="https://iptc.org">IPTC</a> - all rights reserved. Published under the Creative Commons Attribution 4.0 license <a href="http://creativecommons.org/licenses/by/4.0/">http://creativecommons.org/licenses/by/4.0/</a></p>\'\n )'], {}), '(\'<p class="smallnote1">Copyright © \' + CopyrightYear +\n \', <a href="https://iptc.org">IPTC</a> - all rights reserved. Published under the Creative Commons Attribution 4.0 license <a href="http://creativecommons.org/licenses/by/4.0/">http://creativecommons.org/licenses/by/4.0/</a></p>\'\n )\n', (11423, 11718), True, 'from lxml import etree as ET\n'), ((11762, 11806), 'lxml.etree.SubElement', 'ET.SubElement', (['body', '"""p"""', "{'class': 'note1'}"], {}), "(body, 'p', {'class': 'note1'})\n", (11775, 11806), True, 'from lxml import etree as ET\n'), ((12018, 12288), 'lxml.etree.fromstring', 'ET.fromstring', (['"""<p class="note1">Note on the column headers:<br />EBUcore: based on the EBU Core Metadata Standard.<br />XMP: based on the ISO XMP standard.<br />PVMD: a specification of JSON properties for Photo and Video MetaData by IPTC (aka phovidmd).</p>"""'], {}), '(\n \'<p class="note1">Note on the column headers:<br />EBUcore: based on the EBU Core Metadata Standard.<br />XMP: based on the ISO XMP standard.<br />PVMD: a specification of JSON properties for Photo and Video MetaData by IPTC (aka phovidmd).</p>\'\n )\n', (12031, 12288), True, 'from lxml import etree as ET\n'), ((12321, 12370), 'lxml.etree.SubElement', 'ET.SubElement', (['body', '"""p"""', "{'class': 'smallnote1'}"], {}), "(body, 'p', {'class': 'smallnote1'})\n", (12334, 12370), True, 'from lxml import etree as ET\n'), ((718, 743), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (733, 743), False, 'import os\n'), ((4792, 4851), 'lxml.etree.SubElement', 'ET.SubElement', (['body', '"""table"""', "{'class': 'spec1 vmhmapping'}"], {}), "(body, 'table', {'class': 'spec1 vmhmapping'})\n", (4805, 4851), True, 'from lxml import etree as ET\n'), ((4867, 4896), 'lxml.etree.SubElement', 'ET.SubElement', (['table', '"""thead"""'], {}), "(table, 'thead')\n", (4880, 4896), True, 'from lxml import etree as ET\n'), ((4913, 4939), 'lxml.etree.SubElement', 'ET.SubElement', (['thead', '"""tr"""'], {}), "(thead, 'tr')\n", (4926, 4939), True, 'from lxml import etree as ET\n'), ((4957, 5005), 'lxml.etree.SubElement', 'ET.SubElement', (['throw', '"""th"""', "{'class': 'hdrcol1'}"], {}), "(throw, 'th', {'class': 'hdrcol1'})\n", 
(4970, 5005), True, 'from lxml import etree as ET\n'), ((5061, 5109), 'lxml.etree.SubElement', 'ET.SubElement', (['throw', '"""th"""', "{'class': 'hdrcol2'}"], {}), "(throw, 'th', {'class': 'hdrcol2'})\n", (5074, 5109), True, 'from lxml import etree as ET\n'), ((5164, 5212), 'lxml.etree.SubElement', 'ET.SubElement', (['throw', '"""th"""', "{'class': 'hdrcol3'}"], {}), "(throw, 'th', {'class': 'hdrcol3'})\n", (5177, 5212), True, 'from lxml import etree as ET\n'), ((5412, 5460), 'lxml.etree.SubElement', 'ET.SubElement', (['throw', '"""th"""', "{'class': 'hdrcol5'}"], {}), "(throw, 'th', {'class': 'hdrcol5'})\n", (5425, 5460), True, 'from lxml import etree as ET\n'), ((5509, 5557), 'lxml.etree.SubElement', 'ET.SubElement', (['throw', '"""th"""', "{'class': 'hdrcol6'}"], {}), "(throw, 'th', {'class': 'hdrcol6'})\n", (5522, 5557), True, 'from lxml import etree as ET\n'), ((5602, 5650), 'lxml.etree.SubElement', 'ET.SubElement', (['throw', '"""th"""', "{'class': 'hdrcol7'}"], {}), "(throw, 'th', {'class': 'hdrcol7'})\n", (5615, 5650), True, 'from lxml import etree as ET\n'), ((5701, 5755), 'lxml.etree.SubElement', 'ET.SubElement', (['throw', '"""th"""', "{'class': 'hdrcolNoniptc'}"], {}), "(throw, 'th', {'class': 'hdrcolNoniptc'})\n", (5714, 5755), True, 'from lxml import etree as ET\n'), ((5858, 5884), 'lxml.etree.SubElement', 'ET.SubElement', (['thead', '"""tr"""'], {}), "(thead, 'tr')\n", (5871, 5884), True, 'from lxml import etree as ET\n'), ((5902, 5950), 'lxml.etree.SubElement', 'ET.SubElement', (['throw', '"""td"""', "{'class': 'hdrcol1'}"], {}), "(throw, 'td', {'class': 'hdrcol1'})\n", (5915, 5950), True, 'from lxml import etree as ET\n'), ((5993, 6041), 'lxml.etree.SubElement', 'ET.SubElement', (['throw', '"""td"""', "{'class': 'hdrcol2'}"], {}), "(throw, 'td', {'class': 'hdrcol2'})\n", (6006, 6041), True, 'from lxml import etree as ET\n'), ((6084, 6132), 'lxml.etree.SubElement', 'ET.SubElement', (['throw', '"""td"""', "{'class': 'hdrcol3'}"], {}), "(throw, 'td', {'class': 'hdrcol3'})\n", (6097, 6132), True, 'from lxml import etree as ET\n'), ((6328, 6451), 'lxml.etree.fromstring', 'ET.fromstring', (['(\'<td class="hdrcolIptc"><a href="\' + moreatlink +\n \'" target="_blank">Find more about it at ...</a></td>\')'], {}), '(\'<td class="hdrcolIptc"><a href="\' + moreatlink +\n \'" target="_blank">Find more about it at ...</a></td>\')\n', (6341, 6451), True, 'from lxml import etree as ET\n'), ((6547, 6670), 'lxml.etree.fromstring', 'ET.fromstring', (['(\'<td class="hdrcolIptc"><a href="\' + moreatlink +\n \'" target="_blank">Find more about it at ...</a></td>\')'], {}), '(\'<td class="hdrcolIptc"><a href="\' + moreatlink +\n \'" target="_blank">Find more about it at ...</a></td>\')\n', (6560, 6670), True, 'from lxml import etree as ET\n'), ((6766, 6889), 'lxml.etree.fromstring', 'ET.fromstring', (['(\'<td class="hdrcolIptc"><a href="\' + moreatlink +\n \'" target="_blank">Find more about it at ...</a></td>\')'], {}), '(\'<td class="hdrcolIptc"><a href="\' + moreatlink +\n \'" target="_blank">Find more about it at ...</a></td>\')\n', (6779, 6889), True, 'from lxml import etree as ET\n'), ((7352, 7381), 'lxml.etree.SubElement', 'ET.SubElement', (['table', '"""tbody"""'], {}), "(table, 'tbody')\n", (7365, 7381), True, 'from lxml import etree as ET\n'), ((12602, 12661), 'lxml.etree.SubElement', 'ET.SubElement', (['body', '"""table"""', "{'class': 'spec1 vmhmapping'}"], {}), "(body, 'table', {'class': 'spec1 vmhmapping'})\n", (12615, 12661), True, 'from lxml import etree as ET\n'), ((12677, 
12706), 'lxml.etree.SubElement', 'ET.SubElement', (['table', '"""thead"""'], {}), "(table, 'thead')\n", (12690, 12706), True, 'from lxml import etree as ET\n'), ((12723, 12749), 'lxml.etree.SubElement', 'ET.SubElement', (['thead', '"""tr"""'], {}), "(thead, 'tr')\n", (12736, 12749), True, 'from lxml import etree as ET\n'), ((12767, 12815), 'lxml.etree.SubElement', 'ET.SubElement', (['throw', '"""th"""', "{'class': 'hdrcol1'}"], {}), "(throw, 'th', {'class': 'hdrcol1'})\n", (12780, 12815), True, 'from lxml import etree as ET\n'), ((12871, 12919), 'lxml.etree.SubElement', 'ET.SubElement', (['throw', '"""th"""', "{'class': 'hdrcol2'}"], {}), "(throw, 'th', {'class': 'hdrcol2'})\n", (12884, 12919), True, 'from lxml import etree as ET\n'), ((12974, 13022), 'lxml.etree.SubElement', 'ET.SubElement', (['throw', '"""th"""', "{'class': 'hdrcol3'}"], {}), "(throw, 'th', {'class': 'hdrcol3'})\n", (12987, 13022), True, 'from lxml import etree as ET\n'), ((13222, 13270), 'lxml.etree.SubElement', 'ET.SubElement', (['throw', '"""th"""', "{'class': 'hdrcol5'}"], {}), "(throw, 'th', {'class': 'hdrcol5'})\n", (13235, 13270), True, 'from lxml import etree as ET\n'), ((13319, 13367), 'lxml.etree.SubElement', 'ET.SubElement', (['throw', '"""th"""', "{'class': 'hdrcol6'}"], {}), "(throw, 'th', {'class': 'hdrcol6'})\n", (13332, 13367), True, 'from lxml import etree as ET\n'), ((13412, 13460), 'lxml.etree.SubElement', 'ET.SubElement', (['throw', '"""th"""', "{'class': 'hdrcol7'}"], {}), "(throw, 'th', {'class': 'hdrcol7'})\n", (13425, 13460), True, 'from lxml import etree as ET\n'), ((13516, 13570), 'lxml.etree.SubElement', 'ET.SubElement', (['throw', '"""th"""', "{'class': 'hdrcolNoniptc'}"], {}), "(throw, 'th', {'class': 'hdrcolNoniptc'})\n", (13529, 13570), True, 'from lxml import etree as ET\n'), ((13591, 13701), 'lxml.etree.SubElement', 'ET.SubElement', (['thcol8', '"""a"""', "{'href': 'IPTC-VideoMetadataHub-mapping-AppleQT-Rec_' + StdVersion + '.html'}"], {}), "(thcol8, 'a', {'href': \n 'IPTC-VideoMetadataHub-mapping-AppleQT-Rec_' + StdVersion + '.html'})\n", (13604, 13701), True, 'from lxml import etree as ET\n'), ((13752, 13807), 'lxml.etree.SubElement', 'ET.SubElement', (['throw', '"""th"""', "{'class': 'hdrcolNoniptc2'}"], {}), "(throw, 'th', {'class': 'hdrcolNoniptc2'})\n", (13765, 13807), True, 'from lxml import etree as ET\n'), ((13828, 13936), 'lxml.etree.SubElement', 'ET.SubElement', (['thcol9', '"""a"""', "{'href': 'IPTC-VideoMetadataHub-mapping-MPEG7-Rec_' + StdVersion + '.html'}"], {}), "(thcol9, 'a', {'href': \n 'IPTC-VideoMetadataHub-mapping-MPEG7-Rec_' + StdVersion + '.html'})\n", (13841, 13936), True, 'from lxml import etree as ET\n'), ((13979, 14033), 'lxml.etree.SubElement', 'ET.SubElement', (['throw', '"""th"""', "{'class': 'hdrcolNoniptc'}"], {}), "(throw, 'th', {'class': 'hdrcolNoniptc'})\n", (13992, 14033), True, 'from lxml import etree as ET\n'), ((14055, 14167), 'lxml.etree.SubElement', 'ET.SubElement', (['thcol10', '"""a"""', "{'href': 'IPTC-VideoMetadataHub-mapping-NewsMLG2-Rec_' + StdVersion + '.html'}"], {}), "(thcol10, 'a', {'href': \n 'IPTC-VideoMetadataHub-mapping-NewsMLG2-Rec_' + StdVersion + '.html'})\n", (14068, 14167), True, 'from lxml import etree as ET\n'), ((14214, 14269), 'lxml.etree.SubElement', 'ET.SubElement', (['throw', '"""th"""', "{'class': 'hdrcolNoniptc2'}"], {}), "(throw, 'th', {'class': 'hdrcolNoniptc2'})\n", (14227, 14269), True, 'from lxml import etree as ET\n'), ((14291, 14403), 'lxml.etree.SubElement', 'ET.SubElement', (['thcol11', '"""a"""', "{'href': 
'IPTC-VideoMetadataHub-mapping-PBCore21-Rec_' + StdVersion + '.html'}"], {}), "(thcol11, 'a', {'href': \n 'IPTC-VideoMetadataHub-mapping-PBCore21-Rec_' + StdVersion + '.html'})\n", (14304, 14403), True, 'from lxml import etree as ET\n'), ((14452, 14506), 'lxml.etree.SubElement', 'ET.SubElement', (['throw', '"""th"""', "{'class': 'hdrcolNoniptc'}"], {}), "(throw, 'th', {'class': 'hdrcolNoniptc'})\n", (14465, 14506), True, 'from lxml import etree as ET\n'), ((14528, 14641), 'lxml.etree.SubElement', 'ET.SubElement', (['thcol12', '"""a"""', "{'href': 'IPTC-VideoMetadataHub-mapping-SchemaOrg-Rec_' + StdVersion + '.html'}"], {}), "(thcol12, 'a', {'href': \n 'IPTC-VideoMetadataHub-mapping-SchemaOrg-Rec_' + StdVersion + '.html'})\n", (14541, 14641), True, 'from lxml import etree as ET\n'), ((14714, 14769), 'lxml.etree.SubElement', 'ET.SubElement', (['throw', '"""th"""', "{'class': 'hdrcolNoniptc2'}"], {}), "(throw, 'th', {'class': 'hdrcolNoniptc2'})\n", (14727, 14769), True, 'from lxml import etree as ET\n'), ((14791, 14904), 'lxml.etree.SubElement', 'ET.SubElement', (['thcol13', '"""a"""', "{'href': 'IPTC-VideoMetadataHub-mapping-SonyXDCAM-Rec_' + StdVersion + '.html'}"], {}), "(thcol13, 'a', {'href': \n 'IPTC-VideoMetadataHub-mapping-SonyXDCAM-Rec_' + StdVersion + '.html'})\n", (14804, 14904), True, 'from lxml import etree as ET\n'), ((14963, 15017), 'lxml.etree.SubElement', 'ET.SubElement', (['throw', '"""th"""', "{'class': 'hdrcolNoniptc'}"], {}), "(throw, 'th', {'class': 'hdrcolNoniptc'})\n", (14976, 15017), True, 'from lxml import etree as ET\n'), ((15039, 15164), 'lxml.etree.SubElement', 'ET.SubElement', (['thcol14', '"""a"""', "{'href': 'IPTC-VideoMetadataHub-mapping-Panasonic-SMPTEP2-Rec_' +\n StdVersion + '.html'}"], {}), "(thcol14, 'a', {'href': \n 'IPTC-VideoMetadataHub-mapping-Panasonic-SMPTEP2-Rec_' + StdVersion +\n '.html'})\n", (15052, 15164), True, 'from lxml import etree as ET\n'), ((15216, 15271), 'lxml.etree.SubElement', 'ET.SubElement', (['throw', '"""th"""', "{'class': 'hdrcolNoniptc2'}"], {}), "(throw, 'th', {'class': 'hdrcolNoniptc2'})\n", (15229, 15271), True, 'from lxml import etree as ET\n'), ((15293, 15407), 'lxml.etree.SubElement', 'ET.SubElement', (['thcol15', '"""a"""', "{'href': 'IPTC-VideoMetadataHub-mapping-CanonVClip-Rec_' + StdVersion + '.html'\n }"], {}), "(thcol15, 'a', {'href': \n 'IPTC-VideoMetadataHub-mapping-CanonVClip-Rec_' + StdVersion + '.html'})\n", (15306, 15407), True, 'from lxml import etree as ET\n'), ((15464, 15518), 'lxml.etree.SubElement', 'ET.SubElement', (['throw', '"""th"""', "{'class': 'hdrcolNoniptc'}"], {}), "(throw, 'th', {'class': 'hdrcolNoniptc'})\n", (15477, 15518), True, 'from lxml import etree as ET\n'), ((15540, 15652), 'lxml.etree.SubElement', 'ET.SubElement', (['thcol16', '"""a"""', "{'href': 'IPTC-VideoMetadataHub-mapping-exiftool-Rec_' + StdVersion + '.html'}"], {}), "(thcol16, 'a', {'href': \n 'IPTC-VideoMetadataHub-mapping-exiftool-Rec_' + StdVersion + '.html'})\n", (15553, 15652), True, 'from lxml import etree as ET\n'), ((15708, 15763), 'lxml.etree.SubElement', 'ET.SubElement', (['throw', '"""th"""', "{'class': 'hdrcolNoniptc2'}"], {}), "(throw, 'th', {'class': 'hdrcolNoniptc2'})\n", (15721, 15763), True, 'from lxml import etree as ET\n'), ((15785, 15893), 'lxml.etree.SubElement', 'ET.SubElement', (['thcol17', '"""a"""', "{'href': 'IPTC-VideoMetadataHub-mapping-EIDR-Rec_' + StdVersion + '.html'}"], {}), "(thcol17, 'a', {'href': \n 'IPTC-VideoMetadataHub-mapping-EIDR-Rec_' + StdVersion + '.html'})\n", (15798, 15893), True, 
'from lxml import etree as ET\n'), ((16001, 16027), 'lxml.etree.SubElement', 'ET.SubElement', (['thead', '"""tr"""'], {}), "(thead, 'tr')\n", (16014, 16027), True, 'from lxml import etree as ET\n'), ((16045, 16093), 'lxml.etree.SubElement', 'ET.SubElement', (['throw', '"""td"""', "{'class': 'hdrcol1'}"], {}), "(throw, 'td', {'class': 'hdrcol1'})\n", (16058, 16093), True, 'from lxml import etree as ET\n'), ((16136, 16184), 'lxml.etree.SubElement', 'ET.SubElement', (['throw', '"""td"""', "{'class': 'hdrcol2'}"], {}), "(throw, 'td', {'class': 'hdrcol2'})\n", (16149, 16184), True, 'from lxml import etree as ET\n'), ((16227, 16275), 'lxml.etree.SubElement', 'ET.SubElement', (['throw', '"""td"""', "{'class': 'hdrcol3'}"], {}), "(throw, 'td', {'class': 'hdrcol3'})\n", (16240, 16275), True, 'from lxml import etree as ET\n'), ((16471, 16594), 'lxml.etree.fromstring', 'ET.fromstring', (['(\'<td class="hdrcolIptc"><a href="\' + moreatlink +\n \'" target="_blank">Find more about it at ...</a></td>\')'], {}), '(\'<td class="hdrcolIptc"><a href="\' + moreatlink +\n \'" target="_blank">Find more about it at ...</a></td>\')\n', (16484, 16594), True, 'from lxml import etree as ET\n'), ((16690, 16813), 'lxml.etree.fromstring', 'ET.fromstring', (['(\'<td class="hdrcolIptc"><a href="\' + moreatlink +\n \'" target="_blank">Find more about it at ...</a></td>\')'], {}), '(\'<td class="hdrcolIptc"><a href="\' + moreatlink +\n \'" target="_blank">Find more about it at ...</a></td>\')\n', (16703, 16813), True, 'from lxml import etree as ET\n'), ((16909, 17032), 'lxml.etree.fromstring', 'ET.fromstring', (['(\'<td class="hdrcolIptc"><a href="\' + moreatlink +\n \'" target="_blank">Find more about it at ...</a></td>\')'], {}), '(\'<td class="hdrcolIptc"><a href="\' + moreatlink +\n \'" target="_blank">Find more about it at ...</a></td>\')\n', (16922, 17032), True, 'from lxml import etree as ET\n'), ((21108, 21137), 'lxml.etree.SubElement', 'ET.SubElement', (['table', '"""tbody"""'], {}), "(table, 'tbody')\n", (21121, 21137), True, 'from lxml import etree as ET\n'), ((1589, 1607), 'pickle.load', 'pickle.load', (['token'], {}), '(token)\n', (1600, 1607), False, 'import pickle\n'), ((1849, 1918), 'google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file', 'InstalledAppFlow.from_client_secrets_file', (['CLIENT_SECRET_FILE', 'SCOPES'], {}), '(CLIENT_SECRET_FILE, SCOPES)\n', (1890, 1918), False, 'from google_auth_oauthlib.flow import InstalledAppFlow\n'), ((2096, 2121), 'pickle.dump', 'pickle.dump', (['creds', 'token'], {}), '(creds, token)\n', (2107, 2121), False, 'import pickle\n'), ((7024, 7150), 'lxml.etree.fromstring', 'ET.fromstring', (['(\'<td class="hdrcolNoniptc"><a href="\' + moreatlink +\n \'" target="_blank">Find more about it at ...</a></td>\')'], {}), '(\'<td class="hdrcolNoniptc"><a href="\' + moreatlink +\n \'" target="_blank">Find more about it at ...</a></td>\')\n', (7037, 7150), True, 'from lxml import etree as ET\n'), ((7234, 7283), 'lxml.etree.fromstring', 'ET.fromstring', (['"""<td class="hdrcolNoniptc"> </td>"""'], {}), '(\'<td class="hdrcolNoniptc"> </td>\')\n', (7247, 7283), True, 'from lxml import etree as ET\n'), ((7443, 7469), 'lxml.etree.SubElement', 'ET.SubElement', (['tbody', '"""tr"""'], {}), "(tbody, 'tr')\n", (7456, 7469), True, 'from lxml import etree as ET\n'), ((7764, 7814), 'lxml.etree.SubElement', 'ET.SubElement', (['xrow', '"""td"""', "{'class': 'bgdcolIptc'}"], {}), "(xrow, 'td', {'class': 'bgdcolIptc'})\n", (7777, 7814), True, 'from lxml import etree as ET\n'), ((7989, 
8039), 'lxml.etree.SubElement', 'ET.SubElement', (['xrow', '"""td"""', "{'class': 'bgdcolIptc'}"], {}), "(xrow, 'td', {'class': 'bgdcolIptc'})\n", (8002, 8039), True, 'from lxml import etree as ET\n'), ((8213, 8263), 'lxml.etree.SubElement', 'ET.SubElement', (['xrow', '"""td"""', "{'class': 'bgdcolIptc'}"], {}), "(xrow, 'td', {'class': 'bgdcolIptc'})\n", (8226, 8263), True, 'from lxml import etree as ET\n'), ((8691, 8741), 'lxml.etree.SubElement', 'ET.SubElement', (['xrow', '"""td"""', "{'class': 'bgdcolIptc'}"], {}), "(xrow, 'td', {'class': 'bgdcolIptc'})\n", (8704, 8741), True, 'from lxml import etree as ET\n'), ((8915, 8965), 'lxml.etree.SubElement', 'ET.SubElement', (['xrow', '"""td"""', "{'class': 'bgdcolIptc'}"], {}), "(xrow, 'td', {'class': 'bgdcolIptc'})\n", (8928, 8965), True, 'from lxml import etree as ET\n'), ((9139, 9189), 'lxml.etree.SubElement', 'ET.SubElement', (['xrow', '"""td"""', "{'class': 'bgdcolIptc'}"], {}), "(xrow, 'td', {'class': 'bgdcolIptc'})\n", (9152, 9189), True, 'from lxml import etree as ET\n'), ((9363, 9416), 'lxml.etree.SubElement', 'ET.SubElement', (['xrow', '"""td"""', "{'class': 'bgdcolNoniptc'}"], {}), "(xrow, 'td', {'class': 'bgdcolNoniptc'})\n", (9376, 9416), True, 'from lxml import etree as ET\n'), ((17162, 17288), 'lxml.etree.fromstring', 'ET.fromstring', (['(\'<td class="hdrcolNoniptc"><a href="\' + moreatlink +\n \'" target="_blank">Find more about it at ...</a></td>\')'], {}), '(\'<td class="hdrcolNoniptc"><a href="\' + moreatlink +\n \'" target="_blank">Find more about it at ...</a></td>\')\n', (17175, 17288), True, 'from lxml import etree as ET\n'), ((17372, 17421), 'lxml.etree.fromstring', 'ET.fromstring', (['"""<td class="hdrcolNoniptc"> </td>"""'], {}), '(\'<td class="hdrcolNoniptc"> </td>\')\n', (17385, 17421), True, 'from lxml import etree as ET\n'), ((17562, 17689), 'lxml.etree.fromstring', 'ET.fromstring', (['(\'<td class="hdrcolNoniptc2"><a href="\' + moreatlink +\n \'" target="_blank">Find more about it at ...</a></td>\')'], {}), '(\'<td class="hdrcolNoniptc2"><a href="\' + moreatlink +\n \'" target="_blank">Find more about it at ...</a></td>\')\n', (17575, 17689), True, 'from lxml import etree as ET\n'), ((17773, 17823), 'lxml.etree.fromstring', 'ET.fromstring', (['"""<td class="hdrcolNoniptc2"> </td>"""'], {}), '(\'<td class="hdrcolNoniptc2"> </td>\')\n', (17786, 17823), True, 'from lxml import etree as ET\n'), ((17965, 18091), 'lxml.etree.fromstring', 'ET.fromstring', (['(\'<td class="hdrcolNoniptc"><a href="\' + moreatlink +\n \'" target="_blank">Find more about it at ...</a></td>\')'], {}), '(\'<td class="hdrcolNoniptc"><a href="\' + moreatlink +\n \'" target="_blank">Find more about it at ...</a></td>\')\n', (17978, 18091), True, 'from lxml import etree as ET\n'), ((18175, 18224), 'lxml.etree.fromstring', 'ET.fromstring', (['"""<td class="hdrcolNoniptc"> </td>"""'], {}), '(\'<td class="hdrcolNoniptc"> </td>\')\n', (18188, 18224), True, 'from lxml import etree as ET\n'), ((18366, 18493), 'lxml.etree.fromstring', 'ET.fromstring', (['(\'<td class="hdrcolNoniptc2"><a href="\' + moreatlink +\n \'" target="_blank">Find more about it at ...</a></td>\')'], {}), '(\'<td class="hdrcolNoniptc2"><a href="\' + moreatlink +\n \'" target="_blank">Find more about it at ...</a></td>\')\n', (18379, 18493), True, 'from lxml import etree as ET\n'), ((18577, 18627), 'lxml.etree.fromstring', 'ET.fromstring', (['"""<td class="hdrcolNoniptc2"> </td>"""'], {}), '(\'<td class="hdrcolNoniptc2"> </td>\')\n', (18590, 18627), True, 'from lxml import etree as 
ET\n'), ((18769, 18895), 'lxml.etree.fromstring', 'ET.fromstring', (['(\'<td class="hdrcolNoniptc"><a href="\' + moreatlink +\n \'" target="_blank">Find more about it at ...</a></td>\')'], {}), '(\'<td class="hdrcolNoniptc"><a href="\' + moreatlink +\n \'" target="_blank">Find more about it at ...</a></td>\')\n', (18782, 18895), True, 'from lxml import etree as ET\n'), ((18979, 19028), 'lxml.etree.fromstring', 'ET.fromstring', (['"""<td class="hdrcolNoniptc"> </td>"""'], {}), '(\'<td class="hdrcolNoniptc"> </td>\')\n', (18992, 19028), True, 'from lxml import etree as ET\n'), ((19170, 19297), 'lxml.etree.fromstring', 'ET.fromstring', (['(\'<td class="hdrcolNoniptc2"><a href="\' + moreatlink +\n \'" target="_blank">Find more about it at ...</a></td>\')'], {}), '(\'<td class="hdrcolNoniptc2"><a href="\' + moreatlink +\n \'" target="_blank">Find more about it at ...</a></td>\')\n', (19183, 19297), True, 'from lxml import etree as ET\n'), ((19381, 19431), 'lxml.etree.fromstring', 'ET.fromstring', (['"""<td class="hdrcolNoniptc2"> </td>"""'], {}), '(\'<td class="hdrcolNoniptc2"> </td>\')\n', (19394, 19431), True, 'from lxml import etree as ET\n'), ((19573, 19699), 'lxml.etree.fromstring', 'ET.fromstring', (['(\'<td class="hdrcolNoniptc"><a href="\' + moreatlink +\n \'" target="_blank">Find more about it at ...</a></td>\')'], {}), '(\'<td class="hdrcolNoniptc"><a href="\' + moreatlink +\n \'" target="_blank">Find more about it at ...</a></td>\')\n', (19586, 19699), True, 'from lxml import etree as ET\n'), ((19783, 19832), 'lxml.etree.fromstring', 'ET.fromstring', (['"""<td class="hdrcolNoniptc"> </td>"""'], {}), '(\'<td class="hdrcolNoniptc"> </td>\')\n', (19796, 19832), True, 'from lxml import etree as ET\n'), ((19974, 20101), 'lxml.etree.fromstring', 'ET.fromstring', (['(\'<td class="hdrcolNoniptc2"><a href="\' + moreatlink +\n \'" target="_blank">Find more about it at ...</a></td>\')'], {}), '(\'<td class="hdrcolNoniptc2"><a href="\' + moreatlink +\n \'" target="_blank">Find more about it at ...</a></td>\')\n', (19987, 20101), True, 'from lxml import etree as ET\n'), ((20185, 20235), 'lxml.etree.fromstring', 'ET.fromstring', (['"""<td class="hdrcolNoniptc2"> </td>"""'], {}), '(\'<td class="hdrcolNoniptc2"> </td>\')\n', (20198, 20235), True, 'from lxml import etree as ET\n'), ((20377, 20503), 'lxml.etree.fromstring', 'ET.fromstring', (['(\'<td class="hdrcolNoniptc"><a href="\' + moreatlink +\n \'" target="_blank">Find more about it at ...</a></td>\')'], {}), '(\'<td class="hdrcolNoniptc"><a href="\' + moreatlink +\n \'" target="_blank">Find more about it at ...</a></td>\')\n', (20390, 20503), True, 'from lxml import etree as ET\n'), ((20587, 20636), 'lxml.etree.fromstring', 'ET.fromstring', (['"""<td class="hdrcolNoniptc"> </td>"""'], {}), '(\'<td class="hdrcolNoniptc"> </td>\')\n', (20600, 20636), True, 'from lxml import etree as ET\n'), ((20778, 20905), 'lxml.etree.fromstring', 'ET.fromstring', (['(\'<td class="hdrcolNoniptc2"><a href="\' + moreatlink +\n \'" target="_blank">Find more about it at ...</a></td>\')'], {}), '(\'<td class="hdrcolNoniptc2"><a href="\' + moreatlink +\n \'" target="_blank">Find more about it at ...</a></td>\')\n', (20791, 20905), True, 'from lxml import etree as ET\n'), ((20989, 21039), 'lxml.etree.fromstring', 'ET.fromstring', (['"""<td class="hdrcolNoniptc2"> </td>"""'], {}), '(\'<td class="hdrcolNoniptc2"> </td>\')\n', (21002, 21039), True, 'from lxml import etree as ET\n'), ((21198, 21224), 'lxml.etree.SubElement', 'ET.SubElement', (['tbody', '"""tr"""'], {}), 
"(tbody, 'tr')\n", (21211, 21224), True, 'from lxml import etree as ET\n'), ((21520, 21570), 'lxml.etree.SubElement', 'ET.SubElement', (['xrow', '"""td"""', "{'class': 'bgdcolIptc'}"], {}), "(xrow, 'td', {'class': 'bgdcolIptc'})\n", (21533, 21570), True, 'from lxml import etree as ET\n'), ((21743, 21793), 'lxml.etree.SubElement', 'ET.SubElement', (['xrow', '"""td"""', "{'class': 'bgdcolIptc'}"], {}), "(xrow, 'td', {'class': 'bgdcolIptc'})\n", (21756, 21793), True, 'from lxml import etree as ET\n'), ((21965, 22015), 'lxml.etree.SubElement', 'ET.SubElement', (['xrow', '"""td"""', "{'class': 'bgdcolIptc'}"], {}), "(xrow, 'td', {'class': 'bgdcolIptc'})\n", (21978, 22015), True, 'from lxml import etree as ET\n'), ((22439, 22489), 'lxml.etree.SubElement', 'ET.SubElement', (['xrow', '"""td"""', "{'class': 'bgdcolIptc'}"], {}), "(xrow, 'td', {'class': 'bgdcolIptc'})\n", (22452, 22489), True, 'from lxml import etree as ET\n'), ((22661, 22711), 'lxml.etree.SubElement', 'ET.SubElement', (['xrow', '"""td"""', "{'class': 'bgdcolIptc'}"], {}), "(xrow, 'td', {'class': 'bgdcolIptc'})\n", (22674, 22711), True, 'from lxml import etree as ET\n'), ((22883, 22933), 'lxml.etree.SubElement', 'ET.SubElement', (['xrow', '"""td"""', "{'class': 'bgdcolIptc'}"], {}), "(xrow, 'td', {'class': 'bgdcolIptc'})\n", (22896, 22933), True, 'from lxml import etree as ET\n'), ((23105, 23158), 'lxml.etree.SubElement', 'ET.SubElement', (['xrow', '"""td"""', "{'class': 'bgdcolNoniptc'}"], {}), "(xrow, 'td', {'class': 'bgdcolNoniptc'})\n", (23118, 23158), True, 'from lxml import etree as ET\n'), ((23330, 23384), 'lxml.etree.SubElement', 'ET.SubElement', (['xrow', '"""td"""', "{'class': 'bgdcolNoniptc2'}"], {}), "(xrow, 'td', {'class': 'bgdcolNoniptc2'})\n", (23343, 23384), True, 'from lxml import etree as ET\n'), ((23557, 23610), 'lxml.etree.SubElement', 'ET.SubElement', (['xrow', '"""td"""', "{'class': 'bgdcolNoniptc'}"], {}), "(xrow, 'td', {'class': 'bgdcolNoniptc'})\n", (23570, 23610), True, 'from lxml import etree as ET\n'), ((23785, 23839), 'lxml.etree.SubElement', 'ET.SubElement', (['xrow', '"""td"""', "{'class': 'bgdcolNoniptc2'}"], {}), "(xrow, 'td', {'class': 'bgdcolNoniptc2'})\n", (23798, 23839), True, 'from lxml import etree as ET\n'), ((24014, 24067), 'lxml.etree.SubElement', 'ET.SubElement', (['xrow', '"""td"""', "{'class': 'bgdcolNoniptc'}"], {}), "(xrow, 'td', {'class': 'bgdcolNoniptc'})\n", (24027, 24067), True, 'from lxml import etree as ET\n'), ((24242, 24296), 'lxml.etree.SubElement', 'ET.SubElement', (['xrow', '"""td"""', "{'class': 'bgdcolNoniptc2'}"], {}), "(xrow, 'td', {'class': 'bgdcolNoniptc2'})\n", (24255, 24296), True, 'from lxml import etree as ET\n'), ((24471, 24524), 'lxml.etree.SubElement', 'ET.SubElement', (['xrow', '"""td"""', "{'class': 'bgdcolNoniptc'}"], {}), "(xrow, 'td', {'class': 'bgdcolNoniptc'})\n", (24484, 24524), True, 'from lxml import etree as ET\n'), ((24699, 24753), 'lxml.etree.SubElement', 'ET.SubElement', (['xrow', '"""td"""', "{'class': 'bgdcolNoniptc2'}"], {}), "(xrow, 'td', {'class': 'bgdcolNoniptc2'})\n", (24712, 24753), True, 'from lxml import etree as ET\n'), ((24928, 24981), 'lxml.etree.SubElement', 'ET.SubElement', (['xrow', '"""td"""', "{'class': 'bgdcolNoniptc'}"], {}), "(xrow, 'td', {'class': 'bgdcolNoniptc'})\n", (24941, 24981), True, 'from lxml import etree as ET\n'), ((25156, 25210), 'lxml.etree.SubElement', 'ET.SubElement', (['xrow', '"""td"""', "{'class': 'bgdcolNoniptc2'}"], {}), "(xrow, 'td', {'class': 'bgdcolNoniptc2'})\n", (25169, 25210), True, 'from lxml import 
etree as ET\n'), ((1805, 1814), 'google.auth.transport.requests.Request', 'Request', ([], {}), '()\n', (1812, 1814), False, 'from google.auth.transport.requests import Request\n'), ((9631, 9668), 'lxml.etree.tostring', 'ET.tostring', (['xroot'], {'pretty_print': '(True)'}), '(xroot, pretty_print=True)\n', (9642, 9668), True, 'from lxml import etree as ET\n'), ((25492, 25529), 'lxml.etree.tostring', 'ET.tostring', (['xroot'], {'pretty_print': '(True)'}), '(xroot, pretty_print=True)\n', (25503, 25529), True, 'from lxml import etree as ET\n')]
|
import glob
import numpy as np
from pprint import pprint as pp
g = glob.glob("data/*/*/audio/*.wav")
wavpath = g[:10]
pp(wavpath)
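# Split each wav path into (recording directory, "audio/<filename>.wav") pairs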
res = [("/".join(wp.split("/")[:-2]), "/".join(wp.split("/")[-2:])) for wp in wavpath]
pp(res)
|
[
"pprint.pprint",
"glob.glob"
] |
[((69, 102), 'glob.glob', 'glob.glob', (['"""data/*/*/audio/*.wav"""'], {}), "('data/*/*/audio/*.wav')\n", (78, 102), False, 'import glob\n'), ((122, 133), 'pprint.pprint', 'pp', (['wavpath'], {}), '(wavpath)\n', (124, 133), True, 'from pprint import pprint as pp\n'), ((223, 230), 'pprint.pprint', 'pp', (['res'], {}), '(res)\n', (225, 230), True, 'from pprint import pprint as pp\n')]
|
from __future__ import annotations
from datetime import datetime
from typing import List, Dict, Union, Optional, TYPE_CHECKING
import pymongo
from pymongo import MongoClient
from pytz import timezone
import config
if TYPE_CHECKING:
import discord
JsonData = Dict[str, Union[str, int]]
cluster = MongoClient(config.mongo_client)
db = cluster[config.cluster_name]  # Database handle
collection = db[config.collection_name]  # Collection handle
def daily_leaderboard() -> List[JsonData]:
print(
list(collection.find({}).sort(
"dailyTime", pymongo.DESCENDING)
)[:10]
)
return list(collection.find({}).sort(
"dailyTime", pymongo.DESCENDING)
)[:10]
def weekly_leaderboard() -> List[JsonData]:
return list(collection.find({}).sort(
"weeklyTime", pymongo.DESCENDING)
)[:10]
def monthly_leaderboard() -> List[JsonData]:
return list(collection.find({}).sort(
"monthlyTime", pymongo.DESCENDING)
)[:10]
def member_leaderboard() -> List[JsonData]:
return list(collection.find({}).sort(
"memberTime", pymongo.DESCENDING)
)[:10]
def member_details(member_id) -> Optional[JsonData]:
member = collection.find_one({"_id": member_id})
return member if str(member) != "none" else None
def resetDaily():
"""
Resets daily time of all members
"""
collection.update_many({}, {"$set": {"dailyTime": 0}})
def resetWeekly():
"""
Resets weekly time of all members
"""
collection.update_many({}, {"$set": {"weeklyTime": 0}})
def resetMonthly():
"""
Resets monthly time of all members.
"""
collection.update_many({}, {"$set": {"monthlyTime": 0}})
def end(member: discord.Member):
"""
Updates total Study time for members when they leave.
:param member:
The member that left the voice channel.
"""
now: datetime = datetime.now(timezone('Asia/Kolkata'))
now_str: str = now.strftime("%H:%M")
user = collection.find_one({"_id": str(member.id)})
join_time = str(user["startTime"])
join_hour, join_minutes = join_time.split(':')
join_minutes = int(join_hour) * 60 + int(join_minutes)
current_hour, current_minutes = now_str.split(':')
current_minutes = int(current_hour) * 60 + int(current_minutes)
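    # If the clock time is now earlier than the join time, the session crossed
    # midnight, so the elapsed minutes must account for the day boundary.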
if current_minutes < join_minutes:
daily_time = current_minutes
difference = (1440 - join_minutes) + current_minutes
weekly_time = current_minutes if int(now.weekday()) == 0 else difference
monthly_time = current_minutes if int(now.day) == 1 else difference
else:
difference = current_minutes - join_minutes
daily_time = difference
weekly_time = difference
monthly_time = difference
collection.update_one(
{"_id": str(member.id)},
{
"$inc": {
"memberTime": int(difference),
"monthlyTime": int(monthly_time),
"weeklyTime": int(weekly_time),
"dailyTime": int(daily_time)
}
}
)
collection.update_one(
{"_id": str(member.id)},
{"$set": {"startTime": 0}}
)
def update_join(member: discord.Member, _before_flag, _after_flag):
"""
Updates join data for existing members
:param member:
The member who joined the study channel
:param _before_flag:
The flag before the member joined the study channel
:param _after_flag:
The flag after the member joined the study channel
"""
now: str = datetime.now(timezone('Asia/Kolkata')).strftime("%H:%M")
collection.update_one(
{"_id": str(member.id)},
{
"$set": {
"startTime": now,
"name#": str(member.name + "#" + member.discriminator)
}
}
)
def add(member: discord.Member, _before_flag, _after_flag):
"""
Adds new entry in database for new members.
:param member:
The member who joined the study channel
:param _before_flag:
The flag before the member joined the study channel
:param _after_flag:
The flag after the member joined the study channel
"""
now: str = datetime.now(timezone('Asia/Kolkata')).strftime("%H:%M")
post = {
"_id": str(member.id),
"memberTime": 0,
"monthlyTime": 0,
"weeklyTime": 0,
"dailyTime": 0,
"startTime": now,
"name#": str(member.name + "#" + member.discriminator)
}
collection.insert_one(post)
def join(member: discord.Member, before_flag, after_flag):
"""
Called once member joins study channel.
:param member:
The member who joined the study channel
:param before_flag:
The flag before the member joined the study channel
:param after_flag:
The flag after the member joined the study channel
"""
if before_flag == after_flag:
return
user_exist = str(collection.find_one({"_id": str(member.id)}))
if user_exist == "None":
add(member, before_flag, after_flag)
else:
update_join(member, before_flag, after_flag)
|
[
"pymongo.MongoClient",
"pytz.timezone"
] |
[((319, 351), 'pymongo.MongoClient', 'MongoClient', (['config.mongo_client'], {}), '(config.mongo_client)\n', (330, 351), False, 'from pymongo import MongoClient\n'), ((1976, 2000), 'pytz.timezone', 'timezone', (['"""Asia/Kolkata"""'], {}), "('Asia/Kolkata')\n", (1984, 2000), False, 'from pytz import timezone\n'), ((3696, 3720), 'pytz.timezone', 'timezone', (['"""Asia/Kolkata"""'], {}), "('Asia/Kolkata')\n", (3704, 3720), False, 'from pytz import timezone\n'), ((4384, 4408), 'pytz.timezone', 'timezone', (['"""Asia/Kolkata"""'], {}), "('Asia/Kolkata')\n", (4392, 4408), False, 'from pytz import timezone\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 1 11:41:59 2019
@author: gemsec-user
"""
import numpy as np
import prody as pd
import PeptideBuilder as pb
import os
import Bio
import cleaning
from decimal import Decimal
from symbols import *
#from pdbtools import pdbtools as pdb
d = os.getcwd()
parsed_aa = {}
# parses peptides and creates file structures to store these peptides
# stores a dictionary with peptide keys that map to the atoms that make it up
def parse_aa():
if not os.path.exists(d + '/amino_acids'):
os.mkdir(d + '/amino_acids')
global parsed_aa
for amino in AA:
# out writes information to files
out = Bio.PDB.PDBIO()
# i is a peptide structure from amino acid
i = pb.make_structure(amino, [180]*len(amino),[180]*len(amino))
out.set_structure(i)
out.save(d + '/amino_acids/' + amino + '.pdb')
cleaning.cleanATOM(d + '/amino_acids/' + amino + '.pdb', out_file= d + '/amino_acids/' + amino + '.pdb', ext = '.pdb')
temp = pd.parsePDB(d + '/amino_acids/' + amino + ".pdb")
# maps amino acids to their atoms
parsed_aa[amino] = []
for atom in temp.iterAtoms():
parsed_aa[amino].append(str(atom.getName()))
# what are nodes? (2d array)
# returns the atoms from the given nodes
def remove_padding(nodes):
atoms = []
current = 0
    # gets the current column of the first 5 rows
col = nodes[0:5, current]
while sum(col) != 0:
# adds the element index of the current node column
atoms.append(ELEMENT_INDEX[col.tolist().index(1.0)])
current += 1
col = nodes[0:5, current]
return atoms
# returns the fraction of the amino acid's atoms whose element symbols match the encoded nodes starting at the given index
def heuristic(index, node, amino_acid):
correct = 0
total = 0
for atom in parsed_aa[amino_acid]:
if (index+total) < len(node) and ELEMENT_SYMBOLS[int(node[index+total][0]) - 1] == atom[0]:
correct += 1
total += 1
return float(correct/total)
# finds all possible sequences of amino acid sequences keyed to heuristic efficiency values
def find_sequence_recurs(nodes, length, current_ind, current_sequence, current_value):
if len(parsed_aa.keys()) == 0:
parse_aa()
# adds the given value and sequence to the possible sequences dictionary
if len(current_sequence) == length:
global POSSIBLE_SEQUENCES
if current_value in POSSIBLE_SEQUENCES:
POSSIBLE_SEQUENCES[current_value].append(current_sequence)
else:
POSSIBLE_SEQUENCES[current_value] = [current_sequence]
values = []
for a in AA:
values.append(heuristic(current_ind,nodes, a))
max_value = max(values)
if max_value > 0.8:
for i in range(len(values)):
if max_value == values[i]:
amino = AA[i]
find_sequence_recurs(nodes, length, current_ind + len(parsed_aa[amino]), current_sequence + amino, current_value + max_value)
# returns a string of whitespace specified
def find_white_space(total_space, text):
return ' '*(total_space - len(text))
POSSIBLE_SEQUENCES = None
# what are nodes?
# decodes information into a pdb file
# what does encoding look like?
def decode(encoding, save_loc = d, save_name = '', find_coord = False, use_coord = False):
if len(parsed_aa.keys()) == 0:
parse_aa()
if save_name == '':
save_name = encoding['sequence'] + '.pdb'
placed = []
new_nodes = remove_padding(encoding['index'])
if not use_coord:
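        # Rebuild 3D coordinates from the pairwise distance matrix D: the first
        # atom is placed at the origin, the second on the x-axis, the third in
        # the xy-plane, and each remaining atom is solved from its distances to
        # those reference atoms (simple trilateration).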
D = encoding['secondary']
placed.append([new_nodes[0], (0,0,0)])
placed.append([new_nodes[1], (D[0,1],0,0)])
x = (D[1,2]**2 - D[0,2]**2 - D[0,1]**2)/(-2 * D[0,1])
y = (abs(D[0,2]**2 - x**2))**(0.5)
placed.append([new_nodes[2], (x,y,0)])
P = placed[2][1][0]**2 + placed[2][1][1]**2
for i in range(3,len(new_nodes)):
x = (D[1,i]**2 - D[0,i]**2 - D[0,1]**2)/(-2*D[0,1])
y = (D[2,i]**2 - D[0,i]**2 - P + (2*x*placed[2][1][0]))/(-2*placed[2][1][1])
z = (abs(D[0,i]**2 - x**2 - y**2))**(0.5)
placed.append([new_nodes[i], (x,y,z)])
if find_coord:
final = np.zeros((len(encoding['secondary'][0]),3))
for i in range(len(placed)):
final[i, 0] = placed[i][1][0]
final[i, 1] = placed[i][1][1]
final[i, 2] = placed[i][1][2]
return final
else:
for i in range(3,len(new_nodes)):
placed.append([new_nodes[i], (encoding['coordinates'][i][0],encoding['coordinates'][i][1],encoding['coordinates'][i][2])])
with open(save_loc + '/' + save_name, 'w+') as g:
counter = 0
amino_num = 0
for i in range(len(placed)):
if counter == 0:
counter = len(parsed_aa[encoding['ele_to_amino'][i][1]])
amino_num += 1
string = 'ATOM' #+ str(i + 1) + ' '+ encoding['seq_to_atoms'][i][0]
string += find_white_space(7, str(i + 1)) + str(i+1) + ' '
string += encoding['ele_to_amino'][i][0] + find_white_space(4, encoding['ele_to_amino'][i][0])
string += AA3[AA.index(encoding['ele_to_amino'][i][1])] + ' A'
string += find_white_space(4, str(amino_num)) + str(amino_num)
string += find_white_space(12, str(round(Decimal(placed[i][1][0]), 3))) + str(round(Decimal(placed[i][1][0]), 3))
string += find_white_space(8, str(round(Decimal(placed[i][1][1]), 3))) + str(round(Decimal(placed[i][1][1]), 3))
string += find_white_space(8, str(round(Decimal(placed[i][1][2]), 3))) + str(round(Decimal(placed[i][1][2]), 3))
string += ' 1.00 0.00'
string += find_white_space(11, placed[i][0]) + placed[i][0]
g.write(string + '\n')
counter -= 1
return save_loc + '/' + save_name
|
[
"os.mkdir",
"decimal.Decimal",
"os.getcwd",
"os.path.exists",
"prody.parsePDB",
"Bio.PDB.PDBIO",
"cleaning.cleanATOM"
] |
[((309, 320), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (318, 320), False, 'import os\n'), ((512, 546), 'os.path.exists', 'os.path.exists', (["(d + '/amino_acids')"], {}), "(d + '/amino_acids')\n", (526, 546), False, 'import os\n'), ((556, 584), 'os.mkdir', 'os.mkdir', (["(d + '/amino_acids')"], {}), "(d + '/amino_acids')\n", (564, 584), False, 'import os\n'), ((683, 698), 'Bio.PDB.PDBIO', 'Bio.PDB.PDBIO', ([], {}), '()\n', (696, 698), False, 'import Bio\n'), ((914, 1033), 'cleaning.cleanATOM', 'cleaning.cleanATOM', (["(d + '/amino_acids/' + amino + '.pdb')"], {'out_file': "(d + '/amino_acids/' + amino + '.pdb')", 'ext': '""".pdb"""'}), "(d + '/amino_acids/' + amino + '.pdb', out_file=d +\n '/amino_acids/' + amino + '.pdb', ext='.pdb')\n", (932, 1033), False, 'import cleaning\n'), ((1048, 1097), 'prody.parsePDB', 'pd.parsePDB', (["(d + '/amino_acids/' + amino + '.pdb')"], {}), "(d + '/amino_acids/' + amino + '.pdb')\n", (1059, 1097), True, 'import prody as pd\n'), ((5454, 5478), 'decimal.Decimal', 'Decimal', (['placed[i][1][0]'], {}), '(placed[i][1][0])\n', (5461, 5478), False, 'from decimal import Decimal\n'), ((5579, 5603), 'decimal.Decimal', 'Decimal', (['placed[i][1][1]'], {}), '(placed[i][1][1])\n', (5586, 5603), False, 'from decimal import Decimal\n'), ((5704, 5728), 'decimal.Decimal', 'Decimal', (['placed[i][1][2]'], {}), '(placed[i][1][2])\n', (5711, 5728), False, 'from decimal import Decimal\n'), ((5411, 5435), 'decimal.Decimal', 'Decimal', (['placed[i][1][0]'], {}), '(placed[i][1][0])\n', (5418, 5435), False, 'from decimal import Decimal\n'), ((5536, 5560), 'decimal.Decimal', 'Decimal', (['placed[i][1][1]'], {}), '(placed[i][1][1])\n', (5543, 5560), False, 'from decimal import Decimal\n'), ((5661, 5685), 'decimal.Decimal', 'Decimal', (['placed[i][1][2]'], {}), '(placed[i][1][2])\n', (5668, 5685), False, 'from decimal import Decimal\n')]
|
def main():
from summ_eval.server import EvalServer
from summ_eval.server.helper import get_run_args
args = get_run_args()
server = EvalServer(args)
server.start()
server.join()
|
[
"summ_eval.server.EvalServer",
"summ_eval.server.helper.get_run_args"
] |
[((120, 134), 'summ_eval.server.helper.get_run_args', 'get_run_args', ([], {}), '()\n', (132, 134), False, 'from summ_eval.server.helper import get_run_args\n'), ((148, 164), 'summ_eval.server.EvalServer', 'EvalServer', (['args'], {}), '(args)\n', (158, 164), False, 'from summ_eval.server import EvalServer\n')]
|
"""
Unit-tests for target generation
"""
import os
import click
import pytest
from pathlib import Path as P
from textwrap import dedent
from commodore import cluster
from commodore.inventory import Inventory
from commodore.config import Config
@pytest.fixture
def data():
"""
Setup test data
"""
tenant = {
"id": "mytenant",
"displayName": "My Test Tenant",
}
cluster = {
"id": "mycluster",
"displayName": "My Test Cluster",
"tenant": tenant["id"],
"facts": {
"distribution": "rancher",
"cloud": "cloudscale",
},
"dynamicFacts": {
"kubernetes_version": {
"major": "1",
"minor": "21",
"gitVersion": "v1.21.3",
}
},
"gitRepo": {
"url": "ssh://[email protected]/cluster-catalogs/mycluster",
},
}
return {
"cluster": cluster,
"tenant": tenant,
}
def cluster_from_data(data) -> cluster.Cluster:
return cluster.Cluster(data["cluster"], data["tenant"])
def _setup_working_dir(inv: Inventory, components):
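    # Create empty defaults and component class files so that target rendering
    # finds the inventory layout it expects for each component.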
for cls in components:
defaults = inv.defaults_file(cls)
os.makedirs(defaults.parent, exist_ok=True)
defaults.touch()
component = inv.component_file(cls)
os.makedirs(component.parent, exist_ok=True)
component.touch()
def test_render_bootstrap_target(tmp_path: P):
components = ["foo", "bar"]
inv = Inventory(work_dir=tmp_path)
_setup_working_dir(inv, components)
target = cluster.render_target(inv, "cluster", ["foo", "bar", "baz"])
classes = [
"params.cluster",
"defaults.foo",
"defaults.bar",
"global.commodore",
]
assert target != ""
print(target)
assert len(target["classes"]) == len(
classes
), "rendered target includes different amount of classes"
for i in range(len(classes)):
assert target["classes"][i] == classes[i]
assert target["parameters"]["_instance"] == "cluster"
def test_render_target(tmp_path: P):
components = ["foo", "bar"]
inv = Inventory(work_dir=tmp_path)
_setup_working_dir(inv, components)
target = cluster.render_target(inv, "foo", ["foo", "bar", "baz"])
classes = [
"params.cluster",
"defaults.foo",
"defaults.bar",
"global.commodore",
"components.foo",
]
assert target != ""
print(target)
assert len(target["classes"]) == len(
classes
), "rendered target includes different amount of classes"
for i in range(len(classes)):
assert target["classes"][i] == classes[i]
assert target["parameters"]["kapitan"]["vars"]["target"] == "foo"
assert target["parameters"]["_instance"] == "foo"
def test_render_aliased_target(tmp_path: P):
components = ["foo", "bar"]
inv = Inventory(work_dir=tmp_path)
_setup_working_dir(inv, components)
target = cluster.render_target(inv, "fooer", ["foo", "bar", "baz"], component="foo")
classes = [
"params.cluster",
"defaults.foo",
"defaults.bar",
"global.commodore",
"components.foo",
]
assert target != ""
print(target)
assert len(target["classes"]) == len(
classes
), "rendered target includes different amount of classes"
for i in range(len(classes)):
assert target["classes"][i] == classes[i]
assert target["parameters"]["kapitan"]["vars"]["target"] == "fooer"
assert target["parameters"]["foo"] == "${fooer}"
assert target["parameters"]["_instance"] == "fooer"
def test_render_aliased_target_with_dash(tmp_path: P):
components = ["foo-comp", "bar"]
inv = Inventory(work_dir=tmp_path)
_setup_working_dir(inv, components)
target = cluster.render_target(
inv, "foo-1", ["foo-comp", "bar", "baz"], component="foo-comp"
)
classes = [
"params.cluster",
"defaults.foo-comp",
"defaults.bar",
"global.commodore",
"components.foo-comp",
]
assert target != ""
print(target)
assert len(target["classes"]) == len(
classes
), "rendered target includes different amount of classes"
for i in range(len(classes)):
assert target["classes"][i] == classes[i]
assert target["parameters"]["kapitan"]["vars"]["target"] == "foo-1"
assert target["parameters"]["foo_comp"] == "${foo_1}"
assert target["parameters"]["_instance"] == "foo-1"
def test_render_params(data, tmp_path: P):
cfg = Config(work_dir=tmp_path)
target = cfg.inventory.bootstrap_target
params = cluster.render_params(cfg.inventory, cluster_from_data(data))
assert "parameters" in params
params = params["parameters"]
assert "cluster" in params
assert "name" in params["cluster"]
assert params["cluster"]["name"] == "mycluster"
assert target in params
target_params = params[target]
assert "name" in target_params
assert target_params["name"] == "mycluster"
assert "display_name" in target_params
assert target_params["display_name"] == "My Test Cluster"
assert "catalog_url" in target_params
assert (
target_params["catalog_url"]
== "ssh://[email protected]/cluster-catalogs/mycluster"
)
assert "tenant" in target_params
assert target_params["tenant"] == "mytenant"
assert "tenant_display_name" in target_params
assert target_params["tenant_display_name"] == "My Test Tenant"
assert "dist" in target_params
assert target_params["dist"] == "rancher"
assert "facts" in params
assert params["facts"] == data["cluster"]["facts"]
assert "dynamic_facts" in params
dyn_facts = params["dynamic_facts"]
assert "kubernetes_version" in dyn_facts
k8s_ver = dyn_facts["kubernetes_version"]
assert "major" in k8s_ver
assert "minor" in k8s_ver
assert "gitVersion" in k8s_ver
assert "1" == k8s_ver["major"]
assert "21" == k8s_ver["minor"]
assert "v1.21.3" == k8s_ver["gitVersion"]
assert "cloud" in params
assert "provider" in params["cloud"]
assert params["cloud"]["provider"] == "cloudscale"
assert "customer" in params
assert "name" in params["customer"]
assert params["customer"]["name"] == "mytenant"
def test_missing_facts(data, tmp_path: P):
data["cluster"]["facts"].pop("cloud")
cfg = Config(work_dir=tmp_path)
with pytest.raises(click.ClickException):
cluster.render_params(cfg.inventory, cluster_from_data(data))
def test_empty_facts(data, tmp_path: P):
data["cluster"]["facts"]["cloud"] = ""
cfg = Config(work_dir=tmp_path)
with pytest.raises(click.ClickException):
cluster.render_params(cfg.inventory, cluster_from_data(data))
def test_read_cluster_and_tenant(tmp_path):
cfg = Config(work_dir=tmp_path)
file = cfg.inventory.params_file
os.makedirs(file.parent, exist_ok=True)
with open(file, "w") as f:
f.write(
dedent(
"""
parameters:
cluster:
name: c-twilight-water-9032
tenant: t-delicate-pine-3938"""
)
)
cluster_id, tenant_id = cluster.read_cluster_and_tenant(cfg.inventory)
assert cluster_id == "c-twilight-water-9032"
assert tenant_id == "t-delicate-pine-3938"
def test_read_cluster_and_tenant_missing_fact(tmp_path):
inv = Inventory(work_dir=tmp_path)
file = inv.params_file
os.makedirs(file.parent, exist_ok=True)
with open(file, "w") as f:
f.write(
dedent(
"""
classes: []
parameters: {}"""
)
)
with pytest.raises(KeyError):
cluster.read_cluster_and_tenant(inv)
|
[
"textwrap.dedent",
"os.makedirs",
"commodore.inventory.Inventory",
"commodore.cluster.render_target",
"pytest.raises",
"commodore.cluster.read_cluster_and_tenant",
"commodore.cluster.Cluster",
"commodore.config.Config"
] |
[((1059, 1107), 'commodore.cluster.Cluster', 'cluster.Cluster', (["data['cluster']", "data['tenant']"], {}), "(data['cluster'], data['tenant'])\n", (1074, 1107), False, 'from commodore import cluster\n'), ((1522, 1550), 'commodore.inventory.Inventory', 'Inventory', ([], {'work_dir': 'tmp_path'}), '(work_dir=tmp_path)\n', (1531, 1550), False, 'from commodore.inventory import Inventory\n'), ((1605, 1665), 'commodore.cluster.render_target', 'cluster.render_target', (['inv', '"""cluster"""', "['foo', 'bar', 'baz']"], {}), "(inv, 'cluster', ['foo', 'bar', 'baz'])\n", (1626, 1665), False, 'from commodore import cluster\n'), ((2176, 2204), 'commodore.inventory.Inventory', 'Inventory', ([], {'work_dir': 'tmp_path'}), '(work_dir=tmp_path)\n', (2185, 2204), False, 'from commodore.inventory import Inventory\n'), ((2259, 2315), 'commodore.cluster.render_target', 'cluster.render_target', (['inv', '"""foo"""', "['foo', 'bar', 'baz']"], {}), "(inv, 'foo', ['foo', 'bar', 'baz'])\n", (2280, 2315), False, 'from commodore import cluster\n'), ((2926, 2954), 'commodore.inventory.Inventory', 'Inventory', ([], {'work_dir': 'tmp_path'}), '(work_dir=tmp_path)\n', (2935, 2954), False, 'from commodore.inventory import Inventory\n'), ((3009, 3084), 'commodore.cluster.render_target', 'cluster.render_target', (['inv', '"""fooer"""', "['foo', 'bar', 'baz']"], {'component': '"""foo"""'}), "(inv, 'fooer', ['foo', 'bar', 'baz'], component='foo')\n", (3030, 3084), False, 'from commodore import cluster\n'), ((3767, 3795), 'commodore.inventory.Inventory', 'Inventory', ([], {'work_dir': 'tmp_path'}), '(work_dir=tmp_path)\n', (3776, 3795), False, 'from commodore.inventory import Inventory\n'), ((3850, 3940), 'commodore.cluster.render_target', 'cluster.render_target', (['inv', '"""foo-1"""', "['foo-comp', 'bar', 'baz']"], {'component': '"""foo-comp"""'}), "(inv, 'foo-1', ['foo-comp', 'bar', 'baz'], component=\n 'foo-comp')\n", (3871, 3940), False, 'from commodore import cluster\n'), ((4598, 4623), 'commodore.config.Config', 'Config', ([], {'work_dir': 'tmp_path'}), '(work_dir=tmp_path)\n', (4604, 4623), False, 'from commodore.config import Config\n'), ((6452, 6477), 'commodore.config.Config', 'Config', ([], {'work_dir': 'tmp_path'}), '(work_dir=tmp_path)\n', (6458, 6477), False, 'from commodore.config import Config\n'), ((6690, 6715), 'commodore.config.Config', 'Config', ([], {'work_dir': 'tmp_path'}), '(work_dir=tmp_path)\n', (6696, 6715), False, 'from commodore.config import Config\n'), ((6888, 6913), 'commodore.config.Config', 'Config', ([], {'work_dir': 'tmp_path'}), '(work_dir=tmp_path)\n', (6894, 6913), False, 'from commodore.config import Config\n'), ((6955, 6994), 'os.makedirs', 'os.makedirs', (['file.parent'], {'exist_ok': '(True)'}), '(file.parent, exist_ok=True)\n', (6966, 6994), False, 'import os\n'), ((7275, 7321), 'commodore.cluster.read_cluster_and_tenant', 'cluster.read_cluster_and_tenant', (['cfg.inventory'], {}), '(cfg.inventory)\n', (7306, 7321), False, 'from commodore import cluster\n'), ((7487, 7515), 'commodore.inventory.Inventory', 'Inventory', ([], {'work_dir': 'tmp_path'}), '(work_dir=tmp_path)\n', (7496, 7515), False, 'from commodore.inventory import Inventory\n'), ((7547, 7586), 'os.makedirs', 'os.makedirs', (['file.parent'], {'exist_ok': '(True)'}), '(file.parent, exist_ok=True)\n', (7558, 7586), False, 'import os\n'), ((1239, 1282), 'os.makedirs', 'os.makedirs', (['defaults.parent'], {'exist_ok': '(True)'}), '(defaults.parent, exist_ok=True)\n', (1250, 1282), False, 'import os\n'), ((1360, 1404), 
'os.makedirs', 'os.makedirs', (['component.parent'], {'exist_ok': '(True)'}), '(component.parent, exist_ok=True)\n', (1371, 1404), False, 'import os\n'), ((6487, 6522), 'pytest.raises', 'pytest.raises', (['click.ClickException'], {}), '(click.ClickException)\n', (6500, 6522), False, 'import pytest\n'), ((6725, 6760), 'pytest.raises', 'pytest.raises', (['click.ClickException'], {}), '(click.ClickException)\n', (6738, 6760), False, 'import pytest\n'), ((7763, 7786), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (7776, 7786), False, 'import pytest\n'), ((7796, 7832), 'commodore.cluster.read_cluster_and_tenant', 'cluster.read_cluster_and_tenant', (['inv'], {}), '(inv)\n', (7827, 7832), False, 'from commodore import cluster\n'), ((7055, 7215), 'textwrap.dedent', 'dedent', (['"""\n parameters:\n cluster:\n name: c-twilight-water-9032\n tenant: t-delicate-pine-3938"""'], {}), '(\n """\n parameters:\n cluster:\n name: c-twilight-water-9032\n tenant: t-delicate-pine-3938"""\n )\n', (7061, 7215), False, 'from textwrap import dedent\n'), ((7647, 7712), 'textwrap.dedent', 'dedent', (['"""\n classes: []\n parameters: {}"""'], {}), '("""\n classes: []\n parameters: {}""")\n', (7653, 7712), False, 'from textwrap import dedent\n')]
|
import webbrowser
class Movie():
'''This is a class for storing information about movies.'''
def __init__(self, movie_title, movie_year, poster_image, trailer_youtube, movie_rating):
self.title = movie_title
self.year = movie_year
self.poster_image_url = poster_image
self.trailer_youtube_url = trailer_youtube
self.rating = movie_rating
def show_trailer(self):
        '''Opens the trailer's YouTube URL in the default web browser.'''
webbrowser.open(self.trailer_youtube_url)
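
# Example usage (illustrative only; the argument values below are placeholders):
#   toy_story = Movie("Toy Story", "1995", "poster.jpg",
#                     "https://www.youtube.com/watch?v=...", "G")
#   toy_story.show_trailer()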
|
[
"webbrowser.open"
] |
[((470, 511), 'webbrowser.open', 'webbrowser.open', (['self.trailer_youtube_url'], {}), '(self.trailer_youtube_url)\n', (485, 511), False, 'import webbrowser\n')]
|
# Copyright 2013 Google Inc. All Rights Reserved.
"""Provide commands for managing SSL certificates of Cloud SQL instances."""
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
class SslCerts(base.Group):
"""Provide commands for managing SSL certificates of Cloud SQL instances.
Provide commands for managing SSL certificates of Cloud SQL instances,
including creating, deleting, listing, and getting information about
certificates.
"""
@staticmethod
def Args(parser):
parser.add_argument(
'--instance',
'-i',
help='Cloud SQL instance ID.')
def Filter(self, tool_context, args):
if not args.instance:
raise exceptions.ToolException('argument --instance/-i is required')
|
[
"googlecloudsdk.calliope.exceptions.ToolException"
] |
[((707, 769), 'googlecloudsdk.calliope.exceptions.ToolException', 'exceptions.ToolException', (['"""argument --instance/-i is required"""'], {}), "('argument --instance/-i is required')\n", (731, 769), False, 'from googlecloudsdk.calliope import exceptions\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from random import randint
from set_aws_mfa.data.data_manager import ProfileTuple
from set_aws_mfa.helper import helper
from set_aws_mfa import validate
from set_aws_mfa.data import data_manager
from set_aws_mfa.helper.helper import IntObject
from set_aws_mfa import prompts
from tests.conftest import BUILTIN_INPUTS
########################
# Get profiles
########################
# 1. Get a list of profiles present in both config and credentials, merged with the values from credentials
def test_get_perfect_profile_list(profile_obj_list, credentials_lists, perfect_profile_list):
"""テスト: 取得したリストは、Credential にも Config にも存在する プロファイルのリストかどうか"""
# GIVEN: Profile に Credentials の値も合わせた ProfileTuple のリストを取得する
profile_name_list = []
credentials_name_list = []
for i in profile_obj_list:
        # GIVEN: extract only the name from each ProfileTuple
profile_name_list.append(i.name)
for k in credentials_lists:
        # GIVEN: extract only the name from each CredentialsTuple
credentials_name_list.append(k.name)
for in_both in perfect_profile_list:
assert isinstance(in_both, ProfileTuple)
        # WHEN: the ProfileTuple has an aws_secret_access_key set
if in_both.aws_secret_access_key is not None:
            # THEN: the profile exists in both the credentials and the config files
assert in_both.name in credentials_name_list
assert in_both.name in profile_name_list
def test_prompt_displays_profile_name(capsys, perfect_profile_list):
"""テスト:プロファイルの選択肢が表示されるかどうか"""
# GIVEN: get perfect_profile_list
# WHEN: execute prompt_user_selection()
prompts.prompt_user_selection(perfect_profile_list)
out, err = capsys.readouterr()
# THEN: prompt usable profile name
for p in perfect_profile_list:
if p.aws_secret_access_key is not None:
# ") profile_name" is included in stdout
assert ") " + p.name in out.strip()
def test_get_selected_profile(perfect_profile_list, monkeypatch):
# GIVEN: perfect profile list
# GIVEN: Mock user input
user_input = 2
monkeypatch.setattr(BUILTIN_INPUTS, lambda _: user_input)
# WHEN: this function is called
profile = data_manager.get_selected_profile()
assert profile == perfect_profile_list[user_input - 1]
########################
# Get aws account info
########################
# Test: returns False when ~/.aws_accounts_for_set_aws_mfa does not exist
def test_no_aws_accounts_for_set_aws_mfa_returns_false(set_fake_aws_account_files):
# GIVEN: the path of AWS_ACCOUNT_FOR_SET_AWS_MFA replaced with fake path
# WHEN: Check the existence of AWS_ACCOUNT_FOR_SET_AWS_MFA
is_the_file_exists = validate.check_aws_accounts_for_set_aws_mfa_existence()
# THEN: The file is not exist
assert not is_the_file_exists
# Test: creates ~/.aws_accounts_for_set_aws_mfa when it does not exist
def test_create_aws_accounts_for_set_aws_mfa(set_fake_aws_account_files, delete_fake_aws_account_files):
# GIVEN: the path of AWS_ACCOUNT_FOR_SET_AWS_MFA replaced with fake path
# GIVEN: the path of AWS_ACCOUNT_FOR_SET_AWS_MFA is not exist
# WHEN: Try to prepare AWS_ACCOUNT_FOR_SET_AWS_MFA and it is created
data_manager.prepare_aws_account_id_file()
# WHEN: Check the existence of AWS_ACCOUNT_FOR_SET_AWS_MFA
is_the_file_exists = validate.check_aws_accounts_for_set_aws_mfa_existence()
# THEN: The file is exist
assert is_the_file_exists
# Test: after creating ~/.aws_accounts_for_set_aws_mfa, ask the user to input the AWS account ID for the profile
def test_when_no_aws_account_file_asks_for_user_input(set_fake_aws_account_files, delete_fake_aws_account_files,
perfect_profile_list, capsys):
# GIVEN a Profile
profile = perfect_profile_list[0]
# WHEN create a new aws account file
if not validate.check_aws_accounts_for_set_aws_mfa_existence():
data_manager.create_aws_account_id_file()
else:
        # If the file already exists, raise without writing to it
raise
# THEN: ask to input aws account id for the profile
prompts.prompt_for_asking_aws_account_id(profile)
out, err = capsys.readouterr()
assert profile.name in out.rstrip()
assert prompts.PROMPT_ASK_AWS_ACCOUNT_ID_FOR_PROFILE_BEFORE in out.rstrip()
assert prompts.PROMPT_ASK_AWS_ACCOUNT_ID_FOR_PROFILE_AFTER in out.rstrip()
# Get the AWS account ID for the given profile from ~/.aws_accounts_for_set_aws_mfa
def test_get_aws_account_id_for_the_profile(perfect_profile_list):
"""注意: ~/.aws_accounts_for_set_aws_mfa がローカルにない場合、
テスト対象のツール使用時には該当ファイルがない場合は生成、入力がなされるが、
上記生成を行う前にこのテストは実施した際はテストに失敗する
"""
# GIVEN: a ProfileTuple
profile = perfect_profile_list[0]
# WHEN: call the function
aws_account_id = data_manager.get_aws_account_id(profile)
# THEN:
assert type(aws_account_id) == int
# Test: returns False when the user-input AWS account ID is not an int
def test_user_input_is_not_int(monkeypatch):
    # GIVEN: the user input is not an integer
user_input_not_int = "hogehoge"
# GIVEN: Mock user input string
monkeypatch.setattr(BUILTIN_INPUTS, lambda _: user_input_not_int)
# WHEN: Validate the input
is_int = helper.is_input_int_loop(IntObject(), data_manager.ASKING_AWS_ACCOUNT_ID_INPUT_MESSAGE)
# THEN: It's not an int
assert not is_int
# Test: returns True when the user-input AWS account ID is an int
def test_user_input_is_int(monkeypatch):
    # GIVEN: the user input is a valid integer
    user_input_int = "12345"
    # GIVEN: Mock user input string
    monkeypatch.setattr(BUILTIN_INPUTS, lambda _: user_input_int)
    # WHEN: Validate the input
    is_int = helper.is_input_int_loop(IntObject(), data_manager.ASKING_AWS_ACCOUNT_ID_INPUT_MESSAGE)
    # THEN: It is an int
    assert is_int
# Write the user-input AWS account ID to ~/.aws_accounts_for_set_aws_mfa
def test_writing_aws_account_to_the_file(set_fake_aws_account_files, delete_fake_aws_account_files, perfect_profile_list):
# GIVEN: AWS_ACCOUNT_FOR_SET_AWS_MFA is changed to fake path
# GIVEN: Create fake AWS_ACCOUNT_FOR_SET_AWS_MFA
data_manager.create_aws_account_id_file()
    # GIVEN: specify the target profile
profile = perfect_profile_list[0]
    # GIVEN: assume the aws account id below has been obtained
aws_account_id = 12345
data_manager.create_aws_account_id_file()
# WHEN: check the existence of info for the given profile
data_manager.writing_aws_account_to_the_file(profile, aws_account_id)
    # WHEN: searching AWS_ACCOUNT_FOR_SET_AWS_MFA for the aws account id of the profile
retrieved_aws_account_id = data_manager.get_aws_account_id(profile)
    # THEN: an integer aws account id has been retrieved
assert type(retrieved_aws_account_id) is int
# Test: ~/.aws_accounts_for_set_aws_mfa exists, but no AWS account ID for the profile is present, so the user is asked for input
def test_no_aws_account_id_for_given_profile_prompts_msg(set_fake_aws_account_files,
perfect_profile_list, create_fake_aws_account_files,
delete_fake_aws_account_files,
capsys, monkeypatch):
# GIVEN: Create fake AWS_ACCOUNT_FOR_data_manager
# GIVEN: No info for profile exists in fake AWS_ACCOUNT_FOR_SET_AWS_MFA
    # GIVEN: specify the target profile
profile = perfect_profile_list[0]
    # GIVEN: the user will input a valid integer aws account id
aws_account_id_int = "12345"
# GIVEN: Mock user input string
monkeypatch.setattr(BUILTIN_INPUTS, lambda _: aws_account_id_int)
# WHEN: check the existence of info for the given profile
data_manager.get_aws_account_id(profile)
# THEN: Prompt message to ask for input aws account id for the profile
out, err = capsys.readouterr()
print(out.rstrip())
assert profile.name in out.rstrip()
assert prompts.PROMPT_ASK_AWS_ACCOUNT_ID_FOR_PROFILE_BEFORE in out.rstrip()
assert prompts.PROMPT_ASK_AWS_ACCOUNT_ID_FOR_PROFILE_AFTER in out.rstrip()
# Test: get the MFA ARN for the given profile
def test_get_mfa_arn(perfect_profile_list):
# GIVEN: a ProfileTuple
profile = perfect_profile_list[0]
# WHEN: call the function
mfa_arn = data_manager.get_mfa_arn(profile)
# THEN:
assert data_manager.AWS_IAM_ARN_HEAD_PART
assert data_manager.AWS_IAM_ARN_MFA_PART
assert profile.name in mfa_arn
def test_get_role_for_a_base_profile(profile_which_has_role, profile_obj_list):
"""該当プロフィールと紐づくロールを返す"""
# GIVEN: a valid profile which can switch role
# WHEN: Check a role related to a given profile
role_for_the_profile_list = data_manager.get_role_list_for_a_profile(profile_which_has_role, profile_obj_list)
# THEN: there is some roles related to the profile
if len(role_for_the_profile_list) != 0:
assert role_for_the_profile_list[0].source_profile == profile_which_has_role.name
def test_get_profile_instance_for_user_input(perfect_profile_list):
# GIVEN: validated input num
validated_input = randint(1, len(perfect_profile_list))
# WHEN: get profile instance for the input number
profile_instance = data_manager.get_specified_profile(
perfect_profile_list, validated_input)
# THEN:
assert isinstance(profile_instance, ProfileTuple)
|
[
"set_aws_mfa.helper.helper.IntObject",
"set_aws_mfa.prompts.prompt_for_asking_aws_account_id",
"set_aws_mfa.data.data_manager.get_role_list_for_a_profile",
"set_aws_mfa.data.data_manager.get_selected_profile",
"set_aws_mfa.data.data_manager.get_aws_account_id",
"set_aws_mfa.data.data_manager.get_mfa_arn",
"set_aws_mfa.data.data_manager.prepare_aws_account_id_file",
"set_aws_mfa.prompts.prompt_user_selection",
"set_aws_mfa.validate.check_aws_accounts_for_set_aws_mfa_existence",
"set_aws_mfa.data.data_manager.writing_aws_account_to_the_file",
"set_aws_mfa.data.data_manager.get_specified_profile",
"set_aws_mfa.data.data_manager.create_aws_account_id_file"
] |
[((1609, 1660), 'set_aws_mfa.prompts.prompt_user_selection', 'prompts.prompt_user_selection', (['perfect_profile_list'], {}), '(perfect_profile_list)\n', (1638, 1660), False, 'from set_aws_mfa import prompts\n'), ((2182, 2217), 'set_aws_mfa.data.data_manager.get_selected_profile', 'data_manager.get_selected_profile', ([], {}), '()\n', (2215, 2217), False, 'from set_aws_mfa.data import data_manager\n'), ((2659, 2714), 'set_aws_mfa.validate.check_aws_accounts_for_set_aws_mfa_existence', 'validate.check_aws_accounts_for_set_aws_mfa_existence', ([], {}), '()\n', (2712, 2714), False, 'from set_aws_mfa import validate\n'), ((3163, 3205), 'set_aws_mfa.data.data_manager.prepare_aws_account_id_file', 'data_manager.prepare_aws_account_id_file', ([], {}), '()\n', (3203, 3205), False, 'from set_aws_mfa.data import data_manager\n'), ((3295, 3350), 'set_aws_mfa.validate.check_aws_accounts_for_set_aws_mfa_existence', 'validate.check_aws_accounts_for_set_aws_mfa_existence', ([], {}), '()\n', (3348, 3350), False, 'from set_aws_mfa import validate\n'), ((4035, 4084), 'set_aws_mfa.prompts.prompt_for_asking_aws_account_id', 'prompts.prompt_for_asking_aws_account_id', (['profile'], {}), '(profile)\n', (4075, 4084), False, 'from set_aws_mfa import prompts\n'), ((4710, 4750), 'set_aws_mfa.data.data_manager.get_aws_account_id', 'data_manager.get_aws_account_id', (['profile'], {}), '(profile)\n', (4741, 4750), False, 'from set_aws_mfa.data import data_manager\n'), ((6030, 6071), 'set_aws_mfa.data.data_manager.create_aws_account_id_file', 'data_manager.create_aws_account_id_file', ([], {}), '()\n', (6069, 6071), False, 'from set_aws_mfa.data import data_manager\n'), ((6210, 6251), 'set_aws_mfa.data.data_manager.create_aws_account_id_file', 'data_manager.create_aws_account_id_file', ([], {}), '()\n', (6249, 6251), False, 'from set_aws_mfa.data import data_manager\n'), ((6318, 6387), 'set_aws_mfa.data.data_manager.writing_aws_account_to_the_file', 'data_manager.writing_aws_account_to_the_file', (['profile', 'aws_account_id'], {}), '(profile, aws_account_id)\n', (6362, 6387), False, 'from set_aws_mfa.data import data_manager\n'), ((6497, 6537), 'set_aws_mfa.data.data_manager.get_aws_account_id', 'data_manager.get_aws_account_id', (['profile'], {}), '(profile)\n', (6528, 6537), False, 'from set_aws_mfa.data import data_manager\n'), ((7528, 7568), 'set_aws_mfa.data.data_manager.get_aws_account_id', 'data_manager.get_aws_account_id', (['profile'], {}), '(profile)\n', (7559, 7568), False, 'from set_aws_mfa.data import data_manager\n'), ((8090, 8123), 'set_aws_mfa.data.data_manager.get_mfa_arn', 'data_manager.get_mfa_arn', (['profile'], {}), '(profile)\n', (8114, 8123), False, 'from set_aws_mfa.data import data_manager\n'), ((8509, 8595), 'set_aws_mfa.data.data_manager.get_role_list_for_a_profile', 'data_manager.get_role_list_for_a_profile', (['profile_which_has_role', 'profile_obj_list'], {}), '(profile_which_has_role,\n profile_obj_list)\n', (8549, 8595), False, 'from set_aws_mfa.data import data_manager\n'), ((9022, 9095), 'set_aws_mfa.data.data_manager.get_specified_profile', 'data_manager.get_specified_profile', (['perfect_profile_list', 'validated_input'], {}), '(perfect_profile_list, validated_input)\n', (9056, 9095), False, 'from set_aws_mfa.data import data_manager\n'), ((3801, 3856), 'set_aws_mfa.validate.check_aws_accounts_for_set_aws_mfa_existence', 'validate.check_aws_accounts_for_set_aws_mfa_existence', ([], {}), '()\n', (3854, 3856), False, 'from set_aws_mfa import validate\n'), ((3866, 3907), 
'set_aws_mfa.data.data_manager.create_aws_account_id_file', 'data_manager.create_aws_account_id_file', ([], {}), '()\n', (3905, 3907), False, 'from set_aws_mfa.data import data_manager\n'), ((5157, 5168), 'set_aws_mfa.helper.helper.IntObject', 'IntObject', ([], {}), '()\n', (5166, 5168), False, 'from set_aws_mfa.helper.helper import IntObject\n'), ((5613, 5624), 'set_aws_mfa.helper.helper.IntObject', 'IntObject', ([], {}), '()\n', (5622, 5624), False, 'from set_aws_mfa.helper.helper import IntObject\n')]
|
from django.contrib import admin
from .models import UserLog
admin.site.register(UserLog)
|
[
"django.contrib.admin.site.register"
] |
[((62, 90), 'django.contrib.admin.site.register', 'admin.site.register', (['UserLog'], {}), '(UserLog)\n', (81, 90), False, 'from django.contrib import admin\n')]
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django.http.response import HttpResponse
from django.http import JsonResponse
from django.template.response import SimpleTemplateResponse, TemplateResponse
from django.urls import path, reverse
from django.utils import html
from openpyxl.writer.excel import save_virtual_workbook
import utils.admin as utils
# Local Imports
from mwbase import models as mwbase
from mwbase.forms import ImportXLSXForm
from mwbase.utils import sms_bank
import swapper
AutomatedMessage = swapper.load_model("mwbase", "AutomatedMessage")
Participant = swapper.load_model("mwbase", "Participant")
StatusChange = swapper.load_model("mwbase", "StatusChange")
class ConnectionInline(admin.TabularInline):
model = mwbase.Connection
extra = 0
class NoteInline(admin.TabularInline):
model = mwbase.Note
extra = 1
def mark_quit(modeladmin, request, queryset):
''' mark all mwbase in queryset as quit and save '''
for c in queryset:
c.set_status('quit', comment='Status set from bulk quit action')
mark_quit.short_description = 'Mark participant as quit'
def revert_status(modeladmin, request, queryset):
''' set the status for each participant in queryset to their previous status '''
for c in queryset:
old_status = c.statuschange_set.last().old
c.set_status(old_status, comment='Status reverted from bulk action')
revert_status.short_description = 'Revert to last status'
@admin.register(Participant)
class ParticipantAdmin(admin.ModelAdmin):
list_display = ('study_id', 'display_name', 'preg_status', 'sms_status', 'description', 'facility', 'phone_number', 'due_date', 'language', 'send_day', 'is_validated', 'created')
list_display_links = ('study_id', 'display_name')
list_filter = ('facility', 'study_group', ('created', admin.DateFieldListFilter), 'preg_status', 'is_validated', 'language', 'send_day')
ordering = ('study_id',)
search_fields = ('study_id', 'display_name', 'connection__identity', 'anc_num')
readonly_fields = ('last_msg_client', 'last_msg_system', 'created', 'modified')
inlines = (ConnectionInline, NoteInline)
actions = (mark_quit, revert_status,)
class ParticipantAdminMixin(object):
participant_field = 'participant'
def participant_name(self, obj):
participant = getattr(obj, self.participant_field)
if participant is not None:
return html.format_html(
"<a href='../participant/{0.pk}'>({0.study_id}) {0.display_name}</a>".format(participant))
participant_name.short_description = 'SMS Name'
participant_name.admin_order_field = '{}__study_id'.format(participant_field)
def facility(self, obj):
participant = getattr(obj, self.participant_field)
if participant is not None:
return participant.facility.capitalize()
facility.admin_order_field = '{}__facility'.format(participant_field)
def study_id(self, obj):
return getattr(obj, self.participant_field).study_id
study_id.short_description = 'Study ID'
study_id.admin_order_field = '{}__study_id'.format(participant_field)
def phone_number(self, obj):
connection = getattr(obj, self.participant_field).connection()
if connection is not None:
return html.format_html("<a href='../connection/{0.pk}'>{0.identity}</a>".format(connection))
phone_number.short_description = 'Number'
phone_number.admin_order_field = '{}__connection__identity'.format(participant_field)
@admin.register(mwbase.Message)
class MessageAdmin(admin.ModelAdmin, ParticipantAdminMixin):
list_display = ('text', 'participant_name', 'identity', 'is_system', 'is_outgoing', 'is_reply', 'external_status', 'translation_status', 'created')
list_filter = ('is_system', 'is_outgoing', 'external_status', ('participant', utils.NullFieldListFilter), ('created', admin.DateFieldListFilter), 'connection__participant__facility', 'translation_status', 'is_related', 'external_success')
date_hierarchy = 'created'
search_fields = ('participant__study_id', 'participant__display_name', 'connection__identity')
readonly_fields = ('created', 'modified')
def identity(self, obj):
return html.format_html("<a href='./?q={0.identity}'>{0.identity}</a>".format(obj.connection))
identity.short_description = 'Number'
identity.admin_order_field = 'connection__identity'
@admin.register(mwbase.PhoneCall)
class PhoneCallAdmin(admin.ModelAdmin, ParticipantAdminMixin):
list_display = ('comment', 'participant_name', 'phone_number', 'outcome', 'is_outgoing', 'created')
date_hierarchy = 'created'
list_filter = ('outcome', 'is_outgoing')
readonly_fields = ('created', 'modified')
search_fields = ('participant__study_id', 'participant__display_name')
@admin.register(mwbase.Note)
class NoteAdmin(admin.ModelAdmin, ParticipantAdminMixin):
list_display = ('participant_name', 'comment', 'created')
date_hierarchy = 'created'
@admin.register(mwbase.Connection)
class ConnectionAdmin(admin.ModelAdmin, ParticipantAdminMixin):
list_display = ('identity', 'participant_name', 'facility', 'is_primary')
search_fields = ('participant__study_id', 'participant__display_name', 'identity')
@admin.register(mwbase.Visit)
class VisitAdmin(admin.ModelAdmin, ParticipantAdminMixin):
list_display = ('study_id', 'participant_name', 'visit_type', 'scheduled',
'notification_last_seen', 'notify_count', 'arrived', 'status')
date_hierarchy = 'scheduled'
list_filter = ('status', 'visit_type', 'arrived', 'scheduled')
search_fields = ('participant__study_id', 'participant__display_name')
@admin.register(mwbase.ScheduledPhoneCall)
class ScheduledPhoneCall(admin.ModelAdmin, ParticipantAdminMixin):
list_display = ('study_id', 'participant_name', 'call_type', 'scheduled',
'notification_last_seen', 'notify_count', 'arrived', 'status')
date_hierarchy = 'scheduled'
list_filter = ('status', 'call_type', 'arrived', 'scheduled')
search_fields = ('participant__study_id', 'participant__display_name')
@admin.register(mwbase.Practitioner)
class PractitionerAdmin(admin.ModelAdmin):
list_display = ('facility', 'username', 'password_changed')
@admin.register(StatusChange)
class StatusChangeAdmin(admin.ModelAdmin, ParticipantAdminMixin):
list_display = ('comment', 'participant_name', 'old', 'new', 'type', 'created')
search_fields = ('participant__study_id', 'participant__display_name')
@admin.register(mwbase.EventLog)
class EventLogAdmin(admin.ModelAdmin):
list_display = ('user', 'event', 'created')
class PractitionerInline(admin.TabularInline):
model = mwbase.Practitioner
class UserAdmin(UserAdmin):
inlines = (PractitionerInline,)
# Re-register UserAdmin
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
@admin.register(AutomatedMessage)
class AutomatedMessageAdmin(admin.ModelAdmin):
list_display = ('description', 'english')
list_filter = ('send_base', 'condition', 'group')
change_list_template = "admin/mwbase/automatedmessage/change_list.html"
smsbank_check_template = "admin/mwbase/automatedmessage/sms_bank_check.html"
smsbank_import_template = "admin/mwbase/automatedmessage/sms_bank_import.html"
def changelist_view(self, request, extra_context=None):
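        # Expose the XLSX import form to the custom change-list template so an
        # SMS bank spreadsheet can be uploaded from the admin list page.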
extra_context = extra_context or {}
extra_context['form'] = ImportXLSXForm
return super(AutomatedMessageAdmin, self).changelist_view(request, extra_context=extra_context)
def get_urls(self):
urls = super().get_urls()
my_urls = [
path(r'smsbank_check_view/', self.admin_site.admin_view(self.smsbank_check_view), name='smsbank_check_view'),
path(r'smsbank_import_view/', self.admin_site.admin_view(self.smsbank_import_view), name='smsbank_import_view'),
path(r'smsbank_create_xlsx/', self.admin_site.admin_view(self.smsbank_create_xlsx), name='smsbank_create_xlsx')
]
urls = my_urls + urls
return urls
def smsbank_create_xlsx(self, request, extra_context=None):
wb = sms_bank.create_xlsx()
response = HttpResponse(save_virtual_workbook(wb), content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
response['Content-Disposition'] = 'attachment; filename="smsbank.xlsx"'
return response
def smsbank_import_view(self, request, extra_context=None):
opts = self.model._meta
app_label = opts.app_label
form = ImportXLSXForm(request.POST or None, request.FILES or None)
counts, existing, diff= [], [], []
error = ""
if request.method == 'POST':
if form.is_valid():
file = form.cleaned_data.get("file")
# try:
counts, existing, diff= sms_bank.import_messages(file)
# except Exception as e:
# print(e)
# error = "There was an error importing the given file. Please try again."
context = {
**self.admin_site.each_context(request),
'module_name': str(opts.verbose_name_plural),
'opts': opts,
'counts': counts,
'existing': existing,
'diff': diff,
'error': error,
**(extra_context or {}),
}
return TemplateResponse(request, self.smsbank_import_template or [
'admin/%s/%s/sms_bank_import.html' % (app_label, opts.model_name),
'admin/%s/sms_bank_import.html' % app_label,
'admin/sms_bank_import.html'
], context)
def smsbank_check_view(self, request, extra_context=None):
opts = self.model._meta
app_label = opts.app_label
items = duplicates = descriptions = total = None
form = ImportXLSXForm(request.POST or None, request.FILES or None)
if request.method == 'POST':
if form.is_valid():
file = form.cleaned_data.get("file")
(items, duplicates, descriptions, total, errors ) = sms_bank.check_messages(file)
url = reverse('admin:smsbank_import_view')
response = JsonResponse({
'url': url,
'duplicates': duplicates,
'errors': errors,
'total': total,
'success': True,
})
return response
else:
return JsonResponse({'success': False, 'message': 'Form Invalid',})
else:
return JsonResponse({'success': False, 'message': 'Invalid method',})
|
[
"mwbase.forms.ImportXLSXForm",
"mwbase.utils.sms_bank.create_xlsx",
"mwbase.utils.sms_bank.check_messages",
"django.contrib.admin.site.register",
"django.http.JsonResponse",
"django.template.response.TemplateResponse",
"swapper.load_model",
"django.contrib.admin.register",
"django.urls.reverse",
"mwbase.utils.sms_bank.import_messages",
"openpyxl.writer.excel.save_virtual_workbook",
"django.contrib.admin.site.unregister"
] |
[((601, 649), 'swapper.load_model', 'swapper.load_model', (['"""mwbase"""', '"""AutomatedMessage"""'], {}), "('mwbase', 'AutomatedMessage')\n", (619, 649), False, 'import swapper\n'), ((664, 707), 'swapper.load_model', 'swapper.load_model', (['"""mwbase"""', '"""Participant"""'], {}), "('mwbase', 'Participant')\n", (682, 707), False, 'import swapper\n'), ((723, 767), 'swapper.load_model', 'swapper.load_model', (['"""mwbase"""', '"""StatusChange"""'], {}), "('mwbase', 'StatusChange')\n", (741, 767), False, 'import swapper\n'), ((1548, 1575), 'django.contrib.admin.register', 'admin.register', (['Participant'], {}), '(Participant)\n', (1562, 1575), False, 'from django.contrib import admin\n'), ((3622, 3652), 'django.contrib.admin.register', 'admin.register', (['mwbase.Message'], {}), '(mwbase.Message)\n', (3636, 3652), False, 'from django.contrib import admin\n'), ((4522, 4554), 'django.contrib.admin.register', 'admin.register', (['mwbase.PhoneCall'], {}), '(mwbase.PhoneCall)\n', (4536, 4554), False, 'from django.contrib import admin\n'), ((4922, 4949), 'django.contrib.admin.register', 'admin.register', (['mwbase.Note'], {}), '(mwbase.Note)\n', (4936, 4949), False, 'from django.contrib import admin\n'), ((5104, 5137), 'django.contrib.admin.register', 'admin.register', (['mwbase.Connection'], {}), '(mwbase.Connection)\n', (5118, 5137), False, 'from django.contrib import admin\n'), ((5370, 5398), 'django.contrib.admin.register', 'admin.register', (['mwbase.Visit'], {}), '(mwbase.Visit)\n', (5384, 5398), False, 'from django.contrib import admin\n'), ((5798, 5839), 'django.contrib.admin.register', 'admin.register', (['mwbase.ScheduledPhoneCall'], {}), '(mwbase.ScheduledPhoneCall)\n', (5812, 5839), False, 'from django.contrib import admin\n'), ((6245, 6280), 'django.contrib.admin.register', 'admin.register', (['mwbase.Practitioner'], {}), '(mwbase.Practitioner)\n', (6259, 6280), False, 'from django.contrib import admin\n'), ((6391, 6419), 'django.contrib.admin.register', 'admin.register', (['StatusChange'], {}), '(StatusChange)\n', (6405, 6419), False, 'from django.contrib import admin\n'), ((6648, 6679), 'django.contrib.admin.register', 'admin.register', (['mwbase.EventLog'], {}), '(mwbase.EventLog)\n', (6662, 6679), False, 'from django.contrib import admin\n'), ((6940, 6967), 'django.contrib.admin.site.unregister', 'admin.site.unregister', (['User'], {}), '(User)\n', (6961, 6967), False, 'from django.contrib import admin\n'), ((6968, 7004), 'django.contrib.admin.site.register', 'admin.site.register', (['User', 'UserAdmin'], {}), '(User, UserAdmin)\n', (6987, 7004), False, 'from django.contrib import admin\n'), ((7008, 7040), 'django.contrib.admin.register', 'admin.register', (['AutomatedMessage'], {}), '(AutomatedMessage)\n', (7022, 7040), False, 'from django.contrib import admin\n'), ((8284, 8306), 'mwbase.utils.sms_bank.create_xlsx', 'sms_bank.create_xlsx', ([], {}), '()\n', (8304, 8306), False, 'from mwbase.utils import sms_bank\n'), ((8708, 8767), 'mwbase.forms.ImportXLSXForm', 'ImportXLSXForm', (['(request.POST or None)', '(request.FILES or None)'], {}), '(request.POST or None, request.FILES or None)\n', (8722, 8767), False, 'from mwbase.forms import ImportXLSXForm\n'), ((10083, 10142), 'mwbase.forms.ImportXLSXForm', 'ImportXLSXForm', (['(request.POST or None)', '(request.FILES or None)'], {}), '(request.POST or None, request.FILES or None)\n', (10097, 10142), False, 'from mwbase.forms import ImportXLSXForm\n'), ((8339, 8364), 'openpyxl.writer.excel.save_virtual_workbook', 
'save_virtual_workbook', (['wb'], {}), '(wb)\n', (8360, 8364), False, 'from openpyxl.writer.excel import save_virtual_workbook\n'), ((9607, 9831), 'django.template.response.TemplateResponse', 'TemplateResponse', (['request', "(self.smsbank_import_template or ['admin/%s/%s/sms_bank_import.html' % (\n app_label, opts.model_name), 'admin/%s/sms_bank_import.html' %\n app_label, 'admin/sms_bank_import.html'])", 'context'], {}), "(request, self.smsbank_import_template or [\n 'admin/%s/%s/sms_bank_import.html' % (app_label, opts.model_name), \n 'admin/%s/sms_bank_import.html' % app_label,\n 'admin/sms_bank_import.html'], context)\n", (9623, 9831), False, 'from django.template.response import SimpleTemplateResponse, TemplateResponse\n'), ((10845, 10906), 'django.http.JsonResponse', 'JsonResponse', (["{'success': False, 'message': 'Invalid method'}"], {}), "({'success': False, 'message': 'Invalid method'})\n", (10857, 10906), False, 'from django.http import JsonResponse\n'), ((9024, 9054), 'mwbase.utils.sms_bank.import_messages', 'sms_bank.import_messages', (['file'], {}), '(file)\n', (9048, 9054), False, 'from mwbase.utils import sms_bank\n'), ((10334, 10363), 'mwbase.utils.sms_bank.check_messages', 'sms_bank.check_messages', (['file'], {}), '(file)\n', (10357, 10363), False, 'from mwbase.utils import sms_bank\n'), ((10386, 10422), 'django.urls.reverse', 'reverse', (['"""admin:smsbank_import_view"""'], {}), "('admin:smsbank_import_view')\n", (10393, 10422), False, 'from django.urls import path, reverse\n'), ((10451, 10558), 'django.http.JsonResponse', 'JsonResponse', (["{'url': url, 'duplicates': duplicates, 'errors': errors, 'total': total,\n 'success': True}"], {}), "({'url': url, 'duplicates': duplicates, 'errors': errors,\n 'total': total, 'success': True})\n", (10463, 10558), False, 'from django.http import JsonResponse\n'), ((10751, 10810), 'django.http.JsonResponse', 'JsonResponse', (["{'success': False, 'message': 'Form Invalid'}"], {}), "({'success': False, 'message': 'Form Invalid'})\n", (10763, 10810), False, 'from django.http import JsonResponse\n')]
|
import logging
from .util import raw_get
from google.appengine.api.taskqueue import TaskAlreadyExistsError
from google.appengine.api.taskqueue import TombstonedTaskError
from google.appengine.ext import ndb
from google.appengine.ext import deferred
from datetime import datetime
from time import time
CACHE_TIMEOUT = 30
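# Deferred-task target: re-fetches `endpoint` and overwrites the cached payload.
# Assumes the CachedResponse entity already exists, since this is only scheduled
# from cached_check() after the entity has been stored.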
def update_cache(self, endpoint, **kwargs):
data = raw_get(self, endpoint, **kwargs)
key = ndb.Key(CachedResponse, endpoint)
cr = key.get()
cr.data = data
cr.put()
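# Decorator factory: the wrapped endpoint call is served from a CachedResponse
# entity once one exists. When the stored copy is older than `timeout` seconds, a
# refresh task is enqueued with deferred.defer() but the stale data is still
# returned immediately, so callers never block on the refresh.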
def cached(timeout=CACHE_TIMEOUT):
def func_wrapper(func):
def cached_check(self, endpoint, **kwargs):
key = ndb.Key(CachedResponse, endpoint)
cr = key.get()
if not cr:
data = func(self, endpoint, **kwargs)
cr = CachedResponse(key=key,
endpoint=endpoint,
data=data)
cr.put()
else:
oldtime = cr.timestamp
ts = time()
currtime = datetime.utcfromtimestamp(ts)
td = currtime - oldtime
if td.seconds > timeout:
try:
task_name = endpoint.replace('/', '-') + \
'-%d' % (int(ts))
deferred.defer(update_cache, self, endpoint,
_name=task_name, **kwargs)
except TaskAlreadyExistsError:
logging.critical('Task <%s> already exists.' %
task_name)
logging.critical('Could not update cache.')
except TombstonedTaskError:
logging.critical('Tombstoned task <%s> encountered.' %
task_name)
logging.critical('Attempting to serve old cache data.')
logging.critical('Stored timestamp was: %s' %
str(cr.timestamp))
logging.critical('Current time is: %s' % str(currtime))
return cr.data
return cached_check
return func_wrapper
class CachedResponse(ndb.Model):
endpoint = ndb.StringProperty('e', required=True, indexed=True)
data = ndb.JsonProperty('d', required=True)
timestamp = ndb.DateTimeProperty('t', auto_now=True)
|
[
"time.time",
"google.appengine.ext.ndb.DateTimeProperty",
"datetime.datetime.utcfromtimestamp",
"google.appengine.ext.deferred.defer",
"google.appengine.ext.ndb.StringProperty",
"google.appengine.ext.ndb.JsonProperty",
"logging.critical",
"google.appengine.ext.ndb.Key"
] |
[((431, 464), 'google.appengine.ext.ndb.Key', 'ndb.Key', (['CachedResponse', 'endpoint'], {}), '(CachedResponse, endpoint)\n', (438, 464), False, 'from google.appengine.ext import ndb\n'), ((2309, 2361), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', (['"""e"""'], {'required': '(True)', 'indexed': '(True)'}), "('e', required=True, indexed=True)\n", (2327, 2361), False, 'from google.appengine.ext import ndb\n'), ((2373, 2409), 'google.appengine.ext.ndb.JsonProperty', 'ndb.JsonProperty', (['"""d"""'], {'required': '(True)'}), "('d', required=True)\n", (2389, 2409), False, 'from google.appengine.ext import ndb\n'), ((2426, 2466), 'google.appengine.ext.ndb.DateTimeProperty', 'ndb.DateTimeProperty', (['"""t"""'], {'auto_now': '(True)'}), "('t', auto_now=True)\n", (2446, 2466), False, 'from google.appengine.ext import ndb\n'), ((650, 683), 'google.appengine.ext.ndb.Key', 'ndb.Key', (['CachedResponse', 'endpoint'], {}), '(CachedResponse, endpoint)\n', (657, 683), False, 'from google.appengine.ext import ndb\n'), ((1038, 1044), 'time.time', 'time', ([], {}), '()\n', (1042, 1044), False, 'from time import time\n'), ((1072, 1101), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['ts'], {}), '(ts)\n', (1097, 1101), False, 'from datetime import datetime\n'), ((1355, 1426), 'google.appengine.ext.deferred.defer', 'deferred.defer', (['update_cache', 'self', 'endpoint'], {'_name': 'task_name'}), '(update_cache, self, endpoint, _name=task_name, **kwargs)\n', (1369, 1426), False, 'from google.appengine.ext import deferred\n'), ((1541, 1598), 'logging.critical', 'logging.critical', (["('Task <%s> already exists.' % task_name)"], {}), "('Task <%s> already exists.' % task_name)\n", (1557, 1598), False, 'import logging\n'), ((1664, 1707), 'logging.critical', 'logging.critical', (['"""Could not update cache."""'], {}), "('Could not update cache.')\n", (1680, 1707), False, 'import logging\n'), ((1780, 1845), 'logging.critical', 'logging.critical', (["('Tombstoned task <%s> encountered.' % task_name)"], {}), "('Tombstoned task <%s> encountered.' % task_name)\n", (1796, 1845), False, 'import logging\n'), ((1911, 1966), 'logging.critical', 'logging.critical', (['"""Attempting to serve old cache data."""'], {}), "('Attempting to serve old cache data.')\n", (1927, 1966), False, 'import logging\n')]
|
import numpy as np
import sys
import os
import json
import csv
import re
import random
import subprocess
from markdown2 import Markdown
from Bio import Entrez
from Bio import SeqIO
from collections import defaultdict, OrderedDict
from scipy import stats
from utils import getBindingCore, importBindData,\
	importData, reference_retreive, div0, getRandomColor
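# For each mutation observed together with PAN in either ARP or FOC, build a 2x2
# table (mutated vs. non-mutated sequences, candidate vaccine vs. PAN) and run
# Fisher's exact test, storing the p-value and odds ratio per position/mutation.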
def statisticalTest(options, seqMut, vaccSample, refProt):
# Initialize
	MUT_stats = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(int))))
# For each position
for pos in range(options['pos_range'][0], options['pos_range'][1]+1):
if pos in list(seqMut.keys()):
for ptm in list(seqMut[pos].keys()):
				if 'PAN' in seqMut[pos][ptm] and 'ARP' in seqMut[pos][ptm]:
# Create array
ptm_positive = [seqMut[pos][ptm]['ARP'], seqMut[pos][ptm]['PAN']]
ptm_negative = [vaccSample[pos]['ARP'] - seqMut[pos][ptm]['ARP'], \
vaccSample[pos]['PAN'] - seqMut[pos][ptm]['PAN']]
# Fisher test and append to output
oddsratio, pvalue = stats.fisher_exact([ptm_positive, ptm_negative])
MUT_stats[pos][ptm]['ARP']['pvalue'] = pvalue
MUT_stats[pos][ptm]['ARP']['oddsratio'] = oddsratio
				if 'PAN' in seqMut[pos][ptm] and 'FOC' in seqMut[pos][ptm]:
# Create array
ptm_positive = [seqMut[pos][ptm]['FOC'], seqMut[pos][ptm]['PAN']]
ptm_negative = [vaccSample[pos]['FOC'] - seqMut[pos][ptm]['FOC'], \
vaccSample[pos]['PAN'] - seqMut[pos][ptm]['PAN']]
# Fisher test and append to output
oddsratio, pvalue = stats.fisher_exact([ptm_positive, ptm_negative])
MUT_stats[pos][ptm]['FOC']['pvalue'] = pvalue
MUT_stats[pos][ptm]['FOC']['oddsratio'] = oddsratio
return MUT_stats
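# Walk every sequence record: strip PTM annotations, then count per reference
# position and vaccine how many sequences cover the position (vaccSample) and how
# many carry a residue differing from the reference (seqMUT). Mutations not seen
# in PAN together with ARP or FOC are dropped before returning.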
def mapMutations(data, refProt, options):
# Initialize outputs
	seqMUT = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
	vaccSample = defaultdict(lambda: defaultdict(int))
# For each sequence
for seq in data:
# Initialize: sequence with and without PTM, initial position
AAseq = seq[1][2:-2]
		AAnonPTM = re.sub(r'\[.+?\]', '', AAseq)
init_pos = int(seq[2])
# Check for mutations
for AA, pos in zip(AAnonPTM, range(init_pos, init_pos + len(AAnonPTM))):
# Count instances
vaccSample[pos][seq[3]] += 1
# If there is a mutation append
			if AA != refProt[pos]:
seqMUT[pos][AA][seq[3]] += 1
# Filter positions where there is no samples from any of the
# vaccines
for pos in list(seqMUT.keys()):
for ptm in list(seqMUT[pos].keys()):
if not(seqMUT[pos][ptm]['ARP'] and seqMUT[pos][ptm]['PAN']) \
and not(seqMUT[pos][ptm]['FOC'] and seqMUT[pos][ptm]['PAN']):
del seqMUT[pos][ptm]
if len(seqMUT[pos]) < 1:
del seqMUT[pos]
return seqMUT, vaccSample
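# Render the reference protein 70 residues per line as HTML (via Markdown), with
# binding cores wrapped in the strong/weak highlight markup from getRandomColor,
# one track per vaccine (PAN/ARP/FOC) marking mutated positions, and an annotation
# line per mutation giving its frequency in each vaccine plus the Fisher p-value.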
def map2HTML(options, coreIdxs, coreClass, refProt, MUT_stats, seqMut, vaccSample):
# Initialize
PTM_HTML = list()
markdowner = Markdown()
color = getRandomColor(options)
refProt = ''.join([refProt[pos] for pos in refProt])
# In blocks of 70, while smaller than the length of the protein of reference
i = 0
while i < len(refProt):
# Create string of reference protein (taking 70 AA)
refProtStr = refProt[i:i+70]
count = 0
# For each binding core and class
for core, coreCl in zip(coreIdxs, coreClass):
# If initial position of the core overlaps with that fragment
if core[0] in range(i, i + 70):
# If no previous hightlight
if count == 0:
# Update core idxes, and highlight based on class
core = [idx -i for idx in core]
if coreCl == 'strong':
refProtStr = refProtStr[0:core[0]] + color['strongBinder'][0] + refProtStr[core[0]:core[1]] + \
color['strongBinder'][1] + refProtStr[core[1]:]
count += 1
else:
refProtStr = refProtStr[0:core[0]] + color['weakBinder'][0] + refProtStr[core[0]:core[1]] + \
color['weakBinder'][1] + refProtStr[core[1]:]
count += 1
# If previous binding core in segment, update idx and highlight based on class
else:
if coreCl == 'strong':
core = [idx - i + count*(len(color['strongBinder'][0]) + len(color['strongBinder'][1])) for idx in core]
refProtStr = refProtStr[0:core[0]] + color['strongBinder'][0] + refProtStr[core[0]:core[1]] + \
color['strongBinder'][1] + refProtStr[core[1]:]
count += 1
else:
core = [idx - i + count*(len(color['strongBinder'][0]) + len(color['strongBinder'][1])) for idx in core]
refProtStr = refProtStr[0:core[0]] + color['weakBinder'][0] + refProtStr[core[0]:core[1]] + \
color['weakBinder'][1] + refProtStr[core[1]:]
count += 1
# If ending position of the core overlaps with the fragment: same
elif core[1] in range(i, i + 70):
core = [idx -i for idx in core]
core = [0 if idx < 0 else idx for idx in core]
if coreCl == 'strong':
refProtStr = color['strongBinder'][0] + refProtStr[core[0]:core[1]] + \
color['strongBinder'][1] + refProtStr[core[1]:]
count += 1
else:
refProtStr = color['weakBinder'][0] + refProtStr[core[0]:core[1]] + \
color['weakBinder'][1] + refProtStr[core[1]:]
count += 1
# Append to HTML output
refProtStr = str(i+1) + '.' + ' '*(6 -len(str(i))-1) + refProtStr + '\n'
PTM_HTML.append(markdowner.convert(refProtStr))
# Create PAN string: same as ARP string
PAN_str = color['PAN'][0] + 'PAN: ' + color['PAN'][1]
last_pos = 0
for pos in range(i,i+70):
if pos in list(seqMut.keys()):
if any(seqMut[pos][mut]['PAN'] for mut in seqMut[pos]):
PAN_str = PAN_str + color['PAN'][0] + '—'*(pos - last_pos -1 - i) + color['PAN'][1] + refProt[pos-1]
last_pos = pos - i
PAN_str = PAN_str + color['PAN'][0] + '—'*(70 - last_pos) + color['PAN'][1]
PTM_HTML.append(markdowner.convert(PAN_str))
# Create ARP string, highlighting positions of PTMs, and append
ARP_str = color['ARP'][0] + 'ARP: ' + color['ARP'][1]
mut_dict = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
last_pos = 0
for pos in range(i,i+70):
if pos in list(seqMut.keys()):
if any(seqMut[pos][mut]['ARP'] for mut in seqMut[pos]):
ARP_str = ARP_str + color['ARP'][0] + '—'*(pos - last_pos -1 - i) + color['ARP'][1] + refProt[pos-1]
for mut in seqMut[pos]:
mut_dict[pos][mut]['ARP'] = seqMut[pos][mut]['ARP']
last_pos = pos - i
ARP_str = ARP_str + color['ARP'][0] + '—'*(70 - last_pos) + color['ARP'][1]
PTM_HTML.append(markdowner.convert(ARP_str))
# Create FOC string, highlighting positions of PTMs, and append
FOC_str = color['FOC'][0] + 'FOC: ' + color['FOC'][1]
last_pos = 0
for pos in range(i,i+70):
if pos in list(seqMut.keys()):
if any(seqMut[pos][mut]['FOC'] for mut in seqMut[pos]):
FOC_str = FOC_str + color['FOC'][0] + '—'*(pos - last_pos -1 - i) + color['FOC'][1] + refProt[pos-1]
for mut in seqMut[pos]:
mut_dict[pos][mut]['FOC'] = seqMut[pos][mut]['FOC']
last_pos = pos - i
FOC_str = FOC_str + color['FOC'][0] + '—'*(70 - last_pos) + color['FOC'][1]
PTM_HTML.append(markdowner.convert(FOC_str))
# Create strings for each PTM positon and type
for pos in list(mut_dict.keys()):
for mut in list(mut_dict[pos].keys()):
for vacc in list(mut_dict[pos][mut].keys()):
if mut_dict[pos][mut][vacc] > 0:
vacc_prop = seqMut[pos][mut][vacc]/vaccSample[pos][vacc]
vacc_samp = vaccSample[pos][vacc]
PAN_prop = seqMut[pos][mut]['PAN']/vaccSample[pos]['PAN']
PAN_samp = vaccSample[pos]['PAN']
PAN_mut_str = ' '*(pos -i -3+ 6) + \
color['mut'][0] + mut + color['mut'][1] + \
'(' + vacc + ':{:.2%}({}),PAN:{:.2%}({}),'.format(vacc_prop, vacc_samp, PAN_prop, PAN_samp)
if pos in list(MUT_stats.keys()) and vacc in list(MUT_stats[pos][mut].keys()) \
and MUT_stats[pos][mut][vacc]['pvalue'] < 0.05:
PAN_mut_str = PAN_mut_str + color['red'][0] + 'p={:.2}'.format(MUT_stats[pos][mut][vacc]['pvalue']) + '\n'
elif pos in list(MUT_stats.keys()) and vacc in list(MUT_stats[pos][mut].keys()):
PAN_mut_str = PAN_mut_str + 'p={:.2})'.format(MUT_stats[pos][mut][vacc]['pvalue']) + '\n'
PTM_HTML.append(markdowner.convert(PAN_mut_str))
# Separate
PTM_HTML.append(markdowner.convert(' \n'))
# Update index
i += 70
# Print and save
with open(options['html']["scroll-template"], 'r') as inFile:
with open(options['files']['mutMapJacob.html'], 'w') as outFile:
for line in inFile:
outFile.write(line)
outFile.writelines(PTM_HTML)
def main():
# Read options
with open('options.json','r') as inFile:
options = json.load(inFile)
# Import data
data = importData(options)
# Import protein of reference
refProt = reference_retreive(options['refProt'])
# Get binding cores and binding core positions
coreIdxs, coreClass = getBindingCore(options, refProt)
# Map mutations
seqMut, vaccSample = mapMutations(data, refProt, options)
# Compute Fisher exact test
MUT_stats = statisticalTest(options, seqMut, vaccSample, refProt)
# Create HTML output
map2HTML(options, coreIdxs, coreClass, refProt, MUT_stats, seqMut, vaccSample)
if __name__ == "__main__":
main()
|
[
"utils.getBindingCore",
"json.load",
"utils.importData",
"markdown2.Markdown",
"scipy.stats.fisher_exact",
"collections.defaultdict",
"utils.reference_retreive",
"re.sub",
"utils.getRandomColor"
] |
[((2904, 2914), 'markdown2.Markdown', 'Markdown', ([], {}), '()\n', (2912, 2914), False, 'from markdown2 import Markdown\n'), ((2924, 2947), 'utils.getRandomColor', 'getRandomColor', (['options'], {}), '(options)\n', (2938, 2947), False, 'from utils import getBindingCore, importBindData, importData, reference_retreive, div0, getBindingCore, getRandomColor\n'), ((8742, 8761), 'utils.importData', 'importData', (['options'], {}), '(options)\n', (8752, 8761), False, 'from utils import getBindingCore, importBindData, importData, reference_retreive, div0, getBindingCore, getRandomColor\n'), ((8806, 8844), 'utils.reference_retreive', 'reference_retreive', (["options['refProt']"], {}), "(options['refProt'])\n", (8824, 8844), False, 'from utils import getBindingCore, importBindData, importData, reference_retreive, div0, getBindingCore, getRandomColor\n'), ((8917, 8949), 'utils.getBindingCore', 'getBindingCore', (['options', 'refProt'], {}), '(options, refProt)\n', (8931, 8949), False, 'from utils import getBindingCore, importBindData, importData, reference_retreive, div0, getBindingCore, getRandomColor\n'), ((2083, 2113), 're.sub', 're.sub', (['"""\\\\[.+?\\\\]"""', '""""""', 'AAseq'], {}), "('\\\\[.+?\\\\]', '', AAseq)\n", (2089, 2113), False, 'import re\n'), ((8699, 8716), 'json.load', 'json.load', (['inFile'], {}), '(inFile)\n', (8708, 8716), False, 'import json\n'), ((1922, 1938), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (1933, 1938), False, 'from collections import defaultdict, OrderedDict\n'), ((1064, 1112), 'scipy.stats.fisher_exact', 'stats.fisher_exact', (['[ptm_positive, ptm_negative]'], {}), '([ptm_positive, ptm_negative])\n', (1082, 1112), False, 'from scipy import stats\n'), ((1574, 1622), 'scipy.stats.fisher_exact', 'stats.fisher_exact', (['[ptm_positive, ptm_negative]'], {}), '([ptm_positive, ptm_negative])\n', (1592, 1622), False, 'from scipy import stats\n'), ((1869, 1885), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (1880, 1885), False, 'from collections import defaultdict, OrderedDict\n'), ((6025, 6041), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (6036, 6041), False, 'from collections import defaultdict, OrderedDict\n'), ((526, 542), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (537, 542), False, 'from collections import defaultdict, OrderedDict\n')]
|
import numpy as np
from grabscreen import grab_screen
from directkeys import Up, Down, PressKey, ReleaseKey, Move1, Move2
import time
from getkeys import key_check
import cv2
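# Screen-watching bot loop (the target game is not identified in this script): each
# pass grabs an 800x800 capture and converts it to grayscale, then watches four fixed
# points near the bottom of the window (columns 250/360/480/590 on row 778). While a
# watched pixel stays outside the 130-200 grey band, the loop keeps re-grabbing the
# screen and, whenever the pixel on row 765 in the same column is also outside the
# band, sends Move1() at a nearby x-coordinate; Move2(0, 0) is issued afterwards.
# Holding X breaks out of the loops.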
def main():
    while True:
#Resize the game window to about less than quarter of the screen at 1920*1080 resolution
screen = cv2.cvtColor(grab_screen(region=(0,0,800,800)),cv2.COLOR_RGB2GRAY)
keys = key_check()
while screen[778,250] < 130 or screen[778,250] > 200 :
if screen[765,250] < 130 or screen[765,250] > 200 :
Move1(307,778)
screen = cv2.cvtColor(grab_screen(region=(0,0,800,800)),cv2.COLOR_RGB2GRAY)
print(screen[778 , 250] )
keys = key_check()
## time.sleep(0.1)
if 'X' in keys:
break
Move2(0,0)
while screen[778 , 360]<130 or screen[778 , 360]>200 :
if screen[765 , 360]<130 or screen[765 , 360]>200 :
Move1(420 , 778)
screen = cv2.cvtColor(grab_screen(region=(0,0,800,800)),cv2.COLOR_RGB2GRAY)
print(screen[778 , 360] )
## time.sleep(0.1)
keys = key_check()
if 'X' in keys:
break
Move2(0,0)
while screen [778 , 480]<130 or screen [778 , 480]>200 :
if screen [765 , 480]<130 or screen [765 , 480]>200 :
Move1(525 , 778)
## time.sleep(0.1)
screen = cv2.cvtColor(grab_screen(region=(0,0,800,800)),cv2.COLOR_RGB2GRAY)
print(screen[778 , 480] )
keys = key_check()
if 'X' in keys:
break
Move2(0,0)
while screen[778 , 590]<130 or screen[778 , 590]>200:
if screen[765 , 590]<130 or screen[765 , 590]>200:
Move1(620 , 778)
## time.sleep(0.1)
screen = cv2.cvtColor(grab_screen(region=(0,0,800,800)),cv2.COLOR_RGB2GRAY)
print(screen[778 , 600] )
keys = key_check()
if 'X' in keys:
break
Move2(0,0)
if 'X' in keys:
break
if __name__ == "__main__":
    main()
|
[
"getkeys.key_check",
"directkeys.Move2",
"grabscreen.grab_screen",
"directkeys.Move1"
] |
[((420, 431), 'getkeys.key_check', 'key_check', ([], {}), '()\n', (429, 431), False, 'from getkeys import key_check\n'), ((847, 858), 'directkeys.Move2', 'Move2', (['(0)', '(0)'], {}), '(0, 0)\n', (852, 858), False, 'from directkeys import Up, Down, PressKey, ReleaseKey, Move1, Move2\n'), ((1281, 1292), 'directkeys.Move2', 'Move2', (['(0)', '(0)'], {}), '(0, 0)\n', (1286, 1292), False, 'from directkeys import Up, Down, PressKey, ReleaseKey, Move1, Move2\n'), ((1715, 1726), 'directkeys.Move2', 'Move2', (['(0)', '(0)'], {}), '(0, 0)\n', (1720, 1726), False, 'from directkeys import Up, Down, PressKey, ReleaseKey, Move1, Move2\n'), ((2143, 2154), 'directkeys.Move2', 'Move2', (['(0)', '(0)'], {}), '(0, 0)\n', (2148, 2154), False, 'from directkeys import Up, Down, PressKey, ReleaseKey, Move1, Move2\n'), ((350, 386), 'grabscreen.grab_screen', 'grab_screen', ([], {'region': '(0, 0, 800, 800)'}), '(region=(0, 0, 800, 800))\n', (361, 386), False, 'from grabscreen import grab_screen\n'), ((743, 754), 'getkeys.key_check', 'key_check', ([], {}), '()\n', (752, 754), False, 'from getkeys import key_check\n'), ((1208, 1219), 'getkeys.key_check', 'key_check', ([], {}), '()\n', (1217, 1219), False, 'from getkeys import key_check\n'), ((1642, 1653), 'getkeys.key_check', 'key_check', ([], {}), '()\n', (1651, 1653), False, 'from getkeys import key_check\n'), ((2070, 2081), 'getkeys.key_check', 'key_check', ([], {}), '()\n', (2079, 2081), False, 'from getkeys import key_check\n'), ((580, 595), 'directkeys.Move1', 'Move1', (['(307)', '(778)'], {}), '(307, 778)\n', (585, 595), False, 'from directkeys import Up, Down, PressKey, ReleaseKey, Move1, Move2\n'), ((630, 666), 'grabscreen.grab_screen', 'grab_screen', ([], {'region': '(0, 0, 800, 800)'}), '(region=(0, 0, 800, 800))\n', (641, 666), False, 'from grabscreen import grab_screen\n'), ((1016, 1031), 'directkeys.Move1', 'Move1', (['(420)', '(778)'], {}), '(420, 778)\n', (1021, 1031), False, 'from directkeys import Up, Down, PressKey, ReleaseKey, Move1, Move2\n'), ((1068, 1104), 'grabscreen.grab_screen', 'grab_screen', ([], {'region': '(0, 0, 800, 800)'}), '(region=(0, 0, 800, 800))\n', (1079, 1104), False, 'from grabscreen import grab_screen\n'), ((1446, 1461), 'directkeys.Move1', 'Move1', (['(525)', '(778)'], {}), '(525, 778)\n', (1451, 1461), False, 'from directkeys import Up, Down, PressKey, ReleaseKey, Move1, Move2\n'), ((1529, 1565), 'grabscreen.grab_screen', 'grab_screen', ([], {'region': '(0, 0, 800, 800)'}), '(region=(0, 0, 800, 800))\n', (1540, 1565), False, 'from grabscreen import grab_screen\n'), ((1874, 1889), 'directkeys.Move1', 'Move1', (['(620)', '(778)'], {}), '(620, 778)\n', (1879, 1889), False, 'from directkeys import Up, Down, PressKey, ReleaseKey, Move1, Move2\n'), ((1957, 1993), 'grabscreen.grab_screen', 'grab_screen', ([], {'region': '(0, 0, 800, 800)'}), '(region=(0, 0, 800, 800))\n', (1968, 1993), False, 'from grabscreen import grab_screen\n')]
|