max_stars_repo_path (string, 3-269) | max_stars_repo_name (string, 4-119) | max_stars_count (int64, 0-191k) | id (string, 1-7) | content (string, 6-1.05M) | score (float64, 0.23-5.13) | int_score (int64, 0-5)
---|---|---|---|---|---|---
python_for_everybody/bs_regex_parse.py | timothyyu/p4e-prac | 0 | 12788151 | from bs4 import BeautifulSoup as Soup
import re
string = '<p>Please click <a href="http://www.dr-chuck.com">here</a></p>'
match = re.findall(r'https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+', string)
html = Soup(string, 'html.parser')
bsm = [a['href'] for a in html.find_all('a')]
match2 = re.findall(r'"(http.*)"', string)
print(match)
print(match2)
print(bsm)
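# All three approaches should print ['http://www.dr-chuck.com'] for this input string.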
| 3.359375 | 3 |
contacts/urls.py | pedrohd21/Agenda-Django | 1 | 12788152 | <reponame>pedrohd21/Agenda-Django
from django.urls import path, include
from . import views
urlpatterns = [
path('', views.contactsList, name='contacts-list'),
path('contact/<int:id>', views.contactsViews, name='contacts-views'),
path('newcontact/', views.newContact, name='new-contact'),
path('edit/<int:id>', views.editContact, name='edit-contact'),
path('delete/<int:id>', views.deleteContact, name='delete-contact'),
path('accounts/', include('django.contrib.auth.urls')),
path('accounts/', include('accounts.urls')),
]
| 1.960938 | 2 |
postcipes/unstructured_channel_flow.py | timofeymukha/postcipes | 0 | 12788153 | <filename>postcipes/unstructured_channel_flow.py
# This file is part of postcipes
# (c) <NAME>
# The code is released under the MIT Licence.
# See LICENCE.txt and the Legal section in the README for more information
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .postcipe import Postcipe
import numpy as np
from os.path import join
from scipy.integrate import simps
from collections import OrderedDict
from scipy.interpolate import LinearNDInterpolator
from scipy.spatial import Delaunay
import h5py
import os
import turbulucid as tbl
from vtkmodules.numpy_interface import dataset_adapter as dsa
from vtkmodules.util.numpy_support import *
__all__ = ["UnstructuredChannelFlow"]
class UnstructuredChannelFlow(Postcipe):
def __init__(self, path, nu, n, time, wallModel=False):
Postcipe.__init__(self)
self.case = path
self.readPath = join(self.case)
self.nu = nu
self.n = n
self.wallModel = wallModel
self.time = time
def read(self, time, debug=False):
"""Read the case from a given path to .foam file.
Parameters
----------
time : float
The time step to load, default to latest time
Returns
-------
The reader updated with the read case.
Raises
------
ValueError
If the path is not valid.
"""
# Check that paths are valid
from vtkmodules.vtkIOGeometry import vtkOpenFOAMReader
from vtkmodules.vtkCommonExecutionModel import vtkStreamingDemandDrivenPipeline
if not os.path.exists(self.case):
raise ValueError("Provided path to .foam file invalid!")
if debug:
print(" Opening the case")
# Case reader
reader = vtkOpenFOAMReader()
reader.SetFileName(self.case)
reader.Update()
if debug:
print(" Changing reader parameters")
reader.CreateCellToPointOff()
reader.DisableAllPointArrays()
reader.EnableAllPatchArrays()
reader.DecomposePolyhedraOn()
reader.Update()
reader.UpdateInformation()
info = reader.GetExecutive().GetOutputInformation(0)
if debug:
print("The available timesteps are", vtk_to_numpy(reader.GetTimeValues()))
if time is None:
print("Selecting the latest available time step")
info.Set(vtkStreamingDemandDrivenPipeline.UPDATE_TIME_STEP(),
vtk_to_numpy(reader.GetTimeValues())[-1])
else:
print("Selecting the time step", time)
info.Set(vtkStreamingDemandDrivenPipeline.UPDATE_TIME_STEP(), time)
reader.Update()
reader.UpdateInformation()
return reader
def compute(self, debug=False):
from vtkmodules.vtkFiltersCore import vtkCellDataToPointData
from vtkmodules.vtkFiltersSources import vtkPlaneSource
from vtkmodules.vtkFiltersPoints import vtkVoronoiKernel, vtkPointInterpolator
from vtkmodules.vtkFiltersVerdict import vtkCellSizeFilter
d = 1/self.n
y = np.linspace(d/2, 2 - d/2, 2*self.n)
reader = self.read(self.time, debug=debug)
caseData = reader.GetOutput()
internalBlock = caseData.GetBlock(0)
patchBlocks = caseData.GetBlock(1)
bounds = internalBlock.GetBounds()
if debug:
print(bounds)
fieldNames = dsa.WrapDataObject(internalBlock).GetCellData().keys()
averaged = {}
for i, field in enumerate(fieldNames):
averaged[field] = []
pointData = vtkCellDataToPointData()
pointData.SetInputData(internalBlock)
pointData.Update()
plane = vtkPlaneSource()
plane.SetResolution(int(bounds[1]/d), int(bounds[5]/d))
kernel = vtkVoronoiKernel()
interpolator = vtkPointInterpolator()
interpolator.SetSourceData(pointData.GetOutput())
interpolator.SetKernel(kernel)
# Internal field, go layer by layer
for i in range(y.size):
plane.SetOrigin(0.55*(bounds[0] + bounds[1]), y[i], 0.15*(bounds[4] + bounds[5]))
plane.SetPoint1(bounds[0], y[i], bounds[4])
plane.SetPoint2(bounds[1], y[i], bounds[5])
plane.Update()
interpolator.SetInputConnection(plane.GetOutputPort())
interpolator.Update()
interpolatedData = dsa.WrapDataObject(interpolator.GetOutput()).GetPointData()
for field in fieldNames:
averaged[field].append(np.mean(interpolatedData[field], axis=0))
# Patch data
for wall in ["bottomWall", "topWall"]:
wallBlock = patchBlocks.GetBlock(self.get_block_index(patchBlocks, wall))
cellSizeFilter = vtkCellSizeFilter()
cellSizeFilter.SetInputData(wallBlock)
cellSizeFilter.Update()
area = dsa.WrapDataObject(cellSizeFilter.GetOutput()).CellData['Area']
wallData = dsa.WrapDataObject(wallBlock).CellData
for field in fieldNames:
# area weighted average
avrg = np.sum(wallData[field]*area, axis=0)/np.sum(area)
if wall == "bottomWall":
averaged[field].insert(0, avrg)
else:
averaged[field].append(avrg)
for field in fieldNames:
averaged[field] = np.array(averaged[field])
self.y = np.append(np.append(0, y), 2)
self.avrgFields = averaged
self.u = self.avrgFields['UMean'][:, 0]
self.uu = self.avrgFields['UPrime2Mean'][:, 0]
self.vv = self.avrgFields['UPrime2Mean'][:, 1]
self.ww = self.avrgFields['UPrime2Mean'][:, 2]
self.uv = self.avrgFields['UPrime2Mean'][:, 3]
self.k = 0.5*(self.uu + self.vv + self.ww)
self.nut = self.avrgFields['nutMean']
self.tau = 0
if self.wallModel:
self.wss = self.avrgFields['wallShearStressMean'][:, 0]
self.tau = 0.5*(self.wss[0] + self.wss[-1])
else:
self.tau = self.nu*0.5*(self.u[1] + self.u[-2])/self.y[1]
self.uTau = np.sqrt(self.tau)
self.delta = 0.5*(self.y[-1] - self.y[0])
self.uB = simps(self.u, self.y)/(2*self.delta)
self.uC = 0.5*(self.u[int(self.y.size/2)] +
self.u[int(self.y.size/2) - 1])
self.yPlus = self.y*self.uTau/self.nu
self.uPlus = self.u/self.uTau
self.uuPlus = self.uu/self.uTau**2
self.vvPlus = self.vv/self.uTau**2
self.wwPlus = self.ww/self.uTau**2
self.uvPlus = self.uv/self.uTau**2
self.kPlus = self.k/self.uTau**2
self.uRms = np.sqrt(self.uu)/self.uTau
self.vRms = np.sqrt(self.vv)/self.uTau
self.wRms = np.sqrt(self.ww)/self.uTau
self.reTau = self.uTau*self.delta/self.nu
self.reB = self.uB*self.delta/self.nu
self.reC = self.uC*self.delta/self.nu
self.theta = tbl.momentum_thickness(self.y[:int(self.y.size/2)],
self.u[:int(self.u.size/2)],
interpolate=True)
self.delta99 = tbl.delta_99(self.y[:int(self.y.size/2)],
self.u[:int(self.u.size/2)],
interpolate=True)
#
self.deltaStar = tbl.delta_star(self.y[:int(self.y.size/2)],
self.u[:int(self.u.size/2)],
interpolate=True)
#
self.reTheta = self.theta*self.uC/self.nu
self.reDelta99 = self.delta99*self.uC/self.nu
self.reDeltaStar = self.deltaStar*self.uC/self.nu
def save(self, name):
f = h5py.File(name, 'w')
f.attrs["nu"] = self.nu
f.attrs["uTau"] = self.uTau
f.attrs["uB"] = self.uB
f.attrs["uC"] = self.uC
f.attrs["delta"] = self.delta
f.attrs["delta99"] = self.delta99
f.attrs["deltaStar"] = self.deltaStar
f.attrs["theta"] = self.reTheta
f.attrs["reDelta99"] = self.reDelta99
f.attrs["reDeltaStar"] = self.reDeltaStar
f.attrs["reTheta"] = self.reTheta
f.attrs["reTau"] = self.reTau
f.attrs["reB"] = self.reB
f.attrs["reC"] = self.reC
f.create_dataset("y", data=self.y)
f.create_dataset("u", data=self.u)
f.create_dataset("uu", data=self.uu)
f.create_dataset("vv", data=self.vv)
f.create_dataset("ww", data=self.ww)
f.create_dataset("k", data=self.k)
f.create_dataset("uv", data=self.uv)
f.create_dataset("nut", data=self.nut)
f.create_dataset("yPlus", data=self.yPlus)
f.create_dataset("uPlus", data=self.uPlus)
f.create_dataset("uuPlus", data=self.uuPlus)
f.create_dataset("vvPlus", data=self.vvPlus)
f.create_dataset("wwPlus", data=self.wwPlus)
f.create_dataset("uvPlus", data=self.uvPlus)
f.create_dataset("kPlus", data=self.kPlus)
f.create_dataset("uRms", data=self.uRms)
f.create_dataset("vRms", data=self.vRms)
f.create_dataset("wRms", data=self.wRms)
f.close()
def load(self, name):
f = h5py.File(name, 'r')
self.nu = f.attrs["nu"]
self.uTau = f.attrs["uTau"]
self.uB = f.attrs["uB"]
self.uC = f.attrs["uC"]
self.delta = f.attrs["delta"]
self.delta99 = f.attrs["delta99"]
self.deltaStar = f.attrs["deltaStar"]
self.reTheta = f.attrs["theta"]
self.reDelta99 = f.attrs["reDelta99"]
self.reDeltaStar = f.attrs["reDeltaStar"]
self.reTheta = f.attrs["reTheta"]
self.reTau = f.attrs["reTau"]
self.reB = f.attrs["reB"]
self.reC = f.attrs["reC"]
self.y = f["y"][:]
self.u = f["u"][:]
self.uu = f["uu"][:]
self.vv = f["vv"][:]
self.ww = f["ww"][:]
self.k = f["k"][:]
self.uv = f["uv"][:]
self.nut = f["nut"][:]
self.yPlus = f["yPlus"][:]
self.uPlus = f["uPlus"][:]
self.uuPlus = f["uuPlus"][:]
self.vvPlus = f["vvPlus"][:]
self.wwPlus = f["wwPlus"][:]
self.uvPlus = f["uvPlus"][:]
self.uvPlus = f["kPlus"][:]
self.uRms = f["uRms"][:]
self.vRms = f["vRms"][:]
self.vRms = f["wRms"][:]
self.kPlus = f["kPlus"][:]
f.close()
def utau_relative_error(self, bench, procent=True, abs=False):
error = (self.uTau - bench)/bench
if procent:
error *= 100
if abs:
error = np.abs(error)
return error
def get_block_index(self, blocks, name):
"""Get the index of the block by name.
Parameters
----------
blocks : vtkMultiBlockDataSet
The dataset with the blocks.
name : str
The name of the block that is sought.
Returns
-------
int
The index of the sought block.
"""
        # vtkCompositeDataSet.NAME() is the metadata key that stores each block's name.
        from vtkmodules.vtkCommonDataModel import vtkCompositeDataSet
        number = -1
        for i in range(blocks.GetNumberOfBlocks()):
            if (blocks.GetMetaData(i).Get(vtkCompositeDataSet.NAME()) ==
                    name):
number = i
break
if number == -1:
raise NameError("No block named " + name + " found")
return number
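# A minimal usage sketch (not part of the original file; the .foam path and the numeric
# values below are made-up placeholders): build the postprocessor, average the fields,
# and write the resulting profiles to an HDF5 file.
#
#     post = UnstructuredChannelFlow("path/to/case.foam", nu=5e-5, n=32, time=None)
#     post.compute(debug=True)
#     post.save("channel_statistics.hdf5")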
| 2.375 | 2 |
bootstrap.py | Borgotto/Parsecbot | 2 | 12788154 | import subprocess
import sys
def start():
    # Run the bot and return its raw exit code.
    # 0x78 == 0b01111000 == ord('x'), the special "eXit" shutdown code.
    return subprocess.call((sys.executable, "main.py"))
if __name__ == "__main__":
print("Executing Bot initialisation.")
while True:
# Start the bot
print("Starting Bot;")
code = start()
        if code != 0x78:
print(f'\nRe-executing Bot initialisation. Exit code {code}')
continue
else: # Exit if bot script returns special shutdown code 0x78
print(f'\nShutting down. Exit code {code}')
break
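    # For this restart loop to ever terminate, main.py is expected to signal a clean
    # shutdown by exiting with the sentinel code, e.g. sys.exit(0x78) (an assumption
    # about main.py, which is not shown here).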
| 2.859375 | 3 |
src/SimilarNeuron/exception.py | guanquanchen/similar-neuron | 1 | 12788155 | class SimilarNeuronException(Exception):
def __init__(self, name: str, reason: str) -> None:
self.name = name
self.reason = reason
def __str__(self) -> str:
return f"{self.name}: {self.reason}"
class AnnotationEmpty(SimilarNeuronException):
def __init__(
self,
name: str = 'AnnotationEmpty',
reason: str = 'The function signature has no comments.'
) -> None:
super().__init__(name, reason)
class TransformError(SimilarNeuronException):
def __init__(
self,
name: str = 'TransformError',
reason: str = 'Type conversion error, look up `transform` object.'
) -> None:
super().__init__(name, reason)
class SwitchEmptyError(SimilarNeuronException):
def __init__(
self,
name: str = 'SwitchEmptyError',
reason: str = 'This `Switch` object not in Agreement.'
) -> None:
super().__init__(name, reason)
class AssignmentError(SimilarNeuronException):
def __init__(
self,
name: str = 'AssignmentError',
reason: str = 'Relationship assignment error.'
) -> None:
        super().__init__(name, reason)
| 3.03125 | 3 |
View/GUI.py | esteban-mendoza/ProyectoFinal | 0 | 12788156 | from tkinter import *
from tkinter import ttk
from View.Datepicker import Datepicker
from Controller.Controller import Controller
from tkinter.filedialog import askopenfilename
"""
Author: <NAME> (418002863)
Email: <EMAIL>
"""
class GUI(ttk.Frame):
fields_current_query = dict()
def __init__(self, master):
self.control = Controller()
# Main frame
super().__init__(master)
super().grid(row=0, column=0, sticky=(N, W, E, S))
# Master settings
master.title("Pólizas")
master.columnconfigure(0, weight=1)
master.rowconfigure(0, weight=1)
# Notebook
self.notebook = ttk.Notebook(self, padding=10)
self.notebook.grid(row=0, column=0, sticky=(N, W, E, S))
# Consultas frame
self.fr_consultas = ttk.Frame(self.notebook)
self.notebook.add(self.fr_consultas, text="Consultas", padding=10)
# Agregar registros / importar frame
self.fr_agregar = ttk.Frame(self.notebook)
self.notebook.add(self.fr_agregar, text="Agregar / Importar", padding=10)
# Ayuda frame
self.fr_ayuda = ttk.Frame(self.notebook)
self.notebook.add(self.fr_ayuda, text="Ayuda", padding=10)
# Cliente
self.lf_cliente = ttk.Labelframe(self.fr_consultas, text="Cliente")
self.lf_cliente.grid(row=0, column=0, rowspan=3, columnspan=6)
self.val_id_cliente = BooleanVar()
self.ch_id_cliente = ttk.Checkbutton(self.lf_cliente, variable=self.val_id_cliente)
self.ch_id_cliente.grid(row=1, column=0)
self.la_id_cliente = ttk.Label(self.lf_cliente, text="id_cliente")
self.la_id_cliente.grid(row=1, column=1)
self.id_cliente = StringVar()
self.en_id_cliente = ttk.Entry(self.lf_cliente, textvariable=self.id_cliente)
self.en_id_cliente.grid(row=1, column=2)
self.val_nombre = BooleanVar()
self.ch_nombre = ttk.Checkbutton(self.lf_cliente, variable=self.val_nombre)
self.ch_nombre.grid(row=1, column=3)
self.la_nombre = ttk.Label(self.lf_cliente, text="Nombre")
self.la_nombre.grid(row=1, column=4)
self.nombre = StringVar()
self.en_nombre = ttk.Entry(self.lf_cliente, width=34, textvariable=self.nombre)
self.en_nombre.grid(row=1, column=5)
self.val_direccion = BooleanVar()
self.ch_direccion = ttk.Checkbutton(self.lf_cliente, variable=self.val_direccion)
self.ch_direccion.grid(row=2, column=0)
self.la_direccion = ttk.Label(self.lf_cliente, text="Dirección")
self.la_direccion.grid(row=2, column=1)
self.direccion = StringVar()
self.en_direccion = ttk.Entry(self.lf_cliente, width=72, textvariable=self.direccion)
self.en_direccion.grid(row=2, column=2, columnspan=4)
for child in self.lf_cliente.winfo_children():
child.grid_configure(padx=5, pady=5)
# Factura
self.lf_factura = ttk.LabelFrame(self.fr_consultas, text="Factura")
self.lf_factura.grid(row=0, column=6, rowspan=3, columnspan=3)
self.val_id_factura = BooleanVar()
self.ch_id_factura = ttk.Checkbutton(self.lf_factura, variable=self.val_id_factura)
self.ch_id_factura.grid(row=1, column=6)
self.la_id_factura = ttk.Label(self.lf_factura, text="id_factura")
self.la_id_factura.grid(row=1, column=7)
self.id_factura = StringVar()
self.en_id_factura = ttk.Entry(self.lf_factura, textvariable=self.id_factura)
self.en_id_factura.grid(row=1, column=8)
self.val_costo_vehiculo = BooleanVar()
self.ch_costo_vehiculo = ttk.Checkbutton(self.lf_factura, variable=self.val_costo_vehiculo)
self.ch_costo_vehiculo.grid(row=2, column=6)
self.la_costo_vehiculo = ttk.Label(self.lf_factura, text="Costo del\nautomóvil")
self.la_costo_vehiculo.grid(row=2, column=7)
self.costo_vehiculo = StringVar()
self.en_costo_vehiculo = ttk.Entry(self.lf_factura, textvariable=self.costo_vehiculo)
self.en_costo_vehiculo.grid(row=2, column=8)
for child in self.lf_factura.winfo_children():
child.grid_configure(padx=5, pady=5)
# Vehículo
self.lf_vehiculo = ttk.LabelFrame(self.fr_consultas, text="Vehículo")
self.lf_vehiculo.grid(row=3, column=6, rowspan=4, columnspan=3)
self.val_placas = BooleanVar()
self.ch_placas = ttk.Checkbutton(self.lf_vehiculo, variable=self.val_placas)
self.ch_placas.grid(row=4, column=0)
self.la_placas = ttk.Label(self.lf_vehiculo, text="Placas")
self.la_placas.grid(row=4, column=1)
self.placas = StringVar()
self.en_placas = ttk.Entry(self.lf_vehiculo, textvariable=self.placas)
self.en_placas.grid(row=4, column=2)
self.val_marca = BooleanVar()
self.ch_marca = ttk.Checkbutton(self.lf_vehiculo, variable=self.val_marca)
self.ch_marca.grid(row=5, column=0)
self.la_marca = ttk.Label(self.lf_vehiculo, text="Marca")
self.la_marca.grid(row=5, column=1)
self.marca = StringVar()
self.en_marca = ttk.Entry(self.lf_vehiculo, textvariable=self.marca)
self.en_marca.grid(row=5, column=2)
self.val_modelo = BooleanVar()
self.ch_modelo = ttk.Checkbutton(self.lf_vehiculo, variable=self.val_modelo)
self.ch_modelo.grid(row=6, column=0)
self.la_modelo = ttk.Label(self.lf_vehiculo, text="Modelo")
self.la_modelo.grid(row=6, column=1)
self.modelo = StringVar()
self.en_modelo = ttk.Entry(self.lf_vehiculo, textvariable=self.modelo)
self.en_modelo.grid(row=6, column=2)
for child in self.lf_vehiculo.winfo_children():
child.grid_configure(padx=5, pady=5)
# Póliza
self.lf_poliza = ttk.LabelFrame(self.fr_consultas, text="Póliza")
self.lf_poliza.grid(row=3, column=0, rowspan=3, columnspan=6)
self.val_costo_seguro = BooleanVar()
self.ch_costo_seguro = ttk.Checkbutton(self.lf_poliza, variable=self.val_costo_seguro)
self.ch_costo_seguro.grid(row=4, column=3)
self.la_costo_seguro = ttk.Label(self.lf_poliza, text="Costo del\nseguro")
self.la_costo_seguro.grid(row=4, column=4)
self.costo_seguro = StringVar()
self.en_costo_seguro = ttk.Entry(self.lf_poliza, textvariable=self.costo_seguro)
self.en_costo_seguro.grid(row=4, column=5)
self.val_prima_asegurada = BooleanVar()
self.ch_prima_asegurada = ttk.Checkbutton(self.lf_poliza, variable=self.val_prima_asegurada)
self.ch_prima_asegurada.grid(row=5, column=3)
self.la_prima_asegurada = ttk.Label(self.lf_poliza, text="Prima asegurada")
self.la_prima_asegurada.grid(row=5, column=4)
self.prima_asegurada = StringVar()
self.en_prima_asegurada = ttk.Entry(self.lf_poliza, textvariable=self.prima_asegurada)
self.en_prima_asegurada.grid(row=5, column=5)
self.val_fecha_apertura = BooleanVar()
self.ch_fecha_apertura = ttk.Checkbutton(self.lf_poliza, variable=self.val_fecha_apertura)
self.ch_fecha_apertura.grid(row=4, column=6)
self.la_fecha_apertura = ttk.Label(self.lf_poliza, text="Fecha de\napertura")
self.la_fecha_apertura.grid(row=4, column=7)
self.fecha_apertura = StringVar()
self.en_fecha_apertura = Datepicker(self.lf_poliza, datevar=self.fecha_apertura)
self.en_fecha_apertura.grid(row=4, column=8)
self.val_fecha_vencimiento = BooleanVar()
self.ch_fecha_vencimiento = ttk.Checkbutton(self.lf_poliza, variable=self.val_fecha_vencimiento)
self.ch_fecha_vencimiento.grid(row=5, column=6)
self.la_fecha_vencimiento = ttk.Label(self.lf_poliza, text="Fecha de\nvencimiento")
self.la_fecha_vencimiento.grid(row=5, column=7)
self.fecha_vencimiento = StringVar()
self.en_fecha_vencimiento = Datepicker(self.lf_poliza, datevar=self.fecha_vencimiento)
self.en_fecha_vencimiento.grid(row=5, column=8)
for child in self.lf_poliza.winfo_children():
child.grid_configure(padx=5, pady=5)
# Table
self.fr_tabla = ttk.Frame(self.fr_consultas, width=900, height=180)
self.fr_tabla.grid(row=7, column=0, rowspan=8, columnspan=10)
self.fr_tabla.grid_propagate(0)
self.tabla = ttk.Treeview(self.fr_tabla, height=12, selectmode=BROWSE)
self.tabla.grid(row=7, column=0, sticky=N+S+W+E)
self.tabla.bind("<<TreeviewSelect>>", self.populate_fields)
# Scroll bars
self.vscroll = ttk.Scrollbar(self.fr_tabla, orient=VERTICAL)
self.vscroll.grid(row=7, column=9, rowspan=7, sticky=W+N+S)
self.hscroll = ttk.Scrollbar(self.fr_tabla, orient=HORIZONTAL)
self.hscroll.grid(row=14, column=0, columnspan=9, sticky=W+E+N)
# Scroll bars binding
self.vscroll.configure(command=self.tabla.yview)
self.hscroll.configure(command=self.tabla.xview)
self.tabla.configure(yscrollcommand=self.vscroll.set)
self.tabla.configure(xscrollcommand=self.hscroll.set)
# Buttons
self.bo_mostrar = ttk.Button(self.fr_consultas, text="Mostrar todo", width=16,
command=self.show_all)
self.bo_mostrar.grid(row=1, column=9, sticky=W)
self.bo_limpiar = ttk.Button(self.fr_consultas, text="Limpiar campos", width=16,
command=self.limpiar_campos)
self.bo_limpiar.grid(row=2, column=9, sticky=W)
self.bo_buscar = ttk.Button(self.fr_consultas, text="Buscar", width=16)
self.bo_buscar.grid(row=3, column=9, sticky=W)
self.bo_actualizar = ttk.Button(self.fr_consultas, text="Actualizar", width=16)
self.bo_actualizar.grid(row=4, column=9, sticky=W)
self.bo_eliminar = ttk.Button(self.fr_consultas, text="Eliminar", width=16)
self.bo_eliminar.grid(row=5, column=9, sticky=W)
# Padding of elements in consultas frame
for child in self.fr_consultas.winfo_children():
child.grid_configure(padx=5, pady=5)
# Ayuda frame widgets
self.la_ayuda = ttk.Label(self.fr_ayuda,
text="Licenciatura en Matemáticas Aplicadas\n\n"
"Proyecto final para la materia de Manejo de Datos.\n"
"Profesor: <NAME>\n\n"
"Autor: <NAME> (418002863)\n")
self.la_ayuda.grid(row=0, column=0)
# Padding of elements in ayuda frame
for child in self.fr_ayuda.winfo_children():
child.grid_configure(padx=5, pady=5)
# Agregar / importar frame widgets
self.la_instruccion = ttk.Label(self.fr_agregar,
text="NOTA: \n"
"Los campos marcados con * no pueden estar vacíos.\n"
"Los campos marcados con + pueden dejarse en blanco y se generan "
"automáticamente.")
self.la_instruccion.grid(row=0, column=0, pady=20)
self.lf_ag_cliente = ttk.Labelframe(self.fr_agregar, text="Cliente")
self.lf_ag_cliente.grid(row=4, column=0, rowspan=3, columnspan=8, sticky=(E, W))
self.la_ag_id_cliente = ttk.Label(self.lf_ag_cliente, text="id_cliente+")
self.la_ag_id_cliente.grid(row=1, column=1)
self.ag_id_cliente = StringVar()
self.en_ag_id_cliente = ttk.Entry(self.lf_ag_cliente, textvariable=self.ag_id_cliente)
self.en_ag_id_cliente.grid(row=1, column=2)
self.la_ag_nombre = ttk.Label(self.lf_ag_cliente, text="Nombre")
self.la_ag_nombre.grid(row=1, column=4)
self.ag_nombre = StringVar()
self.en_ag_nombre = ttk.Entry(self.lf_ag_cliente, width=35, textvariable=self.ag_nombre)
self.en_ag_nombre.grid(row=1, column=5)
self.la_ag_direccion = ttk.Label(self.lf_ag_cliente, text="Dirección")
self.la_ag_direccion.grid(row=2, column=1)
self.ag_direccion = StringVar()
self.en_ag_direccion = ttk.Entry(self.lf_ag_cliente, width=68, textvariable=self.ag_direccion)
self.en_ag_direccion.grid(row=2, column=2, columnspan=4)
self.bo_ag_cliente = ttk.Button(self.lf_ag_cliente, width=18,
text="Agregar cliente", command=self.insert_cliente)
self.bo_ag_cliente.grid(row=1, column=6)
self.bo_importar_clientes = ttk.Button(self.lf_ag_cliente, width=18,
text="Importar clientes", command=self.importar_clientes)
self.bo_importar_clientes.grid(row=2, column=6)
for child in self.lf_ag_cliente.winfo_children():
child.grid_configure(padx=5, pady=5)
self.lf_ag_vehiculo = ttk.Labelframe(self.fr_agregar, text="Vehículo")
self.lf_ag_vehiculo.grid(row=7, column=0, rowspan=3, columnspan=8, sticky=(E, W))
self.la_ag_placas = ttk.Label(self.lf_ag_vehiculo, text="Placas*")
self.la_ag_placas.grid(row=1, column=1)
self.ag_placas = StringVar()
self.en_ag_placas = ttk.Entry(self.lf_ag_vehiculo, textvariable=self.ag_placas)
self.en_ag_placas.grid(row=1, column=2)
# self.la_ag_id_factura = ttk.Label(self.lf_ag_vehiculo, text="id_factura")
# self.la_ag_id_factura.grid(row=2, column=1)
#
# self.ag_id_factura = StringVar()
# self.en_ag_id_factura = ttk.Entry(self.lf_ag_vehiculo, textvariable=self.ag_id_factura)
# self.en_ag_id_factura.grid(row=2, column=2)
self.la_ag_marca = ttk.Label(self.lf_ag_vehiculo, text="Marca")
self.la_ag_marca.grid(row=1, column=3)
self.ag_marca = StringVar()
self.en_ag_marca = ttk.Entry(self.lf_ag_vehiculo, textvariable=self.ag_marca)
self.en_ag_marca.grid(row=1, column=4)
self.la_ag_modelo = ttk.Label(self.lf_ag_vehiculo, text="Modelo")
self.la_ag_modelo.grid(row=2, column=3)
self.ag_modelo = StringVar()
self.en_ag_modelo = ttk.Entry(self.lf_ag_vehiculo, textvariable=self.ag_modelo)
self.en_ag_modelo.grid(row=2, column=4)
self.bo_ag_vehiculo = ttk.Button(self.lf_ag_vehiculo, width=18,
text="Agregar vehículo", command=self.insert_vehiculo)
self.bo_ag_vehiculo.grid(row=1, column=6)
        self.bo_importar_vehiculo = ttk.Button(self.lf_ag_vehiculo, width=18,
                                               text="Importar vehículos", command=self.importar_vehiculos)
self.bo_importar_vehiculo.grid(row=2, column=6)
for child in self.lf_ag_vehiculo.winfo_children():
child.grid_configure(padx=5, pady=5)
self.lf_ag_factura = ttk.Labelframe(self.fr_agregar, text="Factura")
self.lf_ag_factura.grid(row=10, column=0, rowspan=3, columnspan=8, sticky=(E, W))
self.la_ag_id_factura2 = ttk.Label(self.lf_ag_factura, text="id_factura+")
self.la_ag_id_factura2.grid(row=1, column=1)
self.ag_id_factura2 = StringVar()
self.en_ag_id_factura2 = ttk.Entry(self.lf_ag_factura, textvariable=self.ag_id_factura2)
self.en_ag_id_factura2.grid(row=1, column=2)
self.la_ag_placas2 = ttk.Label(self.lf_ag_factura, text="Placas*")
self.la_ag_placas2.grid(row=2, column=1)
self.ag_placas2 = StringVar()
self.en_ag_placas2 = ttk.Entry(self.lf_ag_factura, textvariable=self.ag_placas2)
self.en_ag_placas2.grid(row=2, column=2)
self.la_ag_costo = ttk.Label(self.lf_ag_factura, text="Costo del vehículo*")
self.la_ag_costo.grid(row=1, column=3)
self.ag_costo = StringVar()
self.en_ag_costo = ttk.Entry(self.lf_ag_factura, textvariable=self.ag_costo)
self.en_ag_costo.grid(row=1, column=4)
self.bo_ag_factura = ttk.Button(self.lf_ag_factura, width=18,
text="Agregar factura", command=self.insert_factura)
self.bo_ag_factura.grid(row=1, column=5)
        self.bo_importar_facturas = ttk.Button(self.lf_ag_factura, width=18,
                                               text="Importar facturas", command=self.importar_facturas)
self.bo_importar_facturas.grid(row=2, column=5)
for child in self.lf_ag_factura.winfo_children():
child.grid_configure(padx=5, pady=5)
self.lf_ag_poliza = ttk.Labelframe(self.fr_agregar, text="Póliza")
self.lf_ag_poliza.grid(row=1, column=0, rowspan=3, columnspan=8, sticky=(E, W))
self.la_ag_id_cliente2 = ttk.Label(self.lf_ag_poliza, text="id_cliente*")
self.la_ag_id_cliente2.grid(row=1, column=1)
self.ag_id_cliente2 = StringVar()
self.en_ag_id_cliente2 = ttk.Entry(self.lf_ag_poliza, textvariable=self.ag_id_cliente2)
self.en_ag_id_cliente2.grid(row=1, column=2)
self.la_ag_id_factura3 = ttk.Label(self.lf_ag_poliza, text="id_factura*")
self.la_ag_id_factura3.grid(row=2, column=1)
self.ag_id_factura3 = StringVar()
self.en_ag_id_factura3 = ttk.Entry(self.lf_ag_poliza, textvariable=self.ag_id_factura3)
self.en_ag_id_factura3.grid(row=2, column=2)
self.la_ag_costo_seguro = ttk.Label(self.lf_ag_poliza, text="Costo del seguro+")
self.la_ag_costo_seguro.grid(row=1, column=3)
self.ag_costo_seguro = StringVar()
self.en_ag_costo_seguro = ttk.Entry(self.lf_ag_poliza, textvariable=self.ag_costo_seguro)
self.en_ag_costo_seguro.grid(row=1, column=4)
self.la_ag_prima = ttk.Label(self.lf_ag_poliza, text="Prima asegurada+")
self.la_ag_prima.grid(row=2, column=3)
self.ag_prima = StringVar()
self.en_ag_prima = ttk.Entry(self.lf_ag_poliza, textvariable=self.ag_prima)
self.en_ag_prima.grid(row=2, column=4)
self.la_ag_apertura = ttk.Label(self.lf_ag_poliza, text="Fecha de apertura+")
self.la_ag_apertura.grid(row=1, column=5)
self.ag_apertura = StringVar()
self.en_ag_apertura = Datepicker(self.lf_ag_poliza, datevar=self.ag_apertura)
self.en_ag_apertura.grid(row=1, column=6)
self.la_ag_vencimiento = ttk.Label(self.lf_ag_poliza, text="Fecha de vencimiento+")
self.la_ag_vencimiento.grid(row=2, column=5)
self.ag_vencimiento = StringVar()
self.en_ag_vencimiento = Datepicker(self.lf_ag_poliza, datevar=self.ag_vencimiento)
        self.en_ag_vencimiento.grid(row=2, column=6)
self.bo_gen_poliza = ttk.Button(self.lf_ag_poliza, width=18,
text="Generar póliza", command=self.gen_poliza)
self.bo_gen_poliza.grid(row=1, column=7)
for child in self.lf_ag_poliza.winfo_children():
child.grid_configure(padx=5, pady=5)
# Padding of elements in agregar / importar frame
for child in self.fr_agregar.winfo_children():
child.grid_configure(padx=5, pady=5)
def insert_cliente(self):
data = dict()
if self.ag_id_cliente.get():
data['id_cliente'] = int(self.ag_id_cliente.get())
if self.ag_nombre.get():
data['nombre'] = self.ag_nombre.get()
if self.ag_direccion.get():
data['direccion'] = self.ag_direccion.get()
self.control.insert_cliente(**data)
self.show_new_cliente(**data)
def show_new_cliente(self, **data):
new_cliente = self.control.last_cliente(**data)
self.ag_id_cliente.set(new_cliente['id_cliente'])
self.ag_nombre.set(new_cliente['nombre'])
self.ag_direccion.set(new_cliente['direccion'])
def insert_vehiculo(self):
data = dict()
if self.ag_placas.get():
data['placas'] = self.ag_placas.get()
# if self.ag_id_factura.get():
# data['id_factura'] = int(self.ag_id_factura.get())
if self.ag_marca.get():
data['marca'] = self.ag_marca.get()
if self.ag_modelo.get():
data['modelo'] = self.ag_modelo.get()
self.control.insert_vehiculo(**data)
self.show_new_vehiculo(**data)
def show_new_vehiculo(self, **data):
new_vehiculo = self.control.last_vehiculo(**data)
self.ag_placas.set(new_vehiculo['placas'])
self.ag_marca.set(new_vehiculo['marca'])
self.ag_modelo.set(new_vehiculo['modelo'])
def insert_factura(self):
data = dict()
if self.ag_id_factura2.get():
data['id_factura'] = int(self.ag_id_factura2.get())
if self.ag_placas2.get():
data['placas'] = self.ag_placas2.get()
if self.ag_costo.get():
data['costo_vehiculo'] = float(self.ag_costo.get())
self.control.insert_factura(**data)
self.show_new_factura(**data)
def show_new_factura(self, **data):
new_factura = self.control.last_factura(**data)
self.ag_id_factura2.set(new_factura['id_factura'])
self.ag_placas2.set(new_factura['placas'])
self.ag_costo.set(new_factura['costo_vehiculo'])
def gen_poliza(self):
data = dict()
if self.ag_id_cliente2.get():
data['id_cliente'] = int(self.ag_id_cliente2.get())
if self.ag_id_factura3.get():
data['id_factura'] = int(self.ag_id_factura3.get())
if self.ag_prima.get():
data['prima_asegurada'] = float(self.ag_prima.get())
if self.ag_costo_seguro.get():
data['costo_seguro'] = float(self.ag_costo_seguro.get())
if self.ag_apertura.get():
data['fecha_apertura'] = self.ag_apertura.get()
if self.ag_vencimiento.get():
data['fecha_vencimiento'] = self.ag_vencimiento.get()
self.control.gen_poliza(**data)
self.show_new_poliza(**data)
def show_new_poliza(self, **data):
new_poliza = self.control.last_poliza(**data)
self.ag_id_cliente2.set(new_poliza['id_cliente'])
self.ag_id_factura3.set(new_poliza['id_factura'])
self.ag_prima.set(new_poliza['prima_asegurada'])
self.ag_costo_seguro.set(new_poliza['costo_seguro'])
self.ag_apertura.set(new_poliza['fecha_apertura'])
self.ag_vencimiento.set(new_poliza['fecha_vencimiento'])
def importar_clientes(self):
path = askopenfilename()
self.control.insert_clientes(path)
def importar_vehiculos(self):
path = askopenfilename()
self.control.insert_vehiculos(path)
def importar_facturas(self):
path = askopenfilename()
self.control.insert_facturas(path)
def get_active_fields(self):
active_fields = dict()
active_fields["id_cliente"] = self.val_id_cliente.get()
active_fields["nombre"] = self.val_nombre.get()
active_fields["direccion"] = self.val_direccion.get()
active_fields["placas"] = self.val_placas.get()
active_fields["marca"] = self.val_marca.get()
active_fields["modelo"] = self.val_modelo.get()
active_fields["id_factura"] = self.val_id_factura.get()
active_fields["costo_vehiculo"] = self.val_costo_vehiculo.get()
active_fields["prima_asegurada"] = self.val_prima_asegurada.get()
active_fields["costo_seguro"] = self.val_costo_seguro.get()
active_fields["fecha_apertura"] = self.val_fecha_apertura.get()
active_fields["fecha_vencimiento"] = self.val_fecha_vencimiento.get()
return active_fields
def show_all(self):
self.clear_results()
        # Set columns
all_fields = {
# Clientes
"id_cliente": True,
"nombre": True,
"direccion": True,
# Facturas
"id_factura": True,
"costo_vehiculo": True,
# Pólizas
"costo_seguro": True,
"prima_asegurada": True,
"fecha_apertura": True,
"fecha_vencimiento": True,
# Vehículos
"placas": True,
"marca": True,
"modelo": True
}
self.set_columnas(all_fields)
self.fields_current_query = all_fields
# Query
rows = self.control.query_all(all_fields)
        # Add rows
for i, row in enumerate(rows):
self.tabla.insert("", END, text=str(i+1), values=row)
def set_columnas(self, fields):
# Set columns
self.tabla.configure(columns=tuple(fields))
for column in fields.keys():
self.tabla.column(column, width=15)
# Set headings
self.tabla.heading("#0", text="No.")
if fields.get("id_cliente", False):
self.tabla.heading("id_cliente", text="id_cliente")
if fields.get("nombre", False):
self.tabla.heading("nombre", text="Nombre")
if fields.get("direccion", False):
self.tabla.heading("direccion", text="Dirección")
if fields.get("placas", False):
self.tabla.heading("placas", text="Placas")
if fields.get("modelo", False):
self.tabla.heading("modelo", text="Modelo")
if fields.get("marca", False):
self.tabla.heading("marca", text="Marca")
if fields.get("id_factura", False):
self.tabla.heading("id_factura", text="id_factura")
if fields.get("costo_vehiculo", False):
self.tabla.heading("costo_vehiculo", text="Costo del vehículo")
if fields.get("prima_asegurada", False):
self.tabla.heading("prima_asegurada", text="Prima asegurada")
if fields.get("costo_seguro", False):
self.tabla.heading("costo_seguro", text="Costo del seguro")
if fields.get("fecha_apertura", False):
self.tabla.heading("fecha_apertura", text="Fecha de apertura")
if fields.get("fecha_vencimiento", False):
self.tabla.heading("fecha_vencimiento", text="Fecha de vencimiento")
def clear_results(self):
for child in self.tabla.get_children():
self.tabla.delete(child)
def limpiar_campos(self):
        # Clear client fields
self.val_id_cliente.set(False)
self.id_cliente.set("")
self.val_nombre.set(False)
self.nombre.set("")
self.val_direccion.set(False)
self.direccion.set("")
        # Clear vehicle fields
self.val_placas.set(False)
self.placas.set("")
self.val_marca.set(False)
self.marca.set("")
self.val_modelo.set(False)
self.modelo.set("")
        # Clear invoice fields
self.val_id_factura.set(False)
self.id_factura.set("")
self.val_costo_vehiculo.set(False)
self.costo_vehiculo.set("")
        # Clear policy fields
self.val_costo_seguro.set(False)
self.costo_seguro.set("")
self.val_prima_asegurada.set(False)
self.prima_asegurada.set("")
self.val_fecha_apertura.set(False)
self.fecha_apertura.set("")
self.val_fecha_vencimiento.set(False)
self.fecha_vencimiento.set("")
def populate_fields(self, event):
row_id = self.tabla.selection()[0]
if self.fields_current_query["id_cliente"]:
self.id_cliente.set(str(self.tabla.set(row_id, "id_cliente")))
if self.fields_current_query["nombre"]:
self.nombre.set(str(self.tabla.set(row_id, "nombre")))
if self.fields_current_query["direccion"]:
self.direccion.set(str(self.tabla.set(row_id, "direccion")))
if self.fields_current_query["placas"]:
self.placas.set(str(self.tabla.set(row_id, "placas")))
if self.fields_current_query["marca"]:
self.marca.set(str(self.tabla.set(row_id, "marca")))
if self.fields_current_query["modelo"]:
self.modelo.set(str(self.tabla.set(row_id, "modelo")))
if self.fields_current_query["id_factura"]:
self.id_factura.set(str(self.tabla.set(row_id, "id_factura")))
if self.fields_current_query["costo_vehiculo"]:
self.costo_vehiculo.set(str(self.tabla.set(row_id, "costo_vehiculo")))
if self.fields_current_query["prima_asegurada"]:
self.prima_asegurada.set(str(self.tabla.set(row_id, "prima_asegurada")))
if self.fields_current_query["costo_seguro"]:
self.costo_seguro.set(str(self.tabla.set(row_id, "costo_seguro")))
if self.fields_current_query["fecha_apertura"]:
self.fecha_apertura.set(str(self.tabla.set(row_id, "fecha_apertura")))
if self.fields_current_query["fecha_vencimiento"]:
self.fecha_vencimiento.set(str(self.tabla.set(row_id, "fecha_vencimiento")))
if __name__ == '__main__':
root = Tk()
GUI(root)
root.mainloop()
| 2.9375 | 3 |
454 4Sum II.py | krishna13052001/LeetCode | 872 | 12788157 | #!/usr/bin/python3
"""
Given four lists A, B, C, D of integer values, compute how many tuples (i, j,
k, l) there are such that A[i] + B[j] + C[k] + D[l] is zero.
To make problem a bit easier, all A, B, C, D have same length of N where
0 ≤ N ≤ 500. All integers are in the range of -2^28 to 2^28 - 1 and the result
is guaranteed to be at most 2^31 - 1.
Example:
Input:
A = [ 1, 2]
B = [-2,-1]
C = [-1, 2]
D = [ 0, 2]
Output:
2
Explanation:
The two tuples are:
1. (0, 0, 0, 1) -> A[0] + B[0] + C[0] + D[1] = 1 + (-2) + (-1) + 2 = 0
2. (1, 1, 0, 0) -> A[1] + B[1] + C[0] + D[0] = 2 + (-1) + (-1) + 0 = 0
"""
from collections import defaultdict
class Solution:
def fourSumCount(self, A, B, C, D):
"""
Brute force with map: O(N^3)
O(N^3) is pretty large, O(N^2) or O(N log N)?
O(N^2) to sum cartesian product (A, B) to construct index
similar to C, D.
Then index loop up
:type A: List[int]
:type B: List[int]
:type C: List[int]
:type D: List[int]
:rtype: int
"""
N = len(A)
AB = defaultdict(int)
CD = defaultdict(int)
for i in range(N):
for j in range(N):
AB[A[i] + B[j]] += 1
CD[C[i] + D[j]] += 1
ret = 0
# O(N^2)
for gross, count in AB.items():
target = 0 - gross
ret += count * CD[target]
return ret
if __name__ == "__main__":
A = [ 1, 2]
B = [-2,-1]
C = [-1, 2]
D = [ 0, 2]
assert Solution().fourSumCount(A, B, C, D) == 2
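    # A quick extra check (not part of the original): with four all-zero lists of length 2,
    # every (i, j, k, l) combination sums to zero, so the expected count is 2**4 == 16.
    # assert Solution().fourSumCount([0, 0], [0, 0], [0, 0], [0, 0]) == 16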
| 3.140625 | 3 |
MoviesForm/moviesapp/views.py | KiralyTamas/Django | 0 | 12788158 | from django.shortcuts import render
from moviesapp.models import Movies
from moviesapp.forms import MoviesForm
def index(request):
return render(request, 'moviesapp/index.html')
def moviesList(request):
moviesList=Movies.objects.all()
movies_dict={'movies':moviesList}
return render(request, 'moviesapp/moviesList.html',movies_dict)
def addMovies(request):
    success = "Successful Movie Registration"
    moviesForm = MoviesForm()
    movies_dict = {'movies': moviesForm}
    if request.method == 'POST':
        moviesForm = MoviesForm(request.POST)
        movies_dict = {'movies': moviesForm, 'success': success}
        if moviesForm.is_valid():
            moviesForm.save()
        return render(request, 'moviesapp/addMovies.html', movies_dict)
    return render(request, 'moviesapp/addMovies.html', movies_dict)
| 2.171875 | 2 |
scripts/hello_kitty.py | zodiacfireworks/introduction-to-programming-in-python | 0 | 12788159 | <reponame>zodiacfireworks/introduction-to-programming-in-python
# -*- encoding: utf-8 -*-
print("hello kitty!")
| 2.109375 | 2 |
src/animo.py | MimansaSharma15/Animo | 2 | 12788160 | import streamlit as st
from help import health_analysis
from load_chatmodel import load
from data import main_data
from get_medical import get_links
import torch
from tensorflow.keras.preprocessing.sequence import pad_sequences
encoder_net, decoder_net = load()
data = main_data()
max, vocab_enc, vocab_dec = data.len_all()
tok_enc = data.tok_enc
tok_dec = data.tok_dec
st.title("Animo")
st.write("A Guide To Mental Heath")
html_temp1 = """
<div style="background-color:#000000 ;padding:10px; background:rgba(255,255,255,0.2); box-shadow: 0 5px 15px rgba(0,0,0,0.5)">
<h2 style="color:black;text-align:center;font-family: "Lucida Console", Courier, monospace;">“Anything that’s human is mentionable, and anything that is mentionable can be more manageable. When we can talk about our feelings, they become less overwhelming, less upsetting, and less scary.” <NAME></h2>
</div>"""
st.markdown(html_temp1, unsafe_allow_html=True)
st.write("")
html_temp2 = """
<div style="background-color:#000000 ;padding:10px; background:rgba(255,255,255,0.2); box-shadow: 0 5px 15px rgba(0,0,0,0.5)">
<h2 style="color:black;text-align:center;font-family: "Lucida Console", Courier, monospace;">Animo is the latin translation of mind and noticing the sync where mental health is the well being of one's mind .We bring you a one stop guide to answer all your questions regarding mental health. We aim to provide and connect every individual with the vast expanse of mental health .Enter your queries in the space provided below and we'll be there at your service!!</h2>
</div>"""
st.markdown(html_temp2, unsafe_allow_html=True)
page_bg_img = '''
<style>
body {
background-image: url("https://www.homemaidsimple.com/wp-content/uploads/2018/04/Mental-health-stigma-1.jpg");
background-size: cover;
height: 100vh;
background-position: center;
}
</style>
'''
st.markdown(page_bg_img, unsafe_allow_html=True)
html_temp3 = """
<div style="background-color:#000000 ;padding:10px; background:rgba(255,255,255,0.2); box-shadow: 0 5px 15px rgba(0,0,0,0.5) ">
<h2 style="color:black;text-align:center;font-family: "Lucida Console", Courier, monospace;">Let's Talk</h2>
</div>"""
st.markdown(html_temp3, unsafe_allow_html=True)
st.write("")
st.write("Ask as many questions as you want and without the fear of judgement. We are hear for you clear any doubts you have about your health.")
st.write("Note: Enter your question in brief!!")
st.write("")
question = st.text_input("Question")
if question:
    # texts_to_sequences expects a list of texts, so wrap the raw question string in a
    # list and keep `question` itself unchanged for the later analysis block.
    question_seq = tok_enc.texts_to_sequences([question])
    seq = torch.tensor(pad_sequences(question_seq, padding='pre', maxlen=100), dtype=torch.long)
with torch.no_grad():
hidden, cell = encoder_net(seq)
outputs = [1]
for _ in range(100):
previous_word = torch.LongTensor([outputs[-1]]).to("cpu")
with torch.no_grad():
output, hidden, cell = decoder_net(previous_word, hidden, cell)
best_guess = output.argmax(1).item()
outputs.append(best_guess)
# Model predicts it's the end of the sentence
if output.argmax(1).item() == 2:
break
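    # (Sketch, not part of the original script.) The generated ids in `outputs` are never
    # decoded or displayed here; one way to surface a reply would be the decoder
    # tokenizer's index_word mapping, e.g.:
    #     answer = " ".join(tok_dec.index_word[i] for i in outputs[1:] if i in tok_dec.index_word)
    #     st.write(answer)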
if question:
question = question.lower()
sent = [question]
health_model = health_analysis(sent)
if health_model.recommend():
html_temp4 = """
<div style="background-color:#000000 ;padding:10px; background:rgba(255,255,255,0.2); box-shadow: 0 5px 15px rgba(0,0,0,0.5)">
<h2 style="color:black;text-align:center;font-family: "Lucida Console", Courier, monospace;">From our conversation we would like to recommend for you whats best for your health. We also provide a service which recommends trained mental health experts,counsellors and psychiatrists in your area.")
</h2>
</div>"""
st.markdown(html_temp4, unsafe_allow_html=True)
html_temp5 = """
<div style="background-color:#000000 ;padding:10px; background:rgba(255,255,255,0.2); box-shadow: 0 5px 15px rgba(0,0,0,0.5)">
<h2 style="color:black;text-align:center;font-family: "Lucida Console", Courier, monospace;">Please enter your pincode in the box below!!</h2>
</div>"""
st.markdown(html_temp5, unsafe_allow_html=True)
pin = st.text_input("Pin")
st.write("""Mental Health : is a loaded term. It can trigger a dizzying array of reactions when we hear it. In a country like India where
Mental health is probably the next pandemic but the awareness is still very low. Our main objective is to educate the population still oblivious to the issues regarding mental health.""")
if pin:
for i in get_links(pin):
st.write(i)
| 3.046875 | 3 |
tests/test_log.py | InfluxGraph/influxgraph | 97 | 12788161 | import uuid
import unittest
import influxgraph
from influxdb import InfluxDBClient
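# Note: these are integration tests. InfluxDBClient defaults to localhost:8086, so a running
# InfluxDB instance is assumed for setUp/tearDown to create and drop the test database.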
class InfluxGraphLogFileTestCase(unittest.TestCase):
def setUp(self):
self.db_name = 'fakey'
self.client = InfluxDBClient(database=self.db_name)
self.client.create_database(self.db_name)
_logger = influxgraph.classes.finder.logger
_logger.handlers = []
def tearDown(self):
self.client.drop_database(self.db_name)
def test_create_log_file_should_succeed(self):
config = { 'influxdb' : { 'host' : 'localhost',
'port' : 8086,
'user' : 'root',
'pass' : '<PASSWORD>',
'db' : self.db_name,
'log_file' : '/tmp/fakey',
'log_level' : 'debug',
},
}
finder = influxgraph.InfluxDBFinder(config)
self.assertTrue(finder)
def test_create_root_log_file_should_fail(self):
_config = { 'influxdb' : { 'host' : 'localhost',
'port' : 8086,
'user' : 'root',
'pass' : '<PASSWORD>',
'db' : self.db_name,
'log_file' : '/' + str(uuid.uuid4()),
'log_level' : 'debug',
},
}
finder = influxgraph.InfluxDBFinder(_config)
self.assertTrue(finder)
if __name__ == '__main__':
unittest.main()
| 2.546875 | 3 |
memory.py | hiroki-kyoto/memory-association-recognition | 0 | 12788162 | # read_dataset.py
# Author : Hiroki
# Modified: 2018-1-10
import cPickle
import matplotlib.pyplot as pl
import numpy as np
def unpickle(file):
with open(file, 'rb') as fo:
dict = cPickle.load(fo)
return dict
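# Note: this module targets Python 2 (cPickle). Under Python 3 one would use
# `import pickle` and `pickle.load(fo, encoding='bytes')`, since the CIFAR-10
# batches are Python 2 pickles.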
def load_dataset():
data = unpickle('cifar-10/data_batch_1')
images = [data['data'][i].reshape(3, 32, 32) for i in range(data['data'].shape[0])]
pl.figure(figsize=[1.0, 1.0])
pl.imshow(np.transpose(images[5], [1, 2, 0]))
pl.show()
return images
######## Can we build a network to detect whether a pixel belongs
######## to an edge according to its neighbors?
# Each pixel in the RGB image will be turned into a vector of
# edge elements {boolean: True means edge, otherwise an in-area pixel}:
class SparseMatrix(object):
'''SparseMatrix: A vector of elements (v,[i,j,...])
in which {v} is required to be nonzero and {[i,j,...]}
is the coordination of this element in matrix.
'''
def __init__(self, elements=None, coords=None, dims=None):
if elements!=None:
assert type(elements)==np.ndarray
self.elements = elements.astype(np.float32)
if coords!=None:
assert type(coords)==np.ndarray
self.coords = coords.astype(np.int32)
if dims!=None:
self.dims = dims
def __from_dense_matrix__(self, dense_matrix):
'''dense_matrix required to be a numpy array'''
assert (type(dense_matrix) == np.ndarray)
self.elements = []
self.coords = []
# One dimension vector
if len(dense_matrix.shape)==1:
for i in range(dense_matrix.shape[0]):
if dense_matrix[i]:
self.elements.append(dense_matrix[i])
self.coords.append(i)
self.elements = np.array(self.elements)
self.coords = np.array(self.coords)
self.dims = dense_matrix.shape
elif len(dense_matrix.shape)==2:
for i in range(dense_matrix.shape[0]):
for j in range(dense_matrix.shape[1]):
if dense_matrix[i,j]:
self.elements.append(dense_matrix[i,j])
self.coords.append([i,j])
self.elements = np.array(self.elements)
self.coords = np.array(self.coords)
self.dims = dense_matrix.shape
elif len(dense_matrix.shape)==3:
for i in range(dense_matrix.shape[0]):
for j in range(dense_matrix.shape[1]):
for k in range(dense_matrix.shape[2]):
if dense_matrix[i,j,k]:
self.elements.append(dense_matrix[i,j,k])
self.coords.append([i,j,k])
self.elements = np.array(self.elements)
self.coords = np.array(self.coords)
self.dims = dense_matrix.shape
else:
raise ValueError("Matrix to convert is not supported!")
    def __multiply__(self, sparse_matrix):
        '''Element-wise scaling by another SparseMatrix.

        Only the scalar (1x1) case is handled; anything else raises
        NotImplementedError.
        '''
        assert type(sparse_matrix) == SparseMatrix
        if len(sparse_matrix.dims) == 1 and sparse_matrix.dims[0] == 1:
            self.elements *= sparse_matrix.elements[0]
        else:
            raise NotImplementedError(
                'SparseMatrix multiplication is only implemented for scalar (1x1) operands.')
def conv(x, w):
    '''Conv: a plain convolution operation.

    input [x] should be a sparse matrix.
    param [w] is a kernel.

    Currently a stub: no body is implemented, so it returns None.
    '''
def Test_Class_SparseMatrix():
sm = SparseMatrix()
x = np.random.uniform(0,1,[32,32])
x[x < 0.99] = 0
sm.__from_dense_matrix__(x)
print(sm.elements)
print(sm.coords)
def main():
print('============================')
Test_Class_SparseMatrix()
if __name__ == '__main__':
main() | 3.015625 | 3 |
lightning/admin.py | git-men/bsm-django | 90 | 12788163 | from api_basebone.core.admin import BSMAdmin
Admin = BSMAdmin
| 1.125 | 1 |
realtime_csv_to_graph_and_limit_check.py | contengee/realtime_csv_to_graph_and_limit_check | 1 | 12788164 | <gh_stars>1-10
import pandas as pd
import matplotlib.pyplot as plt
from pathlib import Path
import datetime as dt
import tkinter as tk
from tkinter import messagebox
import tkinter.simpledialog as simpledialog
MAX_SET_NUM = 10000
LIMIT_SET_NUM = MAX_SET_NUM * 0.9
UPDATE_TIME = 5
def readyCsv():
server_path = Path(str(Path.home()) + r"\Desktop")
date = dt.datetime.today().strftime("%Y%m%d")
csv_list = list(server_path.glob("aaa_"+ date + ".csv"))
df = pd.read_csv(csv_list.pop(), index_col=0)
df = df.loc[:, ['SET']]
df.index = pd.to_datetime(df.index, format='%Y%m%d_%H:%M:%S')
return df
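# readyCsv() assumes a Desktop CSV named aaa_YYYYMMDD.csv whose first column is a timestamp
# index formatted as %Y%m%d_%H:%M:%S and which contains a 'SET' column, e.g.:
#
#     ,SET
#     20240101_09:00:05,120
#     20240101_09:00:10,135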
def makeFigure(df):
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111)
ax.grid()
ax.axhline(LIMIT_SET_NUM, ls='dashed', color = "red")
tday = dt.date.today().strftime('%Y-%m-%d')
sxmin = tday + ' 00:00'
sxmax = tday + ' 23:59'
xmin = dt.datetime.strptime(sxmin, '%Y-%m-%d %H:%M')
xmax = dt.datetime.strptime(sxmax, '%Y-%m-%d %H:%M')
plt.xlim([xmin,xmax])
plt.ylim([0,MAX_SET_NUM])
plt.xticks(rotation=45)
plt.title("Realtime limit check SET_number")
# plt.legend('SET',bbox_to_anchor=(1, 0), loc='lower right', borderaxespad=1, fontsize=10)
plt.plot(df,"b")
# plt.plot(df.index,df.loc[:,'SET'],"b", label="SET")
def upper_limit_check_setnum(now_set_number):
if now_set_number > LIMIT_SET_NUM:
root = tk.Tk()
root.withdraw()
res = messagebox.showwarning("alert", "SET number is over 90%")
print("showwarning", res)
return True
return False
if __name__ == '__main__':
limit_check_flg = False
df = readyCsv()
makeFigure(df)
while True:
df = readyCsv()
if limit_check_flg == False:
now_set_number = df.iloc[-1]['SET']
limit_check_flg = upper_limit_check_setnum(now_set_number)
# plt.plot(df.index,df.loc[:,'SET'],"b", label="SET")
plt.plot(df,"b")
        plt.pause(UPDATE_TIME)
| 2.609375 | 3 |
categories/customcommand.py | MikeJollie2707/MichaelBot | 0 | 12788165 | import discord
from discord.ext import commands
import datetime as dt
import utilities.db as DB
import utilities.facility as Facility
from bot import MichaelBot
class CustomCommand(commands.Cog, name = "Custom Commands", command_attrs = {"cooldown_after_parsing": True}):
"""Commands that support adding custom commands."""
def __init__(self, bot):
self.bot : MichaelBot = bot
self.emoji = '✨'
self.__flags__ = [
"--description",
"--message",
"--channel",
"--reply",
"--addroles",
"--rmvroles"
]
@commands.Cog.listener("on_message")
async def _message(self, message : discord.Message):
        # This is kind of oofy, but whatever conditions exist in events.py need to be filtered out here as well.
if message.author == self.bot.user or isinstance(message.channel, discord.DMChannel):
return
guild_prefix = self.bot._prefixes[message.guild.id] if self.bot.user.id != 649822097492803584 else '!'
if message.content.startswith(guild_prefix):
import utilities.db as DB
async with self.bot.pool.acquire() as conn:
# The message have the format of <prefix>command some_random_bs
# To get the command, split the content, and get the first, which will be
# <prefix>command only.
# To remove prefix, trim the string view based on the length of prefix.
existed = await DB.CustomCommand.get_command(conn, message.guild.id, message.content.split()[0][len(guild_prefix):])
if existed is not None:
if existed["channel"] is not None:
channel = message.guild.get_channel(existed["channel"])
else:
channel = message.channel
# Can only reply to the same channel
reference = None
if existed["is_reply"] and existed["channel"] == message.channel.id:
reference = message
else:
reference = None
try:
await channel.send(existed["message"], reference = reference)
except discord.Forbidden:
                    # For now, we just silently ignore this.
# Might change to raising a command error though.
pass
if len(existed["addroles"]) > 0:
addroles_list = [message.guild.get_role(role) for role in existed["addroles"]]
try:
await message.author.add_roles(*addroles_list)
except discord.Forbidden:
await channel.send("Failed to add roles.")
if len(existed["rmvroles"]) > 0:
rmvroles_list = [message.guild.get_role(role) for role in existed["rmvroles"]]
try:
await message.author.remove_roles(*rmvroles_list)
except discord.Forbidden:
await channel.send("Failed to remove roles.")
@commands.group(aliases = ['ccmd', 'customcmd'], invoke_without_command = True)
@commands.cooldown(rate = 1, per = 5.0, type = commands.BucketType.guild)
@commands.bot_has_permissions(add_reactions = True, read_message_history = True, send_messages = True)
async def ccommand(self, ctx):
'''
View custom commands for this guild.
**Usage:** {usage}
**Cooldown:** 5 seconds per 1 use (guild)
**Example:** {prefix}{command_name}
**You need:** None.
**I need:** `Add Reactions`, `Read Message History`, `Send Messages`.
'''
async with self.bot.pool.acquire() as conn:
custom_commands = await DB.CustomCommand.get_commands(conn, ctx.guild.id)
if custom_commands == [None] * len(custom_commands):
return await ctx.reply("*Cricket noises*", mention_author = False)
from templates.navigate import listpage_generator
def title_formatter(command):
embed = Facility.get_default_embed(
title = "Custom Commands",
timestamp = dt.datetime.utcnow()
).set_author(
name = ctx.guild.name,
icon_url = ctx.guild.icon_url
)
return embed
def item_formatter(embed, command):
embed.add_field(
name = command["name"],
value = f"*{command['description']}*" if command["description"] != "" else "*None*",
inline = False
)
page = listpage_generator(3, custom_commands, title_formatter, item_formatter)
await page.start(ctx)
@ccommand.command()
@commands.cooldown(rate = 1, per = 5.0, type = commands.BucketType.guild)
@commands.has_guild_permissions(manage_guild = True)
@commands.bot_has_permissions(read_message_history = True, send_messages = True)
async def add(self, ctx : commands.Context, name, *, input):
'''
Add a custom command to the guild.
The `input` is in the form of arguments commonly used within terminals.
There are 5 arguments, one of which is required:
- `--description`: The command's description.
- **`--message`: This is required. The command's response.**
- `--channel`: The channel the command will send the response to. Must be ID.
- `--reply`: A flag indicating whether the message will be a reply.
- `--addroles`: The roles the bot will add to the command invoker. Must be IDs.
- `--rmvroles`: The roles the bot will remove to the command invoker. Must be IDs.
Order is not important.
**Usage:** {usage}
**Cooldown:** 5 seconds per 1 use (guild)
**Example 1:** {prefix}{command_name} test --message Hello
**Example 2:** {prefix}{command_name} test2 --description Give some cool roles --message Enjoy :D --reply --addroles 704527865173114900 644339804141518848
**You need:** `Manage Server`.
**I need:** `Read Message History`, `Send Messages`.
'''
builtin_existed = ctx.bot.get_command(name)
if builtin_existed is not None:
return await ctx.reply("This command's name already existed within the bot. Please choose a different one.")
async with self.bot.pool.acquire() as conn:
existed = await DB.CustomCommand.get_command(conn, ctx.guild.id, name)
if existed is not None:
return await ctx.reply(f"This guild already has a command with the name `{name}`. Please choose a different one.")
arguments = Facility.flag_parse(input, self.__flags__)
description = arguments["--description"]
message = arguments["--message"]
channel = arguments["--channel"]
is_reply = arguments["--reply"]
addroles = arguments["--addroles"]
rmvroles = arguments["--rmvroles"]
addroles_list = []
rmvroles_list = []
if isinstance(description, bool):
return await ctx.reply("`--description` is not a flag but rather an argument.")
if message is None:
return await ctx.reply("`--message` is a required argument.")
if isinstance(message, bool):
return await ctx.reply("`--message` is not a flag but rather an argument.")
if isinstance(channel, bool):
return await ctx.reply("`--channel` must be an existed channel's ID.")
elif channel is not None:
try:
channel = int(channel)
except ValueError:
return await ctx.reply("`--channel` must be an existed channel's ID.")
dchannel = ctx.guild.get_channel(channel)
if dchannel is None:
return await ctx.reply("`--channel` must be an existed channel's ID.")
# I decide to make `--reply` both a flag and argument (although there will be no info in argument).
if is_reply is not None:
is_reply = True
else:
is_reply = False
if isinstance(addroles, bool) or isinstance(rmvroles, bool):
return await ctx.reply("`--addroles`/`--rmvroles` is not a flag but rather an argument.")
if isinstance(addroles, str):
addroles_list = []
for role in addroles.split():
try:
drole = ctx.guild.get_role(int(role))
except ValueError:
return await ctx.reply("`--addroles` must contain existed roles' ID.")
if drole is not None and drole < ctx.guild.get_member(self.bot.user.id).top_role:
addroles_list.append(int(role))
if isinstance(rmvroles, str):
rmvroles_list = []
for role in rmvroles.split():
try:
drole = ctx.guild.get_role(int(role))
except ValueError:
return await ctx.reply("`--rmvroles` must contain existed roles' ID.")
if drole is not None and drole < ctx.guild.get_member(self.bot.user.id).top_role:
rmvroles_list.append(int(role))
async with conn.transaction():
await DB.CustomCommand.add(conn, ctx.guild.id, name, {
"description": description,
"message": message,
"channel": channel,
"is_reply": is_reply,
"addroles": addroles_list,
"rmvroles": rmvroles_list
})
await ctx.reply(f"Added command `{name}`.", mention_author = False)
@ccommand.command()
@commands.cooldown(rate = 1, per = 5.0, type = commands.BucketType.guild)
@commands.has_guild_permissions(manage_guild = True)
@commands.bot_has_permissions(read_message_history = True, send_messages = True)
async def remove(self, ctx, name):
'''
Remove a custom command from the guild.
**Usage:** {usage}
**Cooldown:** 5 seconds per 1 use (guild)
**Example:** {prefix}{command_name} test
**You need:** `Manage Server`.
**I need:** `Read Message History`, `Send Messages`.
'''
builtin_existed = ctx.bot.get_command(name)
if builtin_existed is not None:
return await ctx.reply("This command's name somehow matches the bot's default commands. Contact the developer.")
async with self.bot.pool.acquire() as conn:
existed = await DB.CustomCommand.get_command(conn, ctx.guild.id, name)
if existed is None:
return await ctx.reply(f"There is no such command in this guild.")
async with conn.transaction():
await DB.CustomCommand.remove(conn, ctx.guild.id, name)
await ctx.reply(f"Removed command `{name}`.", mention_author = False)
@ccommand.command()
async def edit(self, ctx):
# Make edit work the same way as creation, with a few catches:
# - name is not changeable; it'll be ignored if provided.
# - if an argument is not provided, the old value is kept.
# - to clear an optional argument (say --addroles), provide the string "clear" (case-insensitive).
# - to toggle a flag, simply provide it again.
pass
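# Hedged sketch (kept commented out so it does not clash with the `edit` stub above): one
# possible implementation of the rules described in the comments, reusing only helpers already
# used in this cog (Facility.flag_parse, DB.CustomCommand.get_command / remove / add). The key
# names on the stored record, the dict-like return value of get_command, and the .get() access
# on the parsed flags are assumptions, not confirmed behavior of the bot.
#
# async with self.bot.pool.acquire() as conn:
#     existed = await DB.CustomCommand.get_command(conn, ctx.guild.id, name)
#     if existed is None:
#         return await ctx.reply("There is no such command in this guild.")
#     arguments = Facility.flag_parse(input, self.__flags__)
#     updated = dict(existed)
#     for key in ("description", "message", "channel", "addroles", "rmvroles"):
#         value = arguments.get("--" + key)
#         if isinstance(value, str):
#             updated[key] = None if value.lower() == "clear" else value
#     if arguments.get("--reply") is not None:
#         updated["is_reply"] = not updated.get("is_reply", False)
#     async with conn.transaction():
#         await DB.CustomCommand.remove(conn, ctx.guild.id, name)
#         await DB.CustomCommand.add(conn, ctx.guild.id, name, updated)
#     await ctx.reply(f"Edited command `{name}`.", mention_author = False)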
def setup(bot : MichaelBot):
bot.add_cog(CustomCommand(bot))
| 2.578125 | 3 |
demo/host_game.py | hilearn/ai-game | 0 | 12788166 | <gh_stars>0
import socket
from game import Game, RemotePlayer, _3X5Map, PlayerStats, Stats
def main(host, port):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind((host, port))
s.listen()
players = [RemotePlayer(s,
stats=PlayerStats(8, 3, 1),
weapon_stats=Stats(12, 1),
image='Blue.png'),
RemotePlayer(s,
stats=PlayerStats(8, 3, 1),
weapon_stats=Stats(12, 1),
image='Red.png')]
game = Game(_3X5Map, players)
game.run()
if __name__ == '__main__':
main('127.0.0.1', 3000)
| 2.578125 | 3 |
construct_call_graph.py | cancer525/pythontest | 0 | 12788167 | #!/usr/bin/env python
'''
generates call graph of given python code file
in dot format input for graphviz.
limitations:
* tries to statically figure out function calls
* does not understand classes
* algorithm is naive and may not statically find
all cases
'''
import sys
import parser
import symbol, token
import pprint
import optparse
try: s = set()
except: import sets; set = sets.Set
def annotate_ast_list(ast_list):
code = ast_list[0]
if code in symbol.sym_name: code = symbol.sym_name[code]
else: code = token.tok_name[code]
ast_list[0] = code
for index, item in enumerate(ast_list):
if index == 0: continue
if isinstance(item, list):
ast_list[index] = annotate_ast_list(item)
return ast_list
def get_atom_name(atom):
first_child = atom[1]
first_child_code = first_child[0]
if first_child_code != token.NAME: return None
return first_child[1]
def get_fn_call_data(ast_list):
if len(ast_list) < 3: return None
first_child, second_child = ast_list[1:3]
first_child_code = first_child[0]
if first_child_code != symbol.atom: return None
fn_name = get_atom_name(first_child)
second_child_code = second_child[0]
if second_child_code != symbol.trailer: return None
if len(second_child) < 3: return None
if second_child[1][0] == token.LPAR and second_child[-1][0] == token.RPAR:
return fn_name
else: return None
def find_fn_call(ast_list, calls):
code = ast_list[0]
if code == symbol.power:
fn_name = get_fn_call_data(ast_list)
if fn_name is not None and getattr(__builtins__, fn_name, None) is None: calls.add(fn_name)
for item in ast_list[1:]:
if isinstance(item, list):
find_fn_call(item, calls)
def process_fn(fn_ast_list, call_graph):
dummy, dummy, func_name = fn_ast_list[:3]
dummy, func_name = func_name
calls = set()
find_fn_call(fn_ast_list, calls)
call_graph[func_name] = list(calls)
def construct_call_graph(ast_list, call_graph):
code = ast_list[0]
if code == symbol.funcdef:
process_fn(ast_list, call_graph)
for item in ast_list[1:]:
if isinstance(item, list):
construct_call_graph(item, call_graph)
return call_graph
def generate_dot_code(python_code):
ast = parser.suite(python_code)
ast_list = parser.ast2list(ast)
#annotated_ast_list = annotate_ast_list(ast_list)
#pprint.pprint(annotated_ast_list)
call_graph = {}
construct_call_graph(ast_list, call_graph)
#pprint.pprint(call_graph)
dot = []
dot.append("digraph G {")
dot.append("rankdir=LR")
for from_fn, to_fns in call_graph.iteritems():
if not to_fns:
dot.append('%s;' % from_fn)
for to_fn in to_fns:
if to_fn not in call_graph: continue
dot.append('%s -> %s;' % (from_fn, to_fn))
dot.append("}")
return '\n'.join(dot)
if __name__ == '__main__':
oparser = optparse.OptionParser()
oparser.add_option('-i', '--input-file', default=None, metavar='FILE', help='python code file to process')
options, args = oparser.parse_args()
if options.input_file:
python_code = open(options.input_file).read()
else:
python_code = sys.stdin.read()
dot_code = generate_dot_code(python_code)
print dot_code
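# Hedged usage sketch (the module/file names below are placeholders, not part of this script):
#
#   python construct_call_graph.py -i some_module.py > callgraph.dot
#   dot -Tpng callgraph.dot -o callgraph.png    # render with Graphviz
#
# or, from other Python 2 code:
#
#   print generate_dot_code(open('some_module.py').read())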
| 2.75 | 3 |
etl_nested/tests/unit/test_factories.py | CloverHealth/pycon2017 | 6 | 12788168 | import datetime
import os
import sqlalchemy.orm as sa_orm
from app import factories
from app.etl import transformers
from app.util.json import load_json_file
def test_make_response_from_nested_schema(session: sa_orm.Session, data_dir):
schema = load_json_file(os.path.join(data_dir, 'nested_schema.json'))
form = factories.FormFactory(schema=schema)
user = factories.UserFactory()
session.add_all([form, user])
session.flush()
get_node_path_map = transformers.get_node_path_map_cache(session)
result = factories.make_response(get_node_path_map, form.id)
assert result
assert type(result) == dict
# test a selection of the nested values
assert result['root']
assert type(result['root']) == dict
assert result['root']['primary_care_doctor_phone'] is not None
assert type(result['root']['primary_care_doctor_phone']) == str
assert result['root']['emergency_visits'] is not None
assert type(result['root']['emergency_visits']) == int
assert result['root']['medical_conditions']['mobility_help']['walker'] is not None
assert type(result['root']['medical_conditions']['mobility_help']['walker']) == bool
assert result['root']['date_of_birth'] is not None
assert type(result['root']['date_of_birth']) == str
parsed_date = datetime.datetime.strptime(result['root']['date_of_birth'], '%Y-%m-%d')
assert parsed_date
| 2.546875 | 3 |
userAgentRandomizer/__init__.py | MattWaller/userAgentRandomizer | 0 | 12788169 | from glob import glob
import os, random
class userAgents(object):
def __init__(self):
files_ = glob(f'{os.path.dirname(os.path.realpath(__file__))}/assets/*.txt')
self.uas,self.Chrome,self.Edge,self.Firefox,self.Opera,self.Safari = [],[],[],[],[],[]
for file_ in files_:
with open(file_,'r') as f:
records_ = f.read().split('\n')
f.close()
for rec in records_:
self.uas.append(rec)
if 'Chrome' in file_:
self.Chrome.append(rec)
elif 'Edge' in file_:
self.Edge.append(rec)
elif 'Firefox' in file_:
self.Firefox.append(rec)
elif 'Opera' in file_:
self.Opera.append(rec)
elif 'Safari' in file_:
self.Safari.append(rec)
def random(self,engine=None):
if engine == 'Chrome':
return random.choice(self.Chrome)
elif engine == 'Edge':
return random.choice(self.Edge)
elif engine == 'Firefox':
return random.choice(self.Firefox)
elif engine == 'Opera':
return random.choice(self.Opera)
elif engine == 'Safari':
return random.choice(self.Safari)
else:
return random.choice(self.uas)
def count(self):
return len(self.uas)
if __name__ == "__main__":
ua = userAgents()
print(ua.random('Firefox'))
print(ua.count()) | 2.8125 | 3 |
investments/contrib/tags/models.py | gatsinski/investments | 0 | 12788170 | import uuid
from django.contrib.auth import get_user_model
from django.db import models
from django.utils.translation import gettext_lazy as _
from investments.models import TimestampedModel
UserModel = get_user_model()
class Tag(TimestampedModel):
uuid = models.UUIDField(default=uuid.uuid4, primary_key=True)
name = models.CharField(_("Name"), max_length=254)
author = models.ForeignKey(UserModel, related_name="tags", on_delete=models.CASCADE)
class Meta:
verbose_name = _("Tag")
verbose_name_plural = _("Tags")
def __str__(self):
return f"{self.name} - {self.author}"
| 2.375 | 2 |
server/authentication/utils.py | Arun89-crypto/codechefsrm | 0 | 12788171 | from typing import Any, Dict
from .errors import NoTokenFound, InvalidToken
from . import tokens
def get_token(header: Dict[str, Any]):
if "Authorization" in header:
return header["Authorization"]
raise NoTokenFound()
def validate_token_type(token: str, token_type: str = "Bearer"):
_token_type, token = token.split()
assert _token_type == token_type, "Invalid token type"
return token
def generate_token(payload: Dict[str, Any], **kwargs):
return tokens.generate_key(payload=payload, **kwargs)
async def refresh_to_access(token: str):
payload = tokens.verify_key(token)
if payload.get("refresh"):
payload.pop("refresh")
return tokens.generate_key(payload, get_refresh=True)
raise InvalidToken("Invalid refresh token")
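# Hedged usage sketch (header and token values below are made up):
#
#   header = {"Authorization": "Bearer aaa.bbb.ccc"}
#   raw = get_token(header)            # -> "Bearer aaa.bbb.ccc"
#   token = validate_token_type(raw)   # -> "aaa.bbb.ccc", AssertionError on a wrong type
#   access = await refresh_to_access(refresh_token)   # awaited inside an async context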
| 2.703125 | 3 |
test/unit/test_yaml_spec.py | izakp/hokusai | 85 | 12788172 | <filename>test/unit/test_yaml_spec.py<gh_stars>10-100
import os
import httpretty
from test import HokusaiUnitTestCase
from hokusai import CWD
from hokusai.lib.exceptions import HokusaiError
from hokusai.services.yaml_spec import YamlSpec
httpretty.enable()
httpretty.HTTPretty.allow_net_connect = False
class TestYamlSpec(HokusaiUnitTestCase):
def setUp(self):
self.kubernetes_yml = os.path.join(CWD, 'test/fixtures/kubernetes-config.yml')
@httpretty.activate
def test_yaml_spec(self):
httpretty.register_uri(httpretty.POST, "https://sts.amazonaws.com/",
body=self.fixture('sts-get-caller-identity-response.xml'),
content_type="application/xml")
httpretty.register_uri(httpretty.POST, "https://api.ecr.us-east-1.amazonaws.com/",
body=self.fixture('ecr-repositories-response.json'),
content_type="application/x-amz-json-1.1")
yaml_spec = YamlSpec(self.kubernetes_yml).to_list()
self.assertEqual(yaml_spec[0]['metadata']['name'], 'hello')
self.assertEqual(yaml_spec[0]['spec']['template']['spec']['containers'][0]['name'], 'web')
self.assertEqual(yaml_spec[0]['spec']['template']['spec']['containers'][0]['image'], 'eggs')
self.assertEqual(yaml_spec[0]['spec']['template']['spec']['containers'][1]['name'], 'worker')
self.assertEqual(yaml_spec[0]['spec']['template']['spec']['containers'][1]['image'], 'eggs')
| 2.234375 | 2 |
mbserializer/fields/attribute_fields.py | gomafutofu/mbserializer | 0 | 12788173 | __author__ = '<NAME>'
from .declarations import (
StringAttribute as Str,
IntegerAttribute as Int,
FloatAttribute as Float,
DecimalAttribute as Decimal,
BooleanAttribute as Bool,
DatetimeAttribute as Datetime,
DateAttribute as Date,
EnumAttribute as Enum,
) | 1.695313 | 2 |
lecture-2/loops.py | wendelsilva/CS50-Web-Programming-with-Python-and-JavaScript | 0 | 12788174 | <filename>lecture-2/loops.py
for i in range(6):
print(i)
names = ["Harry", "Ron", "Hermione", "Ginny"]
for name in names:
print(name)
name = "Harry"
for character in name:
print(character) | 4.28125 | 4 |
fewsum/data_pipelines/assemblers/tuning_pipeline.py | mancunian1792/FewSum | 28 | 12788175 | from mltoolkit.mldp.utils.constants.vocabulary import PAD, START, END, UNK
from mltoolkit.mldp import PyTorchPipeline
from mltoolkit.mldp.steps.readers import CsvReader
from mltoolkit.mldp.steps.transformers.nlp import TokenProcessor, VocabMapper, \
SeqLenComputer, Padder, SeqWrapper
from mltoolkit.mldp.steps.general import ChunkAccumulator
from mltoolkit.mldp.steps.transformers.general import Shuffler
from mltoolkit.mldp.steps.transformers.field import FieldRenamer
from fewsum.data_pipelines.steps import RevMapper, AmazonTransformer,\
SummMapper, GoldSummRevIndxsCreator, NumpyFormatter
from fewsum.utils.fields import ModelF, GoldDataF
from csv import QUOTE_NONE
from fewsum.data_pipelines.steps.props import SummRougeProp, DummyProp, SummLenProp,\
POVProp
def assemble_tuning_pipeline(word_vocab, max_groups_per_batch=1, tok_func=None,
lowercase=False):
"""
The pipeline yields tokenized reviews and summaries that can be used for
training (fine-tuning of the model).
"""
assert START in word_vocab and END in word_vocab
reader = CsvReader(sep='\t', encoding='utf-8', engine='python',
chunk_size=None,
use_lists=True, quating=QUOTE_NONE)
chunk_accum = ChunkAccumulator(new_size=max_groups_per_batch)
ama_spec_trans = AmazonTransformer(fnames_to_copy=[GoldDataF.PROD_ID,
GoldDataF.CAT,
])
summ_mapper = SummMapper(fname=ModelF.SUMMS,
new_indx_fname=ModelF.SUMM_GROUP_INDX)
token_processor = TokenProcessor(fnames=[ModelF.REV, ModelF.SUMM],
tok_func=tok_func, lowercase=lowercase)
vocab_mapper = VocabMapper({ModelF.REV: word_vocab,
ModelF.SUMM: word_vocab})
fname_renamer = FieldRenamer({GoldDataF.PROD_ID: ModelF.GROUP_ID,
GoldDataF.CAT: ModelF.CAT,
ModelF.SUMMS: ModelF.SUMM})
seq_wrapper = SeqWrapper(fname=[ModelF.REV, ModelF.SUMM],
start_el=word_vocab[START].id,
end_el=word_vocab[END].id)
padder = Padder(fname=[ModelF.REV, ModelF.SUMM],
new_mask_fname=[ModelF.REV_MASK, ModelF.SUMM_MASK],
pad_symbol=word_vocab[PAD].id, padding_mode='right')
indxs_creator = GoldSummRevIndxsCreator()
# rev_mapper = RevMapper(group_rev_indxs_fname=ModelF.GROUP_REV_INDXS,
# group_rev_mask_fname=ModelF.GROUP_REV_INDXS_MASK,
# rev_mask_fname=ModelF.REV_MASK)
# props
len_prop = SummLenProp(summ_fname=ModelF.SUMM, rev_fname=ModelF.REV,
group_rev_indxs_fname=ModelF.GROUP_REV_INDXS,
summ_group_indx_fname=ModelF.SUMM_GROUP_INDX,
new_fname=ModelF.LEN_PROP)
pov_prop = POVProp(text_fname=ModelF.SUMM, new_fname=ModelF.POV_PROP)
rouge_prop = SummRougeProp(summ_fname=ModelF.SUMM, rev_fname=ModelF.REV,
group_rev_indxs_fname=ModelF.GROUP_REV_INDXS,
summ_group_indx_fname=ModelF.SUMM_GROUP_INDX,
new_fname=ModelF.ROUGE_PROP)
rating_prop = DummyProp(fname=ModelF.SUMM, new_fname=ModelF.RATING_PROP,
fval=0.)
np_formatter = NumpyFormatter([ModelF.LEN_PROP, ModelF.RATING_PROP,
ModelF.POV_PROP, ModelF.ROUGE_PROP])
pipeline = PyTorchPipeline(reader=reader, error_on_invalid_chunk=False)
# pipeline.add_step(shuffler)
pipeline.add_step(chunk_accum)
pipeline.add_step(ama_spec_trans)
pipeline.add_step(summ_mapper)
pipeline.add_step(fname_renamer)
pipeline.add_step(indxs_creator)
# props
pipeline.add_step(rating_prop)
pipeline.add_step(rouge_prop)
pipeline.add_step(token_processor)
# the props below require tokenization
pipeline.add_step(len_prop)
pipeline.add_step(pov_prop)
pipeline.add_step(vocab_mapper)
pipeline.add_step(seq_wrapper)
pipeline.add_step(padder)
pipeline.add_step(np_formatter)
return pipeline
| 2.46875 | 2 |
bindings/python/fdb/tenant_management.py | sfc-gh-bvr/foundationdb | 1 | 12788176 | <filename>bindings/python/fdb/tenant_management.py
#
# tenant_management.py
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# FoundationDB Python API
"""Documentation for this API can be found at
https://apple.github.io/foundationdb/api-python.html"""
from fdb import impl as _impl
_tenant_map_prefix = b'\xff\xff/management/tenant_map/'
# If the existence_check_marker is an empty list, then check whether the tenant exists.
# After the check, append an item to the existence_check_marker list so that subsequent
# calls to this function will not perform the existence check.
#
# If the existence_check_marker is a non-empty list, return None.
def _check_tenant_existence(tr, key, existence_check_marker, force_maybe_commited):
if not existence_check_marker:
existing_tenant = tr[key].wait()
existence_check_marker.append(None)
if force_maybe_commited:
raise _impl.FDBError(1021) # maybe_committed
return existing_tenant is not None
return None
# Attempt to create a tenant in the cluster. If existence_check_marker is an empty
# list, then this function will check if the tenant already exists and fail if it does.
# Once the existence check is completed, it will not be done again if this function
# retries. As a result, this function may return successfully if the tenant is created
# by someone else concurrently. This behavior allows the operation to be idempotent with
# respect to retries.
#
# If the existence_check_marker is a non-empty list, then the existence check is skipped.
@_impl.transactional
def _create_tenant_impl(tr, tenant_name, existence_check_marker, force_existence_check_maybe_committed=False):
tr.options.set_special_key_space_enable_writes()
key = b'%s%s' % (_tenant_map_prefix, tenant_name)
if _check_tenant_existence(tr, key, existence_check_marker, force_existence_check_maybe_committed) is True:
raise _impl.FDBError(2132) # tenant_already_exists
tr[key] = b''
# Attempt to delete a tenant from the cluster. If existence_check_marker is an empty
# list, then this function will check if the tenant already exists and fail if it does
# not. Once the existence check is completed, it will not be done again if this function
# retries. As a result, this function may return successfully if the tenant is deleted
# by someone else concurrently. This behavior allows the operation to be idempotent with
# respect to retries.
#
# If the existence_check_marker is a non-empty list, then the existence check is skipped.
@_impl.transactional
def _delete_tenant_impl(tr, tenant_name, existence_check_marker, force_existence_check_maybe_committed=False):
tr.options.set_special_key_space_enable_writes()
key = b'%s%s' % (_tenant_map_prefix, tenant_name)
if _check_tenant_existence(tr, key, existence_check_marker, force_existence_check_maybe_committed) is False:
raise _impl.FDBError(2131) # tenant_not_found
del tr[key]
class FDBTenantList(object):
"""Iterates over the results of list_tenants query. Returns
KeyValue objects.
"""
def __init__(self, rangeresult):
self._range = rangeresult
self._iter = iter(self._range)
def to_list(self):
return list(self.__iter__())
def __iter__(self):
for next_item in self._iter:
tenant_name = _impl.remove_prefix(next_item.key, _tenant_map_prefix)
yield _impl.KeyValue(tenant_name, next_item.value)
# Lists the tenants created in the cluster, specified by the begin and end range.
# Also limited in number of results by the limit parameter.
# Returns an iterable object that yields KeyValue objects
# where the keys are the tenant names and the values are the unprocessed
# JSON strings of the tenant metadata
@_impl.transactional
def _list_tenants_impl(tr, begin, end, limit):
tr.options.set_read_system_keys()
begin_key = b'%s%s' % (_tenant_map_prefix, begin)
end_key = b'%s%s' % (_tenant_map_prefix, end)
rangeresult = tr.get_range(begin_key, end_key, limit)
return FDBTenantList(rangeresult)
def create_tenant(db_or_tr, tenant_name):
tenant_name = _impl.process_tenant_name(tenant_name)
# Only perform the existence check when run using a database
# Callers using a transaction are expected to check existence themselves if required
existence_check_marker = [] if not isinstance(db_or_tr, _impl.TransactionRead) else [None]
_create_tenant_impl(db_or_tr, tenant_name, existence_check_marker)
def delete_tenant(db_or_tr, tenant_name):
tenant_name = _impl.process_tenant_name(tenant_name)
# Only perform the existence check when run using a database
# Callers using a transaction are expected to check existence themselves if required
existence_check_marker = [] if not isinstance(db_or_tr, _impl.TransactionRead) else [None]
_delete_tenant_impl(db_or_tr, tenant_name, existence_check_marker)
def list_tenants(db_or_tr, begin, end, limit):
begin = _impl.process_tenant_name(begin)
end = _impl.process_tenant_name(end)
return _list_tenants_impl(db_or_tr, begin, end, limit)
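# Hedged usage sketch (assumes an open database handle such as db = fdb.open(); the tenant
# name is a placeholder):
#
#   create_tenant(db, b'acme')
#   for tenant in list_tenants(db, b'', b'\xff', 10):
#       print(tenant.key)
#   delete_tenant(db, b'acme')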
| 1.875 | 2 |
ofcourse/tests/test_yaml.py | liam-middlebrook/ofCourse | 0 | 12788177 | <reponame>liam-middlebrook/ofCourse
import os
import yaml
import unittest
from validator import Required, validate, InstanceOf
class TestAllYaml(unittest.TestCase):
def test_recursive_yaml(self):
yaml_files = []
for root, _, fnames in os.walk(os.getcwd()):
for fname in fnames:
if (fname.endswith('.yaml') or fname.endswith('.yml')):
yaml_files.append(os.path.join(root, fname))
for fullname in yaml_files:
with open(fullname, 'r') as yfile:
try:
yaml.safe_load(yfile)
except Exception as e:
msg = "File {name} is broken: {exc}"
self.fail(msg.format(name=fullname, exc=str(e)))
def test_student_yaml(self):
is_str = InstanceOf(type(""))
spec = {
'blog': [Required, is_str],
'feed': [Required, is_str],
'forges': [Required, InstanceOf(list)],
'hw': [Required, InstanceOf(dict)],
'irc': [Required, is_str],
'name': [Required, is_str],
'rit_dce': [Required, is_str],
# optional fields
'bio': [is_str],
'twitter': [is_str],
'coderwall': [is_str],
}
student_files = []
for root, _, fnames in os.walk(
os.path.join(os.getcwd(), "people")):
for fname in fnames:
if (fname.endswith('.yaml') or fname.endswith('.yml')):
student_files.append(os.path.join(root, fname))
for fullname in student_files:
with open(fullname, 'r') as student:
content = yaml.safe_load(student)
validity = validate(spec, content)
if not validity[0]:
out = ""
for k, v in validity[1].items():
out += ("File: {f} Key: {key} "
"{check}\n\n".format(key=k,
check=v,
f=fullname)
)
self.fail(out)
| 2.71875 | 3 |
themis/model/emr_model.py | igor-lema-ifood/themis | 54 | 12788178 | import themis.monitoring.emr_monitoring
import themis.scaling.emr_scaling
from themis.util import aws_common
from themis import config
from themis.model.aws_model import *
class EmrCluster(Scalable, Monitorable):
def __init__(self, id=None):
super(EmrCluster, self).__init__(id)
self.type = None
self.ip = None
self.ip_public = None
self.monitoring_data = {}
def fetch_data(self):
if self.needs_scaling():
self.monitoring_data = themis.monitoring.emr_monitoring.collect_info(self)
return self.monitoring_data
def needs_scaling(self, params=None):
app_config = config.get_config()
cluster_ids = app_config.general.get_autoscaling_clusters()
return self.id in cluster_ids
def perform_scaling(self, params=None):
themis.scaling.emr_scaling.perform_scaling(self)
class EmrClusterType(object):
PRESTO = aws_common.CLUSTER_TYPE_PRESTO
HIVE = aws_common.CLUSTER_TYPE_HIVE
| 2.078125 | 2 |
kmodes/__init__.py | sdkugelmass/kmodes | 0 | 12788179 | __version__ = 'nb 0.11.2'
| 1.023438 | 1 |
homeserver/__init__.py | miikama/home-server | 0 | 12788180 |
import sys
import os
import logging
from flask import Flask
HOME_SERVER_DIR = os.path.dirname(os.path.abspath(__file__))
#the absolute path of this script
app_path = os.path.dirname(os.path.realpath(__file__))
#config for the project
from homeserver.server_config import load_config, setup_logging
config = load_config(os.path.join(app_path, 'server.ini'))
# update logger settings
logger = setup_logging(config, logging.DEBUG)
#load devices and connect them
from homeserver.device_handler import DeviceHandler
device_handler = DeviceHandler( os.path.join( app_path, 'device_configs') )
def create_app(config):
#init the app
app = Flask(__name__)
#add the config parameters to the app config
app.config.update(config._values)
#load the webpage routes
from homeserver.server import api_routes
app.register_blueprint(api_routes)
#start voice control
# from homeserver.voice_control.voice_controller import VoiceController
# app.voice_controller = VoiceController(start=True)
# app.device_handler.add_interface(app.voice_controller)
return app
app = create_app(config)
| 2.28125 | 2 |
tests/gen_test_clips.py | sevagh/chord-detection | 55 | 12788181 | #!/usr/bin/env python3.7
import unittest
import numpy
import os
import librosa
import soundfile
import sys
from tempfile import TemporaryDirectory
def main():
dest = "tests/test_1_note_Csharp3.wav"
tone = librosa.tone(138.59, sr=22050, length=44100)
soundfile.write(dest, tone, 22050)
print("Created {0} with note C#3".format(dest))
dest = "tests/test_1_note_E4.wav"
tone = librosa.tone(329.63, sr=22050, length=44100)
soundfile.write(dest, tone, 22050)
print("Created {0} with note E4".format(dest))
dest = "tests/test_2_notes_E2_F3.wav"
tone = numpy.zeros(44100)
tone += librosa.tone(82.41, sr=22050, length=44100)
tone += librosa.tone(174.61, sr=22050, length=44100)
soundfile.write(dest, tone, 22050)
print("Created {0} with notes E2, F3".format(dest))
dest = "tests/test_2_notes_G3_Asharp4.wav"
tone = numpy.zeros(44100)
tone += librosa.tone(196, sr=22050, length=44100)
tone += librosa.tone(466.16, sr=22050, length=44100)
soundfile.write(dest, tone, 22050)
print("Created {0} with notes G3, A#4".format(dest))
dest = "tests/test_3_notes_G2_B2_G#3.wav"
tone = numpy.zeros(44100)
tone += librosa.tone(98, sr=22050, length=44100)
tone += librosa.tone(123.47, sr=22050, length=44100)
tone += librosa.tone(207.65, sr=22050, length=44100)
soundfile.write(dest, tone, 22050)
print("Created {0} with notes G2, B2, G#3".format(dest))
return 0
if __name__ == "__main__":
sys.exit(main())
| 2.515625 | 3 |
src/forum/migrations/0018_auto_20170629_2130.py | shashankmohabia/gymkhana-master | 1 | 12788182 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-29 16:00
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('forum', '0017_reply'),
]
operations = [
migrations.AlterModelOptions(
name='reply',
options={'verbose_name_plural': 'replies'},
),
]
| 1.53125 | 2 |
examples/example_application.py | samson0v/python_tb_rest_client | 30 | 12788183 | # Copyright 2020. ThingsBoard
# #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# #
# http://www.apache.org/licenses/LICENSE-2.0
# #
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
# Importing models and REST client class from Community Edition version
from tb_rest_client.rest_client_ce import *
from tb_rest_client.rest import ApiException
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s - %(levelname)s - %(module)s - %(lineno)d - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
# ThingsBoard REST API URL
url = "http://localhost:8080"
# Default Tenant Administrator credentials
username = "<EMAIL>"
password = "<PASSWORD>"
# Creating the REST client object with context manager to get auto token refresh
with RestClientCE(base_url=url) as rest_client:
try:
# Auth with credentials
rest_client.login(username=username, password=password)
# Creating an Asset
asset = Asset(name="Building 1", type="building")
asset = rest_client.save_asset(asset)
logging.info("Asset was created:\n%r\n", asset)
# creating a Device
device = Device(name="Thermometer 1", type="thermometer")
device = rest_client.save_device(device)
logging.info(" Device was created:\n%r\n", device)
# Creating relations from device to asset
relation = EntityRelation(_from=asset.id, to=device.id, type="Contains")
relation = rest_client.save_relation(relation)
logging.info(" Relation was created:\n%r\n", relation)
except ApiException as e:
logging.exception(e)
| 1.726563 | 2 |
alembic/versions/16c2d576e01e_add_schools_with_description_2020.py | AlonMaor14/anyway | 69 | 12788184 | <reponame>AlonMaor14/anyway
"""add_schools_with_description_2020
Revision ID: 16c2d576e01e
Revises: 7c472e4582de
Create Date: 2020-08-16 10:30:23.748793
"""
# revision identifiers, used by Alembic.
revision = '16c2d576e01e'
down_revision = '7c472e4582de'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
import geoalchemy2
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('schools_with_description2020',
sa.Column('id', sa.BigInteger(), autoincrement=True, nullable=False),
sa.Column('school_id', sa.Integer(), nullable=True),
sa.Column('school_name', sa.Text(), nullable=True),
sa.Column('municipality_name', sa.Text(), nullable=True),
sa.Column('yishuv_name', sa.Text(), nullable=True),
sa.Column('institution_type', sa.Text(), nullable=True),
sa.Column('lowest_grade', sa.Text(), nullable=True),
sa.Column('highest_grade', sa.Text(), nullable=True),
sa.Column('location_accuracy', sa.Text(), nullable=True),
sa.Column('geom', geoalchemy2.types.Geometry(geometry_type='POINT', srid=4326, from_text='ST_GeomFromEWKT', name='geometry'), nullable=True),
sa.Column('x', sa.Float(), nullable=True),
sa.Column('y', sa.Float(), nullable=True),
sa.Column('longitude', sa.Float(), nullable=True),
sa.Column('latitude', sa.Float(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_schools_with_description2020_geom'), 'schools_with_description2020', ['geom'], unique=False)
op.create_index(op.f('ix_schools_with_description2020_id'), 'schools_with_description2020', ['id'], unique=False)
op.create_index(op.f('ix_schools_with_description2020_municipality_name'), 'schools_with_description2020', ['municipality_name'], unique=False)
op.create_index(op.f('ix_schools_with_description2020_school_id'), 'schools_with_description2020', ['school_id'], unique=False)
op.create_index(op.f('ix_schools_with_description2020_yishuv_name'), 'schools_with_description2020', ['yishuv_name'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_schools_with_description2020_yishuv_name'), table_name='schools_with_description2020')
op.drop_index(op.f('ix_schools_with_description2020_school_id'), table_name='schools_with_description2020')
op.drop_index(op.f('ix_schools_with_description2020_municipality_name'), table_name='schools_with_description2020')
op.drop_index(op.f('ix_schools_with_description2020_id'), table_name='schools_with_description2020')
op.drop_index(op.f('ix_schools_with_description2020_geom'), table_name='schools_with_description2020')
op.drop_table('schools_with_description2020')
# ### end Alembic commands ###
| 1.671875 | 2 |
data/simple_analysis.py | tonitick/CacheZoom | 13 | 12788185 | <reponame>tonitick/CacheZoom
#!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
import operator, sys
MAX_MEASUREMENT = 50000
def get_set(addr):
return (addr >> 6) & 0b111111
def calc_threshold(set_stat):
# print '[+] Calculating eviction thresholds...'
threshold = []
for k in set_stat:
k_sorted = sorted(k.items(), key=operator.itemgetter(1), reverse=True)
select = 0
for tu in k_sorted:
if tu[0] > select and tu[1] > 400:
select = tu[0]
if select >= 80:
threshold.append(255)
else:
threshold.append(select)
return threshold
def unpack_plot_binary(data):
# print '[+] Unpacking measurement binary...'
f_raw = open(data, 'rb')
samples = []
set_stat = []
for i in xrange(MAX_MEASUREMENT):
lst = map(ord, list(f_raw.read(64)))
samples.append(lst)
for j in xrange(64):
if j >=len(set_stat):
set_stat.append({})
if lst[j] != 0:
if set_stat[j].has_key(lst[j]):
set_stat[j][lst[j]] += 1
else:
set_stat[j].update({lst[j]: 1})
return samples, calc_threshold(set_stat)
def compress_data(samples, thresholds):
# print '[+] Filter measurments...'
compressed = []
l = 0
r = MAX_MEASUREMENT
if len(sys.argv) > 2:
l = int(sys.argv[1])
r = int(sys.argv[2])
for i in xrange(l, r):
c = 0
for j in xrange(64):
if samples[i][j] > thresholds[j]:
c += 1
if c < 30 and c > 0:
compressed.append((i, samples[i]))
return compressed
def calc_eviction_percentage(samples, thresholds, i):
l = int(sys.argv[1])
r = int(sys.argv[2])
c = 0
for j in xrange(l, r):
if samples[j][i] > thresholds[i]:
c += 1
return (c * 1.0) / (r - l) * 100
def auto_tweak_thresholds(samples, thresholds, per_limit):
# print '[+] Tweaking tresholds for the range in (%s, %s)...'%(sys.argv[1], sys.argv[2])
for i in xrange(64):
if thresholds[i] == 255:
continue
per = calc_eviction_percentage(samples, thresholds, i)
while(per < per_limit):
thresholds[i] -= 2
per = calc_eviction_percentage(samples, thresholds, i)
if per > per_limit:
thresholds[i] += 2
break
while(per > per_limit):
thresholds[i] += 2
per = calc_eviction_percentage(samples, thresholds, i)
return thresholds
samples, thresholds = unpack_plot_binary('samples.data')
if len(sys.argv) > 2:
thresholds = auto_tweak_thresholds(samples, thresholds, int(sys.argv[3]))
compressed = compress_data(samples, thresholds)
l = 1
r = 0
if len(sys.argv) > 2:
l = int(sys.argv[1])
r = int(sys.argv[2])
evict = {}
plot_x = []
plot_y = []
for i in xrange(len(compressed)):
for j in xrange(64):
if list(compressed[i])[1][j] > thresholds[j]:
plot_x.append(list(compressed[i])[0])
plot_y.append(j)
distance = float(list(compressed[i])[0] - l) / (r - l)
if evict.has_key(j):
evict[j].append(distance)
else:
evict[j] = [distance]
for k in evict:
_sum = 0
for v in evict[k]:
_sum += v
evict[k] = _sum / len(evict[k])
evict = sorted(evict.items(), key=operator.itemgetter(1), reverse=False)
print evict
fig, ax = plt.subplots()
ax.grid(True)
ax.set_yticks(range(64))
ax.set_xlabel('Measuerment')
ax.set_ylabel('Set number')
ax.plot(plot_x, plot_y, 'o')
plt.show()
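# Hedged usage sketch: the script expects a `samples.data` file in the working directory and,
# optionally, a measurement range plus a target eviction percentage, e.g.
#
#   python simple_analysis.py 10000 20000 5
#
# (the range bounds and the percentage above are made-up values).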
| 2.359375 | 2 |
payplug/test/test_init/test_dao_payment.py | SOGEXIS/payplug-python | 0 | 12788186 | <filename>payplug/test/test_init/test_dao_payment.py
# -*- coding: utf-8 -*-
import pytest
from mock import patch
import payplug
from payplug import resources
from payplug.test import TestBase
@patch('payplug.config.secret_key', 'a_secret_key')
@patch.object(payplug.HttpClient, 'post', lambda *args, **kwargs: ({'id': 'pay_payment_id'}, 201))
@patch.object(payplug.HttpClient, 'patch', lambda *args, **kwargs: ({'id': 'pay_payment_id'}, 200))
@patch.object(payplug.HttpClient, 'get', lambda *args, **kwargs: ({'id': 'pay_payment_id'}, 200))
class TestPaymentCreateRetrieveAbort(TestBase):
def test_retrieve(self):
payment = payplug.Payment.retrieve('pay_payment_id')
assert isinstance(payment, resources.Payment)
assert payment.id == 'pay_payment_id'
def test_abort(self):
payment = payplug.Payment.abort('pay_payment_id')
assert isinstance(payment, resources.Payment)
assert payment.id == 'pay_payment_id'
def test_abort_with_payment_object(self):
payment = payplug.Payment.retrieve('pay_payment_id')
payment = payplug.Payment.abort(payment)
assert isinstance(payment, resources.Payment)
assert payment.id == 'pay_payment_id'
def test_create(self):
payment = payplug.Payment.create(some='payment', da='ta')
assert isinstance(payment, resources.Payment)
assert payment.id == 'pay_payment_id'
def get_payments_fixture():
return {
"type": "list",
"page": 0,
"per_page": 10,
"count": 2,
"data": [
{
"id": "pay_5iHMDxy4ABR4YBVW4UscIn",
"object": "payment",
},
]
}
@patch('payplug.config.secret_key', 'a_secret_key')
@patch.object(payplug.HttpClient, 'get', lambda *args, **kwargs: (get_payments_fixture(), 200))
class TestPaymentList(TestBase):
@patch('payplug.routes.url')
def test_list_pagination_no_arguments(self, url_mock):
payplug.Payment.list()
assert url_mock.call_args[1]['pagination'] == {}
@patch('payplug.routes.url')
def test_list_pagination_page_argument(self, url_mock):
payplug.Payment.list(page=1)
assert url_mock.call_args[1]['pagination'] == {'page': 1}
@patch('payplug.routes.url')
def test_list_pagination_per_page_argument(self, url_mock):
payplug.Payment.list(per_page=1)
assert url_mock.call_args[1]['pagination'] == {'per_page': 1}
@patch('payplug.routes.url')
def test_list_pagination_page_and_per_page_arguments(self, url_mock):
payplug.Payment.list(page=42, per_page=1)
assert url_mock.call_args[1]['pagination'] == {'page': 42, 'per_page': 1}
def test_list(self):
payments = payplug.Payment.list()
assert isinstance(payments, resources.APIResourceCollection)
assert next(payments).id == 'pay_5iHMDxy4ABR4YBVW4UscIn'
| 2.15625 | 2 |
oil_lang/grammar_gen.py | msingle/oil | 0 | 12788187 | #!/usr/bin/env python2
"""
grammar_gen.py - Use pgen2 to generate tables from Oil's grammar.
"""
from __future__ import print_function
import os
import sys
from _devbuild.gen.id_kind_asdl import Id, Kind
from _devbuild.gen.syntax_asdl import source
from core import alloc
from core import meta
from core.util import log
from frontend import lexer, match, reader
from pgen2 import parse, pgen
# Used at grammar BUILD time.
OPS = {
'(': Id.Op_LParen,
')': Id.Op_RParen,
'[': Id.Op_LBracket,
']': Id.Op_RBracket, # Problem: in OilOuter, this is OP_RBracket.
# OK I think the metalanguage needs to be
# extended to take something other than ']'
# It needs proper token names!
'{': Id.Op_LBrace,
'}': Id.Op_RBrace,
'$[': Id.Left_DollarBracket,
'${': Id.Left_DollarBrace,
'$(': Id.Left_DollarParen,
'$/': Id.Left_DollarSlash,
'@[': Id.Left_AtBracket,
'.': Id.Expr_Dot,
'->': Id.Expr_RArrow,
# TODO: Add Ellipsis.
'...': Id.Expr_Dot,
# TODO: do we need div= and xor= ?
}
# TODO: We should be able to remove all these.
TERMINALS = {
'NAME': Id.Expr_Name,
'NUMBER': Id.Expr_Digits,
# The grammar seems something like 'for' or '>='
# These need to be looked up at "_Classify" time?
#'STRING': Id.Expr_Name,
'NEWLINE': Id.Op_Newline,
'ENDMARKER': Id.Eof_Real,
}
if 0: # unused because the grammar compile keeps track of keywords!
KEYWORDS = {
'div': Id.Expr_Div,
'xor': Id.Expr_Xor,
'and': Id.Expr_And,
'or': Id.Expr_Or,
'not': Id.Expr_Not,
'for': Id.Expr_For,
'is': Id.Expr_Is,
'in': Id.Expr_In,
'if': Id.Expr_If,
'else': Id.Expr_Else,
'match': Id.Expr_Match,
'func': Id.Expr_Func,
}
class OilTokenDef(object):
def __init__(self, arith_ops):
self.arith_ops = arith_ops
def GetTerminalNum(self, label):
"""
e.g. translate Expr_Name in the grammar to 178
"""
id_ = TERMINALS.get(label) or getattr(Id, label)
#log('Id %s = %d', id_, id_.enum_id)
assert id_.enum_id < 256, id_
return id_.enum_id
def GetOpNum(self, value):
id_ = OPS.get(value) or self.arith_ops[value]
assert id_.enum_id < 256, id_
return id_.enum_id
def MakeOilLexer(code_str, arena):
arena.PushSource(source.MainFile('pgen2_main'))
line_reader = reader.StringLineReader(code_str, arena)
line_lexer = lexer.LineLexer(match.MATCHER, '', arena)
lex = lexer.Lexer(line_lexer, line_reader)
return lex
def main(argv):
action = argv[1]
argv = argv[2:]
# Common initialization
arith_ops = {}
for _, token_str, id_ in meta.ID_SPEC.LexerPairs(Kind.Arith):
arith_ops[token_str] = id_
if 0:
from pprint import pprint
pprint(arith_ops)
tok_def = OilTokenDef(arith_ops)
if action == 'marshal': # generate the grammar and parse it
grammar_path = argv[0]
out_dir = argv[1]
basename, _ = os.path.splitext(os.path.basename(grammar_path))
# HACK for find:
if basename == 'find':
from tools.find import parse as find_parse
tok_def = find_parse.TokenDef()
with open(grammar_path) as f:
gr = pgen.MakeGrammar(f, tok_def=tok_def)
marshal_path = os.path.join(out_dir, basename + '.marshal')
with open(marshal_path, 'wb') as out_f:
gr.dump(out_f)
nonterm_path = os.path.join(out_dir, basename + '_nt.py')
with open(nonterm_path, 'w') as out_f:
gr.dump_nonterminals(out_f)
log('Compiled %s -> %s and %s', grammar_path, marshal_path, nonterm_path)
#gr.report()
elif action == 'parse': # generate the grammar and parse it
# Remove build dependency
from oil_lang import expr_parse
grammar_path = argv[0]
start_symbol = argv[1]
code_str = argv[2]
# For choosing lexer and semantic actions
grammar_name, _ = os.path.splitext(os.path.basename(grammar_path))
with open(grammar_path) as f:
gr = pgen.MakeGrammar(f, tok_def=tok_def)
arena = alloc.Arena()
lex = MakeOilLexer(code_str, arena)
is_expr = grammar_name in ('calc', 'grammar')
p = expr_parse.ExprParser(gr)
try:
pnode, _ = p.Parse(lex, gr.symbol2number[start_symbol])
except parse.ParseError as e:
log('Parse Error: %s', e)
return 1
from frontend import parse_lib
names = parse_lib.MakeGrammarNames(gr)
p_printer = expr_parse.ParseTreePrinter(names) # print raw nodes
p_printer.Print(pnode)
if is_expr:
from oil_lang import expr_to_ast
tr = expr_to_ast.Transformer(gr)
if start_symbol == 'eval_input':
ast_node = tr.Expr(pnode)
else:
ast_node = tr.OilAssign(pnode)
ast_node.PrettyPrint()
print()
elif action == 'stdlib-test':
# This shows how deep Python's parse tree is. It doesn't use semantic
# actions to prune on the fly!
import parser # builtin module
t = parser.expr('1+2')
print(t)
t2 = parser.st2tuple(t)
print(t2)
else:
raise RuntimeError('Invalid action %r' % action)
if __name__ == '__main__':
try:
sys.exit(main(sys.argv))
except RuntimeError as e:
print('FATAL: %s' % e, file=sys.stderr)
sys.exit(1)
| 2.234375 | 2 |
unittest_flow/uf04_testsuite_for_module_uf01.py | RagnorLixiaomeng/apiAssurance | 0 | 12788188 | <reponame>RagnorLixiaomeng/apiAssurance
# -*- coding: utf-8 -*-
# @Time : 2021/5/29 2:28 PM
# @Author: lixiaomeng_someday
# @Email : <EMAIL>
# @File : uf04_testsuite_for_module_uf01.py
import unittest
from apiAssurance.unittest_flow.uf02_test_mathmethod_cases import TestMathMethod
from apiAssurance.unittest_flow.uf03_test_multi_method_cases import TestMultiMethod
"""test suite 1: for module uf02:optional cases"""
suite_for_MathMethodModule = unittest.TestSuite()
suite_for_MathMethodModule.addTests([
TestMathMethod("test_two_positive_plus"),
TestMathMethod("test_two_negative_plus"),
TestMathMethod("test_two_zero_plus")
])
"""test suite 2: for module uf03"""
suite_for_MultiMethodModule = unittest.TestSuite()
suite_for_MultiMethodModule.addTest(unittest.TestLoader().loadTestsFromModule(TestMultiMethod))
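# Hedged sketch: this module only assembles the suites; one way to actually run them with the
# standard unittest runner would be:
if __name__ == "__main__":
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(suite_for_MathMethodModule)
    runner.run(suite_for_MultiMethodModule)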
| 2.078125 | 2 |
tasks/utils.py | RossBrunton/RThing | 0 | 12788189 | <gh_stars>0
"""Utility functions for the tasks app"""
from django.template.loader import render_to_string
from django.template import RequestContext
from django.core.cache import cache
from tasks.templatetags import fragments
from courses.models import get_iface
import random
# This is used to seperate the user output from the hidden output; it is inserted via a generic_print after code, and
# all output after it will be hidden from the user
# This really isn't the best way of doing this, but I can't think up any other way
_SPLIT_TOKEN = "<KEY>SPLIT_TOKEN_QPZMWOXN"
def perform_execute(code, task, user):
"""Executes provided code (a string) and the model answer and returns (output, media, is_error, is_correct)
It compares it to the model answer, that's what is_correct means.
is_correct will always be false if the task has automark set to false.
This will try to do as little work as possible by storing the model answer in a cache, and by trying is_equivalent
on the interface.
"""
# Encode it to ascii to get rid of unicode chars
code = code.encode("ascii", "ignore").decode("ascii")
# Strip whitespace from both ends
code = code.strip()
iface = task.iface
# Ensure the code ends with ";" or whatever
if not code.endswith(iface.LINE_END):
code += iface.LINE_END
# First, check to see if they are equivalent and the answer exists
# if the task cannot be automarked, equiv is always false
equiv = iface.is_equivalent(task.model_answer, code) and task.automark
# Look up prior
prior = ""
if task.takes_prior and task.previous():
prior = task.previous().as_prior() + iface.generic_print(_SPLIT_TOKEN)
# Generate a seed if needed
seed = 0
if task.random_poison():
seed = random.randint(0, 1 << 30)
if task.takes_prior and task.previous():
seed = task.previous().prior_seed(user)
# Run the user's code, only if the lines of code are not equivalent
if not equiv:
user_code = "".join(filter(lambda x: bool(x), [
prior,
task.hidden_pre_code,
task.visible_pre_code,
code,
iface.generic_print(_SPLIT_TOKEN),
task.validate_answer,
task.post_code
])).strip()
user_input = {
"commands":user_code, "namespace":task.lesson.pk, "uses_random":task.random_poison(),
"uses_image":task.uses_image, "automark":task.automark, "seed":seed, "user":user.pk,
"timeout":task.course.timeout
}
user_output = iface.run(user_input)
if user_output["is_error"]:
# If the output has an error, assume it's wrong
return (
user_output["err"],
None,
True,
False
)
# Run the model answer, but only if an answer exists
if task.automark:
# Check the cache for a model answer
cache_value = None
if not task.random_poison():
cache_value = cache.get("task_model_{}".format(task.pk))
if not cache_value:
# Miss
model_code = "".join(filter(lambda x: bool(x), [
prior,
task.hidden_pre_code,
task.visible_pre_code,
task.model_answer,
iface.generic_print(_SPLIT_TOKEN),
task.validate_answer,
task.post_code
])).strip()
model_input = {
"commands":model_code, "namespace":task.lesson.pk, "uses_random":task.random_poison(),
"uses_image":task.uses_image, "automark":task.automark, "seed":seed, "user":user.pk, "model":True,
"timeout":task.course.timeout
}
model_output = iface.run(model_input)
if not task.random_poison():
cache.set("task_model_{}".format(task.pk), model_output)
else:
# Hit
model_output = cache_value
# If the answers are equivalent, then set the users output to the models output
if equiv:
user_output = model_output
# Strip all lines after the split token
displayed_output = ""
range_end = 0
range_start = 0
lines = user_output["out"].split("\n")
for l in range(len(lines)-1, -1, -1):
# Loop backwards until we find the token, to see what range of lines we should output
# This is for takes_prior (takes_prior injects another split token before the command) for the start of range
if range_end and _SPLIT_TOKEN in lines[l]:
range_start = -len(lines)+l+1
break
# And this is for the end of the range, to delete post_code and validate_answer
if _SPLIT_TOKEN in lines[l]:
range_end = -len(lines)+l
if not task.takes_prior:
break
displayed_output = "\n".join(lines[range_start:range_end])
# Store the seed
if seed:
uot = task.get_uot(user)
uot.seed = seed
uot.save()
# And return
return (
displayed_output,
user_output.get("media", None),
False,
equiv or (
task.automark
and user_output["out"] == model_output["out"]
and user_output.get("media", None) == model_output.get("media", None)
)
)
def validate_execute(task, instance):
"""Executes the model answer treating the task as if it were a dict and returns (is_error, error)
This is used when validating the contents of the model when saving it.
"""
# If prior is true this method doesn't seem to work
if task["takes_prior"]:
return (False, "")
# Look up prior
prior = ""
if task["takes_prior"] and instance.previous():
prior = instance.previous().as_prior() + get_iface(task["language"]).generic_print(_SPLIT_TOKEN)
# Generate a seed
seed = random.randint(0, 1 << 30)
# Run the model answer
model_code = "".join(filter(lambda x: bool(x), [
prior,
task["hidden_pre_code"],
task["visible_pre_code"],
task["model_answer"],
task["validate_answer"],
task["post_code"]
])).strip()
model_input = {
"commands":model_code, "namespace":task["section"].lesson.pk, "uses_random":True,
"uses_image":task["uses_image"], "automark":task["automark"], "seed":seed, "user":0
}
model_output = get_iface(task["language"]).run(model_input)
if model_output["is_error"]:
return (True, model_output["err"])
# And return
return (False, "")
def fragmentate(type, obj, request, content_select=None, content_value=None):
"""Generates a fragment for the given type and returns it as a python dict
See doc/task_submit_interface.md for details.
"""
frag = {"type":type}
if type == "task":
frag["id"] = obj.pk
frag["order"] = "{}-{}".format(obj.section.order, obj.order)
frag["html"] = render_to_string("tasks/task.html", fragments.task(obj))
return frag
if type == "task-content":
frag["id"] = obj.pk
frag["select"] = content_select
frag["html"] = content_value
return frag
if type == "lesson-start":
frag["html"] = render_to_string("tasks/lesson_start.html", fragments.lesson_start(obj))
return frag
if type == "lesson-end":
frag["html"] = render_to_string("tasks/lesson_end.html", fragments.lesson_end(obj))
return frag
if type == "section-start":
frag["order"] = obj.order
frag["html"] = render_to_string("tasks/section_start.html", fragments.section_start(obj, request.user.is_staff))
return frag
if type == "section-end":
frag["order"] = obj.order
frag["html"] = render_to_string("tasks/section_end.html", fragments.section_end(obj))
return frag
if type == "prompt-entry":
frag["id"] = obj.pk
frag["html"] = render_to_string("tasks/prompt_entry.html", fragments.prompt_entry(obj))
return frag
raise RuntimeError("{} is not a fragment type".format(type))
| 2.6875 | 3 |
examples/algorithms/algorithms.py | rorodata/lambdapool | 0 | 12788190 | def fibonacci(n):
'''A naive implementation of computing n'th fibonacci number
'''
if n==0: return 0
if n==1: return 1
return fibonacci(n-1) + fibonacci(n-2)
def factorial(n):
'''Returns factorial of a number
'''
if n<0: raise ValueError('Factorial of a negative number does not exist')
if n==0: return 1
return n*factorial(n-1)
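# Hedged quick self-check of the two helpers above (expected values are standard:
# fibonacci(10) == 55 and factorial(5) == 120).
if __name__ == '__main__':
    assert fibonacci(10) == 55
    assert factorial(5) == 120
    print(fibonacci(10), factorial(5))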
| 4.25 | 4 |
V-generalization/examples/stereo_neural_inference/face_detector_v2.py | Ikomia-dev/ikomia-oakd | 0 | 12788191 | <reponame>Ikomia-dev/ikomia-oakd<gh_stars>0
from pathlib import Path
import depthai as dai
import cv2
import sys
# Importing from parent folder
sys.path.insert(0, str(Path(__file__).parent.parent.parent)) # move to parent path
from utils.draw import drawROI, displayFPS
from utils.OakRunner import OakRunner
frame_width, frame_height = 300, 300
# Function called before entering inside the process loop, useful to set few arguments
def init(runner, device):
runner.custom_arguments["required_confidence"] = 0.2
# Function called inside the process loop, useful to apply any treatment
def process(runner):
for side in ["left", "right"]:
frame = runner.output_queues[side+"_cam"].get().getCvFrame()
faces_data = runner.output_queues["nn_"+side+"_faces"].get().getFirstLayerFp16()
if(faces_data[2] > runner.custom_arguments["required_confidence"]):
drawROI(frame, (faces_data[3],faces_data[4]), (faces_data[5],faces_data[6]), color=(0,200,230))
displayFPS(frame, runner.getFPS())
cv2.imshow(side, frame)
runner = OakRunner()
for side in ["left", "right"]:
if(side == "left"):
runner.setLeftCamera(frame_width, frame_height)
face_manip = runner.getLeftCameraManip()
else:
runner.setRightCamera(frame_width, frame_height)
face_manip = runner.getRightCameraManip()
face_manip.initialConfig.setFrameType(dai.RawImgFrame.Type.BGR888p) # Switch to BGR (but still grayscaled)
runner.addNeuralNetworkModel(stream_name="nn_"+side+"_faces", path=str(Path(__file__).parent) + "/../../../_models/face_detection.blob", handle_mono_depth=False)
face_manip.out.link(runner.neural_networks["nn_"+side+"_faces"].input) # link transformed video stream to neural network entry
runner.run(process=process, init=init) | 2.03125 | 2 |
alert_bot.py | nacitar/proxitar | 0 | 12788192 | #!/usr/bin/env python3
# TODO: how will commands handle incorrectly cased names? will need to be able to do that, preferably without losing original case in messages.
# TODO: initial 'all clear'? here, or in main?
# TODO: save 'seen' persistently upon changes?
# TODO: commands, reporting/unique players (later), saving 'seen' to disk
# figure out clearing of state after a disconnect (or is that a 'main' thing?)
# TODO: inverted warnings, for population info
import news_reel_monitor
import bisect
import datetime
from enum import Enum, auto
UNCLANNED_PLACEHOLDER = 'unclanned'
class DataSetCategory(Enum):
ADMINISTRATOR = 'admin'
ALLY_CLAN = 'allyclan'
ALLY_PLAYER = 'allyplayer'
IGNORE_CLAN = 'ignoreclan'
IGNORE_PLAYER = 'ignoreplayer'
IGNORE_HOLDING = 'ignoreholding'
class DataMappingCategory(Enum):
FILTER_PLAYER = 'filterplayer'
FILTER_CLAN = 'filterclan'
FILTER_HOLDING = 'filterholding'
def oxford_comma_delimited_string(entries):
count = len(entries)
if count:
if count == 1:
return entries[0]
return f"{', '.join(entries[:-1])}, and {entries[-1]}"
return ''
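# A couple of hedged examples of the helper above, for reference:
#   oxford_comma_delimited_string(['alpha'])                   -> 'alpha'
#   oxford_comma_delimited_string(['alpha', 'beta', 'gamma'])  -> 'alpha, beta, and gamma'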
class AlertBot(object):
def __init__(self, monitor):
self.holding_alert = {}
self.monitor = monitor
self.seen_players = {}
# TODO: the 'unique' stuff
self._data_sets = {}
self._data_mappings = {}
def data_set(self, category):
data = self._data_sets.get(category)
if data is None:
self._data_sets[category] = data = set()
return data
def data_mapping(self, category):
data = self._data_mappings.get(category)
if data is None:
self._data_mappings[category] = data = {}
return data
def is_friendly(self, name, clan):
name = name.lower()
clan = clan.lower()
return (
name in self.data_set(DataSetCategory.ALLY_PLAYER) or
clan in self.data_set(DataSetCategory.ALLY_CLAN) or
name in self.data_set(DataSetCategory.IGNORE_PLAYER) or
clan in self.data_set(DataSetCategory.IGNORE_CLAN)
)
def filter_player(self, name):
# TODO: offensive/stupid player names
filtered_name = self.data_mapping(DataMappingCategory.FILTER_PLAYER).get(name.lower())
if filtered_name is not None:
return filtered_name
return name
def filter_clan(self, clan):
if clan is None:
global UNCLANNED_PLACEHOLDER
return UNCLANNED_PLACEHOLDER
# TODO: offensive/stupid clan names
filtered_clan = self.data_mapping(DataMappingCategory.FILTER_CLAN).get(clan.lower())
if filtered_clan is not None:
return filtered_clan
return clan
def filter_holding(self, holding):
# TODO: change it to change how TTS pronounces it? to fix the capitalization of certain cities?
filtered_holding = self.data_mapping(DataMappingCategory.FILTER_HOLDING).get(holding.lower())
if filtered_holding is not None:
return filtered_holding
return holding
def _get_alerts(self, full_status, all_warnings_on_change=False):
any_alert_changed = False
prioritized_warnings = []
notices = []
total_enemies = 0
if full_status:
all_warnings_on_change = True
# for simplicity, just always check all holdings... we only report new events anyway,
# and this is necessary for 'all clear' messages anyway
for holding in self.monitor.holdings():
holding_string = self.filter_holding(holding)
if holding_string == holding:
# unfiltered, fix the case instead
holding_string = self.monitor.cased_holding_name.get(holding)
# Get the full holding message
last_alert = self.holding_alert.get(holding)
if last_alert is None:
self.holding_alert[holding] = last_alert = f'{holding_string} is clear'
holding_state = self.monitor.holding_state(holding)
enemies_by_clan = {}
enemy_count = 0
most_numerous_clan_enemy_count = 0
most_numerous_clan = None
for name in holding_state.players:
clan, rank = self.monitor.get_player_clan_info(name)
if self.is_friendly(name, clan):
continue
enemies = enemies_by_clan.get(clan)
if enemies is None:
enemies_by_clan[clan] = enemies = set()
enemies.add(name)
# if it's a new highest total or the same but with a clan alphabetically earlier (prioritizing clans over unclanned None entries)
clan_enemy_count = len(enemies)
enemy_count += clan_enemy_count
if clan_enemy_count > most_numerous_clan_enemy_count or (clan_enemy_count == most_numerous_clan_enemy_count and (
# most numerous is unclanned, or it is a clan and this clan is one alphabetically earlier
# (prioritizing clans over unclanned 'None' entries)
not most_numerous_clan or (clan and clan < most_numerous_clan))):
most_numerous_clan_enemy_count = clan_enemy_count
most_numerous_clan = clan
if enemy_count:
total_enemies += enemy_count
if len(enemies_by_clan) == 1:
clan, enemies = next(iter(enemies_by_clan.items()))
clan_string = self.filter_clan(clan)
if clan_string == clan:
# unfiltered, fix the case instead
clan_string = self.monitor.cased_clan_name.get(clan)
if len(enemies) == 1:
name = next(iter(enemies))
name_string = self.filter_player(name)
if name_string == name:
# unfiltered, fix the case instead
name_string = self.monitor.cased_player_name.get(name)
alert = f'{holding_string} has enemy {name_string} from {clan_string}'
else:
alert = f'{holding_string} has {enemy_count} enemies from {clan_string}'
else:
clan_string = self.filter_clan(most_numerous_clan)
if clan_string == most_numerous_clan:
# unfiltered, fix the case instead
clan_string = self.monitor.cased_clan_name.get(most_numerous_clan)
alert = f'{holding_string} has {enemy_count} enemies, mostly from {clan_string}'
is_warning = True
else:
alert = f'{holding_string} is clear'
is_warning = False
this_alert_changed = (last_alert != alert)
if this_alert_changed or (is_warning and all_warnings_on_change):
if this_alert_changed:
any_alert_changed = True
# this is a new alert, add it to the list to be output
if is_warning:
# just for sorting the messages by enemy count and holding name
bisect.insort(prioritized_warnings, (-enemy_count, holding, alert))
else:
# for sorting by holding name
bisect.insort(notices, (holding, alert))
#print(f'CHANGED! "{last_alert}" != {alert}')
self.holding_alert[holding] = alert
alerts = []
if any_alert_changed or full_status:
warnings = [entry[2] for entry in prioritized_warnings]
notices = [entry[1] for entry in notices]
#print(f'ALERT CHANGED: {warnings} ____ {notices}')
if warnings:
alerts.append(f'WARNING: {oxford_comma_delimited_string(warnings)}')
# if everything is clear, and either we want a status
# update or this is indeed new (because a new notice exists)
if not total_enemies and (full_status or notices):
alerts.append('NOTICE: all clear')
elif notices:
alerts.append(f'NOTICE: {oxford_comma_delimited_string(notices)}')
# TODO: remove debug divider
#print('----------------')
return alerts
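    # Note on the ordering used above (descriptive comment only):
    # prioritized_warnings holds (-enemy_count, holding, alert) tuples, so
    # bisect.insort keeps warnings sorted by descending enemy count and then by
    # holding name, while notices holds (holding, alert) tuples and therefore
    # stays alphabetical by holding.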
def check_for_changes(self, full_status=False, all_warnings_on_change=False):
now = datetime.datetime.now()
changed_proximity, changed_resources = self.monitor.check_for_changes()
if changed_proximity:
for holding, player_state in changed_proximity.items():
# check the new events for 'seen' functionality
for name, state in player_state.items():
present, is_current = state
if is_current:
# by checking if it's current, we're sure that this is the latest
# location, for situations where the player has left multiple holdings
# within the contents of a single update.
self.seen_players[name] = (now, holding)
return (now, self._get_alerts(full_status=full_status, all_warnings_on_change=all_warnings_on_change))
# get the status without checking
def status(self):
now = datetime.datetime.now()
return (now, self._get_alerts(full_status=True)) | 2.515625 | 3 |
horus_media_examples/list_recordings.py | horus-view-and-explore/horus-media-client | 3 | 12788193 | # Copyright(C) 2020 Horus View and Explore B.V.
import psycopg2
from horus_db import Frames, Recordings, Frame, Recording
# This example shows how to iterate over all the recordings
def get_connection():
return psycopg2.connect(
"dbname=HorusWebMoviePlayer user=postgres password=<PASSWORD>")
connection = get_connection()
recordings = Recordings(connection)
cursor = recordings.all()
recording = Recording(cursor)
while recording is not None:
print(" ", recording.id, " ", recording.directory)
recording = Recording(cursor)
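# A small convenience sketch (not part of the original example): the same
# cursor-style iteration wrapped in a generator. It relies only on the
# Recordings.all() and Recording(cursor) calls used above.
def iter_recordings(recordings):
    cursor = recordings.all()
    recording = Recording(cursor)
    while recording is not None:
        yield recording
        recording = Recording(cursor)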
| 2.9375 | 3 |
INTEST.py | ankitpipalia/codechef-solutions | 1 | 12788194 | <reponame>ankitpipalia/codechef-solutions<filename>INTEST.py
n, divider = input().split(" ")
n = int(n)
divider = int(divider)
counter = 0
while(n > 0):
n -= 1
value = int(input())
if value % divider == 0:
counter += 1
print(counter) | 3.40625 | 3 |
python/hopsworks/git_provider.py | robzor92/hopsworks-api | 0 | 12788195 | <reponame>robzor92/hopsworks-api
#
# Copyright 2022 Logical Clocks AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import humps
import json
from hopsworks.core import git_provider_api
from hopsworks import util
class GitProvider:
def __init__(
self,
username=None,
token=None,
git_provider=None,
url=None,
name=None,
type=None,
href=None,
expand=None,
items=None,
count=None,
project_id=None,
project_name=None,
):
self._username = username
self._git_provider = git_provider
self._git_provider_api = git_provider_api.GitProviderApi(
project_id, project_name
)
@classmethod
def from_response_json(cls, json_dict, project_id, project_name):
# Count is not set by the backend so parse based on items array
json_decamelized = humps.decamelize(json_dict)
if len(json_decamelized["items"]) == 0:
return []
else:
return [
cls(**provider, project_id=project_id, project_name=project_name)
for provider in json_decamelized["items"]
]
@property
def username(self):
"""Username set for the provider"""
return self._username
@property
def git_provider(self):
"""Name of the provider, can be GitHub, GitLab or BitBucket"""
return self._git_provider
def delete(self):
"""Remove the git provider configuration.
# Raises
`RestAPIError`.
"""
self._git_provider_api._delete_provider(self.git_provider)
def json(self):
return json.dumps(self, cls=util.Encoder)
def __str__(self):
return self.json()
def __repr__(self):
return f"GitProvider({self._username!r}, {self._git_provider!r})"
| 2.09375 | 2 |
alerta/checker/client.py | MailOnline/alerta | 0 | 12788196 | <reponame>MailOnline/alerta
import os
import sys
import subprocess
import shlex
from alerta.common import log as logging
from alerta.common import config
from alerta.common.alert import Alert
from alerta.common.heartbeat import Heartbeat
from alerta.common import severity_code
from alerta.common.api import ApiClient
Version = '2.0.3'
LOG = logging.getLogger(__name__)
CONF = config.CONF
class CheckerClient(object):
nagios_opts = {
'nagios_plugins': '/usr/lib64/nagios/plugins',
}
def __init__(self):
config.register_opts(CheckerClient.nagios_opts)
def main(self):
if CONF.heartbeat:
msg = Heartbeat(version=Version)
else:
# Run Nagios plugin check
args = shlex.split(os.path.join(CONF.nagios_plugins, CONF.nagios_cmd))
LOG.info('Running %s', ' '.join(args))
try:
check = subprocess.Popen(args, stdout=subprocess.PIPE)
except Exception, e:
LOG.error('Nagios check did not execute: %s', e)
sys.exit(1)
stdout = check.communicate()[0]
rc = check.returncode
LOG.debug('Nagios plugin %s => %s (rc=%d)', CONF.nagios_cmd, stdout, rc)
if rc == 0:
severity = severity_code.NORMAL
elif rc == 1:
severity = severity_code.WARNING
elif rc == 2:
severity = severity_code.CRITICAL
elif rc == 3:
severity = severity_code.UNKNOWN
else:
rc = -1
severity = severity_code.INDETERMINATE
# Parse Nagios plugin check output
text = ''
long_text = ''
perf_data = ''
extra_perf_data = False
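            # Nagios plugins conventionally print "summary text | perfdata" on the
            # first line, optionally followed by long output lines that may also
            # carry a "| perfdata" section, e.g. (illustrative sample):
            #   DISK OK - free space: / 3326 MB (56%) | /=2643MB;5948;5958;0;5968
            # The loop below splits each line on '|' accordingly.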
for num, line in enumerate(stdout.split('\n'), start=1):
if num == 1:
if '|' in line:
text = line.split('|')[0].rstrip(' ')
perf_data = line.split('|')[1]
value = perf_data.split(';')[0].lstrip(' ')
else:
text = line
value = 'rc=%s' % rc
else:
if '|' in line:
long_text += line.split('|')[0]
perf_data += line.split('|')[1]
extra_perf_data = True
elif extra_perf_data is False:
long_text += line
else:
perf_data += line
LOG.debug('Short Output: %s', text)
LOG.debug('Long Output: %s', long_text)
LOG.debug('Perf Data: %s', perf_data)
graph_urls = None
msg = Alert(
resource=CONF.resource,
event=CONF.event,
correlate=CONF.correlate,
group=CONF.group,
value=value,
severity=severity,
environment=CONF.environment,
service=CONF.service,
text=text + ' ' + long_text,
event_type='nagiosAlert',
tags=CONF.tags,
threshold_info=CONF.nagios_cmd,
timeout=CONF.timeout,
raw_data=stdout,
more_info=perf_data,
graph_urls=graph_urls,
)
if CONF.dry_run:
print msg
else:
LOG.debug('Message => %s', repr(msg))
api = ApiClient()
api.send(msg)
return msg.get_id() | 2.03125 | 2 |
ros/src/twist_controller/twist_controller.py | hssavage/CarND-Capstone | 0 | 12788197 | '''
###############################################################################
# twist_controller.py #
# --------------------------------------------------------------------------- #
# #
# Description: #
# ------------ #
# This module contains the source for the command controller for the throttle #
# brakes and steering for the Self-Driving Car System. #
# #
# Change Log: #
# ----------- #
# +--------------------+---------------+------------------------------------+ #
# | Date | Author | Description | #
# +--------------------+---------------+------------------------------------+ #
# | 2/24/2018 | <NAME> | Initial pass on the code | #
# +--------------------+---------------+------------------------------------+ #
# | 2/27/2018 | <NAME> | Integrated a velocity controller | #
# | | | that works better than a PID | #
# +--------------------+---------------+------------------------------------+ #
# | 2/28/2018 | <NAME> | Remove a few irrelevant lines of | #
# | | | code and added comments | #
# +--------------------+---------------+------------------------------------+ #
# | 3/13/2018 | <NAME> | Changed twist cmd update interface | #
# | | | to "set_target_*" for clarity | #
# +--------------------+---------------+------------------------------------+ #
# | 3/29/2018 | <NAME> | Updated the velocity_controller | #
# +--------------------+---------------+------------------------------------+ #
# | 4/12/2018 | <NAME> | Reverted some changes to carry max | #
# | | | accel values for thresholding in | #
# | | | the velocity controller | #
# +--------------------+---------------+------------------------------------+ #
###############################################################################
'''
# Debug prints - to be removed
import rospy
# For steering control
from yaw_controller import YawController
# For throttle/brake control
from velocity_controller import VelocityController
class Controller(object):
def __init__(self, wheel_base=0.0, steer_ratio=0.0, min_speed=0.0,
max_lat_accel=0.0, max_steer_angle=0.0, vehicle_mass=1e-6,
max_accel=0.0, max_decel=0.0, max_input_accel=0.0,
max_input_decel=0.0, deadband=0.0, fuel_capacity=0.0,
wheel_radius=0.0):
'''
Initializes the controller object
'''
# Steering controller
self.steering_controller = YawController(wheel_base=wheel_base,
steer_ratio=steer_ratio,
min_speed=min_speed,
max_lat_accel=max_lat_accel,
max_steer_angle=max_steer_angle)
# Throttle/Brake Controller
self.throttle_controller = VelocityController(
vehicle_mass=vehicle_mass,
max_accel=max_accel,
max_decel=max_decel,
max_input_accel=max_input_accel,
max_input_decel=max_input_decel,
wheel_radius=wheel_radius,
deadband=deadband,
fuel_capacity=fuel_capacity)
# Vehicle Status variables
self.cur_linear_velocity = 0
self.cur_angular_velocity = 0
# Desired state variables
self.target_linear_velocity = 0
self.target_angular_velocity = 0
def set_current_linear_velocity(self, vel=0):
'''
Sets the current linear velocity of the vehicle for the controller
to use
        Args:
            vel (float): the current linear velocity (m/s)
Complexity: O(1)
'''
self.cur_linear_velocity = vel
def set_current_angular_velocity(self, vel=0):
'''
Sets the current angular velocity of the vehicle for the controller
to use
        Args:
            vel (float): the current angular velocity (rad/s)
Complexity: O(1)
'''
self.cur_angular_velocity = vel
def set_target_linear_velocity(self, vel=0):
'''
Sets the target linear velocity of the vehicle for the controller
to use
        Args:
            vel (float): the target linear velocity (m/s)
Complexity: O(1)
'''
self.target_linear_velocity = vel
def set_target_angular_velocity(self, vel=0):
'''
Sets the target angular velocity of the vehicle for the controller
to use
        Args:
            vel (float): the target angular velocity (rad/s)
Complexity: O(1)
'''
self.target_angular_velocity = vel
def control(self):
'''
Returns a list of the desired throttle, brake and steering values
        Returns:
            tuple of float: (throttle, brake, steer)
Complexity: O(1)
'''
# Values to return
throttle = 0.0
brake = 0.0
steer = 0.0
# Run steering controller
steer = self.steering_controller.get_steering(
self.target_linear_velocity,
self.target_angular_velocity,
self.cur_linear_velocity
)
# Run throttle controller
throttle, brake = self.throttle_controller.get_throttle_brake(
self.target_linear_velocity,
self.target_angular_velocity,
self.cur_linear_velocity
)
# Hand back values
return throttle, brake, steer
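# Usage sketch (illustrative only; the constructor values below are placeholder
# numbers, not the real vehicle parameters):
#   controller = Controller(wheel_base=2.85, steer_ratio=14.8, min_speed=0.1,
#                           max_lat_accel=3.0, max_steer_angle=8.0,
#                           vehicle_mass=1736.35, wheel_radius=0.2413)
#   controller.set_current_linear_velocity(10.0)
#   controller.set_target_linear_velocity(11.0)
#   controller.set_target_angular_velocity(0.02)
#   throttle, brake, steer = controller.control()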
| 2.359375 | 2 |
manage.py | zagaran/instant-census | 1 | 12788198 | #!/usr/bin/python
""" DO NOT IMPORT ANYTHING HIGHER UP THAN UTILS IN THE GLOBAL SCOPE.
DOING SO WILL BREAK SERVER SETUP """
import sys
import unittest2 as unittest
from IPython import start_ipython
from utils.local import untracked_file_check, delete_pyc_files
from utils.server import TEST_RUN
COMMANDS = {"shell": "runs an ipython shell to interact with the database",
"import_check": "ensures that code is good to commit",
"test": "runs the regular test suite",
"full_tests": "runs a lengthy test suite",
"ping": "checks if the database is up or down",
"setup": "args: customer_name [admin_emalis]; needs sudo",
"deploy_check": "checks if the system is safe to deploy",
"make_customer": ""}
def project_import_check():
import app # @UnusedImport to ensure no syntax errors in the codebase
import cron # @UnusedImport to ensure no syntax errors in the codebase
import utils.database # @UnusedImport to ensure database connection
def regular_test_suite():
global TEST_RUN
TEST_RUN = True
project_import_check()
test_loader = unittest.TestLoader()
test_suite = test_loader.discover(".")
#to run individual tests, comment out the line above (test_loader.discoverer) and use the following:
#from tests.test_cohort_dashboard import TestCohortsDashboard
#from tests.test_integration import TestIntegration
#from tests.test_status_change_messages import TestStatusChangeMessages
#test_suite = unittest.TestSuite()
#test_suite.addTest(TestIntegration("test_copied_schedule"))
test_runner = unittest.TextTestRunner(buffer=True)
test_runner.run(test_suite)
def full_test_suite():
from tests.test_locking import full_lock_drill
full_lock_drill()
regular_test_suite()
if __name__ == "__main__":
delete_pyc_files()
if len(sys.argv) <= 1 or sys.argv[1] not in COMMANDS:
print "Usage: python manage.py COMMAND"
print "\nCOMMAND takes one of the following options:\n"
for k, v in COMMANDS.items():
print "%s: %s" % (k, v)
print
elif sys.argv[1] == "shell":
start_ipython(argv=["-i", "supertools/terminal.py"])
elif sys.argv[1] == "ping":
from utils.database import mongo_ping
mongo_ping(output=True)
elif sys.argv[1] == "import_check":
untracked_file_check()
project_import_check()
elif sys.argv[1] == "test":
print "\nWarning: Settings in settings_override.py should be disabled prior to running tests.\n"
regular_test_suite()
elif sys.argv[1] == "full_tests":
print "\nWarning: Settings in settings_override.py should be disabled prior to running tests.\n"
full_test_suite()
elif sys.argv[1] == "deploy_check":
from supertools.cron_status import safe_to_deploy
if not safe_to_deploy():
raise Exception("Error: hourly cron may be running; it is not currently safe to deploy")
print "Deploy check passed"
elif sys.argv[1] == "setup":
if len(sys.argv) <= 2:
print "Usage: setup customer_name [admin_emalis]"
exit(1)
from supertools.setup import do_setup
do_setup(sys.argv[2], sys.argv[3:])
else:
raise Exception("Not Implemented")
| 2.203125 | 2 |
SimpleMaze.py | MarcusRainbow/Maze | 0 | 12788199 | from typing import List, Set, Optional, Tuple
from random import randrange, shuffle, random
from RatInterface import Rat, MazeInfo
from SimpleRats import AlwaysLeftRat, RandomRat
from Localizer import Localizer, NonLocalLocalizer, OneDimensionalLocalizer, TwoDimensionalOneStepLocalizer
from graphviz import Graph
class SimpleMaze:
"""
A simple maze is a vector of vectors of edges. It supports one
rat at a time. It has one start and one end. WLOG, the start is
always the first element and the end is one after the last. There is no
concept of compass directions in this maze, and there is no
policing to prevent crossing paths.
"""
def __init__(self, edges: List[List[int]], fill_back_steps: bool):
"""
Initialise with a set of edges. If fill_back_steps is true, we
generate backward edges to make it an undirected graph.
"""
validate_edges(edges, fill_back_steps)
self.all_edges = edges
def __str__(self):
return "SimpleMaze(%s)" % self.all_edges
def maze(self) -> List[List[int]]:
return self.all_edges
def solve(self, rat: Rat, max_iterations: int, info: Optional[MazeInfo] = None) -> bool:
"""
Tries to solve the maze. Returns the number of iterations used.
If it exceeds max_iterations, returns max_iterations + 1. If it
fails for any other reason, returns 0.
"""
# always start from the beginning
pos = 0
iterations = 0
# set the last_pos such that the back path is the last in the first list
last_pos = self.all_edges[pos][-1]
#print("pos=%i last_pos=%i" % (pos, last_pos))
# keep going until the end
end = len(self.all_edges)
while (pos < end) and (iterations <= max_iterations):
# find the edges from the current node
edges = self.all_edges[pos]
# one of these edges should point back to where we came from
            if edges.count(last_pos) != 1:
                print("Problem: no edge from %i to %i" % (pos, last_pos))
                return 0  # give up; the maze is inconsistent
            back = edges.index(last_pos)
# supply maze info for rats that need it. There is only one rat,
# so supply rat number zero
num_edges = len(edges)
if info:
info.set_pos(pos, back, num_edges, rat)
# get the rat to choose a direction
turn = rat.turn(num_edges, info)
if (turn >= num_edges) or (turn < 0):
return 0 # give up
# going in some direction
direction = (turn + back) % num_edges
last_pos = pos
pos = edges[direction]
iterations = iterations + 1
#print("pos=%i last_pos=%i" % (pos, last_pos))
# hit the end, or failed with an iteration count that is too high
# (technically we should worry about the case where we hit max
# iterations with a valid exit, but this is unlikely and does not
# matter much).
return iterations
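# Illustrative example of the edge-list representation used above (taken from
# the tests below): SimpleMaze([[1, 3], [2], [3, 0]], True) has three nodes;
# node 0 connects to nodes 1 and 3, node 1 to node 2, and node 2 to node 3 and
# back to node 0, where index 3 (== len(edges)) is the exit.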
def validate_edges(edges: List[List[int]], fill_back_steps: bool):
"""
validate and optionally fill back steps
"""
end = len(edges)
if end < 1:
raise Exception("Must be at least one node")
has_end = False
edge_from = 0
for node in edges:
if len(set(node)) != len(node):
raise Exception("Must not have duplicate edges")
for edge_to in node:
if edge_to == end:
has_end = True # OK to have multiple routes to end
elif edge_to > end:
raise Exception("Edge out of range")
elif edge_to == edge_from:
raise Exception("Not allowed to have edges to self")
elif fill_back_steps:
# make sure we have a return edge matching this
ensure_edge(edges, edge_to, edge_from)
# next node
edge_from = edge_from + 1
# We validate that at least one node has an edge leading to the
# exit. However, we do not currently check that there is a clear
# path to any such node.
if not has_end:
raise Exception("No edge to the end node")
def ensure_edge(maze: List[List[int]], edge_from: int, edge_to: int):
"""
Validates that we have an edge (and if necessary inserts one)
"""
node = maze[edge_from]
count = node.count(edge_to)
if count == 1:
return # already have this edge. Nothing more to do
elif count > 1:
raise Exception("Edges must be unique")
# We need this edge. Append it (no attempt to avoid crossing paths)
node.append(edge_to)
def random_maze(allow_loops: float, local: Localizer) -> List[List[int]]:
"""
Creates a random maze with the specified number of nodes.
"""
# Do NOT write maze = [[]] * node_count as this makes all list elements the same memory!
node_count = local.node_count()
maze = [[] for y in range(node_count)]
# Remember all the nodes that connect to the origin. So far, just
# contains the origin, which is zero by definition.
accessible = { 0 }
# First do a random walk until we hit the end. There may be loops,
# but we don't worry about that. Just make sure there are no duplicate
# edges. Also, create bidirectional edges as we go.
edge_from = 0
while edge_from != node_count:
edge_to = local.random_step(edge_from, True)
add_bidirectional_edges(maze, accessible, edge_from, edge_to, allow_loops)
edge_from = edge_to
# We now have a working maze, but not a very interesting one, in that it
# just has one random path from start to end. Add some blind alleys and
# ensure that every node has at least one edge, which somehow connects to
# the original random walk, hence the start (and the end)
for i in range(node_count):
if not (i in accessible):
# random walk from i until we hit the accessible set
new_path = { i }
edge_from = i
while not (edge_from in accessible):
edge_to = local.random_step(edge_from, False) # avoid the exit
add_bidirectional_edges(maze, new_path, edge_from, edge_to, allow_loops)
edge_from = edge_to
# all these nodes are now accessible
accessible.update(new_path)
# We now have a maze with some blind alleys and all nodes are accessible.
# Shuffle the edges in each node (we do not want the first edge to always
# be the one that leads to the exit) and return it.
for node in maze:
shuffle(node)
return maze
def add_bidirectional_edges(
maze: List[List[int]],
accessible: Set[int],
edge_from: int,
edge_to: int,
allow_loops: float):
"""
Adds (or at least ensures the existence of) bidirectional edges, and adds
the end node to a set of accessible nodes. If allow_loops is zero, we prevent
loops (avoid adding an edge that leads to an accessible node). If it is one,
we allow them. If between zero and one, we randomly allow them or not.
"""
if edge_to != edge_from and allow_edge(allow_loops, edge_to, accessible):
ensure_edge(maze, edge_from, edge_to)
if edge_to != len(maze): # do not need back path from the exit
ensure_edge(maze, edge_to, edge_from)
accessible.add(edge_to)
def allow_edge(allow_loops: float, edge_to: int, accessible: Set[int]) -> bool:
EPSILON = 1e-10
if allow_loops > 1.0 - EPSILON:
return True
elif not (edge_to in accessible):
return True
elif allow_loops < EPSILON:
return False
elif random() < allow_loops:
return True
else:
return False
def render_graph(maze: List[List[int]], file_name):
"""
Generate a PDF file showing the maze as an undirected graph. Uses
GraphViz, which must be installed and on the PATH. Note that
the resulting graph shows only the nodes and their connections. The
ordering of edges around each node is determined by GraphViz itself.
You therefore cannot rely on this rendering to tell you whether to
turn left or right at each node.
"""
if len(maze) > 26:
raise Exception("render_graph can only handle up to 26 nodes")
dot = Graph()
this = 0
edges = []
unknowns = 0
A = ord('A')
a = ord('a')
for node in maze:
id = str(chr(A + this))
if this == 0:
dot.node(id, "Start (A)")
else:
dot.node(id, id)
for edge in node:
# avoid duplicating edges by only showing to > from
if edge > this:
edge_str = id + str(chr(A + edge))
edges.append(edge_str)
elif edge < 0:
unknown_id = str(chr(a + unknowns))
unknowns = unknowns + 1
edge_str = id + unknown_id
edges.append(edge_str)
dot.node(unknown_id, "Unknown")
this = this + 1
# The final node is not in the list, as it exists only as the destination
# of one or more edge
id = str(chr(A + len(maze)))
dot.node(id, "End (%s)" % id)
#print(edges)
dot.edges(edges)
#print(dot.source)
dot.render(file_name, view=True)
def are_equal_mazes(left: List[List[int]], right: List[List[int]],
left_start: int = 0, right_start: int = 0) -> bool:
"""
Test whether two mazes are the same. The nodes may not be in the same
order, and the edges may be rotated, but the topology should be the same.
Handle negative nodes as a wildcard, matching anything.
"""
return are_nodes_equal(left, right, left_start, right_start, -1, -1, set())
def are_nodes_equal(
left: List[List[int]],
right: List[List[int]],
left_node: int,
right_node: int,
left_back: int,
right_back: int,
already_checked: Set[Tuple[int, int]]) -> bool:
#print("are_nodes_equal(%i, %i, %i, %i)"
# % (left_node, right_node, left_back, right_back))
# Treat negative nodes as wildcards, matching anything
if left_node < 0 or right_node < 0:
return True
# Only match nodes that are out of range if both are
left_ended = left_node >= len(left)
right_ended = right_node >= len(right)
if left_ended != right_ended:
#print("not equal: one of %i and %i is the end" % (left_node, right_node))
return False
elif left_ended:
return True
# Avoid recursing for ever if there are loops in the mazes
if (left_node, right_node) in already_checked:
return True
already_checked.add((left_node, right_node))
# Got two real nodes. Make sure they have the same number of edges
left_edges = left[left_node]
right_edges = right[right_node]
edge_count = len(left_edges)
if edge_count != len(right_edges):
#print("not equal: %i has %i edges and %i has %i" % (left_node, len(left_edges), right_node, len(right_edges)))
return False
# May both be empty (unlikely, as this would make a very trivial maze)
if not left_edges:
return True
# We rely on the back pointer to tell us the relative rotation.
if left_back >= 0 and left_back in left_edges and right_back >= 0 and right_back in right_edges:
left_index = left_edges.index(left_back)
right_index = right_edges.index(right_back)
rotation = right_index - left_index
return are_edges_equal(left_edges, right_edges, right, left,
left_node, right_node, rotation, already_checked)
# if no back-pointer defined, just try all the possibilities
else:
for r in range(edge_count):
if are_edges_equal(left_edges, right_edges, right, left,
left_node, right_node, r, already_checked):
return True
#print("not equal: no possible rotation of %i and %i works" % (left_node, right_node))
return False
def are_edges_equal(
left_edges: List[int],
right_edges: List[int],
left: List[List[int]],
right: List[List[int]],
left_node: int,
right_node: int,
rotation: int,
already_checked: Set[Tuple[int, int]]) -> bool:
#print("are_edges_equal(%s, %s, %i, %i, %i)"
# % (left_edges, right_edges, left_node, right_node, rotation))
edge_count = len(left_edges)
assert(edge_count == len(right_edges))
for i, left_edge in enumerate(left_edges):
right_edge = right_edges[(i + rotation) % edge_count]
if not are_nodes_equal(right, left, left_edge, right_edge,
left_node, right_node, already_checked):
return False
return True
def test_fill_back_steps():
maze = SimpleMaze([[1, 3], [2], [3, 0]], True)
print("test_fill_back_steps: %s" % maze)
assert(maze.maze() == [[1, 3, 2], [2, 0], [3, 0, 1]])
def test_equal_mazes():
maze1 = SimpleMaze([[1, 3], [2], [0, 3]], True)
maze2 = SimpleMaze([[2, 3], [0, 3], [1]], True)
#print(maze1.maze())
#print(maze2.maze())
assert(are_equal_mazes(maze1.maze(), maze2.maze()))
print("test_equal_mazes succeeded")
def test_unequal_mazes():
maze1 = SimpleMaze([[1, 3, 2], [2, 0], [0, 3, 1]], False)
maze2 = SimpleMaze([[2, 3, 1], [3, 0, 2], [1, 0]], False)
#print(maze1.maze())
#print(maze2.maze())
assert(not are_equal_mazes(maze1.maze(), maze2.maze()))
print("test_unequal_mazes succeeded")
def test_left_rat():
rat = AlwaysLeftRat()
maze = SimpleMaze([[1, 3], [2], [3, 0]], True)
MAX_ITER = 10
iter = maze.solve(rat, MAX_ITER)
print("test_left_rat solved in %i iterations" % iter)
assert(iter > 0 and iter <= MAX_ITER)
def test_left_rat_fail():
rat = AlwaysLeftRat()
# this maze has a loop in it (0 -> 1 -> 2 -> 0)
maze = SimpleMaze([[1, 3], [2], [0, 3]], True)
MAX_ITER = 10
iter = maze.solve(rat, MAX_ITER)
print("test_left_rat_fail timed out as desired after %i iterations" % iter)
assert(iter > MAX_ITER)
def test_random_rat():
rat = RandomRat()
maze = SimpleMaze([[1, 3], [2], [3, 0]], True)
MAX_ITER = 1000
iter = maze.solve(rat, MAX_ITER)
print("test_random_rat solved in %i iterations" % iter)
assert(iter > 0 and iter < MAX_ITER)
def test_big_maze():
rat = RandomRat()
maze = SimpleMaze([[5, 3], [6], [5, 3, 17, 14, 13, 20],
[2, 0, 4, 14, 13, 5, 17, 12], [7, 3], [0, 14, 9, 2, 6, 3],
[5, 13, 1], [8, 4, 19, 10], [14, 7], [14, 5, 17], [7, 13],
[15, 16], [3, 15], [6, 17, 10, 3, 16, 2], [5, 9, 2, 8, 3, 19],
[12, 11, 18], [11, 13], [13, 2, 9, 3], [15], [14, 7]], False)
MAX_ITER = 1000
iter = maze.solve(rat, MAX_ITER)
print("test_big_maze solved in %i iterations" % iter)
assert(iter > 0 and iter < MAX_ITER)
def test_random_maze():
maze = SimpleMaze(random_maze(0.5, NonLocalLocalizer(25)), False)
#print(maze)
render_graph(maze.maze(), "temp/random_maze")
rat = RandomRat()
MAX_ITER = 1000
iter = maze.solve(rat, MAX_ITER)
print("test_random_maze solved in %i iterations" % iter)
assert(iter > 0 and iter < MAX_ITER)
def test_random_noloop_maze():
maze = SimpleMaze(random_maze(0.0, NonLocalLocalizer(25)), False)
#print(maze)
render_graph(maze.maze(), "temp/random_noloop_maze")
rat = AlwaysLeftRat()
MAX_ITER = 1000
iter = maze.solve(rat, MAX_ITER)
print("test_random_noloop_maze solved in %i iterations" % iter)
assert(iter > 0 and iter < MAX_ITER)
def test_random_1d_maze():
maze = SimpleMaze(random_maze(0.5, OneDimensionalLocalizer(25, 5)), False)
#print(maze)
render_graph(maze.maze(), "temp/random_1d_maze")
rat = RandomRat()
MAX_ITER = 1000
iter = maze.solve(rat, MAX_ITER)
print("test_random_1d_maze solved in %i iterations" % iter)
assert(iter > 0 and iter < MAX_ITER)
def test_random_2d_maze():
maze = SimpleMaze(random_maze(0.1, TwoDimensionalOneStepLocalizer(25, 5)), False)
#print(maze)
render_graph(maze.maze(), "temp/random_2d_maze")
rat = RandomRat()
MAX_ITER = 1000
iter = maze.solve(rat, MAX_ITER)
print("test_random_2d_maze solved in %i iterations" % iter)
assert(iter > 0 and iter < MAX_ITER)
if __name__ == "__main__":
test_fill_back_steps()
test_equal_mazes()
test_unequal_mazes()
test_left_rat()
test_left_rat_fail()
test_random_rat()
test_big_maze()
test_random_maze()
test_random_noloop_maze()
test_random_1d_maze()
test_random_2d_maze()
| 3.484375 | 3 |
vgg16/17flowers/extractor.py | Rumiachang/keras-examples | 0 | 12788200 | import os
#from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras.preprocessing.image import ImageDataGenerator
#from tensorflow.keras.models import Sequential
#from tensorflow.keras.layers import Activation, Dropout, Flatten, Dense
#from tensorflow.keras import optimizers
#from tensorflow.keras.utils import np_utils
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D, Input
import tensorflow.keras.callbacks
from tensorflow.keras.optimizers import SGD
import numpy as np
#from smallcnn import save_history
"""
classes = ['Tulip', 'Snowdrop', 'LilyValley', 'Bluebell', 'Crocus',
'Iris', 'Tigerlily', 'Daffodil', 'Fritillary', 'Sunflower',
'Daisy', 'ColtsFoot', 'Dandelion', 'Cowslip', 'Buttercup',
'Windflower', 'Pansy']
"""
classes = ['Dog', 'Cat', 'Raccoon', 'Macaque']
#IMAGE_SIZE = 150
BATCH_SIZE = 32
# number of images per batch
NUM_TRAINING_SAMPLES = 4000
# total number of training images
NUM_VALIDATION_SAMPLES = 1000
# total number of validation images
EPOCHS = 50
# number of epochs
N_CLASSES = len(classes)
# number of classes
IMG_ROWS, IMG_COLS = 150, 150
# image size (rows, cols)
CHANNELS = 3
# number of image channels (3 for RGB)
train_data_dir = 'data/train'
# training data directory
validation_data_dir = 'data/validation'
# validation data directory
result_dir = 'results'
# results directory
if not os.path.exists(result_dir):
os.mkdir(result_dir)
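# flow_from_directory() below infers the class labels from sub-directory names,
# so the expected layout is (sketch): data/train/Dog/*.jpg, data/train/Cat/*.jpg,
# ... and likewise under data/validation/ for each entry in `classes`.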
def save_history(history, result_file):
loss = history.history['loss']
acc = history.history['acc']
val_loss = history.history['val_loss']
val_acc = history.history['val_acc']
nb_epoch = len(acc)
with open(result_file, "w") as fp:
fp.write("epoch\tloss\tacc\tval_loss\tval_acc\n")
for i in range(nb_epoch):
fp.write("%d\t%f\t%f\t%f\t%f\n" % (i, loss[i], acc[i], val_loss[i], val_acc[i]))
def train_top_model():
    # Build a fully connected head that takes InceptionV3 bottleneck features as input and outputs the class
input_tensor = Input(shape=(IMG_ROWS, IMG_COLS, CHANNELS))
    # input tensor (rank-3: image rows x cols x RGB channels)
base_model = InceptionV3(weights='imagenet', include_top=False,input_tensor=input_tensor)
x = base_model.output
x = GlobalAveragePooling2D()(x)
    # flatten the output tensor (via global average pooling)
x = Dense(1024, activation='relu')(x)
    # fully connected layer, 1024 units, ReLU activation
predictions = Dense(N_CLASSES, activation='softmax')(x)
model = Model(inputs=base_model.input, outputs=predictions)
for layer in base_model.layers:
layer.trainable = False
model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss='categorical_crossentropy',metrics=['accuracy'])
model.summary()
train_datagen = ImageDataGenerator(rescale=1.0 / 255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
rotation_range=10
)
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
train_generator = train_datagen.flow_from_directory(directory=train_data_dir,
target_size=(IMG_ROWS, IMG_COLS),
batch_size=BATCH_SIZE,
class_mode='categorical',
shuffle=True
)
validation_generator = test_datagen.flow_from_directory(directory=validation_data_dir,
target_size=(IMG_ROWS, IMG_COLS),
batch_size=BATCH_SIZE,
class_mode='categorical',
shuffle=True
)
hist = model.fit_generator(generator=train_generator,
steps_per_epoch=NUM_TRAINING_SAMPLES//BATCH_SIZE,
epochs=EPOCHS,
verbose=1,
validation_data=validation_generator,
validation_steps=NUM_VALIDATION_SAMPLES//BATCH_SIZE,
)
#model.save('vermins_fc_model.hdf5')
model.save(os.path.join(result_dir, 'vermins_fc_model.h5'))
save_history(hist, os.path.join(result_dir, 'history_extractor.txt'))
if __name__ == '__main__':
#save_bottleneck_features()
train_top_model()
| 2.1875 | 2 |
eb_butterflies_to_CSV.py | acic2017/Butterfly-SDM | 1 | 12788201 | <reponame>acic2017/Butterfly-SDM<filename>eb_butterflies_to_CSV.py
#!/usr/bin/env python
# CSV columns: observation_id, english_name, latin_name, year_created, month_created, latitude, longitude
import pandas as pd
from sqlalchemy import create_engine
query = """
WITH Location AS (SELECT *
FROM eb_butterflies.observations o JOIN eb_central.checklists c on c.checklist_id=o.checklist_id
JOIN eb_central.sites s ON s.site_id=c.site_id)
Select o.observation_id, s.english_name, s.latin_name, Extract(Year from o.created) as "year_created" , Extract(month from o.created) as "month_created", l.latitude, l.longitude
FROM eb_butterflies.observations o Join eb_butterflies.species s on o.species_id=s.species_id
JOIN Location l on l.observation_id=o.observation_id
;
"""
e = create_engine(\
'postgresql://postgres:postgres@localhost:5432/ebutterfly')
c = e.connect()
pd.read_sql_query(query,c).to_csv('eb_butterflies.csv', index = False)
| 2.5625 | 3 |
pyatv/protocols/mrp/protobuf/AudioFadeResponseMessage_pb2.py | crxporter/pyatv | 0 | 12788202 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pyatv/protocols/mrp/protobuf/AudioFadeResponseMessage.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from pyatv.protocols.mrp.protobuf import ProtocolMessage_pb2 as pyatv_dot_protocols_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n;pyatv/protocols/mrp/protobuf/AudioFadeResponseMessage.proto\x1a\x32pyatv/protocols/mrp/protobuf/ProtocolMessage.proto\"0\n\x18\x41udioFadeResponseMessage\x12\x14\n\x0c\x66\x61\x64\x65\x44uration\x18\x01 \x01(\x03:M\n\x18\x61udioFadeResponseMessage\x12\x10.ProtocolMessage\x18Y \x01(\x0b\x32\x19.AudioFadeResponseMessage')
AUDIOFADERESPONSEMESSAGE_FIELD_NUMBER = 89
audioFadeResponseMessage = DESCRIPTOR.extensions_by_name['audioFadeResponseMessage']
_AUDIOFADERESPONSEMESSAGE = DESCRIPTOR.message_types_by_name['AudioFadeResponseMessage']
AudioFadeResponseMessage = _reflection.GeneratedProtocolMessageType('AudioFadeResponseMessage', (_message.Message,), {
'DESCRIPTOR' : _AUDIOFADERESPONSEMESSAGE,
'__module__' : 'pyatv.protocols.mrp.protobuf.AudioFadeResponseMessage_pb2'
# @@protoc_insertion_point(class_scope:AudioFadeResponseMessage)
})
_sym_db.RegisterMessage(AudioFadeResponseMessage)
if _descriptor._USE_C_DESCRIPTORS == False:
pyatv_dot_protocols_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2.ProtocolMessage.RegisterExtension(audioFadeResponseMessage)
DESCRIPTOR._options = None
_AUDIOFADERESPONSEMESSAGE._serialized_start=115
_AUDIOFADERESPONSEMESSAGE._serialized_end=163
# @@protoc_insertion_point(module_scope)
| 1.421875 | 1 |
iv/Leetcode/easy/e021_merge_sorted_list.py | iamsuman/iv | 2 | 12788203 | # Definition for singly-linked list.
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
def make_list(A):
if len(A) == 0:
return None
head = ListNode(A[0])
ptr = head
for i in A[1:]:
ptr.next = ListNode(i)
ptr = ptr.next
return head
def display(head):
if not head:
        print([])
        return
L = []
while head.next:
L.append(head.val)
head = head.next
L.append(head.val)
print(L)
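# Approach used below (descriptive comment only): splice the two lists together
# in place, advancing `prev` along the merged result and re-pointing its `next`
# at whichever input node holds the smaller value, then attach the leftover
# tail once one list is exhausted.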
class Solution:
def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:
res = l1
prev = None
while l1 and l2:
if l1.val <= l2.val:
if prev:
prev.next = l1
temp1 = l1.next
l1 = temp1
prev = prev.next
prev.next = l1
else:
prev = l1
l1 = l1.next
else:
if prev:
prev.next = l2
temp2 = l2.next
l2 = temp2
prev = prev.next
prev.next = l1
else:
prev = l2
res = l2
l2 = l2.next
if l2:
if prev:
prev.next = l2
else:
res = l2
if l1:
if prev:
prev.next = l1
else:
res = l1
return res
l1 = [1,3,5]; l2 = [2,4,6]
# l1 = [1,2,4]; l2 = [1,3,4]
# l1 = []; l2 = []
# l1 = []; l2 = [0]
# l1 = [0]; l2 = []
# l1 = [2]; l2 = [1]
# l1 = [-9,-7,-6,-1,0,1,2,4,7]; l2 = [-10,-6,4,6,8]
ll1 = make_list(l1)
ll2 = make_list(l2)
s = Solution()
head = s.mergeTwoLists(ll1, ll2)
display(head)
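# Expected output for the active test case (l1=[1,3,5], l2=[2,4,6]):
#   [1, 2, 3, 4, 5, 6]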
| 3.828125 | 4 |
Living_SDK/board/tg7100cevb/ucube.py | AL-Answer/HF-270-1.6.6 | 12 | 12788204 | <reponame>AL-Answer/HF-270-1.6.6
linux_only_targets="smart_outlet smart_led_bulb"
| 1.257813 | 1 |
scripts/repo.py | mjfwest/OpenMDAO-Framework | 69 | 12788205 | <reponame>mjfwest/OpenMDAO-Framework<gh_stars>10-100
#!/usr/bin/env python
"""
Repository maintenance.
Can record lock/unlock state of a repository.
Useful to keep multiple developers from stepping on each other,
but requires some discipline :-(
"""
import glob
import optparse
import os.path
import platform
import shutil
import stat
import subprocess
import sys
import time
if sys.platform != 'win32':
import pwd
LOCKFILE = 'repo_lock'
def main():
""" Repository maintenance. """
usage = """\
%prog OP [options] repository, where OP may be:
check -- check for lock
lock -- lock repository
unlock -- unlock repository
set -- set this as current repository
(sets OPENMDAO_REPO environment variable and starts a new shell)
fix -- fix permissions and remove generated directories
rmpyc -- remove 'orphan' .pyc files"""
parser = optparse.OptionParser(usage)
parser.add_option('-f', '--force', action='store_true',
default=False, help='forced unlock')
parser.add_option('-v', '--verbose', action='store_true',
default=False, help='print info messages')
try:
operation = sys.argv.pop(1)
except IndexError:
parser.print_help()
sys.exit(1)
options, arguments = parser.parse_args()
repository = ''
if len(arguments) > 0:
if len(arguments) == 1:
repository = arguments[0]
else:
parser.print_help()
sys.exit(1)
if not repository:
try:
repository = os.environ['OPENMDAO_REPO']
except KeyError:
pass
this_user = get_username()
path = find_repository(repository, this_user)
if not path:
print 'Cannot find repository!'
sys.exit(2)
if options.verbose:
print 'Repository root:', path
if operation == 'check':
do_check(path)
elif operation == 'lock':
do_lock(path)
elif operation == 'unlock':
do_unlock(path, options)
elif operation == 'set':
do_set(path, this_user)
elif operation == 'fix':
do_fix(path, options)
do_rmpyc(path)
elif operation == 'rmpyc':
do_rmpyc(path)
else:
parser.print_help()
sys.exit(1)
def do_check(path):
""" Perform 'check' operation. """
(user, mtime) = check_lockfile(path)
if user is not None:
print 'Repository locked by', user, 'at', mtime
else:
print 'Repository unlocked'
sys.exit(0)
def do_lock(path):
""" Perform 'lock' operation. """
(user, mtime) = check_lockfile(path)
if user is None:
create_lockfile(path)
sys.exit(0)
else:
print 'Repository already locked by', user, 'at', mtime
sys.exit(1)
def do_unlock(path, options):
""" Perform 'unlock' operation. """
(user, mtime) = check_lockfile(path)
if user is None:
print 'Repository is not locked'
sys.exit(1)
elif user == get_username() or options.force:
remove_lockfile(path)
sys.exit(0)
else:
print 'Repository locked by', user, 'at', mtime
sys.exit(1)
def do_set(path, user):
""" Perform 'set' operation. """
if find_repository(os.getcwd(), user) != path:
print 'Moving to', path
os.chdir(path)
os.environ['OPENMDAO_REPO'] = path
os.environ['PATH'] = os.path.join(path, 'buildout', 'bin') \
+ os.pathsep + os.path.join(path, 'scripts') \
+ os.pathsep + os.environ['PATH']
if sys.platform == 'win32':
sys.exit(subprocess.call(os.environ['ComSpec']))
else:
sys.exit(subprocess.call(os.environ['SHELL']))
def do_fix(repo_path, options):
""" Check/fix permissions and remove generated directories. """
directories = (
'buildout/bin',
'buildout/develop-eggs',
'buildout/eggs',
'buildout/html',
'buildout/parts',
'docs/_build',
)
files = (
'examples/openmdao.examples.bar3simulation/openmdao/examples/bar3simulation/bar3-f2pywrappers.f',
'examples/openmdao.examples.bar3simulation/openmdao/examples/bar3simulation/bar3module.c'
)
for relpath in directories:
if sys.platform == 'win32':
relpath.replace('/', '\\')
directory = os.path.join(repo_path, relpath)
if os.path.exists(directory):
shutil.rmtree(directory)
for relpath in files:
if sys.platform == 'win32':
relpath.replace('/', '\\')
filename = os.path.join(repo_path, relpath)
if os.path.exists(filename):
os.remove(filename)
for dirpath, dirnames, filenames in os.walk(repo_path):
if options.verbose:
print dirpath[len(repo_path):]
names = dirnames
names.extend(filenames)
for name in names:
path = os.path.join(dirpath, name)
info = os.stat(path)
mode = info.st_mode
fixup = mode
if (mode & stat.S_IRUSR) and not (mode & stat.S_IRGRP):
fixup |= stat.S_IRGRP
if (mode & stat.S_IWUSR) and not (mode & stat.S_IWGRP):
fixup |= stat.S_IWGRP
if (mode & stat.S_IXUSR) and not (mode & stat.S_IXGRP):
fixup |= stat.S_IXGRP
if options.verbose:
if fixup != mode:
print ' fixing %s %s' % (permission_bits(mode), name)
else:
print ' %s %s' % (permission_bits(mode), name)
elif fixup != mode:
print 'fixing %s %s' % (permission_bits(mode), path)
if fixup != mode:
try:
os.chmod(path, fixup)
except OSError, exc:
print ' %s' % exc
print ' (owner %s)' % get_username(info.st_uid)
def do_rmpyc(repo_path):
""" Remove 'orphan' .pyc files. """
for dirpath, dirnames, filenames in os.walk(repo_path):
for name in filenames:
if not name.endswith('.pyc'):
continue
path = os.path.join(dirpath, name)
if not os.path.exists(path[:-1]):
print 'removing', path
os.remove(path)
def permission_bits(mode):
""" Format permission bits in UNIX 'ls' style. """
bits = ''
if mode & stat.S_IRUSR:
bits += 'r'
else:
bits += '-'
if mode & stat.S_IWUSR:
bits += 'w'
else:
bits += '-'
if mode & stat.S_IXUSR:
bits += 'x'
else:
bits += '-'
if mode & stat.S_IRGRP:
bits += 'r'
else:
bits += '-'
if mode & stat.S_IWGRP:
bits += 'w'
else:
bits += '-'
if mode & stat.S_IXGRP:
bits += 'x'
else:
bits += '-'
if mode & stat.S_IROTH:
bits += 'r'
else:
bits += '-'
if mode & stat.S_IWOTH:
bits += 'w'
else:
bits += '-'
if mode & stat.S_IXOTH:
bits += 'x'
else:
bits += '-'
return bits
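# Example (illustrative): permission_bits(0750) returns 'rwxr-x---', matching
# the mode column shown by `ls -l`.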
def find_repository(repository, user):
""" Return repository's root directory path, or None. """
user_base = os.path.join(os.sep, 'OpenMDAO', 'dev', user)
shared_base = os.path.join(os.sep, 'OpenMDAO', 'dev', 'shared')
if not repository:
path = find_bzr()
if path and platform.node() == 'torpedo.grc.nasa.gov' and \
not path.startswith('/OpenMDAO'):
# On OpenMDAO home use default search if not an OpenMDAO repository.
path = ''
if not path:
# Use default if this user only has one.
paths = glob.glob(os.path.join(user_base, '*'))
if len(paths) == 1:
repository = paths[0]
elif len(paths) == 0:
repository = 'working_main' # Default shared if no user repo.
else:
print 'Default repository is ambiguous:'
for path in paths:
print ' ', path
sys.exit(1)
path = find_bzr(repository)
if not path:
path = os.path.join(user_base, repository)
path = find_bzr(path)
if not path:
path = os.path.join(shared_base, repository)
path = find_bzr(path)
return path
def find_bzr(path=None):
""" Return bzr root directory path, or None. """
if not path:
path = os.getcwd()
if not os.path.exists(path):
return None
while path:
if os.path.exists(os.path.join(path, '.bzr')) or \
os.path.exists(os.path.join(path, '.bzrignore')):
return os.path.abspath(path)
else:
pth = path
path = os.path.dirname(path)
if path == pth:
return None
return None
def check_lockfile(path):
""" Return (user, modification time) of lockfile, or (None, None). """
path = os.path.join(path, LOCKFILE)
if os.path.exists(path):
try:
info = os.stat(path)
except OSError, exc:
print 'Cannot access lockfile:', exc
sys.exit(1)
else:
user = get_username(info.st_uid)
mtime = time.asctime(time.localtime(info.st_mtime))
return (user, mtime)
else:
return (None, None)
def create_lockfile(path):
""" Create lockfile. """
path = os.path.join(path, LOCKFILE)
try:
os.open(path, os.O_CREAT|os.O_EXCL|os.O_WRONLY, 0660)
except OSError, exc:
print 'Cannot create lockfile:', exc
sys.exit(1)
def remove_lockfile(path):
""" Remove lockfile. """
path = os.path.join(path, LOCKFILE)
try:
os.unlink(path)
except OSError, exc:
print 'Cannot remove lockfile:', exc
sys.exit(1)
def get_username(uid=None):
""" Return username for `uid`, or current username if `uid` is None. """
if uid:
if sys.platform == 'win32':
return 'unknown-%s' % uid
else:
return pwd.getpwuid(uid).pw_name
else:
if sys.platform == 'win32':
return os.environ['USERNAME']
else:
return pwd.getpwuid(os.getuid()).pw_name
if __name__ == '__main__':
main()
| 2.25 | 2 |
tests/test_plugin_scroll.py | thecarebot/carebot | 50 | 12788206 | <gh_stars>10-100
#!/usr/bin/env python
import app_config
app_config.DATABASE_NAME = 'carebot_test.db'
app_config.DEFAULT_CONFIG_PATH = 'tests/config_test.yml'
try:
import unittest2 as unittest
except ImportError:
import unittest
import datetime
from plugins.npr.scrolldepth import NPRScrollDepth
from util.config import Config
class TestNPRSCrollDepth(unittest.TestCase):
def test_fill_in_max(self):
test_data = [[1, 100, 3], [1, 200, 3], [1, 500, 3], [1, 200, 3]]
results = NPRScrollDepth.fill_in_max(test_data)
self.assertEqual(results[0][1], 500)
self.assertEqual(results[1][1], 500)
self.assertEqual(results[2][1], 500)
self.assertEqual(results[3][1], 200)
| 2.328125 | 2 |
bots_detector/models/base_model.py | cloutavista/bot-detector | 0 | 12788207 | <gh_stars>0
### This script will be responsible for building a model, optionally leveraging a training set ###
from abc import ABC
class Model(ABC):
    def train(self):
        raise NotImplementedError
    def predict(self, profile):
        raise NotImplementedError
src/python3/dsv/commands/find_node_id.py | MikeAT/visualizer | 6 | 12788208 | #!/usr/bin/env python3
#
# Copyright 2018-2020 Internet Corporation for Assigned Names and Numbers.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at https://mozilla.org/MPL/2.0/.
#
# Developed by Sinodun IT (sinodun.com)
#
# Find the node ID given server and node names.
#
# Usage: dsv-find-node-id <server-name> <node-name>
#
import psycopg2
description = 'find node ID from Postgres.'
def add_args(parser):
parser.add_argument('servername',
help='the server name',
metavar='SERVERNAME')
parser.add_argument('nodename',
help='the node name',
metavar='NODENAME')
def main(args, cfg):
conn = None
try:
pgcfg = cfg['postgres']
conn = psycopg2.connect(host=pgcfg['host'],
dbname=pgcfg['database'],
user=pgcfg['user'],
password=pgcfg['password'])
with conn.cursor() as cur:
cur.execute('SELECT node.id FROM node '
'INNER JOIN node_server ON node_server.id = node.server_id '
'WHERE (node_server.name=%(server)s OR '
' node_server.altname=%(server)s) '
'AND (node.name=%(node)s OR node.altname=%(node)s)',
{'server': args.servername, 'node': args.nodename})
res = cur.fetchone()
conn.close()
if res:
print(res[0])
return 0
return 1
except Exception:
if conn is not None:
conn.rollback()
conn.close()
raise
| 2.375 | 2 |
getpw/__init__.py | N0x1s/getpw | 2 | 12788209 | <reponame>N0x1s/getpw
from .main import getpw
| 1.046875 | 1 |
haven/haven_jobs/slurm_manager.py | liuzheyu1998/haven-ai | 0 | 12788210 | <filename>haven/haven_jobs/slurm_manager.py
from .. import haven_utils as hu
from .. import haven_chk as hc
import os
import time
import copy
import pandas as pd
import numpy as np
import getpass
import pprint
import requests
# NOTE: JobSpec, JobSpecResources and ApiException used below come from the
# Element AI toolkit client; the exact import path is an assumption here.
import eai_toolkit_client
from eai_toolkit_client.rest import ApiException
# Job submission
# ==============
def submit_job(api, account_id, command, job_config, workdir, savedir_logs=None):
job_spec = get_job_spec(job_config, command, savedir_logs, workdir=workdir)
job = api.v1_account_job_post(account_id=account_id, human=1, job_spec=job_spec)
job_id = job.id
return job_id
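# Usage sketch (illustrative; `api`, `account_id` and `job_config` are assumed
# to be provided by the surrounding toolkit setup):
#   job_id = submit_job(api, account_id,
#                       command="python trainval.py -e exp1",
#                       job_config=job_config,
#                       workdir="/mnt/home/project",
#                       savedir_logs="/mnt/results/exp1")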
def get_job_spec(job_config, command, savedir_logs, workdir):
_job_config = copy.deepcopy(job_config)
_job_config['workdir'] = workdir
if savedir_logs is not None:
path_log = os.path.join(savedir_logs, "logs.txt")
path_err = os.path.join(savedir_logs, "err.txt")
command_with_logs = '%s 1>%s 2>%s' % (command, path_log, path_err)
else:
command_with_logs = command
_job_config['command'] = ['/bin/bash', '-c', command_with_logs]
_job_config['resources'] = eai_toolkit_client.JobSpecResources(**_job_config['resources'])
job_spec = eai_toolkit_client.JobSpec(**_job_config)
    # Return the assembled JobSpec object
return job_spec
# Job status
# ===========
def get_job(api, job_id):
"""Get job information."""
try:
return api.v1_job_get_by_id(job_id)
except ApiException as e:
raise ValueError("job id %s not found." % job_id)
def get_jobs(api, account_id):
# account_id = hu.subprocess_call('eai account get').split('\n')[-2].split(' ')[0]
return api.v1_account_job_get(account_id=account_id,
limit=1000,
order='-created',
q="alive_recently=True").items
# return api.v1_me_job_get(limit=1000,
# order='-created',
# q="alive_recently=True").items
# Job kill
# ===========
def kill_job(api, job_id):
"""Kill a job job until it is dead."""
job = get_job(api, job_id)
if not job.alive:
print('%s is already dead' % job_id)
else:
# toolkit
api.v1_job_delete_by_id(job_id)
print('%s CANCELLING...' % job_id)
job = get_job(api, job_id)
while job.state == "CANCELLING":
time.sleep(2.0)
job = get_job(api, job_id)
print('%s now is dead.' % job_id)
| 2.015625 | 2 |
Python/gksdudaovld/MapperTest.py | ForestHouse2316/gksdudaovld | 0 | 12788211 | from gksdudaovld import KoEngMapper as Mapper
from SimpleTester import SimpleTester
tester = SimpleTester("./test_en2ko.txt", Mapper.conv_en2ko)
print(tester.start().log)
# tester = SimpleTester("./test_ko2en.txt", Mapper.conv_ko2en)
# print(tester.start().log) | 2 | 2 |
sudoku.py | pkmatador/simple_sudoku_solver | 0 | 12788212 | <gh_stars>0
# coding: utf-8
import numpy as np
from collections import defaultdict
sudoku = np.array(
[[5, 1, 7, 6, 0, 0, 0, 3, 4],
[2, 8, 9, 0, 0, 4, 0, 0, 0],
[3, 4, 6, 2, 0, 5, 0, 9, 0],
[6, 0, 2, 0, 0, 0, 0, 1, 0],
[0, 3, 8, 0, 0, 6, 0, 4, 7],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 9, 0, 0, 0, 0, 0, 7, 8],
[7, 0, 3, 4, 0, 0, 5, 6, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]]).reshape((9,9))
def solve_sudoku(sudoku, solve_multisolution: bool = False):
    '''Takes a 9x9 sudoku array and fills in the zeros with numbers 1 to 9. *Only works for single-solution sudokus.'''
    sudoku = np.array(sudoku, dtype=int).reshape((9, 9))
def subgrid(i, j) -> np.ndarray:
        '''Takes a cell's coordinates and slices out the corresponding 3x3 subgrid.'''
i, j = np.floor(np.divide((i, j), 3)).astype(int)
return sudoku[i*3:(i+1)*3,j*3:(j+1)*3]
iteration_count = 0
print("Solving sudoku... (0 to 8 x 0 to 8) = 9x9", "\n", sudoku)
while(True):
notes = defaultdict(set) #all possible solutions for one given cell
if iteration_count == 0:
cells = zip(*np.where(sudoku!=-1)) #check all cells
else:
cells = zip(*np.where(sudoku==0)) #refresh to check only the zeros to decrease needed computations
for (i, j) in cells:
iunique, icounts = np.unique(sudoku[ i, :], return_counts=True)
junique, jcounts = np.unique(sudoku[ :, j], return_counts=True)
gunique, gcounts = np.unique(subgrid(i, j), return_counts=True) #g=grid
if iteration_count == 0:
if (np.any(icounts[1:]>1) or np.any(jcounts[1:]>1) or np.any(gcounts[1:]>1)): #[1:] excludes the zero(0) count
print("Invalid sudoku! Repeating value found near ({}, {})...".format(i, j))
return {'solved': False, 'result': sudoku, 'notes': notes}
if sudoku[i,j] == 0:
valids = set(range(1, 9+1)) - set().union(iunique).union(junique).union(gunique)
if len(valids) == 0:
print("Invalid sudoku! Unsolvable cell found ({}, {})...".format(i, j))
return {'solved': False, 'result': sudoku, 'notes': notes}
elif len(valids) > 0:
for valid in valids: notes[(i, j)].add(valid)
solved_count = 0
for (i, j) in list(t for t in notes.keys() if len(notes[t]) == 1):
sudoku[(i, j)] = list(notes.pop((i, j)))[0]
solved_count += 1
iteration_count += 1
if notes != {} or np.any(sudoku == 0):
if solved_count == 0:
print("Unsolvable/multisolution sudoku!")
print(sudoku, "\n", "Notes: {}".format(notes))
if solve_multisolution == False:
return {'solved': False, 'result': sudoku, 'notes': notes}
else:
print("Making up a solution...")
raise Exception("Oh... There's no code to solve it here :(")
print("Iteration no. {} completed. (Solved {} cells!)".format(iteration_count, solved_count))
else:
print("Solved!")
print(sudoku)
return {'solved': True, 'result': sudoku, 'notes': notes}
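if __name__ == "__main__":
    # Minimal demo using the example puzzle defined at the top of this file;
    # solve_multisolution is left at its default (False).
    solve_sudoku(sudoku)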
| 3.609375 | 4 |
tests/test_treeview.py | vhn0912/pygubu | 0 | 12788213 | <gh_stars>0
# encoding: utf8
import os
import sys
import unittest
import tkinter as tk
import tkinter.ttk as ttk
pygubu_basedir = os.path.abspath(os.path.dirname(
os.path.dirname(os.path.realpath(sys.argv[0]))))
if pygubu_basedir not in sys.path:
sys.path.insert(0, pygubu_basedir)
import pygubu
import support
class TestTreeview(unittest.TestCase):
def setUp(self):
support.root_deiconify()
xmldata = """<?xml version="1.0" ?>
<interface>
<object class="ttk.Frame" id="mainwindow">
<property name="height">250</property>
<property name="width">250</property>
<layout>
<property name="sticky">nesw</property>
<property name="row">0</property>
<property name="column">0</property>
<property name="propagate">True</property>
</layout>
<child>
<object class="ttk.Treeview" id="treeview">
<property name="selectmode">browse</property>
<layout>
<property name="column">0</property>
<property name="row">0</property>
<property name="propagate">True</property>
</layout>
<child>
<object class="ttk.Treeview.Column" id="treecolumn">
<property name="tree_column">True</property>
<property name="visible">True</property>
<property name="text">Tree</property>
<property name="command">on_treecolumn_click</property>
<property name="heading_anchor">w</property>
<property name="column_anchor">w</property>
<property name="minwidth">200</property>
<property name="stretch">True</property>
<property name="width">200</property>
</object>
</child>
<child>
<object class="ttk.Treeview.Column" id="column1">
<property name="tree_column">False</property>
<property name="visible">True</property>
<property name="text">Column 1</property>
<property name="heading_anchor">center</property>
<property name="column_anchor">w</property>
<property name="minwidth">200</property>
<property name="stretch">False</property>
<property name="width">200</property>
</object>
</child>
<child>
<object class="ttk.Treeview.Column" id="hidden_column">
<property name="tree_column">False</property>
<property name="visible">False</property>
<property name="text">hidden</property>
<property name="heading_anchor">w</property>
<property name="column_anchor">w</property>
<property name="minwidth">20</property>
<property name="stretch">True</property>
<property name="width">200</property>
</object>
</child>
</object>
</child>
</object>
</interface>
"""
self.builder = builder = pygubu.Builder()
builder.add_from_string(xmldata)
self.widget = builder.get_object('treeview')
self.widget.wait_visibility()
def tearDown(self):
support.root_withdraw()
def test_class(self):
self.assertIsInstance(self.widget, ttk.Treeview)
self.widget.destroy()
def test_selectmode(self):
expected = 'browse'
value = str(self.widget.cget('selectmode'))
self.assertEqual(expected, value)
self.widget.destroy()
def test_columns(self):
columns = ('column1', 'hidden_column')
wcolumns = self.widget.cget('columns')
self.assertEqual(columns, wcolumns)
dcolumns = ('column1',)
wdcolumns = self.widget.cget('displaycolumns')
self.assertEqual(dcolumns, wdcolumns)
self.widget.destroy()
def test_tree_heading(self):
wh = self.widget.heading('#0')
heading = {
'text': 'Tree',
'anchor': 'w',
}
for k, v in heading.items():
self.assertEqual(v, wh[k])
self.widget.destroy()
def test_tree_column(self):
wc = self.widget.column('#0')
column = {
'anchor': 'w',
'stretch': 1,
'width': 200,
'minwidth': 200,
}
for k, v in column.items():
self.assertEqual(v, wc[k])
self.widget.destroy()
def test_command_dict(self):
success = []
def on_treecolumn_click():
success.append(1)
cbdic = {'on_treecolumn_click': on_treecolumn_click}
self.builder.connect_callbacks(cbdic)
x, y = self.widget.winfo_x(), self.widget.winfo_y()
self.widget.event_generate('<ButtonPress-1>', x=x + 5, y=y + 5)
self.widget.event_generate('<ButtonRelease-1>', x=x + 5, y=y + 5)
self.widget.update()
self.assertTrue(success)
self.widget.destroy()
def test_command_self(self):
success = []
class AnObject:
def on_treecolumn_click(self):
success.append(1)
cbobj = AnObject()
self.builder.connect_callbacks(cbobj)
x, y = self.widget.winfo_x(), self.widget.winfo_y()
self.widget.event_generate('<ButtonPress-1>', x=x + 5, y=y + 5)
self.widget.event_generate('<ButtonRelease-1>', x=x + 5, y=y + 5)
self.widget.update()
self.assertTrue(success)
self.widget.destroy()
if __name__ == '__main__':
unittest.main()
| 2.34375 | 2 |
hubblestack/__init__.py | instructure/hubble | 2 | 12788214 | <filename>hubblestack/__init__.py
__version__ = '4.0.0'
__buildinfo__ = {'branch': 'BRANCH_NOT_SET', 'last_commit': 'COMMIT_NOT_SET'} | 1.3125 | 1 |
nestingnote/test_nestedlist.py | woodsonmiles/nested-notes | 0 | 12788215 | <reponame>woodsonmiles/nested-notes
#!/usr/bin/python3
import unittest
from nestingnote.nestedlist import NestedList, NullNestedList
from typing import List
class TestNestedList(unittest.TestCase):
def test_instantiation(self):
root = NestedList()
self.assertTrue(isinstance(root, NestedList))
child = root.insert_child()
self.assertTrue(isinstance(child, NestedList))
sibling = root.insert_sibling()
self.assertTrue(isinstance(sibling, NestedList))
def test_row_iter(self):
root = NestedList(["1", "2", "3"])
result = ''
for field in root.row_iter:
result += field
target = "1 2 3"
self.assertEqual(result, target)
def test_delete_simple(self):
root = NestedList(["root"])
root.insert_sibling(["sib"])
del root.sibling
target = NestedList(["root"])
self.assertEqual(root, target)
root.insert_child(["child"])
del root.child
self.assertEqual(root, target)
root.insert_sibling(["sib1"])
root.insert_sibling(["sib2"])
del root.sibling
target.insert_sibling(["sib1"])
self.assertEqual(root, target)
def test_delete_sibling(self):
one = NestedList(["one"])
two = one.insert_sibling(["two"])
three = two.insert_child(["three"])
four = three.insert_sibling(["four"])
five = two.insert_sibling(["five"])
target = "one\n" \
+ "two\n" \
+ " three\n" \
+ " four\n" \
+ "five\n"
actual = str(one)
self.assertEqual(target, actual)
del one.sibling
target = "one\n" \
+ " three\n" \
+ " four\n" \
+ "five\n"
actual = str(one)
self.assertEqual(target, actual)
def test_str(self):
root = NestedList(["123", "1"])
target = "123 1\n"
self.assertEqual(str(root), target)
root.insert_child(["1234"])
target += " 1234\n"
self.assertEqual(str(root), target)
root.insert_sibling(["12"])
target += "12\n"
def test_str_complex(self):
root = NestedList(fields=["root"])
child = root.insert_child(texts=["child"])
child2 = child.insert_sibling(["child2"])
grandchild = child2.insert_child(["grandchild"])
child3 = child2.insert_sibling(["child3"])
child3.insert_child(["grandchild2"])
target = "root" \
+ "\n child" \
+ "\n child2" \
+ "\n grandchild" \
+ "\n child3" \
+ "\n grandchild2" \
+ "\n"
self.__comp_str_to_node(root, target)
def test_columns(self):
root = NestedList(["123", "1"])
target = "123 1\n"
self.__comp_str_to_node(root, target)
root.insert_sibling(["1234"])
target = "123 1\n"
target += "1234\n"
self.__comp_str_to_node(root, target)
del root.sibling
target = "123 1\n"
self.__comp_str_to_node(root, target)
def __comp_str_to_node(self, root: NestedList, target: str):
actual = str(root)
self.assertEqual(actual, target)
def test_get_count(self):
root = NestedList()
self.assertEqual(root.count(), 1)
sibling = root.insert_sibling()
self.assertEqual(root.count(), 2)
child = sibling.insert_child()
self.assertEqual(sibling.count(), 2)
self.assertEqual(root.count(), 3)
grandchild = child.insert_child()
self.assertEqual(root.count(), 4)
grandchild2 = grandchild.insert_sibling()
self.assertEqual(root.count(), 5)
grandchild.insert_sibling()
self.assertEqual(root.count(), 6)
child.insert_sibling()
self.assertEqual(root.count(), 7)
def test_new_node(self):
root = NestedList()
child = root.insert_child()
grandchild = child.insert_child()
child2 = child.insert_sibling()
self.assertIs(root.child, child)
self.assertIs(child.sibling, child2)
self.assertIs(root.child.child, grandchild)
def test_get_node(self):
root = NestedList()
child = root.insert_child()
grandchild = child.insert_child()
child2 = child.insert_sibling()
"""
root
child
grandchild
child2
"""
self.assertIs(root.get_node(0), root)
self.assertIs(root.get_node(1), child)
self.assertIs(root.get_node(2), grandchild)
self.assertIs(root.get_node(3), child2)
grandchild2 = grandchild.insert_sibling()
child3 = child2.insert_sibling()
greatgrandchild = grandchild.insert_child()
"""
root
child
grandchild
greatgrandchild
grandchild2
child2
child3
"""
self.assertIs(root.get_node(0), root)
self.assertIs(root.get_node(1), child)
self.assertIs(root.get_node(2), grandchild)
self.assertIs(root.get_node(3), greatgrandchild)
self.assertIs(root.get_node(4), grandchild2)
self.assertIs(root.get_node(5), child2)
self.assertIs(root.get_node(6), child3)
def __test_iter_helper(self, root: NestedList, targets: List[NestedList]):
for index, actual in enumerate(root):
self.assertIs(actual, targets[index])
def test_iter(self):
root = NestedList()
# breakpoint()
self.__test_iter_helper(root, [root])
child = root.insert_child()
self.__test_iter_helper(root, [root, child])
child2 = child.insert_sibling()
self.__test_iter_helper(root, [root, child, child2])
grandchild = child2.insert_child()
self.__test_iter_helper(root, [root, child, child2, grandchild])
child3 = child2.insert_sibling()
self.__test_iter_helper(root, [root, child, child2, grandchild, child3])
greatgrandchild = grandchild.insert_child()
self.__test_iter_helper(root, [root, child, child2, grandchild, greatgrandchild, child3])
greatgreatgrandchild = greatgrandchild.insert_child()
self.__test_iter_helper(root, [root, child, child2, grandchild, greatgrandchild, greatgreatgrandchild, child3])
sibling2 = root.insert_sibling()
sibling = root.insert_sibling()
self.__test_iter_helper(root, [root, child, child2, grandchild, greatgrandchild, greatgreatgrandchild, child3,
sibling, sibling2])
def test_eq_simple(self):
root = NestedList(fields=["01234"])
self.assertEqual(root, root)
other = NestedList(fields=["01234"])
self.assertEqual(root, other)
def test_eq(self):
root = NestedList(fields=["01234"])
child = root.insert_child(texts=["012", "0", "0"])
child.insert_child(["012"])
child.insert_sibling(["0", "0", "0123", ""])
root_copy = NestedList(fields=["01234"])
child_copy = root_copy.insert_child(texts=["012", "0", "0"])
child_copy.insert_child(["012"])
child_copy.insert_sibling(["0", "0", "0123", ""])
self.assertEqual(root, root_copy)
root_dif = NestedList(fields=["01234"])
child_dif = root_dif.insert_child(texts=["012", "0", "0"])
child_dif.insert_child(["012"])
child_dif.insert_sibling(["0", "0", "0123", "9"])
self.assertNotEqual(root, root_dif)
def test_unindent_simple(self):
root = NestedList(["root"])
child = root.insert_child(["child"])
child.unindent(root)
target = NestedList(["root"])
target.insert_sibling(["child"])
self.assertEqual(root, target)
def test_unindent(self):
root = NestedList(["root"])
child = root.insert_child(["child"])
child.insert_child(["grandchild"])
child.unindent(root)
target = NestedList(["root"])
sibling = target.insert_sibling(["child"])
sibling.insert_child(["grandchild"])
self.assertEqual(root, target)
def test_unindent_complex(self):
one = NestedList(fields=["one"])
two = one.insert_child(texts=["two"])
three = two.insert_sibling(["three"])
three.insert_child(["four"])
five = three.insert_sibling(["five"])
five.insert_child(["six"])
target = "one" \
+ "\n two" \
+ "\n three" \
+ "\n four" \
+ "\n five" \
+ "\n six" \
+ "\n"
self.__comp_str_to_node(one, target)
two.unindent(parent=one)
target = "one" \
+ "\ntwo" \
+ "\n three" \
+ "\n four" \
+ "\n five" \
+ "\n six" \
+ "\n"
self.__comp_str_to_node(one, target)
# cleanup old references
del two
del three
del five
two = one.sibling
three = two.child
three.unindent(parent=two)
target = "one" \
+ "\ntwo" \
+ "\nthree" \
+ "\n four" \
+ "\n five" \
+ "\n six" \
+ "\n"
self.__comp_str_to_node(one, target)
# cleanup
del two
del three
three = one.sibling.sibling
five = three.child.sibling
five.unindent(parent=three)
target = "one" \
+ "\ntwo" \
+ "\nthree" \
+ "\n four" \
+ "\nfive" \
+ "\n six" \
+ "\n"
self.__comp_str_to_node(one, target)
def test_serialization_simple(self):
one = NestedList(["one", "two", "three"])
pickle = one.serialize()
copy = NestedList.deserialize(pickle)
self.assertEqual(str(one), str(copy))
self.assertEqual(one, copy)
def test_serialization_complex(self):
root = NestedList(["one", "two", "three"])
child = root.insert_child(["child1", "child2"])
grandchild = child.insert_child(["gc1"])
grandchild.insert_sibling(["gc2", "gc2"])
sibling = root.insert_sibling()
sibling.insert_sibling(["sib2", "sib2"])
pickle = root.serialize()
copy = NestedList.deserialize(pickle)
self.assertEqual(str(root), str(copy))
self.assertEqual(root, copy)
if __name__ == '__main__':
unittest.main()
| 3.46875 | 3 |
tests/unit/test_sitri.py | Egnod/sitri | 11 | 12788216 | <gh_stars>10-100
import pytest
from sitri import Sitri
def test_sitri_init():
with pytest.raises(TypeError):
Sitri()
def test_sitri_get_config(test_sitri, monkeypatch):
monkeypatch.setenv("TEST_KEY1", "1")
monkeypatch.setenv("TEST_KEY2", "2")
assert test_sitri.get("key1") == "1"
assert test_sitri.get("key2") == "2"
assert not test_sitri.get("key3")
monkeypatch.undo()
| 2.234375 | 2 |
easymunk/pygame/draw_options.py | fabiommendes/easymunk | 1 | 12788217 | <filename>easymunk/pygame/draw_options.py
from typing import Sequence, Tuple
import pygame
from ..drawing import DrawOptions as DrawOptionsBase, Color
from ..linalg import Vec2d
class DrawOptions(DrawOptionsBase):
_COLOR = DrawOptionsBase._COLOR
surface: pygame.surface.Surface
def __init__(self, surface=None, flip_y=False) -> None:
"""Draw a easymunk.Space on a pygame.Surface object.
Typical usage::
>>> import easymunk
>>> surface = pygame.Surface((10,10))
>>> space = easymunk.Space()
>>> options = easymunk.pygame.DrawOptions(surface)
>>> space.debug_draw(options)
You can control the color of a shape by setting shape.color to the color
you want it drawn in::
>>> c = easymunk.Circle(10, color=pygame.Color("pink"))
See pygame_util.demo.py for a full example
        Since pygame uses a coordinate system where y points down (in contrast
to many other cases), you either have to make the physics simulation
with Pymunk also behave in that way, or flip everything when you draw.
The easiest is probably to just make the simulation behave the same
way as Pygame does. In that way all coordinates used are in the same
orientation and easy to reason about::
>>> space = mk.Space()
>>> space.gravity = (0, -1000)
>>> body = space.Body()
>>> body.position = (0, 0) # will be positioned in the top left corner
>>> space.debug_draw(options)
        To flip the drawing it's possible to set the ``flip_y`` constructor
        argument to True. Then the pygame drawing will flip
the simulation upside down before drawing::
>>> body = mk.Body()
>>> body.position = (0, 0)
>>> # Body will be position in bottom left corner
:Parameters:
surface : pygame.Surface
Surface that the objects will be drawn on
"""
if surface is None and pygame.display.get_init():
self.surface = pygame.display.get_surface()
elif surface is None:
self.surface = pygame.display.set_mode((800, 600))
else:
self.surface = surface
self.flip_y: bool = flip_y
super(DrawOptions, self).__init__()
def draw_circle(
self,
pos: Vec2d,
radius: float,
angle: float = 0.0,
outline_color: Color = _COLOR,
fill_color: Color = _COLOR,
) -> None:
p = self.to_pygame(pos)
pygame.draw.circle(self.surface, fill_color, p, round(radius), 0)
circle_edge = pos + Vec2d(radius, 0).rotated(angle)
p2 = self.to_pygame(circle_edge)
line_r = 2 if radius > 20 else 1
pygame.draw.lines(self.surface, outline_color, False, [p, p2], line_r)
def draw_segment(self, a: Vec2d, b: Vec2d, color: Color = _COLOR) -> None:
p1 = self.to_pygame(a)
p2 = self.to_pygame(b)
pygame.draw.aalines(self.surface, color, False, [p1, p2])
def draw_fat_segment(
self,
a: Tuple[float, float],
b: Tuple[float, float],
radius: float = 0.0,
outline_color: Color = _COLOR,
fill_color: Color = _COLOR,
) -> None:
p1 = self.to_pygame(a)
p2 = self.to_pygame(b)
r = round(max(1, radius * 2))
pygame.draw.lines(self.surface, fill_color, False, [p1, p2], r)
if r > 2:
orthog = [abs(p2[1] - p1[1]), abs(p2[0] - p1[0])]
if orthog[0] == 0 and orthog[1] == 0:
return
scale = radius / (orthog[0] * orthog[0] + orthog[1] * orthog[1]) ** 0.5
orthog[0] = round(orthog[0] * scale)
orthog[1] = round(orthog[1] * scale)
points = [
(p1[0] - orthog[0], p1[1] - orthog[1]),
(p1[0] + orthog[0], p1[1] + orthog[1]),
(p2[0] + orthog[0], p2[1] + orthog[1]),
(p2[0] - orthog[0], p2[1] - orthog[1]),
]
pygame.draw.polygon(self.surface, fill_color, points)
pygame.draw.circle(
self.surface,
fill_color,
(round(p1[0]), round(p1[1])),
round(radius),
)
pygame.draw.circle(
self.surface,
fill_color,
(round(p2[0]), round(p2[1])),
round(radius),
)
def draw_polygon(
self,
verts: Sequence[Tuple[float, float]],
radius: float = 0.0,
outline_color: Color = _COLOR,
fill_color: Color = _COLOR,
) -> None:
ps = [self.to_pygame(v) for v in verts]
ps += [ps[0]]
pygame.draw.polygon(self.surface, fill_color, ps)
if radius > 0:
for i in range(len(verts)):
a = verts[i]
b = verts[(i + 1) % len(verts)]
self.draw_fat_segment(a, b, radius, outline_color, outline_color)
def draw_dot(self, size: float, pos: Tuple[float, float], color: Color) -> None:
p = self.to_pygame(pos)
pygame.draw.circle(self.surface, color, p, round(size), 0)
def mouse_pos(self) -> Vec2d:
"""Get position of the mouse pointer in pymunk coordinates."""
p = pygame.mouse.get_pos()
return self.from_pygame(p)
def to_pygame(self, p: Tuple[float, float], surface=None) -> Vec2d:
"""Convenience method to convert pymunk coordinates to pygame surface
local coordinates.
        Note that when flip_y is False, this function won't actually do
        anything except convert the point to integers.
"""
if self.flip_y:
surface = surface or self.surface
return Vec2d(round(p[0]), surface.get_height() - round(p[1]))
else:
return Vec2d(round(p[0]), round(p[1]))
def from_pygame(self, p: Tuple[float, float]) -> Vec2d:
"""Convenience method to convert pygame surface local coordinates to
pymunk coordinates
"""
return self.to_pygame(p)
| 4.34375 | 4 |
macstats/cpu.py | cloverstd/macstats-python | 1 | 12788218 | <filename>macstats/cpu.py<gh_stars>1-10
#!/usr/bin/env python
# encoding: utf-8
from . import _smc
class CPU(object):
    @property
    def temp(self):
        # CPU temperature in degrees Celsius
        return _smc.temp()
    @property
    def temp_f(self):
        # CPU temperature in degrees Fahrenheit
        return (_smc.temp() * (9.0 / 5.0)) + 32.0
cpu = CPU()
| 2.71875 | 3 |
src/call.py | Ewpratten/dirobium | 0 | 12788219 | <reponame>Ewpratten/dirobium
import os
import sys
import registers as registers
sys.path.append(os.getcwd() + "/devices")
def call(callid):
if str(callid) == "0":
exit(0)
if str(callid) == "11":
print(registers.stack[registers.read_only[0] - 1])
else:
calls[str(callid)].call(registers.general, registers.extended)
try:
from a.src import __main__ as m1
except:
m1 = ""
try:
from b.src import __main__ as m2
except:
m2 = ""
try:
from c.src import __main__ as m3
except:
m3 = ""
try:
from d.src import __main__ as m4
except:
m4 = ""
try:
from e.src import __main__ as m5
except:
m5 = ""
try:
from f.src import __main__ as m6
except:
m6 = ""
try:
from g.src import __main__ as m7
except:
m7 = ""
try:
from h.src import __main__ as m8
except:
m8 = ""
try:
from i.src import __main__ as m9
except:
m9 = ""
try:
from j.src import __main__ as m10
except:
m10 = ""
calls = {
"1":m1,
"2":m2,
"3":m3,
"4":m4,
"5":m5,
"6":m6,
"7":m7,
"8":m8,
"9":m9,
"10":m10
}
| 2.140625 | 2 |
book_figures/chapter1/fig_NASA_atlas.py | autocorr/astroML | 3 | 12788220 | <reponame>autocorr/astroML
"""
NASA Atlas dataset example
--------------------------
"""
# Author: <NAME>
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from astroML.datasets import fetch_nasa_atlas
from astroML.plotting.tools import devectorize_axes
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
data = fetch_nasa_atlas()
RA = data['RA']
DEC = data['DEC']
# shift RA into [-180, 180] and convert both coordinates from degrees to radians
RA -= 180
RA *= np.pi / 180
DEC *= np.pi / 180
fig = plt.figure(figsize=(5, 3.75))
ax = plt.axes(projection='mollweide')
plt.scatter(RA, DEC, s=1, lw=0, c=data['Z'], cmap=plt.cm.copper)
plt.grid(True)
# devectorize: otherwise the resulting pdf figure becomes very large
#devectorize_axes(ax, dpi=400)
plt.title('NASA Atlas Galaxy Locations')
cb = plt.colorbar(cax=plt.axes([0.05, 0.1, 0.9, 0.05]),
orientation='horizontal',
ticks=np.linspace(0, 0.05, 6))
cb.set_label('redshift')
plt.show()
| 2.859375 | 3 |
tests/components/nanoleaf/test_config_flow.py | hnrkp/core | 1 | 12788221 | """Test the Nanoleaf config flow."""
from unittest.mock import patch
from pynanoleaf import InvalidToken, NotAuthorizingNewTokens, Unavailable
from homeassistant import config_entries
from homeassistant.components.nanoleaf.const import DOMAIN
from homeassistant.const import CONF_HOST, CONF_TOKEN
from homeassistant.core import HomeAssistant
TEST_NAME = "Canvas ADF9"
TEST_HOST = "192.168.0.100"
TEST_TOKEN = "<KEY>"
TEST_OTHER_TOKEN = "<KEY>"
TEST_DEVICE_ID = "5E:2E:EA:XX:XX:XX"
TEST_OTHER_DEVICE_ID = "5E:2E:EA:YY:YY:YY"
async def test_user_unavailable_user_step(hass: HomeAssistant) -> None:
"""Test we handle Unavailable errors when host is not available in user step."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.nanoleaf.config_flow.Nanoleaf.authorize",
side_effect=Unavailable("message"),
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_HOST: TEST_HOST,
},
)
assert result2["type"] == "form"
assert result2["step_id"] == "user"
assert result2["errors"] == {"base": "cannot_connect"}
assert not result2["last_step"]
async def test_user_unavailable_link_step(hass: HomeAssistant) -> None:
"""Test we abort if the device becomes unavailable in the link step."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.nanoleaf.config_flow.Nanoleaf.authorize",
return_value=None,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_HOST: TEST_HOST,
},
)
assert result2["type"] == "form"
assert result2["step_id"] == "link"
with patch(
"homeassistant.components.nanoleaf.config_flow.Nanoleaf.authorize",
side_effect=Unavailable("message"),
):
result3 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{},
)
assert result3["type"] == "abort"
assert result3["reason"] == "cannot_connect"
async def test_user_unavailable_setup_finish(hass: HomeAssistant) -> None:
"""Test we abort if the device becomes unavailable during setup_finish."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.nanoleaf.config_flow.Nanoleaf.authorize",
return_value=None,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_HOST: TEST_HOST,
},
)
assert result2["type"] == "form"
assert result2["step_id"] == "link"
with patch(
"homeassistant.components.nanoleaf.config_flow.Nanoleaf.authorize",
return_value=None,
), patch(
"homeassistant.components.nanoleaf.config_flow.pynanoleaf_get_info",
side_effect=Unavailable("message"),
):
result3 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{},
)
assert result3["type"] == "abort"
assert result3["reason"] == "cannot_connect"
async def test_user_not_authorizing_new_tokens(hass: HomeAssistant) -> None:
"""Test we handle NotAuthorizingNewTokens errors."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] is None
assert not result["last_step"]
assert result["step_id"] == "user"
with patch(
"homeassistant.components.nanoleaf.config_flow.Nanoleaf.authorize",
side_effect=NotAuthorizingNewTokens("message"),
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_HOST: TEST_HOST,
},
)
assert result2["type"] == "form"
assert result2["errors"] is None
assert result2["step_id"] == "link"
result3 = await hass.config_entries.flow.async_configure(
result["flow_id"],
)
assert result3["type"] == "form"
assert result3["errors"] is None
assert result3["step_id"] == "link"
with patch(
"homeassistant.components.nanoleaf.config_flow.Nanoleaf.authorize",
side_effect=NotAuthorizingNewTokens("message"),
):
result4 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{},
)
assert result4["type"] == "form"
assert result4["step_id"] == "link"
assert result4["errors"] == {"base": "not_allowing_new_tokens"}
async def test_user_exception(hass: HomeAssistant) -> None:
"""Test we handle Exception errors."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.nanoleaf.config_flow.Nanoleaf.authorize",
side_effect=Exception,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_HOST: TEST_HOST,
},
)
assert result2["type"] == "form"
assert result2["step_id"] == "user"
assert result2["errors"] == {"base": "unknown"}
assert not result2["last_step"]
with patch(
"homeassistant.components.nanoleaf.config_flow.Nanoleaf.authorize",
return_value=None,
):
result3 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_HOST: TEST_HOST,
},
)
assert result3["step_id"] == "link"
with patch(
"homeassistant.components.nanoleaf.config_flow.Nanoleaf.authorize",
side_effect=Exception,
):
result4 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{},
)
assert result4["type"] == "form"
assert result4["step_id"] == "link"
assert result4["errors"] == {"base": "unknown"}
with patch(
"homeassistant.components.nanoleaf.config_flow.Nanoleaf.authorize",
return_value=None,
), patch(
"homeassistant.components.nanoleaf.config_flow.pynanoleaf_get_info",
side_effect=Exception,
):
result5 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{},
)
assert result5["type"] == "abort"
assert result5["reason"] == "unknown"
async def test_zeroconf_discovery(hass: HomeAssistant) -> None:
"""Test zeroconfig discovery flow init."""
zeroconf = "_nanoleafms._tcp.local"
with patch(
"homeassistant.components.nanoleaf.config_flow.pynanoleaf_get_info",
return_value={"name": TEST_NAME},
), patch(
"homeassistant.components.nanoleaf.config_flow.load_json",
return_value={},
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data={
"host": TEST_HOST,
"name": f"{TEST_NAME}.{zeroconf}",
"type": zeroconf,
"properties": {"id": TEST_DEVICE_ID},
},
)
assert result["type"] == "form"
assert result["step_id"] == "link"
async def test_homekit_discovery_link_unavailable(
hass: HomeAssistant,
) -> None:
"""Test homekit discovery and abort if device is unavailable."""
homekit = "_hap._tcp.local"
with patch(
"homeassistant.components.nanoleaf.config_flow.pynanoleaf_get_info",
return_value={"name": TEST_NAME},
), patch(
"homeassistant.components.nanoleaf.config_flow.load_json",
return_value={},
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_HOMEKIT},
data={
"host": TEST_HOST,
"name": f"{TEST_NAME}.{homekit}",
"type": homekit,
"properties": {"id": TEST_DEVICE_ID},
},
)
assert result["type"] == "form"
assert result["step_id"] == "link"
context = next(
flow["context"]
for flow in hass.config_entries.flow.async_progress()
if flow["flow_id"] == result["flow_id"]
)
assert context["title_placeholders"] == {"name": TEST_NAME}
assert context["unique_id"] == TEST_NAME
with patch(
"homeassistant.components.nanoleaf.config_flow.Nanoleaf.authorize",
side_effect=Unavailable("message"),
):
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == "abort"
assert result["reason"] == "cannot_connect"
async def test_import_config(hass: HomeAssistant) -> None:
"""Test configuration import."""
with patch(
"homeassistant.components.nanoleaf.config_flow.pynanoleaf_get_info",
return_value={"name": TEST_NAME},
), patch(
"homeassistant.components.nanoleaf.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={CONF_HOST: TEST_HOST, CONF_TOKEN: TEST_TOKEN},
)
assert result["type"] == "create_entry"
assert result["title"] == TEST_NAME
assert result["data"] == {
CONF_HOST: TEST_HOST,
CONF_TOKEN: TEST_TOKEN,
}
await hass.async_block_till_done()
assert len(mock_setup_entry.mock_calls) == 1
async def test_import_config_invalid_token(hass: HomeAssistant) -> None:
"""Test configuration import with invalid token."""
with patch(
"homeassistant.components.nanoleaf.config_flow.pynanoleaf_get_info",
side_effect=InvalidToken("message"),
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={CONF_HOST: TEST_HOST, CONF_TOKEN: TEST_TOKEN},
)
assert result["type"] == "abort"
assert result["reason"] == "invalid_token"
async def test_import_last_discovery_integration_host_zeroconf(
hass: HomeAssistant,
) -> None:
"""
Test discovery integration import from < 2021.4 (host) with zeroconf.
Device is last in Nanoleaf config file.
"""
zeroconf = "_nanoleafapi._tcp.local"
with patch(
"homeassistant.components.nanoleaf.config_flow.load_json",
return_value={TEST_HOST: {"token": TEST_TOKEN}},
), patch(
"homeassistant.components.nanoleaf.config_flow.pynanoleaf_get_info",
return_value={"name": TEST_NAME},
), patch(
"homeassistant.components.nanoleaf.config_flow.os.remove",
return_value=None,
) as mock_remove, patch(
"homeassistant.components.nanoleaf.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data={
"host": TEST_HOST,
"name": f"{TEST_NAME}.{zeroconf}",
"type": zeroconf,
"properties": {"id": TEST_DEVICE_ID},
},
)
assert result["type"] == "create_entry"
assert result["title"] == TEST_NAME
assert result["data"] == {
CONF_HOST: TEST_HOST,
CONF_TOKEN: TEST_TOKEN,
}
mock_remove.assert_called_once()
await hass.async_block_till_done()
assert len(mock_setup_entry.mock_calls) == 1
async def test_import_not_last_discovery_integration_device_id_homekit(
hass: HomeAssistant,
) -> None:
"""
Test discovery integration import from >= 2021.4 (device_id) with homekit.
Device is not the only one in the Nanoleaf config file.
"""
homekit = "_hap._tcp.local"
with patch(
"homeassistant.components.nanoleaf.config_flow.load_json",
return_value={
TEST_DEVICE_ID: {"token": TEST_TOKEN},
TEST_OTHER_DEVICE_ID: {"token": TEST_OTHER_TOKEN},
},
), patch(
"homeassistant.components.nanoleaf.config_flow.pynanoleaf_get_info",
return_value={"name": TEST_NAME},
), patch(
"homeassistant.components.nanoleaf.config_flow.save_json",
return_value=None,
) as mock_save_json, patch(
"homeassistant.components.nanoleaf.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_HOMEKIT},
data={
"host": TEST_HOST,
"name": f"{TEST_NAME}.{homekit}",
"type": homekit,
"properties": {"id": TEST_DEVICE_ID},
},
)
assert result["type"] == "create_entry"
assert result["title"] == TEST_NAME
assert result["data"] == {
CONF_HOST: TEST_HOST,
CONF_TOKEN: TEST_TOKEN,
}
mock_save_json.assert_called_once()
await hass.async_block_till_done()
assert len(mock_setup_entry.mock_calls) == 1
| 2.21875 | 2 |
w3resource/BeautifulSoup/BeautifulSoup08.py | DanielPascualSenties/pythonw3 | 0 | 12788222 | import urllib
from bs4 import BeautifulSoup
from urllib import request
page = urllib.request.urlopen('https://www.python.org')
st = page.read()
soup = BeautifulSoup(st, 'html.parser')
print("Text in the first a tag:")
li = soup.find_all("li")
for i in li:
a = i.find('a')
print(a.attrs['href'])
| 3.421875 | 3 |
app/models/commons.py | medsci-tech/mime_analysis_flask_2017 | 0 | 12788223 | <reponame>medsci-tech/mime_analysis_flask_2017
from . import db
class Disease(db.Model):
__tablename__ = 'disease'
id = db.Column(db.Integer(), primary_key=True)
name_ch = db.Column('name_ch', db.String(30), nullable=False)
class Region(db.Model):
__tablename__ = 'region'
id = db.Column(db.Integer(), primary_key=True)
province = db.Column('province', db.String(30), nullable=False)
city = db.Column('city', db.String(30), nullable=False)
longitude = db.Column('longitude', db.Numeric(12, 6), nullable=True)
latitude = db.Column('latitude', db.Numeric(12, 6), nullable=True)
province_id = db.Column(db.Integer(), db.ForeignKey('province.id'))
class Province(db.Model):
__tablename__ = 'province'
id = db.Column(db.Integer(), primary_key=True)
province = db.Column('province', db.String(30), nullable=False)
class AgeGroup(db.Model):
__tablename__ = 'age_group'
id = db.Column(db.Integer(), primary_key=True)
range = db.Column('range', db.String(30), nullable=False)
range_description = db.Column('range_description', db.String(30), nullable=False)
age_index = db.Column('age_index', db.Integer(), nullable=False)
class HospitalLevel(db.Model):
__tablename__ = 'hospital_level'
id = db.Column(db.Integer(), primary_key=True)
level = db.Column('level', db.String(30), nullable=False)
level_description = db.Column('level_description', db.String(30), nullable=False)
class HospitalOffice(db.Model):
__tablename__ = 'hospital_office'
id = db.Column(db.Integer(), primary_key=True)
office = db.Column('office', db.String(30), nullable=False)
office_description = db.Column('office_description', db.String(30), nullable=False)
class DoctorTitle(db.Model):
__tablename__ = 'doctor_title'
id = db.Column(db.Integer(), primary_key=True)
title = db.Column('title', db.String(30), nullable=False)
title_description = db.Column('title_description', db.String(30), nullable=False)
class Hospital(db.Model):
__tablename__ = 'hospital'
id = db.Column(db.Integer(), primary_key=True)
title = db.Column('hospital_name', db.String(50), nullable=False)
region_id = db.Column(db.Integer(), db.ForeignKey('region.id'))
hospital_level_id = db.Column(db.Integer(), db.ForeignKey('hospital_level.id'))
| 2.375 | 2 |
ietf/test/mirror-test.py | lafrenierejm/ietf-cli | 0 | 12788224 | #!/usr/bin/env python3
import os
import sys
import unittest
from ietf.cmd.mirror import assemble_rsync
class TestAssembleRsync(unittest.TestCase):
boilerplate = ['rsync', '-az', '--delete-during']
rsync_no_path = (('charter', boilerplate +
['ietf.org::everything-ftp/ietf/']),
('conflict', boilerplate +
['rsync.ietf.org::everything-ftp/conflict-reviews/']),
('draft', boilerplate +
["--exclude='*.xml'",
"--exclude='*.pdf'",
'rsync.ietf.org::internet-drafts']),
('iana', boilerplate +
['rsync.ietf.org::everything-ftp/iana/']),
('iesg', boilerplate + ['rsync.ietf.org::iesg-minutes/']),
('rfc', boilerplate +
["--exclude='tar*'",
"--exclude='search*'",
"--exclude='PDF-RFC*'",
"--exclude='tst/'",
"--exclude='pdfrfc/'",
"--exclude='internet-drafts/'",
"--exclude='ien/'",
'ftp.rfc-editor.org::everything-ftp/in-notes/']),
('status', boilerplate +
['rsync.ietf.org::everything-ftp/status-changes/']))
def test_assemble_rsync(self):
test_path = '/sample/path'
for doc_type, cmd_array in self.rsync_no_path:
expected_path = test_path + '/' + doc_type
expected_cmd = cmd_array + [expected_path]
returned_cmd, returned_path = assemble_rsync(doc_type, test_path,
False)
self.assertEqual(expected_cmd, returned_cmd)
self.assertEqual(expected_path, returned_path)
def test_assemble_rsync_flat(self):
expected_path = '/sample/path'
for doc_type, cmd_array in self.rsync_no_path:
expected_cmd = cmd_array + [expected_path]
returned_cmd, returned_path = assemble_rsync(doc_type,
expected_path, True)
self.assertEqual(expected_cmd, returned_cmd)
self.assertEqual(expected_path, returned_path)
if __name__ == '__main__':
unittest.main()
| 2.1875 | 2 |
v3/serializer.py | bwisgood/FRF | 3 | 12788225 | <reponame>bwisgood/FRF<filename>v3/serializer.py
from inspect import ismethod
from datetime import datetime
from .exceptions import ArgumentException
class Serializer(object):
model_class = None
fields = "__all__"
extend_fields = None
logical_delete = {"is_delete": 0}
data_time_format = "%Y/%m/%d %H:%M:%S"
_map = {
"integer": int,
"small_integer": int,
"boolean": bool,
"string": str,
# "datetime": datetime,
# "date": date,
"float": float
# "time"
}
def __init__(self):
"""
        self.relations: all relation attributes
        self.model_class: the model class being serialized
        self.table: the sqlalchemy Table object
        self.foreign_keys: all foreign key column names
        self.extra_func: extra (bound) methods of the model
        self.all_fields: all column names
"""
assert self.model_class is not None, "serializer_obj can not be None"
self.relations = []
self.table = self.model_class.__dict__.get('__table__')
self.foreign_keys = self.get_all_foreign_keys()
self.extra_func = [ismethod(getattr(self.model_class, attr[0], None)) for attr in
self.model_class.__dict__.items()]
self.all_fields = self.get_all_fields()
@classmethod
def get_all_fields(cls):
"""
        Get all column names of the model's table
:return:
"""
all_attr_str_list = list(cls.model_class.__dict__.get("__table__").c)
all_fields = [str(c).split(".")[1] for c in all_attr_str_list]
return all_fields
def get_all_foreign_keys(self):
"""
        Get all foreign key column names
:return:
"""
tmp = []
fks = self.table.foreign_keys
for fk in fks:
tmp.append(fk.parent.name)
return tmp
def add_extend_fields(self, return_data):
if self.extend_fields is None:
return return_data
for item in self.extend_fields.items():
            # look up the referenced value
data = return_data.get(item[0])
if data is not None:
return_data.pop(item[0])
else:
continue
extra_data = item[1].query.filter_by(**{"id": data}).first()
extend_fields_in = getattr(self, "extend_fields_" + item[0], None)
if extend_fields_in:
return_data1 = dict(
map(self.mapping_func,
dict(filter(lambda x: x[0] in extend_fields_in, extra_data.__dict__.items())).items()))
else:
return_data1 = dict(
map(self.mapping_func,
dict(filter(lambda x: not x[0].startswith("_"), extra_data.__dict__.items())).items()))
return_data.update(**{item[0]: return_data1})
return return_data
def mapping_func(self, y):
if isinstance(y[1], datetime):
temp = y[1].strftime(self.data_time_format)
return y[0], temp
else:
return y
def serialize(self, instance):
"""
        Serialize an instance into a dict
        :param instance: model instance
        :return: the serialized data dict
"""
data = self.to_serializer_able(instance)
if self.fields == '__all__':
return_data = dict(map(self.mapping_func, data.items()))
else:
assert isinstance(self.fields, list)
return_data = dict(
map(self.mapping_func, dict(filter(lambda x: x[0] in self.fields, data.items())).items()))
return_data = self.add_extend_fields(return_data)
return return_data
def to_serializer_able(self, instance):
data_ = {}
for field in self.all_fields:
data_[field] = getattr(instance, field, None)
return data_
def create(self, data, qs):
        # first drop data that does not belong to the table
        data = self.filter_table_data(data)
        # then validate and convert types
        data = self.validate(data)
        # create an instance
        instance = self.model_class()
        # assign the values from data to the instance
        # print(data)
        for k, v in data.items():
            setattr(instance, k, v)
        # save the instance
        instance = qs.save(instance)
        # return the serialized data
        return self.serialize(instance)
def validate(self, data):
        # validate the parameters
        for field in self.all_fields:
            # first check the not-null constraint
            _column = getattr(self.model_class, field, None)
            match_data = data.get(field)
            self.check_field_not_none(_column, match_data)
            # then check the type
            self.check_field_type(_column, match_data, data)
return data
@staticmethod
def check_field_not_none(_column, match_data):
if _column.nullable is False and match_data is None and not _column.primary_key:
raise ArgumentException
def check_field_type(self, _column, match_data, data):
if match_data is not None:
column_type = _column.type.__visit_name__
if column_type in self._map:
incident_type = self._map.get(column_type)
try:
                    # coerce the value to the expected Python type
                    data[_column.key] = incident_type(match_data)
except Exception:
raise ArgumentException
def filter_table_data(self, data):
        # filter data, dropping attributes that are not table columns
assert isinstance(data, dict)
data = dict(filter(lambda x: x[0] in self.all_fields, data.items()))
return data
def modify(self, instance, data, qs):
        # first filter data, dropping attributes that are not table columns
        data = self.filter_table_data(data)
        # then validate the parameters again
data = self.validate(data)
for k, v in data.items():
setattr(instance, k, v)
qs.commit()
return self.serialize(instance)
def delete(self, instance, qs):
        # decide whether a logical (soft) delete is needed
        data = self.serialize(instance)
        if not self.logical_delete:
            qs.delete(instance)
        else:
            for k, v in self.logical_delete.items():
setattr(instance, k, v)
qs.commit()
return data
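# Minimal usage sketch (illustrative only: `User` is a hypothetical Flask-SQLAlchemy
# model and `qs` a session-like helper exposing save/commit/delete, as used above):
#
#   class UserSerializer(Serializer):
#       model_class = User
#       fields = ["id", "username"]
#
#   data = UserSerializer().serialize(user_instance)
#   created = UserSerializer().create({"username": "alice"}, qs)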
| 2.40625 | 2 |
examples/extension.py | TuXiaokang/karas | 3 | 12788226 | <reponame>TuXiaokang/karas<gh_stars>1-10
import torch
from karas.training import extension
class Eval(extension.Extension):
def initialize(self, trainer):
pass
def __call__(self, trainer):
updater = trainer.updater
reporter = trainer.reporter
loader = trainer.get_loader('test')
correct = 0
updater.model.eval()
torch.set_grad_enabled(False)
for batch in loader:
input, target = updater.converter(batch)
output = updater.model(input)
pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
accuracy = correct / len(loader.dataset)
with reporter.scope('scalar'):
reporter.report({'accuracy': accuracy})
updater.model.train()
torch.set_grad_enabled(True)
def finalize(self):
pass
| 2.125 | 2 |
hanoi/subscriber.py | emmanueltech1/edgex-go | 0 | 12788227 | # Subscribers are created with ZMQ.SUB socket types.
# A zmq subscriber can connect to many publishers.
import sys
import zmq
import base64
import simplejson as json
port = "5563"
if len(sys.argv) > 1:
port = sys.argv[1]
int(port)
# Socket to talk to server
context = zmq.Context()
socket = context.socket(zmq.SUB)
print("Collecting updates from server...")
#socket.connect("tcp://localhost:%s" % port)
socket.connect("tcp://127.0.0.1:%s" % port)
# Subscribes to all topics you can selectively create multiple workers
# that would be responsible for reading from one or more predefined topics
# if you have used AWS SNS this is a similar concept
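# A minimal sketch of selective subscription (the topic prefixes below are
# illustrative assumptions, not values used by this project's publisher):
#   socket.subscribe("sensors")   # receive only messages starting with "sensors"
#   socket.subscribe("alerts")    # a second prefix on the same SUB socket
# The next line instead subscribes to every topic: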
socket.subscribe("")
while True:
# Receives a string format message
print(socket.recv())
#data = socket.recv()
#dumps the json object into an element
#json_str = json.dumps(data)
#load the json to a string
#resp = json.loads(json_str)
#print the resp
#print (resp)
#print(resp["payload"])
#extract an element in the response
#print(resp['payload'])
#dataEncoded = base64.b64decode(socket.recv())
#print(dataEncoded)
# extract an element in the response
#print (dataEncoded['name'])
#print (dataEncoded['value']) | 2.953125 | 3 |
tests/test_log.py | elifesciences/elife-bot | 17 | 12788228 | <reponame>elifesciences/elife-bot
import logging
import log
import unittest
class TestLog(unittest.TestCase):
def test_logger_creation_and_usage(self):
logger = log.logger("worker.log", "INFO", "worker_123")
self.assertIsInstance(logger, logging.Logger)
logger.info("Test info message")
logger.error("Test error message")
def test_identity_generation(self):
self.assertRegex(log.identity("worker"), r"^worker_[0-9]+$")
| 2.6875 | 3 |
auraliser/realtime.py | FRidh/auraliser | 2 | 12788229 | import numpy as np
import itertools
from scintillations.stream import modulate as apply_turbulence
from scintillations.stream import transverse_speed
from streaming.stream import Stream, BlockStream
from streaming.signal import *
import streaming.signal
import logging
from acoustics.signal import impulse_response_real_even
import auraliser.tools
logger = auraliser.tools.create_logger(__name__)
def apply_atmospheric_attenuation(signal, fs, distance, nhop, atmosphere, ntaps, inverse=False, distance_reducer=np.mean):
"""Apply atmospheric attenuation to signal.
:param distance: Iterable with distances.
:param fs: Sample frequency.
:param atmosphere: Atmosphere.
:param ntaps: Amount of filter taps.
:param sign: Sign.
:rtype: :class:`streaming.Stream`
Compute and apply the attenuation due to atmospheric absorption.
The attenuation can change with distance. The attenuation is a magnitude-only filter.
    We design a linear-phase FIR filter that matches this magnitude response.
.. note:: The filter delay is compensated by dropping the first `ntaps//2` samples.
"""
# Partition `distance` into blocks, and reduce with `distance_reducer`.
distance = distance.blocks(nhop).map(distance_reducer)
ir = Stream(atmosphere.impulse_response(d, fs, ntaps=ntaps, inverse=inverse) for d in distance)
signal = convolve_overlap_save(signal, ir, nhop, ntaps)
signal = signal.samples().drop(int(ntaps//2)) # Linear phase, correct for group delay caused by FIR filter.
return signal
def apply_reflection_strength(emission, nhop, spectra, effective, ntaps, force_hard):
"""Apply mirror source strength.
:param signal: Signal.
:param nblock: Amount of samples per block.
:param spectra: Spectrum per block.
:param effective: Whether the source is effective or not.
:param ntaps: Amount of filter taps.
:param force_hard: Whether to force a hard ground.
:returns: Signal with correct strength.
.. warning:: This operation will cause a delay that may vary over time.
"""
if effective is not None:
# We have an effectiveness value for each hop (which is a block of samples)
        emission = BlockStream(map(lambda x, y: x * y, emission.blocks(nhop), effective), nblock=nhop)
if force_hard:
logger.info("apply_reflection_strength: Hard ground.")
else:
logger.info("apply_reflection_strength: Soft ground.")
impulse_responses = Stream(impulse_response_real_even(s, ntaps) for s in spectra)
emission = convolve_overlap_save(emission, impulse_responses, nhop, ntaps)
# Filter has a delay we need to correct for.
emission = emission.samples().drop(int(ntaps//2))
return emission
#def apply_ground_reflection(signal, ir, nblock):
#"""Apply ground reflection strength.
#:param signal: Signal before ground reflection strength is applied.
#:param ir: Impulse response per block.
#:param nblock: Amount of samples per block.
#:returns: Signal after reflection strength is applied.
#:type: :class:`streaming.BlockStream`
#"""
#signal = convolve(signal=signal, impulse_responses=ir, nblock=nblock)
def apply_doppler(signal, delay, fs, initial_value=0.0, inverse=False):
"""Apply Doppler shift.
:param signal: Signal before Doppler shift.
:param delay: Propagation delay.
:param fs: Constant sample frequency.
:returns: Doppler-shifted signal.
:rtype: :class:`streaming.Stream`
"""
if inverse:
delay = delay * -1 # Unary operators are not yet implemented in Stream
return vdl(signal, times(1./fs), delay, initial_value=initial_value)
def apply_spherical_spreading(signal, distance, inverse=False):#, nblock):
"""Apply spherical spreading.
:param signal. Signal. Iterable.
:param distance: Distance. Iterable.
:param nblock: Amount of samples in block.
"""
if inverse:
return signal * distance
else:
return signal / distance
#def undo_reflection(signal, nhop, impedance, angle, ntaps, force_hard):
#"""Undo reflection
#:param signal: Signal.
#:param nhop: Hop size.
#:param impedance: Fixed impedance.
#:param angle: Angle per hop.
#:param ntaps: Taps.
#:param force_hard: Whether to assume infinite impedance.
#"""
#if force_hard:
#tf =
#strength = Stream(reflection_factor_plane_wave(impedance, a) for a in angles.samples())
#tf = 1. / (1. + strength)
#impulse_responses = Stream(atmosphere.impulse_response(d, fs, ntaps=ntaps, inverse=inverse) for d in distance)
#signal = convolve_overlap_save(signal, impulse_responses, nhop, ntaps)
def nextpow2(x):
return int(2**np.ceil(np.log2(x)))
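# Illustrative composition of the helpers above (a sketch only: `signal`, `delay`
# and `distance` are assumed to be streaming.Stream objects and `fs` the sample rate):
#   dry = apply_doppler(signal, delay, fs)
#   received = apply_spherical_spreading(dry, distance)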
| 2.5 | 2 |
exam_reader/cli.py | 9kin/exam-reader | 1 | 12788230 | import subprocess
import sys
import time
from pathlib import Path
import click
from .database import Job, db
from .lint import lint
from .pdf_reader import pdf_workers
from .utils import add_files
dir = Path(__file__).resolve().parent.parent
@click.group(invoke_without_command=True)
@click.pass_context
def cli(ctx):
if ctx.invoked_subcommand is None:
click.echo("I was invoked without subcommand")
@cli.command()
@click.option("-c", "--count", default=3, help="Number of workers.")
@click.option("-d", "--debug", default=False, is_flag=True, help="Debug.")
@click.option("-f", "--files", default=0, help="Number of files for debug.")
def worker(count, debug, files):
if files != 0:
add_files([dir.joinpath("files", "ege2016rus.pdf")] * files)
pdf_workers(workers_count=count, debug=debug, files=files)
@cli.command()
@click.argument("files", nargs=-1, type=click.Path())
def add(files):
all_files = []
for file_path in files:
full_file_path = dir.joinpath(file_path)
if not full_file_path.is_file() or not full_file_path.exists():
continue
all_files.append(full_file_path)
add_files(all_files)
PROMPT = "❯"
CHAR_SLEEP = 0.05
def slowprint(command):
for char in command + "\n":
sys.stdout.write(char)
sys.stdout.flush()
time.sleep(CHAR_SLEEP)
def show(command, execute=True):
slowprint(command)
if execute:
start = time.time()
subprocess.call(["python3", "-m", *command.split()])
print(f"took {int(time.time() - start)}s", end="")
@cli.command()
def debug_worker_speed():
db.drop_tables([Job])
db.create_tables([Job])
show("exam_reader worker -c 2 -d -f 2")
@cli.command()
def debug_worker():
db.drop_tables([Job])
db.create_tables([Job])
show("exam_reader worker -c 2 -d")
@cli.command("lint")
def lint_command():
lint()
"""
termtosvg docs/source/static/debug_worker_speed.svg \
--command='python3 -m exam_reader debug-worker-speed' \
--screen-geometry=80x3
"""
| 2.21875 | 2 |
setup.py | 1313e/viscm | 1 | 12788231 | from setuptools import setup, find_packages
import sys
import os.path
# Must be one line or PyPI will cut it off
DESC = ("A colormap tool")
LONG_DESC = open("README.rst").read()
setup(
name="viscm",
version="0.10.0",
description=DESC,
long_description=LONG_DESC,
author="<NAME>, <NAME>, <NAME>",
author_email="<EMAIL>, <EMAIL>, <EMAIL>",
url="https://github.com/1313e/viscm",
license="MIT",
classifiers =
[ "Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
],
packages=find_packages(),
install_requires=["colorspacious>=1.1.0",
"matplotlib>=3.2.0",
"numpy>=1.8",
"pyqt5==5.12.*",
"scipy>=1.0.0",
"cmasher>=1.5.0",
"guipy>=0.0.2",
"qtpy>=1.9.0"],
python_requires='>=3.5, <4',
package_data={'viscm': ['examples/*']},
entry_points={
'console_scripts': [
"viscm = viscm.gui:main"]},
)
| 1.445313 | 1 |
src/modules/snowflake/sync-connector/sync-connector-lambda/snowflake_export.py | aws-samples/aws-iot-twinmaker-samples-snowflake | 1 | 12788232 | <reponame>aws-samples/aws-iot-twinmaker-samples-snowflake
#!/usr/bin/env python
import argparse
import json
import os
import snowflake.connector
from snowflake.connector import DictCursor
import time
from library import *
'''
Read the model meta-data (queried from snowflake) and convert it into a Iottwinmaker entities (json)
input:
-b --bucket The bucket to exported snowflake artifacts to.
-p --prefix The prefix under which snowflake data will be exported to.
-s --secrets-name Name of the secret in secret manager that stores snowflake credentials
-f --query-file File containing the query that will be executed against the snowflake data
#-r --iottwinmaker-role-arn ARN of the role assumed by Iottwinmaker
-w --workspace-id Workspace id passed to import, optional for export
output:
Spits out Iottwinmaker entity json for all records
'''
## -w Workspace ID
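# Example invocation (a sketch only -- the bucket, prefix, secret, query file and
# workspace id below are placeholders, not values defined by this project):
#   python snowflake_export.py -b my-export-bucket -p exports/snowflake \
#       -s my-snowflake-secret -f model_query.sql -w my-workspace-id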
def parse_arguments():
parser = argparse.ArgumentParser(
description='Convert OSI PI (SQL Records) to Iottwinmaker Entities (JSON)')
parser.add_argument('-b', '--bucket',
help='S3 bucket to store the exported files to.',
required=True)
parser.add_argument('-p', '--prefix',
help='prefix path within the S3 bucket',
required=True)
parser.add_argument('-s', '--secrets-name',
help='Name of the secret in secret manager that stores snowflake credentials',
required=True)
parser.add_argument('-f', '--query-file',
help='File containing the query that will be executed against the snowflake database',
required=True)
#parser.add_argument('-r', '--iottwinmaker-role-arn',
# help='ARN of the role assumed by Iottwinmaker',
# default=False,
# required=True)
parser.add_argument('-w', '--workspace-id',
help='Workspace id passed to import, optional for export',
required=False)
return parser
## upsert the entities in the workspace with that from the snowflake records
def process_records(sf_records):
jrec = {"entities":[]}
entities = jrec["entities"]
for row_tuple in sf_records:
attributes = json.loads(row_tuple['ATTR_NAME'])
values = json.loads(row_tuple['ATTR_PI_PT'])
properties = {}
for i, attr in enumerate(attributes):
## Temporary if condition, remove it once snowflake query is fixed.
if i < len(values):
value = underscored(values[i]) if i < len(values) else ""
properties[underscored(attr)] = {
'definition': { 'dataType': {'type':'STRING'} },
'value' : {'stringValue': value}
}
entity = { "entity_name": underscored(row_tuple['ELEM_NAME']),
"entity_id": underscored(row_tuple['ELEM_ID']),
"parent_name": underscored(row_tuple.get('PARENT_NAME')),
"parent_entity_id": underscored(row_tuple.get('ELEM_PARENT_ID')),
"component_type": row_tuple.get('COMP_TYPE'),
"description": row_tuple.get('EPATH'),
"properties": properties }
entities.append(entity)
return jrec
def query_records(secret, qry_file):
records = []
sf_creds = get_snowflake_credentials(secret)
ctx = snowflake.connector.connect(
user=sf_creds['USER'],
password=sf_creds['PASSWORD'],
account=sf_creds['ACCOUNT'],
ROLE=sf_creds['ROLE'],
WAREHOUSE=sf_creds['WAREHOUSE'],
DATABASE=sf_creds['DATABASE'],
SCHEMA=sf_creds['SCHEMA']
)
cs = ctx.cursor(DictCursor)
try:
with open(qry_file, 'r') as sql:
qry = sql.read().replace('\n', '')
cs.execute('use warehouse ' + sf_creds['WAREHOUSE'])
cs.execute(qry)
records = cs.fetchall()
finally:
cs.close()
ctx.close()
return records
def lambda_handler(event, context):
#load_env()
secrets_name = event.get('secretsName')
query_file = event.get('queryFile')
records = query_records(secrets_name, query_file)
    SERVICE_ENDPOINT = os.environ.get('AWS_ENDPOINT')
s3 = boto3_session().resource('s3')
ws_bucket = event.get('bucket')
filename = '{}/{}.json'.format(event.get('prefix'),str(time.time()))
s3object = s3.Object(ws_bucket, filename)
json_data = process_records(records)
s3object.put(
Body=(bytes(json.dumps(json_data).encode('UTF-8')))
)
return {
'statusCode': 200,
'body': { 'outputBucket': ws_bucket,
'outputPath':filename,
'componentTypeId':'com.snowflake.connector:1',
'workspaceId':event.get('workspaceId')},
#'iottwinmakerRoleArn':event.get('iottwinmakerRoleArn')},
'status' : "SUCCEEDED"
}
def main():
if __name__ != '__main__':
return
parser = parse_arguments()
args = parser.parse_args()
r = lambda_handler( {
'secretsName': args.secrets_name,
'queryFile': args.query_file,
'bucket':args.bucket,
'prefix':args.prefix,
'workspaceId':args.workspace_id},None)
#'iottwinmakerRoleArn':args.iottwinmaker_role_arn},None)
print(r)
main()
| 2.25 | 2 |
PeterMaar-NetLrnChatBot/Client/ClientTerminal.py | alset333/NetworkedLearningChatbot | 3 | 12788233 | <reponame>alset333/NetworkedLearningChatbot
#!/usr/bin/env python3
# ClientTerminal.py
import time
import socket
__author__ = '<NAME>'
__version__ = '0.1.0'
def send(ip, port, message):
udp_ip = ip
udp_port = port
sock = socket.socket(socket.AF_INET, # Internet
socket.SOCK_DGRAM) # UDP
sock.sendto(bytes(message, "utf-8"), (udp_ip, udp_port))
sock.close()
def receive(port):
udp_ip = ''
udp_port = port
sock = socket.socket(socket.AF_INET, # Internet
socket.SOCK_DGRAM) # UDP
sock.bind((udp_ip, udp_port))
data, addr = sock.recvfrom(268435456) # buffer size is 1/4 GB
recievedMessage = data.decode("utf-8")
sock.close()
return recievedMessage
srvrIp = input("Enter the IP address or hostname of the server:\n")
srvrPort = int(input("Enter the port number of the server:\n"))
msg = ''
lastReceive = ''
while msg.lower() != 'q':
msg = input("Enter message to send, or 'q' to quit:\n")
if msg.lower() == 'q':
time.sleep(1)
print("Bye!")
break
send(srvrIp, srvrPort, lastReceive + "\n\n\n:::\n\n\n" + msg)
lastReceive = receive(srvrPort)
time.sleep(len(lastReceive)/10) # Wait to make it seem like the bot is 'typing'
print(lastReceive)
| 3.28125 | 3 |
DocTAG_Dockerized/DocTAG_App/utils_configuration_and_update.py | DocTAG/doctag-core | 4 | 12788234 | <filename>DocTAG_Dockerized/DocTAG_App/utils_configuration_and_update.py<gh_stars>1-10
from DocTAG_App.utils import *
from psycopg2.extensions import register_adapter, AsIs
def addapt_numpy_float64(numpy_float64):
return AsIs(numpy_float64)
def addapt_numpy_int64(numpy_int64):
return AsIs(numpy_int64)
register_adapter(numpy.float64, addapt_numpy_float64)
register_adapter(numpy.int64, addapt_numpy_int64)
from collections import defaultdict
from DocTAG_App.utils_pubmed import *
import os.path
from DocTAG_App.utils_doctag import *
"""This .py file includes the methods needed to configure MedTAG and update its configuration"""
LANGUAGES_NLTK = [
"arabic","danish","dutch","english","finnish","french","german","hungarian","italian","norwegian","porter","portuguese","romanian","russian","spanish","swedish"
]
def check_file(reports,pubmedfiles, labels, concepts, jsonDisp, jsonAnn, username, password,topics,runs,tf_idf):
"""This method checks whether the inserted files complies with the requirements"""
json_resp = {}
json_keys = []
usecases_list = []
docs = []
tops = []
topics_ids = []
documents_ids = []
languages = []
json_resp['general_message'] = ''
json_resp['username_message'] = ''
json_resp['report_message'] = ''
json_resp['pubmed_message'] = ''
json_resp['concept_message'] = ''
json_resp['label_message'] = ''
json_resp['topic_message'] = ''
json_resp['tfidf_message'] = ''
json_resp['runs_message'] = ''
json_resp['fields_message'] = ''
json_resp['keys'] = json_keys
#added 2/11
if (len(labels) == 0 and len(concepts) == 0) and len(topics) == 0 and (len(reports) == 0 and len(pubmedfiles) == 0) and len(runs) == 0:
json_resp[
            'general_message'] = 'ERROR - You must provide at least four files: the labels (or concepts), the topics, the runs and the reports.'
# if len(jsonAnn) == 0:
# json_resp[
# 'general_message'] = 'ERROR - You must provide at least one field to annotate.'
elif len(reports) == 0 and len(pubmedfiles) == 0:
json_resp['general_message'] = 'ERROR - You must provide a file with one or more reports or one or more pubmed files.'
elif len(pubmedfiles) > 0 and len(concepts) == 0 and len(labels) == 0:
json_resp['general_message'] = 'PUBMED - only mentions allowed.'
try:
try:
cursor = connection.cursor()
cursor.execute('SELECT * FROM public.user WHERE username = %s', (str(username),))
ans = cursor.fetchall()
# Error on username and password: duplicated username or missing
if len(ans) > 0 or username == 'Test':
json_resp['username_message'] = 'USERNAME - The username you selected is already taken. Choose another one.'
if (username == ''):
json_resp['username_message'] = 'USERNAME - Please, provide a username.'
if password == '' and username == '':
json_resp['username_message'] = 'USERNAME - Please, provide a username and a password.'
except (Exception, psycopg2.Error) as e:
print(e)
json_resp[
'username_message'] = 'An error occurred handling the username and the password. Please, insert them again.'
pass
else:
if json_resp['username_message'] == '':
json_resp['username_message'] = 'Ok'
# This is necessary to collect the fields to annotate and display
fields = []
fields_to_ann = []
jsondisp = ''.join(jsonDisp)
jsonann = ''.join(jsonAnn)
jsondisp = jsondisp.split(',')
jsonann = jsonann.split(',')
for el in jsondisp:
if len(el) > 0:
fields.append(el)
for el in jsonann:
if len(el) > 0:
fields_to_ann.append(el)
if not tf_idf.isdigit():
json_resp['tfidf_message'] = 'TF-IDF - the value must include only digits'
if json_resp['tfidf_message'] == '':
json_resp['tfidf_message'] = 'Ok'
# Error if 0 report files are added
if len(reports) == 0 and len(pubmedfiles) == 0:
json_resp['report_message'] = 'REPORTS FILES - You must provide at least one file containing reports or at least one file containing PubMED IDs before checking'
json_resp['pubmed_message'] = 'PUBMED FILES - You must provide at least one file containing reports or at least one file containing PubMED IDs before checking'
if len(topics) == 0:
json_resp['topic_message'] = 'TOPIC FILES - You must provide at least one file containing topics'
if len(runs) == 0:
json_resp['runs_message'] = 'RUNS FILES - You must provide at least one file containing runs'
# docs_tot = []
for j in range(len(reports)):
r = decompress_files([reports[j]])
for i in range(len(r)):
if isinstance(r[i], str):
rep_name = r[i]
workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in
r[i] = os.path.join(workpath, 'static/tmp/' + r[i])
else:
rep_name = r[i].name
if not rep_name.endswith('csv') and not rep_name.endswith('json'):
json_resp['report_message'] = 'DOCUMENTS FILE - ' + rep_name + ' - The file must be .csv or .json'
break
if rep_name.endswith('.csv'):
try:
df = pd.read_csv(r[i])
df = df.where(pd.notnull(df), None)
df = df.reset_index(drop=True) # Useful if the csv includes only commas
except Exception as e:
print(e)
json_resp['report_message'] = 'DOCUMENTS FILE - ' + rep_name + ' - An error occurred while parsing the csv. Check if it is well formatted. Check if it contains as many columns as they are declared in the header.'
pass
else:
# check if colunns are allowed and without duplicates
cols = list(df.columns)
list_db_col = ['document_id','language']
list_not_db_col = []
missing = False
for el in list_db_col:
if el not in cols and el == 'document_id':
json_resp['report_message'] = 'DOCUMENTS FILE - ' + rep_name + ' - The column: ' + el + ' is missing, please add it.'
missing = True
break
if missing:
break
for id in list(df.document_id.unique()):
if not str(id) in documents_ids:
# json_resp['report_message'] = 'WARNING DOCUMENTS FILE - ' + rep_name + ' - The id: ' + str(id) + ' is duplicated. The duplicates are ignored.'
# else:
documents_ids.append(str(id))
for el in cols:
if el not in list_db_col:
list_not_db_col.append(el)
for el in df.document_id:
    if str(el).lower().startswith('pubmed_'):
        json_resp['report_message'] = 'DOCUMENTS FILE - ' + rep_name + ' - reports\' ids cannot start with "PUBMED_", please, change the name'
# if 0 optional columns are added
if len(list_not_db_col) == 0:
json_resp['report_message'] = 'DOCUMENTS FILE - ' + rep_name + ' - You must provide at least one column other than the documents\' ids'
break
# Check if the csv is empty with 0 rows
if df.shape[0] == 0:
json_resp['report_message'] = 'DOCUMENTS FILE - ' + rep_name + ' - You must provide at least a report.'
break
else:
# check if columns id_report and language have no duplicates
df_dup = df[df.duplicated(subset=['document_id'], keep=False)]
if 'language' in df:
df_dup = df[df.duplicated(subset=['document_id','language'], keep=False)]
if df_dup.shape[0] > 0:
json_resp['report_message'] = 'WARNING DOCUMENTS FILE - ' + rep_name + ' - The rows: ' + str(
df_dup.index.to_list()) + ' are duplicated. The duplicates are ignored.'
# Check if the optional rows are empty for one or more reports.
exit = False
# docs_tot.extend(list(df.document_id.unique()))
for ind in range(df.shape[0]):
count_both = 0
not_none_cols = []
isnone = True
for el in list_not_db_col:
if df.loc[ind, el] is not None:
isnone = False
not_none_cols.append(el)
for el in not_none_cols:
if el not in jsonann and el not in jsondisp:
count_both = count_both + 1
if count_both == len(not_none_cols):
json_resp['fields_message'] = 'WARNING REPORT FIELDS TO DISPLAY AND ANNOTATE - ' + rep_name + ' - With this configuration the report at the row: ' + str(
ind) + ' would not be displayed since the columns to display are all empty for that report.'
if isnone:
exit = True
json_resp['report_message'] = 'DOCUMENTS FILE - ' + rep_name + ' - The report at row ' + str(ind) + ' has the columns: ' + ', '.join(
list_not_db_col) + ' empty. Provide a value for at least one of these columns.'
break
if exit:
break
# check if there are None in mandatory columns
el = ''
if None in df['document_id'].tolist():
el = 'document_id'
if el != '':
lista = df[el].tolist()
ind = lista.index(None)
json_resp['report_message'] = 'DOCUMENTS FILE - ' + rep_name + ' - The column ' + el + ' is empty at the row: ' + str(ind) + '.'
break
elif rep_name.endswith('.json'):
opened_from_path = isinstance(r[i], str)
if opened_from_path:
    r[i] = open(r[i], 'r')
d = json.load(r[i])
if 'collection' not in d.keys():
json_resp['report_message'] = 'DOCUMENTS FILE - ' + rep_name + ' - The json is not well formatted.'
break
exit = False
keys_list = []
if len(d['collection']) == 0:
json_resp['report_message'] = 'DOCUMENTS FILE - ' + rep_name + ' - You must provide at least a report.'
break
for document in d['collection']:
ind = d['collection'].index(document)
if 'document_id' not in list(document.keys()) or document['document_id'] is None:
json_resp['report_message'] = 'DOCUMENTS FILE - ' + rep_name + ' - The ' + str(ind) + ' document does not contain the "document_id" key which is mandatory.'
exit = True
break
doc_keys = list(document.keys())
# docs_tot.append(str(document['document_id']))
if 'language' in document.keys():
doc_keys.remove('language')
doc_keys.remove('document_id')
is_none = True
for key in doc_keys:
if key != 'document_id' and key != 'language':
if document[key] is not None:
is_none = False
break
if (('document_id' in doc_keys and len(doc_keys) == 1) or ('document_id' in doc_keys and 'language' in doc_keys and len(doc_keys) == 2)) or is_none:
json_resp['report_message'] = 'DOCUMENTS FILE - ' + rep_name + ' - The ' + str(ind) + ' document does not contain the document\'s text.'
keys_list.extend(doc_keys)
if str(document['document_id']) in documents_ids:
json_resp['report_message'] = 'WARNING DOCUMENTS FILE - ' + rep_name + ' - The id ' + str(document['document_id']) + ' is duplicated.'
else:
documents_ids.append(str(document['document_id']))
count_both = 0
for el in doc_keys:
if el not in jsonann and el not in jsondisp:
count_both += 1
if count_both == len(doc_keys):
json_resp['fields_message'] = 'WARNING REPORT FIELDS TO DISPLAY AND ANNOTATE - ' + rep_name + ' - With this configuration the report at the row: ' + str(
ind) + ' would not be displayed since the columns to display are all empty for that report.'
if exit == True:
break
# close the handle only if it was opened from a temporary path in the json branch
if rep_name.endswith('.json') and opened_from_path:
    r[i].close()
if len(reports) > 0:
if json_resp['report_message'] == '':
json_resp['report_message'] = 'Ok'
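# Validate the PubMED ID files (.csv, .json or .txt): collected ids are prefixed with "PUBMED_" so they can be distinguished from ordinary report ids.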
for i in range(len(pubmedfiles)):
# Error if the file is not csv
if not pubmedfiles[i].name.endswith('csv') and not pubmedfiles[i].name.endswith('json') and not pubmedfiles[i].name.endswith(
'txt'):
json_resp['pubmed_message'] = 'PUBMED FILE - ' + pubmedfiles[i].name + ' - The file must be .csv or .json or .txt'
break
if pubmedfiles[i].name.endswith('csv'):
try:
df = pd.read_csv(pubmedfiles[i])
df = df.where(pd.notnull(df), None)
df = df.reset_index(drop=True) # Useful if the csv includes only commas
except Exception as e:
print(e)
json_resp['pubmed_message'] = 'PUBMED FILE - ' + pubmedfiles[
i].name + ' - An error occurred while parsing the csv. Check if it is well formatted. Check if it contains as many columns as they are declared in the header.'
pass
else:
# check if colunns are allowed and without duplicates
cols = list(df.columns)
list_db_col = ['document_id']
missing = False
for el in list_db_col:
if el not in cols and el == 'document_id':
json_resp['pubmed_message'] = 'PUBMED FILE - ' + pubmedfiles[i].name + ' - The column: ' + el + ' is missing, please add it.'
missing = True
break
if missing:
break
for column in cols:
null_val = df[df[column].isnull()].index.tolist()
if len(null_val) > 0:
json_resp['pubmed_message'] = 'PUBMED FILE - ' + pubmedfiles[i].name + ' - You did not insert the ' + column + ' for rows: ' + ', '.join(str(v) for v in null_val)
# Check if the csv is empty with 0 rows
if df.shape[0] == 0:
json_resp['pubmed_message'] = 'PUBMED FILE - ' + pubmedfiles[i].name + ' - You must provide at least a report.'
break
else:
# check if columns id_report and language have no duplicates
if 'language' in df:
df_dup = df[df.duplicated(subset=['document_id','language'], keep=False)]
else:
df_dup = df[df.duplicated(subset=['document_id'], keep=False)]
if df_dup.shape[0] > 0:
json_resp['pubmed_message'] = 'WARNING PUBMED FILE - ' + pubmedfiles[i].name + ' - The rows: ' + str(df_dup.index.to_list()) + ' are duplicated. The duplicates are ignored.'
ids = ['PUBMED_'+str(id) for id in list(df.document_id.unique())]
documents_ids.extend(ids)
elif pubmedfiles[i].name.endswith('json'):
d = json.load(pubmedfiles[i])
if 'pubmed_ids' not in d.keys():
json_resp['pubmed_message'] = 'PUBMED FILE - ' + pubmedfiles[
i].name + ' - json is not well formatted.'
break
if d['pubmed_ids'] == []:
json_resp['pubmed_message'] = 'PUBMED FILE - ' + pubmedfiles[
i].name + ' - you must provide at least an article id.'
break
if not isinstance(d['pubmed_ids'],list):
json_resp['pubmed_message'] = 'PUBMED FILE - ' + pubmedfiles[
i].name + ' - you must provide at least an article id.'
break
if len(d['pubmed_ids']) != len(list(set(d['pubmed_ids']))):
json_resp['pubmed_message'] = 'WARNING PUBMED FILE - ' + pubmedfiles[i].name + ' - some ids seem to be duplicated. They will be ignored.'
break
ids = ['PUBMED_'+str(id) for id in d['pubmed_ids']]
documents_ids.extend(ids)
elif pubmedfiles[i].name.endswith('txt'):
lines = pubmedfiles[i].readlines()
ids = ['PUBMED_' + line.decode('utf-8').strip() for line in lines]
if len(lines) == 0 :
json_resp['pubmed_message'] = 'PUBMED FILE - ' + pubmedfiles[i].name + ' - the file is empty.'
break
if len(lines) != len(list(set(lines))):
json_resp['pubmed_message'] = 'WARNING PUBMED FILE - ' + pubmedfiles[i].name + ' - the file contains some duplicates: they will be ignored.'
documents_ids.extend(ids)
if len(pubmedfiles)>0:
if json_resp['pubmed_message'] == '':
json_resp['pubmed_message'] = 'Ok'
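# Validate the topic files: every topic needs at least a topic_id; title, description and narrative are optional.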
if len(topics) > 0:
for i in range(len(topics)):
if not topics[i].name.endswith('csv') and not topics[i].name.endswith('json') and not topics[i].name.endswith('txt'):
json_resp['topic_message'] = 'TOPIC FILE - ' + topics[i].name + ' - The file must be .csv or .json or .txt'
break
if topics[i].name.endswith('csv'):
# if not labels[i].name.endswith('csv'):
# json_resp['label_message'] = 'LABELS FILE - ' + labels[i].name + ' - The file must be .csv'
try:
df = pd.read_csv(topics[i])
df = df.where(pd.notnull(df), None)
df = df.reset_index(drop=True)
except Exception as e:
json_resp['topic_message'] = 'TOPIC FILE - ' + topics[i].name + ' - An error occurred while parsing the csv. Check if it is well formatted.'
pass
else:
cols = list(df.columns)
list_db_col = ['topic_id','title','description','narrative']
# if 'usecase' in cols:
# df['usecase'] = df['usecase'].str.lower()
#
esco = False
for el in list_db_col:
if el not in cols and el == 'topic_id':
esco = True
json_resp['topic_message'] = 'TOPIC FILE - ' + topics[
i].name + ' - The column: ' + el + ' is not present and it is mandatory.'
break
elif el not in cols:
    json_resp['topic_message'] = 'WARNING TOPIC FILE - ' + topics[i].name + ' - The optional column: ' + el + ' is not present.'
if esco == True:
break
for el in cols:
if el not in list_db_col:
json_resp['topic_message'] = 'TOPIC FILE - ' + topics[
i].name + ' - The column ' + el + ' is not allowed.'
break
for id in list(df.topic_id.unique()):
if str(id) in topics_ids:
json_resp['topic_message'] = 'WARNING TOPIC FILE - ' + topics[
i].name + ' - The topic: ' + str(id) + ' is duplicated. The duplicates are ignored.'
else:
topics_ids.append(str(id))
if df.shape[0] == 0:
json_resp['topic_message'] = 'TOPIC FILE - ' + topics[
i].name + ' - You must provide at least a row.'
break
else:
# check that the topic_id column has no duplicates
df_dup = df[df.duplicated(subset=['topic_id'], keep=False)]
if df_dup.shape[0] > 0:
json_resp['topic_message'] = 'WARNING TOPIC FILE - ' + topics[
i].name + ' - The rows: ' + str(
df_dup.index.to_list()) + ' are duplicated. The duplicates will be ignored.'
el = ''
# if None in df['usecase'].tolist():
# el = 'usecase'
if None in df['topic_id'].tolist():
el = 'topic_id'
if el != '':
lista = df[el].tolist()
ind = lista.index(None)
json_resp['topic_message'] = 'TOPIC FILE - ' + topics[
i].name + ' - The column ' + el + ' is empty at the row: ' + str(ind) + ' .'
break
elif topics[i].name.endswith('.json'):
# with open(topics[i], 'r') as f:
d = json.load(topics[i])
doc_top = []
if 'topics' not in d.keys():
json_resp['topic_message'] = 'TOPIC FILE - ' + topics[
i].name + ' - json is not well formatted.'
break
if d['topics'] == []:
json_resp['topic_message'] = 'TOPIC FILE - ' + topics[i].name + ' - you must provide at least a topic.'
break
for topic in d['topics']:
ind = d['topics'].index(topic)
if 'topic_id' not in list(topic.keys()):
json_resp['topic_message'] = 'TOPIC FILE - ' + topics[
i].name + ' - you must provide a topic number in the '+str(ind)+' th topic.'
break
doc_top.append(str(topic['topic_id']))
if str(topic['topic_id']) in topics_ids:
json_resp['topic_message'] = 'WARNING TOPIC FILE - ' + topics[
i].name + ' - the list of topics contains duplicates. They will be ignored.'
else:
topics_ids.append(str(topic['topic_id']))
if len(doc_top) > len(set(doc_top)):
json_resp['topic_message'] = 'WARNING TOPIC FILE - ' + topics[
i].name + ' - the list of topics contains duplicates. They will be ignored.'
elif topics[i].name.endswith('.txt'):
arr_to_ret = elaborate_runs(runs)
topics_ids = elaborate_TREC_topic_files([], topics[i], 'check')
if not isinstance(topics_ids, list):
    json_resp['topic_message'] = 'TOPIC FILE - ' + topics[i].name + ' - topics are not well formatted.'
    break
topics_ids = [str(t) for t in topics_ids]
if json_resp['topic_message'] == '':
json_resp['topic_message'] = 'Ok'
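# Validate the run files: every (topic_id, document_id) pair must reference a topic and a document declared in the files above.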
if len(runs) > 0:
for i in range(len(runs)):
if not runs[i].name.endswith('csv') and not runs[i].name.endswith('json') and not runs[i].name.endswith('txt'):
json_resp['runs_message'] = 'RUNS FILE - ' + runs[i].name + ' - The file must be .csv or .json or .txt'
break
if runs[i].name.endswith('csv'):
print(runs[i])
# if not labels[i].name.endswith('csv'):
# json_resp['label_message'] = 'LABELS FILE - ' + labels[i].name + ' - The file must be .csv'
try:
df = pd.read_csv(runs[i])
df = df.where(pd.notnull(df), None)
df = df.reset_index(drop=True)
except Exception as e:
print(e)
json_resp['runs_message'] = 'RUNS FILE - ' + runs[i].name + ' - An error occurred while parsing the csv. Check if it is well formatted.'
pass
else:
cols = list(df.columns)
list_db_col = ['topic_id', 'document_id','language']
esco = False
for el in list_db_col:
if el not in cols and el != 'language':
esco = True
json_resp['runs_message'] = 'RUNS FILE - ' + runs[
i].name + ' - The column: ' + el + ' is not present and it is mandatory.'
break
if esco == True:
break
for el in cols:
if el not in list_db_col:
json_resp['runs_message'] = 'RUNS FILE - ' + runs[
i].name + ' - The column ' + el + ' is not allowed.'
break
if df.shape[0] == 0:
json_resp['runs_message'] = 'RUNS FILE - ' + runs[
i].name + ' - You must provide at least a row.'
break
else:
# check that the (topic_id, document_id) pairs have no duplicates
if 'language' in df:
df_dup = df[df.duplicated(subset=['topic_id','document_id','language'], keep=False)]
else:
df_dup = df[df.duplicated(subset=['topic_id','document_id'], keep=False)]
if df_dup.shape[0] > 0:
json_resp['runs_message'] = 'WARNING RUNS FILE - ' + runs[
i].name + ' - The rows: ' + str(
df_dup.index.to_list()) + ' are duplicated. The duplicates will be ignored.'
el = ''
# if None in df['usecase'].tolist():
# el = 'usecase'
if None in df['topic_id'].tolist():
el = 'topic_id'
if None in df['document_id'].tolist():
el = 'document_id'
if 'language' in df:
el = 'language'
if el != '':
lista = df[el].tolist()
ind = lista.index(None)
json_resp['runs_message'] = 'RUNS FILE - ' + runs[
i].name + ' - The column ' + el + ' is empty at the row: ' + str(ind) + ' .'
break
tops.extend(df.topic_id.unique())
for el in tops:
if str(el) not in topics_ids:
json_resp['runs_message'] = 'RUNS FILE - ' + runs[
i].name + ' - The topic: ' + str(el) + ' is not in the provided list of topics.'
break
docs.extend(df.document_id.unique())
for el in docs:
if str(el) not in documents_ids:
json_resp['runs_message'] = 'RUNS FILE - ' + runs[
i].name + ' - The document: ' + str(el) + ' is not in the provided list of documents.'
break
elif runs[i].name.endswith('.json'):
# with open(runs[i], 'r') as f:
d = json.load(runs[i])
if 'run' not in d.keys():
json_resp['runs_message'] = 'RUNS FILE - ' + runs[
i].name + ' - json is not well formatted.'
break
if d['run'] == []:
json_resp['runs_message'] = 'RUNS FILE - ' + runs[
i].name + ' - you must provide at least a topic and one or more documents associated.'
break
for r in d['run']:
ind = d['run'].index(r)
if 'topic_id' not in r.keys():
json_resp['runs_message'] = 'RUNS FILE - ' + runs[i].name + ' - you must provide a topic number in the ' + str(ind) + ' th element.'
break
if 'documents' not in r.keys():
json_resp['runs_message'] = 'RUNS FILE - ' + runs[i].name + ' - you must provide a documents\' list for the topic: ' + str(r['topic_id']) + '.'
break
if isinstance(r['documents'][0],dict):
doc1 = [el['document_id'] for el in r['documents']]
else:
doc1 = r['documents']
for el in r['documents']:
if isinstance(el,dict):
if 'document_id' not in el.keys() or 'language' not in el.keys():
    json_resp['runs_message'] = 'RUNS FILE - ' + runs[i].name + ' - you must provide a document_id and a language for each document'
break
tops.append(r['topic_id'])
docs.extend(doc1)
for el in tops:
if el not in topics_ids:
json_resp['runs_message'] = 'RUNS FILE - ' + runs[
i].name + ' - The topic: ' + str(el) + ' is not in the provided list of topics.'
break
for el in docs:
if str(el) not in documents_ids:
json_resp['runs_message'] = 'RUNS FILE - ' + runs[
i].name + ' - The document: ' + str(el) + ' is not in the provided list of documents.'
break
elif runs[i].name.endswith('.txt'):
# with open(runs[i], 'r') as f:
lines = runs[i].readlines()
tups = []
for line in lines:
line = line.decode('utf-8')
if len(line.split()) == 2 or len(line.split()) == 3:
topic = line.split()[0]
tops.append(topic)
doc = line.split()[1]
docs.append(doc)
tups.append((topic,doc))
elif len(line.split()) > 2: #TREC
topic = line.split()[0]
tops.append(topic)
doc = line.split()[2]
tups.append((topic, doc))
docs.append(doc)
else:
json_resp['runs_message'] = 'RUNS FILE - ' + runs[i].name + ' - txt file is not well formatted.'
break
for el in tops:
if str(el) not in topics_ids:
json_resp['runs_message'] = 'RUNS FILE - ' + runs[
i].name + ' - The topic: ' + str(el) + ' is not in the provided list of topics.'
break
for el in docs:
if str(el) not in documents_ids:
json_resp['runs_message'] = 'RUNS FILE - ' + runs[
i].name + ' - The document: ' + str(el) + ' is not in the provided list of documents.'
break
if json_resp['runs_message'] == '':
json_resp['runs_message'] = 'Ok'
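# Validate the concept files: each concept needs a concept_url and a concept_name.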
if len(concepts) > 0:
for i in range(len(concepts)):
# Error if the file is neither csv nor json
if not concepts[i].name.endswith('csv') and not concepts[i].name.endswith('json'):
    json_resp['concept_message'] = 'CONCEPTS FILE - ' + concepts[i].name + ' - The file must be .csv or .json'
    break
if concepts[i].name.endswith('csv'):
# json_resp['concept_message'] = 'CONCEPTS FILE - ' + concepts[i].name + ' - The file must be .csv'
try:
df = pd.read_csv(concepts[i])
df = df.where(pd.notnull(df), None)
df = df.reset_index(drop=True)
except Exception as e:
json_resp['concept_message'] = 'CONCEPTS FILE - ' + concepts[i].name + ' - An error occurred while parsing the csv. Check if it is well formatted. '
pass
# print(df)
else:
cols = list(df.columns)
columns_wrong = False
list_db_col = ['concept_url', 'concept_name']
# if 'usecase' in cols:
# df['usecase'] = df['usecase'].str.lower()
# Check if all the mandatory cols are present
for el in list_db_col:
if el not in cols:
columns_wrong = True
json_resp['concept_message'] = 'CONCEPTS FILE - ' + concepts[i].name + ' - The column ' + el + ' is not present. The only columns allowed are: concept_url, concept_name'
break
if columns_wrong == True:
break
# if load_concepts is not None:
# for el in load_concepts:
# if el in df.usecase.unique():
# json_resp['concept_message'] = 'CONCEPTS FILE - ' + concepts[i].name + ' - You can not insert concepts files for the use case ' + el + ' after having decide to use EXAMODE concepts.'
# break
# header length must be the same, no extra columns
if len(list_db_col) != len(cols):
json_resp['concept_message'] = 'CONCEPTS FILE - ' + concepts[i].name + ' - The columns allowed are: concept_url, concept_name. If you inserted more (less) columns please, remove (add) them.'
break
# Check if the df has no rows
if df.shape[0] == 0:
json_resp['concept_message'] = 'CONCEPTS FILE - ' + concepts[i].name + ' - You must provide at least a concept.'
break
else:
# check if column concept_url has no duplicates
df_dup = df[df.duplicated(subset=['concept_url'], keep=False)]
if df_dup.shape[0] > 0:
json_resp['concept_message'] = 'WARNING CONCEPTS FILE - ' + concepts[i].name + ' - The rows: ' + str(df_dup.index.to_list()) + ' are duplicated. The duplicates will be ignored.'
# Check if there are None in mandatory cols
el = ''
if None in df['concept_url'].tolist():
el = 'concept_url'
elif None in df['concept_name'].tolist():
el = 'concept_name'
if el != '':
lista = df[el].tolist()
ind = lista.index(None)
json_resp['concept_message'] = 'CONCEPTS FILE - ' + concepts[i].name + ' - The column ' + el + ' is empty at the row: ' + str(ind) + ' .'
break
if concepts[i].name.endswith('json'):
# json_resp['concept_message'] = 'CONCEPTS FILE - ' + concepts[i].name + ' - The file must be .csv'
d = json.load(concepts[i])
if 'concepts_list' not in d.keys():
json_resp['concept_message'] = 'CONCEPTS FILE - ' + concepts[
i].name + ' - json is not well formatted'
break
if not isinstance(d['concepts_list'],list):
json_resp['concept_message'] = 'CONCEPTS FILE - ' + concepts[
i].name + ' - json is not well formatted'
break
if len(d['concepts_list']) == 0:
json_resp['concept_message'] = 'CONCEPTS FILE - ' + concepts[
i].name + ' - the list is empty'
break
dup_list = []
for element in d['concepts_list']:
if 'concept_url' not in element.keys() or 'concept_name' not in element.keys():
    json_resp['concept_message'] = 'CONCEPTS FILE - ' + concepts[i].name + ' - each element in the list of concepts must contain concept_url and concept_name'
break
for element in d['concepts_list']:
    if element['concept_url'] in dup_list:
        json_resp['concept_message'] = 'WARNING CONCEPTS FILE - ' + concepts[i].name + ' - Some concepts are duplicated, these will be ignored'
    else:
        dup_list.append(element['concept_url'])
if json_resp['concept_message'] == '':
json_resp['concept_message'] = 'Ok'
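# Validate the label files: a single 'label' column (csv), a 'labels' list (json) or one label per line (txt).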
if len(labels) > 0:
for i in range(len(labels)):
if labels[i].name.endswith('csv'):
# if not labels[i].name.endswith('csv'):
# json_resp['label_message'] = 'LABELS FILE - ' + labels[i].name + ' - The file must be .csv'
try:
df = pd.read_csv(labels[i])
df = df.where(pd.notnull(df), None)
df = df.reset_index(drop=True)
except Exception as e:
json_resp['label_message'] = 'LABELS FILE - ' + labels[i].name + ' - An error occurred while parsing the csv. Check if it is well formatted.'
pass
else:
cols = list(df.columns)
list_db_col = ['label']
# if 'usecase' in cols:
# df['usecase'] = df['usecase'].str.lower()
#
esco = False
for el in list_db_col:
if el not in cols:
esco = True
json_resp['label_message'] = 'LABELS FILE - ' + labels[i].name + ' - The column: ' + el + ' is not present. The only column allowed is: label.'
if esco == True:
break
if len(cols) != len(list_db_col):
json_resp['label_message'] = 'LABELS FILE - ' + labels[i].name + ' - The only column allowed is: label. If you inserted more columns please, remove them.'
break
if df.shape[0] == 0:
json_resp['label_message'] = 'LABELS FILE - ' + labels[i].name + ' - You must provide at least a row.'
break
else:
# check that the label column has no duplicates
df_dup = df[df.duplicated(subset=['label'], keep=False)]
if df_dup.shape[0] > 0:
json_resp['label_message'] = 'WARNING LABELS FILE - ' + labels[i].name + ' - The rows: ' + str(df_dup.index.to_list()) + ' are duplicated. The duplicates will be ignored.'
el = ''
# if None in df['usecase'].tolist():
# el = 'usecase'
if None in df['label'].tolist():
el = 'label'
if el != '':
lista = df[el].tolist()
ind = lista.index(None)
json_resp['label_message'] = 'LABELS FILE - ' + labels[i].name + ' - The column ' + el + ' is empty at the row: ' + str(ind) + ' .'
break
elif labels[i].name.endswith('.json'):
# with open(labels[i],'r') as f:
d = json.load(labels[i])
if 'labels' not in d.keys():
json_resp['label_message'] = 'LABELS FILE - ' + labels[i].name + ' - json is not well formatted.'
break
if d['labels'] == []:
json_resp['label_message'] = 'LABELS FILE - ' + labels[
i].name + ' - you must provide at least a label.'
break
labels_in_file = d['labels']
if len(labels_in_file) > len(set(labels_in_file)):
    json_resp['label_message'] = 'WARNING LABELS FILE - ' + labels[i].name + ' - the list of labels contains duplicates. They will be ignored.'
elif labels[i].name.endswith('.txt'):
# with open(labels[i], 'r') as f:
lines = labels[i].readlines()
labels_list = []
if len(lines) == 0:
json_resp['label_message'] = 'LABELS FILE - ' + labels[i].name + ' - you must provide at least a label.'
break
for line in lines:
line = line.decode('utf-8')
if line not in labels_list:
labels_list.append(line)
else:
json_resp['label_message'] = 'WARNING LABELS FILE - ' + labels[
i].name + ' - the list of labels contains duplicates. They will be ignored.'
if json_resp['label_message'] == '':
json_resp['label_message'] = 'Ok'
if len(jsonAnn) == 0 and len(jsonDisp) == 0 and len(reports)>0:
json_resp['fields_message'] = 'REPORT FIELDS TO DISPLAY AND ANNOTATE - Please provide at least one field to be displayed and/or at least one field to be annotated.'
elif len(jsonAnn) == 0 and len(reports)>0:
if json_resp['fields_message'] == '':
json_resp['fields_message'] = 'WARNING REPORT FIELDS TO ANNOTATE - ok but with this configuration you will not be able to perform mention annotation and linking. Please, select also at least a field to annotate if you want to find some mentions and link them.'
if len(reports) > 0:
if json_resp['fields_message'] == '':
json_resp['fields_message'] = 'Ok'
except Exception as e:
print(e)
json_resp['general_message'] = 'An error occurred. Please check if it is similar to the example we provided.'
return json_resp
else:
if json_resp['general_message'] == '':
json_resp['general_message'] = 'Ok'
return json_resp
import time
from datetime import date
def configure_data(pubmedfiles,reports, labels, concepts, jsondisp, jsonann, jsonall, username, password, topics,runs,tfidf):
"""This method is run after having checked the files inserted by the user"""
filename = ''
language = 'english'
error_location = 'database'
report_usecases = []
created_file = False
today = str(date.today())
try:
with transaction.atomic():
cursor = connection.cursor()
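# Reset all annotation-related tables: configuring a new collection wipes the previous one, and the surrounding transaction rolls everything back on failure.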
cursor.execute("DELETE FROM annotate;")
cursor.execute("DELETE FROM linked;")
cursor.execute("DELETE FROM associate;")
cursor.execute("DELETE FROM contains;")
cursor.execute("DELETE FROM mention;")
cursor.execute("DELETE FROM belong_to;")
cursor.execute("DELETE FROM annotation_label;")
cursor.execute("DELETE FROM concept;")
cursor.execute("DELETE FROM ground_truth_log_file;")
cursor.execute("DELETE FROM topic_has_document;")
cursor.execute("DELETE FROM report;")
cursor.execute("DELETE FROM use_case;")
cursor.execute("DELETE FROM semantic_area;")
# connection.commit()
cursor.execute("DELETE FROM public.user WHERE username = 'Test'")
cursor.execute("INSERT INTO semantic_area VALUES (%s)",('default_area',))
if username is not None and password is not None:
cursor.execute("INSERT INTO public.user (username,password,profile,ns_id) VALUES(%s,%s,%s,%s);",
(str(username), hashlib.md5(str(password).encode()).hexdigest(), 'Admin', 'Human'))
# cursor.execute("INSERT INTO public.user (username,password,profile,ns_id) VALUES(%s,%s,%s,%s);",
# (str(username), hashlib.md5(str(password).encode()).hexdigest(), 'Admin', 'Robot'))
fields = []
all_fields = []
fields_to_ann = []
jsonall = ''.join(jsonall)
jsondisp = ''.join(jsondisp)
jsonann = ''.join(jsonann)
jsonall = jsonall.split(',')
jsondisp = jsondisp.split(',')
jsonann = jsonann.split(',')
for el in jsonall:
if len(el) > 0:
all_fields.append(el)
for el in jsondisp:
if len(el) > 0:
fields.append(el)
if el not in all_fields:
all_fields.append(el)
for el in jsonann:
if len(el) > 0:
fields_to_ann.append(el)
if el not in all_fields:
all_fields.append(el)
language = 'english'
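# Parse the run files once: the resulting (topic, document[, language]) tuples are passed to the topic and collection parsers and then used to fill the topic-document associations below.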
arr_to_ret = elaborate_runs(runs)
error_location = 'Topic'
for topic in topics:
if topic.name.endswith('txt'):
elaborate_TREC_topic_files(arr_to_ret,topic)
elif topic.name.endswith('json'):
process_topic_json_file(arr_to_ret,topic)
elif topic.name.endswith('csv'):
process_topic_csv_file(arr_to_ret,topic)
error_location = 'Collection'
for file in reports:
reps = decompress_files([file])
for f in reps:
if isinstance(f, str):
file_name = f
workpath = os.path.dirname(os.path.abspath(__file__))  # Returns the Path your .py file is in
f = os.path.join(workpath, 'static/tmp/' + f)
else:
file_name = f.name
if file_name.endswith('json'):
find_docs_in_json_collection(arr_to_ret,f)
elif file_name.endswith('csv'):
find_docs_in_csv_collection(arr_to_ret,f)
for file in pubmedfiles:
# if file.name.endswith('json'):
find_docs_in_json_pubmed_collection(arr_to_ret,file)
error_location = 'Runs'
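# Link every topic to the documents listed in the runs through the topic_has_document table.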
for el in arr_to_ret:
    language = 'english'
    if len(el) == 3:
        language = el[2]
    topic = UseCase.objects.get(name=el[0])
    doc = Report.objects.get(id_report=str(el[1]), language=language)
    TopicHasDocument.objects.get_or_create(name=topic, language=doc.language, id_report=doc)
if len(labels) > 0:
labs = []
error_location = 'Labels'
for label_file in labels:
if label_file.name.endswith('csv'):
df_labels = pd.read_csv(label_file)
df_labels = df_labels.where(pd.notnull(df_labels), None)
df_labels = df_labels.reset_index(drop=True)
# df_labels['usecase'] = df_labels['usecase'].str.lower()
count_lab_rows = df_labels.shape[0]
for i in range(count_lab_rows):
label = str(df_labels.loc[i, 'label'])
labs.append(label.rstrip())
elif label_file.name.endswith('json'):
d = json.load(label_file)
labels = d['labels']
for label in labels:
labs.append(label.rstrip())
elif label_file.name.endswith('txt'):
lines = label_file.readlines()
for line in lines:
line = line.decode('utf-8')
labs.append(line.replace('\n',''))
for label in labs:
cursor.execute('SELECT * FROM annotation_label')
ans = cursor.fetchall()
if len(ans) == 0:
seq_number = 1
else:
cursor.execute('SELECT seq_number FROM annotation_label ORDER BY seq_number DESC;')
ans = cursor.fetchall()
seq_number = int(ans[0][0]) + 1
cursor.execute("SELECT * FROM annotation_label WHERE label = %s;",
(str(label),))
ans = cursor.fetchall()
if len(ans) == 0:
cursor.execute("INSERT INTO annotation_label (label,seq_number) VALUES (%s,%s);",
(str(label), int(seq_number)))
# Populate the concepts table
error_location = 'Concepts'
# if load_concepts is not None and load_concepts != '' and load_concepts !=[] and len(concepts) == 0:
# configure_concepts(cursor,load_concepts,'admin')
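# Each concept is attached to the 'default_area' semantic area; concepts already stored are skipped.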
for concept_file in concepts:
if concept_file.name.endswith('csv'):
df_concept = pd.read_csv(concept_file)
df_concept = df_concept.where(pd.notnull(df_concept), None)
df_concept = df_concept.reset_index(drop=True)
# df_concept['usecase'] = df_concept['usecase'].str.lower()
# print(df_concept)
count_conc_rows = df_concept.shape[0]
for i in range(count_conc_rows):
df_concept = df_concept.where(pd.notnull(df_concept), None)
concept_url = str(df_concept.loc[i, 'concept_url'])
concept_name = str(df_concept.loc[i, 'concept_name'])
# usecase = str(df_concept.loc[i, 'usecase'])
# semantic_area = str(df_concept.loc[i, 'area'])
cursor.execute("SELECT concept_url,json_concept FROM concept WHERE concept_url = %s;",
(str(concept_url),))
ans = cursor.fetchall()
if len(ans) == 0:
# json_concept = json.dumps({'provenance': 'admin', 'insertion_author': 'admin'})
cursor.execute("INSERT INTO concept (concept_url,name) VALUES (%s,%s);",
(str(concept_url), str(concept_name)))
cursor.execute("SELECT * FROM belong_to WHERE concept_url = %s AND name=%s;",
(str(concept_url), 'default_area'))
ans = cursor.fetchall()
if len(ans) == 0:
cursor.execute("INSERT INTO belong_to (concept_url,name) VALUES (%s,%s);",
(str(concept_url), 'default_area'))
elif concept_file.name.endswith('json'):
d = json.load(concept_file)
count_conc_rows = len(d['concepts_list'])
for i in range(count_conc_rows):
concept_url = str(d['concepts_list'][i]['concept_url'])
concept_name = str(d['concepts_list'][i]['concept_name'])
# usecase = str(df_concept.loc[i, 'usecase'])
# semantic_area = str(df_concept.loc[i, 'area'])
cursor.execute("SELECT concept_url,json_concept FROM concept WHERE concept_url = %s;",
(str(concept_url),))
ans = cursor.fetchall()
if len(ans) == 0:
# json_concept = json.dumps({'provenance': 'admin', 'insertion_author': 'admin'})
cursor.execute("INSERT INTO concept (concept_url,name) VALUES (%s,%s);",
(str(concept_url), str(concept_name)))
cursor.execute("SELECT * FROM belong_to WHERE concept_url = %s AND name=%s;",
(str(concept_url), 'default_area'))
ans = cursor.fetchall()
if len(ans) == 0:
cursor.execute("INSERT INTO belong_to (concept_url,name) VALUES (%s,%s);",
(str(concept_url), 'default_area'))
data = {}
data['fields'] = fields
data['fields_to_ann'] = fields_to_ann
data['all_fields'] = all_fields
version = get_version()
workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in
version_new = int(version) + 1
filename = 'fields' + str(version_new)
created_file = False
with open(os.path.join(workpath, './config_files/data/' + filename + '.json'), 'w') as outfile:
json.dump(data, outfile)
created_file = True
except (Exception, psycopg2.Error) as e:
print(e)
print('rollback')
# connection.rollback()
if created_file == True:
workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in
if filename != '' and filename != 'fields0':
path = os.path.join(workpath, './config_files/data/' + filename + '.json')
os.remove(path)
json_resp = {'error': 'an error occurred in: ' + error_location + '.'}
return json_resp
else:
# connection.commit()
if created_file == True:
for filen in os.listdir(os.path.join(workpath, './config_files/data')):
if filen.endswith('json'):
if filen != '' and filen != 'fields0.json' and filen != filename+'.json':
path = os.path.join(workpath, './config_files/data/' + filen )
os.remove(path)
outfile.close()
if tfidf is not None or (len(runs) > 0 and len(topics) > 0 and (len(reports) > 0) or len(pubmedfiles) > 0):
print(str(tfidf))
cursor = connection.cursor()
cursor.execute('SELECT DISTINCT language FROM report')
ans = cursor.fetchall()
languages = []
for el in ans:
languages.append(el[0])
st = time.time()
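# When a positive TF-IDF k is given, compute for every topic/document pair the top-k matching words and cache them in config_files/tf_idf_map.json.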
if tfidf is not None and str(tfidf).isdigit() and int(tfidf) > 0:
workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in
path1 = os.path.join(workpath, './config_files/config.json')
g = open(path1,'r')
data = json.load(g)
data['TF-IDF_k'] = tfidf
with open(path1, 'w') as f:
json.dump(data, f)
t = UseCase.objects.all()
cursor = connection.cursor()
json_to_write = {}
for top in t:
print('topic:'+str(top))
json_to_write[top.name] = {}
topic = {}
corpus = []
cursor.execute(
"SELECT r.id_report,r.language,r.report_json FROM report as r inner join topic_has_document as t on t.id_report = r.id_report and r.language = t.language where t.name = %s",
[str(top.name)])
ans = cursor.fetchall()
for el in ans:
e = json.loads(el[2])
r_j1 = {}
r_j1['document_id'] = str(el[0])
r_j1['text'] = ''
for k in e.keys():
if k != 'document_id' or (str(el[0]).startswith('PUBMED_') and (k == 'abstract' or k == 'title')):
r_j1['text'] = r_j1['text'] + ' ' + str(e[k])
if el[1].lower() in LANGUAGES_NLTK:
corpus.append(r_j1)
topic['title'] = top.title
topic['description'] = top.description
# df_tfidf = gen_tfidf_map(corpus,language)
for el in ans:
if el[1].lower() in LANGUAGES_NLTK:
language = el[1].lower()
start = time.time()
print('working on ', str(el[0]))
e = json.loads(el[2])
r_j1 = {}
r_j1['document_id'] = str(el[0])
r_j1['text'] = ''
for k in e.keys():
# print(k)
# print(e[k])
if isinstance(e[k],list):
e[k] = ', '.join(e[k])
if k != 'document_id' and k != 'language' and e[k] is not None:
r_j1['text'] = r_j1['text'] + ' ' + e[k]
tfidf_matcher = QueryDocMatcher(topic=topic,doc= r_j1, corpus=corpus,language=language)
top_k_matching_words = tfidf_matcher.get_words_to_highlight()
# print(top_k_matching_words)
# json_val = {}
# json_val[str(el[0])] = top_k_matching_words
# json_val['words'] = top_k_matching_words
json_to_write[top.name][str(el[0])] = top_k_matching_words
# print(json_to_write)
end = time.time()
print('elaborated in '+str(end-start)+' seconds')
else:
json_to_write = {}
end = time.time()
print('time',end-st)
path2 = os.path.join(workpath, './config_files/tf_idf_map.json')
with open(path2, 'w') as f:
json.dump(json_to_write, f)
json_resp = {'message': 'Ok'}
return json_resp
#-------------------UPDATE----------------------------
def check_for_update(type_req, pubmedfiles, reports, labels, concepts, jsonDisp, jsonAnn, jsonDispUp, jsonAnnUp,topics,runs,tf_idf):
"""This method checks the files inserted by the user to update the db"""
keys = get_fields_from_json()
ann = keys['fields_to_ann']
disp = keys['fields']
tops = []
docs = []
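# Incremental update: the new files are checked both for internal consistency and against what is already stored in the database (duplicates are reported as warnings).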
if jsonDispUp is not None and jsonAnnUp is not None:
jsonDispUp = ''.join(jsonDispUp)
jsonAnnUp = ''.join(jsonAnnUp)
jsonDispUp = jsonDispUp.split(',')
jsonAnnUp = jsonAnnUp.split(',')
try:
cursor = connection.cursor()
message = ''
if tf_idf is not None and not str(tf_idf).isdigit():
    message = 'TF-IDF - the value must include only digits'
    return message
if len(concepts) > 0:
message = ''
for i in range(len(concepts)):
if not concepts[i].name.endswith('csv') and not concepts[i].name.endswith('json'):
message = 'CONCEPTS FILE - ' + concepts[i].name + ' - The file must be .csv, .json'
return message
if concepts[i].name.endswith('csv'):
try:
df = pd.read_csv(concepts[i])
df = df.where(pd.notnull(df), None)
df = df.reset_index(drop=True)
except Exception as e:
message = 'CONCEPTS FILE - ' + concepts[
i].name + ' - An error occurred while parsing the csv. Check if it is well formatted.'
return message
else:
list_db_col = ['concept_url', 'concept_name']
cols = list(df.columns)
for el in list_db_col:
if el not in cols:
message = 'CONCEPTS FILE - ' + concepts[i].name + ' - The column: ' + el + ' is missing. Please, add it.'
return message
if len(list_db_col) != len(cols):
message = 'CONCEPTS FILE - ' + concepts[i].name + ' - The columns allowed are: concept_url, concept_name. If you inserted more (less) columns please, remove (add) them.'
return message
if df.shape[0] == 0:
message = 'CONCEPTS FILE - ' + concepts[i].name + ' - You must provide at least a concept.'
return message
else:
# duplicates in file
df_dup = df[df.duplicated(subset=['concept_url'], keep=False)]
if df_dup.shape[0] > 0:
message = 'WARNING CONCEPTS FILE - ' + concepts[i].name + ' - The rows: ' + str(
df_dup.index.to_list()) + ' are duplicated. The duplicates will be ignored.'
el = ''
if None in df['concept_url'].tolist():
el = 'concept_url'
elif None in df['concept_name'].tolist():
el = 'concept_name'
if el != '':
lista = df[el].tolist()
ind = lista.index(None)
message = 'CONCEPTS FILE - ' + concepts[i].name + ' - The column ' + el + ' is empty at the row: ' + str(ind) + '.'
return message
# Check for duplicates in db
for ind in range(df.shape[0]):
cursor.execute('SELECT COUNT(*) FROM concept WHERE concept_url = %s',
[str(df.loc[ind, 'concept_url'])])
num = cursor.fetchone()
cursor.execute('SELECT COUNT(*) FROM belong_to WHERE concept_url = %s and name = %s',
               [str(df.loc[ind, 'concept_url']), 'default_area'])
num_b = cursor.fetchone()
if num[0] > 0 and num_b[0] > 0:
message = 'WARNING CONCEPTS FILE - ' + concepts[i].name + ' - The concept: ' + str(
df.loc[ind, 'concept_url']) + ' is already present in the database. It will be ignored.'
if concepts[i].name.endswith('json'):
# json_resp['concept_message'] = 'CONCEPTS FILE - ' + concepts[i].name + ' - The file must be .csv'
d = json.load(concepts[i])
if 'concepts_list' not in d.keys():
    message = 'CONCEPTS FILE - ' + concepts[i].name + ' - json is not well formatted'
    return message
if not isinstance(d['concepts_list'], list):
message = 'CONCEPTS FILE - ' + concepts[
i].name + ' - json is not well formatted'
return message
if len(d['concepts_list']) == 0:
message = 'CONCEPTS FILE - ' + concepts[
i].name + ' - the list is empty'
return message
dup_list = []
for element in d['concepts_list']:
if 'concept_url' not in element.keys() or 'concept_name' not in element.keys():
    message = 'CONCEPTS FILE - ' + concepts[i].name + ' - each element in the list of concepts must contain concept_url and concept_name fields'
return message
for element in d['concepts_list']:
    if element['concept_url'] in dup_list:
        message = 'WARNING CONCEPTS FILE - ' + concepts[i].name + ' - Some concepts are duplicated, these will be ignored'
        return message
    dup_list.append(element['concept_url'])
elif len(labels) > 0:
message = ''
for i in range(len(labels)):
if not labels[i].name.endswith('csv') and not labels[i].name.endswith('json') and not labels[i].name.endswith('txt'):
message = 'LABELS FILE - ' + labels[i].name + ' - The file must be .csv, .json, .txt'
return message
if labels[i].name.endswith('csv'):
try:
df = pd.read_csv(labels[i])
df = df.where(pd.notnull(df), None)
df = df.reset_index(drop=True)
except Exception as e:
message = 'LABELS FILE - ' + labels[i].name + ' - An error occurred while parsing the csv. Check if it is well formatted.'
return message
else:
cols = list(df.columns)
list_db_col = ['label']
# if 'usecase' in cols:
# df['usecase'] = df['usecase'].str.lower()
#
esco = False
for el in list_db_col:
if el not in cols:
esco = True
message = 'LABELS FILE - ' + labels[i].name + ' - The column: ' + el + ' is not present. The only column allowed is: label.'
return message
if esco == True:
break
if len(cols) != len(list_db_col):
message = 'LABELS FILE - ' + labels[i].name + ' - The only column allowed is: label. If you inserted more columns please, remove them.'
return message
if df.shape[0] == 0:
message = 'LABELS FILE - ' + labels[i].name + ' - You must provide at least a row.'
return message
else:
# check that the label column has no duplicates
df_dup = df[df.duplicated(subset=['label'], keep=False)]
if df_dup.shape[0] > 0:
message = 'WARNING LABELS FILE - ' + labels[i].name + ' - The rows: ' + str(df_dup.index.to_list()) + ' are duplicated. The duplicates will be ignored.'
return message
el = ''
# if None in df['usecase'].tolist():
# el = 'usecase'
if None in df['label'].tolist():
el = 'label'
if el != '':
lista = df[el].tolist()
ind = lista.index(None)
message = 'LABELS FILE - ' + labels[i].name + ' - The column ' + el + ' is empty at the row: ' + str(ind) + ' .'
return message
for ind in range(df.shape[0]):
cursor.execute('SELECT COUNT(*) FROM annotation_label WHERE label = %s',
[str(df.loc[ind, 'label'])])
num = cursor.fetchone()
if num[0] > 0:
message = 'WARNING LABELS FILE - ' + labels[i].name + ' - The label: ' + str(df.loc[ind, 'label']) + ' is already present in the database. It will be ignored.'
elif labels[i].name.endswith('.json'):
# with open(labels[i],'r') as f:
d = json.load(labels[i])
if 'labels' not in d.keys():
message = 'LABELS FILE - ' + labels[i].name + ' - json is not well formatted.'
return message
if d['labels'] == []:
message = 'LABELS FILE - ' + labels[
i].name + ' - you must provide at least a label.'
return message
l = d['labels']
if len(l) > len(set(l)):
message = 'WARNING LABELS FILE - ' + labels[
i].name + ' - the list of labels contains duplicates. They will be ignored.'
for ind in range(len(l)):
cursor.execute('SELECT COUNT(*) FROM annotation_label WHERE label = %s',
[str(l[ind])])
num = cursor.fetchone()
if num[0] > 0:
message = 'WARNING LABELS FILE - ' + labels[i].name + ' - The label: ' + str(l[ind]) + ' is already present in the database. It will be ignored.'
elif labels[i].name.endswith('.txt'):
# with open(labels[i], 'r') as f:
lines = labels[i].readlines()
labels_list = []
if len(lines) == 0:
message = 'LABELS FILE - ' + labels[i].name + ' - you must provide at least a label.'
return message
for line in lines:
line = line.decode('utf-8')
if line not in labels_list:
labels_list.append(line)
else:
message = 'WARNING LABELS FILE - ' + labels[
i].name + ' - the list of labels contains duplicates. They will be ignored.'
for ind in range(len(labels_list)):
cursor.execute('SELECT COUNT(*) FROM annotation_label WHERE label = %s',
[str(labels_list[ind])])
num = cursor.fetchone()
if num[0] > 0:
message = 'WARNING LABELS FILE - ' + labels[i].name + ' - The label: ' + str(labels_list[ind]) + ' is already present in the database. It will be ignored.'
return message
elif (len(pubmedfiles) > 0 or len(reports) > 0) and len(topics) > 0 and len(runs) > 0:
message = ''
documents_ids = []
topics_ids = []
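# Preload the topic and document ids already stored in the database so the new uploads can be checked against existing material.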
to = UseCase.objects.all().values('name')
for el in to:
topics_ids.append(el['name'])
ids = Report.objects.all().values('id_report')
for el in ids:
documents_ids.append(str(el['id_report']))
for i in range(len(pubmedfiles)):
# Error if the file is not csv
if not pubmedfiles[i].name.endswith('csv') and not pubmedfiles[i].name.endswith('json') and not \
pubmedfiles[i].name.endswith(
'txt'):
message = 'PUBMED FILE - ' + pubmedfiles[
i].name + ' - The file must be .csv or .json or .txt'
return message
if pubmedfiles[i].name.endswith('csv'):
try:
df = pd.read_csv(pubmedfiles[i])
df = df.where(pd.notnull(df), None)
df = df.reset_index(drop=True) # Useful if the csv includes only commas
except Exception as e:
print(e)
message = 'PUBMED FILE - ' + pubmedfiles[
i].name + ' - An error occurred while parsing the csv. Check if it is well formatted. Check if it contains as many columns as they are declared in the header.'
return message
else:
# check if colunns are allowed and without duplicates
cols = list(df.columns)
list_db_col = ['document_id']
for el in list_db_col:
if el not in cols and el == 'document_id':
message = 'PUBMED FILE - ' + pubmedfiles[
i].name + ' - The column: ' + el + ' is missing, please add it.'
return message
for column in cols:
null_val = df[df[column].isnull()].index.tolist()
if len(null_val) > 0:
message = 'PUBMED FILE - ' + pubmedfiles[i].name + ' - You did not insert the ' + column + ' for rows: ' + ', '.join(str(v) for v in null_val)
# Check if the csv is empty with 0 rows
if df.shape[0] == 0:
message = 'PUBMED FILE - ' + pubmedfiles[
i].name + ' - You must provide at least a report.'
return message
else:
# check if columns id_report and language have no duplicates
# if 'language' in df:
# df_dup = df[df.duplicated(subset=['document_id', 'language'], keep=False)]
# else:
df_dup = df[df.duplicated(subset=['document_id'], keep=False)]
if df_dup.shape[0] > 0:
message = 'WARNING PUBMED FILE - ' + pubmedfiles[
i].name + ' - The rows: ' + str(
df_dup.index.to_list()) + ' are duplicated. The duplicates are ignored.'
for ind in range(df.shape[0]):
found = False
id_report = 'PUBMED_' + str(df.loc[ind, 'document_id'])
cursor.execute('SELECT COUNT(*) FROM report WHERE id_report = %s AND institute = %s',
[str(id_report), 'PUBMED'])
num = cursor.fetchone()
if num[0] > 0:
message = 'WARNING PUBMED FILE - ' + pubmedfiles[i].name + ' - The report: ' + str(
id_report) + ' is already present in the database. It will be ignored.'
for el in list_db_col:
if df.loc[ind, el] is not None:
found = True
break
if found == False:
message = 'PUBMED FILE - ' + pubmedfiles[i].name + ' - The report at row ' + str(
ind) + ' has the columns: ' + ', '.join(
list_db_col) + ' empty. Provide a value for at least one of these columns.'
return message
el = ''
if None in df['document_id'].tolist():
    el = 'document_id'
if el != '':
lista = df[el].tolist()
ind = lista.index(None)
message = 'PUBMED FILE - ' + pubmedfiles[
i].name + ' - The column ' + el + ' is empty at the row: ' + str(ind) + '.'
return message
elif pubmedfiles[i].name.endswith('json'):
    d = json.load(pubmedfiles[i])
    if 'pubmed_ids' not in d.keys():
        message = 'PUBMED FILE - ' + pubmedfiles[i].name + ' - json is not well formatted.'
        return message
    if d['pubmed_ids'] == []:
        message = 'PUBMED FILE - ' + pubmedfiles[i].name + ' - you must provide at least an article id.'
        return message
if not isinstance(d['pubmed_ids'], list):
message = 'PUBMED FILE - ' + pubmedfiles[i].name + ' - the pubmed_ids field must be a list of article ids.'
return message
if len(d['pubmed_ids']) != len(list(set(d['pubmed_ids']))):
message = 'WARNING PUBMED FILE - ' + pubmedfiles[i].name + ' - some ids seem to be duplicated. They will be ignored.'
return message
for el in d['pubmed_ids']:
id_report = 'PUBMED_' + str(str(el))
cursor.execute('SELECT COUNT(*) FROM report WHERE id_report = %s AND institute = %s',
[str(id_report), 'PUBMED'])
num = cursor.fetchone()
if num[0] > 0:
message = 'WARNING PUBMED FILE - ' + pubmedfiles[i].name + ' - The report: ' + str(
id_report) + ' is already present in the database. It will be ignored.'
elif pubmedfiles[i].name.endswith('txt'):
lines = pubmedfiles[i].readlines()
if len(lines) == 0:
message = 'PUBMED FILE - ' + pubmedfiles[i].name + ' - the file is empty.'
return message
if len(lines) != len(list(set(lines))):
message = 'WARNING PUBMED FILE - ' + pubmedfiles[i].name + ' - the file contains some duplicates: they will be ignored.'
for line in lines:
id_report = 'PUBMED_' + line.decode('utf-8').split()[0]
cursor.execute('SELECT COUNT(*) FROM report WHERE id_report = %s AND institute = %s',
[str(id_report), 'PUBMED'])
num = cursor.fetchone()
if num[0] > 0:
message = 'WARNING PUBMED FILE - ' + pubmedfiles[i].name + ' - The report: ' + str(
id_report) + ' is already present in the database. It will be ignored.'
# elif len(reports) > 0 and len(runs) > 0 and len(topics) > 0:
# message = ''
for i in range(len(reports)):
reps = decompress_files([reports[i]])
for rep in reps:
if isinstance(rep, str):
rep_name = rep
workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in
rep = os.path.join(workpath, 'static/tmp/' + rep_name)
else:
rep_name = rep.name
if not rep_name.endswith('csv') and not rep_name.endswith('txt') and not rep_name.endswith('json'):
message = 'DOCUMENTS FILE - ' + rep_name + ' - The file must be .csv, .json, .txt'
return message
if rep_name.endswith('csv'):
try:
df = pd.read_csv(rep)
df = df.where(pd.notnull(df), None)
df = df.reset_index(drop=True)
except Exception as e:
message = 'DOCUMENTS FILE - ' + rep_name + ' - An error occurred while parsing the csv. Check if it is well formatted. '
return message
else:
cols = list(df.columns)
count = 0
list_db_col = ['document_id','language']
list_not_db_col = []
for el in list_db_col:
if el not in cols and el == 'document_id':
message = 'DOCUMENTS FILE - ' + rep_name + ' - The column: ' + str(el) + ' must be present.'
return message
for id in list(df.document_id.unique()):
# if str(id) in documents_ids:
# json_resp['report_message'] = 'WARNING DOCUMENTS FILE - ' + rep_name + ' - The id: ' + str(id) + ' is duplicated. The duplicates are ignored.'
# else:
documents_ids.append(str(id))
for el in cols:
if el not in list_db_col:
list_not_db_col.append(el)
if jsonDispUp is not None and jsonAnnUp is not None:
if len(disp) > 0 or len(ann) > 0:
ann_intersect = list(set(ann) & set(list_not_db_col))
for el in list_not_db_col:
if (el not in disp and el not in ann) and (el not in jsonDispUp and el not in jsonAnnUp):
count = count + 1
if count == len(list_not_db_col):
message = 'DOCUMENTS FIELDS - Please, provide at least one field to display in file: ' + \
rep_name + '. Be careful that if you do not provide one field to annotate you will not be able to perform mention annotation and linking.'
return message
elif len(ann_intersect) == 0 and (jsonAnnUp[0]) == '':
message = 'WARNING DOCUMENTS FIELDS - file: ' + rep_name + ' Please, provide at least one field to annotate if you want to find mentions and perform linking.'
if len(list_not_db_col) == 0:
message = 'DOCUMENTS FILE - ' + rep_name + ' - You must provide at least one column other than document_id'
return message
if df.shape[0] == 0:
message = 'DOCUMENTS FILE - ' + rep_name + ' - You must provide at least a report.'
return message
else:
df_dup = df[df.duplicated(subset=['document_id'], keep=False)]
if 'language' in df:
df_dup = df[df.duplicated(subset=['document_id','language'], keep=False)]
if df_dup.shape[0] > 0:
message = 'WARNING DOCUMENTS FILE - ' + rep_name + ' - The rows: ' + str(
df_dup.index.to_list()) + ' are duplicated. The duplicates are ignored.'
for id in list(df.document_id.unique()):
if str(id) in documents_ids:
message = 'WARNING DOCUMENTS FILE - ' + rep_name + ' - The id: ' + str(
id) + ' is duplicated. The duplicates are ignored.'
else:
documents_ids.append(str(id))
for ind in range(df.shape[0]):
found = False
if 'language' in df:
language = str(df.loc[ind, 'language'])
else:
language = 'english'
cursor.execute('SELECT COUNT(*) FROM report WHERE id_report = %s AND language = %s',
[str(df.loc[ind, 'document_id']),language])
num = cursor.fetchone()
if num[0] > 0:
message = 'WARNING DOCUMENTS FILE - ' + rep_name + ' - The report: ' + str(
df.loc[ind, 'document_id']) + ' is already present in the database. It will be ignored.'
for el in list_db_col:
if df.loc[ind, el] is not None:
found = True
break
if found == False:
message = 'DOCUMENTS FILE - ' + rep_name + ' - The report at row ' + str(
ind) + ' has the column: ' + ', '.join(
list_db_col) + ' empty. '
return message
found = False
count_both = 0
not_none_cols = []
for el in list_not_db_col:
if df.loc[ind, el] is not None:
found = True
not_none_cols.append(el)
if found == False:
message = 'DOCUMENTS FILE - ' + rep_name + ' - The report at row ' + str(
ind) + ' has the columns: ' + ', '.join(
list_not_db_col) + ' empty. Provide a value for at least one of these columns, or delete this report from the csv file.'
return message
for el in not_none_cols:
if jsonAnnUp is not None and jsonDispUp is not None:
if el not in disp and el not in jsonDispUp and el not in ann and el not in jsonAnnUp:
count_both = count_both + 1
else:
if el not in disp and el not in ann:
count_both = count_both + 1
if count_both == len(not_none_cols):
message = 'WARNING DOCUMENTS FIELDS TO DISPLAY AND ANNOTATE - ' + rep_name + ' - With the current configuration the report at the row: ' + str(
ind) + ' would not be displayed since the columns to display are all empty for that report.'
# for el in df.institute.unique():
# if el.lower() == 'pubmed':
# message = 'REPORTS FILE - ' + reports[
# i].name + ' - calling an institute "PUBMED" is forbidden, please, change the name'
#
for el in df.document_id:
    if str(el).lower().startswith('pubmed_'):
        message = 'DOCUMENTS FILE - ' + rep_name + ' - reports\' ids cannot start with "PUBMED_", please, change the name'
el = ''
if None in df['document_id'].tolist():
el = 'document_id'
if 'language' in df:
el = 'language'
if el != '':
lista = df[el].tolist()
ind = lista.index(None)
message = 'DOCUMENTS FILE - ' + rep_name + ' - The column ' + el + ' is empty at the row: ' + str(ind) + '.'
return message
elif rep_name.endswith('json'):
    # open the file only if it was extracted to a temporary path (json.load needs a file object)
    if isinstance(rep, str):
        rep = open(rep, 'r')
    d = json.load(rep)
if 'collection' not in d.keys():
message = 'DOCUMENTS FILE - ' + rep_name + ' - The json is not well formatted.'
break
exit = False
keys_list = []
if len(d['collection']) == 0:
message = 'DOCUMENTS FILE - ' + rep_name + ' - You must provide at least a report.'
break
for document in d['collection']:
    ind = d['collection'].index(document)
    # check the mandatory document_id key before querying the database with it
    if 'document_id' not in document.keys() or document['document_id'] is None:
        message = 'DOCUMENTS FILE - ' + rep_name + ' - The ' + str(ind) + ' document does not contain the "document_id" key which is mandatory.'
        exit = True
        break
    if 'language' in document.keys():
        language = document['language']
    else:
        language = 'english'
    cursor.execute('SELECT COUNT(*) FROM report WHERE id_report = %s AND language = %s',
                   [document['document_id'], language])
    num = cursor.fetchone()
    if num[0] > 0:
        message = 'WARNING DOCUMENTS FILE - ' + rep_name + ' - The report: ' + str(document['document_id']) + ' is already present in the database. It will be ignored.'
doc_keys = list(document.keys())
if 'language' in list(document.keys()):
doc_keys.remove('language')
doc_keys.remove('document_id')
is_none = True
for key in list(document.keys()):
if key != 'document_id' and key != 'language':
if document[key] is not None:
is_none = False
break
if ('document_id' in list(document.keys()) and len(list(document.keys())) == 1) or ('language' in list(document.keys()) and len(list(document.keys())) == 2) or is_none:
message = 'DOCUMENTS FILE - ' + rep_name + ' - The ' + str(ind) + ' document does not contain the document\' s text.'
keys_list.extend(list(document.keys()))
if str(document['document_id']) in documents_ids:
message = 'WARNING DOCUMENTS FILE - ' + rep_name + ' - The id ' + str(document['document_id']) + ' is duplicated.'
else:
documents_ids.append(str(document['document_id']))
count_both = 0
for el in doc_keys:
if jsonAnnUp is not None and jsonDispUp is not None:
if el not in disp and el not in jsonDispUp and el not in ann and el not in jsonAnnUp:
count_both = count_both + 1
else:
if el not in disp and el not in ann:
count_both = count_both + 1
if count_both == len(doc_keys):
message = 'WARNING DOCUMENTS FIELDS TO DISPLAY AND ANNOTATE - ' + rep_name + ' - With the current configuration the report at the row: ' + str(ind) + ' would not be displayed since the columns to display are all empty for that report.'
if exit == True:
break
if len(topics) > 0:
for i in range(len(topics)):
if not topics[i].name.endswith('csv') and not topics[i].name.endswith('json') and not topics[
i].name.endswith('txt'):
message = 'TOPIC FILE - ' + topics[i].name + ' - The file must be .csv or .json or .txt'
return message
if topics[i].name.endswith('csv'):
try:
                    df = pd.read_csv(topics[i])
df = df.where(pd.notnull(df), None)
df = df.reset_index(drop=True)
except Exception as e:
message = 'TOPIC FILE - ' + topics[
i].name + ' - An error occurred while parsing the csv. Check if is well formatted.'
pass
else:
cols = list(df.columns)
list_db_col = ['topic_id', 'title', 'description', 'narrative']
for el in list_db_col:
if el not in cols and el == 'topic_id':
message = 'TOPIC FILE - ' + topics[
i].name + ' - The column: ' + el + ' is not present and it is mandatory.'
return message
elif el not in cols:
message = 'WARNING TOPIC FILE - ' + topics[
i].name + ' - The column: ' + el + ' is not present.'
for el in cols:
if el not in list_db_col:
message = 'TOPIC FILE - ' + topics[
i].name + ' - The column ' + el + ' is not allowed.'
return message
for id in list(df.topic_id.unique()):
if id in topics_ids:
message = 'WARNING TOPIC FILE - ' + topics[i].name + ' - The topic: ' + str(id) + ' is duplicated. The duplicates are ignored.'
else:
topics_ids.append(id)
if df.shape[0] == 0:
message = 'TOPIC FILE - ' + topics[
i].name + ' - You must provide at least a row.'
return message
else:
# check if columns annotation_label and name have no duplicates
df_dup = df[df.duplicated(subset=['topic_id'], keep=False)]
if df_dup.shape[0] > 0:
message = 'WARNING TOPIC FILE - ' + topics[
i].name + ' - The rows: ' + str(
df_dup.index.to_list()) + ' are duplicated. The duplicates will be ignored.'
el = ''
if None in df['topic_id'].tolist():
el = 'topic_id'
if el != '':
lista = df[el].tolist()
ind = lista.index(None)
message = 'TOPIC FILE - ' + topics[
i].name + ' - The column ' + el + ' is empty at the row: ' + str(ind) + ' .'
return message
elif topics[i].name.endswith('.txt'):
arr_to_ret = elaborate_runs(runs)
topics_ids = elaborate_TREC_topic_files([], topics[i], 'check')
                topics_ids = [str(t) for t in topics_ids]
if isinstance(topics_ids, list) == False:
message = 'TOPIC FILE - ' + topics[
i].name + ' - topics are not well formatted.'
elif topics[i].name.endswith('.json'):
# with open(topics[i], 'r') as f:
d = json.load(topics[i])
doc_top = []
if 'topics' not in d.keys():
message = 'TOPIC FILE - ' + topics[
i].name + ' - json is not well formatted.'
return message
if d['topics'] == []:
                    message = 'TOPIC FILE - ' + topics[
                        i].name + ' - you must provide at least a topic.'
return message
for topic in d['topics']:
ind = d['topics'].index(topic)
if 'topic_id' not in topic.keys():
message = 'TOPIC FILE - ' + topics[
i].name + ' - you must provide a topic number in the ' + str(ind) + ' th topic.'
return message
doc_top.append(topic['topic_id'])
if str(topic['topic_id']) in topics_ids:
message = 'WARNING TOPIC FILE - ' + topics[
i].name + ' - the list of topics contains duplicates. They will be ignored.'
else:
topics_ids.append(str(topic['topic_id']))
if len(doc_top) > len(set(doc_top)):
message = 'WARNING TOPIC FILE - ' + topics[
i].name + ' - the list of topics contains duplicates. They will be ignored.'
if len(runs) > 0:
language = 'english'
for i in range(len(runs)):
if not runs[i].name.endswith('csv') and not runs[i].name.endswith('json') and not runs[i].name.endswith('txt'):
message = 'RUNS FILE - ' + runs[i].name + ' - The file must be .csv or .json or .txt'
break
if runs[i].name.endswith('csv'):
try:
df = pd.read_csv(runs[i])
df = df.where(pd.notnull(df), None)
df = df.reset_index(drop=True)
except Exception as e:
message = 'RUNS FILE - ' + runs[
i].name + ' - An error occurred while parsing the csv. Check if is well formatted.'
return message
else:
cols = list(df.columns)
list_db_col = ['topic_id', 'document_id', 'language']
for el in list_db_col:
if el not in cols and el != 'language':
message = 'RUNS FILE - ' + runs[
i].name + ' - The column: ' + el + ' is not present and it is mandatory.'
return message
for el in cols:
if el not in list_db_col:
message = 'RUNS FILE - ' + runs[
i].name + ' - The column ' + el + ' is not allowed.'
return message
if df.shape[0] == 0:
message = 'RUNS FILE - ' + runs[
i].name + ' - You must provide at least a row.'
return message
else:
# check if columns annotation_label and name have no duplicates
df_dup = df[df.duplicated(subset=['topic_id', 'document_id'], keep=False)]
if 'language' in df:
df_dup = df[df.duplicated(subset=['topic_id', 'document_id','language'], keep=False)]
if df_dup.shape[0] > 0:
message = 'WARNING RUNS FILE - ' + runs[
i].name + ' - The rows: ' + str(
df_dup.index.to_list()) + ' are duplicated. The duplicates will be ignored.'
for i in range(df.shape[0]):
doc = str(df.loc[i,'document_id'])
top = str(df.loc[i,'topic_id'])
if 'language' in df:
language = str(df.loc[i,'language'])
report = Report.objects.get(id_report = str(doc),language = language)
topic = UseCase.objects.get(name = str(top))
v = TopicHasDocument.objects.filter(name = topic,id_report = report,language =language)
if v.count() > 0:
message = 'WARNING RUNS FILE - ' + runs[i].name + ' - The topic: ' + str(topic) +' is already associated to the document: ' + str(doc)+ ' it will be ignored.'
el = ''
# if None in df['usecase'].tolist():
# el = 'usecase'
if None in df['topic_id'].tolist():
el = 'topic_id'
if None in df['document_id'].tolist():
el = 'document_id'
if 'language' in df:
if None in df['language'].tolist():
el = 'language'
if el != '':
lista = df[el].tolist()
ind = lista.index(None)
                        message = 'RUNS FILE - ' + runs[
                            i].name + ' - The column ' + el + ' is empty at the row: ' + str(ind) + ' .'
return message
tops.extend(df.topic_id.unique())
for el in df.topic_id.unique():
if el not in topics_ids:
message = 'RUNS FILE - ' + runs[
i].name + ' - The topic: ' + str(el) + ' is not in the provided list of topics.'
return message
docs.extend(list(df.document_id.unique()))
for el in (df.document_id.unique()):
if str(el) not in documents_ids:
message = 'RUNS FILE - ' + runs[
i].name + ' - The document: ' + str(
el) + ' is not in the provided list of documents.'
return message
elif runs[i].name.endswith('.json'):
# with open(runs[i], 'r') as f:
d = json.load(runs[i])
if 'run' not in d.keys():
message = 'RUNS FILE - ' + runs[
i].name + ' - json is not well formatted.'
return message
if d['run'] == []:
message = 'RUNS FILE - ' + runs[
i].name + ' - you must provide at least a topic and one or more documents associated.'
return message
for r in d['run']:
ind = d['run'].index(r)
tops.append(r['topic_id'])
docs.extend(r['documents'])
if 'topic_id' not in r.keys():
                        message = 'RUNS FILE - ' + runs[
                            i].name + ' - you must provide a topic number in the ' + str(
                            ind) + ' th element.'
return message
if 'documents' not in r.keys():
                        message = 'RUNS FILE - ' + runs[
                            i].name + ' - you must provide a documents list for the topic: ' + str(
                            r['topic_id']) + '.'
return message
for el in r['documents']:
if isinstance(el,str) or isinstance(el,int):
if Report.objects.filter(id_report=str(el)).exists():
report = Report.objects.get(id_report=str(el), language='english')
topic = UseCase.objects.get(name=str(r['topic_id']))
v = TopicHasDocument.objects.filter(name=topic, id_report=report,
language='english')
if v.count() > 0:
message = 'WARNING RUNS FILE - ' + runs[i].name + ' - The topic: ' + str(
topic) + ' is already associated to the document: ' + str(
el) + ' it will be ignored.'
elif isinstance(el,dict):
if Report.objects.filter(id_report=str(el['document_id'])).exists():
report = Report.objects.get(id_report=str(el['document_id']), language=el['language'])
topic = UseCase.objects.get(name=str(r['topic_id']))
v = TopicHasDocument.objects.filter(name=topic, id_report=report,
language=el['language'])
if v.count() > 0:
message = 'WARNING RUNS FILE - ' + runs[i].name + ' - The topic: ' + str(
topic) + ' is already associated to the document: ' + str(
el['document_id']) + ' it will be ignored.'
for el in tops:
if str(el) not in topics_ids:
message = 'RUNS FILE - ' + runs[i].name + ' - The topic: ' + str(el) + ' is not in the provided list of topics.'
return message
for el in docs:
if isinstance(el,dict):
el = el['document_id']
if str(el) not in documents_ids:
message = 'RUNS FILE - ' + runs[
i].name + ' - The document: ' + str(
el) + ' is not in the provided list of documents.'
return message
elif runs[i].name.endswith('.txt'):
# with open(runs[i], 'r') as f:
lines = runs[i].readlines()
tups = []
for line in lines:
if len(line.split()) == 2:
topic = line.split()[0]
tops.append(topic)
doc = line.split()[1]
docs.append(doc)
tups.append((topic, doc))
report = Report.objects.get(id_report=str(doc), language='english')
topic = UseCase.objects.get(name=str(topic))
v = TopicHasDocument.objects.filter(name=topic, id_report=report,
language='english')
if v.count() > 0:
message = 'WARNING RUNS FILE - ' + runs[i].name + ' - The topic: ' + str(
topic) + ' is already associated to the document: ' + str(
doc) + ' it will be ignored.'
elif len(line.split()) > 2: # TREC
topic = line.split()[0]
tops.append(topic)
doc = line.split()[2]
tups.append((topic, doc))
docs.append(doc)
report = Report.objects.get(id_report=str(doc), language='english')
topic = UseCase.objects.get(name=str(topic))
v = TopicHasDocument.objects.filter(name=topic, id_report=report,
language='english')
if v.count() > 0:
message = 'WARNING RUNS FILE - ' + runs[i].name + ' - The topic: ' + str(
topic) + ' is already associated to the document: ' + str(
doc) + ' it will be ignored.'
else:
message = 'RUNS FILE - ' + runs[
i].name + ' - txt file is not well formatted.'
return message
for el in tops:
if el not in topics_ids:
message = 'RUNS FILE - ' + runs[
i].name + ' - The topic: ' + str(el) + ' is not in the provided list of topics.'
return message
for el in docs:
if str(el) not in documents_ids:
message = 'RUNS FILE - ' + runs[
i].name + ' - The document: ' + str(
el) + ' is not in the provided list of documents.'
return message
if jsonAnn is not None and jsonDisp is not None:
if type_req == 'json_fields' and len(jsonAnn) == 0 and len(jsonDisp) == 0 and len(ann) == 0:
message = 'REPORT FIELDS TO ANNOTATE - You must provide at least one field to display and/or one field to display and annotate.'
return message
elif type_req == 'json_fields' and len(jsonAnn) == 0:
message = 'WARNING REPORT FIELDS TO ANNOTATE - ok, but with this configuration you will not be able to perform mention annotation and linking. Please, select also at least a field to annotate if you want to find some mentions and to link them'
return message
if type_req == 'labels' and len(labels) == 0:
message = 'LABELS - Please insert a labels file.'
return message
if type_req == 'concepts' and len(concepts) == 0:
message = 'CONCEPTS - Please insert a concepts file.'
return message
if type_req == 'reports' and len(reports) == 0:
message = 'REPORTS - Please insert a reports file.'
return message
if type_req == 'pubmed' and len(pubmedfiles) == 0:
message = 'PUBMED - Please insert a reports file.'
return message
return message
except (Exception, psycopg2.Error) as e:
print(e)
message = 'An error occurred in ' + type_req + ' file(s). Please check if it is similar to the example we provided.'
return message
def update_db_util(reports,pubmedfiles,labels,concepts,jsondisp,jsonann,jsondispup,jsonannup,jsonall,topics,runs,batch,tf_idf):
"""This method is run after having checked the files inserted for the update. It updates the db."""
filename = ''
today = str(date.today())
error_location = 'database'
usecases = []
sem_areas = []
created_file = False
cursor = connection.cursor()
try:
with transaction.atomic():
all_fields = []
fields = []
fields_to_ann = []
version = get_version()
if int(version) != 0:
json_resp = get_fields_from_json()
all_fields = json_resp['all_fields']
fields = json_resp['fields']
fields_to_ann = json_resp['fields_to_ann']
if jsonannup != '' or jsondispup != '' or jsonall != '':
data = {}
all_fields = []
fields = []
fields_to_ann = []
version = get_version()
if int(version) != 0:
json_resp = get_fields_from_json()
all_fields = json_resp['all_fields']
fields = json_resp['fields']
fields_to_ann = json_resp['fields_to_ann']
jsondispup = ''.join(jsondispup)
jsonannup = ''.join(jsonannup)
jsonall = ''.join(jsonall)
jsondispup = jsondispup.split(',')
jsonannup = jsonannup.split(',')
jsonall = jsonall.split(',')
for el in jsondispup:
if len(el) > 0:
if el not in all_fields:
all_fields.append(el)
if el not in fields:
fields.append(el)
for el in jsonannup:
if len(el) > 0:
if el not in fields_to_ann:
fields_to_ann.append(el)
if el not in all_fields:
all_fields.append(el)
for el in jsonall:
if el not in all_fields and el:
all_fields.append(el)
data['fields'] = fields
data['fields_to_ann'] = fields_to_ann
data['all_fields'] = all_fields
version = get_version()
version_new = int(version) + 1
filename = 'fields' + str(version_new)
workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in
with open(os.path.join(workpath, '.config_files/data/' + filename + '.json'), 'w') as outfile:
json.dump(data, outfile)
created_file = True
if (len(reports) > 0 or len(pubmedfiles) > 0) or len(runs) > 0 or len(topics) > 0:
language = 'english'
arr_to_ret = elaborate_runs(runs)
error_location = 'Topic'
for topic in topics:
if topic.name.endswith('txt'):
elaborate_TREC_topic_files(arr_to_ret, topic)
elif topic.name.endswith('json'):
process_topic_json_file(arr_to_ret, topic)
elif topic.name.endswith('csv'):
process_topic_csv_file(arr_to_ret, topic)
error_location = 'Collection'
for file in reports:
reps = decompress_files([file])
for f in reps:
if isinstance(f, str):
file_name = f
workpath = os.path.dirname(
os.path.abspath(__file__)) # Returns the Path your .py file is in
f = os.path.join(workpath, 'static\\tmp\\' + f)
else:
file_name = f.name
if file_name.endswith('json'):
find_docs_in_json_collection(arr_to_ret, f)
elif file_name.endswith('csv'):
find_docs_in_csv_collection(arr_to_ret, f)
for file in pubmedfiles:
find_docs_in_json_pubmed_collection(arr_to_ret, file)
error_location = 'Runs'
for el in arr_to_ret:
if len(el) == 3:
language = el[2]
topic = UseCase.objects.get(name=el[0])
doc = Report.objects.get(id_report=el[1], language=language)
TopicHasDocument.objects.get_or_create(name=topic, language=doc.language, id_report=doc)
            # Populate the labels table
if len(labels) > 0:
labs = []
error_location = 'Labels'
for label_file in labels:
if label_file.name.endswith('csv'):
df_labels = pd.read_csv(label_file)
df_labels = df_labels.where(pd.notnull(df_labels), None)
df_labels = df_labels.reset_index(drop=True)
# df_labels['usecase'] = df_labels['usecase'].str.lower()
count_lab_rows = df_labels.shape[0]
for i in range(count_lab_rows):
label = str(df_labels.loc[i, 'label'])
labs.append(label.rstrip())
elif label_file.name.endswith('json'):
d = json.load(label_file)
labels = d['labels']
for label in labels:
labs.append(label.rstrip())
elif label_file.name.endswith('txt'):
lines = label_file.readlines()
for line in lines:
line = line.decode('utf-8')
labs.append(line.replace('\n', ''))
for label in labs:
cursor.execute('SELECT * FROM annotation_label')
ans = cursor.fetchall()
if len(ans) == 0:
seq_number = 1
else:
cursor.execute('SELECT seq_number FROM annotation_label ORDER BY seq_number DESC;')
ans = cursor.fetchall()
seq_number = int(ans[0][0]) + 1
cursor.execute("SELECT * FROM annotation_label WHERE label = %s;",
(str(label),))
ans = cursor.fetchall()
if len(ans) == 0:
cursor.execute("INSERT INTO annotation_label (label,seq_number) VALUES (%s,%s);",
(str(label), int(seq_number)))
            # Populate the concepts table
if len(concepts) > 0:
error_location = 'Concepts'
for concept_file in concepts:
if concept_file.name.endswith('csv'):
df_concept = pd.read_csv(concept_file)
df_concept = df_concept.where(pd.notnull(df_concept), None)
df_concept = df_concept.reset_index(drop=True)
# df_concept['usecase'] = df_concept['usecase'].str.lower()
# print(df_concept)
count_conc_rows = df_concept.shape[0]
for i in range(count_conc_rows):
df_concept = df_concept.where(pd.notnull(df_concept), None)
concept_url = str(df_concept.loc[i, 'concept_url'])
concept_name = str(df_concept.loc[i, 'concept_name'])
# usecase = str(df_concept.loc[i, 'usecase'])
# semantic_area = str(df_concept.loc[i, 'area'])
cursor.execute("SELECT concept_url,json_concept FROM concept WHERE concept_url = %s;",
(str(concept_url),))
ans = cursor.fetchall()
if len(ans) == 0:
# json_concept = json.dumps({'provenance': 'admin', 'insertion_author': 'admin'})
cursor.execute("INSERT INTO concept (concept_url,name) VALUES (%s,%s);",
(str(concept_url), str(concept_name)))
cursor.execute("SELECT * FROM belong_to WHERE concept_url = %s AND name=%s;",
(str(concept_url), 'default_area'))
ans = cursor.fetchall()
if len(ans) == 0:
cursor.execute("INSERT INTO belong_to (concept_url,name) VALUES (%s,%s);",
(str(concept_url), 'default_area'))
elif concept_file.name.endswith('json'):
d = json.load(concept_file)
count_conc_rows = len(d['concepts_list'])
for i in range(count_conc_rows):
concept_url = str(d['concepts_list'][i]['concept_url'])
concept_name = str(d['concepts_list'][i]['concept_name'])
# usecase = str(df_concept.loc[i, 'usecase'])
# semantic_area = str(df_concept.loc[i, 'area'])
cursor.execute("SELECT concept_url,json_concept FROM concept WHERE concept_url = %s;",
(str(concept_url),))
ans = cursor.fetchall()
if len(ans) == 0:
# json_concept = json.dumps({'provenance': 'admin', 'insertion_author': 'admin'})
cursor.execute("INSERT INTO concept (concept_url,name) VALUES (%s,%s);",
(str(concept_url), str(concept_name)))
cursor.execute("SELECT * FROM belong_to WHERE concept_url = %s AND name=%s;",
(str(concept_url), 'default_area'))
ans = cursor.fetchall()
if len(ans) == 0:
cursor.execute("INSERT INTO belong_to (concept_url,name) VALUES (%s,%s);",
(str(concept_url), 'default_area'))
if ((jsonann is not None) and (jsonann != '')) or ((jsondisp is not None) and jsondisp != ''):
data = {}
jsondisp = ''.join(jsondisp)
jsonann = ''.join(jsonann)
jsondisp = jsondisp.split(',')
jsonann = jsonann.split(',')
for el in jsondisp:
if len(el) > 0:
if el not in fields:
fields.append(el)
if el not in all_fields:
all_fields.append(el)
for el in jsonann:
if len(el) > 0:
if el not in fields_to_ann:
fields_to_ann.append(el)
if el not in all_fields:
all_fields.append(el)
data['fields'] = fields
data['all_fields'] = all_fields
data['fields_to_ann'] = fields_to_ann
version = get_version()
workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in
version_new = int(version) + 1
filename = 'fields' + str(version_new)
with open(os.path.join(workpath, '.config_files/data/' + filename + '.json'), 'w') as outfile:
json.dump(data, outfile)
created_file = True
except (Exception,psycopg2.IntegrityError) as e:
print(e)
# connection.rollback()
print('rolledback')
if created_file == True:
workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in
if filename != '' and filename != 'fields0':
path = os.path.join(workpath, '.config_files/data/'+filename+'.json')
os.remove(path)
json_resp = {'error': 'an error occurred in: ' + error_location + '. The configuration failed.'}
return json_resp
else:
# connection.commit()
if created_file == True:
                for filen in os.listdir(os.path.join(workpath, '.config_files/data')):
if filen.endswith('json'):
print(filen)
if filen != '' and filen != 'fields0.json' and filen != filename + '.json':
path = os.path.join(workpath, '.config_files/data/' + filen)
os.remove(path)
if ((jsonann is not None) and (jsonann != '')) or ((jsondisp is not None) and jsondisp != ''):
outfile.close()
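            # If a TF-IDF cutoff k was provided, store it in config.json and recompute, for every
            # topic/document pair in an NLTK-supported language, the k top-scoring words via
            # QueryDocMatcher; the resulting map is cached in tf_idf_map.json.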
            if tf_idf is not None:
print(str(tf_idf))
data = {}
cursor = connection.cursor()
cursor.execute('SELECT DISTINCT language FROM report')
ans = cursor.fetchall()
languages = []
for el in ans:
languages.append(el[0])
st = time.time()
if int(tf_idf) > 0:
workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in
path1 = os.path.join(workpath, './config_files/config.json')
g = open(path1, 'r')
data = json.load(g)
data['TF-IDF_k'] = tf_idf
with open(path1, 'w') as f:
json.dump(data, f)
t = UseCase.objects.all()
cursor = connection.cursor()
json_to_write = {}
for top in t:
print('topic:' + str(top))
json_to_write[top.name] = {}
topic = {}
corpus = []
cursor.execute(
"SELECT r.id_report,r.language,r.report_json FROM report as r inner join topic_has_document as t on t.id_report = r.id_report and r.language = t.language where t.name = %s",
[str(top.name)])
ans = cursor.fetchall()
for el in ans:
e = json.loads(el[2])
r_j1 = {}
r_j1['document_id'] = str(el[0])
r_j1['text'] = ''
for k in e.keys():
if k != 'document_id' or (
str(el[0]).startswith('PUBMED_') and (k == 'abstract' or k == 'title')):
r_j1['text'] = r_j1['text'] + ' ' + str(e[k])
if el[1].lower() in LANGUAGES_NLTK:
corpus.append(r_j1)
topic['title'] = top.title
topic['description'] = top.description
# df_tfidf = gen_tfidf_map(corpus,language)
for el in ans:
if el[1].lower() in LANGUAGES_NLTK:
language = el[1].lower()
start = time.time()
print('working on ', str(el[0]))
e = json.loads(el[2])
r_j1 = {}
r_j1['document_id'] = str(el[0])
r_j1['text'] = ''
for k in e.keys():
# print(k)
# print(e[k])
if isinstance(e[k], list):
e[k] = ', '.join(e[k])
if k != 'document_id' and k != 'language' and e[k] is not None:
r_j1['text'] = r_j1['text'] + ' ' + e[k]
tfidf_matcher = QueryDocMatcher(topic=topic, doc=r_j1, corpus=corpus, language=language)
top_k_matching_words = tfidf_matcher.get_words_to_highlight()
# print(top_k_matching_words)
# json_val = {}
# json_val[str(el[0])] = top_k_matching_words
# json_val['words'] = top_k_matching_words
json_to_write[top.name][str(el[0])] = top_k_matching_words
# print(json_to_write)
end = time.time()
print('elaborated in ' + str(end - start) + ' seconds')
else:
json_to_write = {}
end = time.time()
print('time', end - st)
path2 = os.path.join(workpath, './config_files/tf_idf_map.json')
with open(path2, 'w') as f:
json.dump(json_to_write, f)
json_resp = {'message': 'Ok'}
return json_resp
| 2.203125 | 2 |
tests/test_context_processors.py | uktrade/directory-header-footer | 1 | 12788235 | from unittest.mock import Mock
import pytest
from directory_header_footer import context_processors
from directory_constants.constants import urls as default_urls
@pytest.fixture
def sso_user():
return Mock(
id=1,
email='<EMAIL>'
)
@pytest.fixture
def request_logged_in(rf, sso_user):
request = rf.get('/')
request.sso_user = sso_user
return request
@pytest.fixture
def request_logged_out(rf):
request = rf.get('/')
request.sso_user = None
return request
def test_sso_logged_in(request_logged_in):
context = context_processors.sso_processor(request_logged_in)
assert context['sso_is_logged_in'] is True
def test_sso_profile_url(request_logged_in, settings):
settings.SSO_PROFILE_URL = 'http://www.example.com/profile/'
context = context_processors.sso_processor(request_logged_in)
assert context['sso_profile_url'] == settings.SSO_PROFILE_URL
def test_sso_register_url_url(request_logged_in, settings):
settings.SSO_PROXY_SIGNUP_URL = 'http://www.example.com/signup/'
context = context_processors.sso_processor(request_logged_in)
assert context['sso_register_url'] == (
'http://www.example.com/signup/?next=http://testserver/'
)
def test_sso_logged_out(request_logged_out):
context = context_processors.sso_processor(request_logged_out)
assert context['sso_is_logged_in'] is False
def test_sso_login_url(request_logged_in, settings):
settings.SSO_PROXY_LOGIN_URL = 'http://www.example.com/login/'
expected = 'http://www.example.com/login/?next=http://testserver/'
context = context_processors.sso_processor(request_logged_in)
assert context['sso_login_url'] == expected
def test_sso_logout_url(request_logged_in, settings):
settings.SSO_PROXY_LOGOUT_URL = 'http://www.example.com/logout/'
context = context_processors.sso_processor(request_logged_in)
assert context['sso_logout_url'] == (
'http://www.example.com/logout/?next=http://testserver/'
)
def test_sso_user(request_logged_in, sso_user):
context = context_processors.sso_processor(request_logged_in)
assert context['sso_user'] == sso_user
def test_header_footer_context_processor(settings):
settings.HEADER_FOOTER_CONTACT_US_URL = 'http://bones.com'
settings.HEADER_FOOTER_CSS_ACTIVE_CLASSES = {'fab': True}
context = context_processors.header_footer_context_processor(None)
assert context == {
'header_footer_contact_us_url': 'http://bones.com',
'header_footer_css_active_classes': {'fab': True},
}
def test_urls_processor(rf, settings):
settings.GREAT_HOME = 'http://home.com'
settings.GREAT_EXPORT_HOME = 'http://export.com'
settings.EXPORTING_NEW = 'http://export.com/new'
settings.EXPORTING_OCCASIONAL = 'http://export.com/occasional'
settings.EXPORTING_REGULAR = 'http://export.com/regular'
settings.GUIDANCE_MARKET_RESEARCH = 'http://market-research.com'
settings.GUIDANCE_CUSTOMER_INSIGHT = 'http://customer-insight.com'
settings.GUIDANCE_FINANCE = 'http://finance.com'
settings.GUIDANCE_BUSINESS_PLANNING = 'http://business-planning.com'
settings.GUIDANCE_GETTING_PAID = 'http://getting-paid.com'
settings.GUIDANCE_OPERATIONS_AND_COMPLIANCE = 'http://compliance.com'
settings.SERVICES_FAB = 'http://export.com/fab'
settings.SERVICES_SOO = 'http://export.com/soo'
settings.SERVICES_EXOPPS = 'http://export.com/exopps'
settings.SERVICES_GET_FINANCE = 'http://export.com/get-finance'
settings.SERVICES_EVENTS = 'http://export.com/events'
settings.INFO_ABOUT = 'http://about.com'
settings.INFO_CONTACT_US_DIRECTORY = 'http://contact.com'
settings.INFO_PRIVACY_AND_COOKIES = 'http://privacy-and-cookies.com'
settings.INFO_TERMS_AND_CONDITIONS = 'http://terms-and-conditions.com'
settings.INFO_DIT = 'http://dit.com'
settings.CUSTOM_PAGE = 'http://custom.com'
actual = context_processors.urls_processor(None)
expected_urls = {
'great_home': 'http://home.com',
'great_export_home': 'http://export.com',
'new_to_exporting': 'http://export.com/new',
'occasional_exporter': 'http://export.com/occasional',
'regular_exporter': 'http://export.com/regular',
'guidance_market_research': 'http://market-research.com',
'guidance_customer_insight': 'http://customer-insight.com',
'guidance_finance': 'http://finance.com',
'guidance_business_planning': 'http://business-planning.com',
'guidance_getting_paid': 'http://getting-paid.com',
'guidance_operations_and_compliance': 'http://compliance.com',
'services_fab': 'http://export.com/fab',
'services_soo': 'http://export.com/soo',
'services_exopps': 'http://export.com/exopps',
'services_get_finance': 'http://export.com/get-finance',
'services_events': 'http://export.com/events',
'info_about': 'http://about.com',
'info_contact_us': 'http://contact.com',
'info_privacy_and_cookies': 'http://privacy-and-cookies.com',
'info_terms_and_conditions': 'http://terms-and-conditions.com',
'info_dit': 'http://dit.com',
'custom_page': 'http://custom.com',
}
assert actual == {
'header_footer_urls': expected_urls
}
def test_urls_processor_defaults(rf, settings):
actual = context_processors.urls_processor(None)
expected_urls = {
'great_home': default_urls.GREAT_HOME,
'great_export_home': default_urls.GREAT_EXPORT_HOME,
'new_to_exporting': default_urls.EXPORTING_NEW,
'occasional_exporter': default_urls.EXPORTING_OCCASIONAL,
'regular_exporter': default_urls.EXPORTING_REGULAR,
'guidance_market_research': default_urls.GUIDANCE_MARKET_RESEARCH,
'guidance_customer_insight': default_urls.GUIDANCE_CUSTOMER_INSIGHT,
'guidance_finance': default_urls.GUIDANCE_FINANCE,
'guidance_business_planning': default_urls.GUIDANCE_BUSINESS_PLANNING,
'guidance_getting_paid': default_urls.GUIDANCE_GETTING_PAID,
'guidance_operations_and_compliance': (
default_urls.GUIDANCE_OPERATIONS_AND_COMPLIANCE),
'services_fab': default_urls.SERVICES_FAB,
'services_soo': default_urls.SERVICES_SOO,
'services_exopps': default_urls.SERVICES_EXOPPS,
'services_get_finance': default_urls.SERVICES_GET_FINANCE,
'services_events': default_urls.SERVICES_EVENTS,
'info_about': default_urls.INFO_ABOUT,
'info_contact_us': default_urls.INFO_CONTACT_US_DIRECTORY,
'info_privacy_and_cookies': default_urls.INFO_PRIVACY_AND_COOKIES,
'info_terms_and_conditions': default_urls.INFO_TERMS_AND_CONDITIONS,
'info_dit': default_urls.INFO_DIT,
'custom_page': default_urls.CUSTOM_PAGE,
}
assert actual == {'header_footer_urls': expected_urls}
def test_urls_processor_defaults_explicitly_none(rf, settings):
settings.GREAT_HOME = None
settings.GREAT_EXPORT_HOME = None
settings.EXPORTING_NEW = None
settings.EXPORTING_OCCASIONAL = None
settings.EXPORTING_REGULAR = None
settings.GUIDANCE_MARKET_RESEARCH = None
settings.GUIDANCE_CUSTOMER_INSIGHT = None
settings.GUIDANCE_BUSINESS_PLANNING = None
settings.GUIDANCE_GETTING_PAID = None
settings.GUIDANCE_OPERATIONS_AND_COMPLIANCE = None
settings.SERVICES_FAB = None
settings.SERVICES_SOO = None
settings.SERVICES_EXOPPS = None
settings.SERVICES_GET_FINANCE = None
settings.SERVICES_EVENTS = None
settings.INFO_ABOUT = None
settings.INFO_CONTACT_US_DIRECTORY = None
settings.INFO_PRIVACY_AND_COOKIES = None
settings.INFO_TERMS_AND_CONDITIONS = None
settings.INFO_DIT = None
settings.CUSTOM_PAGE = None
actual = context_processors.urls_processor(None)
expected_urls = {
'great_home': default_urls.GREAT_HOME,
'great_export_home': default_urls.GREAT_EXPORT_HOME,
'new_to_exporting': default_urls.EXPORTING_NEW,
'occasional_exporter': default_urls.EXPORTING_OCCASIONAL,
'regular_exporter': default_urls.EXPORTING_REGULAR,
'guidance_market_research': default_urls.GUIDANCE_MARKET_RESEARCH,
'guidance_customer_insight': default_urls.GUIDANCE_CUSTOMER_INSIGHT,
'guidance_finance': default_urls.GUIDANCE_FINANCE,
'guidance_business_planning': default_urls.GUIDANCE_BUSINESS_PLANNING,
'guidance_getting_paid': default_urls.GUIDANCE_GETTING_PAID,
'guidance_operations_and_compliance': (
default_urls.GUIDANCE_OPERATIONS_AND_COMPLIANCE
),
'services_fab': default_urls.SERVICES_FAB,
'services_soo': default_urls.SERVICES_SOO,
'services_exopps': default_urls.SERVICES_EXOPPS,
'services_get_finance': default_urls.SERVICES_GET_FINANCE,
'services_events': default_urls.SERVICES_EVENTS,
'info_about': default_urls.INFO_ABOUT,
'info_contact_us': default_urls.INFO_CONTACT_US_DIRECTORY,
'info_privacy_and_cookies': default_urls.INFO_PRIVACY_AND_COOKIES,
'info_terms_and_conditions': default_urls.INFO_TERMS_AND_CONDITIONS,
'info_dit': default_urls.INFO_DIT,
'custom_page': default_urls.CUSTOM_PAGE,
}
assert actual == {'header_footer_urls': expected_urls}
| 2.171875 | 2 |
nova_powervm/tests/virt/powervm/nvram/fake_api.py | openstack/nova-powervm | 24 | 12788236 | <filename>nova_powervm/tests/virt/powervm/nvram/fake_api.py<gh_stars>10-100
# Copyright 2016, 2017 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova_powervm.virt.powervm.nvram import api
class NoopNvramStore(api.NvramStore):
def store(self, instance, data, force=True):
"""Store the NVRAM into the storage service.
:param instance: The nova instance object OR instance UUID.
:param data: the NVRAM data base64 encoded string
:param force: boolean whether an update should always be saved,
otherwise, check to see if it's changed.
"""
pass
def fetch(self, instance):
"""Fetch the NVRAM from the storage service.
:param instance: The nova instance object OR instance UUID.
:returns: the NVRAM data base64 encoded string
"""
return None
def delete(self, instance):
"""Delete the NVRAM from the storage service.
:param instance: The nova instance object OR instance UUID.
"""
pass
class ExpNvramStore(NoopNvramStore):
def fetch(self, instance):
"""Fetch the NVRAM from the storage service.
:param instance: The nova instance object OR instance UUID.
:returns: the NVRAM data base64 encoded string
"""
# Raise exception. This is to ensure fetch causes a failure
# when an exception is raised
raise Exception('Error')
def delete(self, instance):
"""Delete the NVRAM from the storage service.
:param instance: The nova instance object OR instance UUID.
"""
# Raise exception. This is to ensure delete does not fail
# despite an exception being raised
raise Exception('Error')
| 2.46875 | 2 |
csp/admin.py | jsocol/django-csp | 2 | 12788237 | <gh_stars>1-10
from django.contrib import admin
from csp.models import Group, Report
class GroupAdmin(admin.ModelAdmin):
fields = ('name', 'identifier',)
list_display = ('name', 'identifier', 'count',)
readonly_fields = ('identifier',)
def has_add_permission(*a, **kw):
return False
class ReportAdmin(admin.ModelAdmin):
date_hierarchy = 'reported'
list_display = ('get_identifier', 'document_uri', 'blocked_uri',
'violated_directive', 'referrer', 'reported')
readonly_fields = ('group', 'document_uri', 'blocked_uri', 'referrer',
'violated_directive', 'original_policy', 'reported')
def has_add_permission(*a, **kw):
return False
admin.site.register(Group, GroupAdmin)
admin.site.register(Report, ReportAdmin)
| 2.125 | 2 |
examples/mocsar_pl_human.py | cogitoergoread/muszi-macrohard.hu | 1 | 12788238 | """
A toy example of playing against random bot on Mocsár
Using env "mocsar" and 'human_mode'. It implies using random agent.
"""
import rlcard3
# Make environment and enable human mode
env = rlcard3.make(env_id='mocsar', config={'human_mode': True})
# Reset environment
state = env.reset()
while not env.is_over():
legal_actions = state['legal_actions']
legal_actions.insert(0, 0)
action = input('>> You choose action (integer): ')
if action == '-1':
print('Break the game...')
break
while not action.isdigit() \
or int(action) not in legal_actions:
print('Action illegal...')
action = input('>> Re-choose action (integer): ')
state, reward, done = env.step(int(action))
| 3.140625 | 3 |
tests/test_env.py | timgates42/flask-assets | 0 | 12788239 | <reponame>timgates42/flask-assets
import os
from nose.tools import assert_raises
from flask import Flask
from flask_assets import Environment, Bundle
class TestEnv:
def setup(self):
self.app = Flask(__name__)
self.env = Environment(self.app)
self.env.debug = True
self.env.register('test', 'file1', 'file2')
def test_tag_available(self):
"""Jinja tag has been made available.
"""
t = self.app.jinja_env.from_string('{% assets "test" %}{{ASSET_URL}};{% endassets %}')
assert t.render() == '/static/file1;/static/file2;'
def test_from_yaml(self):
"""YAML configuration gets loaded
"""
f = open('test.yaml', 'w')
f.write("""
yamltest:
contents:
- yamlfile1
- yamlfile2
""")
f.close()
self.env.from_yaml('test.yaml')
t = self.app.jinja_env.from_string('{% assets "yamltest" %}{{ASSET_URL}};{% endassets %}')
assert t.render() == '/static/yamlfile1;/static/yamlfile2;'
os.remove('test.yaml')
def test_from_python_module(self):
"""Python configuration module gets loaded
"""
import types
module = types.ModuleType('test')
module.pytest = Bundle('pyfile1', 'pyfile2')
self.env.from_module(module)
t = self.app.jinja_env.from_string('{% assets "pytest" %}{{ASSET_URL}};{% endassets %}')
assert t.render() == '/static/pyfile1;/static/pyfile2;'
| 2.296875 | 2 |
tests/gbe/test_coordinator_performer_autocomplete.py | bethlakshmi/gbe-divio-djangocms-python2.7 | 1 | 12788240 | <reponame>bethlakshmi/gbe-divio-djangocms-python2.7<filename>tests/gbe/test_coordinator_performer_autocomplete.py
from django.test import TestCase
from django.test import Client
from django.urls import reverse
from tests.factories.gbe_factories import (
PersonaFactory,
ProfileFactory,
TroupeFactory,
)
from tests.functions.gbe_functions import (
grant_privilege,
login_as,
)
from gbe.functions import validate_profile
class TestLimitedPerformerAutoComplete(TestCase):
url = reverse('coordinator-performer-autocomplete')
def setUp(self):
self.client = Client()
self.privileged_user = ProfileFactory.create().user_object
grant_privilege(self.privileged_user,
'Act Coordinator',
'view_performer')
login_as(self.privileged_user, self)
self.persona = PersonaFactory()
self.troupe = TroupeFactory()
def test_list_performer(self):
response = self.client.get(self.url)
self.assertContains(response, self.persona.name)
self.assertContains(response, self.persona.pk)
self.assertContains(response, self.troupe.name)
self.assertContains(response, self.troupe.pk)
def test_list_personae_w_search_by_persona_name(self):
response = self.client.get("%s?q=%s" % (self.url, self.persona.name))
self.assertContains(response, self.persona.name)
self.assertContains(response, self.persona.pk)
self.assertNotContains(response, self.troupe.name)
| 2.234375 | 2 |
tests/test_core.py | mbannick/CorrelatedCounts | 3 | 12788241 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
test_core
~~~~~~~~~
Test the core module
"""
import numpy as np
import pytest
import ccount.core as core
import ccount.utils as utils
# test problem
m = 5
n = 3
l = 1
d = np.array([[2]*n]*l)
Y = np.random.randn(m, n)
X = [[np.random.randn(m, d[k, j])
for j in range(n)] for k in range(l)]
@pytest.mark.parametrize("group_id",
[None, np.array([1, 1, 2, 2, 3])])
def test_correlated_model(group_id):
cm = core.CorrelatedModel(m, n, l, d, Y, X,
[lambda x: x] * l,
lambda y, p: 0.5*(y - p[0])**2,
group_id=group_id)
assert all([np.linalg.norm(cm.beta[k][j]) < 1e-10
for k in range(l)
for j in range(n)])
assert np.linalg.norm(cm.U) < 1e-10
assert all([np.linalg.norm(cm.D[k] - np.identity(n)) < 1e-10
for k in range(l)])
assert np.linalg.norm(cm.P) < 1e-10
if group_id is not None:
assert cm.U.shape == (cm.l, 3, cm.n)
@pytest.mark.parametrize("group_id",
[None, np.array([1, 1, 2, 2, 3])])
@pytest.mark.parametrize("offset",
[np.ones((5, 1))])
@pytest.mark.parametrize("beta",
[None, [[np.ones(d[k, j])
for j in range(n)] for k in range(l)]])
@pytest.mark.parametrize("U", [None, 1])
def test_correlated_model_compute_P(group_id, beta, U, offset):
cm = core.CorrelatedModel(m, n, l, d, Y, X,
[lambda x: x] * l,
lambda y, p: 0.5 * (y - p[0]) ** 2,
group_id=group_id)
if U is not None:
U = np.ones((cm.m, cm.num_groups, cm.n))
P = cm.compute_P(beta=beta, U=U, X=cm.X, m=cm.m,
group_sizes=cm.group_sizes, offset=offset)
if beta is None:
beta = cm.beta
if U is None:
U = cm.U
true_P = np.array([cm.X[k][j].dot(beta[k][j])
for k in range(l)
for j in range(n)])
if group_id is not None:
U = np.repeat(U, cm.group_sizes, axis=1)
true_P = true_P.reshape((l, n, m)).transpose(0, 2, 1) + U
assert np.linalg.norm(P - true_P) < 1e-10
@pytest.mark.parametrize("beta",
[None, [[np.ones(d[k, j])
for j in range(n)] for k in range(l)]])
@pytest.mark.parametrize("U", [None, np.zeros((l, m, n))])
@pytest.mark.parametrize("D", [None,
np.array([np.identity(n) for i in range(l)])])
@pytest.mark.parametrize("P", [None, np.random.randn(l, m, n)])
def test_correlated_model_update_params(beta, U, D, P):
cm = core.CorrelatedModel(m, n, l, d, Y, X,
[lambda x: x] * l,
lambda y, p: 0.5*(y - p[0])**2)
cm.update_params(beta=beta, U=U, D=D, P=P)
if beta is None:
beta = cm.beta
assert np.linalg.norm(utils.beta_to_vec(beta) -
utils.beta_to_vec(cm.beta)) < 1e-10
if U is None:
U = cm.U
assert np.linalg.norm(U - cm.U) < 1e-10
if D is None:
D = cm.D
assert np.linalg.norm(D - cm.D) < 1e-10
if P is None:
P = cm.P
assert np.linalg.norm(P - cm.P) < 1e-10
@pytest.mark.parametrize("group_id",
[None, np.array([1, 1, 2, 2, 3])])
@pytest.mark.parametrize("beta",
[None, [[np.ones(d[k, j])
for j in range(n)] for k in range(l)]])
@pytest.mark.parametrize("U", [None, 0])
@pytest.mark.parametrize("D", [None,
np.array([np.identity(n)]*l)])
def test_correlated_model_neg_log_likelihood(group_id, beta, U, D):
cm = core.CorrelatedModel(m, n, l, d, Y, X,
[lambda x: x] * l,
lambda y, p: 0.5*(y - p[0])**2,
group_id=group_id)
if U is not None:
U = np.zeros((cm.m, cm.num_groups, cm.n))
cm.update_params(beta=beta, U=U, D=D)
assert np.abs(cm.neg_log_likelihood() -
0.5*np.mean(np.sum((cm.Y - cm.P[0])**2, axis=1)) -
0.5*np.sum(cm.U[0]*cm.U[0])/cm.m) < 1e-10
| 2.046875 | 2 |
scripts/analyzeSignals.py | simras/CAP | 1 | 12788242 | <filename>scripts/analyzeSignals.py
#!/usr/bin/python
# agrep.py
# Example cat seqs.fa | agrep.py -p gcttcatagccccttcccaat -m 3
# By <NAME>
# Bioinformatics Centre
# University of Copenhagen
# gcttcatagccccttcccaat
from types import *
def analyzeSignals(fn):
import os
import sys
from operator import itemgetter
cwd = os.getcwd()
if fn != "":
file1 = open(cwd + "/" + fn,'r')
lines = file1.readlines()
else:
lines = sys.stdin.readlines()
i = 0
j = 0
start = -1
lst = []
first = True
for l in lines:
if l[0] == ">":
i = i + 1
if not first:
print ID,start
if pred == 0:
j = j + 1
ID = l[1:].rstrip()
pred = 0
first = False
firstSig = True
elif l[0:4] == "%GFF":
if firstSig:
tmp_pred = float(l.split()[6]) + .15
else:
tmp_pred = float(l.split()[6])
if tmp_pred > pred:
pred = tmp_pred
start = int(l.split()[4])
firstSig = False
print >> sys.stderr, "analyzeSignals.py: Number of predictions with no Signal: ", j, i#, lst[0]
# print sorted(lst, key=itemgetter(1))[0:10]
# print sorted(lst, reverse=True,key=itemgetter(1))[0:10]
def analyzeSignals_viterbi(fn,prime):
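    """Parse Viterbi prediction output and print one "<ID> <position>" line per
    sequence: the predicted adapter start (or, with a 5' adapter, the position
    where the adapter ends). A per-length adapter distribution and a summary
    are written to stderr.
    """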
import os
import sys
from operator import itemgetter
cwd = os.getcwd()
if fn != "":
file1 = open(cwd + "/" + fn,'r')
sin = False
# lines = file1.readlines()
else:
sin = True
i = 0
j = 0
start = -1
#lst = []
first = True
l = '#'
ID = ""
printID = True
ldist = {}
while True:
# print "#1" + l + "#2"
if sin:
l = sys.stdin.readline()
else:
l = file1.readline()
if l == '':
break
if first and l.strip() == "":
printID = False
if l[0] == ">":
printID = True
i = i + 1
if not first:
print ID,start
if ldist.has_key(lseq - start + 1):
ldist[lseq - start + 1] = ldist[lseq - start + 1] + 1
else:
ldist[lseq - start + 1] = 1
ID = l[1:].split()[0].rstrip()
first = False
elif l[0:4] == "%len":
lseq = int(l.split("len ")[1])
#print lseq
elif l[0:7] == "%pred V":
try:
if not prime:
start = int(l.split("s")[1].split()[0])
else:
# start is actually the end of the adapter
start = lseq - int(l.split("b")[-1].split()[0])
except:
if not prime:
start = int(l.split("b")[-1].split()[-1]) + 1
else:
start = 0
j = j + 1
printID = True
if start != -1 and printID:
print ID,start
for k,v in sorted(ldist.items(),key=itemgetter(0)):
if k == 0:
print >> sys.stderr, "analyzeSignals.py: Sequences with no adaptor:",v
else:
print >> sys.stderr, "analyzeSignals.py: Adaptor length",k,"number",v
print >> sys.stderr, "analyzeSignals.py: Sequences with adapter", i-j, "all sequences",i
# print sorted(lst, key=itemgetter(1))[0:10]
# print sorted(lst, reverse=True,key=itemgetter(1))[0:10]
if __name__ == "__main__":
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-p", action="store", type="string", dest="predF",default="", help="input pred file")
parser.add_option("-5", action="store_true", dest="prime", default=False, help="5prime adapter?")
(options, args) = parser.parse_args()
analyzeSignals_viterbi(options.predF,options.prime)
| 2.65625 | 3 |
skype.py | divyanshupandey/face-recognize-ml | 0 | 12788243 | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 14 13:04:49 2017
@author: acer
"""
from skpy import Skype
from getpass import getpass
Skype("pandey.divyanshu34", getpass(), ".tokens-pandey.divyanshu34")
sk = Skype(connect=False)
print(sk.contacts) | 1.789063 | 2 |
21爬虫入门/day01/note.py | HaoZhang95/PythonAndMachineLearning | 937 | 12788244 | """
Uses of web crawlers: grabbing 12306 train tickets, SMS bombing, data collection
Categories: general-purpose crawlers: a key part of a search engine's crawling system; they download pages from the internet to local storage as a mirror backup
     focused crawlers: collect data for a specific need, filtering page content so that only pages relevant to that need are crawled
HTTP: port 80
HTTPS: port 443
Use the third-party requests library for requests: it supports both Python 2 and 3, whereas urllib's syntax differs between 2 and 3
"""
import requests
kw = {'wd': '长城'}
# the headers disguise the request as one made by a browser
# without them, the site can tell the request comes from a Python script rather than a normal browser request
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36"}
response = requests.get("https://www.baidu.com/s?", params=kw, headers=headers)
# returns the response body as a str decoded to unicode
print(response.text)
# returns the raw binary byte stream; decode() turns it into unicode
print(response.content)
print(response.content.decode())
# returns the full url
print(response.url)
# returns the character encoding
print(response.encoding)
# returns the status code
print(response.status_code)
# save the response result
with open('baidu.html', 'wb') as f:
f.write(response.content)
| 3.34375 | 3 |
summarize.py | grajekf/wae-2018 | 0 | 12788245 | <gh_stars>0
#!/usr/bin/env python3
import argparse
import numpy as np
import pandas as pd
def args():
parser = argparse.ArgumentParser()
parser.add_argument('output', metavar="OUTPUT", help="Output file", type=str)
parser.add_argument('files', nargs="+", metavar="FILES", help="Files to summarize (extract the last line)", type=str)
ret = parser.parse_args()
return ret.output, ret.files
def main():
output, files = args()
dfs = [pd.read_csv(path).tail(1) for path in files]
summary = pd.concat(dfs, ignore_index=True)
summary.to_csv(output)
if __name__ == '__main__':
main() | 3.21875 | 3 |
rasa/graph_components/converters/nlu_message_converter.py | praneethgb/rasa | 8 | 12788246 | <reponame>praneethgb/rasa
from __future__ import annotations
from typing import Dict, Text, Any, Optional, List
from rasa.core.channels.channel import UserMessage
from rasa.engine.graph import GraphComponent, ExecutionContext
from rasa.engine.storage.resource import Resource
from rasa.engine.storage.storage import ModelStorage
from rasa.shared.nlu.constants import TEXT
from rasa.shared.nlu.training_data.message import Message
class NLUMessageConverter(GraphComponent):
"""Converts the user message into a NLU Message object."""
@classmethod
def create(
cls,
config: Dict[Text, Any],
model_storage: ModelStorage,
resource: Resource,
execution_context: ExecutionContext,
) -> NLUMessageConverter:
"""Creates component (see parent class for full docstring)."""
return cls()
@staticmethod
def convert_user_message(message: Optional[UserMessage]) -> List[Message]:
"""Converts user message into Message object.
Returns:
List containing only one instance of Message.
Else empty list if user message is None.
"""
if message:
data = {
TEXT: message.text,
"message_id": message.message_id,
"metadata": message.metadata,
}
return [Message(data=data)]
return []
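
# A minimal usage sketch (comments only, not part of the original module), showing the
# conversion of an incoming UserMessage into a single-element list of NLU Messages:
#
#     messages = NLUMessageConverter.convert_user_message(UserMessage("hello"))
#     assert messages[0].get(TEXT) == "hello"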
| 2.28125 | 2 |
tronn/preprocess/variants.py | kundajelab/tronn | 5 | 12788247 | <gh_stars>1-10
# description code for working with variants
import os
import gzip
import logging
# basically generate two fasta files, and then
# run a version of setup_h5_dataset that takes in two fasta files
# to generate two one hot encoded sets
def generate_new_fasta(vcf_file, fasta_file, out_fasta_file, ref=True):
"""given a variant file and choice of ref or alt, adjust the fasta at those
positions
"""
logging.info("WARNING: when processing the VCF, grabs the first base pair given")
# set up the tmp snp file
if ref:
tmp_snp_file = "{}/{}.ref.snp".format(
os.path.dirname(out_fasta_file),
os.path.basename(vcf_file).split(".gz")[0])
else:
tmp_snp_file = "{}/{}.alt.snp".format(
os.path.dirname(out_fasta_file),
os.path.basename(vcf_file).split(".gz")[0])
with open(tmp_snp_file, "w") as out:
with open(vcf_file, "r") as fp:
for line in fp:
if line.startswith("#"):
continue
fields = line.strip().split()
chrom, pos, snp_id = fields[0], int(fields[1]), fields[2]
if ref:
basepair = fields[3][0]
else:
basepair = fields[4][0]
out.write("chr{}\t{}\t{}\t{}\n".format(chrom, pos, snp_id, basepair))
# adjust the fasta
mutate = "seqtk mutfa {} {} > {}".format(fasta_file, tmp_snp_file, out_fasta_file)
print mutate
os.system(mutate)
return out_fasta_file
def generate_bed_file_from_variant_file(vcf_file, out_bed_file, bin_size):
"""given a variant file and params for dataset generation, create a bed file
that will correctly tile the snp region when dataset is generated
"""
with gzip.open(out_bed_file, "w") as out:
with open(vcf_file, "r") as fp:
for line in fp:
if line.startswith("#"):
continue
fields = line.strip().split()
chrom, snp_pos, snp_id = fields[0], int(fields[1]), fields[2]
start = snp_pos - bin_size
stop = snp_pos
metadata = "snp_id={};snp_pos={}:{}".format(snp_id, chrom, snp_pos)
out.write("{}\t{}\t{}\t{}\n".format(chrom, start, stop, metadata))
return None
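

# A minimal usage sketch (hypothetical file names and bin size; assumes seqtk is on the
# PATH and that the inputs and the out/ directory exist), tying the two helpers together
# as described at the top of the module:
if __name__ == "__main__":
    generate_new_fasta("variants.vcf", "genome.fa", "out/genome.ref.fa", ref=True)
    generate_new_fasta("variants.vcf", "genome.fa", "out/genome.alt.fa", ref=False)
    generate_bed_file_from_variant_file("variants.vcf", "out/variants.bed.gz", bin_size=200)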
| 2.515625 | 3 |
Python/Learning/Libraries/TerminalTables.py | prynix/learning-programming | 2 | 12788248 | <gh_stars>1-10
"""
Happy now?
"""
from terminaltables import AsciiTable, SingleTable
table_data_large = [
['', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday'],
['8:00 - 8:45', 'Mathematics', 'PE', '', '', ''],
['8:55 - 9:40', 'History', 'Advisory', 'Business', 'French', 'Mathematics'],
['9:50 - 10:35', 'Civics', 'Business', 'Polish', 'Biology', 'English'],
['10:55 - 11:40', 'Safety E', 'Polish', 'Mathematics', 'History', 'PE'],
['11:50 - 12:35', 'Religion', 'English', 'Physics', 'Mathematics', 'Chemistry'],
['12:55 - 13:40', 'Polish', 'Geography', 'Religion', 'Culture', 'IT'],
['13:50 - 14:35', 'Polish', 'French', 'PE', 'English', 'IT']
]
table_data_small = [
['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday'],
['Mathematics', 'PE', '', '', ''],
['History', 'Advisory', 'Business', 'French', 'Mathematics'],
['Civics', 'Business', 'Polish', 'Biology', 'English'],
['Safety E', 'Polish', 'Mathematics', 'History', 'PE'],
['Religion', 'English', 'Physics', 'Mathematics', 'Chemistry'],
['Polish', 'Geography', 'Religion', 'Culture', 'IT'],
['Polish', 'French', 'PE', 'English', 'IT']
]
tableAsci = AsciiTable(table_data_large)
tableSingle = SingleTable(table_data_large)
if not (tableSingle.ok): tableSingle = SingleTable(table_data_small)
if not (tableAsci.ok): tableAsci = AsciiTable(table_data_small)
tableSingle.justify_columns[0] = 'center'
tableSingle.justify_columns[1] = 'center'
tableSingle.justify_columns[2] = 'center'
tableSingle.justify_columns[3] = 'center'
tableSingle.justify_columns[4] = 'center'
tableSingle.justify_columns[5] = 'center'
print(tableSingle.table)
print(tableAsci.table)
| 2.21875 | 2 |
songs-generator2.py | Antoniii/Generator-of-songs-without-neural-network-and-SMS | 0 | 12788249 | import markovify
# with open("esenin.txt", 'r', encoding='utf-8') as f0, \
# open("kish.txt", 'r', encoding='utf-8') as f1, \
# open("kino.txt", 'r', encoding='utf-8') as f2, \
# open("kukr.txt", 'r', encoding='utf-8') as f3, \
# open("dataset.txt", 'a', encoding='utf-8') as f:
# f.write(f0.read())
# f.write(f1.read())
# f.write(f2.read())
# f.write(f3.read())
with open("dataset.txt", 'r', encoding='utf-8') as f:
text = f.read()
text_model = markovify.Text(text)
my_file = open("result.txt", 'a', encoding='utf-8')
for i in range(10):
    sentence = text_model.make_short_sentence(280)
    # make_short_sentence may return None when no sentence can be generated
    if sentence is not None:
        my_file.write(sentence + "\n")
my_file.close()
select.py | misterpah/ldtp_adapter | 0 | 12788250 | <gh_stars>0
import sys
import time


def selectitem(windowTitle, object, value, handle=None):
log("{} :not implemented yet".format(sys._getframe().f_code.co_name))
options = pyacc.comboboxListOptions(reverse_ldtp_me(windowTitle),reverse_ldtp_me(object),handle)
# get position of the wanted item
for each in range(0,len(options)):
if options[each] == value:
break
# get position of the selected item
selected = pyacc.comboboxGetSelected(reverse_ldtp_me(windowTitle),reverse_ldtp_me(object),handle)
selected_index = options.index(selected)
# calculate how many keypress needed
diff = each - selected_index
key = ""
if diff == 0:
# no need to move anything
return 1
elif diff > 0:
#positive. move down
key = "<down>"
else:
#negative. move up
key = "<up>"
diff *= -1
# apply changes
ldtp_extend_mouse_click_here()
for each in range(0,diff):
keypress(key)
keyrelease(key)
time.sleep(0.1)
keypress("<enter>")
keyrelease("<enter>") | 2.921875 | 3 |
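
# Worked example (hypothetical values): with options ['red', 'green', 'blue'], 'red'
# currently selected (index 0) and value == 'blue' (index 2), diff == 2, so the helper
# clicks the combo box, presses <down> twice and confirms with <enter>.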