| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 2-1.05M | stringlengths 5-104 | stringlengths 4-251 | stringclasses 1 value | stringclasses 15 values | int32 2-1.05M |
# -*- coding: utf-8 -*-
"""
tiponpython - Simulation of aquifer pumping tests
Copyright 2012 Andres Pias
This file is part of tiponpython.
tiponpython is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
tiponpython is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with tiponpython. If not, see http://www.gnu.org/licenses/gpl.txt.
"""
# Form implementation generated from reading ui file 'ingresarCaudal.ui'
#
# Created: Wed Dec 14 21:03:09 2011
# by: PyQt4 UI code generator 4.8.5
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
import observacion
import observacionesensayo
import sys
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_Dialog(QtGui.QDialog):
def setupUi(self, Dialog, cont):
global ContEnsayo
ContEnsayo=cont
self.observaciones=[]
Dialog.setObjectName(_fromUtf8("ingresarobservacionesensayo"))
Dialog.resize(375, 214)
Dialog.setWindowTitle(QtGui.QApplication.translate("Dialog", "Ingresar Observaciones Ensayo", None, QtGui.QApplication.UnicodeUTF8))
self.txttiempo = QtGui.QTextEdit(Dialog)
self.txttiempo.setGeometry(QtCore.QRect(170, 40, 101, 31))
self.txttiempo.setObjectName(_fromUtf8("txttiempo"))
self.label = QtGui.QLabel(Dialog)
self.label.setGeometry(QtCore.QRect(100, 50, 46, 21))
self.label.setText(QtGui.QApplication.translate("Dialog", "Tiempo", None, QtGui.QApplication.UnicodeUTF8))
self.label.setObjectName(_fromUtf8("label"))
self.label_2 = QtGui.QLabel(Dialog)
self.label_2.setGeometry(QtCore.QRect(100, 100, 46, 13))
self.label_2.setText(QtGui.QApplication.translate("Dialog", "Nivel", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.txtcaudal = QtGui.QTextEdit(Dialog)
self.txtcaudal.setGeometry(QtCore.QRect(170, 90, 101, 31))
self.txtcaudal.setObjectName(_fromUtf8("txtcaudal"))
self.btnagregar = QtGui.QPushButton(Dialog)
self.btnagregar.setGeometry(QtCore.QRect(100, 150, 71, 23))
self.btnagregar.setText(QtGui.QApplication.translate("Dialog", "Agregar", None, QtGui.QApplication.UnicodeUTF8))
self.btnagregar.setObjectName(_fromUtf8("btnagregar"))
self.btnfinalizar = QtGui.QPushButton(Dialog)
self.btnfinalizar.setGeometry(QtCore.QRect(200, 150, 71, 23))
self.btnfinalizar.setText(QtGui.QApplication.translate("Dialog", "Finalizar", None, QtGui.QApplication.UnicodeUTF8))
self.btnfinalizar.setObjectName(_fromUtf8("btnfinalizar"))
self.dialogo=Dialog
self.retranslateUi(Dialog)
QtCore.QObject.connect(self.btnagregar, QtCore.SIGNAL(_fromUtf8("clicked()")), self.agregar)
QtCore.QObject.connect(self.btnfinalizar, QtCore.SIGNAL(_fromUtf8("clicked()")), self.finalizar)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
pass
def agregar(self):
global ContEnsayo
control=True
t=float(self.txttiempo.toPlainText())
print "tiempo: "+str(t)
        ## Check that the data arrive with their times in increasing order; otherwise stop
control=ContEnsayo.verificarFormato(self.observaciones, t)
if (control==False):
reply = QtGui.QMessageBox.critical(self,
"Error",
"Los datos de bombeo no fueron agregaos. Debe ingresar un valor para el tiempo mayor a los ingresados anteriormente.")
else:
n=float(self.txtcaudal.toPlainText())
print "caudal: "+str(n)
o=observacion.observacion(t,n)
self.observaciones.append(o)
reply = QtGui.QMessageBox.information(None,
"Información",
"Se agrego la nueva observacion del ensayo. Presione finalizar para guardar las observaciones")
self.txttiempo.setText('')
self.txtcaudal.setText('')
def finalizar(self):
global ContEnsayo
        #### Ask for a name for this test run
        nombre, ok=QtGui.QInputDialog.getText(self,"Finalizar registro",
"Nombre: ", QtGui.QLineEdit.Normal)
        ## Send the observations to the controller, which returns the id of the observation set
obse=ContEnsayo.agregarObservacion(self.observaciones, nombre)
reply = QtGui.QMessageBox.information(self,
"Información",
"Se ha creado un nuevo conjunto de observaciones en el sistema. El id es: "+ str(obse.id))
if reply == QtGui.QMessageBox.Ok:
print "OK"
self.dialogo.close()
else:
print "Escape"
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
frmImpProyecto = QtGui.QWidget()
ui = Ui_Dialog()
    ui.setupUi(frmImpProyecto, None)  # setupUi also expects an ensayo controller; none is available in this standalone preview
frmImpProyecto.show()
sys.exit(app.exec_())
| fenixon/tiponpython | views/ingresarObservaciones.py | Python | gpl-3.0 | 5,555 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.tests import common
class TestHrHolidaysBase(common.TransactionCase):
def setUp(self):
super(TestHrHolidaysBase, self).setUp()
Users = self.env['res.users'].with_context(no_reset_password=True)
# Find Employee group
group_employee_id = self.ref('base.group_user')
# Test users to use through the various tests
self.user_hruser_id = Users.create({
'name': 'Armande HrUser',
'login': 'Armande',
'alias_name': 'armande',
'email': '[email protected]',
'groups_id': [(6, 0, [group_employee_id, self.ref('base.group_hr_user')])]
}).id
self.user_hrmanager_id = Users.create({
'name': 'Bastien HrManager',
'login': 'bastien',
'alias_name': 'bastien',
'email': '[email protected]',
'groups_id': [(6, 0, [group_employee_id, self.ref('base.group_hr_manager')])]
}).id
self.user_employee_id = Users.create({
'name': 'David Employee',
'login': 'david',
'alias_name': 'david',
'email': '[email protected]',
'groups_id': [(6, 0, [group_employee_id])]
}).id
# Hr Data
self.employee_emp_id = self.env['hr.employee'].create({
'name': 'David Employee',
'user_id': self.user_employee_id,
}).id
self.employee_hruser_id = self.env['hr.employee'].create({
'name': 'Armande HrUser',
'user_id': self.user_hruser_id,
}).id
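# Illustrative sketch (not part of the original module): a test case built on
# TestHrHolidaysBase.  It only uses records created in setUp(), so nothing
# beyond the base fixtures is assumed.
class TestHrHolidaysExample(TestHrHolidaysBase):

    def test_base_records_linked(self):
        # The employee created in setUp() should point back at its test user.
        employee = self.env['hr.employee'].browse(self.employee_emp_id)
        self.assertEqual(employee.user_id.id, self.user_employee_id)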
| ayepezv/GAD_ERP | addons/hr_holidays/tests/common.py | Python | gpl-3.0 | 1,704 |
from pycbc.types import zeros, complex64, complex128
import numpy as _np
import ctypes
import pycbc.scheme as _scheme
from pycbc.libutils import get_ctypes_library
from .core import _BaseFFT, _BaseIFFT
from ..types import check_aligned
# IMPORTANT NOTE TO PYCBC DEVELOPERS:
# Because this module is loaded automatically when present, and because
# no FFTW function should be called until the user has had the chance
# to set the threading backend, it is ESSENTIAL that simply loading this
# module should not actually *call* ANY functions.
#FFTW constants, these are pulled from fftw3.h
FFTW_FORWARD = -1
FFTW_BACKWARD = 1
FFTW_MEASURE = 0
FFTW_DESTROY_INPUT = 1 << 0
FFTW_UNALIGNED = 1 << 1
FFTW_CONSERVE_MEMORY = 1 << 2
FFTW_EXHAUSTIVE = 1 << 3
FFTW_PRESERVE_INPUT = 1 << 4
FFTW_PATIENT = 1 << 5
FFTW_ESTIMATE = 1 << 6
FFTW_WISDOM_ONLY = 1 << 21
# Load the single and double precision libraries
# We need to construct them directly with CDLL so
# we can give the RTLD_GLOBAL mode, which we must do
# in order to use the threaded libraries as well.
double_lib = get_ctypes_library('fftw3',['fftw3'],mode=ctypes.RTLD_GLOBAL)
float_lib = get_ctypes_library('fftw3f',['fftw3f'],mode=ctypes.RTLD_GLOBAL)
if (double_lib is None) or (float_lib is None):
raise ImportError("Unable to find FFTW libraries")
# Support for FFTW's two different threading backends
_fftw_threaded_lib = None
_fftw_threaded_set = False
_double_threaded_lib = None
_float_threaded_lib = None
HAVE_FFTW_THREADED = False
# Although we set the number of threads based on the scheme,
# we need a private variable that records the last value used so
# we know whether we need to call plan_with_nthreads() again.
_fftw_current_nthreads = 0
# This function sets the number of threads used internally by FFTW
# in planning. It just takes a number of threads, rather than itself
# looking at scheme.mgr.num_threads, because it should not be called
# directly, but only by functions that get the value they use from
# scheme.mgr.num_threads
def _fftw_plan_with_nthreads(nthreads):
global _fftw_current_nthreads
if not HAVE_FFTW_THREADED:
if (nthreads > 1):
raise ValueError("Threading is NOT enabled, but {0} > 1 threads specified".format(nthreads))
else:
            _fftw_current_nthreads = nthreads
else:
dplanwthr = _double_threaded_lib.fftw_plan_with_nthreads
fplanwthr = _float_threaded_lib.fftwf_plan_with_nthreads
dplanwthr.restype = None
fplanwthr.restype = None
dplanwthr(nthreads)
fplanwthr(nthreads)
_fftw_current_nthreads = nthreads
# This is a global dict-of-dicts used when initializing threads and
# setting the threading library
_fftw_threading_libnames = { 'unthreaded' : {'double' : None, 'float' : None},
'openmp' : {'double' : 'fftw3_omp', 'float' : 'fftw3f_omp'},
'pthreads' : {'double' : 'fftw3_threads', 'float' : 'fftw3f_threads'}}
def _init_threads(backend):
# This function actually sets the backend and initializes. It returns zero on
# success and 1 if given a valid backend but that cannot be loaded. It raises
# an exception if called after the threading backend has already been set, or
# if given an invalid backend.
global _fftw_threaded_set
global _fftw_threaded_lib
global HAVE_FFTW_THREADED
global _double_threaded_lib
global _float_threaded_lib
if _fftw_threaded_set:
raise RuntimeError(
"Threading backend for FFTW already set to {0}; cannot be changed".format(_fftw_threaded_lib))
try:
double_threaded_libname = _fftw_threading_libnames[backend]['double']
float_threaded_libname = _fftw_threading_libnames[backend]['float']
except KeyError:
raise ValueError("Backend {0} for FFTW threading does not exist!".format(backend))
if double_threaded_libname is not None:
try:
# Note that the threaded libraries don't have their own pkg-config files;
# we must look for them wherever we look for double or single FFTW itself
_double_threaded_lib = get_ctypes_library(double_threaded_libname,['fftw3'],mode=ctypes.RTLD_GLOBAL)
_float_threaded_lib = get_ctypes_library(float_threaded_libname,['fftw3f'],mode=ctypes.RTLD_GLOBAL)
if (_double_threaded_lib is None) or (_float_threaded_lib is None):
raise RuntimeError("Unable to load threaded libraries {0} or {1}".format(double_threaded_libname,
float_threaded_libname))
dret = _double_threaded_lib.fftw_init_threads()
fret = _float_threaded_lib.fftwf_init_threads()
# FFTW for some reason uses *0* to indicate failure. In C.
if (dret == 0) or (fret == 0):
return 1
HAVE_FFTW_THREADED = True
_fftw_threaded_set = True
_fftw_threaded_lib = backend
return 0
except:
return 1
else:
# We get here when we were given the 'unthreaded' backend
HAVE_FFTW_THREADED = False
_fftw_threaded_set = True
_fftw_threaded_lib = backend
return 0
def set_threads_backend(backend=None):
# This is the user facing function. If given a backend it just
# calls _init_threads and lets it do the work. If not (the default)
# then it cycles in order through threaded backends,
if backend is not None:
retval = _init_threads(backend)
# Since the user specified this backend raise an exception if the above failed
if retval != 0:
raise RuntimeError("Could not initialize FFTW threading backend {0}".format(backend))
else:
# Note that we pop() from the end, so 'openmp' is the first thing tried
_backend_list = ['unthreaded','pthreads','openmp']
while not _fftw_threaded_set:
_next_backend = _backend_list.pop()
retval = _init_threads(_next_backend)
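# Illustrative usage sketch (not part of the original module): a caller that
# wants a specific threading backend pins it once, before any plan is created.
# 'openmp' here is just an example; 'pthreads' and 'unthreaded' work the same.
def _example_select_backend():
    set_threads_backend('openmp')
    # From this point on, plans may be created; calling set_threads_backend()
    # again would raise RuntimeError because the backend is already fixed.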
# Function to import system-wide wisdom files.
def import_sys_wisdom():
if not _fftw_threaded_set:
set_threads_backend()
double_lib.fftw_import_system_wisdom()
float_lib.fftwf_import_system_wisdom()
# We provide an interface for changing the "measure level"
# By default this is 0, which does no planning,
# but we provide functions to read and set it
_default_measurelvl = 0
def get_measure_level():
"""
Get the current 'measure level' used in deciding how much effort to put into
creating FFTW plans. From least effort (and shortest planning time) to most
they are 0 to 3. No arguments.
"""
return _default_measurelvl
def set_measure_level(mlvl):
"""
Set the current 'measure level' used in deciding how much effort to expend
creating FFTW plans. Must be an integer from 0 (least effort, shortest time)
to 3 (most effort and time).
"""
global _default_measurelvl
if mlvl not in (0,1,2,3):
raise ValueError("Measure level can only be one of 0, 1, 2, or 3")
_default_measurelvl = mlvl
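# Illustrative sketch (not part of the original module): the measure level is
# turned into FFTW planner flags by _flag_dict/get_flag() below; level 0 is
# the cheapest (FFTW_ESTIMATE) and level 3 the most thorough.
def _example_measure_level():
    set_measure_level(2)          # FFTW_MEASURE | FFTW_PATIENT
    return get_flag(get_measure_level(), aligned=True)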
_flag_dict = {0: FFTW_ESTIMATE,
1: FFTW_MEASURE,
2: FFTW_MEASURE|FFTW_PATIENT,
3: FFTW_MEASURE|FFTW_PATIENT|FFTW_EXHAUSTIVE}
def get_flag(mlvl,aligned):
if aligned:
return _flag_dict[mlvl]
else:
return (_flag_dict[mlvl]|FFTW_UNALIGNED)
# Add the ability to read/store wisdom to filenames
def wisdom_io(filename, precision, action):
"""Import or export an FFTW plan for single or double precision.
"""
if not _fftw_threaded_set:
set_threads_backend()
fmap = {('float', 'import'): float_lib.fftwf_import_wisdom_from_filename,
('float', 'export'): float_lib.fftwf_export_wisdom_to_filename,
('double', 'import'): double_lib.fftw_import_wisdom_from_filename,
('double', 'export'): double_lib.fftw_export_wisdom_to_filename}
f = fmap[(precision, action)]
f.argtypes = [ctypes.c_char_p]
retval = f(filename.encode())
if retval == 0:
raise RuntimeError(('Could not {0} wisdom '
'from file {1}').format(action, filename))
def import_single_wisdom_from_filename(filename):
wisdom_io(filename, 'float', 'import')
def import_double_wisdom_from_filename(filename):
wisdom_io(filename, 'double', 'import')
def export_single_wisdom_to_filename(filename):
wisdom_io(filename, 'float', 'export')
def export_double_wisdom_to_filename(filename):
wisdom_io(filename, 'double', 'export')
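# Illustrative sketch (not part of the original module): persisting wisdom so
# that expensive planning (measure level > 0) only has to be paid once.  The
# file names are placeholders.
def _example_wisdom_roundtrip(single_file='wisdom_single.dat',
                              double_file='wisdom_double.dat'):
    # At the end of a run that created plans, save what FFTW learned...
    export_single_wisdom_to_filename(single_file)
    export_double_wisdom_to_filename(double_file)
    # ...and at the start of a later run, load it back before planning.
    import_single_wisdom_from_filename(single_file)
    import_double_wisdom_from_filename(double_file)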
def set_planning_limit(time):
if not _fftw_threaded_set:
set_threads_backend()
f = double_lib.fftw_set_timelimit
f.argtypes = [ctypes.c_double]
f(time)
f = float_lib.fftwf_set_timelimit
f.argtypes = [ctypes.c_double]
f(time)
# Create function maps for the dtypes
plan_function = {'float32': {'complex64': float_lib.fftwf_plan_dft_r2c_1d},
'float64': {'complex128': double_lib.fftw_plan_dft_r2c_1d},
'complex64': {'float32': float_lib.fftwf_plan_dft_c2r_1d,
'complex64': float_lib.fftwf_plan_dft_1d},
'complex128': {'float64': double_lib.fftw_plan_dft_c2r_1d,
'complex128': double_lib.fftw_plan_dft_1d}
}
execute_function = {'float32': {'complex64': float_lib.fftwf_execute_dft_r2c},
'float64': {'complex128': double_lib.fftw_execute_dft_r2c},
'complex64': {'float32': float_lib.fftwf_execute_dft_c2r,
'complex64': float_lib.fftwf_execute_dft},
'complex128': {'float64': double_lib.fftw_execute_dft_c2r,
'complex128': double_lib.fftw_execute_dft}
}
def plan(size, idtype, odtype, direction, mlvl, aligned, nthreads, inplace):
if not _fftw_threaded_set:
set_threads_backend()
if nthreads != _fftw_current_nthreads:
_fftw_plan_with_nthreads(nthreads)
# Convert a measure-level to flags
flags = get_flag(mlvl,aligned)
# We make our arrays of the necessary type and size. Things can be
# tricky, especially for in-place transforms with one of input or
# output real.
if (idtype == odtype):
# We're in the complex-to-complex case, so lengths are the same
ip = zeros(size, dtype=idtype)
if inplace:
op = ip
else:
op = zeros(size, dtype=odtype)
elif (idtype.kind == 'c') and (odtype.kind == 'f'):
# Complex-to-real (reverse), so size is length of real array.
# However the complex array may be larger (in bytes) and
# should therefore be allocated first and reused for an in-place
# transform
        ip = zeros(size//2+1, dtype=idtype)
if inplace:
op = ip.view(dtype=odtype)[0:size]
else:
op = zeros(size, dtype=odtype)
else:
# Real-to-complex (forward), and size is still that of real.
# However it is still true that the complex array may be larger
# (in bytes) and should therefore be allocated first and reused
# for an in-place transform
        op = zeros(size//2+1, dtype=odtype)
if inplace:
ip = op.view(dtype=idtype)[0:size]
else:
ip = zeros(size, dtype=idtype)
# Get the plan function
idtype = _np.dtype(idtype)
odtype = _np.dtype(odtype)
f = plan_function[str(idtype)][str(odtype)]
f.restype = ctypes.c_void_p
# handle the C2C cases (forward and reverse)
if idtype.kind == odtype.kind:
f.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p,
ctypes.c_int, ctypes.c_int]
theplan = f(size, ip.ptr, op.ptr, direction, flags)
# handle the R2C and C2R case
else:
f.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p,
ctypes.c_int]
theplan = f(size, ip.ptr, op.ptr, flags)
# We don't need ip or op anymore
del ip, op
# Make the destructors
if idtype.char in ['f', 'F']:
destroy = float_lib.fftwf_destroy_plan
else:
destroy = double_lib.fftw_destroy_plan
destroy.argtypes = [ctypes.c_void_p]
return theplan, destroy
# Note that we don't need to check whether we've set the threading backend
# in the following functions, since execute is not called directly and
# the fft and ifft will call plan first.
def execute(plan, invec, outvec):
f = execute_function[str(invec.dtype)][str(outvec.dtype)]
f.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
f(plan, invec.ptr, outvec.ptr)
def fft(invec, outvec, prec, itype, otype):
theplan, destroy = plan(len(invec), invec.dtype, outvec.dtype, FFTW_FORWARD,
get_measure_level(),(check_aligned(invec.data) and check_aligned(outvec.data)),
_scheme.mgr.state.num_threads, (invec.ptr == outvec.ptr))
execute(theplan, invec, outvec)
destroy(theplan)
def ifft(invec, outvec, prec, itype, otype):
theplan, destroy = plan(len(outvec), invec.dtype, outvec.dtype, FFTW_BACKWARD,
get_measure_level(),(check_aligned(invec.data) and check_aligned(outvec.data)),
_scheme.mgr.state.num_threads, (invec.ptr == outvec.ptr))
execute(theplan, invec, outvec)
destroy(theplan)
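# Illustrative usage sketch (not part of the original module): a forward
# real-to-complex transform through the function-based API above.  The complex
# output has the usual FFTW length n//2 + 1; float32 is assumed to be exposed
# by pycbc.types alongside the complex dtypes imported at the top.
def _example_function_api(n=4096):
    from pycbc.types import float32
    invec = zeros(n, dtype=float32)
    outvec = zeros(n // 2 + 1, dtype=complex64)
    fft(invec, outvec, 'single', 'real', 'complex')
    return outvec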
# Class based API
# First, set up a lot of different ctypes functions:
plan_many_c2c_f = float_lib.fftwf_plan_many_dft
plan_many_c2c_f.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int,
ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int,
ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int,
ctypes.c_int, ctypes.c_uint]
plan_many_c2c_f.restype = ctypes.c_void_p
plan_many_c2c_d = double_lib.fftw_plan_many_dft
plan_many_c2c_d.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int,
ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int,
ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int,
ctypes.c_int, ctypes.c_uint]
plan_many_c2c_d.restype = ctypes.c_void_p
plan_many_c2r_f = float_lib.fftwf_plan_many_dft_c2r
plan_many_c2r_f.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int,
ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int,
ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int,
ctypes.c_uint]
plan_many_c2r_f.restype = ctypes.c_void_p
plan_many_c2r_d = double_lib.fftw_plan_many_dft_c2r
plan_many_c2r_d.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int,
ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int,
ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int,
ctypes.c_uint]
plan_many_c2r_d.restype = ctypes.c_void_p
plan_many_r2c_f = float_lib.fftwf_plan_many_dft_r2c
plan_many_r2c_f.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int,
ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int,
ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int,
ctypes.c_uint]
plan_many_r2c_f.restype = ctypes.c_void_p
plan_many_r2c_d = double_lib.fftw_plan_many_dft_r2c
plan_many_r2c_d.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int,
ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int,
ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int,
ctypes.c_uint]
plan_many_r2c_d.restype = ctypes.c_void_p
# Now set up a dictionary indexed by (str(input_dtype), str(output_dtype)) to
# translate input and output dtypes into the correct planning function.
_plan_funcs_dict = { ('complex64', 'complex64') : plan_many_c2c_f,
('complex64', 'float32') : plan_many_r2c_f,
('float32', 'complex64') : plan_many_c2r_f,
('complex128', 'complex128') : plan_many_c2c_d,
('complex128', 'float64') : plan_many_r2c_d,
('float64', 'complex128') : plan_many_c2r_d }
# To avoid multiple-inheritance, we set up a function that returns much
# of the initialization that will need to be handled in __init__ of both
# classes.
def _fftw_setup(fftobj):
n = _np.asarray([fftobj.size], dtype=_np.int32)
inembed = _np.asarray([len(fftobj.invec)], dtype=_np.int32)
onembed = _np.asarray([len(fftobj.outvec)], dtype=_np.int32)
nthreads = _scheme.mgr.state.num_threads
if not _fftw_threaded_set:
set_threads_backend()
if nthreads != _fftw_current_nthreads:
_fftw_plan_with_nthreads(nthreads)
mlvl = get_measure_level()
aligned = check_aligned(fftobj.invec.data) and check_aligned(fftobj.outvec.data)
flags = get_flag(mlvl, aligned)
plan_func = _plan_funcs_dict[ (str(fftobj.invec.dtype), str(fftobj.outvec.dtype)) ]
tmpin = zeros(len(fftobj.invec), dtype = fftobj.invec.dtype)
tmpout = zeros(len(fftobj.outvec), dtype = fftobj.outvec.dtype)
# C2C, forward
if fftobj.forward and (fftobj.outvec.dtype in [complex64, complex128]):
plan = plan_func(1, n.ctypes.data, fftobj.nbatch,
tmpin.ptr, inembed.ctypes.data, 1, fftobj.idist,
tmpout.ptr, onembed.ctypes.data, 1, fftobj.odist,
FFTW_FORWARD, flags)
# C2C, backward
elif not fftobj.forward and (fftobj.invec.dtype in [complex64, complex128]):
plan = plan_func(1, n.ctypes.data, fftobj.nbatch,
tmpin.ptr, inembed.ctypes.data, 1, fftobj.idist,
tmpout.ptr, onembed.ctypes.data, 1, fftobj.odist,
FFTW_BACKWARD, flags)
# R2C or C2R (hence no direction argument for plan creation)
else:
plan = plan_func(1, n.ctypes.data, fftobj.nbatch,
tmpin.ptr, inembed.ctypes.data, 1, fftobj.idist,
tmpout.ptr, onembed.ctypes.data, 1, fftobj.odist,
flags)
del tmpin
del tmpout
return plan
class FFT(_BaseFFT):
def __init__(self, invec, outvec, nbatch=1, size=None):
super(FFT, self).__init__(invec, outvec, nbatch, size)
self.iptr = self.invec.ptr
self.optr = self.outvec.ptr
self._efunc = execute_function[str(self.invec.dtype)][str(self.outvec.dtype)]
self._efunc.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
self.plan = _fftw_setup(self)
def execute(self):
self._efunc(self.plan, self.iptr, self.optr)
class IFFT(_BaseIFFT):
def __init__(self, invec, outvec, nbatch=1, size=None):
super(IFFT, self).__init__(invec, outvec, nbatch, size)
self.iptr = self.invec.ptr
self.optr = self.outvec.ptr
self._efunc = execute_function[str(self.invec.dtype)][str(self.outvec.dtype)]
self._efunc.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
self.plan = _fftw_setup(self)
def execute(self):
self._efunc(self.plan, self.iptr, self.optr)
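# Illustrative sketch (not part of the original module): the class-based API
# plans once and is then executed repeatedly on the same buffers, the cheaper
# pattern inside a loop.  A single (nbatch=1) transform with the constructor
# defaults is assumed to be supported by the base class.
def _example_class_api(n=4096, niter=10):
    from pycbc.types import float32
    invec = zeros(n, dtype=float32)
    outvec = zeros(n // 2 + 1, dtype=complex64)
    transform = FFT(invec, outvec)
    for _ in range(niter):
        # ...refill invec with fresh data here...
        transform.execute()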
def insert_fft_options(optgroup):
"""
Inserts the options that affect the behavior of this backend
Parameters
----------
optgroup: fft_option
OptionParser argument group whose options are extended
"""
optgroup.add_argument("--fftw-measure-level",
help="Determines the measure level used in planning "
"FFTW FFTs; allowed values are: " + str([0,1,2,3]),
type=int, default=_default_measurelvl)
optgroup.add_argument("--fftw-threads-backend",
help="Give 'openmp', 'pthreads' or 'unthreaded' to specify which threaded FFTW to use",
default=None)
optgroup.add_argument("--fftw-input-float-wisdom-file",
help="Filename from which to read single-precision wisdom",
default=None)
optgroup.add_argument("--fftw-input-double-wisdom-file",
help="Filename from which to read double-precision wisdom",
default=None)
optgroup.add_argument("--fftw-output-float-wisdom-file",
help="Filename to which to write single-precision wisdom",
default=None)
optgroup.add_argument("--fftw-output-double-wisdom-file",
help="Filename to which to write double-precision wisdom",
default=None)
optgroup.add_argument("--fftw-import-system-wisdom",
help = "If given, call fftw[f]_import_system_wisdom()",
action = "store_true")
def verify_fft_options(opt,parser):
"""Parses the FFT options and verifies that they are
reasonable.
Parameters
----------
opt : object
Result of parsing the CLI with OptionParser, or any object with the
required attributes.
parser : object
OptionParser instance.
"""
if opt.fftw_measure_level not in [0,1,2,3]:
parser.error("{0} is not a valid FFTW measure level.".format(opt.fftw_measure_level))
if opt.fftw_import_system_wisdom and ((opt.fftw_input_float_wisdom_file is not None)
or (opt.fftw_input_double_wisdom_file is not None)):
parser.error("If --fftw-import-system-wisdom is given, then you cannot give"
" either of --fftw-input-float-wisdom-file or --fftw-input-double-wisdom-file")
if opt.fftw_threads_backend is not None:
if opt.fftw_threads_backend not in ['openmp','pthreads','unthreaded']:
parser.error("Invalid threads backend; must be 'openmp', 'pthreads' or 'unthreaded'")
def from_cli(opt):
# Since opt.fftw_threads_backend defaults to None, the following is always
# appropriate:
set_threads_backend(opt.fftw_threads_backend)
# Set the user-provided measure level
set_measure_level(opt.fftw_measure_level)
| ligo-cbc/pycbc | pycbc/fft/fftw.py | Python | gpl-3.0 | 22,427 |
# KicadModTree is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# KicadModTree is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with kicad-footprint-generator. If not, see < http://www.gnu.org/licenses/ >.
#
# (C) 2016 by Thomas Pointhuber, <[email protected]>
from copy import copy, deepcopy
from KicadModTree.Vector import *
class MultipleParentsError(RuntimeError):
def __init__(self, message):
# Call the base class constructor with the parameters it needs
super(MultipleParentsError, self).__init__(message)
class RecursionDetectedError(RuntimeError):
def __init__(self, message):
# Call the base class constructor with the parameters it needs
super(RecursionDetectedError, self).__init__(message)
class Node(object):
def __init__(self):
self._parent = None
self._childs = []
def append(self, node):
'''
        add a node as a child of this node
'''
if not isinstance(node, Node):
raise TypeError('invalid object, has to be based on Node')
if node._parent:
            raise MultipleParentsError('multiple parents are not allowed!')
self._childs.append(node)
node._parent = self
def extend(self, nodes):
'''
        add a list of nodes as children of this node
'''
new_nodes = []
for node in nodes:
if not isinstance(node, Node):
raise TypeError('invalid object, has to be based on Node')
if node._parent or node in new_nodes:
                raise MultipleParentsError('multiple parents are not allowed!')
new_nodes.append(node)
# when all went smooth by now, we can set the parent nodes to ourself
for node in new_nodes:
node._parent = self
self._childs.extend(new_nodes)
def remove(self, node):
'''
remove child from node
'''
if not isinstance(node, Node):
raise TypeError('invalid object, has to be based on Node')
while node in self._childs:
self._childs.remove(node)
node._parent = None
def insert(self, node):
'''
moving all childs into the node, and using the node as new parent of those childs
'''
if not isinstance(node, Node):
raise TypeError('invalid object, has to be based on Node')
for child in copy(self._childs):
self.remove(child)
node.append(child)
self.append(node)
def copy(self):
copy = deepcopy(self)
copy._parent = None
return copy
def serialize(self):
nodes = [self]
for child in self.getAllChilds():
nodes += child.serialize()
return nodes
def getNormalChilds(self):
'''
Get all normal childs of this node
'''
return self._childs
def getVirtualChilds(self):
'''
Get virtual childs of this node
'''
return []
def getAllChilds(self):
'''
Get virtual and normal childs of this node
'''
return self.getNormalChilds() + self.getVirtualChilds()
def getParent(self):
'''
get Parent Node of this Node
'''
return self._parent
def getRootNode(self):
'''
get Root Node of this Node
'''
# TODO: recursion detection
if not self.getParent():
return self
return self.getParent().getRootNode()
def getRealPosition(self, coordinate, rotation=None):
'''
return position of point after applying all transformation and rotation operations
'''
if not self._parent:
if rotation is None:
# TODO: most of the points are 2D Nodes
return Vector3D(coordinate)
else:
return Vector3D(coordinate), rotation
return self._parent.getRealPosition(coordinate, rotation)
def calculateBoundingBox(self, outline=None):
min_x, min_y = 0, 0
max_x, max_y = 0, 0
if outline:
min_x = outline['min']['x']
min_y = outline['min']['y']
max_x = outline['max']['x']
max_y = outline['max']['y']
for child in self.getAllChilds():
child_outline = child.calculateBoundingBox()
min_x = min([min_x, child_outline['min']['x']])
min_y = min([min_y, child_outline['min']['y']])
max_x = max([max_x, child_outline['max']['x']])
max_y = max([max_y, child_outline['max']['y']])
return {'min': Vector2D(min_x, min_y), 'max': Vector2D(max_x, max_y)}
def _getRenderTreeText(self):
'''
Text which is displayed when generating a render tree
'''
return type(self).__name__
def _getRenderTreeSymbol(self):
'''
Symbol which is displayed when generating a render tree
'''
if self._parent is None:
return "+"
return "*"
def getRenderTree(self, rendered_nodes=None):
'''
print render tree
'''
if rendered_nodes is None:
rendered_nodes = set()
if self in rendered_nodes:
raise RecursionDetectedError('recursive definition of render tree!')
rendered_nodes.add(self)
tree_str = "{0} {1}".format(self._getRenderTreeSymbol(), self._getRenderTreeText())
for child in self.getNormalChilds():
tree_str += '\n '
tree_str += ' '.join(child.getRenderTree(rendered_nodes).splitlines(True))
return tree_str
def getCompleteRenderTree(self, rendered_nodes=None):
'''
print virtual render tree
'''
if rendered_nodes is None:
rendered_nodes = set()
if self in rendered_nodes:
raise RecursionDetectedError('recursive definition of render tree!')
rendered_nodes.add(self)
tree_str = "{0} {1}".format(self._getRenderTreeSymbol(), self._getRenderTreeText())
for child in self.getAllChilds():
tree_str += '\n '
tree_str += ' '.join(child.getCompleteRenderTree(rendered_nodes).splitlines(True))
return tree_str
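# Illustrative sketch (not part of the original module): building a tiny tree
# of bare Node objects and printing its render tree.  Real footprints would
# use the Node subclasses shipped with KicadModTree instead.
if __name__ == '__main__':
    root = Node()
    child = Node()
    grandchild = Node()
    root.append(child)
    child.append(grandchild)
    # Prints a tree along the lines of:
    # + Node
    #  * Node
    #   * Node
    print(root.getRenderTree())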
| pointhi/kicad-footprint-generator | KicadModTree/nodes/Node.py | Python | gpl-3.0 | 6,713 |
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2017 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import subprocess
import snaps_tests
class HookCase(snaps_tests.SnapsTestCase):
snap_content_dir = "hooks"
def test_hooks(self):
snap_path = self.build_snap(self.snap_content_dir)
self.install_snap(snap_path, "hooks", "1.0")
# Regular `snap set` should succeed.
self.run_command_in_snappy_testbed("sudo snap set hooks foo=bar")
if not snaps_tests.config.get("skip-install", False):
# Setting fail=true should fail.
self.assertRaises(
subprocess.CalledProcessError,
self.run_command_in_snappy_testbed,
"sudo snap set hooks fail=true",
)
| cprov/snapcraft | snaps_tests/demos_tests/test_hooks.py | Python | gpl-3.0 | 1,341 |
# -*- encoding: utf-8 -*-
"""Test class for Smart/Puppet Class Parameter
:Requirement: Classparameters
:CaseAutomation: Automated
:CaseLevel: Component
:CaseComponent: Puppet
:TestType: Functional
:Upstream: No
"""
import json
from random import choice
from fauxfactory import gen_boolean, gen_integer, gen_string
from nailgun import entities
from requests import HTTPError
from robottelo.api.utils import delete_puppet_class, publish_puppet_module
from robottelo.constants import CUSTOM_PUPPET_REPO
from robottelo.datafactory import filtered_datapoint
from robottelo.decorators import (
run_in_one_thread,
tier1,
tier2,
upgrade
)
from robottelo.test import APITestCase
@filtered_datapoint
def valid_sc_parameters_data():
"""Returns a list of valid smart class parameter types and values"""
return [
{
u'sc_type': 'string',
u'value': gen_string('utf8'),
},
{
u'sc_type': 'boolean',
u'value': choice(['0', '1']),
},
{
u'sc_type': 'integer',
u'value': gen_integer(min_value=1000),
},
{
u'sc_type': 'real',
u'value': -123.0,
},
{
u'sc_type': 'array',
u'value': "['{0}', '{1}', '{2}']".format(
gen_string('alpha'), gen_integer(), gen_boolean()),
},
{
u'sc_type': 'hash',
u'value': '{{"{0}": "{1}"}}'.format(
gen_string('alpha'), gen_string('alpha')),
},
{
u'sc_type': 'yaml',
u'value': 'name=>XYZ',
},
{
u'sc_type': 'json',
u'value': '{"name": "XYZ"}',
},
]
@filtered_datapoint
def invalid_sc_parameters_data():
"""Returns a list of invalid smart class parameter types and values"""
return [
{
u'sc_type': 'boolean',
u'value': gen_string('alphanumeric'),
},
{
u'sc_type': 'integer',
u'value': gen_string('utf8'),
},
{
u'sc_type': 'real',
u'value': gen_string('alpha'),
},
{
u'sc_type': 'array',
u'value': '0',
},
{
u'sc_type': 'hash',
u'value': 'a:test',
},
{
u'sc_type': 'yaml',
u'value': '{a:test}',
},
{
u'sc_type': 'json',
u'value': gen_string('alpha'),
},
]
@run_in_one_thread
class SmartClassParametersTestCase(APITestCase):
"""Implements Smart Class Parameter tests in API"""
@classmethod
def setUpClass(cls):
"""Import some parametrized puppet classes. This is required to make
sure that we have smart class variable available.
Read all available smart class parameters for imported puppet class to
be able to work with unique entity for each specific test.
"""
super(SmartClassParametersTestCase, cls).setUpClass()
cls.puppet_modules = [
{'author': 'robottelo', 'name': 'api_test_classparameters'},
]
cls.org = entities.Organization().create()
cv = publish_puppet_module(
cls.puppet_modules, CUSTOM_PUPPET_REPO, cls.org)
cls.env = entities.Environment().search(
query={'search': u'content_view="{0}"'.format(cv.name)}
)[0].read()
cls.puppet_class = entities.PuppetClass().search(query={
'search': u'name = "{0}" and environment = "{1}"'.format(
cls.puppet_modules[0]['name'], cls.env.name)
})[0]
cls.sc_params_list = entities.SmartClassParameters().search(
query={
'search': 'puppetclass="{0}"'.format(cls.puppet_class.name),
'per_page': 1000
})
@classmethod
def tearDownClass(cls):
"""Removes puppet class."""
super(SmartClassParametersTestCase, cls).tearDownClass()
delete_puppet_class(cls.puppet_class.name)
def setUp(self):
"""Checks that there is at least one not overridden
smart class parameter before executing test.
"""
super(SmartClassParametersTestCase, self).setUp()
if len(self.sc_params_list) == 0:
raise Exception("Not enough smart class parameters. Please "
"update puppet module.")
@tier1
@upgrade
def test_positive_update_parameter_type(self):
"""Positive Parameter Update for parameter types - Valid Value.
Types - string, boolean, integer, real, array, hash, yaml, json
:id: 1140c3bf-ab3b-4da6-99fb-9c508cefbbd1
:steps:
1. Set override to True.
2. Update the Key Type to any of available.
3. Set a 'valid' default Value.
:expectedresults: Parameter Updated with a new type successfully.
:CaseImportance: Medium
"""
sc_param = self.sc_params_list.pop()
for data in valid_sc_parameters_data():
with self.subTest(data):
sc_param.override = True
sc_param.parameter_type = data['sc_type']
sc_param.default_value = data['value']
sc_param.update(
['override', 'parameter_type', 'default_value']
)
sc_param = sc_param.read()
if data['sc_type'] == 'boolean':
self.assertEqual(
sc_param.default_value,
True if data['value'] == '1' else False
)
elif data['sc_type'] == 'array':
string_list = [
str(element) for element in sc_param.default_value]
self.assertEqual(str(string_list), data['value'])
elif data['sc_type'] in ('json', 'hash'):
self.assertEqual(
sc_param.default_value,
# convert string to dict
json.loads(data['value'])
)
else:
self.assertEqual(sc_param.default_value, data['value'])
@tier1
def test_negative_update_parameter_type(self):
"""Negative Parameter Update for parameter types - Invalid Value.
Types - string, boolean, integer, real, array, hash, yaml, json
:id: 7f0ab885-5520-4431-a916-f739c0498a5b
:steps:
1. Set override to True.
2. Update the Key Type.
3. Attempt to set an 'Invalid' default Value.
:expectedresults:
1. Parameter not updated with string type for invalid value.
2. Error raised for invalid default value.
:CaseImportance: Medium
"""
sc_param = self.sc_params_list.pop()
for test_data in invalid_sc_parameters_data():
with self.subTest(test_data):
with self.assertRaises(HTTPError) as context:
sc_param.override = True
sc_param.parameter_type = test_data['sc_type']
sc_param.default_value = test_data['value']
sc_param.update(
['override', 'parameter_type', 'default_value'])
self.assertNotEqual(
sc_param.read().default_value, test_data['value'])
self.assertRegexpMatches(
context.exception.response.text,
"Validation failed: Default value is invalid"
)
@tier1
def test_positive_validate_default_value_required_check(self):
"""No error raised for non-empty default Value - Required check.
:id: 92977eb0-92c2-4734-84d9-6fda8ff9d2d8
:steps:
1. Set override to True.
2. Set some default value, Not empty.
3. Set 'required' to true.
4. Create a matcher for Parameter for some attribute.
5. Set some Value for matcher.
:expectedresults: No error raised for non-empty default value
:CaseImportance: Medium
"""
sc_param = self.sc_params_list.pop()
sc_param.parameter_type = 'boolean'
sc_param.default_value = True
sc_param.override = True
sc_param.required = True
sc_param.update(
['parameter_type', 'default_value', 'override', 'required']
)
sc_param = sc_param.read()
self.assertEqual(sc_param.required, True)
self.assertEqual(sc_param.default_value, True)
entities.OverrideValue(
smart_class_parameter=sc_param,
match='domain=example.com',
value=False,
).create()
sc_param.update(['override', 'required'])
sc_param = sc_param.read()
self.assertEqual(sc_param.required, True)
self.assertEqual(sc_param.override_values[0]['value'], False)
@tier1
def test_negative_validate_matcher_value_required_check(self):
"""Error is raised for blank matcher Value - Required check.
:id: 49de2c9b-40f1-4837-8ebb-dfa40d8fcb89
:steps:
1. Set override to True.
2. Create a matcher for Parameter for some attribute.
3. Set no value for matcher. Keep blank.
4. Set 'required' to true.
:expectedresults: Error raised for blank matcher value.
:CaseImportance: Medium
"""
sc_param = self.sc_params_list.pop()
sc_param.override = True
sc_param.required = True
sc_param.update(['override', 'required'])
with self.assertRaises(HTTPError) as context:
entities.OverrideValue(
smart_class_parameter=sc_param,
match='domain=example.com',
value='',
).create()
self.assertRegexpMatches(
context.exception.response.text,
"Validation failed: Value can't be blank"
)
@tier1
def test_negative_validate_default_value_with_regex(self):
"""Error is raised for default value not matching with regex.
:id: 99628b78-3037-4c20-95f0-7ce5455093ac
:steps:
1. Set override to True.
            2. Set default value that doesn't match the regex of step 3.
3. Validate this value with regex validator type and rule.
:expectedresults: Error raised for default value not matching with
regex.
:CaseImportance: Low
"""
value = gen_string('alpha')
sc_param = self.sc_params_list.pop()
sc_param.override = True
sc_param.default_value = value
sc_param.validator_type = 'regexp'
sc_param.validator_rule = '[0-9]'
with self.assertRaises(HTTPError) as context:
sc_param.update([
'override', 'default_value',
'validator_type', 'validator_rule'
])
self.assertRegexpMatches(
context.exception.response.text,
"Validation failed: Default value is invalid"
)
self.assertNotEqual(sc_param.read().default_value, value)
@tier1
def test_positive_validate_default_value_with_regex(self):
"""Error is not raised for default value matching with regex.
:id: d5df7804-9633-4ef8-a065-10807351d230
:steps:
1. Set override to True.
2. Set default value that matches the regex of step 3.
3. Validate this value with regex validator type and rule.
4. Create a matcher with value that matches the regex of step 3.
5. Validate this value with regex validator type and rule.
:expectedresults: Error not raised for default value matching with
regex.
:CaseImportance: Low
"""
# validate default value
value = gen_string('numeric')
sc_param = self.sc_params_list.pop()
sc_param.override = True
sc_param.default_value = value
sc_param.validator_type = 'regexp'
sc_param.validator_rule = '[0-9]'
sc_param.update(
['override', 'default_value', 'validator_type', 'validator_rule']
)
sc_param = sc_param.read()
self.assertEqual(sc_param.default_value, value)
self.assertEqual(sc_param.validator_type, 'regexp')
self.assertEqual(sc_param.validator_rule, '[0-9]')
# validate matcher value
entities.OverrideValue(
smart_class_parameter=sc_param,
match='domain=test.com',
value=gen_string('numeric'),
).create()
sc_param.update(
['override', 'default_value', 'validator_type', 'validator_rule']
)
self.assertEqual(sc_param.read().default_value, value)
@tier1
def test_negative_validate_matcher_value_with_list(self):
"""Error is raised for matcher value not in list.
:id: a5e89e86-253f-4254-9ebb-eefb3dc2c2ab
:steps:
1. Set override to True.
            2. Create a matcher with value that doesn't match the list of step 3.
3. Validate this value with list validator type and rule.
:expectedresults: Error raised for matcher value not in list.
:CaseImportance: Medium
"""
sc_param = self.sc_params_list.pop()
entities.OverrideValue(
smart_class_parameter=sc_param,
match='domain=example.com',
value='myexample',
).create()
sc_param.override = True
sc_param.default_value = 50
sc_param.validator_type = 'list'
sc_param.validator_rule = '25, example, 50'
with self.assertRaises(HTTPError) as context:
sc_param.update([
'override',
'default_value',
'validator_type',
'validator_rule',
])
self.assertRegexpMatches(
context.exception.response.text,
"Validation failed: Lookup values is invalid"
)
self.assertNotEqual(sc_param.read().default_value, 50)
@tier1
def test_positive_validate_matcher_value_with_list(self):
"""Error is not raised for matcher value in list.
:id: 05c1a0bb-ba27-4842-bb6a-8420114cffe7
:steps:
1. Set override to True.
2. Create a matcher with value that matches the list of step 3.
3. Validate this value with list validator type and rule.
:expectedresults: Error not raised for matcher value in list.
:CaseImportance: Medium
"""
sc_param = self.sc_params_list.pop()
entities.OverrideValue(
smart_class_parameter=sc_param,
match='domain=example.com',
value=30,
).create()
sc_param.override = True
sc_param.default_value = 'example'
sc_param.validator_type = 'list'
sc_param.validator_rule = 'test, example, 30'
sc_param.update(
['override', 'default_value', 'validator_type', 'validator_rule']
)
self.assertEqual(sc_param.read().default_value, 'example')
@tier1
def test_positive_validate_matcher_value_with_default_type(self):
"""No error for matcher value of default type.
:id: 77b6e90d-e38a-4973-98e3-c698eae5c534
:steps:
1. Set override to True.
2. Update parameter default type with valid value.
3. Create a matcher with value that matches the default type.
:expectedresults: Error not raised for matcher value of default type.
:CaseImportance: Medium
"""
sc_param = self.sc_params_list.pop()
sc_param.override = True
sc_param.parameter_type = 'boolean'
sc_param.default_value = True
sc_param.update(['override', 'parameter_type', 'default_value'])
entities.OverrideValue(
smart_class_parameter=sc_param,
match='domain=example.com',
value=False,
).create()
sc_param = sc_param.read()
self.assertEqual(sc_param.override_values[0]['value'], False)
self.assertEqual(
sc_param.override_values[0]['match'], 'domain=example.com')
@tier1
def test_negative_validate_matcher_and_default_value(self):
"""Error for invalid default and matcher value is raised both at a time.
:id: e46a12cb-b3ea-42eb-b1bb-b750655b6a4a
:steps:
1. Set override to True.
2. Update parameter default type with Invalid value.
            3. Create a matcher with value that doesn't match the default
type.
:expectedresults: Error raised for invalid default and matcher value
both.
:CaseImportance: Medium
"""
sc_param = self.sc_params_list.pop()
entities.OverrideValue(
smart_class_parameter=sc_param,
match='domain=example.com',
value=gen_string('alpha'),
).create()
with self.assertRaises(HTTPError) as context:
sc_param.parameter_type = 'boolean'
sc_param.default_value = gen_string('alpha')
sc_param.update(['parameter_type', 'default_value'])
self.assertRegexpMatches(
context.exception.response.text,
"Validation failed: Default value is invalid, "
"Lookup values is invalid"
)
@tier1
def test_positive_create_and_remove_matcher_puppet_default_value(self):
"""Create matcher for attribute in parameter where
value is puppet default value.
:id: 2b205e9c-e50c-48cd-8ebb-3b6bea09be77
:steps:
1. Set override to True.
2. Set some default Value.
3. Create matcher with valid attribute type, name and puppet
default value.
4. Remove matcher afterwards
:expectedresults: The matcher has been created and removed successfully.
:CaseImportance: Medium
"""
sc_param = self.sc_params_list.pop()
value = gen_string('alpha')
sc_param.override = True
sc_param.default_value = gen_string('alpha')
override = entities.OverrideValue(
smart_class_parameter=sc_param,
match='domain=example.com',
value=value,
use_puppet_default=True,
).create()
sc_param = sc_param.read()
self.assertEqual(
sc_param.override_values[0]['use_puppet_default'], True)
self.assertEqual(
sc_param.override_values[0]['match'], 'domain=example.com')
self.assertEqual(sc_param.override_values[0]['value'], value)
override.delete()
self.assertEqual(len(sc_param.read().override_values), 0)
@tier1
def test_positive_enable_merge_overrides_default_checkboxes(self):
"""Enable Merge Overrides, Merge Default checkbox for supported types.
:id: ae1c8e2d-c15d-4325-9aa6-cc6b091fb95a
:steps: Set parameter type to array/hash.
:expectedresults: The Merge Overrides, Merge Default checks are enabled
to check.
:CaseImportance: Medium
"""
sc_param = self.sc_params_list.pop()
sc_param.override = True
sc_param.parameter_type = 'array'
sc_param.default_value = "[{0}, {1}]".format(
gen_string('alpha'), gen_string('alpha'))
sc_param.merge_overrides = True
sc_param.merge_default = True
sc_param.update([
'override',
'parameter_type',
'default_value',
'merge_overrides',
'merge_default',
])
sc_param = sc_param.read()
self.assertEqual(sc_param.merge_overrides, True)
self.assertEqual(sc_param.merge_default, True)
@tier1
def test_negative_enable_merge_overrides_default_checkboxes(self):
"""Disable Merge Overrides, Merge Default checkboxes for non supported types.
:id: d7b1c336-bd9f-40a3-a573-939f2a021cdc
:steps: Set parameter type other than array/hash.
:expectedresults: The Merge Overrides, Merge Default checks are not
enabled to check.
:CaseImportance: Medium
"""
sc_param = self.sc_params_list.pop()
sc_param.override = True
sc_param.parameter_type = 'string'
sc_param.default_value = gen_string('alpha')
sc_param.merge_overrides = True
sc_param.merge_default = True
with self.assertRaises(HTTPError) as context:
sc_param.update([
'override',
'parameter_type',
'default_value',
'merge_overrides',
])
self.assertRegexpMatches(
context.exception.response.text,
"Validation failed: Merge overrides can only be set for "
"array or hash"
)
with self.assertRaises(HTTPError) as context:
sc_param.update([
'override',
'parameter_type',
'default_value',
'merge_default',
])
self.assertRegexpMatches(
context.exception.response.text,
"Validation failed: Merge default can only be set when merge "
"overrides is set"
)
sc_param = sc_param.read()
self.assertEqual(sc_param.merge_overrides, False)
self.assertEqual(sc_param.merge_default, False)
@tier1
def test_positive_enable_avoid_duplicates_checkbox(self):
"""Enable Avoid duplicates checkbox for supported type- array.
:id: 80bf52df-e678-4384-a4d5-7a88928620ce
:steps:
1. Set parameter type to array.
2. Set 'merge overrides' to True.
:expectedresults: The Avoid Duplicates is enabled to set to True.
:CaseImportance: Medium
"""
sc_param = self.sc_params_list.pop()
sc_param.override = True
sc_param.parameter_type = 'array'
sc_param.default_value = "[{0}, {1}]".format(
gen_string('alpha'), gen_string('alpha'))
sc_param.merge_overrides = True
sc_param.avoid_duplicates = True
sc_param.update([
'override',
'parameter_type',
'default_value',
'merge_overrides',
'avoid_duplicates',
])
self.assertEqual(sc_param.read().avoid_duplicates, True)
@tier1
def test_negative_enable_avoid_duplicates_checkbox(self):
"""Disable Avoid duplicates checkbox for non supported types.
:id: 11d75f6d-7105-4ee8-b147-b8329cae4156
:steps: Set parameter type other than array.
:expectedresults:
1. The Merge Overrides checkbox is only enabled to check for type
hash other than array.
2. The Avoid duplicates checkbox not enabled to check for any type
than array.
:CaseImportance: Medium
"""
sc_param = self.sc_params_list.pop()
sc_param.override = True
sc_param.parameter_type = 'string'
sc_param.default_value = gen_string('alpha')
sc_param.avoid_duplicates = True
with self.assertRaises(HTTPError) as context:
sc_param.update([
'override',
'parameter_type',
'default_value',
'avoid_duplicates'
])
self.assertRegexpMatches(
context.exception.response.text,
"Validation failed: Avoid duplicates can only be set for arrays "
"that have merge_overrides set to true"
)
self.assertEqual(sc_param.read().avoid_duplicates, False)
@tier2
def test_positive_impact_parameter_delete_attribute(self):
"""Impact on parameter after deleting associated attribute.
:id: 3ffbf403-dac9-4172-a586-82267765abd8
:steps:
1. Set the parameter to True and create a matcher for some
attribute.
2. Delete the attribute.
3. Recreate the attribute with same name as earlier.
:expectedresults:
1. The matcher for deleted attribute removed from parameter.
2. On recreating attribute, the matcher should not reappear in
parameter.
:CaseImportance: Medium
:BZ: 1374253
"""
sc_param = self.sc_params_list.pop()
hostgroup_name = gen_string('alpha')
match = 'hostgroup={0}'.format(hostgroup_name)
match_value = gen_string('alpha')
hostgroup = entities.HostGroup(
name=hostgroup_name,
environment=self.env,
).create()
hostgroup.add_puppetclass(
data={'puppetclass_id': self.puppet_class.id})
entities.OverrideValue(
smart_class_parameter=sc_param,
match=match,
value=match_value,
).create()
sc_param = sc_param.read()
self.assertEqual(sc_param.override_values[0]['match'], match)
self.assertEqual(sc_param.override_values[0]['value'], match_value)
hostgroup.delete()
self.assertEqual(len(sc_param.read().override_values), 0)
hostgroup = entities.HostGroup(
name=hostgroup_name,
environment=self.env,
).create()
hostgroup.add_puppetclass(
data={'puppetclass_id': self.puppet_class.id})
self.assertEqual(len(sc_param.read().override_values), 0)
| ldjebran/robottelo | tests/foreman/api/test_classparameters.py | Python | gpl-3.0 | 25,724 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'ImportFile.results_acoustid'
db.add_column('importer_importfile', 'results_acoustid',
self.gf('django.db.models.fields.TextField')(default='{}', null=True, blank=True),
keep_default=False)
# Adding field 'ImportFile.results_acoustid_status'
db.add_column('importer_importfile', 'results_acoustid_status',
self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
keep_default=False)
def backwards(self, orm):
# Deleting field 'ImportFile.results_acoustid'
db.delete_column('importer_importfile', 'results_acoustid')
# Deleting field 'ImportFile.results_acoustid_status'
db.delete_column('importer_importfile', 'results_acoustid_status')
models = {
'actstream.action': {
'Meta': {'ordering': "('-timestamp',)", 'object_name': 'Action'},
'action_object_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'action_object'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'action_object_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'actor_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'actor'", 'to': "orm['contenttypes.ContentType']"}),
'actor_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'data': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'target_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'target'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'target_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'verb': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'importer.import': {
'Meta': {'ordering': "('-created',)", 'object_name': 'Import'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'web'", 'max_length': "'10'"}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'import_user'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"})
},
'importer.importfile': {
'Meta': {'ordering': "('-created',)", 'object_name': 'ImportFile'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'import_session': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'files'", 'null': 'True', 'to': "orm['importer.Import']"}),
'mimetype': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'results_acoustid': ('django.db.models.fields.TextField', [], {'default': "'{}'", 'null': 'True', 'blank': 'True'}),
'results_acoustid_status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'results_discogs': ('django.db.models.fields.TextField', [], {'default': "'{}'", 'null': 'True', 'blank': 'True'}),
'results_discogs_status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'results_musicbrainz': ('django.db.models.fields.TextField', [], {'default': "'{}'", 'null': 'True', 'blank': 'True'}),
'results_tag': ('django.db.models.fields.TextField', [], {'default': "'{}'", 'null': 'True', 'blank': 'True'}),
'results_tag_status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
}
}
complete_apps = ['importer'] | hzlf/openbroadcast | website/apps/importer/migrations/0009_auto__add_field_importfile_results_acoustid__add_field_importfile_resu.py | Python | gpl-3.0 | 8,507 |
from click.testing import CliRunner
from gandi.cli.core.base import GandiModule
from ..compat import unittest, mock
from ..fixtures.api import Api
from ..fixtures.mocks import MockObject
class CommandTestCase(unittest.TestCase):
base_mocks = [
('gandi.cli.core.base.GandiModule.save', MockObject.blank_func),
('gandi.cli.core.base.GandiModule.execute', MockObject.execute),
]
mocks = []
def setUp(self):
self.runner = CliRunner()
self.mocks = self.mocks + self.base_mocks
self.mocks = [mock.patch(*mock_args) for mock_args in self.mocks]
for dummy in self.mocks:
dummy.start()
GandiModule._api = Api()
GandiModule._conffiles = {'global': {'api': {'env': 'test',
'key': 'apikey0001'}}}
def tearDown(self):
GandiModule._api = None
GandiModule._conffiles = {}
for dummy in reversed(self.mocks):
dummy.stop()
def invoke_with_exceptions(self, cli, args, catch_exceptions=False,
**kwargs):
return self.runner.invoke(cli, args, catch_exceptions=catch_exceptions,
**kwargs)
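# A minimal usage sketch (hypothetical command and mock target, not part of the
# real suite): a concrete test case extends `mocks` with the API paths it wants
# patched and drives a click command through invoke_with_exceptions, e.g.
#
#   class ExampleTestCase(CommandTestCase):
#       mocks = [('gandi.cli.core.base.GandiModule.echo', MockObject.blank_func)]
#
#       def test_example(self):
#           result = self.invoke_with_exceptions(some_cli_command, ['--help'])
#           assert result.exit_code == 0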
| GPCsolutions/gandi.cli | gandi/cli/tests/commands/base.py | Python | gpl-3.0 | 1,238 |
__author__ = "jing"
from scrapy.cmdline import execute
execute()
| BitTigerInst/Kumamon | zhihu/manage.py | Python | gpl-3.0 | 68 |
# -*- coding: utf-8 -*-
# Copyright (C) 2012 VT SuperDARN Lab
# Full license can be found in LICENSE.txt
"""tsyganenko module
This module contains the following object(s):
Classes
-------------------------------------------------------------
tsygTrace Wraps fortran subroutines in one convenient class
-------------------------------------------------------------
Module
-------------------------------
tsygFort Fortran subroutines
-------------------------------
"""
import tsygFort
import logging
class tsygTrace(object):
"""models.tsyganenko.trace
Trace magnetic field line(s) from point(s)
Parameters
----------
lat : Optional[ ]
latitude [degrees]
lon : Optional[ ]
longitude [degrees]
rho : Optional[ ]
distance from center of the Earth [km]
filename : Optional[ ]
load a trace object directly from a file
coords : Optional[str]
coordinates used for start point ['geo']
datetime : Optional[datetime]
a python datetime object
vswgse : Optional[list, float]
solar wind velocity in GSE coordinates [m/s, m/s, m/s]
pdyn : Optional[float]
solar wind dynamic pressure [nPa]
    dst : Optional[float]
Dst index [nT]
byimf : Optional[float]
IMF By [nT]
bzimf : Optional[float]
IMF Bz [nT]
lmax : Optional[int]
maximum number of points to trace
rmax : Optional[float]
upper trace boundary in Re
rmin : Optional[float]
lower trace boundary in Re
dsmax : Optional[float]
maximum tracing step size
err : Optional[float]
tracing step tolerance
Attributes
----------
lat :
latitude [degrees]
lon :
longitude [degrees]
rho :
distance from center of the Earth [km]
coords : str
coordinates used for start point ['geo']
vswgse : list
solar wind velocity in GSE coordinates [m/s, m/s, m/s]
pdyn : float
solar wind dynamic pressure [nPa]
    dst : float
Dst index [nT]
byimf : float
IMF By [nT]
bzimf : float
IMF Bz [nT]
datetime : Optional[datetime]
a python datetime object
Returns
-------
Elements of this object:
    lat[N/S]H :
        latitude of the trace footpoint in the Northern/Southern hemisphere
    lon[N/S]H :
        longitude of the trace footpoint in the Northern/Southern hemisphere
    rho[N/S]H :
        distance of the trace footpoint in the Northern/Southern hemisphere
Examples
--------
from numpy import arange, zeros, ones
import tsyganenko
# trace a series of points
lats = arange(10, 90, 10)
lons = zeros(len(lats))
rhos = 6372.*ones(len(lats))
trace = tsyganenko.tsygTrace(lats, lons, rhos)
# Print the results nicely
print trace
# Plot the traced field lines
ax = trace.plot()
# Or generate a 3d view of the traced field lines
ax = trace.plot3d()
# Save your trace to a file for later use
trace.save('trace.dat')
# And when you want to re-use the saved trace
trace = tsyganenko.tsygTrace(filename='trace.dat')
Notes
-----
**FUNCTION**: trace(lat, lon, rho, coords='geo', datetime=None,
vswgse=[-400.,0.,0.], Pdyn=2., Dst=-5., ByIMF=0., BzIMF=-5.
lmax=5000, rmax=60., rmin=1., dsmax=0.01, err=0.000001)
Written by Sebastien 2012-10
"""
def __init__(self, lat=None, lon=None, rho=None, filename=None,
coords='geo', datetime=None,
vswgse=[-400.,0.,0.], pdyn=2., dst=-5., byimf=0., bzimf=-5.,
lmax=5000, rmax=60., rmin=1., dsmax=0.01, err=0.000001):
from datetime import datetime as pydt
assert (None not in [lat, lon, rho]) or filename, 'You must provide either (lat, lon, rho) or a filename to read from'
if None not in [lat, lon, rho]:
self.lat = lat
self.lon = lon
self.rho = rho
self.coords = coords
self.vswgse = vswgse
self.pdyn = pdyn
self.dst = dst
self.byimf = byimf
self.bzimf = bzimf
            # If no datetime is provided, default to the current UTC time
if datetime is None: datetime = pydt.utcnow()
self.datetime = datetime
iTest = self.__test_valid__()
if not iTest: self.__del__()
self.trace()
elif filename:
self.load(filename)
def __test_valid__(self):
"""Test the validity of input arguments to the tsygTrace class and trace method
Written by Sebastien 2012-10
"""
assert (len(self.vswgse) == 3), 'vswgse must have 3 elements'
        assert (self.coords.lower() == 'geo'), '{}: this coordinate system is not supported'.format(self.coords.lower())
# A provision for those who want to batch trace
try:
[l for l in self.lat]
except:
self.lat = [self.lat]
try:
[l for l in self.lon]
except:
self.lon = [self.lon]
try:
[r for r in self.rho]
except:
self.rho = [self.rho]
try:
[d for d in self.datetime]
except:
self.datetime = [self.datetime for l in self.lat]
        # Make sure they're all the same length
        assert (len(self.lat) == len(self.lon) == len(self.rho) == len(self.datetime)), \
            'lat, lon, rho and datetime must be the same length'
return True
def trace(self, lat=None, lon=None, rho=None, coords=None, datetime=None,
vswgse=None, pdyn=None, dst=None, byimf=None, bzimf=None,
lmax=5000, rmax=60., rmin=1., dsmax=0.01, err=0.000001):
"""See tsygTrace for a description of each parameter
        Any unspecified parameter defaults to the one stored in the object.
        Unspecified lmax, rmax, rmin, dsmax and err fall back to fixed default values.
Parameters
----------
lat : Optional[ ]
latitude [degrees]
lon : Optional[ ]
longitude [degrees]
rho : Optional[ ]
distance from center of the Earth [km]
coords : Optional[str]
coordinates used for start point ['geo']
datetime : Optional[datetime]
a python datetime object
vswgse : Optional[list, float]
solar wind velocity in GSE coordinates [m/s, m/s, m/s]
pdyn : Optional[float]
solar wind dynamic pressure [nPa]
        dst : Optional[float]
Dst index [nT]
byimf : Optional[float]
IMF By [nT]
bzimf : Optional[float]
IMF Bz [nT]
lmax : Optional[int]
maximum number of points to trace
rmax : Optional[float]
upper trace boundary in Re
rmin : Optional[float]
lower trace boundary in Re
dsmax : Optional[float]
maximum tracing step size
err : Optional[float]
tracing step tolerance
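        Examples
        --------
        A minimal sketch (hypothetical parameter values); anything left
        unspecified keeps the value already stored in the object::
            trace.trace(pdyn=3., dst=-30.)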
Written by Sebastien 2012-10
"""
from numpy import radians, degrees, zeros
# Store existing values of class attributes in case something is wrong
# and we need to revert back to them
if lat: _lat = self.lat
if lon: _lon = self.lon
if rho: _rho = self.rho
if coords: _coords = self.coords
if vswgse: _vswgse = self.vswgse
if not datetime is None: _datetime = self.datetime
# Pass position if new
if lat: self.lat = lat
lat = self.lat
if lon: self.lon = lon
lon = self.lon
if rho: self.rho = rho
rho = self.rho
if not datetime is None: self.datetime = datetime
datetime = self.datetime
# Set necessary parameters if new
if coords: self.coords = coords
coords = self.coords
if not datetime is None: self.datetime = datetime
datetime = self.datetime
if vswgse: self.vswgse = vswgse
vswgse = self.vswgse
if pdyn: self.pdyn = pdyn
pdyn = self.pdyn
if dst: self.dst = dst
dst = self.dst
if byimf: self.byimf = byimf
byimf = self.byimf
if bzimf: self.bzimf = bzimf
bzimf = self.bzimf
# Test that everything is in order, if not revert to existing values
iTest = self.__test_valid__()
if not iTest:
if lat: self.lat = _lat
            if lon: self.lon = _lon
if rho: self.rho = _rho
if coords: self.coords = _coords
if vswgse: self.vswgse = _vswgse
if not datetime is None: self.datetime = _datetime
# Declare the same Re as used in Tsyganenko models [km]
Re = 6371.2
# Initialize trace array
self.l = zeros(len(lat))
self.xTrace = zeros((len(lat),2*lmax))
self.yTrace = self.xTrace.copy()
self.zTrace = self.xTrace.copy()
self.xGsw = self.l.copy()
self.yGsw = self.l.copy()
self.zGsw = self.l.copy()
self.latNH = self.l.copy()
self.lonNH = self.l.copy()
self.rhoNH = self.l.copy()
self.latSH = self.l.copy()
self.lonSH = self.l.copy()
self.rhoSH = self.l.copy()
# And now iterate through the desired points
for ip in xrange(len(lat)):
# This has to be called first
tsygFort.recalc_08(datetime[ip].year,datetime[ip].timetuple().tm_yday,
datetime[ip].hour,datetime[ip].minute,datetime[ip].second,
vswgse[0],vswgse[1],vswgse[2])
# Convert lat,lon to geographic cartesian and then gsw
r, theta, phi, xgeo, ygeo, zgeo = tsygFort.sphcar_08(
rho[ip]/Re, radians(90.-lat[ip]), radians(lon[ip]),
0., 0., 0.,
1)
if coords.lower() == 'geo':
xgeo, ygeo, zgeo, xgsw, ygsw, zgsw = tsygFort.geogsw_08(
xgeo, ygeo, zgeo,
0. ,0. ,0. ,
1)
self.xGsw[ip] = xgsw
self.yGsw[ip] = ygsw
self.zGsw[ip] = zgsw
# Trace field line
inmod = 'IGRF_GSW_08'
exmod = 'T96_01'
parmod = [pdyn, dst, byimf, bzimf, 0, 0, 0, 0, 0, 0]
# First towards southern hemisphere
maptoL = [-1, 1]
for mapto in maptoL:
xfgsw, yfgsw, zfgsw, xarr, yarr, zarr, l = tsygFort.trace_08( xgsw, ygsw, zgsw,
mapto, dsmax, err, rmax, rmin, 0,
parmod, exmod, inmod,
lmax )
# Convert back to spherical geographic coords
xfgeo, yfgeo, zfgeo, xfgsw, yfgsw, zfgsw = tsygFort.geogsw_08(
0. ,0. ,0. ,
xfgsw, yfgsw, zfgsw,
-1)
geoR, geoColat, geoLon, xgeo, ygeo, zgeo = tsygFort.sphcar_08(
0., 0., 0.,
xfgeo, yfgeo, zfgeo,
-1)
# Get coordinates of traced point
if mapto == 1:
self.latSH[ip] = 90. - degrees(geoColat)
self.lonSH[ip] = degrees(geoLon)
self.rhoSH[ip] = geoR*Re
elif mapto == -1:
self.latNH[ip] = 90. - degrees(geoColat)
self.lonNH[ip] = degrees(geoLon)
self.rhoNH[ip] = geoR*Re
# Store trace
if mapto == -1:
self.xTrace[ip,0:l] = xarr[l-1::-1]
self.yTrace[ip,0:l] = yarr[l-1::-1]
self.zTrace[ip,0:l] = zarr[l-1::-1]
elif mapto == 1:
self.xTrace[ip,self.l[ip]:self.l[ip]+l] = xarr[0:l]
self.yTrace[ip,self.l[ip]:self.l[ip]+l] = yarr[0:l]
self.zTrace[ip,self.l[ip]:self.l[ip]+l] = zarr[0:l]
self.l[ip] += l
        # Resize trace output to the minimum possible length
self.xTrace = self.xTrace[:,0:self.l.max()]
self.yTrace = self.yTrace[:,0:self.l.max()]
self.zTrace = self.zTrace[:,0:self.l.max()]
def __str__(self):
"""Print object information in a nice way
Written by Sebastien 2012-10
"""
# Declare print format
outstr = '''
vswgse=[{:6.0f},{:6.0f},{:6.0f}] [m/s]
pdyn={:3.0f} [nPa]
dst={:3.0f} [nT]
byimf={:3.0f} [nT]
bzimf={:3.0f} [nT]
'''.format(self.vswgse[0],
self.vswgse[1],
self.vswgse[2],
self.pdyn,
self.dst,
self.byimf,
self.bzimf)
outstr += '\nCoords: {}\n'.format(self.coords)
outstr += '(latitude [degrees], longitude [degrees], distance from center of the Earth [km])\n'
# Print stuff
for ip in xrange(len(self.lat)):
outstr += '''
({:6.3f}, {:6.3f}, {:6.3f}) @ {}
--> NH({:6.3f}, {:6.3f}, {:6.3f})
--> SH({:6.3f}, {:6.3f}, {:6.3f})
'''.format(self.lat[ip], self.lon[ip], self.rho[ip],
self.datetime[ip].strftime('%H:%M UT (%d-%b-%y)'),
self.latNH[ip], self.lonNH[ip], self.rhoNH[ip],
self.latSH[ip], self.lonSH[ip], self.rhoSH[ip])
return outstr
def save(self, filename):
"""Save trace information to a file
Parameters
----------
filename : str
Written by Sebastien 2012-10
"""
import cPickle as pickle
with open( filename, "wb" ) as fileObj:
pickle.dump(self, fileObj)
def load(self, filename):
"""load trace information from a file
Parameters
----------
filename : str
Written by Sebastien 2012-10
"""
import cPickle as pickle
with open( filename, "rb" ) as fileObj:
obj = pickle.load(fileObj)
for k, v in obj.__dict__.items():
self.__dict__[k] = v
def plot(self, proj='xz', color='b', onlyPts=None, showPts=False,
showEarth=True, disp=True, **kwargs):
"""Generate a 2D plot of the trace projected onto a given plane
Graphic keywords apply to the plot method for the field lines
Parameters
----------
proj : Optional[str]
the projection plane in GSW coordinates
color : Optional[char]
field line color
onlyPts : Optional[ ]
            if the trace contains multiple points, only show the specified indices (list)
showEarth : Optional[bool]
Toggle Earth disk visibility on/off
showPts : Optional[bool]
Toggle start points visibility on/off
disp : Optional[bool]
invoke pylab.show()
**kwargs :
see matplotlib.axes.Axes.plot
Returns
-------
ax : matplotlib axes object
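        Examples
        --------
        A minimal sketch (keyword values are illustrative): project the traced
        field lines onto the GSW X-Y plane in red and show the start points::
            ax = trace.plot(proj='xy', color='r', showPts=True)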
Written by Sebastien 2012-10
"""
from pylab import gcf, gca, show
from matplotlib.patches import Circle
from numpy import pi, linspace, outer, ones, size, cos, sin, radians, cross
from numpy.ma import masked_array
        assert (len(proj) == 2) and \
            (proj[0] in ['x','y','z'] and proj[1] in ['x','y','z']) and \
            (proj[0] != proj[1]), 'Invalid projection plane'
fig = gcf()
ax = fig.gca()
ax.set_aspect('equal')
# First plot a nice disk for the Earth
if showEarth:
circ = Circle(xy=(0,0), radius=1, facecolor='0.8', edgecolor='k', alpha=.5, zorder=0)
ax.add_patch(circ)
# Select indices to show
if onlyPts is None:
inds = xrange(len(self.lat))
else:
try:
inds = [ip for ip in onlyPts]
except:
inds = [onlyPts]
# Then plot the traced field line
for ip in inds:
# Select projection plane
if proj[0] == 'x':
xx = self.xTrace[ip,0:self.l[ip]]
xpt = self.xGsw[ip]
ax.set_xlabel(r'$X_{GSW}$')
xdir = [1,0,0]
elif proj[0] == 'y':
xx = self.yTrace[ip,0:self.l[ip]]
xpt = self.yGsw[ip]
ax.set_xlabel(r'$Y_{GSW}$')
xdir = [0,1,0]
elif proj[0] == 'z':
xx = self.zTrace[ip,0:self.l[ip]]
xpt = self.zGsw[ip]
ax.set_xlabel(r'$Z_{GSW}$')
xdir = [0,0,1]
if proj[1] == 'x':
yy = self.xTrace[ip,0:self.l[ip]]
ypt = self.xGsw[ip]
ax.set_ylabel(r'$X_{GSW}$')
ydir = [1,0,0]
elif proj[1] == 'y':
yy = self.yTrace[ip,0:self.l[ip]]
ypt = self.yGsw[ip]
ax.set_ylabel(r'$Y_{GSW}$')
ydir = [0,1,0]
elif proj[1] == 'z':
yy = self.zTrace[ip,0:self.l[ip]]
ypt = self.zGsw[ip]
ax.set_ylabel(r'$Z_{GSW}$')
ydir = [0,0,1]
sign = 1 if -1 not in cross(xdir,ydir) else -1
if 'x' not in proj:
zz = sign*self.xGsw[ip]
indMask = sign*self.xTrace[ip,0:self.l[ip]] < 0
if 'y' not in proj:
zz = sign*self.yGsw[ip]
indMask = sign*self.yTrace[ip,0:self.l[ip]] < 0
if 'z' not in proj:
zz = sign*self.zGsw[ip]
indMask = sign*self.zTrace[ip,0:self.l[ip]] < 0
# Plot
ax.plot(masked_array(xx, mask=~indMask),
masked_array(yy, mask=~indMask),
zorder=-1, color=color, **kwargs)
ax.plot(masked_array(xx, mask=indMask),
masked_array(yy, mask=indMask),
zorder=1, color=color, **kwargs)
if showPts:
ax.scatter(xpt, ypt, c='k', s=40, zorder=zz)
if disp: show()
return ax
def plot3d(self, onlyPts=None, showEarth=True, showPts=False, disp=True,
xyzlim=None, zorder=1, linewidth=2, color='b', **kwargs):
"""Generate a 3D plot of the trace
Graphic keywords apply to the plot3d method for the field lines
Parameters
----------
onlyPts : Optional[ ]
            if the trace contains multiple points, only show the specified indices (list)
showEarth : Optional[bool]
Toggle Earth sphere visibility on/off
showPts : Optional[bool]
Toggle start points visibility on/off
disp : Optional[bool]
invoke pylab.show()
xyzlim : Optional[ ]
3D axis limits
zorder : Optional[int]
3D layers ordering
linewidth : Optional[int]
field line width
color : Optional[char]
field line color
**kwargs :
see mpl_toolkits.mplot3d.axes3d.Axes3D.plot3D
Returns
-------
ax : matplotlib axes
axes object
Written by Sebastien 2012-10
"""
from mpl_toolkits.mplot3d import proj3d
from numpy import pi, linspace, outer, ones, size, cos, sin, radians
from pylab import gca, gcf, show
fig = gcf()
ax = fig.gca(projection='3d')
# First plot a nice sphere for the Earth
if showEarth:
u = linspace(0, 2 * pi, 179)
v = linspace(0, pi, 179)
tx = outer(cos(u), sin(v))
ty = outer(sin(u), sin(v))
tz = outer(ones(size(u)), cos(v))
ax.plot_surface(tx,ty,tz,rstride=10, cstride=10, color='grey', alpha=.5, zorder=0, linewidth=0.5)
# Select indices to show
if onlyPts is None:
inds = xrange(len(self.lat))
else:
try:
inds = [ip for ip in onlyPts]
except:
inds = [onlyPts]
# Then plot the traced field line
for ip in inds:
ax.plot3D( self.xTrace[ip,0:self.l[ip]],
self.yTrace[ip,0:self.l[ip]],
self.zTrace[ip,0:self.l[ip]],
zorder=zorder, linewidth=linewidth, color=color, **kwargs)
if showPts:
ax.scatter3D(self.xGsw[ip], self.yGsw[ip], self.zGsw[ip], c='k')
# Set plot limits
if not xyzlim:
xyzlim = max( [ ax.get_xlim3d().max(),
ax.get_ylim3d().max(),
ax.get_zlim3d().max(), ] )
ax.set_xlim3d([-xyzlim,xyzlim])
ax.set_ylim3d([-xyzlim,xyzlim])
ax.set_zlim3d([-xyzlim,xyzlim])
if disp: show()
return ax
| aburrell/davitpy | davitpy/models/tsyganenko/__init__.py | Python | gpl-3.0 | 21,946 |
#!/usr/bin/env python
#Pools assigned OTUs with identical names and renumbers the remaining distinct
#OTUs. Also allows filtering out OTUs with less than "min_cts" in at least
#one sample.
# Copyright (C) <2012> <Benjamin C. Smith>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys, os, re, argparse, csv
from numpy import array, sum, append, amax, hstack, savetxt, linspace
from itertools import product
from time import strftime
parser = argparse.ArgumentParser(description='''Filter an OTU table by pooling
OTUs with identical names. (Optionally: discard OTUs with less than a specified
minimum proportion of counts in any one sample)''')
parser.add_argument('-i','--infile', required=True, nargs=1,
type=str, help='''input filepath. Should be a tab-delimited
OTU table.''')
parser.add_argument('-o', '--outfile', required=True, nargs=1, type=str,
help='''output filepath. The resulting pooled OTU table is
written here.''')
parser.add_argument('-k', '--keep', nargs=1, default=[0], type=float,
help='''set the minimum percentile of matched taxa to keep
based on maximum reads per sample for each taxon.
E.g., setting 50 will keep the taxon with a maximum number of
reads per sample that represents the 50th
percentile and all taxa above. In microbial communities,
there is usually a high degree of taxon uneveness and their
distribution may have a long tail. For this reason, you may be
required to set this value much higher than you would normally
expect, to filter out taxa with very small read numbers.''')
parser.add_argument('-r', '--reads', action='store_true',
help='''print information about number of reads''')
args = parser.parse_args()
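# Typical invocation (file names are hypothetical):
#   python pool_otus.py -i otu_table.txt -o pooled_otu_table.txt -k 50 -r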
min_cts = args.keep[0]
if min_cts >= 100 or min_cts < 0:
print "Invalid minimum count threshold (-k/--keep parameter). \
Value must be >= 0 and < 100 ."
sys.exit(1)
infile = args.infile[0]
outfile = args.outfile[0]
print "\nRun started " + strftime("%Y-%m-%d %H:%M:%S") + "."
#collect sample names, using first line of file
inhandle = csv.reader(open(infile, 'rU'), delimiter='\t')
outhandle = csv.writer(open(outfile, 'wb'), delimiter='\t')
for line in inhandle:
if line[0][0] == "#":
if line[0]=="#OTU ID":
sample_ids = [column for column in line if \
re.search(column, "#OTU ID"'|'"Consensus Lineage")==None]
outhandle.writerow(line)
else:
break
otu_names = []
otu_dict = {}
#build list of OTU names
inhandle = csv.reader(open(infile, 'rU'), delimiter='\t')
for line in inhandle :
if line[0][0]!="#":
if line[-1] not in otu_names:
otu_names.append(line[-1])
# K,V = name of taxon, list of number of occurrences in each sample
#there may be more than one V for each K.
otu_dict[line[-1]] = [line[1:-1]]
else :
otu_dict[line[-1]].append(line[1:-1])
#create array of total counts per sample per otu by summing columns for all lists of
#counts for each otu
counts_per_otu=array([array(lists, dtype=int).sum(axis=0) for lists in
otu_dict.values()])
#Calculate the total reads in the table prior to filtering
tot_start_cts = counts_per_otu.sum()
#Order the taxa according to maximum number of counts in a sample
ordered_taxa=sorted([(name, max(counts)) for name, counts in
zip(otu_dict.keys(), counts_per_otu)],
key=lambda taxon: taxon[1])
#Calculate the rank above which to keep taxa based on the specified percentile.
#Subtract 1 because python list numbering starts at 0.
keep_rank=int(round((min_cts/100)*len(ordered_taxa)+0.5))-1
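#e.g. (hypothetical numbers): with -k 50 and 200 taxa, keep_rank =
#int(round(0.5*200 + 0.5)) - 1 = 100, so only the taxa at ranks 100..199
#(the upper half by maximum reads per sample) are kept below.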
otu_table = [] #empty array that will be filled with filtered count data
ictr = 1 #counter for assigning new OTU IDs.
#counters for tracking numbers of reads in initial and final OTU tables
tot_end_cts = 0
for i, entry in enumerate(ordered_taxa):
key=entry[0]
if i >= keep_rank and entry[1]>0:
#create row for output
if key != 'Noise' : #if not the "Noise" OTU add otu_id from ictr
# and increment it by 1.
otu_id = array( [ictr], dtype=object)
ictr += 1
else: # if "Noise" OTU, set otu_id to '0' and don't increment ictr.
otu_id = array( [0], dtype=object)
otu_counts=array(otu_dict[key], dtype=int).sum(axis=0)
otu_name = array( [key], dtype=object)
otu_row = hstack( (otu_id, otu_counts, otu_name) )
tot_end_cts += otu_counts.sum()
otu_table.append(otu_row.tolist())
final_otu_table=otu_table
#otu_table = array(otu_table) # convert to numpy array to allow easy sorting
#final_otu_table = otu_table[otu_table[:,0].argsort(),:].tolist() # sort
#otu_table by otu_id and convert back to list
for row in final_otu_table:
outhandle.writerow(row)
print "Finished.\n"
print "Final OTU table preview: "
print array(final_otu_table)
# Write log
logpath = open(str(os.path.splitext(outfile)[0]) + ".log","wb")
logpath.write("Logfile for OTU pooling of " \
+ infile + "\n" + strftime("%Y-%m-%d %H:%M:%S") + "\n\n" \
"Parameters specified:\n" \
"Minimum read threshold: " + str(min_cts) + "\n" \
"Counts:"
"\nTotal reads in input OTU table: " + str(tot_start_cts) + "\n" \
"Total reads in output OTU table: " + str(tot_end_cts) + "\n" \
"Reads discarded through retaining " + str(min_cts) \
+ " percentile and above: " + str(tot_start_cts-tot_end_cts) + "\n" \
"Maximum reads per sample of " + str(min_cts) + " percentile: " + str(ordered_taxa[keep_rank][1]) + "\n" )
logpath.close()
print "\n\nLog file written (" + str(os.path.splitext(outfile)[0]) + ".log" + ")\n"
if args.reads:
print '\nTotal reads in input OTU table: ' + str(tot_start_cts)
print 'Total reads in output OTU table: ' + str(tot_end_cts)
    print 'Reads discarded through retaining ' + str(min_cts) \
        + ' percentile and above: ' + str(tot_start_cts-tot_end_cts)
print 'Maximum reads per sample of ' + str(min_cts) + ' percentile: ' \
+ str(ordered_taxa[keep_rank][1]) + "\n"
| benjsmith/mubiomics | scripts/pool_otus.py | Python | gpl-3.0 | 6,450 |
# coding: utf-8
from handlers import base
from common import functions
class IndexHandler(base.BaseHandler):
def get(self, *args, **kwargs):
self.render('index.html')
class InfoHandler(base.SocketHandler):
def on_message(self, message):
data = functions.jsonToObject(message)
if not data:
return None
if not data.get('target') or not isinstance(data['target'], basestring):
return self.write_message('done')
        base.SocketHandler.status = True  # reset the query status
findRes = self.db.targets.find_one({'target': data['target']})
if not findRes:
result = self._insertTarget(data['target'])
if not result:
return self.write_message('done')
findRes = {'plugins': []}
        # If the database already has records for some plugins, output them first, then check the plugins that have no record
for pluginName in findRes['plugins']:
tempObj = self.getPlugins.get(pluginName)
            # The plugin name may have changed and no longer match the database record, so remove the outdated plugin record from the database
if not tempObj:
self._removePlugin(data['target'], pluginName)
continue
self.write_message({
'title': tempObj.__title__,
'url': tempObj.__url__
})
        # Compute the difference set, then run the check with the plugins that have no record in the database
diffList = list(set(self.getPlugins.keys()).difference(set(findRes['plugins'])))
if diffList:
map(lambda x: self.taskQueue.put(self.getPlugins[x]), diffList)
self.start(data['target'])
else:
self.write_message('done')
def _insertTarget(self, target):
insertRes = self.db.targets.insert_one({
'target': target,
'plugins': []
})
if insertRes.inserted_id:
return True
else:
return False
def _removePlugin(self, target, name):
updateRes = self.db.targets.update_one({
'target': target
}, {
'$pull': {
'plugins': name
}
})
        # MongoDB versions < 2.6 have no modified_count, so use the 'n' value in raw_result to determine whether the update succeeded
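        # (Illustration only: with pymongo a successful update's raw_result
        # typically looks like {'ok': 1, 'n': 1, ...}; 'n' == 0 means no
        # document matched the filter.)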
if not updateRes.raw_result.has_key('n'):
return False
if updateRes.raw_result['n']:
return True
else:
return False
| tonybreak/Registered | handlers/index.py | Python | gpl-3.0 | 2,512 |
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import
import pytest
import requests
from unittest.mock import MagicMock
from shipit_pipeline.pipeline import PipelineStep, get_runnable_steps, refresh_pipeline_steps
@pytest.fixture
def pipeline_steps():
pipeline_steps_ = [
{
'api_url': 'http://localhost:5001/signoff1',
'description': 'signoff 1',
'parameters': {
},
'parameters_schema': 'https://null',
'requires': [
],
'uid': 'signoff1'
}, {
'api_url': 'http://localhost:5001/signoff2',
'description': 'signoff 2 - relman gatekeeps all the things',
'parameters': {
},
'parameters_schema': 'https://null',
'requires': [
'signoff1'
],
'uid': 'signoff2'
}, {
'api_url': 'http://localhost:5001/publish1',
'description': 'final publish',
'parameters': {
},
'parameters_schema': 'https://null',
'requires': [
'signoff2'
],
'uid': 'publish1'
}
]
return [PipelineStep.from_dict(step) for step in pipeline_steps_]
def test_get_runnable_steps_when_nothing_has_started(pipeline_steps):
runnables = get_runnable_steps(pipeline_steps)
assert len(runnables) == 1
assert runnables[0].uid == 'signoff1'
def test_get_runnable_steps_state_changed(pipeline_steps):
pipeline_steps[0].state = 'completed'
runnables = get_runnable_steps(pipeline_steps)
assert len(runnables) == 1
assert runnables[0].uid == 'signoff2'
def test_get_runnable_steps_dependency_in_failure(pipeline_steps):
pipeline_steps[0].state = 'exception'
runnables = get_runnable_steps(pipeline_steps)
assert len(runnables) == 0
def test_get_runnable_steps_state_changed2(pipeline_steps):
pipeline_steps[0].state = 'completed'
pipeline_steps[1].state = 'completed'
runnables = get_runnable_steps(pipeline_steps)
assert len(runnables) == 1
assert runnables[0].uid == 'publish1'
def test_get_runnable_steps_many_can_run_at_the_beginning(pipeline_steps):
another_first_step = PipelineStep(uid='parallel_action_to_signoff1', url='http://null', params={}, requires=[])
pipeline_steps.append(another_first_step)
runnables = get_runnable_steps(pipeline_steps)
assert [r.uid for r in runnables] == ['signoff1', 'parallel_action_to_signoff1']
def test_get_runnable_steps_many_upstream_dependencies(pipeline_steps):
upstream_dep = PipelineStep(uid='upstream_dep', url='http://null', params={}, requires=[])
upstream_dep.state = 'completed'
pipeline_steps[1].requires.append(upstream_dep.uid)
pipeline_steps.append(upstream_dep)
runnables = get_runnable_steps(pipeline_steps)
assert [r.uid for r in runnables] == ['signoff1']
pipeline_steps[0].state = 'completed'
runnables = get_runnable_steps(pipeline_steps)
assert [r.uid for r in runnables] == ['signoff2']
def test_get_runnable_steps_many_many_downstream_deps_run(pipeline_steps):
downstream_dep = PipelineStep(uid='another_downstream_dep', url='http://null', params={}, requires=[])
pipeline_steps.append(downstream_dep)
pipeline_steps[0].state = 'completed'
runnables = get_runnable_steps(pipeline_steps)
assert [r.uid for r in runnables] == ['signoff2', 'another_downstream_dep']
def test_refresh_pipeline_steps(pipeline_steps, monkeypatch):
def mock_get_request(url, verify):
get_response = MagicMock()
get_response.json.return_value = {'state': 'completed'} if 'signoff1' in url else {'state': 'busted'}
return get_response
monkeypatch.setattr(requests, 'get', mock_get_request)
pipeline_steps[0].state = 'running'
pipeline_steps = refresh_pipeline_steps(pipeline_steps)
assert pipeline_steps[0].state == 'completed'
assert pipeline_steps[1].state == 'pending'
assert pipeline_steps[2].state == 'pending'
| garbas/mozilla-releng-services | src/shipit_pipeline/tests/test_pipeline.py | Python | mpl-2.0 | 4,166 |
# -*- coding: utf-8 -*-
# © 2016 Antiun Ingenieria S.L. - Antonio Espinosa
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from . import models
from .hooks import post_init_hook
| open-synergy/contract | contract_payment_mode/__init__.py | Python | agpl-3.0 | 201 |
#!/usr/bin/env python
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'alert.settings'
import sys
# append these to the path to make the dev machines and the server happy (respectively)
execfile('/etc/courtlistener')
sys.path.append(INSTALL_ROOT)
from django import db
from django.core.exceptions import MultipleObjectsReturned
from django.utils.text import slugify
from alert.search.models import Court, Document
from alert.lib.parse_dates import parse_dates
from juriscraper.lib.string_utils import trunc
from alert.lib.scrape_tools import hasDuplicate
from lxml.html import fromstring, tostring
from urlparse import urljoin
import datetime
import re
import subprocess
import time
import urllib2
def load_fix_files():
"""Loads the fix files into memory so they can be accessed efficiently."""
court_fix_file = open('../logs/f2_court_fix_file.txt', 'r')
date_fix_file = open('../logs/f2_date_fix_file.txt', 'r')
case_name_short_fix_file = open('../logs/f2_short_case_name_fix_file.txt', 'r')
court_fix_dict = {}
date_fix_dict = {}
case_name_short_dict = {}
for line in court_fix_file:
key, value = line.split('|')
court_fix_dict[key] = value
for line in date_fix_file:
key, value = line.split('|')
date_fix_dict[key] = value
for line in case_name_short_fix_file:
key, value = line.split('|')
case_name_short_dict[key] = value
court_fix_file.close()
date_fix_file.close()
case_name_short_fix_file.close()
return court_fix_dict, date_fix_dict, case_name_short_dict
def check_fix_list(sha1, fix_dict):
""" Given a sha1, return the correction for a case. Return false if no values.
Corrections are strings that the parser can interpret as needed. Items are
written to this file the first time the cases are imported, and this file
can be used to import F2 into later systems.
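    For illustration only, each line of a fix file has the shape "<sha1>|<value>",
    e.g. (made-up entry):
        0123456789abcdef0123456789abcdef01234567|ca5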
"""
try:
return fix_dict[sha1].strip()
except KeyError:
return False
def exceptional_cleaner(caseName):
"""Cleans common Resource.org special cases off of case names, and
sets the precedential_status for a document.
Returns caseName, precedential_status
"""
caseName = caseName.lower()
ca1regex = re.compile('(unpublished disposition )?notice: first circuit local rule 36.2\(b\)6 states unpublished opinions may be cited only in related cases.?')
ca2regex = re.compile('(unpublished disposition )?notice: second circuit local rule 0.23 states unreported opinions shall not be cited or otherwise used in unrelated cases.?')
ca3regex = re.compile('(unpublished disposition )?notice: third circuit rule 21\(i\) states citations to federal decisions which have not been formally reported should identify the court, docket number and date.?')
ca4regex = re.compile('(unpublished disposition )?notice: fourth circuit (local rule 36\(c\)|i.o.p. 36.6) states that citation of unpublished dispositions is disfavored except for establishing res judicata, estoppel, or the law of the case and requires service of copies of cited unpublished dispositions of the fourth circuit.?')
ca5regex = re.compile('(unpublished disposition )?notice: fifth circuit local rule 47.5.3 states that unpublished opinions should normally be cited only when they establish the law of the case, are relied upon as a basis for res judicata or collateral estoppel, or involve related facts. if an unpublished opinion is cited, a copy shall be attached to each copy of the brief.?')
ca6regex = re.compile('(unpublished disposition )?notice: sixth circuit rule 24\(c\) states that citation of unpublished dispositions is disfavored except for establishing res judicata, estoppel, or the law of the case and requires service of copies of cited unpublished dispositions of the sixth circuit.?')
ca7regex = re.compile('(unpublished disposition )?notice: seventh circuit rule 53\(b\)\(2\) states unpublished orders shall not be cited or used as precedent except to support a claim of res judicata, collateral estoppel or law of the case in any federal court within the circuit.?')
ca8regex = re.compile('(unpublished disposition )?notice: eighth circuit rule 28a\(k\) governs citation of unpublished opinions and provides that (no party may cite an opinion not intended for publication unless the cases are related by identity between the parties or the causes of action|they are not precedent and generally should not be cited unless relevant to establishing the doctrines of res judicata, collateral estoppel, the law of the case, or if the opinion has persuasive value on a material issue and no published opinion would serve as well).?')
ca9regex = re.compile('(unpublished disposition )?notice: ninth circuit rule 36-3 provides that dispositions other than opinions or orders designated for publication are not precedential and should not be cited except when relevant under the doctrines of law of the case, res judicata, or collateral estoppel.?')
ca10regex = re.compile('(unpublished disposition )?notice: tenth circuit rule 36.3 states that unpublished opinions and orders and judgments have no precedential value and shall not be cited except for purposes of establishing the doctrines of the law of the case, res judicata, or collateral estoppel.?')
cadcregex = re.compile('(unpublished disposition )?notice: d.c. circuit local rule 11\(c\) states that unpublished orders, judgments, and explanatory memoranda may not be cited as precedents, but counsel may refer to unpublished dispositions when the binding or preclusive effect of the disposition, rather than its quality as precedent, is relevant.?')
cafcregex = re.compile('(unpublished disposition )?notice: federal circuit local rule 47.(6|8)\(b\) states that opinions and orders which are designated as not citable as precedent shall not be employed or cited as precedent. this does not preclude assertion of issues of claim preclusion, issue preclusion, judicial estoppel, law of the case or the like based on a decision of the court rendered in a nonprecedential opinion or order.?')
# Clean off special cases
if 'first circuit' in caseName:
caseName = re.sub(ca1regex, '', caseName)
precedential_status = 'Unpublished'
elif 'second circuit' in caseName:
caseName = re.sub(ca2regex, '', caseName)
precedential_status = 'Unpublished'
elif 'third circuit' in caseName:
caseName = re.sub(ca3regex, '', caseName)
precedential_status = 'Unpublished'
elif 'fourth circuit' in caseName:
caseName = re.sub(ca4regex, '', caseName)
precedential_status = 'Unpublished'
elif 'fifth circuit' in caseName:
caseName = re.sub(ca5regex, '', caseName)
precedential_status = 'Unpublished'
elif 'sixth circuit' in caseName:
caseName = re.sub(ca6regex, '', caseName)
precedential_status = 'Unpublished'
elif 'seventh circuit' in caseName:
caseName = re.sub(ca7regex, '', caseName)
precedential_status = 'Unpublished'
elif 'eighth circuit' in caseName:
caseName = re.sub(ca8regex, '', caseName)
precedential_status = 'Unpublished'
elif 'ninth circuit' in caseName:
caseName = re.sub(ca9regex, '', caseName)
precedential_status = 'Unpublished'
elif 'tenth circuit' in caseName:
caseName = re.sub(ca10regex, '', caseName)
precedential_status = 'Unpublished'
elif 'd.c. circuit' in caseName:
caseName = re.sub(cadcregex, '', caseName)
precedential_status = 'Unpublished'
elif 'federal circuit' in caseName:
caseName = re.sub(cafcregex, '', caseName)
precedential_status = 'Unpublished'
else:
precedential_status = 'Published'
return caseName, precedential_status
def scrape_and_parse():
"""Traverses the bulk data from public.resource.org, and puts them in the
DB.
Probably lots of ways to go about this, but I think the easiest will be the following:
- look at the index page of all volumes, and follow all the links it has.
- for each volume, look at its index page, and follow the link to all cases
- for each case, collect information wisely.
- put it all in the DB
"""
# begin by loading up the fix files into memory
court_fix_dict, date_fix_dict, case_name_short_dict = load_fix_files()
results = []
DEBUG = 4
# Set to False to disable automatic browser usage. Else, set to the
# command you want to run, e.g. 'firefox'
BROWSER = False
court_fix_file = open('../logs/f2_court_fix_file.txt', 'a')
date_fix_file = open('../logs/f2_date_fix_file.txt', 'a')
case_name_short_fix_file = open('../logs/f2_short_case_name_fix_file.txt', 'a')
vol_file = open('../logs/vol_file.txt', 'r+')
case_file = open('../logs/case_file.txt', 'r+')
url = "file://%s/Resource.org/F2/index.html" % INSTALL_ROOT
openedURL = urllib2.urlopen(url)
content = openedURL.read()
openedURL.close()
tree = fromstring(content)
volumeLinks = tree.xpath('//table/tbody/tr/td[1]/a')
try:
i = int(vol_file.readline())
except ValueError:
        # the volume file is empty or otherwise failing.
i = 0
vol_file.close()
if DEBUG >= 1:
print "Number of remaining volumes is: %d" % (len(volumeLinks) - i)
# used later, needs a default value.
saved_caseDate = None
saved_court = None
while i < len(volumeLinks):
# we iterate over every case in the volume
volumeURL = volumeLinks[i].text + "/index.html"
volumeURL = urljoin(url, volumeURL)
if DEBUG >= 1:
print "Current volumeURL is: %s" % volumeURL
openedVolumeURL = urllib2.urlopen(volumeURL)
content = openedVolumeURL.read()
volumeTree = fromstring(content)
openedVolumeURL.close()
caseLinks = volumeTree.xpath('//table/tbody/tr/td[1]/a')
caseDates = volumeTree.xpath('//table/tbody/tr/td[2]')
sha1Hashes = volumeTree.xpath('//table/tbody/tr/td[3]/a')
# The following loads a serialized placeholder from disk.
try:
j = int(case_file.readline())
except ValueError:
j = 0
case_file.close()
while j < len(caseLinks):
# iterate over each case, throwing it in the DB
if DEBUG >= 1:
print ''
# like the scraper, we begin with the caseLink field (relative for
# now, not absolute)
caseLink = caseLinks[j].get('href')
# sha1 is easy
sha1Hash = sha1Hashes[j].text
if DEBUG >= 4:
print "SHA1 is: %s" % sha1Hash
# using the caselink from above, and the volumeURL, we can get the
# html
absCaseLink = urljoin(volumeURL, caseLink)
html = urllib2.urlopen(absCaseLink).read()
htmlTree = fromstring(html)
bodyContents = htmlTree.xpath('//body/*[not(@id="footer")]')
body = ""
bodyText = ""
for element in bodyContents:
body += tostring(element)
try:
bodyText += tostring(element, method='text')
except UnicodeEncodeError:
# Happens with odd characters. Simply pass this iteration.
pass
if DEBUG >= 5:
print body
print bodyText
# need to figure out the court ID
try:
courtPs = htmlTree.xpath('//p[@class = "court"]')
# Often the court ends up in the parties field.
partiesPs = htmlTree.xpath("//p[@class= 'parties']")
court = ""
for courtP in courtPs:
court += tostring(courtP).lower()
for party in partiesPs:
court += tostring(party).lower()
except IndexError:
court = check_fix_list(sha1Hash, court_fix_dict)
if not court:
print absCaseLink
if BROWSER:
subprocess.Popen([BROWSER, absCaseLink], shell=False).communicate()
court = raw_input("Please input court name (e.g. \"First Circuit of Appeals\"): ").lower()
court_fix_file.write("%s|%s\n" % (sha1Hash, court))
if ('first' in court) or ('ca1' == court):
court = 'ca1'
elif ('second' in court) or ('ca2' == court):
court = 'ca2'
elif ('third' in court) or ('ca3' == court):
court = 'ca3'
elif ('fourth' in court) or ('ca4' == court):
court = 'ca4'
elif ('fifth' in court) or ('ca5' == court):
court = 'ca5'
elif ('sixth' in court) or ('ca6' == court):
court = 'ca6'
elif ('seventh' in court) or ('ca7' == court):
court = 'ca7'
elif ('eighth' in court) or ('ca8' == court):
court = 'ca8'
elif ('ninth' in court) or ('ca9' == court):
court = 'ca9'
elif ("tenth" in court) or ('ca10' == court):
court = 'ca10'
elif ("eleventh" in court) or ('ca11' == court):
court = 'ca11'
elif ('columbia' in court) or ('cadc' == court):
court = 'cadc'
elif ('federal' in court) or ('cafc' == court):
court = 'cafc'
elif ('patent' in court) or ('ccpa' == court):
court = 'ccpa'
elif (('emergency' in court) and ('temporary' not in court)) or ('eca' == court):
court = 'eca'
elif ('claims' in court) or ('uscfc' == court):
court = 'uscfc'
else:
# No luck extracting the court name. Try the fix file.
court = check_fix_list(sha1Hash, court_fix_dict)
if not court:
# Not yet in the fix file. Check if it's a crazy ca5 case
court = ''
ca5courtPs = htmlTree.xpath('//p[@class = "center"]')
for ca5courtP in ca5courtPs:
court += tostring(ca5courtP).lower()
if 'fifth circuit' in court:
court = 'ca5'
else:
court = False
if not court:
# Still no luck. Ask for input, then append it to
# the fix file.
print absCaseLink
if BROWSER:
subprocess.Popen([BROWSER, absCaseLink], shell=False).communicate()
court = raw_input("Unknown court. Input the court code to proceed successfully [%s]: " % saved_court)
court = court or saved_court
court_fix_file.write("%s|%s\n" % (sha1Hash, court))
saved_court = court
court = Court.objects.get(pk=court)
if DEBUG >= 4:
print "Court is: %s" % court
            # next: west_cite, docket_number and caseName. The full case name is obtained later.
west_cite = caseLinks[j].text
docket_number = absCaseLink.split('.')[-2]
caseName = caseLinks[j].get('title')
caseName, precedential_status = exceptional_cleaner(caseName)
cite, new = hasDuplicate(caseName, west_cite, docket_number)
if cite.caseNameShort == '':
# No luck getting the case name
savedCaseNameShort = check_fix_list(sha1Hash, case_name_short_dict)
if not savedCaseNameShort:
print absCaseLink
if BROWSER:
subprocess.Popen([BROWSER, absCaseLink], shell=False).communicate()
caseName = raw_input("Short casename: ")
cite.caseNameShort = trunc(caseName, 100)
cite.caseNameFull = caseName
case_name_short_fix_file.write("%s|%s\n" % (sha1Hash, caseName))
else:
# We got both the values from the save files. Use 'em.
cite.caseNameShort = trunc(savedCaseNameShort, 100)
cite.caseNameFull = savedCaseNameShort
# The slug needs to be done here, b/c it is only done automatically
# the first time the citation is saved, and this will be
# at least the second.
cite.slug = trunc(slugify(cite.caseNameShort), 50)
cite.save()
if DEBUG >= 4:
print "precedential_status: " + precedential_status
print "west_cite: " + cite.west_cite
print "docket_number: " + cite.docket_number
print "caseName: " + cite.caseNameFull
# date is kinda tricky...details here:
# http://pleac.sourceforge.net/pleac_python/datesandtimes.html
rawDate = caseDates[j].find('a')
try:
if rawDate is not None:
# Special cases
if sha1Hash == 'f0da421f117ef16223d7e61d1e4e5526036776e6':
date_text = 'August 28, 1980'
elif sha1Hash == '8cc192eaacd1c544b5e8ffbd751d9be84c311932':
date_text = 'August 16, 1985'
elif sha1Hash == 'd19bce155f72a9f981a12efabd760a35e1e7dbe7':
date_text = 'October 12, 1979'
elif sha1Hash == '9f7583cf0d46ddc9cad4e7943dd775f9e9ea99ff':
date_text = 'July 30, 1980'
elif sha1Hash == '211ea81a4ab4132483c483698d2a40f4366f5640':
date_text = 'November 3, 1981'
elif sha1Hash == 'eefb344034461e9c6912689677a32cd18381d5c2':
date_text = 'July 28, 1983'
else:
date_text = rawDate.text
try:
caseDate = datetime.datetime(*time.strptime(date_text, "%B, %Y")[0:5])
                    except (ValueError, TypeError):
caseDate = datetime.datetime(*time.strptime(date_text, "%B %d, %Y")[0:5])
else:
# No value was found. Throw an exception.
raise ValueError
except:
# No date provided.
try:
# Try to get it from the saved list
caseDate = datetime.datetime(*time.strptime(check_fix_list(sha1Hash, date_fix_dict), "%B %d, %Y")[0:5])
except:
caseDate = False
if not caseDate:
# Parse out the dates with debug set to false.
try:
dates = parse_dates(bodyText, False)
except OverflowError:
# Happens when we try to make a date from a very large number
dates = []
try:
first_date_found = dates[0]
except IndexError:
# No dates found.
first_date_found = False
if first_date_found == saved_caseDate:
# High likelihood of date being correct. Use it.
caseDate = saved_caseDate
else:
print absCaseLink
if BROWSER:
subprocess.Popen([BROWSER, absCaseLink], shell=False).communicate()
print "Unknown date. Possible options are:"
try:
print " 1) %s" % saved_caseDate.strftime("%B %d, %Y")
except AttributeError:
# Happens on first iteration when saved_caseDate has no strftime attribute.
try:
saved_caseDate = dates[0]
print " 1) %s" % saved_caseDate.strftime(
"%B %d, %Y")
except IndexError:
# Happens when dates has no values.
print " No options available."
for k, date in enumerate(dates[0:4]):
if date.year >= 1900:
# strftime can't handle dates before 1900.
print " %s) %s" % (k + 2,
date.strftime("%B %d, %Y"))
choice = raw_input("Enter the date or an option to proceed [1]: ")
choice = choice or 1
if str(choice) == '1':
# The user chose the default. Use the saved value from the last case
caseDate = saved_caseDate
elif choice in ['2', '3', '4', '5']:
# The user chose an option between 2 and 5. Use it.
caseDate = dates[int(choice) - 2]
else:
# The user typed a new date. Use it.
caseDate = datetime.datetime(*time.strptime(choice, "%B %d, %Y")[0:5])
date_fix_file.write("%s|%s\n" % (sha1Hash, caseDate.strftime("%B %d, %Y")))
# Used during the next iteration as the default value
saved_caseDate = caseDate
if DEBUG >= 3:
print "caseDate is: %s" % caseDate
try:
doc, created = Document.objects.get_or_create(
sha1=sha1Hash, court=court)
except MultipleObjectsReturned:
# this shouldn't happen now that we're using SHA1 as the dup
# check, but the old data is problematic, so we must catch this.
created = False
if created:
# we only do this if it's new
doc.html = body
doc.sha1 = sha1Hash
doc.download_url = "http://bulk.resource.org/courts.gov/c/F2/"\
+ str(i + 178) + "/" + caseLink
doc.date_filed = caseDate
doc.source = "R"
doc.precedential_status = precedential_status
doc.citation = cite
doc.save()
if not created:
# something is afoot. Throw a big error.
print "Duplicate found at volume " + str(i + 1) + \
" and row " + str(j + 1) + "!!!!"
print "Found document %s in the database with doc id of %d!" % (doc, doc.pk)
exit(1)
# save our location within the volume.
j += 1
case_file = open('../logs/case_file.txt', 'w')
case_file.write(str(j))
case_file.close()
# save the last volume completed.
i += 1
vol_file = open('../logs/vol_file.txt', 'w')
vol_file.write(str(i))
vol_file.close()
# Clear query cache, as it presents a memory leak
db.reset_queries()
return 0
def main():
print scrape_and_parse()
print "Completed all volumes successfully. Exiting."
exit(0)
if __name__ == '__main__':
main()
| shashi792/courtlistener | alert/corpus_importer/resource_org/import_f2.py | Python | agpl-3.0 | 23,715 |
#!env/python3
# coding: utf-8
import ipdb
import os
import json
import datetime
import uuid
import psycopg2
import hashlib
import asyncio
import ped_parser
from config import *
from core.framework.common import *
from core.model import *
# =====================================================================================================================
# FILTER ENGINE
# =====================================================================================================================
class FilterEngine:
op_map = {'AND': ' AND ', 'OR': ' OR ', '==': '=', '!=': '<>', '>': '>', '<': '<', '>=': '>=', '<=': '<=', '~': ' LIKE ', '!~': ' NOT LIKE ',
# As a left join will be done on the chr+pos or chr+pos+ref+alt according to the type of the set operation (by site or by variant)
# We just need to test if one of the "joined" field is set or not
'IN': '{0}.chr is not null',
'NOTIN': '{0}.chr is null'}
sql_type_map = {'int': 'integer', 'string': 'text', 'float': 'real', 'percent': 'real', 'enum': 'integer', 'range': 'int8range', 'bool': 'boolean',
'list_i': 'text', 'list_s': 'text', 'list_f': 'text', 'list_i': 'text', 'list_pb': 'text'}
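    # Illustrative note: op_map translates filter operators into their SQL
    # counterparts (e.g. '==' -> '=', '~' -> ' LIKE '), and sql_type_map picks
    # the SQL column type used when an annotation field is materialized in the
    # working table (e.g. a 'percent' field is stored in a 'real' column).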
def __init__(self):
run_until_complete(self.load_annotation_metadata())
async def load_annotation_metadata(self):
"""
Init Annso Filtering engine.
        Init the mapping collections for annotation databases and fields
"""
refname = 'hg19' # execute("SELECT table_suffix FROM reference WHERE id="+str(reference)).first()["table_suffix"]
self.reference = 2
self.fields_map = {}
self.db_map = {}
self.variant_table = "sample_variant_{0}".format(refname)
query = "SELECT d.uid AS duid, d.name AS dname, d.name_ui AS dname_ui, d.jointure, d.reference_id, d.type AS dtype, d.db_pk_field_uid, a.uid AS fuid, a.name AS fname, a.type, a.wt_default FROM annotation_field a LEFT JOIN annotation_database d ON a.database_uid=d.uid"
result = await execute_aio(query)
for row in result:
if row.duid not in self.db_map:
self.db_map[row.duid] = {"name": row.dname, "join": row.jointure, "fields": {}, "reference_id": row.reference_id, "type": row.dtype, "db_pk_field_uid" : row.db_pk_field_uid}
self.db_map[row.duid]["fields"][row.fuid] = {"name": row.fname, "type": row.type}
self.fields_map[row.fuid] = {"name": row.fname, "type": row.type, "db_uid": row.duid, "db_name_ui": row.dname_ui, "db_name": row.dname, "db_type": row.dtype, "join": row.jointure, "wt_default": row.wt_default}
def create_working_table(self, analysis_id, sample_ids, field_uids, dbs_uids, filter_ids=[], attributes={}):
"""
        Create a working sql table for the analysis to improve the speed of filtering/annotation.
        A working table contains all variants used by the analysis, with all annotations used by filters or displayed
"""
from core.core import core
if len(sample_ids) == 0: raise RegovarException("No sample... so not able to retrieve data")
db_ref_suffix= "hg19" # execute("SELECT table_suffix FROM reference WHERE id={}".format(reference_id)).first().table_suffix
progress = {"msg": "wt_processing", "start": datetime.datetime.now().ctime(), "analysis_id": analysis_id, "step": 1}
core.notify_all(progress)
# Create schema
w_table = 'wt_{}'.format(analysis_id)
query = "DROP TABLE IF EXISTS {0} CASCADE; CREATE TABLE {0} (\
is_variant boolean DEFAULT False, \
annotated boolean DEFAULT False, \
variant_id bigint, \
bin integer, \
chr bigint, \
pos integer, \
ref text, \
alt text,\
transcript_pk_field_uid character varying(32), \
transcript_pk_value character varying(100), \
is_transition boolean, \
sample_tlist integer[], \
sample_tcount integer, \
sample_alist integer[], \
sample_acount integer, \
depth integer, "
query += ", ".join(["s{}_gt integer".format(i) for i in sample_ids]) + ", "
query += ", ".join(["s{}_dp integer".format(i) for i in sample_ids])
query += ", CONSTRAINT {0}_ukey UNIQUE (variant_id, transcript_pk_field_uid, transcript_pk_value));"
execute(query.format(w_table))
# Insert variant without annotation first
query = "INSERT INTO {0} (variant_id, bin, chr, pos, ref, alt, is_transition, sample_tlist) \
SELECT DISTINCT sample_variant_{1}.variant_id, sample_variant_{1}.bin, sample_variant_{1}.chr, sample_variant_{1}.pos, sample_variant_{1}.ref, sample_variant_{1}.alt, \
variant_{1}.is_transition, \
variant_{1}.sample_list \
FROM sample_variant_{1} INNER JOIN variant_{1} ON sample_variant_{1}.variant_id=variant_{1}.id \
WHERE sample_variant_{1}.sample_id IN ({2}) \
ON CONFLICT (variant_id, transcript_pk_field_uid, transcript_pk_value) DO NOTHING;"
execute(query.format(w_table, db_ref_suffix, ','.join([str(i) for i in sample_ids])))
# Complete sample-variant's associations
for sid in sample_ids:
execute("UPDATE {0} SET s{2}_gt=_sub.genotype, s{2}_dp=_sub.depth FROM (SELECT variant_id, genotype, depth FROM sample_variant_{1} WHERE sample_id={2}) AS _sub WHERE {0}.variant_id=_sub.variant_id".format(w_table, db_ref_suffix, sid))
query = "UPDATE {0} SET \
is_variant=(CASE WHEN ref<>alt THEN True ELSE False END), \
sample_tcount=array_length(sample_tlist,1), \
sample_alist=array_intersect(sample_tlist, array[{1}]), \
sample_acount=array_length(array_intersect(sample_tlist, array[{1}]),1), \
depth=GREATEST({2})"
execute(query.format(w_table, ",".join([str(i) for i in sample_ids]), ", ".join(["s{}_dp".format(i) for i in sample_ids])))
# Create indexes
        # FIXME: do we need to create an index on boolean fields? Is partitioning a better option for low-cardinality fields: http://www.postgresql.org/docs/9.1/static/ddl-partitioning.html
# query = "CREATE INDEX {0}_idx_ann ON {0} USING btree (annotated);".format(w_table)
query = "CREATE INDEX {0}_idx_vid ON {0} USING btree (variant_id);".format(w_table)
query += "CREATE INDEX {0}_idx_var ON {0} USING btree (bin, chr, pos, transcript_pk_field_uid, transcript_pk_value);".format(w_table)
query += "CREATE INDEX {0}_idx_trx ON {0} USING btree (transcript_pk_field_uid, transcript_pk_value);".format(w_table)
query += "".join(["CREATE INDEX {0}_idx_s{1}_gt ON {0} USING btree (s{1}_gt);".format(w_table, i) for i in sample_ids])
query += "".join(["CREATE INDEX {0}_idx_s{1}_dp ON {0} USING btree (s{1}_dp);".format(w_table, i) for i in sample_ids])
execute(query)
# Update count stat of the analysis
query = "UPDATE analysis SET total_variants=(SELECT COUNT(*) FROM {} WHERE is_variant), status='ANNOTATING' WHERE id={}".format(w_table, analysis_id)
execute(query)
# Update working table by computing annotation
self.update_working_table(analysis_id, sample_ids, field_uids, dbs_uids, filter_ids, attributes)
def update_working_table(self, analysis_id, sample_ids, field_uids, dbs_uids, filter_ids=[], attributes={}):
"""
Update annotation of the working table of an analysis. The working table shall already exists
"""
from core.core import core
# Get list of fields to add in the wt
analysis = Analysis.from_id(analysis_id)
total = analysis.total_variants
diff_fields = []
diff_dbs = []
progress = {"msg": "wt_processing", "start": datetime.datetime.now().ctime(), "analysis_id": analysis_id, "step": 2, "progress_total": total, "progress_current": 0}
core.notify_all(progress)
try:
query = "SELECT column_name FROM information_schema.columns WHERE table_name='wt_{}'".format(analysis_id)
current_fields = [row.column_name if row.column_name[0] != '_' else row.column_name[1:] for row in execute(query)]
current_dbs = []
for f_uid in current_fields:
if f_uid in self.fields_map and self.fields_map[f_uid]['db_uid'] not in current_dbs:
current_dbs.append(self.fields_map[f_uid]['db_uid'])
for f_uid in field_uids:
if f_uid not in current_fields and self.fields_map[f_uid]['db_name_ui'] != 'Variant':
diff_fields.append('_{}'.format(f_uid))
if self.fields_map[f_uid]['db_uid'] not in diff_dbs and self.fields_map[f_uid]['db_uid'] not in current_dbs:
diff_dbs.append(self.fields_map[f_uid]['db_uid'])
except:
# working table doesn't exist
return False
# Alter working table to add new fields
pattern = "ALTER TABLE wt_{0} ADD COLUMN {1}{2} {3};"
query = ""
update_queries = []
for f_uid in diff_fields:
if f_uid[0] == '_':
f_uid = f_uid[1:]
query += pattern.format(analysis_id, '_', f_uid, self.sql_type_map[self.fields_map[f_uid]['type']])
for a_name in attributes.keys():
att_checked = []
for sid, att in attributes[a_name].items():
if 'attr_{}_{}'.format(a_name.lower(), att.lower()) in current_fields:
                    # We assume that if the first key/value column of the attribute is defined, all of the attribute's columns are defined,
                    # so break and switch to the next attribute.
                    # That's why, before updating an attribute value, all former attribute columns must first be dropped from the wt.
                    break
else:
if att not in att_checked:
att_checked.append(att)
query += pattern.format(analysis_id, 'attr_', "{}_{}".format(a_name.lower(), att.lower()), 'boolean DEFAULT False')
update_queries.append("UPDATE wt_{} SET attr_{}_{}=True WHERE s{}_gt IS NOT NULL; ".format(analysis_id, a_name.lower(), att.lower(), sid))
for f_id in filter_ids:
if 'filter_{}'.format(f_id) not in current_fields:
query += pattern.format(analysis_id, 'filter_', f_id, 'boolean DEFAULT False')
f_filter = json.loads(execute("SELECT filter FROM filter WHERE id={}".format(f_id)).first().filter)
q = self.build_query(analysis_id, analysis.reference_id, 'table', f_filter, [], None, None)
queries = q[0]
if len(queries) > 0:
                    # add all queries that create the temp tables needed by the filter, if they do not yet exist
for q in queries[:-1]:
query += q
# add the query to update wt with the filter
                    # Note : as transcript_pk_field_uid and transcript_pk_value may be null, we cannot use the '=' operator and must use 'IS NOT DISTINCT FROM',
                    # because two expressions that evaluate to 'null' are not considered equal in SQL.
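                    # Illustrative sketch (added, not from the original source) of the PostgreSQL behaviour relied upon here:
                    #   SELECT NULL = NULL;                       -- yields NULL, not TRUE
                    #   SELECT NULL IS NOT DISTINCT FROM NULL;    -- yields TRUE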
update_queries.append("UPDATE wt_{0} SET filter_{1}=True FROM ({2}) AS _sub WHERE wt_{0}.variant_id=_sub.variant_id AND wt_{0}.transcript_pk_field_uid IS NOT DISTINCT FROM _sub.transcript_pk_field_uid AND wt_{0}.transcript_pk_value IS NOT DISTINCT FROM _sub.transcript_pk_value ; ".format(analysis_id, f_id, queries[-1].strip()[:-1]))
if query != "":
# Add new annotation columns to the working table
execute(query)
progress.update({"step": 3})
core.notify_all(progress)
        # Loop over the new annotation databases: if a database is new, its transcripts must be added to the working table
fields_to_copy_from_variant = ["variant_id","bin","chr","pos","ref","alt","is_transition","sample_tlist","sample_tcount","sample_alist","sample_acount","depth"]
fields_to_copy_from_variant.extend(['s{}_gt'.format(s) for s in sample_ids])
fields_to_copy_from_variant.extend(['s{}_dp'.format(s) for s in sample_ids])
fields_to_copy_from_variant.extend(['attr_{}'.format(a.lower()) for a in attributes.keys()])
fields_to_copy_from_variant.extend(['filter_{}'.format(f) for f in filter_ids])
pattern = "INSERT INTO wt_{0} (annotated, transcript_pk_field_uid, transcript_pk_value, {1}) \
SELECT False, '{2}', {4}.transcript_id, {3} \
FROM (SELECT {1} FROM wt_{0} WHERE transcript_pk_field_uid IS NULL) AS _var \
INNER JOIN {4} ON _var.variant_id={4}.variant_id" # TODO : check if more optim to select with JOIN ON bin/chr/pos/ref/alt
for uid in diff_dbs:
if self.db_map[uid]["type"] == "transcript":
query = pattern.format(analysis_id,
', '.join(fields_to_copy_from_variant),
self.db_map[uid]["db_pk_field_uid"],
', '.join(["_var.{}".format(f) for f in fields_to_copy_from_variant]),
self.db_map[uid]["name"])
execute(query)
progress.update({"step": 4})
core.notify_all(progress)
# Create update query to retrieve annotation
UPDATE_LOOP_RANGE = 1000
to_update = {}
for f_uid in diff_fields:
if self.fields_map[f_uid[1:]]['db_uid'] not in to_update.keys():
to_update[self.fields_map[f_uid[1:]]['db_uid']] = []
to_update[self.fields_map[f_uid[1:]]['db_uid']].append({
"name": self.fields_map[f_uid[1:]]['name'],
"uid":f_uid[1:],
"db_name": self.fields_map[f_uid[1:]]['db_name']})
        # Loop to update the working table annotations (the requested fields are "packed" into one update query per annotation database)
for db_uid in to_update.keys():
if self.db_map[db_uid]["type"] == "transcript":
qset_ann = ', '.join(['_{0}=_ann._{0}'.format(f["uid"]) for f in to_update[db_uid]])
qslt_ann = ','.join(['{0}.{1} AS _{2}'.format(f['db_name'], f["name"], f["uid"]) for f in to_update[db_uid]])
qslt_var = "SELECT variant_id, bin, chr, pos, ref, alt, transcript_pk_value FROM wt_{0} WHERE annotated=False AND transcript_pk_field_uid='{1}' LIMIT {2}".format(analysis_id, self.db_map[self.fields_map[f_uid[1:]]['db_uid']]['db_pk_field_uid'], UPDATE_LOOP_RANGE)
qjoin = 'LEFT JOIN {0} '.format(self.db_map[db_uid]['join'].format('_var'))
query = "UPDATE wt_{0} SET annotated=True, {1} FROM (SELECT _var.variant_id, _var.transcript_pk_value, {2} FROM ({3}) AS _var {4}) AS _ann \
WHERE wt_{0}.variant_id=_ann.variant_id AND wt_{0}.transcript_pk_field_uid='{5}' AND wt_{0}.transcript_pk_value=_ann.transcript_pk_value".format(
analysis_id,
qset_ann,
qslt_ann,
qslt_var,
qjoin,
                    self.db_map[db_uid]['db_pk_field_uid'])
else:
qset_ann = ', '.join(['{0}=_ann._{0}'.format(f_uid) for f_uid in diff_fields])
qslt_ann = ','.join(['{0}.{1} AS _{2}'.format(self.fields_map[f_uid[1:]]['db_name'], self.fields_map[f_uid[1:]]['name'], f_uid) for f_uid in diff_fields])
qslt_var = 'SELECT variant_id, bin, chr, pos, ref, alt FROM wt_{0} WHERE annotated=False AND transcript_pk_field_uid IS NULL LIMIT {1}'.format(analysis_id, UPDATE_LOOP_RANGE)
qjoin = ' '.join(['LEFT JOIN {0} '.format(self.db_map[db_uid]['join'].format('_var'), self.db_map[db_uid]) for db_uid in diff_dbs])
query = "UPDATE wt_{0} SET annotated=True, {1} FROM (SELECT _var.variant_id, {2} FROM ({3}) AS _var {4}) AS _ann WHERE wt_{0}.variant_id=_ann.variant_id".format(analysis_id, qset_ann, qslt_ann, qslt_var, qjoin)
if qset_ann != "":
                # Mark all variants as not annotated (to allow a "resumable" update)
execute("UPDATE wt_{} SET annotated=False".format(analysis_id))
for page in range(0, total, UPDATE_LOOP_RANGE):
execute(query)
progress.update({"progress_current": page})
core.notify_all(progress)
progress.update({"step": 5, "progress_current": total})
core.notify_all(progress)
        # Apply queries to update the attribute and filter columns in the wt
if len(update_queries) > 0:
execute("".join(update_queries))
progress.update({"step": 6})
core.notify_all(progress)
        # Update the status of the analysis
query = "UPDATE analysis SET status='READY' WHERE id={}".format(analysis_id)
execute(query)
def request(self, analysis_id, mode, filter_json, fields=None, order=None, limit=100, offset=0, count=False):
"""
"""
        # Check parameters: if no field is provided, select the first available field by default to avoid an error
if fields is None:
fields = [next(iter(self.fields_map.keys()))]
if type(analysis_id) != int or analysis_id <= 0:
analysis_id = None
if mode not in ["table", "list"]:
mode = "table"
        # Get analysis data and check that its status allows filtering
analysis = Analysis.from_id(analysis_id)
if analysis is None:
raise RegovarException("Not able to retrieve analysis with provided id: {}".format(analysis_id))
        # Parse data to generate the sql query and retrieve the list of needed annotation databases/fields
query, field_uids, dbs_uids, sample_ids, filter_ids, attributes = self.build_query(analysis_id, analysis.reference_id, mode, filter_json, fields, order, limit, offset, count)
# Prepare database working table
if analysis.status is None or analysis.status == '':
self.create_working_table(analysis_id, sample_ids, field_uids, dbs_uids, filter_ids, attributes)
else:
self.update_working_table(analysis_id, sample_ids, field_uids, dbs_uids, filter_ids, attributes)
# Execute query
sql_result = None
with Timer() as t:
sql_result = execute(' '.join(query))
log("---\nFields:\n{0}\nFilter:\n{1}\nQuery:\n{2}\nRequest query: {3}".format(fields, filter_json, '\n'.join(query), t))
# Save filter in analysis settings
if not count and analysis_id > 0:
settings = {}
try:
settings = json.loads(execute("SELECT settings FROM analysis WHERE id={}".format(analysis_id)).first().settings)
settings["filter"] = filter_json
settings["fields"] = fields
settings["order"] = [] if order is None else order
execute("UPDATE analysis SET {0}update_date=CURRENT_TIMESTAMP WHERE id={1}".format("settings='{0}', ".format(json.dumps(settings)), analysis_id))
except:
# TODO: log error
err("Not able to save current filter")
# Get result
if count:
result = sql_result.first()[0]
else:
result = []
with Timer() as t:
if sql_result is not None:
for row in sql_result:
entry = {"id" : "{}_{}_{}".format(row.variant_id, row.transcript_pk_field_uid, row.transcript_pk_value )}
for f_uid in fields:
                        # Manage special case for fields split by sample
if self.fields_map[f_uid]['name'].startswith('s{}_'):
pattern = "row." + self.fields_map[f_uid]['name']
r = {}
for sid in sample_ids:
r[sid] = FilterEngine.parse_result(eval(pattern.format(sid)))
entry[f_uid] = r
else:
if self.fields_map[f_uid]['db_name_ui'] == 'Variant':
entry[f_uid] = FilterEngine.parse_result(eval("row.{}".format(self.fields_map[f_uid]['name'])))
else:
entry[f_uid] = FilterEngine.parse_result(eval("row._{}".format(f_uid)))
result.append(entry)
log("Result processing: {0}\nTotal result: {1}".format(t, "-"))
return result
def build_query(self, analysis_id, reference_id, mode, filter, fields, order=None, limit=100, offset=0, count=False):
"""
        This method builds the sql query according to the provided parameters, and also builds several lists with the ids of the
        fields, databases, samples, etc.: all the information that can be used by the analysis to work.
"""
# Data that will be computed and returned by this method !
query = [] # sql queries that correspond to the provided parameters (we will have several queries if need to create temp tables)
field_uids = [] # list of annotation field's uids that need to be present in the analysis working table
db_uids = [] # list of annotation databases uids used for the analysis
sample_ids = [] # list of sample's ids used for the analysis
filter_ids = [] # list of saved filter's ids for this analysis
attributes = {} # list of attributes (and their values by sample) defined for this analysis
# Retrieve sample ids of the analysis
for row in execute("select sample_id from analysis_sample where analysis_id={0}".format(analysis_id)):
sample_ids.append(str(row.sample_id))
# Retrieve attributes of the analysis
for row in execute("select sample_id, value, name from attribute where analysis_id={0}".format(analysis_id)):
if row.name not in attributes.keys():
attributes[row.name] = {row.sample_id: row.value}
else:
attributes[row.name].update({row.sample_id: row.value})
        # Init field uids and db uids with the default annotation fields according to the reference (hg19 for example)
# for row in execute("SELECT d.uid AS duid, f.uid FROM annotation_database d INNER JOIN annotation_field f ON d.uid=f.database_uid WHERE d.reference_id={} AND d.type='variant' AND f.wt_default=True".format(reference_id)):
# if row.duid not in db_uids:
# db_uids.append(row.duid)
# field_uids.append(row.uid)
        # Retrieve the saved filter ids of the analysis, and parse their filters to get the list of dbs/fields used by those filters
for row in execute("select id, filter from filter where analysis_id={0} ORDER BY id ASC".format(analysis_id)): # ORDER BY is important as a filter can "called" an oldest filter to be build.
filter_ids.append(row.id)
q, f, d = self.parse_filter(analysis_id, mode, sample_ids, row.filter, fields, None, None)
field_uids = array_merge(field_uids, f)
db_uids = array_merge(db_uids, d)
# Parse the current filter
query, f, d = self.parse_filter(analysis_id, mode, sample_ids, filter, fields, order, limit, offset, count)
field_uids = array_merge(field_uids, f)
db_uids = array_merge(db_uids, d)
        # return the query and all useful data about the annotations needed to execute it
return query, field_uids, db_uids, sample_ids, filter_ids, attributes
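    # Illustrative note (added sketch, not in the original source): build_query() returns a 6-tuple; for an
    # analysis with two samples it might look like
    #   (["SELECT DISTINCT variant_id, ... FROM wt_42 ...;"], ['field-uid-1'], ['db-uid-1'],
    #    ['10', '11'], [3], {'Sex': {10: 'M', 11: 'F'}})
    # where all uids, ids and attribute values above are hypothetical.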
def parse_filter(self, analysis_id, mode, sample_ids, filters, fields=[], order=None, limit=100, offset=0, count=False):
"""
        This method parses the json filter and returns the corresponding postgreSQL query, together with the lists of field and database uids used by the query
        (those databases/fields must be present in the working table for the query to run successfully)
"""
# Init some global variables
wt = 'wt_{}'.format(analysis_id)
query = ""
field_uids = []
db_uids = []
with_trx = False
# Build SELECT
fields_names = []
for f_uid in fields:
if self.fields_map[f_uid]["db_uid"] not in db_uids:
db_uids.append(self.fields_map[f_uid]["db_uid"])
field_uids.append(f_uid)
if self.fields_map[f_uid]['db_name_ui'] == 'Variant':
                # Manage special case for fields split by sample
if self.fields_map[f_uid]['name'].startswith('s{}_'):
fields_names.extend(['{}.'.format(wt) + self.fields_map[f_uid]['name'].format(s) for s in sample_ids])
else:
fields_names.append('{}.{}'.format(wt, self.fields_map[f_uid]["name"]))
else:
with_trx = with_trx or self.fields_map[f_uid]["db_type"] == "transcript"
fields_names.append('{}._{}'.format(wt, f_uid))
q_select = 'variant_id, transcript_pk_field_uid, transcript_pk_value{} {}'.format(',' if len(fields_names) > 0 else '', ', '.join(fields_names))
# Build FROM/JOIN
q_from = wt
# Build WHERE
temporary_to_import = {}
def check_field_uid(data):
if data[0] == 'field':
if self.fields_map[data[1]]["db_uid"] not in db_uids:
db_uids.append(self.fields_map[data[1]]["db_uid"])
field_uids.append(data[1])
def build_filter(data):
"""
            Recursive method that builds the query from the filter json data, one operator at a time
"""
operator = data[0]
if operator in ['AND', 'OR']:
if len(data[1]) == 0:
return ''
return ' (' + FilterEngine.op_map[operator].join([build_filter(f) for f in data[1]]) + ') '
elif operator in ['==', '!=', '>', '<', '>=', '<=']:
                # If comparing with a field, the field MUST BE the first operand
if data[1][0] == 'field':
metadata = self.fields_map[data[1][1]]
else:
metadata = {"type": "string", "name":""}
check_field_uid(data[1])
check_field_uid(data[2])
                # Manage special case for fields split by sample
if metadata['name'].startswith('s{}_'):
                    # With these special fields, field-to-field comparison is not allowed:
                    # the first operand shall always be the special field, and the second shall be anything except another special field
return ' (' + ' OR '.join(['{0}{1}{2}'.format(metadata['name'].format(s), FilterEngine.op_map[operator], parse_value(metadata["type"], data[2])) for s in sample_ids]) + ') '
else:
return '{0}{1}{2}'.format(parse_value(metadata["type"], data[1]), FilterEngine.op_map[operator], parse_value(metadata["type"], data[2]))
elif operator in ['~', '!~']:
check_field_uid(data[1])
check_field_uid(data[2])
return '{0}{1}{2}'.format(parse_value('string', data[1]), FilterEngine.op_map[operator], parse_value('string%', data[2]))
elif operator in ['IN', 'NOTIN']:
tmp_table = get_tmp_table(data[1], data[2])
temporary_to_import[tmp_table]['where'] = FilterEngine.op_map[operator].format(tmp_table, wt)
if data[1] == 'site':
temporary_to_import[tmp_table]['from'] = " LEFT JOIN {1} ON {0}.bin={1}.bin AND {0}.chr={1}.chr AND {0}.pos={1}.pos".format(wt, tmp_table)
else: # if data[1] == 'variant':
temporary_to_import[tmp_table]['from'] = " LEFT JOIN {1} ON {0}.bin={1}.bin AND {0}.chr={1}.chr AND {0}.pos={1}.pos AND {0}.ref={1}.ref AND {0}.alt={1}.alt".format(wt, tmp_table)
return temporary_to_import[tmp_table]['where']
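        # Illustrative sketch (assumption, not present in the original code): with a non-'Variant' field of
        # uid 'abc' and type 'int', a filter such as
        #   ['AND', [['>=', ['field', 'abc'], ['value', 30]], ['==', ['field', 'abc'], ['value', 42]]]]
        # would be rendered by build_filter() as something like
        #   " (_abc>=30 AND _abc=42) "
        # (the exact column name and operator spelling depend on fields_map and FilterEngine.op_map).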
def get_tmp_table(mode, data):
"""
            Parse json data to build the temp table needed by the set operations IN/NOTIN
mode: site or variant
data: json data about the temp table to create
"""
ttable_quer_map = "CREATE TABLE IF NOT EXISTS {0} AS {1}; "
if data[0] == 'sample':
tmp_table_name = "tmp_sample_{0}_{1}".format(data[1], mode)
if mode == 'site':
tmp_table_query = ttable_quer_map.format(tmp_table_name, "SELECT DISTINCT {0}.bin, {0}.chr, {0}.pos FROM {0} WHERE {0}.s{1}_gt IS NOT NULL".format(wt, data[1]))
else: # if mode = 'variant':
tmp_table_query = ttable_quer_map.format(tmp_table_name, "SELECT DISTINCT {0}.bin, {0}.chr, {0}.pos, {0}.ref, {0}.alt FROM {0} WHERE {0}.s{1}_gt IS NOT NULL".format(wt, data[1]))
elif data[0] == 'filter':
tmp_table_name = "tmp_filter_{0}".format(data[1])
if mode == 'site':
tmp_table_query = ttable_quer_map.format(tmp_table_name, "SELECT DISTINCT {0}.bin, {0}.chr, {0}.pos FROM {0} WHERE {0}.filter_{1}=True".format(wt, data[1]))
else: # if mode = 'variant':
tmp_table_query = ttable_quer_map.format(tmp_table_name, "SELECT DISTINCT {0}.bin, {0}.chr, {0}.pos, {0}.ref, {0}.alt FROM {0} WHERE {0}.filter_{1}=True".format(wt, data[1]))
elif data[0] == 'attribute':
key, value = data[1].split(':')
tmp_table_name = "tmp_attribute_{0}_{1}_{2}_{3}".format(analysis_id, key, value, mode)
if mode == 'site':
tmp_table_query = ttable_quer_map.format(tmp_table_name, "SELECT DISTINCT {0}.bin, {0}.chr, {0}.pos FROM {0} WHERE {0}.attr_{1}='{2}'".format(wt, key, value))
else: # if mode = 'variant':
tmp_table_query = ttable_quer_map.format(tmp_table_name, "SELECT DISTINCT {0}.bin, {0}.chr, {0}.pos, {0}.ref, {0}.alt FROM {0} WHERE {0}.attr_{1}='{2}'".format(wt, key, value))
temporary_to_import[tmp_table_name] = {'query': tmp_table_query + "CREATE INDEX IF NOT EXISTS {0}_idx_var ON {0} USING btree (bin, chr, pos);".format(tmp_table_name)}
return tmp_table_name
def parse_value(ftype, data):
if data[0] == 'field':
if self.fields_map[data[1]]["type"] == ftype:
if self.fields_map[data[1]]['db_name_ui'] == 'Variant':
return "{0}".format(self.fields_map[data[1]]["name"])
else:
return "_{0}".format(data[1])
if data[0] == 'value':
if ftype in ['int', 'float', 'enum', 'percent']:
return str(data[1])
elif ftype == 'string':
return "'{0}'".format(data[1])
elif ftype == 'string%':
return "'%%{0}%%'".format(data[1])
elif ftype == 'range' and len(data) == 3:
return 'int8range({0}, {1})'.format(data[1], data[2])
            raise RegovarException("FilterEngine.request.parse_value - Unknown type: {0} ({1})".format(ftype, data))
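        # Illustrative sketch (added, not in the original source) of parse_value() outputs:
        #   parse_value('int',     ['value', 42])        -> "42"
        #   parse_value('string',  ['value', 'PASS'])    -> "'PASS'"
        #   parse_value('string%', ['value', 'PASS'])    -> "'%%PASS%%'"
        #   parse_value('range',   ['value', 10, 20])    -> "int8range(10, 20)"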
# q_where = ""
# if len(sample_ids) == 1:
# q_where = "{0}.sample_id={1}".format(wt, sample_ids[0])
# elif len(sample_ids) > 1:
# q_where = "{0}.sample_id IN ({1})".format(wt, ','.join(sample_ids))
q_where = build_filter(filters)
if q_where is not None and len(q_where.strip()) > 0:
q_where = "WHERE " + q_where
# Build FROM/JOIN according to the list of used annotations databases
q_from += " ".join([t['from'] for t in temporary_to_import.values()])
# Build ORDER BY
        # TODO : currently it's not possible to do "order by" on special fields (GT and DP) because they are split by sample
q_order = ""
if order is not None and len(order) > 0:
orders = []
for f_uid in order:
asc = 'ASC'
if f_uid[0] == '-':
f_uid = f_uid[1:]
asc = 'DESC'
if self.fields_map[f_uid]['db_name_ui'] == 'Variant':
                    # Manage special case for fields split by sample
if self.fields_map[f_uid]['name'].startswith('s{}_'):
pass
else:
orders.append('{} {}'.format(self.fields_map[f_uid]["name"], asc))
else:
orders.append('_{} {}'.format(f_uid, asc))
q_order = 'ORDER BY {}'.format(', '.join(orders))
# build final query
query_tpm = [t['query'] for t in temporary_to_import.values()]
if count:
query_req = "SELECT DISTINCT {0} FROM {1} {2}".format(q_select, q_from, q_where)
query = query_tpm + ['SELECT COUNT(*) FROM ({0}) AS sub;'.format(query_req)]
else:
query_req = "SELECT DISTINCT {0} FROM {1} {2} {3} {4} {5};".format(q_select, q_from, q_where, q_order, 'LIMIT {}'.format(limit) if limit is not None else '', 'OFFSET {}'.format(offset) if offset is not None else '')
query = query_tpm + [query_req]
return query, field_uids, db_uids
@staticmethod
def get_hasname(analysis_id, mode, fields, filter_json):
# clean and sort fields list
        clean_fields = sorted(set(fields))
string_id = "{0}{1}{2}{3}".format(analysis_id, mode, clean_fields, json.dumps(filter_json))
return hashlib.md5(string_id.encode()).hexdigest()
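    # Illustrative note (added sketch): because the field list is de-duplicated and sorted before hashing,
    # get_hasname(1, 'table', ['b', 'a', 'a'], {}) and get_hasname(1, 'table', ['a', 'b'], {}) are expected
    # to return the same md5 digest.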
@staticmethod
def parse_result(value):
"""
        Parse the value returned by sqlAlchemy and cast it, if needed, into "simple" python types
"""
# if value is None:
# return ""
if type(value) == psycopg2._range.NumericRange:
return (value.lower, value.upper)
return value
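        # Illustrative sketch (assumption): a psycopg2 NumericRange such as NumericRange(10, 20) would be
        # returned as the tuple (10, 20); any other value is passed through unchanged.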
| REGOVAR/Annso | annso/core/annso/filter_manager.py | Python | agpl-3.0 | 34,605 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Report Configurator - Stock',
'version': '8.0.1.0.0',
'category': 'Reporting Subsystem',
'sequence': 14,
'summary': '',
'description': """
Report Configurator - Stock
=============================
""",
'author': 'ADHOC SA',
'website': 'www.adhoc.com.ar',
'images': [
],
'depends': [
'report_extended',
'stock_voucher',
],
'data': [
'views/report_view.xml',
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': True,
'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| HBEE/odoo-addons | report_extended_stock/__openerp__.py | Python | agpl-3.0 | 1,603 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, cint, cstr
from frappe import _
from frappe.model.mapper import get_mapped_doc
from frappe.model.document import Document
from six import iteritems
class SalaryStructure(Document):
def validate(self):
self.set_missing_values()
self.validate_amount()
self.strip_condition_and_formula_fields()
self.validate_max_benefits_with_flexi()
self.validate_component_based_on_tax_slab()
def set_missing_values(self):
overwritten_fields = ["depends_on_payment_days", "variable_based_on_taxable_salary", "is_tax_applicable", "is_flexible_benefit"]
overwritten_fields_if_missing = ["amount_based_on_formula", "formula", "amount"]
for table in ["earnings", "deductions"]:
for d in self.get(table):
component_default_value = frappe.db.get_value("Salary Component", cstr(d.salary_component),
overwritten_fields + overwritten_fields_if_missing, as_dict=1)
if component_default_value:
for fieldname in overwritten_fields:
value = component_default_value.get(fieldname)
if d.get(fieldname) != value:
d.set(fieldname, value)
if not (d.get("amount") or d.get("formula")):
for fieldname in overwritten_fields_if_missing:
d.set(fieldname, component_default_value.get(fieldname))
def validate_component_based_on_tax_slab(self):
for row in self.deductions:
if row.variable_based_on_taxable_salary and (row.amount or row.formula):
frappe.throw(_("Row #{0}: Cannot set amount or formula for Salary Component {1} with Variable Based On Taxable Salary")
.format(row.idx, row.salary_component))
def validate_amount(self):
if flt(self.net_pay) < 0 and self.salary_slip_based_on_timesheet:
frappe.throw(_("Net pay cannot be negative"))
def strip_condition_and_formula_fields(self):
# remove whitespaces from condition and formula fields
for row in self.earnings:
row.condition = row.condition.strip() if row.condition else ""
row.formula = row.formula.strip() if row.formula else ""
for row in self.deductions:
row.condition = row.condition.strip() if row.condition else ""
row.formula = row.formula.strip() if row.formula else ""
def validate_max_benefits_with_flexi(self):
have_a_flexi = False
if self.earnings:
flexi_amount = 0
for earning_component in self.earnings:
if earning_component.is_flexible_benefit == 1:
have_a_flexi = True
max_of_component = frappe.db.get_value("Salary Component", earning_component.salary_component, "max_benefit_amount")
flexi_amount += max_of_component
if have_a_flexi and flt(self.max_benefits) == 0:
frappe.throw(_("Max benefits should be greater than zero to dispense benefits"))
if have_a_flexi and flexi_amount and flt(self.max_benefits) > flexi_amount:
frappe.throw(_("Total flexible benefit component amount {0} should not be less than max benefits {1}")
.format(flexi_amount, self.max_benefits))
if not have_a_flexi and flt(self.max_benefits) > 0:
frappe.throw(_("Salary Structure should have flexible benefit component(s) to dispense benefit amount"))
def get_employees(self, **kwargs):
conditions, values = [], []
for field, value in kwargs.items():
if value:
conditions.append("{0}=%s".format(field))
values.append(value)
condition_str = " and " + " and ".join(conditions) if conditions else ""
employees = frappe.db.sql_list("select name from tabEmployee where status='Active' {condition}"
.format(condition=condition_str), tuple(values))
return employees
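	# Illustrative note (added sketch, not part of the original code): calling get_employees(company="Acme Ltd")
	# with a hypothetical company name builds the condition "company=%s" with values ("Acme Ltd",), producing
	# roughly: select name from tabEmployee where status='Active' and company=%s
	# Falsy keyword arguments are simply skipped.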
@frappe.whitelist()
def assign_salary_structure(self, company=None, grade=None, department=None, designation=None,employee=None,
from_date=None, base=None, variable=None, income_tax_slab=None):
employees = self.get_employees(company= company, grade= grade,department= department,designation= designation,name=employee)
if employees:
if len(employees) > 20:
frappe.enqueue(assign_salary_structure_for_employees, timeout=600,
employees=employees, salary_structure=self,from_date=from_date,
base=base, variable=variable, income_tax_slab=income_tax_slab)
else:
assign_salary_structure_for_employees(employees, self, from_date=from_date,
base=base, variable=variable, income_tax_slab=income_tax_slab)
else:
frappe.msgprint(_("No Employee Found"))
def assign_salary_structure_for_employees(employees, salary_structure, from_date=None, base=None, variable=None, income_tax_slab=None):
salary_structures_assignments = []
existing_assignments_for = get_existing_assignments(employees, salary_structure, from_date)
count=0
for employee in employees:
if employee in existing_assignments_for:
continue
count +=1
salary_structures_assignment = create_salary_structures_assignment(employee,
salary_structure, from_date, base, variable, income_tax_slab)
salary_structures_assignments.append(salary_structures_assignment)
frappe.publish_progress(count*100/len(set(employees) - set(existing_assignments_for)), title = _("Assigning Structures..."))
if salary_structures_assignments:
frappe.msgprint(_("Structures have been assigned successfully"))
def create_salary_structures_assignment(employee, salary_structure, from_date, base, variable, income_tax_slab=None):
assignment = frappe.new_doc("Salary Structure Assignment")
assignment.employee = employee
assignment.salary_structure = salary_structure.name
assignment.company = salary_structure.company
assignment.from_date = from_date
assignment.base = base
assignment.variable = variable
assignment.income_tax_slab = income_tax_slab
assignment.save(ignore_permissions = True)
assignment.submit()
return assignment.name
def get_existing_assignments(employees, salary_structure, from_date):
salary_structures_assignments = frappe.db.sql_list("""
select distinct employee from `tabSalary Structure Assignment`
where salary_structure=%s and employee in (%s)
and from_date=%s and company= %s and docstatus=1
""" % ('%s', ', '.join(['%s']*len(employees)),'%s', '%s'), [salary_structure.name] + employees+[from_date]+[salary_structure.company])
if salary_structures_assignments:
frappe.msgprint(_("Skipping Salary Structure Assignment for the following employees, as Salary Structure Assignment records already exists against them. {0}")
.format("\n".join(salary_structures_assignments)))
return salary_structures_assignments
@frappe.whitelist()
def make_salary_slip(source_name, target_doc = None, employee = None, as_print = False, print_format = None, for_preview=0, ignore_permissions=False):
def postprocess(source, target):
if employee:
employee_details = frappe.db.get_value("Employee", employee,
["employee_name", "branch", "designation", "department"], as_dict=1)
target.employee = employee
target.employee_name = employee_details.employee_name
target.branch = employee_details.branch
target.designation = employee_details.designation
target.department = employee_details.department
target.run_method('process_salary_structure', for_preview=for_preview)
doc = get_mapped_doc("Salary Structure", source_name, {
"Salary Structure": {
"doctype": "Salary Slip",
"field_map": {
"total_earning": "gross_pay",
"name": "salary_structure"
}
}
}, target_doc, postprocess, ignore_child_tables=True, ignore_permissions=ignore_permissions)
if cint(as_print):
doc.name = 'Preview for {0}'.format(employee)
return frappe.get_print(doc.doctype, doc.name, doc = doc, print_format = print_format)
else:
return doc
@frappe.whitelist()
def get_employees(salary_structure):
employees = frappe.get_list('Salary Structure Assignment',
filters={'salary_structure': salary_structure, 'docstatus': 1}, fields=['employee'])
if not employees:
frappe.throw(_("There's no Employee with Salary Structure: {0}. \
Assign {1} to an Employee to preview Salary Slip").format(salary_structure, salary_structure))
return list(set([d.employee for d in employees]))
| gsnbng/erpnext | erpnext/hr/doctype/salary_structure/salary_structure.py | Python | agpl-3.0 | 8,133 |
#!/usr/bin/env python
import argparse
import os
import sys
from loomengine import server
from loomengine import verify_has_connection_settings, \
get_server_url, verify_server_is_running, get_token
from loomengine_utils.connection import Connection
from loomengine_utils.exceptions import LoomengineUtilsError
class RunLabelAdd(object):
"""Add a new run labels
"""
def __init__(self, args=None, silent=False):
# Args may be given as an input argument for testing purposes
# or from the main parser.
# Otherwise get them from the parser.
if args is None:
args = self._get_args()
self.args = args
self.silent = silent
verify_has_connection_settings()
server_url = get_server_url()
verify_server_is_running(url=server_url)
self.connection = Connection(server_url, token=get_token())
def _get_args(self):
self.parser = self.get_parser()
return self.parser.parse_args()
@classmethod
def get_parser(cls, parser=None):
# If called from main, use the subparser provided.
# Otherwise create a top-level parser here.
if parser is None:
parser = argparse.ArgumentParser(__file__)
parser.add_argument(
'target',
metavar='TARGET',
help='identifier for run to be labeled')
parser.add_argument(
'label',
metavar='LABEL', help='label name to be added')
return parser
def run(self):
try:
runs = self.connection.get_run_index(
min=1, max=1,
query_string=self.args.target)
except LoomengineUtilsError as e:
raise SystemExit("ERROR! Failed to get run list: '%s'" % e)
label_data = {'label': self.args.label}
try:
label = self.connection.post_run_label(runs[0]['uuid'], label_data)
except LoomengineUtilsError as e:
raise SystemExit("ERROR! Failed to create label: '%s'" % e)
if not self.silent:
print 'Target "%s@%s" has been labeled as "%s"' % \
(runs[0].get('name'),
runs[0].get('uuid'),
label.get('label'))
class RunLabelRemove(object):
"""Remove a run label
"""
def __init__(self, args=None, silent=False):
if args is None:
args = self._get_args()
self.args = args
self.silent = silent
verify_has_connection_settings()
server_url = get_server_url()
verify_server_is_running(url=server_url)
self.connection = Connection(server_url, token=get_token())
def _get_args(self):
self.parser = self.get_parser()
return self.parser.parse_args()
@classmethod
def get_parser(cls, parser=None):
# If called from main, use the subparser provided.
# Otherwise create a top-level parser here.
if parser is None:
parser = argparse.ArgumentParser(__file__)
parser.add_argument(
'target',
metavar='TARGET',
help='identifier for run to be unlabeled')
parser.add_argument(
'label',
metavar='LABEL', help='label name to be removed')
return parser
def run(self):
try:
runs = self.connection.get_run_index(
min=1, max=1,
query_string=self.args.target)
except LoomengineUtilsError as e:
raise SystemExit("ERROR! Failed to get run list: '%s'" % e)
label_data = {'label': self.args.label}
try:
label = self.connection.remove_run_label(
runs[0]['uuid'], label_data)
except LoomengineUtilsError as e:
raise SystemExit("ERROR! Failed to remove label: '%s'" % e)
if not self.silent:
print 'Label %s has been removed from run "%s@%s"' % \
(label.get('label'),
runs[0].get('name'),
runs[0].get('uuid'))
class RunLabelList(object):
def __init__(self, args=None, silent=False):
if args is None:
args = self._get_args()
self.args = args
self.silent = silent
verify_has_connection_settings()
server_url = get_server_url()
verify_server_is_running(url=server_url)
self.connection = Connection(server_url, token=get_token())
def _get_args(self):
self.parser = self.get_parser()
return self.parser.parse_args()
@classmethod
def get_parser(cls, parser=None):
# If called from main, use the subparser provided.
# Otherwise create a top-level parser here.
if parser is None:
parser = argparse.ArgumentParser(__file__)
parser.add_argument(
'target',
metavar='TARGET',
nargs='?',
help='show labels only for the specified run')
return parser
def run(self):
if self.args.target:
try:
runs = self.connection.get_run_index(
min=1, max=1,
query_string=self.args.target)
except LoomengineUtilsError as e:
raise SystemExit("ERROR! Failed to get run list: '%s'" % e)
try:
label_data = self.connection.list_run_labels(runs[0]['uuid'])
except LoomengineUtilsError as e:
raise SystemExit("ERROR! Failed to get label list: '%s'" % e)
labels = label_data.get('labels', [])
if not self.silent:
print '[showing %s labels]' % len(labels)
for label in labels:
print label
else:
try:
label_list = self.connection.get_run_label_index()
except LoomengineUtilsError as e:
raise SystemExit("ERROR! Failed to get label list: '%s'" % e)
label_counts = {}
for item in label_list:
label_counts.setdefault(item.get('label'), 0)
label_counts[item.get('label')] += 1
if not self.silent:
print '[showing %s labels]' % len(label_counts)
for key in label_counts:
print "%s (%s)" % (key, label_counts[key])
class RunLabel(object):
"""Configures and executes subcommands under "label" on the parent parser.
"""
def __init__(self, args=None, silent=False):
if args is None:
args = self._get_args()
self.args = args
self.silent = silent
def _get_args(self):
parser = self.get_parser()
return parser.parse_args()
@classmethod
def get_parser(cls, parser=None):
# If called from main, a subparser should be provided.
# Otherwise we create a top-level parser here.
if parser is None:
parser = argparse.ArgumentParser(__file__)
subparsers = parser.add_subparsers()
add_subparser = subparsers.add_parser(
'add', help='add a run label')
RunLabelAdd.get_parser(add_subparser)
add_subparser.set_defaults(SubSubSubcommandClass=RunLabelAdd)
remove_subparser = subparsers.add_parser(
'remove', help='remove a run label')
RunLabelRemove.get_parser(remove_subparser)
remove_subparser.set_defaults(SubSubSubcommandClass=RunLabelRemove)
list_subparser = subparsers.add_parser(
'list', help='list run labels')
RunLabelList.get_parser(list_subparser)
list_subparser.set_defaults(SubSubSubcommandClass=RunLabelList)
return parser
def run(self):
return self.args.SubSubSubcommandClass(
self.args, silent=self.silent).run()
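# Illustrative usage sketch (assumption, not from this file): if this module is wired under the main
# "loom" CLI as the "run label" subcommand group, invocations might look like
#   loom run label add my-run-name urgent
#   loom run label list my-run-name
# where "my-run-name" and "urgent" are hypothetical run identifier and label values.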
if __name__ == '__main__':
response = RunLabel().run()
| StanfordBioinformatics/loom | client/loomengine/run_label.py | Python | agpl-3.0 | 7,876 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class stock_picking(osv.osv):
_inherit = 'stock.picking'
def action_invoice_create(self, cr, uid, ids, journal_id=False,
group=False, type='out_invoice', context=None):
obj_bud_mov = self.pool.get('budget.move')
obj_bud_line = self.pool.get('budget.move.line')
purchase_line_obj = self.pool.get('purchase.order.line')
invoice_obj = self.pool.get('account.invoice')
purchase_obj = self.pool.get('purchase.order')
invoice_line_obj = self.pool.get('account.invoice.line')
invoices= super(stock_picking, self).action_invoice_create(cr, uid, ids, journal_id=journal_id, group=group, type=type, context=context)
        res = {'res': invoices}
for picking in res.keys():
invoice_id = res[picking]
invoice = invoice_obj.browse(cr, uid, invoice_id, context=context)
for invoice_line in invoice.invoice_line:
#purchase_order_line_invoice_rel
cr.execute('''SELECT order_line_id FROM purchase_order_line_invoice_rel \
WHERE invoice_id = %s''',(invoice_line.id,))
count = cr.fetchall()
for po_line_id in count:
po_line = purchase_line_obj.browse(cr, uid, [po_line_id[0]], context=context)
asoc_bud_line_id = obj_bud_line.search(cr, uid, [('po_line_id','=',po_line.id), ])[0]
obj_bud_line.write(cr, uid, [asoc_bud_line_id],{'inv_line_id': invoice_line.id}, context=context)
move_id = po_line.order_id.budget_move_id.id
invoice_obj.write(cr, uid, invoice_id, {'budget_move_id': move_id, 'from_order':True}, context=context)
obj_bud_mov.signal_workflow(cr, uid, [move_id], 'button_execute', context=context)
return invoices
| ClearCorp/odoo-clearcorp | TODO-9.0/budget/stock.py | Python | agpl-3.0 | 3,050 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import datetime
from django.utils import timezone
from mock import patch
import pytz
from ureport.contacts.models import ContactField, Contact, ReportersCounter
from ureport.contacts.tasks import fetch_contacts_task
from ureport.locations.models import Boundary
from ureport.tests import DashTest, TembaContactField, MockTembaClient, TembaContact
from temba_client.v1.types import Group as TembaGroup
from ureport.utils import json_date_to_datetime
class ContactFieldTest(DashTest):
def setUp(self):
super(ContactFieldTest, self).setUp()
self.nigeria = self.create_org('nigeria', self.admin)
def test_kwargs_from_temba(self):
temba_contact_field = TembaContactField.create(key='foo', label='Bar', value_type='T')
kwargs = ContactField.kwargs_from_temba(self.nigeria, temba_contact_field)
self.assertEqual(kwargs, dict(org=self.nigeria, key='foo', label='Bar', value_type='T'))
# try creating contact from them
ContactField.objects.create(**kwargs)
@patch('dash.orgs.models.TembaClient1', MockTembaClient)
def test_fetch_contact_fields(self):
ContactField.objects.create(org=self.nigeria, key='old', label='Old', value_type='T')
field_keys = ContactField.fetch_contact_fields(self.nigeria)
self.assertEqual(field_keys, ['occupation'])
self.assertIsNone(ContactField.objects.filter(key='old', org=self.nigeria).first())
contact_field = ContactField.objects.get()
self.assertEqual(contact_field.org, self.nigeria)
self.assertEqual(contact_field.key, 'occupation')
self.assertEqual(contact_field.label, 'Activité')
self.assertEqual(contact_field.value_type, 'T')
@patch('dash.orgs.models.TembaClient1', MockTembaClient)
def test_get_contact_fields(self):
field_keys = ContactField.get_contact_fields(self.nigeria)
self.assertEqual(field_keys, ['occupation'])
with patch('django.core.cache.cache.get') as cache_get_mock:
cache_get_mock.return_value = None
field_keys = ContactField.get_contact_fields(self.nigeria)
self.assertEqual(field_keys, ['occupation'])
cache_get_mock.return_value = ['occupation']
with patch('ureport.contacts.models.ContactField.fetch_contact_fields') as mock_fetch:
ContactField.get_contact_fields(self.nigeria)
self.assertFalse(mock_fetch.called)
class ContactTest(DashTest):
def setUp(self):
super(ContactTest, self).setUp()
self.nigeria = self.create_org('nigeria', self.admin)
self.nigeria.set_config('reporter_group', "Ureporters")
self.nigeria.set_config('registration_label', "Registration Date")
self.nigeria.set_config('state_label', "State")
self.nigeria.set_config('district_label', "LGA")
self.nigeria.set_config('occupation_label', "Activité")
self.nigeria.set_config('born_label', "Born")
self.nigeria.set_config('gender_label', 'Gender')
self.nigeria.set_config('female_label', "Female")
self.nigeria.set_config('male_label', 'Male')
# boundaries fetched
self.country = Boundary.objects.create(org=self.nigeria, osm_id="R-NIGERIA", name="Nigeria", level=0, parent=None,
geometry='{"foo":"bar-country"}')
self.state = Boundary.objects.create(org=self.nigeria, osm_id="R-LAGOS", name="Lagos", level=1,
parent=self.country, geometry='{"foo":"bar-state"}')
self.district = Boundary.objects.create(org=self.nigeria, osm_id="R-OYO", name="Oyo", level=2,
parent=self.state, geometry='{"foo":"bar-state"}')
self.registration_date = ContactField.objects.create(org=self.nigeria, key='registration_date',
label='Registration Date', value_type='T')
self.state_field = ContactField.objects.create(org=self.nigeria, key='state', label='State', value_type='S')
self.district_field = ContactField.objects.create(org=self.nigeria, key='lga', label='LGA', value_type='D')
self.occupation_field = ContactField.objects.create(org=self.nigeria, key='occupation', label='Activité',
value_type='T')
self.born_field = ContactField.objects.create(org=self.nigeria, key='born', label='Born', value_type='T')
self.gender_field = ContactField.objects.create(org=self.nigeria, key='gender', label='Gender', value_type='T')
def test_kwargs_from_temba(self):
temba_contact = TembaContact.create(uuid='C-006', name="Jan", urns=['tel:123'],
groups=['G-001', 'G-007'],
fields={'registration_date': None, 'state': None,
'lga': None, 'occupation': None, 'born': None,
'gender': None},
language='eng')
kwargs = Contact.kwargs_from_temba(self.nigeria, temba_contact)
self.assertEqual(kwargs, dict(uuid='C-006', org=self.nigeria, gender='', born=0, occupation='',
registered_on=None, state='', district=''))
# try creating contact from them
Contact.objects.create(**kwargs)
# Invalid boundaries become ''
temba_contact = TembaContact.create(uuid='C-007', name="Jan", urns=['tel:123'],
groups=['G-001', 'G-007'],
fields={'registration_date': '2014-01-02T03:04:05.000000Z',
'state': 'Kigali', 'lga': 'Oyo', 'occupation': 'Student',
'born': '1990', 'gender': 'Male'},
language='eng')
kwargs = Contact.kwargs_from_temba(self.nigeria, temba_contact)
self.assertEqual(kwargs, dict(uuid='C-007', org=self.nigeria, gender='M', born=1990, occupation='Student',
registered_on=json_date_to_datetime('2014-01-02T03:04:05.000'), state='',
district=''))
# try creating contact from them
Contact.objects.create(**kwargs)
temba_contact = TembaContact.create(uuid='C-008', name="Jan", urns=['tel:123'],
groups=['G-001', 'G-007'],
fields={'registration_date': '2014-01-02T03:04:05.000000Z', 'state':'Lagos',
'lga': 'Oyo', 'occupation': 'Student', 'born': '1990',
'gender': 'Male'},
language='eng')
kwargs = Contact.kwargs_from_temba(self.nigeria, temba_contact)
self.assertEqual(kwargs, dict(uuid='C-008', org=self.nigeria, gender='M', born=1990, occupation='Student',
registered_on=json_date_to_datetime('2014-01-02T03:04:05.000'), state='R-LAGOS',
district='R-OYO'))
# try creating contact from them
Contact.objects.create(**kwargs)
def test_fetch_contacts(self):
self.nigeria.set_config('reporter_group', 'Reporters')
tz = pytz.timezone('UTC')
with patch.object(timezone, 'now', return_value=tz.localize(datetime(2015, 9, 29, 10, 20, 30, 40))):
with patch('dash.orgs.models.TembaClient1.get_groups') as mock_groups:
group = TembaGroup.create(uuid="uuid-8", name='reporters', size=120)
mock_groups.return_value = [group]
with patch('dash.orgs.models.TembaClient1.get_contacts') as mock_contacts:
mock_contacts.return_value = [
TembaContact.create(uuid='000-001', name="Ann", urns=['tel:1234'], groups=['000-002'],
fields=dict(state="Lagos", lga="Oyo", gender='Female', born="1990"),
language='eng',
modified_on=datetime(2015, 9, 20, 10, 20, 30, 400000, pytz.utc))]
seen_uuids = Contact.fetch_contacts(self.nigeria)
self.assertEqual(seen_uuids, [])
group = TembaGroup.create(uuid="000-002", name='reporters', size=120)
mock_groups.return_value = [group]
with patch('dash.orgs.models.TembaClient1.get_contacts') as mock_contacts:
mock_contacts.return_value = [
TembaContact.create(uuid='000-001', name="Ann",urns=['tel:1234'], groups=['000-002'],
fields=dict(state="Lagos", lga="Oyo",gender='Female', born="1990"),
language='eng',
modified_on=datetime(2015, 9, 20, 10, 20, 30, 400000, pytz.utc))]
seen_uuids = Contact.fetch_contacts(self.nigeria)
self.assertTrue('000-001' in seen_uuids)
contact = Contact.objects.get()
self.assertEqual(contact.uuid, '000-001')
self.assertEqual(contact.org, self.nigeria)
self.assertEqual(contact.state, 'R-LAGOS')
self.assertEqual(contact.district, 'R-OYO')
self.assertEqual(contact.gender, 'F')
self.assertEqual(contact.born, 1990)
Contact.fetch_contacts(self.nigeria, after=datetime(2014, 12, 01, 22, 34, 36, 123000, pytz.utc))
self.assertTrue('000-001' in seen_uuids)
# delete the contacts
Contact.objects.all().delete()
group1 = TembaGroup.create(uuid="000-001", name='reporters too', size=10)
group2 = TembaGroup.create(uuid="000-002", name='reporters', size=120)
mock_groups.return_value = [group1, group2]
with patch('dash.orgs.models.TembaClient1.get_contacts') as mock_contacts:
mock_contacts.return_value = [
TembaContact.create(uuid='000-001', name="Ann",urns=['tel:1234'], groups=['000-002'],
fields=dict(state="Lagos", lga="Oyo",gender='Female', born="1990"),
language='eng',
modified_on=datetime(2015, 9, 20, 10, 20, 30, 400000, pytz.utc))]
seen_uuids = Contact.fetch_contacts(self.nigeria)
self.assertTrue('000-001' in seen_uuids)
contact = Contact.objects.get()
self.assertEqual(contact.uuid, '000-001')
self.assertEqual(contact.org, self.nigeria)
self.assertEqual(contact.state, 'R-LAGOS')
self.assertEqual(contact.district, 'R-OYO')
self.assertEqual(contact.gender, 'F')
self.assertEqual(contact.born, 1990)
Contact.fetch_contacts(self.nigeria, after=datetime(2014, 12, 01, 22, 34, 36, 123000, pytz.utc))
self.assertTrue('000-001' in seen_uuids)
def test_reporters_counter(self):
self.assertEqual(ReportersCounter.get_counts(self.nigeria), dict())
Contact.objects.create(uuid='C-007', org=self.nigeria, gender='M', born=1990, occupation='Student',
registered_on=json_date_to_datetime('2014-01-02T03:04:05.000'), state='R-LAGOS',
district='R-OYO')
expected = dict()
expected['total-reporters'] = 1
expected['gender:m'] = 1
expected['occupation:student'] = 1
expected['born:1990'] = 1
expected['registered_on:2014-01-02'] = 1
expected['state:R-LAGOS'] = 1
expected['district:R-OYO'] = 1
self.assertEqual(ReportersCounter.get_counts(self.nigeria), expected)
Contact.objects.create(uuid='C-008', org=self.nigeria, gender='M', born=1980, occupation='Teacher',
registered_on=json_date_to_datetime('2014-01-02T03:07:05.000'), state='R-LAGOS',
district='R-OYO')
expected = dict()
expected['total-reporters'] = 2
expected['gender:m'] = 2
expected['occupation:student'] = 1
expected['occupation:teacher'] = 1
expected['born:1990'] = 1
expected['born:1980'] = 1
expected['registered_on:2014-01-02'] = 2
expected['state:R-LAGOS'] = 2
expected['district:R-OYO'] = 2
self.assertEqual(ReportersCounter.get_counts(self.nigeria), expected)
@patch('dash.orgs.models.TembaClient1', MockTembaClient)
def test_tasks(self):
with self.settings(CACHES={'default': {'BACKEND': 'redis_cache.cache.RedisCache',
'LOCATION': '127.0.0.1:6379:1',
'OPTIONS': {'CLIENT_CLASS': 'redis_cache.client.DefaultClient'}
}}):
with patch('ureport.contacts.tasks.Contact.fetch_contacts') as mock_fetch_contacts:
with patch('ureport.contacts.tasks.Boundary.fetch_boundaries') as mock_fetch_boundaries:
with patch('ureport.contacts.tasks.ContactField.fetch_contact_fields') as mock_fetch_contact_fields:
mock_fetch_contacts.return_value = 'FETCHED'
mock_fetch_boundaries.return_value = 'FETCHED'
mock_fetch_contact_fields.return_value = 'FETCHED'
fetch_contacts_task(self.nigeria.pk, True)
mock_fetch_contacts.assert_called_once_with(self.nigeria, after=None)
mock_fetch_boundaries.assert_called_with(self.nigeria)
mock_fetch_contact_fields.assert_called_with(self.nigeria)
self.assertEqual(mock_fetch_boundaries.call_count, 2)
self.assertEqual(mock_fetch_contact_fields.call_count, 2)
mock_fetch_contacts.reset_mock()
mock_fetch_boundaries.reset_mock()
mock_fetch_contact_fields.reset_mock()
with patch('django.core.cache.cache.get') as cache_get_mock:
date_str = '2014-01-02T01:04:05.000Z'
d1 = json_date_to_datetime(date_str)
cache_get_mock.return_value = date_str
fetch_contacts_task(self.nigeria.pk)
mock_fetch_contacts.assert_called_once_with(self.nigeria, after=d1)
self.assertFalse(mock_fetch_boundaries.called)
self.assertFalse(mock_fetch_contact_fields.called)
| eHealthAfrica/ureport | ureport/contacts/tests.py | Python | agpl-3.0 | 15,406 |
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
from bson import ObjectId
from superdesk import get_resource_service
from test_factory import SuperdeskTestCase
from eve.utils import date_to_str
from superdesk.utc import get_expiry_date, utcnow
from apps.archive.commands import get_overdue_scheduled_items
from apps.archive.archive import SOURCE as ARCHIVE
from superdesk.errors import SuperdeskApiError
from datetime import timedelta, datetime
from pytz import timezone
from apps.archive.common import validate_schedule, remove_media_files, \
format_dateline_to_locmmmddsrc, convert_task_attributes_to_objectId, \
is_genre, BROADCAST_GENRE
from settings import ORGANIZATION_NAME_ABBREVIATION
class RemoveSpikedContentTestCase(SuperdeskTestCase):
articles = [{'guid': 'tag:localhost:2015:69b961ab-2816-4b8a-a584-a7b402fed4f9',
'_id': '1',
'type': 'text',
'last_version': 3,
'_current_version': 4,
'body_html': 'Test body',
'urgency': 4,
'headline': 'Two students missing',
'pubstatus': 'usable',
'firstcreated': utcnow(),
'byline': 'By Alan Karben',
'ednote': 'Andrew Marwood contributed to this article',
'keywords': ['Student', 'Crime', 'Police', 'Missing'],
'subject':[{'qcode': '17004000', 'name': 'Statistics'},
{'qcode': '04001002', 'name': 'Weather'}],
'state': 'draft',
'expiry': utcnow() + timedelta(minutes=20),
'unique_name': '#1'},
{'guid': 'tag:localhost:2015:69b961ab-2816-4b8a-a974-xy4532fe33f9',
'_id': '2',
'last_version': 3,
'_current_version': 4,
'body_html': 'Test body of the second article',
'urgency': 4,
'headline': 'Another two students missing',
'pubstatus': 'usable',
'firstcreated': utcnow(),
'byline': 'By Alan Karben',
'ednote': 'Andrew Marwood contributed to this article',
'keywords': ['Student', 'Crime', 'Police', 'Missing'],
'subject':[{'qcode': '17004000', 'name': 'Statistics'},
{'qcode': '04001002', 'name': 'Weather'}],
'expiry': utcnow() + timedelta(minutes=20),
'state': 'draft',
'type': 'text',
'unique_name': '#2'},
{'guid': 'tag:localhost:2015:69b961ab-2816-4b8a-a584-a7b402fed4fa',
'_id': '3',
'_current_version': 4,
'body_html': 'Test body',
'urgency': 4,
'headline': 'Two students missing killed',
'pubstatus': 'usable',
'firstcreated': utcnow(),
'byline': 'By Alan Karben',
'ednote': 'Andrew Marwood contributed to this article killed',
'keywords': ['Student', 'Crime', 'Police', 'Missing'],
'subject':[{'qcode': '17004000', 'name': 'Statistics'},
{'qcode': '04001002', 'name': 'Weather'}],
'state': 'draft',
'expiry': utcnow() + timedelta(minutes=20),
'type': 'text',
'unique_name': '#3'},
{'guid': 'tag:localhost:2015:69b961ab-2816-4b8a-a584-a7b402fed4fc',
'_id': '4',
'_current_version': 3,
'state': 'draft',
'type': 'composite',
'groups': [{'id': 'root', 'refs': [{'idRef': 'main'}], 'role': 'grpRole:NEP'},
{
'id': 'main',
'refs': [
{
'location': 'archive',
'guid': '1',
'residRef': '1',
'type': 'text'
},
{
'location': 'archive',
'residRef': '2',
'guid': '2',
'type': 'text'
}
],
'role': 'grpRole:main'}],
'firstcreated': utcnow(),
'expiry': utcnow() + timedelta(minutes=20),
'unique_name': '#4'},
{'guid': 'tag:localhost:2015:69b961ab-4b8a-a584-2816-a7b402fed4fc',
'_id': '5',
'_current_version': 3,
'state': 'draft',
'type': 'composite',
'groups': [{'id': 'root', 'refs': [{'idRef': 'main'}, {'idRef': 'story'}], 'role': 'grpRole:NEP'},
{
'id': 'main',
'refs': [
{
'location': 'archive',
'guid': '1',
'residRef': '1',
'type': 'text'
}
],
'role': 'grpRole:main'},
{
'id': 'story',
'refs': [
{
'location': 'archive',
'guid': '4',
'residRef': '4',
'type': 'composite'
}
],
'role': 'grpRole:story'}],
'firstcreated': utcnow(),
'expiry': utcnow() + timedelta(minutes=20),
'unique_name': '#5'}]
media = {
'viewImage': {
'media': '1592730d582080f4e9fcc2fcf43aa357bda0ed19ffe314ee3248624cd4d4bc54',
'mimetype': 'image/jpeg',
'href': 'http://192.168.220.209/api/upload/abc/raw?_schema=http',
'height': 452,
'width': 640
},
'thumbnail': {
'media': '52250b4f37da50ee663fdbff057a5f064479f8a8bbd24fb8fdc06135d3f807bb',
'mimetype': 'image/jpeg',
'href': 'http://192.168.220.209/api/upload/abc/raw?_schema=http',
'height': 120,
'width': 169
},
'baseImage': {
'media': '7a608aa8f51432483918027dd06d0ef385b90702bfeba84ac4aec38ed1660b18',
'mimetype': 'image/jpeg',
'href': 'http://192.168.220.209/api/upload/abc/raw?_schema=http',
'height': 990,
'width': 1400
},
'original': {
'media': 'stub.jpeg',
'mimetype': 'image/jpeg',
'href': 'http://192.168.220.209/api/upload/stub.jpeg/raw?_schema=http',
'height': 2475,
'width': 3500
}
}
def setUp(self):
super().setUp()
def test_query_getting_expired_content(self):
self.app.data.insert(ARCHIVE, [{'expiry': get_expiry_date(-10), 'state': 'spiked'}])
self.app.data.insert(ARCHIVE, [{'expiry': get_expiry_date(0), 'state': 'spiked'}])
self.app.data.insert(ARCHIVE, [{'expiry': get_expiry_date(10), 'state': 'spiked'}])
self.app.data.insert(ARCHIVE, [{'expiry': get_expiry_date(20), 'state': 'spiked'}])
self.app.data.insert(ARCHIVE, [{'expiry': get_expiry_date(30), 'state': 'spiked'}])
self.app.data.insert(ARCHIVE, [{'expiry': None, 'state': 'spiked'}])
self.app.data.insert(ARCHIVE, [{'unique_id': 97, 'state': 'spiked'}])
now = utcnow()
expired_items = get_resource_service(ARCHIVE).get_expired_items(now)
self.assertEquals(2, expired_items.count())
def test_query_removing_media_files_keeps(self):
self.app.data.insert(ARCHIVE, [{'state': 'spiked',
'expiry': get_expiry_date(-10),
'type': 'picture',
'renditions': self.media}])
self.app.data.insert('ingest', [{'type': 'picture', 'renditions': self.media}])
self.app.data.insert('archive_versions', [{'type': 'picture', 'renditions': self.media}])
self.app.data.insert('legal_archive', [{'_id': 1, 'type': 'picture', 'renditions': self.media}])
self.app.data.insert('legal_archive_versions', [{'_id': 1, 'type': 'picture', 'renditions': self.media}])
archive_items = self.app.data.find_all('archive', None)
self.assertEqual(archive_items.count(), 1)
deleted = remove_media_files(archive_items[0])
self.assertFalse(deleted)
def test_query_getting_overdue_scheduled_content(self):
self.app.data.insert(ARCHIVE, [{'publish_schedule': get_expiry_date(-10), 'state': 'published'}])
self.app.data.insert(ARCHIVE, [{'publish_schedule': get_expiry_date(-10), 'state': 'scheduled'}])
self.app.data.insert(ARCHIVE, [{'publish_schedule': get_expiry_date(0), 'state': 'spiked'}])
self.app.data.insert(ARCHIVE, [{'publish_schedule': get_expiry_date(10), 'state': 'scheduled'}])
self.app.data.insert(ARCHIVE, [{'unique_id': 97, 'state': 'spiked'}])
now = date_to_str(utcnow())
overdueItems = get_overdue_scheduled_items(now, 'archive')
self.assertEqual(1, overdueItems.count())
class ArchiveTestCase(SuperdeskTestCase):
def setUp(self):
super().setUp()
def test_validate_schedule(self):
validate_schedule(utcnow() + timedelta(hours=2))
def test_validate_schedule_date_with_datetime_as_string_raises_superdeskApiError(self):
self.assertRaises(SuperdeskApiError, validate_schedule, "2015-04-27T10:53:48+00:00")
def test_validate_schedule_date_with_datetime_in_past_raises_superdeskApiError(self):
self.assertRaises(SuperdeskApiError, validate_schedule, utcnow() + timedelta(hours=-2))
def _get_located_and_current_utc_ts(self):
current_ts = utcnow()
located = {"dateline": "city", "city_code": "Sydney", "state": "NSW", "city": "Sydney", "state_code": "NSW",
"country_code": "AU", "tz": "Australia/Sydney", "country": "Australia"}
current_timestamp = datetime.fromtimestamp(current_ts.timestamp(), tz=timezone(located['tz']))
if current_timestamp.month == 9:
formatted_date = 'Sept {}'.format(current_timestamp.strftime('%d'))
elif 3 <= current_timestamp.month <= 7:
formatted_date = current_timestamp.strftime('%B %d')
else:
formatted_date = current_timestamp.strftime('%b %d')
return located, formatted_date, current_ts
def test_format_dateline_to_format_when_only_city_is_present(self):
located, formatted_date, current_ts = self._get_located_and_current_utc_ts()
formatted_dateline = format_dateline_to_locmmmddsrc(located, current_ts)
self.assertEqual(formatted_dateline, 'SYDNEY %s %s -' % (formatted_date, ORGANIZATION_NAME_ABBREVIATION))
def test_format_dateline_to_format_when_only_city_and_state_are_present(self):
located, formatted_date, current_ts = self._get_located_and_current_utc_ts()
located['dateline'] = "city,state"
formatted_dateline = format_dateline_to_locmmmddsrc(located, current_ts)
self.assertEqual(formatted_dateline, 'SYDNEY, NSW %s %s -' % (formatted_date, ORGANIZATION_NAME_ABBREVIATION))
def test_format_dateline_to_format_when_only_city_and_country_are_present(self):
located, formatted_date, current_ts = self._get_located_and_current_utc_ts()
located['dateline'] = "city,country"
formatted_dateline = format_dateline_to_locmmmddsrc(located, current_ts)
self.assertEqual(formatted_dateline, 'SYDNEY, AU %s %s -' % (formatted_date, ORGANIZATION_NAME_ABBREVIATION))
def test_format_dateline_to_format_when_city_state_and_country_are_present(self):
located, formatted_date, current_ts = self._get_located_and_current_utc_ts()
located['dateline'] = "city,state,country"
formatted_dateline = format_dateline_to_locmmmddsrc(located, current_ts)
self.assertEqual(formatted_dateline, 'SYDNEY, NSW, AU %s %s -' % (formatted_date,
ORGANIZATION_NAME_ABBREVIATION))
def test_if_task_attributes_converted_to_objectid(self):
doc = {
'task': {
'user': '562435231d41c835d7b5fb55',
'desk': ObjectId("562435241d41c835d7b5fb5d"),
'stage': 'test',
'last_authoring_desk': 3245,
'last_production_desk': None
}
}
convert_task_attributes_to_objectId(doc)
self.assertIsInstance(doc['task']['user'], ObjectId)
self.assertEqual(doc['task']['desk'], ObjectId("562435241d41c835d7b5fb5d"))
self.assertEqual(doc['task']['stage'], 'test')
self.assertEqual(doc['task']['last_authoring_desk'], 3245)
self.assertIsNone(doc['task']['last_production_desk'])
class ArchiveCommonTestCase(SuperdeskTestCase):
def setUp(self):
super().setUp()
def test_broadcast_content(self):
content = {
'genre': [{'name': 'Broadcast Script', 'value': 'Broadcast Script'}]
}
self.assertTrue(is_genre(content, BROADCAST_GENRE))
def test_broadcast_content_if_genre_is_none(self):
content = {
'genre': None
}
self.assertFalse(is_genre(content, BROADCAST_GENRE))
def test_broadcast_content_if_genre_is_empty_list(self):
content = {
'genre': []
}
self.assertFalse(is_genre(content, BROADCAST_GENRE))
def test_broadcast_content_if_genre_is_other_than_broadcast(self):
content = {
'genre': [{'name': 'Article', 'value': 'Article'}]
}
self.assertFalse(is_genre(content, BROADCAST_GENRE))
self.assertTrue(is_genre(content, 'Article'))
| sivakuna-aap/superdesk | server/apps/archive/archive_test.py | Python | agpl-3.0 | 14,852 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.contrib.gis.db.models.fields
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='AreaSoltura',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('processo', models.IntegerField()),
('nome', models.CharField(verbose_name='Nome da propriedade', max_length=255)),
('endereco', models.CharField(verbose_name='Endereço', max_length=400)),
('municipio', models.CharField(verbose_name='Município', max_length=255)),
('uf', models.CharField(verbose_name='Unidade da Federação', max_length=2)),
('proprietario', models.CharField(verbose_name='Nome do proprietário', max_length=255)),
('cpf', models.IntegerField(verbose_name='CPF')),
('telefone', models.BigIntegerField()),
('email', models.EmailField(max_length=254)),
('area', models.FloatField(verbose_name='Área da Propriedade (ha)')),
('arl_app', models.FloatField(verbose_name='Área de reserva legal e proteção permanente')),
('bioma', models.CharField(verbose_name='Bioma', max_length=255)),
('fitofisionomia', models.CharField(max_length=255)),
('atividade', models.CharField(verbose_name='Atividade Econômica', max_length=255)),
('viveiro', models.IntegerField(verbose_name='Número de viveiros')),
('distancia', models.FloatField(verbose_name='Área da Propriedade (ha)')),
('tempo', models.FloatField(verbose_name='Tempo de viagem ao CETAS mais próximo')),
('vistoria', models.DateField()),
('geom', django.contrib.gis.db.models.fields.PolygonField(srid=4674)),
],
),
]
| ibamacsr/casv | casv/core/migrations/0002_areasoltura.py | Python | agpl-3.0 | 2,089 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('activities', '0004_manytomany_not_null'),
]
operations = [
migrations.AddField(
model_name='activity',
name='is_approved',
field=models.NullBooleanField(verbose_name='\u0627\u0644\u062d\u0627\u0644\u0629', choices=[(True, '\u0645\u0639\u062a\u0645\u062f'), (False, '\u0645\u0631\u0641\u0648\u0636'), (None, '\u0645\u0639\u0644\u0642')]),
),
]
| enjaz/enjaz | activities/migrations/0005_activity_is_approved.py | Python | agpl-3.0 | 587 |
#!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# ola_artnet_params.py
# Copyright (C) 2005 Simon Newton
"""Fetch some ArtNet parameters."""
__author__ = '[email protected] (Simon Newton)'
from ola.ClientWrapper import ClientWrapper
from ola import ArtNetConfigMessages_pb2
def ArtNetConfigureReply(state, response):
reply = ArtNetConfigMessages_pb2.Reply()
reply.ParseFromString(response)
print('Short Name: %s' % reply.options.short_name)
print('Long Name: %s' % reply.options.long_name)
print('Subnet: %d' % reply.options.subnet)
wrapper.Stop()
#Set this appropriately
device_alias = 1
wrapper = ClientWrapper()
client = wrapper.Client()
artnet_request = ArtNetConfigMessages_pb2.Request()
artnet_request.type = artnet_request.ARTNET_OPTIONS_REQUEST
client.ConfigureDevice(device_alias, artnet_request.SerializeToString(),
ArtNetConfigureReply)
wrapper.Run()
| ld3300/ola | python/examples/ola_artnet_params.py | Python | lgpl-2.1 | 1,591 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Fp16(Package):
"""FP16 is a header-only library for
conversion to/from half-precision floating point formats"""
homepage = "https://github.com/Maratyszcza/FP16/"
git = "https://github.com/Maratyszcza/FP16.git"
version('master')
def install(self, spec, prefix):
install_tree('include', prefix.include)
| mfherbst/spack | var/spack/repos/builtin/packages/fp16/package.py | Python | lgpl-2.1 | 1,604 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyBxPython(PythonPackage):
"""The bx-python project is a python library and associated set of scripts
to allow for rapid implementation of genome scale analyses."""
homepage = "https://github.com/bxlab/bx-python"
pypi = "bx-python/bx-python-0.8.8.tar.gz"
version('0.8.8', sha256='ad0808ab19c007e8beebadc31827e0d7560ac0e935f1100fb8cc93607400bb47')
version('0.7.4',
sha256='1066d1e56d062d0661f23c19942eb757bd7ab7cb8bc7d89a72fdc3931c995cb4',
url="https://github.com/bxlab/bx-python/archive/v0.7.4.tar.gz", deprecated=True)
depends_on('[email protected]:2.7', type=('build', 'run'), when='@:0.7')
depends_on('[email protected]:2.8,3.5:', type=('build', 'run'), when='@0.8:')
depends_on('py-setuptools', type='build')
depends_on('py-python-lzo', type=('build', 'run'), when='@:0.7')
depends_on('py-cython', type='build', when='@0.8:')
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-six', type=('build', 'run'), when='@0.8:')
| LLNL/spack | var/spack/repos/builtin/packages/py-bx-python/package.py | Python | lgpl-2.1 | 1,225 |
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('greck_smuggler')
mobileTemplate.setLevel(25)
mobileTemplate.setDifficulty(Difficulty.NORMAL)
mobileTemplate.setMinSpawnDistance(4)
mobileTemplate.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(False)
mobileTemplate.setScale(1)
mobileTemplate.setSocialGroup("olag greck")
mobileTemplate.setAssistRange(6)
mobileTemplate.setStalker(True)
mobileTemplate.setOptionsBitmask(128)
templates = Vector()
templates.add('object/mobile/shared_greck_thug_f_01.iff')
templates.add('object/mobile/shared_greck_thug_f_02.iff')
templates.add('object/mobile/shared_greck_thug_f_03.iff')
templates.add('object/mobile/shared_greck_thug_m_01.iff')
templates.add('object/mobile/shared_greck_thug_m_02.iff')
templates.add('object/mobile/shared_greck_thug_m_03.iff')
templates.add('object/mobile/shared_greck_thug_m_04.iff')
templates.add('object/mobile/shared_greck_thug_m_05.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
mobileTemplate.setDefaultAttack('meleeHit')
mobileTemplate.setAttacks(attacks)
lootPoolNames_1 = ['Junk']
lootPoolChances_1 = [100]
lootGroupChance_1 = 100
mobileTemplate.addToLootGroups(lootPoolNames_1,lootPoolChances_1,lootGroupChance_1)
core.spawnService.addMobileTemplate('greck_smuggler', mobileTemplate)
return | ProjectSWGCore/NGECore2 | scripts/mobiles/corellia/greck_smuggler.py | Python | lgpl-3.0 | 1,924 |
import re
from vFense.operations._constants import vFensePlugins
VALID_NOTIFICATION_PLUGINS = (
vFensePlugins.RV_PLUGIN, vFensePlugins.MONITORING_PLUGIN
)
INSTALL = 'install'
UNINSTALL = 'uninstall'
REBOOT = 'reboot'
SHUTDOWN = 'shutdown'
PASS = 'pass'
FAIL = 'fail'
CPU = 'cpu'
MEM = 'mem'
FS = 'filesystem'
VALID_RV_NOTIFICATIONS = (INSTALL, UNINSTALL, REBOOT, SHUTDOWN)
VALID_MONITORING_NOTIFICATIONS = (CPU, MEM, FS)
VALID_NOTIFICATIONS = VALID_RV_NOTIFICATIONS + VALID_MONITORING_NOTIFICATIONS
VALID_STATUSES_TO_ALERT_ON = (PASS, FAIL)
class NotificationCollections():
Notifications = 'notifications'
NotificationsHistory = 'notifications_history'
NotificationPlugins = 'notification_plugins'
class NotificationKeys():
NotificationId = 'notification_id'
NotificationType = 'notification_type'
RuleName = 'rule_name'
RuleDescription = 'rule_description'
CreatedBy = 'created_by'
CreatedTime = 'created_time'
ModifiedBy = 'modified_by'
ModifiedTime = 'modified_time'
Plugin = 'plugin'
User = 'user'
Group = 'group'
AllAgents = 'all_agents'
Agents = 'agents'
Tags = 'tags'
CustomerName = 'customer_name'
AppThreshold = 'app_threshold'
RebootThreshold = 'reboot_threshold'
ShutdownThreshold = 'shutdown_threshold'
CpuThreshold = 'cpu_threshold'
MemThreshold = 'mem_threshold'
FileSystemThreshold = 'filesystem_threshold'
FileSystem = 'filesystem'
class NotificationIndexes():
CustomerName = 'customer_name'
RuleNameAndCustomer = 'rule_name_and_customer'
NotificationTypeAndCustomer = 'notification_type_and_customer'
AppThresholdAndCustomer = 'app_threshold_and_customer'
RebootThresholdAndCustomer = 'reboot_threshold_and_customer'
ShutdownThresholdAndCustomer = 'shutdown_threshold_and_customer'
MemThresholdAndCustomer = 'mem_threshold_and_customer'
CpuThresholdAndCustomer = 'cpu_threshold_and_customer'
FileSystemThresholdAndFileSystemAndCustomer = (
'fs_threshold_and_fs_and_customer'
)
class NotificationHistoryKeys():
Id = 'id'
NotificationId = 'notification_id'
AlertSent = 'alert_sent'
AlertSentTime = 'alert_sent_time'
class NotificationHistoryIndexes():
NotificationId = 'notification_id'
class NotificationPluginKeys():
Id = 'id'
CustomerName = 'customer_name'
PluginName = 'plugin_name'
CreatedTime = 'created_time'
ModifiedTime = 'modified_time'
CreatedBy = 'created_by'
ModifiedBy = 'modified_by'
UserName = 'username'
Password = 'password'
Server = 'server'
Port = 'port'
IsTls = 'is_tls'
IsSsl = 'is_ssl'
FromEmail = 'from_email'
ToEmail = 'to_email'
class NotificationPluginIndexes():
CustomerName = 'customer_name'
def return_notif_type_from_operation(oper_type):
if re.search(r'^install', oper_type):
oper_type = INSTALL
elif re.search(r'^uninstall', oper_type):
oper_type = UNINSTALL
elif oper_type == REBOOT:
oper_type = REBOOT
elif oper_type == SHUTDOWN:
oper_type = SHUTDOWN
return(oper_type)
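# A minimal, hedged usage sketch for the helper above. The operation strings
# here are hypothetical examples chosen only to exercise each branch; real
# vFense operation names may differ. Unrecognized operations pass through
# unchanged.
#
#     >>> return_notif_type_from_operation('install_os_apps')
#     'install'
#     >>> return_notif_type_from_operation('uninstall_agent_apps')
#     'uninstall'
#     >>> return_notif_type_from_operation('reboot')
#     'reboot'
#     >>> return_notif_type_from_operation('custom_operation')
#     'custom_operation'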
| dtklein/vFense | tp/src/notifications/__init__.py | Python | lgpl-3.0 | 3,134 |
def itemTemplate():
return ['object/tangible/component/weapon/lightsaber/shared_lightsaber_module_force_crystal.iff']
def customItemName():
return "Shard Of The Serpent"
def biolink():
return 1
def customColor1():
return 3
def lootDescriptor():
return 'rarebuffitem'
def itemStats():
stats =['proc_name','towCrystalUberCombat','towCrystalUberCombat']
stats +=['effectname','Harmonious Counteraction','Harmonious Counteraction']
stats +=['duration','180','180']
stats +=['cooldown','3600','3600']
return stats | agry/NGECore2 | scripts/loot/lootItems/rarelootchest/shard_of_the_serpent.py | Python | lgpl-3.0 | 538 |
import os
import shutil
from makegyp import formula
from makegyp.core import gyp
from makegyp.core import parser
class Openssl(formula.Formula):
parser = parser.GccParser()
url = 'http://www.openssl.org/source/openssl-1.0.1e.tar.gz'
sha256 = 'f74f15e8c8ff11aa3d5bb5f276d202ec18d7246e95f961db76054199c69c1ae3'
dependencies = ['zlib']
default_target_arch = 'ia32'
def configure(self):
return './config zlib no-shared'
def make(self):
return 'make'
def patch_gyp_dict(self, gyp_dict):
# Patchs the libcrypto target:
for target in gyp_dict['targets']:
if target['target_name'] == 'libcrypto':
# Adds the missing `mem_clr.c` source:
target['sources'].append('crypto/mem_clr.c')
target['sources'].sort()
# Adds zlib as dependency:
target['dependencies'] = ['../zlib/zlib.gyp:libz']
break
def post_process(self, package_root):
# Copies the generated "*.s" files to package:
for target in self.gyp['targets']:
for source in target['sources']:
if source.endswith('.s'):
print('Copying source file: %s' % source)
path_components = source.split('/')
source = os.path.join(self.tmp_package_root,
*path_components)
dest = os.path.join(package_root, *path_components)
shutil.copyfile(source, dest)
# Copies config files:
config_file_paths = ['crypto/buildinf.h']
for path in config_file_paths:
print('Copying config file: %s' % path)
source = os.path.join(self.tmp_package_root, *path.split('/'))
dest = os.path.join(package_root, formula.kConfigRootDirectoryName,
gyp.get_os(), self.default_target_arch,
os.path.basename(source))
shutil.copyfile(source, dest)
| olliwang/makegyp | makegyp/formula/openssl.py | Python | lgpl-3.0 | 2,054 |
# -*- coding: iso-8859-1 -*-
#
# Copyright (C) 2009 Rene Liebscher
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along with
# this program; if not, see <http://www.gnu.org/licenses/>.
#
"""Documentation
"""
__revision__ = "$Id: __init__.py,v 1.2 2009/08/07 07:19:18 rliebscher Exp $"
| arruda/pyfuzzy | fuzzy/doc/structure/dot/__init__.py | Python | lgpl-3.0 | 826 |
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 1.3.40
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
# This file is compatible with both classic and new-style classes.
from sys import version_info
if version_info >= (2,6,0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_EST_Track', [dirname(__file__)])
except ImportError:
import _EST_Track
return _EST_Track
if fp is not None:
try:
_mod = imp.load_module('_EST_Track', fp, pathname, description)
finally:
fp.close()
return _mod
_EST_Track = swig_import_helper()
del swig_import_helper
else:
import _EST_Track
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static) or hasattr(self,name):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
read_ok = _EST_Track.read_ok
read_format_error = _EST_Track.read_format_error
read_not_found_error = _EST_Track.read_not_found_error
read_error = _EST_Track.read_error
write_ok = _EST_Track.write_ok
write_fail = _EST_Track.write_fail
write_error = _EST_Track.write_error
write_partial = _EST_Track.write_partial
connect_ok = _EST_Track.connect_ok
connect_not_found_error = _EST_Track.connect_not_found_error
connect_not_allowed_error = _EST_Track.connect_not_allowed_error
connect_system_error = _EST_Track.connect_system_error
connect_error = _EST_Track.connect_error
import EST_FVector
class EST_Track(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, EST_Track, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, EST_Track, name)
__repr__ = _swig_repr
__swig_getmethods__["default_frame_shift"] = _EST_Track.EST_Track_default_frame_shift_get
if _newclass:default_frame_shift = _swig_property(_EST_Track.EST_Track_default_frame_shift_get)
__swig_getmethods__["default_sample_rate"] = _EST_Track.EST_Track_default_sample_rate_get
if _newclass:default_sample_rate = _swig_property(_EST_Track.EST_Track_default_sample_rate_get)
def __init__(self, *args):
this = _EST_Track.new_EST_Track(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _EST_Track.delete_EST_Track
__del__ = lambda self : None;
def resize(self, *args): return _EST_Track.EST_Track_resize(self, *args)
def set_num_channels(self, *args): return _EST_Track.EST_Track_set_num_channels(self, *args)
def set_num_frames(self, *args): return _EST_Track.EST_Track_set_num_frames(self, *args)
def set_channel_name(self, *args): return _EST_Track.EST_Track_set_channel_name(self, *args)
def set_aux_channel_name(self, *args): return _EST_Track.EST_Track_set_aux_channel_name(self, *args)
def copy_setup(self, *args): return _EST_Track.EST_Track_copy_setup(self, *args)
def name(self): return _EST_Track.EST_Track_name(self)
def set_name(self, *args): return _EST_Track.EST_Track_set_name(self, *args)
def frame(self, *args): return _EST_Track.EST_Track_frame(self, *args)
def channel(self, *args): return _EST_Track.EST_Track_channel(self, *args)
def sub_track(self, *args): return _EST_Track.EST_Track_sub_track(self, *args)
def copy_sub_track(self, *args): return _EST_Track.EST_Track_copy_sub_track(self, *args)
def copy_sub_track_out(self, *args): return _EST_Track.EST_Track_copy_sub_track_out(self, *args)
def copy_channel_out(self, *args): return _EST_Track.EST_Track_copy_channel_out(self, *args)
def copy_frame_out(self, *args): return _EST_Track.EST_Track_copy_frame_out(self, *args)
def copy_channel_in(self, *args): return _EST_Track.EST_Track_copy_channel_in(self, *args)
def copy_frame_in(self, *args): return _EST_Track.EST_Track_copy_frame_in(self, *args)
def channel_position(self, *args): return _EST_Track.EST_Track_channel_position(self, *args)
def has_channel(self, *args): return _EST_Track.EST_Track_has_channel(self, *args)
def a(self, *args): return _EST_Track.EST_Track_a(self, *args)
def t(self, i = 0): return _EST_Track.EST_Track_t(self, i)
def ms_t(self, *args): return _EST_Track.EST_Track_ms_t(self, *args)
def fill_time(self, *args): return _EST_Track.EST_Track_fill_time(self, *args)
def fill(self, *args): return _EST_Track.EST_Track_fill(self, *args)
def sample(self, *args): return _EST_Track.EST_Track_sample(self, *args)
def shift(self): return _EST_Track.EST_Track_shift(self)
def start(self): return _EST_Track.EST_Track_start(self)
def end(self): return _EST_Track.EST_Track_end(self)
def load(self, *args): return _EST_Track.EST_Track_load(self, *args)
def save(self, *args): return _EST_Track.EST_Track_save(self, *args)
def set_break(self, *args): return _EST_Track.EST_Track_set_break(self, *args)
def set_value(self, *args): return _EST_Track.EST_Track_set_value(self, *args)
def val(self, *args): return _EST_Track.EST_Track_val(self, *args)
def track_break(self, *args): return _EST_Track.EST_Track_track_break(self, *args)
def prev_non_break(self, *args): return _EST_Track.EST_Track_prev_non_break(self, *args)
def next_non_break(self, *args): return _EST_Track.EST_Track_next_non_break(self, *args)
def empty(self): return _EST_Track.EST_Track_empty(self)
def index(self, *args): return _EST_Track.EST_Track_index(self, *args)
def index_below(self, *args): return _EST_Track.EST_Track_index_below(self, *args)
def num_frames(self): return _EST_Track.EST_Track_num_frames(self)
def length(self): return _EST_Track.EST_Track_length(self)
def num_channels(self): return _EST_Track.EST_Track_num_channels(self)
def num_aux_channels(self): return _EST_Track.EST_Track_num_aux_channels(self)
def equal_space(self): return _EST_Track.EST_Track_equal_space(self)
def single_break(self): return _EST_Track.EST_Track_single_break(self)
def set_equal_space(self, *args): return _EST_Track.EST_Track_set_equal_space(self, *args)
def set_single_break(self, *args): return _EST_Track.EST_Track_set_single_break(self, *args)
def __iadd__(self, *args): return _EST_Track.EST_Track___iadd__(self, *args)
def __ior__(self, *args): return _EST_Track.EST_Track___ior__(self, *args)
def load_channel_names(self, *args): return _EST_Track.EST_Track_load_channel_names(self, *args)
def save_channel_names(self, *args): return _EST_Track.EST_Track_save_channel_names(self, *args)
def channel_name(self, *args): return _EST_Track.EST_Track_channel_name(self, *args)
def aux_channel_name(self, *args): return _EST_Track.EST_Track_aux_channel_name(self, *args)
EST_Track_swigregister = _EST_Track.EST_Track_swigregister
EST_Track_swigregister(EST_Track)
def mean(*args):
return _EST_Track.mean(*args)
mean = _EST_Track.mean
def meansd(*args):
return _EST_Track.meansd(*args)
meansd = _EST_Track.meansd
def normalise(*args):
return _EST_Track.normalise(*args)
normalise = _EST_Track.normalise
| getvasanth/QtSpeech | festival/speech_tools/wrappers/python/EST_Track.py | Python | lgpl-3.0 | 8,370 |
from dynamic_graph.sot.application.stabilizer.scenarii.seqplay_lqr_twoDof_coupled_stabilizer import SeqPlayLqrTwoDofCoupledStabilizer
from dynamic_graph.sot.application.stabilizer.scenarii.hrp2_lqr_twoDof_coupled_stabilizer import HRP2LqrTwoDofCoupledStabilizer
from dynamic_graph.sot.core.meta_tasks import GainAdaptive
from dynamic_graph import plug
from dynamic_graph.sot.core.matrix_util import matrixToTuple
from dynamic_graph.sot.core import MatrixToUTheta, HomoToMatrix, HomoToRotation, Multiply_matrix_vector
from numpy import diag
class SeqPlayLqrTwoDofCoupledStabilizerHRP2(SeqPlayLqrTwoDofCoupledStabilizer):
def __init__(self,robot,sequenceFilename,trunkStabilize = False, hands = False, posture =False,forceSeqplay=True):
SeqPlayLqrTwoDofCoupledStabilizer.__init__(self,robot,sequenceFilename,trunkStabilize,hands,posture,forceSeqplay)
def createStabilizedCoMTask (self):
task = HRP2LqrTwoDofCoupledStabilizer(self.robot)
gain = GainAdaptive('gain'+task.name)
plug(self.comRef,task.comRef)
task.waistOriRef.value=(0,)*3
task.flexOriRef.value=(0,)*3
task.comDotRef.value=(0,)*3
task.waistVelRef.value=(0,)*3
task.flexAngVelRef.value=(0,)*3
plug(gain.gain, task.controlGain)
plug(task.error, gain.error)
return (task, gain)
def initTaskPosture(self):
# --- LEAST NORM
weight_ff = 0
weight_leg = 3
weight_knee = 5
weight_chest = 1
weight_chesttilt = 10
weight_head = 0.3
weight_arm = 1
#weight = diag( (weight_ff,)*6 + (weight_leg,)*12 + (weight_chest,)*2 + (weight_head,)*2 + (weight_arm,)*14)
#weight[9,9] = weight_knee
#weight[15,15] = weight_knee
#weight[19,19] = weight_chesttilt
weight = diag( (0,)*6+(1,)*30)
#weight = weight[6:,:]
self.featurePosture.jacobianIN.value = matrixToTuple(weight)
self.featurePostureDes.errorIN.value = self.robot.halfSitting
#mask = '1'*36
#mask = '1'*14+'0'*22
#self.tasks['posture'].controlSelec.value = mask
| amifsud/sot-stabilizer | src/dynamic_graph/sot/application/stabilizer/scenarii/seqplay_lqr_twoDof_coupled_stabilizer_hrp2.py | Python | lgpl-3.0 | 2,157 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'pyweed/gui/uic/SpinnerWidget.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_SpinnerWidget(object):
def setupUi(self, SpinnerWidget):
SpinnerWidget.setObjectName("SpinnerWidget")
SpinnerWidget.resize(306, 207)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Ignored, QtWidgets.QSizePolicy.Ignored)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(SpinnerWidget.sizePolicy().hasHeightForWidth())
SpinnerWidget.setSizePolicy(sizePolicy)
SpinnerWidget.setStyleSheet("QFrame { background-color: rgba(224,224,224,192)} \n"
"QLabel { background-color: transparent }")
self.verticalLayout = QtWidgets.QVBoxLayout(SpinnerWidget)
self.verticalLayout.setSpacing(0)
self.verticalLayout.setObjectName("verticalLayout")
self.icon = QtWidgets.QLabel(SpinnerWidget)
self.icon.setText("")
self.icon.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignHCenter)
self.icon.setObjectName("icon")
self.verticalLayout.addWidget(self.icon)
self.label = QtWidgets.QLabel(SpinnerWidget)
self.label.setText("")
self.label.setAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignTop)
self.label.setObjectName("label")
self.verticalLayout.addWidget(self.label)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.cancelButton = QtWidgets.QPushButton(SpinnerWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.cancelButton.sizePolicy().hasHeightForWidth())
self.cancelButton.setSizePolicy(sizePolicy)
self.cancelButton.setObjectName("cancelButton")
self.horizontalLayout.addWidget(self.cancelButton)
self.verticalLayout.addLayout(self.horizontalLayout)
self.verticalLayout.setStretch(0, 1)
self.verticalLayout.setStretch(1, 1)
self.retranslateUi(SpinnerWidget)
QtCore.QMetaObject.connectSlotsByName(SpinnerWidget)
def retranslateUi(self, SpinnerWidget):
_translate = QtCore.QCoreApplication.translate
SpinnerWidget.setWindowTitle(_translate("SpinnerWidget", "Form"))
self.cancelButton.setText(_translate("SpinnerWidget", "Cancel"))
| iris-edu/pyweed | pyweed/gui/uic/SpinnerWidget.py | Python | lgpl-3.0 | 2,674 |
import re
from hashlib import sha256
from vFense.plugins.patching import AppsKey
from vFense.plugins.patching._constants import CommonSeverityKeys
def build_app_id(name, version):
""" Return the 64 character hexdigest of the appid.
The app_id is generated by creating a hexdigest based of the
name and the version of the application.
Args:
name (str): The name of the application.
version (str): The version of the application.
Basic Usage:
>>> from vFense.plugins.patching.utils import build_app_id
>>> name = 'app_name'
>>> version = '2.2.0'
>>> build_app_id(name, version)
Returns:
String
'bab72e94f26a0af32a8e1fc8eef732b99150fac9bc17a720dac06f5474b53f08'
"""
app_id = name.encode('utf8') + version.encode('utf8')
return sha256(app_id).hexdigest()
def build_agent_app_id(agent_id, app_id):
""" Return the 64 character hexdigest of the
appid and agentid combined
Args:
agent_id (str): The 36 character UUID of the agent.
app_id (str): The 64 character hexdigest of the app id
Basic Usage:
>>> vFense.plugins.patching.patching import build_agent_app_id
>>> agent_id = '7f242ab8-a9d7-418f-9ce2-7bcba6c2d9dc'
>>> app_id = '15fa819554aca425d7f699e81a2097898b06f00a0f2dd6e8d51a18405360a6eb'
>>> build_agent_app_id(agent_id, app_id)
Returns:
String
'0009281d779a37cc73919656f6575de471237c3ed99f585160708defe8396d3d'
"""
agent_app_id = agent_id.encode('utf8') + app_id.encode('utf8')
return sha256(agent_app_id).hexdigest()
def get_proper_severity(severity):
if re.search(r'Critical|Important|Security', severity, re.IGNORECASE):
return CommonSeverityKeys.CRITICAL
elif re.search(r'Recommended|Moderate|Low|Bugfix', severity, re.IGNORECASE):
return CommonSeverityKeys.RECOMMENDED
return CommonSeverityKeys.OPTIONAL
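# A hedged sketch of how the helpers above compose. The app name/version are
# hypothetical; the agent id is the one used in the docstrings above.
#
#     >>> app_id = build_app_id('nginx', '1.4.6')            # 64-char hexdigest
#     >>> build_agent_app_id('7f242ab8-a9d7-418f-9ce2-7bcba6c2d9dc', app_id)  # agent-scoped id
#     >>> get_proper_severity('Important') == CommonSeverityKeys.CRITICAL
#     True
#     >>> get_proper_severity('Bugfix') == CommonSeverityKeys.RECOMMENDED
#     True
#     >>> get_proper_severity('Unrated') == CommonSeverityKeys.OPTIONAL
#     True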
| dtklein/vFense | tp/src/plugins/patching/utils/__init__.py | Python | lgpl-3.0 | 1,957 |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2022 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
# Do not modify this file! It is auto-generated by the document_psifiles
# script, from psi4topdir/psi4/include/psi4/psifiles.h
PSIF_OPTKING = 1 #
PSIF_GRAD = 11 # geometry optimization, geometry, and gradient; currently is an ASCII file like output.grad
PSIF_INTCO = 12 # internal coordinates file, currently is ASCII file like output.intco
PSIF_3INDEX = 16 #
PSIF_SO_TEI = 33 #
PSIF_SO_PK = 34 #
PSIF_OEI = 35 #
PSIF_SO_ERF_TEI = 36 #
PSIF_SO_ERFC_TEI = 37 #
PSIF_SO_R12 = 38 #
PSIF_SO_R12T1 = 39 #
PSIF_DERINFO = 40 #
PSIF_SO_PRESORT = 41 #
PSIF_CIVECT = 43 # CI vector from DETCI along with string and determinant info
PSIF_AO_DGDBX = 44 # B-field derivative AO integrals over GIAO Gaussians -- only bra-ket permutational symmetry holds
PSIF_AO_DGDBY = 45 #
PSIF_AO_DGDBZ = 46 #
PSIF_PSIMRCC_INTEGRALS = 50 #
PSIF_PSIMRCC_RESTART = 51 #
PSIF_MCSCF = 52 #
PSIF_TPDM_HALFTRANS = 53 #
PSIF_DETCAS = 60 #
PSIF_LIBTRANS_DPD = 61 # libtrans: All transformed integrals in DPD format are sent here by default
PSIF_LIBTRANS_A_HT = 62 # libtrans: Alpha half-transformed integrals in DPD format
PSIF_LIBTRANS_B_HT = 63 # libtrans: Beta half-transformed integrals in DPD format
PSIF_LIBDIIS = 64 # Storage for libdiis
PSIF_TPDM_PRESORT = 71 #
PSIF_MO_TEI = 72 #
PSIF_MO_OPDM = 73 #
PSIF_MO_TPDM = 74 #
PSIF_MO_LAG = 75 #
PSIF_AO_OPDM = 76 # PSIF_AO_OPDM also contains AO Lagrangian
PSIF_AO_TPDM = 77 #
PSIF_MO_R12 = 79 #
PSIF_MO_R12T2 = 80 #
PSIF_MO_AA_TEI = 81 #
PSIF_MO_BB_TEI = 82 #
PSIF_MO_AB_TEI = 83 #
PSIF_MO_AA_TPDM = 84 #
PSIF_MO_BB_TPDM = 85 #
PSIF_MO_AB_TPDM = 86 #
PSIF_AA_PRESORT = 87 # AA UHF twopdm presort file
PSIF_BB_PRESORT = 88 # BB UHF twopdm presort file
PSIF_AB_PRESORT = 89 # AB UHF twopdm presort file
PSIF_SO_PKSUPER1 = 92 #
PSIF_SO_PKSUPER2 = 93 #
PSIF_HALFT0 = 94 #
PSIF_HALFT1 = 95 #
PSIF_DFSCF_BJ = 97 # B Matrix containing 3-index tensor in AOs with J^-1/2 for use with DF-SCF
PSIF_CC_INFO = 100 #
PSIF_CC_OEI = 101 #
PSIF_CC_AINTS = 102 #
PSIF_CC_BINTS = 103 #
PSIF_CC_CINTS = 104 #
PSIF_CC_DINTS = 105 #
PSIF_CC_EINTS = 106 #
PSIF_CC_FINTS = 107 #
PSIF_CC_DENOM = 108 #
PSIF_CC_TAMPS = 109 #
PSIF_CC_GAMMA = 110 #
PSIF_CC_MISC = 111 #
PSIF_CC_HBAR = 112 #
PSIF_CC_OEI_NEW = 113 #
PSIF_CC_GAMMA_NEW = 114 #
PSIF_CC_AINTS_NEW = 115 #
PSIF_CC_BINTS_NEW = 116 #
PSIF_CC_CINTS_NEW = 117 #
PSIF_CC_DINTS_NEW = 118 #
PSIF_CC_EINTS_NEW = 119 #
PSIF_CC_FINTS_NEW = 120 #
PSIF_CC_LAMBDA = 121 #
PSIF_CC_RAMPS = 122 #
PSIF_CC_LAMPS = 123 #
PSIF_CC_LR = 124 #
PSIF_CC_DIIS_ERR = 125 #
PSIF_CC_DIIS_AMP = 126 #
PSIF_CC_TMP = 127 #
PSIF_CC_TMP0 = 128 #
PSIF_CC_TMP1 = 129 #
PSIF_CC_TMP2 = 130 #
PSIF_CC_TMP3 = 131 #
PSIF_CC_TMP4 = 132 #
PSIF_CC_TMP5 = 133 #
PSIF_CC_TMP6 = 134 #
PSIF_CC_TMP7 = 135 #
PSIF_CC_TMP8 = 135 #
PSIF_CC_TMP9 = 137 #
PSIF_CC_TMP10 = 138 #
PSIF_CC_TMP11 = 139 #
PSIF_EOM_D = 140 #
PSIF_EOM_CME = 141 #
PSIF_EOM_Cme = 142 #
PSIF_EOM_CMNEF = 143 #
PSIF_EOM_Cmnef = 144 #
PSIF_EOM_CMnEf = 145 #
PSIF_EOM_SIA = 146 #
PSIF_EOM_Sia = 147 #
PSIF_EOM_SIJAB = 148 #
PSIF_EOM_Sijab = 149 #
PSIF_EOM_SIjAb = 150 #
PSIF_EOM_R = 151 # holds residual
PSIF_CC_GLG = 152 # left-hand psi for g.s. parts of cc-density
PSIF_CC_GL = 153 # left-hand psi for e.s. parts of cc-density
PSIF_CC_GR = 154 # right-hand eigenvector for cc-density
PSIF_EOM_TMP1 = 155 # intermediates just for single contractions
PSIF_EOM_TMP0 = 156 # temporary copies of density
PSIF_EOM_TMP_XI = 157 # intermediates for xi computation
PSIF_EOM_XI = 158 # xi = dE/dt amplitudes
PSIF_EOM_TMP = 159 # intermediates used more than once
PSIF_CC3_HET1 = 160 # [H,e^T1]
PSIF_CC3_HC1 = 161 # [H,C1]
PSIF_CC3_HC1ET1 = 162 # [[H,e^T1],C1]
PSIF_CC3_MISC = 163 # various intermediates needed in CC3 codes
PSIF_CC2_HET1 = 164 # [H,e^T1]
PSIF_WK_PK = 165 # File to contain wK pre-sorted integrals for PK
PSIF_SCF_MOS = 180 # Save SCF orbitals for re-use later as guess, etc.
PSIF_DFMP2_AIA = 181 # Unfitted three-index MO ints for DFMP2
PSIF_DFMP2_QIA = 182 # Fitted-three index MO ints for DFMP2
PSIF_ADC = 183 # ADC
PSIF_ADC_SEM = 184 # ADC
PSIF_SAPT_DIMER = 190 # SAPT Two-Body Dimer
PSIF_SAPT_MONOMERA = 191 # SAPT Two-Body Mon A
PSIF_SAPT_MONOMERB = 192 # SAPT Two-Body Mon B
PSIF_SAPT_AA_DF_INTS = 193 # SAPT AA DF Ints
PSIF_SAPT_AB_DF_INTS = 194 # SAPT AB DF Ints
PSIF_SAPT_BB_DF_INTS = 195 # SAPT BB DF Ints
PSIF_SAPT_AMPS = 196 # SAPT Amplitudes
PSIF_SAPT_TEMP = 197 # SAPT Temporary worlds fastest code file
PSIF_SAPT_LRINTS = 198 # SAPT0 2-Body linear response LDA integrals
PSIF_3B_SAPT_TRIMER = 220 # SAPT Three-Body Trimer
PSIF_3B_SAPT_DIMER_AB = 221 # SAPT Three-Body Dimer AB
PSIF_3B_SAPT_DIMER_AC = 222 # SAPT Three-Body Dimer AC
PSIF_3B_SAPT_DIMER_BC = 223 # SAPT Three-Body Dimer BC
PSIF_3B_SAPT_MONOMER_A = 224 # SAPT Three-Body Mon A
PSIF_3B_SAPT_MONOMER_B = 225 # SAPT Three-Body Mon B
PSIF_3B_SAPT_MONOMER_C = 226 # SAPT Three-Body Mon C
PSIF_3B_SAPT_AA_DF_INTS = 227 #
PSIF_3B_SAPT_BB_DF_INTS = 228 #
PSIF_3B_SAPT_CC_DF_INTS = 229 #
PSIF_3B_SAPT_AMPS = 230 #
PSIF_DCC_IJAK = 250 # CEPA/CC (ij|ak)
PSIF_DCC_IJAK2 = 251 # CEPA/CC (ij|ak)
PSIF_DCC_ABCI = 252 # CEPA/CC (ia|bc)
PSIF_DCC_ABCI2 = 253 # CEPA/CC (ia|bc)
PSIF_DCC_ABCI3 = 254 # CEPA/CC (ia|bc)
PSIF_DCC_ABCI4 = 255 # CEPA/CC (ia|bc)
PSIF_DCC_ABCI5 = 256 # CEPA/CC (ia|bc)
PSIF_DCC_ABCD1 = 257 # CEPA/CC (ab|cd)+
PSIF_DCC_ABCD2 = 258 # CEPA/CC (ab|cd)-
PSIF_DCC_IJAB = 259 # CEPA/CC (ij|ab)
PSIF_DCC_IAJB = 260 # CEPA/CC (ia|jb)
PSIF_DCC_IJKL = 261 # CEPA/CC (ij|kl)
PSIF_DCC_OVEC = 262 # CEPA/CC old vectors for diis
PSIF_DCC_EVEC = 263 # CEPA/CC error vectors for diis
PSIF_DCC_R2 = 264 # CEPA/CC residual
PSIF_DCC_TEMP = 265 # CEPA/CC temporary storage
PSIF_DCC_T2 = 266 # CEPA/CC t2 amplitudes
PSIF_DCC_QSO = 267 # DFCC 3-index integrals
PSIF_DCC_SORT_START = 270 # CEPA/CC integral sort starting file number
PSIF_SAPT_CCD = 271 # SAPT2+ CCD Utility File
PSIF_HESS = 272 # Hessian Utility File
PSIF_OCC_DPD = 273 # OCC DPD
PSIF_OCC_DENSITY = 274 # OCC Density
PSIF_OCC_IABC = 275 # OCC out-of-core <IA|BC>
PSIF_DFOCC_INTS = 276 # DFOCC Integrals
PSIF_DFOCC_AMPS = 277 # DFOCC Amplitudes
PSIF_DFOCC_DENS = 278 # DFOCC PDMs
PSIF_DFOCC_IABC = 279 # DFOCC (IA|BC)
PSIF_DFOCC_ABIC = 280 # DFOCC <AB|IC>
PSIF_DFOCC_MIABC = 281 # DFOCC M_iabc
PSIF_DFOCC_TEMP = 282 # DFOCC temporary storage
PSIF_SAD = 300 # A SAD file (File for SAD related quantities
PSIF_CI_HD_FILE = 350 # DETCI H diagonal
PSIF_CI_C_FILE = 351 # DETCI CI coeffs
PSIF_CI_S_FILE = 352 # DETCI sigma coeffs
PSIF_CI_D_FILE = 353 # DETCI D correction vectors
PSIF_DCT_DPD = 400 # DCT DPD handle
PSIF_DCT_DENSITY = 401 # DCT density
| psi4/psi4 | psi4/driver/psifiles.py | Python | lgpl-3.0 | 10,284 |
#coding: utf-8
"""
@Author: Well
@Date: 2014 - 04 - 16
"""
import time
def login(self, username, password):
browser = self.browser
# Enter the username
browser.find_element_by_id('user_login').send_keys(username)
# Enter the password
browser.find_element_by_id('user_pass').send_keys(password)
# Click the login button
browser.find_element_by_id('wp-submit').click()
# Wait a few seconds for the page to load
time.sleep(5) | neiltest/neil_test_selenium | selenium_test/test_case/login.py | Python | unlicense | 433 |
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
extract_attributes,
int_or_none,
str_to_int,
unified_strdate,
url_or_none,
)
class YouPornIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?youporn\.com/(?:watch|embed)/(?P<id>\d+)(?:/(?P<display_id>[^/?#&]+))?'
_TESTS = [{
'url': 'http://www.youporn.com/watch/505835/sex-ed-is-it-safe-to-masturbate-daily/',
'md5': '3744d24c50438cf5b6f6d59feb5055c2',
'info_dict': {
'id': '505835',
'display_id': 'sex-ed-is-it-safe-to-masturbate-daily',
'ext': 'mp4',
'title': 'Sex Ed: Is It Safe To Masturbate Daily?',
'description': 'Love & Sex Answers: http://bit.ly/DanAndJenn -- Is It Unhealthy To Masturbate Daily?',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 210,
'uploader': 'Ask Dan And Jennifer',
'upload_date': '20101217',
'average_rating': int,
'view_count': int,
'categories': list,
'tags': list,
'age_limit': 18,
},
'skip': 'This video has been disabled',
}, {
# Unknown uploader
'url': 'http://www.youporn.com/watch/561726/big-tits-awesome-brunette-on-amazing-webcam-show/?from=related3&al=2&from_id=561726&pos=4',
'info_dict': {
'id': '561726',
'display_id': 'big-tits-awesome-brunette-on-amazing-webcam-show',
'ext': 'mp4',
'title': 'Big Tits Awesome Brunette On amazing webcam show',
'description': 'http://sweetlivegirls.com Big Tits Awesome Brunette On amazing webcam show.mp4',
'thumbnail': r're:^https?://.*\.jpg$',
'uploader': 'Unknown',
'upload_date': '20110418',
'average_rating': int,
'view_count': int,
'categories': list,
'tags': list,
'age_limit': 18,
},
'params': {
'skip_download': True,
},
'skip': '404',
}, {
'url': 'https://www.youporn.com/embed/505835/sex-ed-is-it-safe-to-masturbate-daily/',
'only_matching': True,
}, {
'url': 'http://www.youporn.com/watch/505835',
'only_matching': True,
}, {
'url': 'https://www.youporn.com/watch/13922959/femdom-principal/',
'only_matching': True,
}]
@staticmethod
def _extract_urls(webpage):
return re.findall(
r'<iframe[^>]+\bsrc=["\']((?:https?:)?//(?:www\.)?youporn\.com/embed/\d+)',
webpage)
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
display_id = mobj.group('display_id') or video_id
definitions = self._download_json(
'https://www.youporn.com/api/video/media_definitions/%s/' % video_id,
display_id)
formats = []
for definition in definitions:
if not isinstance(definition, dict):
continue
video_url = url_or_none(definition.get('videoUrl'))
if not video_url:
continue
f = {
'url': video_url,
'filesize': int_or_none(definition.get('videoSize')),
}
height = int_or_none(definition.get('quality'))
# Video URL's path looks like this:
# /201012/17/505835/720p_1500k_505835/YouPorn%20-%20Sex%20Ed%20Is%20It%20Safe%20To%20Masturbate%20Daily.mp4
# /201012/17/505835/vl_240p_240k_505835/YouPorn%20-%20Sex%20Ed%20Is%20It%20Safe%20To%20Masturbate%20Daily.mp4
# /videos/201703/11/109285532/1080P_4000K_109285532.mp4
# We will benefit from it by extracting some metadata
mobj = re.search(r'(?P<height>\d{3,4})[pP]_(?P<bitrate>\d+)[kK]_\d+', video_url)
if mobj:
if not height:
height = int(mobj.group('height'))
bitrate = int(mobj.group('bitrate'))
f.update({
'format_id': '%dp-%dk' % (height, bitrate),
'tbr': bitrate,
})
f['height'] = height
formats.append(f)
self._sort_formats(formats)
webpage = self._download_webpage(
'http://www.youporn.com/watch/%s' % video_id, display_id,
headers={'Cookie': 'age_verified=1'})
title = self._html_search_regex(
r'(?s)<div[^>]+class=["\']watchVideoTitle[^>]+>(.+?)</div>',
webpage, 'title', default=None) or self._og_search_title(
webpage, default=None) or self._html_search_meta(
'title', webpage, fatal=True)
description = self._html_search_regex(
r'(?s)<div[^>]+\bid=["\']description["\'][^>]*>(.+?)</div>',
webpage, 'description',
default=None) or self._og_search_description(
webpage, default=None)
thumbnail = self._search_regex(
r'(?:imageurl\s*=|poster\s*:)\s*(["\'])(?P<thumbnail>.+?)\1',
webpage, 'thumbnail', fatal=False, group='thumbnail')
duration = int_or_none(self._html_search_meta(
'video:duration', webpage, 'duration', fatal=False))
uploader = self._html_search_regex(
r'(?s)<div[^>]+class=["\']submitByLink["\'][^>]*>(.+?)</div>',
webpage, 'uploader', fatal=False)
upload_date = unified_strdate(self._html_search_regex(
[r'UPLOADED:\s*<span>([^<]+)',
r'Date\s+[Aa]dded:\s*<span>([^<]+)',
r'(?s)<div[^>]+class=["\']videoInfo(?:Date|Time)["\'][^>]*>(.+?)</div>'],
webpage, 'upload date', fatal=False))
age_limit = self._rta_search(webpage)
view_count = None
views = self._search_regex(
r'(<div[^>]+\bclass=["\']js_videoInfoViews["\']>)', webpage,
'views', default=None)
if views:
view_count = str_to_int(extract_attributes(views).get('data-value'))
comment_count = str_to_int(self._search_regex(
r'>All [Cc]omments? \(([\d,.]+)\)',
webpage, 'comment count', default=None))
def extract_tag_box(regex, title):
tag_box = self._search_regex(regex, webpage, title, default=None)
if not tag_box:
return []
return re.findall(r'<a[^>]+href=[^>]+>([^<]+)', tag_box)
categories = extract_tag_box(
r'(?s)Categories:.*?</[^>]+>(.+?)</div>', 'categories')
tags = extract_tag_box(
r'(?s)Tags:.*?</div>\s*<div[^>]+class=["\']tagBoxContent["\'][^>]*>(.+?)</div>',
'tags')
return {
'id': video_id,
'display_id': display_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'uploader': uploader,
'upload_date': upload_date,
'view_count': view_count,
'comment_count': comment_count,
'categories': categories,
'tags': tags,
'age_limit': age_limit,
'formats': formats,
}
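# Hedged usage note: youtube-dl picks this extractor by matching _VALID_URL,
# so it is normally exercised via the command line rather than imported
# directly. Using one of the URLs from the test fixtures above:
#
#     youtube-dl "https://www.youporn.com/watch/505835/sex-ed-is-it-safe-to-masturbate-daily/"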
| rg3/youtube-dl | youtube_dl/extractor/youporn.py | Python | unlicense | 7,294 |
#!/usr/bin/env python
#
# ESP8266 make firmware image
#
# Arguments: dir of *.bin
#
# (c) vad7
import argparse
import os
argp = argparse.ArgumentParser()
argp.add_argument('flashsize', action='store', help='Flash size, kb')
argp.add_argument('dir', action='store', help='Directory of *.bin')
args = argp.parse_args()
fout_name = args.dir + "firmware.bin"
fout = open(fout_name, "wb")
fin = open(args.dir + "0x00000.bin", "rb")
data = fin.read()
fin.close()
data += b"\xFF" * (0x7000 - len(data))
fin = open(args.dir + "0x07000.bin", "rb")
data2 = fin.read()
fin.close()
data = data + data2
fout.write(data)
fout.flush()
size = os.fstat(fout.fileno()).st_size
fout.close()
print "Make: " + fout_name
if int(args.flashsize) == 512:
webfs = (size + 0xFFF) & 0xFF000
maxota = (0x7B000 / 2) & 0xFF000
else:
webfs = 0x80000
maxota = 0x7B000
print "Firmware size: " + str(size) + ", WebFS addr: " + str(webfs) + ", Max OTA size: " + str(maxota)
print "Space available for OTA: " + str(maxota - size)
| vad7/PowerMeter | bin/make_firmware_image.py | Python | unlicense | 1,000 |
import sublime
import sublime_plugin
from html.entities import codepoint2name as cp2n
class EncodeHtmlEntities(sublime_plugin.TextCommand):
def run(self, edit, **args):
view = self.view
for sel in view.sel():
buf = []
for pt in range(sel.begin(), sel.end()):
ch = view.substr(pt)
ch_ord = ord(ch)
if (not view.match_selector(pt, ('meta.tag - string, constant.character.entity'))
and ch_ord in cp2n
and not (ch in ('"', "'")
and view.match_selector(pt, 'string'))):
ch = '&%s;' % cp2n[ch_ord]
buf.append(ch)
view.replace(edit, sel, ''.join(buf))
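# Hedged usage note: Sublime Text derives the command name from the class
# name, so the conversion can be triggered from a key binding or the console
# roughly as follows (the binding itself is an assumption, not part of this
# file):
#
#     view.run_command('encode_html_entities')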
| twolfson/sublime-files | Packages/HTML/encode_html_entities.py | Python | unlicense | 764 |
#==========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
#
# Example on the use of the SmoothingRecursiveGaussianImageFilter
#
import itk
from sys import argv
itk.auto_progress(2)
dim = 2
IType = itk.Image[itk.F, dim]
OIType = itk.Image[itk.UC, dim]
reader = itk.ImageFileReader[IType].New( FileName=argv[1] )
filter = itk.SmoothingRecursiveGaussianImageFilter[IType, IType].New( reader,
Sigma=eval( argv[3] ) )
cast = itk.RescaleIntensityImageFilter[IType, OIType].New(filter,
OutputMinimum=0,
OutputMaximum=255)
writer = itk.ImageFileWriter[OIType].New( cast, FileName=argv[2] )
writer.Update()
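# Hedged usage note: the example reads its input image, output image and sigma
# from the command line (argv[1..3]); the file names below are illustrative:
#
#     python SmoothingRecursiveGaussianImageFilter.py input.png smoothed.png 2.0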
| daviddoria/itkHoughTransform | Wrapping/WrapITK/Languages/Python/Tests/SmoothingRecursiveGaussianImageFilter.py | Python | apache-2.0 | 1,366 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Take uploaded bucket contents and register them as disk images (AMIs).
Requires decryption using keys in the manifest.
"""
# TODO(jesse): Got these from Euca2ools, will need to revisit them
import binascii
import glob
import json
import os
import shutil
import tarfile
import tempfile
from xml.etree import ElementTree
from nova import exception
from nova import flags
from nova import utils
from nova.objectstore import bucket
FLAGS = flags.FLAGS
flags.DEFINE_string('images_path', utils.abspath('../images'),
'path to decrypted images')
class Image(object):
def __init__(self, image_id):
self.image_id = image_id
self.path = os.path.abspath(os.path.join(FLAGS.images_path, image_id))
if not self.path.startswith(os.path.abspath(FLAGS.images_path)) or \
not os.path.isdir(self.path):
raise exception.NotFound
def delete(self):
for fn in ['info.json', 'image']:
try:
os.unlink(os.path.join(self.path, fn))
except:
pass
try:
os.rmdir(self.path)
except:
pass
def is_authorized(self, context):
try:
return self.metadata['isPublic'] or context.user.is_admin() or self.metadata['imageOwnerId'] == context.project.id
except:
return False
def set_public(self, state):
md = self.metadata
md['isPublic'] = state
with open(os.path.join(self.path, 'info.json'), 'w') as f:
json.dump(md, f)
@staticmethod
def all():
images = []
for fn in glob.glob("%s/*/info.json" % FLAGS.images_path):
try:
image_id = fn.split('/')[-2]
images.append(Image(image_id))
except:
pass
return images
@property
def owner_id(self):
return self.metadata['imageOwnerId']
@property
def metadata(self):
with open(os.path.join(self.path, 'info.json')) as f:
return json.load(f)
@staticmethod
def create(image_id, image_location, context):
image_path = os.path.join(FLAGS.images_path, image_id)
os.makedirs(image_path)
bucket_name = image_location.split("/")[0]
manifest_path = image_location[len(bucket_name)+1:]
bucket_object = bucket.Bucket(bucket_name)
manifest = ElementTree.fromstring(bucket_object[manifest_path].read())
image_type = 'machine'
try:
kernel_id = manifest.find("machine_configuration/kernel_id").text
if kernel_id == 'true':
image_type = 'kernel'
except:
pass
try:
ramdisk_id = manifest.find("machine_configuration/ramdisk_id").text
if ramdisk_id == 'true':
image_type = 'ramdisk'
except:
pass
info = {
'imageId': image_id,
'imageLocation': image_location,
'imageOwnerId': context.project.id,
'isPublic': False, # FIXME: grab public from manifest
'architecture': 'x86_64', # FIXME: grab architecture from manifest
'type' : image_type
}
def write_state(state):
info['imageState'] = state
with open(os.path.join(image_path, 'info.json'), "w") as f:
json.dump(info, f)
write_state('pending')
encrypted_filename = os.path.join(image_path, 'image.encrypted')
with open(encrypted_filename, 'w') as f:
for filename in manifest.find("image").getiterator("filename"):
shutil.copyfileobj(bucket_object[filename.text].file, f)
write_state('decrypting')
# FIXME: grab kernelId and ramdiskId from bundle manifest
encrypted_key = binascii.a2b_hex(manifest.find("image/ec2_encrypted_key").text)
encrypted_iv = binascii.a2b_hex(manifest.find("image/ec2_encrypted_iv").text)
cloud_private_key = os.path.join(FLAGS.ca_path, "private/cakey.pem")
decrypted_filename = os.path.join(image_path, 'image.tar.gz')
Image.decrypt_image(encrypted_filename, encrypted_key, encrypted_iv, cloud_private_key, decrypted_filename)
write_state('untarring')
image_file = Image.untarzip_image(image_path, decrypted_filename)
shutil.move(os.path.join(image_path, image_file), os.path.join(image_path, 'image'))
write_state('available')
os.unlink(decrypted_filename)
os.unlink(encrypted_filename)
@staticmethod
def decrypt_image(encrypted_filename, encrypted_key, encrypted_iv, cloud_private_key, decrypted_filename):
key, err = utils.execute('openssl rsautl -decrypt -inkey %s' % cloud_private_key, encrypted_key)
if err:
raise exception.Error("Failed to decrypt private key: %s" % err)
iv, err = utils.execute('openssl rsautl -decrypt -inkey %s' % cloud_private_key, encrypted_iv)
if err:
raise exception.Error("Failed to decrypt initialization vector: %s" % err)
out, err = utils.execute('openssl enc -d -aes-128-cbc -in %s -K %s -iv %s -out %s' % (encrypted_filename, key, iv, decrypted_filename))
if err:
raise exception.Error("Failed to decrypt image file %s : %s" % (encrypted_filename, err))
@staticmethod
def untarzip_image(path, filename):
tar_file = tarfile.open(filename, "r|gz")
tar_file.extractall(path)
image_file = tar_file.getnames()[0]
tar_file.close()
return image_file
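# A hedged end-to-end sketch of the registration flow implemented above. `ctx`
# stands in for a request context carrying user/project; the ids and bucket
# path are illustrative:
#
#     Image.create('ami-12345678', 'my-bucket/my-image.manifest.xml', ctx)
#     img = Image('ami-12345678')
#     img.set_public(True)
#     img.metadata['imageState']   # 'available' once decrypt/untar completes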
| movmov/cc | nova/objectstore/image.py | Python | apache-2.0 | 6,265 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
from six.moves.urllib import parse as urlparse
from keystoneclient import base
from keystoneclient.v3.contrib.oauth1 import utils
try:
from oauthlib import oauth1
except ImportError:
oauth1 = None
class RequestToken(base.Resource):
def authorize(self, roles):
try:
retval = self.manager.authorize(self.id, roles)
self = retval
except Exception:
retval = None
return retval
class RequestTokenManager(base.CrudManager):
"""Manager class for manipulating identity OAuth request tokens."""
resource_class = RequestToken
def authorize(self, request_token, roles):
"""Authorize a request token with specific roles.
Utilize Identity API operation:
PUT /OS-OAUTH1/authorize/$request_token_id
:param request_token: a request token that will be authorized, and
can be exchanged for an access token.
:param roles: a list of roles, that will be delegated to the user.
"""
request_id = urlparse.quote(base.getid(request_token))
endpoint = utils.OAUTH_PATH + '/authorize/%s' % (request_id)
body = {'roles': [{'id': base.getid(r_id)} for r_id in roles]}
return self._put(endpoint, body, "token")
def create(self, consumer_key, consumer_secret, project):
endpoint = utils.OAUTH_PATH + '/request_token'
headers = {'requested_project_id': base.getid(project)}
oauth_client = oauth1.Client(consumer_key,
client_secret=consumer_secret,
signature_method=oauth1.SIGNATURE_HMAC,
callback_uri="oob")
url = self.client.auth_url.rstrip("/") + endpoint
url, headers, body = oauth_client.sign(url, http_method='POST',
headers=headers)
resp, body = self.client.post(endpoint, headers=headers)
token = utils.get_oauth_token_from_body(resp.content)
return self.resource_class(self, token)
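    # Editor's note: a minimal usage sketch, not part of the original module;
    # all identifiers below are hypothetical:
    #
    #   token = manager.create(consumer_key, consumer_secret, project)
    #   manager.authorize(token, ['<role-id>'])   # PUT /OS-OAUTH1/authorize/<token-id>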
| alexpilotti/python-keystoneclient | keystoneclient/v3/contrib/oauth1/request_tokens.py | Python | apache-2.0 | 2,659 |
from gluon.storage import Storage
settings = Storage()
settings.migrate = True
settings.title = 'Syndicate Metadata Service'
settings.subtitle = 'Princeton University'
settings.author = 'Jude Nelson'
settings.author_email = '[email protected]'
settings.keywords = ''
settings.description = 'Web front-end for the Syndicate Metadata Service'
settings.layout_theme = 'Default'
settings.database_uri = ''
settings.security_key = '96b4dcd8-c27d-4b0e-8fbf-8a2ccc0e4db4'
settings.email_server = ''
settings.email_sender = ''
settings.email_login = ''
settings.login_method = 'local'
settings.login_config = ''
settings.plugins = []
| jcnelson/syndicate | old/md-service/SMDS/web2py/applications/SMDS/models/0.py | Python | apache-2.0 | 635 |
import responses
import unittest
from unittest import skipIf
from unittest.mock import mock_open, patch, ANY, call
from docopt import docopt, DocoptExit
from io import StringIO
from collections import OrderedDict
import sys
import nio_cli.cli as cli
from nio_cli.commands.base import Base
from nio.block.terminals import input
try:
import niocore
niocore_installed = True
except:
niocore_installed = False
class TestCLI(unittest.TestCase):
def parse_args(self, command):
return docopt(cli.__doc__, command.split(' '))
def test_new_arguments(self):
"""'new' requires a project-name"""
args = self.parse_args('new project')
self.assertEqual(args['<project-name>'], 'project')
with self.assertRaises(DocoptExit):
self.parse_args('new')
    def test_buildspec_arguments(self):
"""'buildspec' requires a repo-name"""
args = self.parse_args('buildspec repo')
self.assertEqual(args['<repo-name>'], 'repo')
with self.assertRaises(DocoptExit):
self.parse_args('buildspec')
    def test_buildreadme_arguments(self):
        """'buildreadme' takes no args"""
args = self.parse_args('buildreadme')
with self.assertRaises(DocoptExit):
self.parse_args('buildreadme some-args')
def test_new_command(self):
"""Clone the project template from GitHub"""
with patch('nio_cli.commands.new.os.path.isdir', return_value=True), \
patch('nio_cli.commands.new.subprocess.call') as call, \
patch('nio_cli.commands.new.config_project') as config:
self._patched_new_command(call, config)
def _patched_new_command(self, call, config):
self._main('new', **{
'<project-name>': 'project',
})
config.assert_called_once_with(name='project',
niohost='127.0.0.1',
nioport='8181',
pubkeeper_hostname=None,
pubkeeper_token=None,
ssl=True,
instance_id=None)
self.assertEqual(call.call_args_list[0][0][0], (
'git clone '
'git://github.com/niolabs/project_template.git project'
))
self.assertEqual(call.call_args_list[1][0][0], (
'cd ./project '
'&& git submodule update --init --recursive'
))
self.assertEqual(call.call_args_list[2][0][0], (
'cd ./project '
'&& git remote remove origin '
'&& git commit --amend --reset-author --quiet -m "Initial commit"'
))
def test_new_command_set_user(self):
"""Clone the project template from GitHub"""
with patch('nio_cli.commands.new.os.path.isdir', return_value=True), \
patch('nio_cli.commands.new.subprocess.call') as call, \
patch('nio_cli.commands.new.set_user') as user, \
patch('nio_cli.commands.new.config_project') as config:
self._patched_new_command_set_user(call, user, config)
def _patched_new_command_set_user(self, call, user, config):
self._main('new', **{
'<project-name>': 'project',
'--username': 'new_user',
'--password': 'new_password',
})
user.assert_called_once_with('project',
'new_user',
'new_password',
True)
config.assert_called_once_with(name='project',
niohost='127.0.0.1',
nioport='8181',
pubkeeper_hostname=None,
pubkeeper_token=None,
ssl=True,
instance_id=None)
self.assertEqual(call.call_args_list[0][0][0], (
'git clone '
'git://github.com/niolabs/project_template.git project'
))
self.assertEqual(call.call_args_list[1][0][0], (
'cd ./project '
'&& git submodule update --init --recursive'
))
self.assertEqual(call.call_args_list[2][0][0], (
'cd ./project '
'&& git remote remove origin '
'&& git commit --amend --reset-author --quiet -m "Initial commit"'
))
def test_new_command_template(self):
"""Clone the project template from GitHub"""
with patch('nio_cli.commands.new.os.path.isdir', return_value=True), \
patch('nio_cli.commands.new.subprocess.call') as call, \
patch('nio_cli.commands.new.config_project') as config:
self._patched_new_command_template(call, config)
def _patched_new_command_template(self, call, config):
with patch('nio_cli.commands.new.os.walk') as patched_os_walk:
join_module = 'nio_cli.commands.new.os.path.join'
with patch(join_module, return_value='join'):
patched_os_walk.return_value = [
('root', ('dirs'), ['requirements.txt'])]
self._main('new', **{
'<project-name>': 'project',
'<template>': 'my_template',
'--pubkeeper-hostname': 'pkhost',
'--pubkeeper-token': 'pktoken',
'--instance-id': 'abc-123',
})
config.assert_called_once_with(name='project',
niohost='127.0.0.1',
nioport='8181',
pubkeeper_hostname='pkhost',
pubkeeper_token='pktoken',
ssl=True,
instance_id='abc-123')
self.assertEqual(call.call_args_list[0][0][0], (
'git clone '
'git://github.com/niolabs/my_template.git project'
))
self.assertEqual(call.call_args_list[1][0][0], (
'cd ./project '
'&& git submodule update --init --recursive'
))
self.assertEqual(call.call_args_list[2][0][0], (
[sys.executable, '-m', 'pip', 'install', '-r', 'join']
))
self.assertEqual(call.call_args_list[3][0][0], (
'cd ./project '
'&& git remote remove origin '
'&& git commit --amend --reset-author --quiet '
'-m "Initial commit"'
))
def test_new_command_with_failed_clone(self):
"""Cleanly handle new command when 'git clone' fails"""
isdir_path = 'nio_cli.commands.new.os.path.isdir'
with patch(isdir_path, return_value=False) as isdir, \
patch('nio_cli.commands.new.subprocess.call') as call:
self._main('new', **{
'<project-name>': 'project',
'--username': 'user',
'--password': 'pwd'
})
self.assertEqual(call.call_count, 1)
isdir.assert_called_once_with('project')
@responses.activate
def test_add_command(self):
"""Clone specified blocks as submodules"""
responses.add(responses.POST,
'http://127.0.0.1:8181/project/blocks')
self._main('add', **{
'<block-repo>': ['block1'],
'--project': '.'
})
self.assertEqual(len(responses.calls), 1)
self._main('add', **{
'<block-repo>': ['block1'],
'--project': '.',
'--upgrade': True
})
self.assertEqual(len(responses.calls), 3)
@responses.activate
def test_list_command(self):
"""List blocks or services from the rest api"""
service_response = [{'api': 'response'}, {'another': 'service'}]
responses.add(responses.GET,
'http://127.0.0.1:8181/services',
json=service_response)
with patch('builtins.print') as print:
self._main('list', **{
"services": True,
'--username': 'user',
'--password': 'pwd'
})
self.assertEqual(len(responses.calls), 1)
self.assertEqual(print.call_count, len(service_response))
for index, service in enumerate(service_response):
self.assertDictEqual(
print.call_args_list[index][0][0], service)
@responses.activate
def test_list_command_with_id(self):
"""List blocks or services from the rest api"""
blk_response = {'id1': {'name': 'name1', 'id': 'id1'},
'id2': {'name': 'name2', 'id': 'id2'}}
responses.add(responses.GET,
'http://127.0.0.1:8181/blocks',
json=blk_response)
with patch('builtins.print') as mock_print:
self._main('list', **{
"services": False,
'--username': 'user',
'--password': 'pwd'
})
self.assertEqual(len(responses.calls), 1)
self.assertEqual(mock_print.call_count, 2)
call_args = [arg[0] for arg in mock_print.call_args_list]
for blk in blk_response:
# the order of responses is not guaranteed
self.assertTrue(
(blk_response[blk]['id'], blk_response[blk]['name'])
in call_args)
@responses.activate
def test_shutdown_command(self):
"""Shutdown nio through the rest api"""
responses.add(responses.GET, 'http://127.0.0.1:8181/shutdown')
self._main('shutdown', **{
'--username': 'user',
'--password': 'pwd'
})
self.assertEqual(len(responses.calls), 1)
@responses.activate
def test_command_command(self):
"""Command a nio block through the rest api"""
responses.add(responses.POST,
'http://127.0.0.1:8181/services/service/block/command')
self._main('command', **{
'<command-name>': 'command',
'<service-name>': 'service',
'<block-name>': 'block',
'--username': 'user',
'--password': 'pwd'
})
self.assertEqual(len(responses.calls), 1)
def test_publishblock_command(self):
"""Create spec.json file from block class"""
from nio.block.base import Block
from nio.properties import StringProperty, VersionProperty
from nio.command import command
@command('commandit')
@command('commander')
@input("testInput")
@input("testInput2")
class SampleBlock1(Block):
version = VersionProperty('1.2.3')
str_prop = StringProperty(
title='String Prop',
default='default string',
)
another = StringProperty(
title='Another Prop',
)
get_block_class_path = 'nio_cli.utils.spec._get_block_class'
requests_path = 'nio_cli.commands.publishblock.requests'
sample_spec = """
{
"nio/SampleBlock1": {
"description": "This is the description",
"outputs": "The original output",
"from_python": "myfile.SampleBlock1"
}
}
"""
sample_release = """
{
"nio/SampleBlock1": {
"language": "Python",
"from_python": "myfile.SampleBlock1",
"url": "git://myblock"
}
}
"""
with patch('builtins.open', new_callable=mock_open) as open_calls, \
patch(get_block_class_path) as mock_get_class, \
patch(requests_path) as mock_requests:
open_calls.side_effect = [
mock_open(read_data=sample_spec).return_value,
mock_open(read_data=sample_release).return_value
]
# mocks to load existing spec.json and to discover blocks
mock_get_class.return_value = SampleBlock1
            # Execute on repo 'myblocks'
self._main('publishblock', **{
'--api-url': 'http://fake',
'--api-token': 'token'})
mock_get_class.assert_called_with('myfile.SampleBlock1')
self.maxDiff = None
# One POST for spec and one for release
self.assertEqual(mock_requests.post.call_count, 2)
spec_call_args = mock_requests.post.call_args_list[0][1]['json']
release_call_args = mock_requests.post.call_args_list[1][1]['json']
self.assertDictEqual(spec_call_args, {
'nio/SampleBlock1': {
'description': 'This is the description',
'commands': {
'commander': {'params': {}},
'commandit': {'params': {}}
},
'inputs': {
'testInput': {'description': ''},
'testInput2': {'description': ''}
},
'outputs': 'The original output', # orig output preserved
'properties': {
'another': {
'default': None,
'title': 'Another Prop',
'type': 'StringType'
},
'str_prop': {
'default': 'default string',
'title': 'String Prop',
'type': 'StringType'
}
},
'version': '1.2.0' # Make sure only major.minor
}
})
self.assertDictEqual(release_call_args, {
'nio/SampleBlock1': {
"language": "Python",
"version": "1.2.3",
"url": "git://myblock"
}
})
@skipIf(not niocore_installed, 'niocore required for buildrelease')
def test_buildrelease_command(self):
"""create release.json from block class"""
from nio.block.base import Block
from nio.properties import StringProperty, VersionProperty
from nio.command import command
@command('commandit')
@command('commander')
class SampleBlock1(Block):
version = VersionProperty('0.1.0')
str_prop = StringProperty(
title='String Prop',
default='default string',
)
another = StringProperty(
title='Another Prop',
)
class SampleBlock2(Block):
# if a block has no configured version prop, the version is 0.0.0
# by default
pass
discover_path = \
'nio_cli.commands.buildrelease.Discover.discover_classes'
json_dump_path = 'nio_cli.commands.buildrelease.json.dump'
file_exists_path = 'nio_cli.commands.buildrelease.os.path.exists'
subprocess_call_path = \
'nio_cli.commands.buildrelease.subprocess.check_output'
with patch(discover_path) as discover_classes, \
patch('builtins.open', mock_open()) as mock_file, \
patch(file_exists_path) as mock_file_exists, \
patch(json_dump_path) as mock_json_dump, \
patch(subprocess_call_path) as check_output:
# mocks to load existing spec.json and to discover blocks
mock_file_exists.return_value = True
discover_classes.return_value = [SampleBlock1, SampleBlock2]
check_output.return_value = \
b'origin [email protected]:niolabs/myblocks.git (fetch)'
            # Execute on repo 'myblocks'
self._main('buildrelease', **{'<repo-name>': 'myblocks'})
discover_classes.assert_called_once_with(
'blocks.myblocks', ANY, ANY)
# json dump to file with formatting
mock_json_dump.assert_called_once_with(
{
'nio/SampleBlock2': {
'version': '0.0.0', 'language': 'Python',
'url': 'git://github.com/niolabs/myblocks.git'
},
'nio/SampleBlock1': {
'version': '0.1.0', 'language': 'Python',
'url': 'git://github.com/niolabs/myblocks.git'}
},
mock_file(),
indent=2,
sort_keys=True)
def test_newblock_command(self):
"""Clone the block template from GitHub"""
with patch('nio_cli.commands.new.subprocess.call') as call, \
patch('builtins.open',
mock_open(
read_data='Example ..example_block TestExample')
) as mock_file, \
patch("nio_cli.commands.newblock.os") as os_mock, \
patch("nio_cli.commands.newblock.move") as move_mock:
self._main('newblock', **{'<block-name>': 'yaba_daba'})
self.assertEqual(call.call_args_list[0][0][0], (
'git clone '
'git://github.com/nio-blocks/block_template.git yaba_daba'
))
self.assertEqual(mock_file.call_args_list[0][0],
('./yaba_daba/yaba_daba_block.py',))
self.assertEqual(
mock_file.return_value.write.call_args_list[0][0][0],
'YabaDaba ..example_block TestYabaDaba')
# assert calls to rename block files
self.assertEqual(os_mock.remove.call_count, 1)
self.assertEqual(move_mock.call_count, 3)
def test_blockcheck_command(self):
self.maxDiff = None
file_exists_path = 'nio_cli.commands.blockcheck.os.path.exists'
getcwd_path = 'nio_cli.commands.blockcheck.os.getcwd'
listdir_path = 'nio_cli.commands.blockcheck.os.listdir'
subprocess_path = 'nio_cli.commands.blockcheck.subprocess.call'
sys_exit_path = 'nio_cli.commands.blockcheck.sys.exit'
print_path = 'nio_cli.commands.blockcheck.sys.stdout'
json_load_path = 'nio_cli.commands.blockcheck.json.load'
with patch('builtins.open', mock_open()) as mock_file, \
patch(file_exists_path) as mock_file_exists, \
patch(getcwd_path) as mock_getcwd, \
patch(listdir_path) as mock_listdir, \
patch(subprocess_path) as mock_subprocess_call, \
patch(sys_exit_path) as mock_sys_exit, \
patch(print_path, new_callable=StringIO) as mock_print, \
patch(json_load_path) as mock_json_load:
mock_file_exists.return_value = True
mock_getcwd.return_value = 'nio_lmnopio_block'
mock_listdir.return_value = ['nio_lmnopio_block.py']
mock_json_load.side_effect = [
# json.load() for spec.json (prop1 missing description)
{
'nio/nioLmnopio': {
'version': '0.1.0',
'description': 'spec description',
'properties': {
'prop1': {
'description': ''
}
},
'inputs': {},
'outputs': {},
'commands': {},
}
},
# json.load() for release.json (versions do not match)
{
'nio/nioLmnopio': {
'language': 'Python',
'url': 'release url',
'version': '0.2.0',
}
}
]
mock_file.return_value.readlines.side_effect = [
# .readlines() for nio_lmnopio_block.py
[
'class nioLmnopio(Block):',
"version = VersionProperty('0.1.0')"
],
# .readlines() for README.md (missing 'Outputs')
[
'nioLmnopio', 'Properties', 'Inputs',
'Commands', 'Dependencies'
]
]
self._main('blockcheck')
self.assertEqual(
'pycodestyle .', mock_subprocess_call.call_args_list[0][0][0])
# Check that print statements are run
what_was_printed = mock_print.getvalue()
self.assertIn('Checking PEP8 formatting ...', what_was_printed)
self.assertIn('Checking spec.json formatting ...', what_was_printed)
self.assertIn('Fill in the description for the "prop1" property ', what_was_printed)
self.assertIn('in the nioLmnopio block', what_was_printed)
self.assertIn('Checking README.md formatting ...', what_was_printed)
self.assertIn('Add "Outputs" to the nioLmnopio block', what_was_printed)
self.assertIn('Checking release.json formatting ...', what_was_printed)
self.assertIn('Checking version formatting ...', what_was_printed)
self.assertIn('The nioLmnopio version in the release file does not match ', what_was_printed)
self.assertIn('the version in its block file', what_was_printed)
self.assertIn('Spec.json and release.json versions do not match ', what_was_printed)
self.assertIn('for nioLmnopio block', what_was_printed)
self.assertIn('Checking class and file name formatting ...', what_was_printed)
def test_add_user_command(self):
""" Adds a user through the rest api"""
with patch("nio_cli.commands.add_user.set_user") as set_user_patch:
self._main('add_user', **{
'--project': 'testing_project',
'<username>': 'user',
'<password>': 'pwd'
})
self.assertEqual(set_user_patch.call_count, 1)
self.assertEqual(set_user_patch.call_args_list[0],
call('testing_project', 'user', 'pwd'))
from nio_cli.utils.users import set_user, _hash_password, \
_set_permissions
with patch(set_user.__module__ + '.os') as mock_os, \
patch(set_user.__module__ + '.json') as mock_json, \
patch('builtins.open') as mock_open, \
patch('nio_cli.utils.users._hash_password') as mock_hash, \
patch('nio_cli.utils.users._set_permissions'):
mock_os.path.isfile.return_value = True
mock_hash.return_value = "AdminPwdHash"
mock_json.load.return_value = {"Admin": "AdminPwd"}
username = "user1"
password = "pwd1"
self._main('add_user', **{
'--project': 'testing_project',
'<username>': username,
'<password>': password
})
# one call to read users.json and one to save users.json
self.assertEqual(mock_open.call_count, 2)
print(mock_json.dump.call_args_list)
users, _ = mock_json.dump.call_args_list[0][0]
self.assertIn(username, users)
self.assertDictEqual(users[username],
{"password": "AdminPwdHash"})
_set_permissions('testing_project', username, False)
# make sure we open permissions.json two times
# to read and write new permissions
self.assertEqual(mock_open.call_count, 4)
print(mock_json.dump.call_args_list)
permissions, _ = mock_json.dump.call_args_list[0][0]
self.assertIn(username, permissions)
self.assertDictEqual(permissions[username],
{".*": "rwx"})
def test_remove_user_command(self):
""" Adds a user through the rest api"""
with patch("nio_cli.commands.remove_user.remove_user") as \
remove_user_patch:
self._main('remove_user', **{
'--project': 'testing_project',
'<username>': 'user'
})
self.assertEqual(remove_user_patch.call_count, 1)
self.assertEqual(remove_user_patch.call_args_list[0],
call('testing_project', 'user'))
from nio_cli.commands.remove_user import RemoveUser, _remove_permission
with patch(RemoveUser.__module__ + '.os') as mock_os, \
patch(RemoveUser.__module__ + '.json') as mock_json, \
patch('builtins.open') as mock_open, \
patch('nio_cli.commands.remove_user._remove_permission'):
mock_os.path.isfile.return_value = True
mock_json.load.return_value = {"Admin": "AdminPwd"}
username = "Admin"
self._main('remove_user', **{
'--project': 'testing_project',
'<username>': username
})
# one call to read users.json and one to save users.json
self.assertEqual(mock_open.call_count, 2)
users, _ = mock_json.dump.call_args_list[0][0]
self.assertNotIn(username, users)
self.assertEqual(len(users), 0)
# make sure we open permissions.json two times
# to read and write new permissions
mock_json.load.return_value = {"Admin": {".*": "rwx"}}
_remove_permission('testing_project', username)
self.assertEqual(mock_open.call_count, 4)
permissions, _ = mock_json.dump.call_args_list[0][0]
self.assertNotIn(username, permissions)
self.assertEqual(len(permissions), 0)
def test_cleanup_host(self):
cli_command = Base({})
self.assertEqual(
cli_command._cleanup_host('localhost'),
'https://localhost')
self.assertEqual(
cli_command._cleanup_host('http://localhost'),
'http://localhost')
self.assertEqual(
cli_command._cleanup_host('https://localhost'),
'https://localhost')
self.assertEqual(
cli_command._cleanup_host('https://localhost:8181'),
'https://localhost:8181')
self.assertEqual(
cli_command._cleanup_host('https://localhost:8181/'),
'https://localhost:8181')
def _main(self, command, **kwargs):
args = {
'--daemon': False,
'--upgrade': False,
'-u': False,
'--template': False,
'-t': False,
}
if command in ('new', 'config'):
args['--ip'] = '127.0.0.1'
args['--port'] = '8181'
else:
args['--instance-host'] = 'http://127.0.0.1:8181'
args[command] = True
for k, v in kwargs.items():
args[k] = v
with patch('nio_cli.cli.docopt') as docopt:
docopt.return_value = args
cli.main()
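# Editor's note (illustrative only): ``_main`` drives the real CLI entry point by
# patching docopt's parsed-argument dict, so a call such as
#   self._main('shutdown', **{'--username': 'user', '--password': 'pwd'})
# exercises roughly the same code path as the shell invocation
#   nio shutdown --username user --password pwd
# against the default instance at http://127.0.0.1:8181 (CLI name assumed).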
| nioinnovation/nio-cli | tests/test_cli.py | Python | apache-2.0 | 27,669 |
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import pymongo
import trove.common.db.mongodb.models as models
import trove.common.utils as utils
import trove.guestagent.backup as backup
from trove.guestagent.common.configuration import ImportOverrideStrategy
import trove.guestagent.datastore.experimental.mongodb.manager as manager
import trove.guestagent.datastore.experimental.mongodb.service as service
import trove.guestagent.volume as volume
from trove.tests.unittests.guestagent.test_datastore_manager import \
DatastoreManagerTest
class GuestAgentMongoDBManagerTest(DatastoreManagerTest):
@mock.patch.object(ImportOverrideStrategy, '_initialize_import_directory')
def setUp(self, _):
super(GuestAgentMongoDBManagerTest, self).setUp('mongodb')
self.manager = manager.Manager()
self.execute_with_timeout_patch = mock.patch.object(
utils, 'execute_with_timeout', return_value=('0', '')
)
self.addCleanup(self.execute_with_timeout_patch.stop)
self.execute_with_timeout_patch.start()
self.pymongo_patch = mock.patch.object(
pymongo, 'MongoClient'
)
self.addCleanup(self.pymongo_patch.stop)
self.pymongo_patch.start()
self.mount_point = '/var/lib/mongodb'
self.host_wildcard = '%' # This is used in the test_*_user tests below
self.serialized_user = {
'_name': 'testdb.testuser', '_password': None,
'_roles': [{'db': 'testdb', 'role': 'testrole'}],
'_username': 'testuser', '_databases': [],
'_host': self.host_wildcard,
'_database': {'_name': 'testdb',
'_character_set': None,
'_collate': None},
'_is_root': False
}
def tearDown(self):
super(GuestAgentMongoDBManagerTest, self).tearDown()
def test_update_status(self):
self.manager.app.status = mock.MagicMock()
self.manager.update_status(self.context)
self.manager.app.status.update.assert_any_call()
def _prepare_method(self, packages=['packages'], databases=None,
memory_mb='2048', users=None, device_path=None,
mount_point=None, backup_info=None,
config_contents=None, root_password=None,
overrides=None, cluster_config=None,):
"""self.manager.app must be correctly mocked before calling."""
self.manager.app.status = mock.Mock()
self.manager.prepare(self.context, packages,
databases, memory_mb, users,
device_path=device_path,
mount_point=mount_point,
backup_info=backup_info,
config_contents=config_contents,
root_password=root_password,
overrides=overrides,
cluster_config=cluster_config)
self.manager.app.status.begin_install.assert_any_call()
self.manager.app.install_if_needed.assert_called_with(packages)
self.manager.app.stop_db.assert_any_call()
self.manager.app.clear_storage.assert_any_call()
(self.manager.app.apply_initial_guestagent_configuration.
assert_called_once_with(cluster_config, self.mount_point))
@mock.patch.object(volume, 'VolumeDevice')
@mock.patch('os.path.exists')
def test_prepare_for_volume(self, exists, mocked_volume):
device_path = '/dev/vdb'
self.manager.app = mock.Mock()
self._prepare_method(device_path=device_path)
mocked_volume().unmount_device.assert_called_with(device_path)
mocked_volume().format.assert_any_call()
mocked_volume().migrate_data.assert_called_with(self.mount_point)
mocked_volume().mount.assert_called_with(self.mount_point)
def test_secure(self):
self.manager.app = mock.Mock()
mock_secure = mock.Mock()
self.manager.app.secure = mock_secure
self._prepare_method()
mock_secure.assert_called_with()
@mock.patch.object(backup, 'restore')
@mock.patch.object(service.MongoDBAdmin, 'is_root_enabled')
def test_prepare_from_backup(self, mocked_root_check, mocked_restore):
self.manager.app = mock.Mock()
backup_info = {'id': 'backup_id_123abc',
'location': 'fake-location',
'type': 'MongoDBDump',
'checksum': 'fake-checksum'}
self._prepare_method(backup_info=backup_info)
mocked_restore.assert_called_with(self.context, backup_info,
'/var/lib/mongodb')
mocked_root_check.assert_any_call()
def test_prepare_with_databases(self):
self.manager.app = mock.Mock()
database = mock.Mock()
mock_create_databases = mock.Mock()
self.manager.create_database = mock_create_databases
self._prepare_method(databases=[database])
mock_create_databases.assert_called_with(self.context, [database])
def test_prepare_with_users(self):
self.manager.app = mock.Mock()
user = mock.Mock()
mock_create_users = mock.Mock()
self.manager.create_user = mock_create_users
self._prepare_method(users=[user])
mock_create_users.assert_called_with(self.context, [user])
@mock.patch.object(service.MongoDBAdmin, 'enable_root')
def test_provide_root_password(self, mocked_enable_root):
self.manager.app = mock.Mock()
self._prepare_method(root_password='test_password')
mocked_enable_root.assert_called_with('test_password')
@mock.patch.object(service, 'MongoDBClient')
@mock.patch.object(service.MongoDBAdmin, '_admin_user')
@mock.patch.object(service.MongoDBAdmin, '_get_user_record')
def test_create_user(self, mocked_get_user, mocked_admin_user,
mocked_client):
user = self.serialized_user.copy()
user['_password'] = 'testpassword'
users = [user]
client = mocked_client().__enter__()['testdb']
mocked_get_user.return_value = None
self.manager.create_user(self.context, users)
client.add_user.assert_called_with('testuser', password='testpassword',
roles=[{'db': 'testdb',
'role': 'testrole'}])
@mock.patch.object(service, 'MongoDBClient')
@mock.patch.object(service.MongoDBAdmin, '_admin_user')
def test_delete_user(self, mocked_admin_user, mocked_client):
client = mocked_client().__enter__()['testdb']
self.manager.delete_user(self.context, self.serialized_user)
client.remove_user.assert_called_with('testuser')
@mock.patch.object(service, 'MongoDBClient')
@mock.patch.object(service.MongoDBAdmin, '_admin_user')
def test_get_user(self, mocked_admin_user, mocked_client):
mocked_find = mock.MagicMock(return_value={
'_id': 'testdb.testuser',
'user': 'testuser', 'db': 'testdb',
'roles': [{'db': 'testdb', 'role': 'testrole'}]
})
client = mocked_client().__enter__().admin
client.system.users.find_one = mocked_find
result = self.manager.get_user(self.context, 'testdb.testuser', None)
mocked_find.assert_called_with({'user': 'testuser', 'db': 'testdb'})
self.assertEqual(self.serialized_user, result)
@mock.patch.object(service, 'MongoDBClient')
@mock.patch.object(service.MongoDBAdmin, '_admin_user')
def test_list_users(self, mocked_admin_user, mocked_client):
# roles are NOT returned by list_users
user1 = self.serialized_user.copy()
user2 = self.serialized_user.copy()
user2['_name'] = 'testdb.otheruser'
user2['_username'] = 'otheruser'
user2['_roles'] = [{'db': 'testdb2', 'role': 'readWrite'}]
user2['_databases'] = [{'_name': 'testdb2',
'_character_set': None,
'_collate': None}]
mocked_find = mock.MagicMock(return_value=[
{
'_id': 'admin.os_admin',
'user': 'os_admin', 'db': 'admin',
'roles': [{'db': 'admin', 'role': 'root'}]
},
{
'_id': 'testdb.testuser',
'user': 'testuser', 'db': 'testdb',
'roles': [{'db': 'testdb', 'role': 'testrole'}]
},
{
'_id': 'testdb.otheruser',
'user': 'otheruser', 'db': 'testdb',
'roles': [{'db': 'testdb2', 'role': 'readWrite'}]
}
])
client = mocked_client().__enter__().admin
client.system.users.find = mocked_find
users, next_marker = self.manager.list_users(self.context)
self.assertIsNone(next_marker)
self.assertEqual(sorted([user1, user2], key=lambda x: x['_name']),
users)
@mock.patch.object(service.MongoDBAdmin, 'create_validated_user')
@mock.patch.object(utils, 'generate_random_password',
return_value='password')
def test_enable_root(self, mock_gen_rand_pwd, mock_create_user):
root_user = {'_name': 'admin.root',
'_username': 'root',
'_database': {'_name': 'admin',
'_character_set': None,
'_collate': None},
'_password': 'password',
'_roles': [{'db': 'admin', 'role': 'root'}],
'_databases': [],
'_host': self.host_wildcard,
'_is_root': True}
result = self.manager.enable_root(self.context)
self.assertTrue(mock_create_user.called)
self.assertEqual(root_user, result)
@mock.patch.object(service, 'MongoDBClient')
@mock.patch.object(service.MongoDBAdmin, '_admin_user')
@mock.patch.object(service.MongoDBAdmin, '_get_user_record',
return_value=models.MongoDBUser('testdb.testuser'))
def test_grant_access(self, mocked_get_user,
mocked_admin_user, mocked_client):
client = mocked_client().__enter__()['testdb']
self.manager.grant_access(self.context, 'testdb.testuser',
None, ['db1', 'db2', 'db3'])
client.add_user.assert_called_with('testuser', roles=[
{'db': 'db1', 'role': 'readWrite'},
{'db': 'db2', 'role': 'readWrite'},
{'db': 'db3', 'role': 'readWrite'}
])
@mock.patch.object(service, 'MongoDBClient')
@mock.patch.object(service.MongoDBAdmin, '_admin_user')
@mock.patch.object(service.MongoDBAdmin, '_get_user_record',
return_value=models.MongoDBUser('testdb.testuser'))
def test_revoke_access(self, mocked_get_user,
mocked_admin_user, mocked_client):
client = mocked_client().__enter__()['testdb']
mocked_get_user.return_value.roles = [
{'db': 'db1', 'role': 'readWrite'},
{'db': 'db2', 'role': 'readWrite'},
{'db': 'db3', 'role': 'readWrite'}
]
self.manager.revoke_access(self.context, 'testdb.testuser',
None, 'db2')
client.add_user.assert_called_with('testuser', roles=[
{'db': 'db1', 'role': 'readWrite'},
{'db': 'db3', 'role': 'readWrite'}
])
@mock.patch.object(service, 'MongoDBClient')
@mock.patch.object(service.MongoDBAdmin, '_admin_user')
@mock.patch.object(service.MongoDBAdmin, '_get_user_record',
return_value=models.MongoDBUser('testdb.testuser'))
def test_list_access(self, mocked_get_user,
mocked_admin_user, mocked_client):
mocked_get_user.return_value.roles = [
{'db': 'db1', 'role': 'readWrite'},
{'db': 'db2', 'role': 'readWrite'},
{'db': 'db3', 'role': 'readWrite'}
]
accessible_databases = self.manager.list_access(
self.context, 'testdb.testuser', None
)
self.assertEqual(['db1', 'db2', 'db3'],
[db['_name'] for db in accessible_databases])
@mock.patch.object(service, 'MongoDBClient')
@mock.patch.object(service.MongoDBAdmin, '_admin_user')
def test_create_databases(self, mocked_admin_user, mocked_client):
schema = models.MongoDBSchema('testdb').serialize()
db_client = mocked_client().__enter__()['testdb']
self.manager.create_database(self.context, [schema])
db_client['dummy'].insert.assert_called_with({'dummy': True})
db_client.drop_collection.assert_called_with('dummy')
@mock.patch.object(service, 'MongoDBClient')
@mock.patch.object(service.MongoDBAdmin, '_admin_user')
def test_list_databases(self, # mocked_ignored_dbs,
mocked_admin_user, mocked_client):
# This list contains the special 'admin', 'local' and 'config' dbs;
# the special dbs should be skipped in the output.
# Pagination is tested by starting at 'db1', so 'db0' should not
# be in the output. The limit is set to 2, meaning the result
        # should be 'db1' and 'db2'. The next_marker should be 'db2'.
mocked_list = mock.MagicMock(
return_value=['admin', 'local', 'config',
'db0', 'db1', 'db2', 'db3'])
mocked_client().__enter__().database_names = mocked_list
dbs, next_marker = self.manager.list_databases(
self.context, limit=2, marker='db1', include_marker=True)
mocked_list.assert_any_call()
self.assertEqual([models.MongoDBSchema('db1').serialize(),
models.MongoDBSchema('db2').serialize()],
dbs)
self.assertEqual('db2', next_marker)
@mock.patch.object(service, 'MongoDBClient')
@mock.patch.object(service.MongoDBAdmin, '_admin_user')
def test_delete_database(self, mocked_admin_user, mocked_client):
schema = models.MongoDBSchema('testdb').serialize()
self.manager.delete_database(self.context, schema)
mocked_client().__enter__().drop_database.assert_called_with('testdb')
| zhangg/trove | trove/tests/unittests/guestagent/test_mongodb_manager.py | Python | apache-2.0 | 15,121 |
# -*- coding: utf-8 -*-
"""
CSS Selectors based on XPath
============================
This module supports selecting XML/HTML elements based on CSS selectors.
See the `CSSSelector` class for details.
:copyright: (c) 2007-2012 Ian Bicking and contributors.
See AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from cssselect.parser import (parse, Selector, FunctionalPseudoElement,
SelectorError, SelectorSyntaxError)
from cssselect.xpath import GenericTranslator, HTMLTranslator, ExpressionError
VERSION = '1.0.1'
__version__ = VERSION
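# Editor's note: a small usage sketch, not part of the original file. Selectors
# are compiled to XPath 1.0 expressions via the translators exported above:
#
#   from cssselect import GenericTranslator
#   expr = GenericTranslator().css_to_xpath('div.content a')
#   # ``expr`` is an XPath string that can be fed to an XPath engine such as
#   # lxml's ``etree.XPath``.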
| frvannes16/Cops-Robbers-Coding-Challenge | src/competition_code/libs/cssselect/__init__.py | Python | apache-2.0 | 639 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import logging
from datetime import datetime
from typing import Any, Dict, Optional, TYPE_CHECKING
from superset import is_feature_enabled
from superset.db_engine_specs.base import BaseEngineSpec
from superset.exceptions import SupersetException
from superset.utils import core as utils
if TYPE_CHECKING:
from superset.connectors.sqla.models import TableColumn
from superset.models.core import Database
logger = logging.getLogger()
class DruidEngineSpec(BaseEngineSpec):
"""Engine spec for Druid.io"""
engine = "druid"
engine_name = "Apache Druid"
allows_joins = is_feature_enabled("DRUID_JOINS")
allows_subqueries = True
_time_grain_expressions = {
None: "{col}",
"PT1S": "TIME_FLOOR(CAST({col} AS TIMESTAMP), 'PT1S')",
"PT5S": "TIME_FLOOR(CAST({col} AS TIMESTAMP), 'PT5S')",
"PT30S": "TIME_FLOOR(CAST({col} AS TIMESTAMP), 'PT30S')",
"PT1M": "TIME_FLOOR(CAST({col} AS TIMESTAMP), 'PT1M')",
"PT5M": "TIME_FLOOR(CAST({col} AS TIMESTAMP), 'PT5M')",
"PT10M": "TIME_FLOOR(CAST({col} AS TIMESTAMP), 'PT10M')",
"PT15M": "TIME_FLOOR(CAST({col} AS TIMESTAMP), 'PT15M')",
"PT30M": "TIME_FLOOR(CAST({col} AS TIMESTAMP), 'PT30M')",
"PT1H": "TIME_FLOOR(CAST({col} AS TIMESTAMP), 'PT1H')",
"PT6H": "TIME_FLOOR(CAST({col} AS TIMESTAMP), 'PT6H')",
"P1D": "TIME_FLOOR(CAST({col} AS TIMESTAMP), 'P1D')",
"P1W": "TIME_FLOOR(CAST({col} AS TIMESTAMP), 'P1W')",
"P1M": "TIME_FLOOR(CAST({col} AS TIMESTAMP), 'P1M')",
"P3M": "TIME_FLOOR(CAST({col} AS TIMESTAMP), 'P3M')",
"P1Y": "TIME_FLOOR(CAST({col} AS TIMESTAMP), 'P1Y')",
"P1W/1970-01-03T00:00:00Z": (
"TIME_SHIFT(TIME_FLOOR(TIME_SHIFT(CAST({col} AS TIMESTAMP), "
"'P1D', 1), 'P1W'), 'P1D', 5)"
),
"1969-12-28T00:00:00Z/P1W": (
"TIME_SHIFT(TIME_FLOOR(TIME_SHIFT(CAST({col} AS TIMESTAMP), "
"'P1D', 1), 'P1W'), 'P1D', -1)"
),
}
@classmethod
def alter_new_orm_column(cls, orm_col: "TableColumn") -> None:
if orm_col.column_name == "__time":
orm_col.is_dttm = True
@staticmethod
def get_extra_params(database: "Database") -> Dict[str, Any]:
"""
For Druid, the path to a SSL certificate is placed in `connect_args`.
:param database: database instance from which to extract extras
:raises CertificateException: If certificate is not valid/unparseable
:raises SupersetException: If database extra json payload is unparseable
"""
try:
extra = json.loads(database.extra or "{}")
except json.JSONDecodeError as ex:
raise SupersetException("Unable to parse database extras") from ex
if database.server_cert:
engine_params = extra.get("engine_params", {})
connect_args = engine_params.get("connect_args", {})
connect_args["scheme"] = "https"
path = utils.create_ssl_cert_file(database.server_cert)
connect_args["ssl_verify_cert"] = path
engine_params["connect_args"] = connect_args
extra["engine_params"] = engine_params
return extra
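    # Editor's note -- illustrative only, values are hypothetical: when
    # ``database.server_cert`` is set, the returned extras end up shaped like
    #   {"engine_params": {"connect_args": {"scheme": "https",
    #                                       "ssl_verify_cert": "/tmp/<generated>.pem"}}}
    # where the .pem path is the temporary file written by
    # ``utils.create_ssl_cert_file``.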
@classmethod
def convert_dttm(
cls, target_type: str, dttm: datetime, db_extra: Optional[Dict[str, Any]] = None
) -> Optional[str]:
tt = target_type.upper()
if tt == utils.TemporalType.DATE:
return f"CAST(TIME_PARSE('{dttm.date().isoformat()}') AS DATE)"
if tt in (utils.TemporalType.DATETIME, utils.TemporalType.TIMESTAMP):
return f"""TIME_PARSE('{dttm.isoformat(timespec="seconds")}')"""
return None
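    # Editor's note -- worked examples, not part of the original: for
    # dttm = datetime(2021, 1, 2, 3, 4, 5) the method above returns
    #   DATE      -> "CAST(TIME_PARSE('2021-01-02') AS DATE)"
    #   TIMESTAMP -> "TIME_PARSE('2021-01-02T03:04:05')"
    # and None for any other target type.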
@classmethod
def epoch_to_dttm(cls) -> str:
"""
Convert from number of seconds since the epoch to a timestamp.
"""
return "MILLIS_TO_TIMESTAMP({col} * 1000)"
@classmethod
def epoch_ms_to_dttm(cls) -> str:
"""
Convert from number of milliseconds since the epoch to a timestamp.
"""
return "MILLIS_TO_TIMESTAMP({col})"
| apache/incubator-superset | superset/db_engine_specs/druid.py | Python | apache-2.0 | 4,908 |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = ""
cfg.parentdir_prefix = "forge-"
cfg.versionfile_source = "forge/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
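# Editor's note -- illustrative example (directory name is hypothetical): with
# parentdir_prefix "forge-" and a tarball unpacked into "forge-1.2.0/",
# versions_from_parentdir() returns
#   {"version": "1.2.0", "full-revisionid": None,
#    "dirty": False, "error": None, "date": None}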
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
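# Editor's note -- worked example (values hypothetical): for
#   pieces = {"closest-tag": "1.2.3", "distance": 4,
#             "short": "abc1234", "dirty": True}
# render_pep440(pieces) yields "1.2.3+4.gabc1234.dirty"; with distance 0 and a
# clean tree it is simply "1.2.3".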
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
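    # Editor's note (illustrative): every branch below returns a dict with the
    # same five keys, e.g.
    #   {"version": "1.2.0+3.g1234567", "full-revisionid": "<40-char sha>",
    #    "dirty": False, "error": None, "date": "2018-01-01T00:00:00+0000"}
    # so callers can rely on that shape even when no version can be computed.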
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
| sipplified/forge | forge/_version.py | Python | apache-2.0 | 18,448 |
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import google.api_core.grpc_helpers
import google.api_core.operations_v1
from google.cloud.dataproc_v1beta2.proto import workflow_templates_pb2_grpc
class WorkflowTemplateServiceGrpcTransport(object):
"""gRPC transport class providing stubs for
google.cloud.dataproc.v1beta2 WorkflowTemplateService API.
The transport provides access to the raw gRPC stubs,
which can be used to take advantage of advanced
features of gRPC.
"""
# The scopes needed to make gRPC calls to all of the methods defined
# in this service.
_OAUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
def __init__(
self, channel=None, credentials=None, address="dataproc.googleapis.com:443"
):
"""Instantiate the transport class.
Args:
channel (grpc.Channel): A ``Channel`` instance through
which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
address (str): The address where the service is hosted.
"""
# If both `channel` and `credentials` are specified, raise an
# exception (channels come with credentials baked in already).
if channel is not None and credentials is not None:
raise ValueError(
"The `channel` and `credentials` arguments are mutually " "exclusive."
)
# Create the channel.
if channel is None:
channel = self.create_channel(address=address, credentials=credentials)
self._channel = channel
# gRPC uses objects called "stubs" that are bound to the
# channel and provide a basic method for each RPC.
self._stubs = {
"workflow_template_service_stub": workflow_templates_pb2_grpc.WorkflowTemplateServiceStub(
channel
)
}
# Because this API includes a method that returns a
# long-running operation (proto: google.longrunning.Operation),
# instantiate an LRO client.
self._operations_client = google.api_core.operations_v1.OperationsClient(
channel
)
@classmethod
def create_channel(cls, address="dataproc.googleapis.com:443", credentials=None):
"""Create and return a gRPC channel object.
Args:
address (str): The host for the channel to use.
credentials (~.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
Returns:
grpc.Channel: A gRPC channel object.
"""
return google.api_core.grpc_helpers.create_channel(
address, credentials=credentials, scopes=cls._OAUTH_SCOPES
)
@property
def channel(self):
"""The gRPC channel used by the transport.
Returns:
grpc.Channel: A gRPC channel object.
"""
return self._channel
@property
def create_workflow_template(self):
"""Return the gRPC stub for {$apiMethod.name}.
Creates new workflow template.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["workflow_template_service_stub"].CreateWorkflowTemplate
@property
def get_workflow_template(self):
"""Return the gRPC stub for {$apiMethod.name}.
Retrieves the latest workflow template.
Can retrieve previously instantiated template by specifying optional
version parameter.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["workflow_template_service_stub"].GetWorkflowTemplate
@property
def instantiate_workflow_template(self):
"""Return the gRPC stub for {$apiMethod.name}.
Instantiates a template and begins execution.
The returned Operation can be used to track execution of workflow by
polling ``operations.get``. The Operation will complete when entire
workflow is finished.
The running workflow can be aborted via ``operations.cancel``. This will
cause any inflight jobs to be cancelled and workflow-owned clusters to
be deleted.
The ``Operation.metadata`` will be ``WorkflowMetadata``.
On successful completion, ``Operation.response`` will be ``Empty``.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["workflow_template_service_stub"].InstantiateWorkflowTemplate
@property
def instantiate_inline_workflow_template(self):
"""Return the gRPC stub for {$apiMethod.name}.
Instantiates a template and begins execution.
This method is equivalent to executing the sequence
``CreateWorkflowTemplate``, ``InstantiateWorkflowTemplate``,
``DeleteWorkflowTemplate``.
The returned Operation can be used to track execution of workflow by
polling ``operations.get``. The Operation will complete when entire
workflow is finished.
The running workflow can be aborted via ``operations.cancel``. This will
cause any inflight jobs to be cancelled and workflow-owned clusters to
be deleted.
The ``Operation.metadata`` will be ``WorkflowMetadata``.
On successful completion, ``Operation.response`` will be ``Empty``.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs[
"workflow_template_service_stub"
].InstantiateInlineWorkflowTemplate
@property
def update_workflow_template(self):
"""Return the gRPC stub for {$apiMethod.name}.
Updates (replaces) workflow template. The updated template
must contain version that matches the current server version.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["workflow_template_service_stub"].UpdateWorkflowTemplate
@property
def list_workflow_templates(self):
"""Return the gRPC stub for {$apiMethod.name}.
Lists workflows that match the specified filter in the request.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["workflow_template_service_stub"].ListWorkflowTemplates
@property
def delete_workflow_template(self):
"""Return the gRPC stub for {$apiMethod.name}.
Deletes a workflow template. It does not cancel in-progress workflows.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["workflow_template_service_stub"].DeleteWorkflowTemplate
| dhermes/google-cloud-python | dataproc/google/cloud/dataproc_v1beta2/gapic/transports/workflow_template_service_grpc_transport.py | Python | apache-2.0 | 8,646 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Defines various data containers for plotting a transect.
This file is not used in the current version of `geotransect` but is kept here
in case it's useful later.
:copyright: 2015 Agile Geoscience
:license: Apache 2.0
"""
import sys
import os
import numpy as np
from scipy import fft
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import obspy
from plot_utils import add_subplot_axes
filename = sys.argv[1]
segyfile = os.path.basename(filename)
# Read all traces.
section = obspy.read(filename)
r_elevs = []
s_elevs = []
esp = [] # energy source point number
ens = [] # ensemble number
for t in section.traces:
nsamples = t.stats.segy.trace_header.number_of_samples_in_this_trace
dt = t.stats.segy.trace_header.sample_interval_in_ms_for_this_trace
if dt > 100:
dt /= 1000.
r_elevs.append(t.stats.segy.trace_header.datum_elevation_at_receiver_group)
s_elevs.append(t.stats.segy.trace_header.receiver_group_elevation)
esp.append(t.stats.segy.trace_header.energy_source_point_number)
ens.append(t.stats.segy.trace_header.ensemble_number)
ntraces = len(section.traces)
tbase = np.arange(0, nsamples * dt, dt)
tstart = 0
tend = tbase[-1]
aspect = float(ntraces) / float(nsamples)
nf = 1.0
print 'ntraces', ntraces
print 'nsamples', nsamples
print 'dt', dt/nf
data = np.zeros((nsamples, ntraces))
for i, trace in enumerate(section.traces):
data[:, i] = trace.data
line_extents = {'first_trace': 1,
'last_trace': ntraces,
'start_time': tstart,
'end_time': tend
}
clip_val = np.percentile(data, 99.0)
print "clip_val", clip_val
print "max_val", np.amax(data)
print "min_val", np.amin(data)
print "tstart", tstart
print "tend", tend
largest = max(np.amax(data), abs(np.amin(data)))
# MAIN PLOT
h = (tend-tstart) / 250.0
w = ntraces / 250.0
fig = plt.figure(figsize=(10, 10), facecolor='w')
# Seismic data
ax = fig.add_axes([0.05, 0.05, 0.9, 0.95])
im = ax.imshow(data, cmap=cm.gray, origin='upper',
vmin=-clip_val,
vmax=clip_val,
extent=(line_extents['first_trace'],
line_extents['last_trace'],
line_extents['end_time'],
line_extents['start_time']),
               aspect=aspect * 0.5
)
ax.set_ylabel('Two-way time [ms]')
ax.set_xlabel('Trace no.')
ax.grid()
ax.set_title(segyfile)
# Colourbar
extreme = max(np.amax(data), abs(np.amin(data)))
colorbar_ax = add_subplot_axes(ax, [0.075, 0.075, 0.025, 0.15])
fig.colorbar(im, cax=colorbar_ax)
colorbar_ax.text(1.15, 1.1, '%3.0f' % -extreme,
transform=colorbar_ax.transAxes,
ha='left',
va='top')
colorbar_ax.text(1.15, -0.05, '%3.0f' % extreme,
transform=colorbar_ax.transAxes,
ha='left', fontsize=10)
colorbar_ax.set_axis_off()
# Power spectrum
S = abs(fft(data[:, 1]))
faxis = np.fft.fftfreq(len(data[:, 1]), d=(1/nf)*dt*1e-6)
spec_ax = add_subplot_axes(ax, [0.50, 0.075, 0.2, 0.15])
spec_ax.plot(faxis[:len(faxis)//4],
np.log10(S[0:len(faxis)//4]),
'b', lw=2)
spec_ax.set_xlabel('frequency [Hz]', fontsize=10)
spec_ax.set_xticklabels([0, 100, 200, 300], fontsize=10)
# spec_ax.set_xticklabels(spec_ax.get_xticks(), fontsize=10)
spec_ax.set_yticklabels(spec_ax.get_yticks(), fontsize=10)
spec_ax.set_yticks([])
spec_ax.set_yticklabels([])
spec_ax.text(.95, .9, 'Power spectrum',
horizontalalignment='right',
transform=spec_ax.transAxes, fontsize=10
)
spec_ax.grid('on')
# Histogram
hist_ax = add_subplot_axes(ax, [0.75, 0.075, 0.2, 0.15])
hist_line = hist_ax.hist(np.ravel(data),
bins=int(100.0 / (clip_val/largest)))
hist_ax.set_xlim(-clip_val, clip_val)
# hist_ax.set_xticklabels([])
hist_ax.set_yticks([])
hist_ax.set_xticklabels([])
hist_ax.set_ylim(hist_ax.get_ylim()[0], hist_ax.get_ylim()[1])
hist_ax.set_yticks([])
hist_ax.text(.95, .9, 'Histogram',
horizontalalignment='right',
transform=hist_ax.transAxes, fontsize=10
)
plt.show()
| kinverarity1/geotransect | profile_plot.py | Python | apache-2.0 | 4,222 |
'''
Copyright 2015 University of Auckland
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
class RespirationConstants(object):
class ExpirationType(object):
PASSIVE = 0
ACTIVE = 1
| OpenCMISS/neon | src/opencmiss/neon/core/problems/constants.py | Python | apache-2.0 | 714 |
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from ec2api.tests.functional import base
from ec2api.tests.functional import config
CONF = config.CONF
class SnapshotTest(base.EC2TestCase):
def test_create_delete_snapshot(self):
kwargs = {
'Size': 1,
'AvailabilityZone': CONF.aws.aws_zone
}
data = self.client.create_volume(*[], **kwargs)
volume_id = data['VolumeId']
clean_vol = self.addResourceCleanUp(self.client.delete_volume,
VolumeId=volume_id)
self.get_volume_waiter().wait_available(volume_id)
desc = 'test snapshot'
kwargs = {
'VolumeId': volume_id,
'Description': desc
}
data = self.client.create_snapshot(*[], **kwargs)
snapshot_id = data['SnapshotId']
res_clean = self.addResourceCleanUp(self.client.delete_snapshot,
SnapshotId=snapshot_id)
self.get_snapshot_waiter().wait_available(snapshot_id,
final_set=('completed'))
self.assertEqual(desc, data['Description'])
self.assertEqual(volume_id, data['VolumeId'])
self.assertEqual(1, data['VolumeSize'])
self.assertNotEmpty(data.get('State', ''))
if 'Encrypted' in data:
self.assertFalse(data['Encrypted'])
self.assertIsNotNone(data['StartTime'])
data = self.client.delete_snapshot(SnapshotId=snapshot_id)
self.cancelResourceCleanUp(res_clean)
self.get_snapshot_waiter().wait_delete(snapshot_id)
data = self.client.delete_volume(VolumeId=volume_id)
self.cancelResourceCleanUp(clean_vol)
self.get_volume_waiter().wait_delete(volume_id)
def test_describe_snapshots(self):
kwargs = {
'Size': 1,
'AvailabilityZone': CONF.aws.aws_zone
}
data = self.client.create_volume(*[], **kwargs)
volume_id = data['VolumeId']
clean_vol = self.addResourceCleanUp(self.client.delete_volume,
VolumeId=volume_id)
self.get_volume_waiter().wait_available(volume_id)
desc = 'test snapshot'
kwargs = {
'VolumeId': volume_id,
'Description': desc
}
data = self.client.create_snapshot(*[], **kwargs)
snapshot_id = data['SnapshotId']
ownerId = data['OwnerId']
res_clean = self.addResourceCleanUp(self.client.delete_snapshot,
SnapshotId=snapshot_id)
self.get_snapshot_waiter().wait_available(snapshot_id,
final_set=('completed'))
self.assertEqual(desc, data['Description'])
self.assertEqual(volume_id, data['VolumeId'])
self.assertEqual(1, data['VolumeSize'])
self.assertNotEmpty(data.get('State', ''))
if 'Encrypted' in data:
self.assertFalse(data['Encrypted'])
self.assertIsNotNone(data['StartTime'])
data = self.client.describe_snapshots(SnapshotIds=[snapshot_id])
self.assertEqual(1, len(data['Snapshots']))
data = data['Snapshots'][0]
self.assertEqual(snapshot_id, data['SnapshotId'])
self.assertEqual(desc, data['Description'])
self.assertEqual(volume_id, data['VolumeId'])
self.assertEqual(1, data['VolumeSize'])
self.assertNotEmpty(data.get('State', ''))
if 'Encrypted' in data:
self.assertFalse(data['Encrypted'])
self.assertIsNotNone(data['StartTime'])
data = self.client.describe_snapshots(OwnerIds=[ownerId])
data = [s for s in data['Snapshots'] if s['SnapshotId'] == snapshot_id]
self.assertEqual(1, len(data))
data = self.client.delete_snapshot(SnapshotId=snapshot_id)
self.cancelResourceCleanUp(res_clean)
self.get_snapshot_waiter().wait_delete(snapshot_id)
self.assertRaises('InvalidSnapshot.NotFound',
self.client.describe_snapshots,
SnapshotIds=[snapshot_id])
data = self.client.delete_volume(VolumeId=volume_id)
self.cancelResourceCleanUp(clean_vol)
self.get_volume_waiter().wait_delete(volume_id)
def test_create_volume_from_snapshot(self):
kwargs = {
'Size': 1,
'AvailabilityZone': CONF.aws.aws_zone
}
data = self.client.create_volume(*[], **kwargs)
volume_id = data['VolumeId']
clean_vol = self.addResourceCleanUp(self.client.delete_volume,
VolumeId=volume_id)
self.get_volume_waiter().wait_available(volume_id)
vol1 = data
desc = 'test snapshot'
kwargs = {
'VolumeId': volume_id,
'Description': desc
}
data = self.client.create_snapshot(*[], **kwargs)
snapshot_id = data['SnapshotId']
res_clean = self.addResourceCleanUp(self.client.delete_snapshot,
SnapshotId=snapshot_id)
self.get_snapshot_waiter().wait_available(snapshot_id,
final_set=('completed'))
kwargs = {
'SnapshotId': snapshot_id,
'AvailabilityZone': CONF.aws.aws_zone
}
data = self.client.create_volume(*[], **kwargs)
volume_id2 = data['VolumeId']
clean_vol2 = self.addResourceCleanUp(self.client.delete_volume,
VolumeId=volume_id2)
self.get_volume_waiter().wait_available(volume_id2)
self.assertNotEqual(volume_id, volume_id2)
self.assertEqual(vol1['Size'], data['Size'])
self.assertEqual(snapshot_id, data['SnapshotId'])
data = self.client.describe_volumes(
Filters=[{'Name': 'snapshot-id', 'Values': [snapshot_id]}])
self.assertEqual(1, len(data['Volumes']))
self.assertEqual(volume_id2, data['Volumes'][0]['VolumeId'])
data = self.client.delete_snapshot(SnapshotId=snapshot_id)
self.cancelResourceCleanUp(res_clean)
self.get_snapshot_waiter().wait_delete(snapshot_id)
data = self.client.delete_volume(VolumeId=volume_id)
self.cancelResourceCleanUp(clean_vol)
self.get_volume_waiter().wait_delete(volume_id)
data = self.client.delete_volume(VolumeId=volume_id2)
self.cancelResourceCleanUp(clean_vol2)
self.get_volume_waiter().wait_delete(volume_id2)
def test_create_increased_volume_from_snapshot(self):
kwargs = {
'Size': 1,
'AvailabilityZone': CONF.aws.aws_zone
}
data = self.client.create_volume(*[], **kwargs)
volume_id = data['VolumeId']
clean_vol = self.addResourceCleanUp(self.client.delete_volume,
VolumeId=volume_id)
self.get_volume_waiter().wait_available(volume_id)
desc = 'test snapshot'
kwargs = {
'VolumeId': volume_id,
'Description': desc
}
data = self.client.create_snapshot(*[], **kwargs)
snapshot_id = data['SnapshotId']
res_clean = self.addResourceCleanUp(self.client.delete_snapshot,
SnapshotId=snapshot_id)
self.get_snapshot_waiter().wait_available(snapshot_id,
final_set=('completed'))
kwargs = {
'Size': 2,
'SnapshotId': snapshot_id,
'AvailabilityZone': CONF.aws.aws_zone
}
data = self.client.create_volume(*[], **kwargs)
volume_id2 = data['VolumeId']
clean_vol2 = self.addResourceCleanUp(self.client.delete_volume,
VolumeId=volume_id2)
self.get_volume_waiter().wait_available(volume_id2)
self.assertNotEqual(volume_id, volume_id2)
self.assertEqual(2, data['Size'])
self.assertEqual(snapshot_id, data['SnapshotId'])
data = self.client.delete_snapshot(SnapshotId=snapshot_id)
self.cancelResourceCleanUp(res_clean)
self.get_snapshot_waiter().wait_delete(snapshot_id)
data = self.client.delete_volume(VolumeId=volume_id)
self.cancelResourceCleanUp(clean_vol)
self.get_volume_waiter().wait_delete(volume_id)
data = self.client.delete_volume(VolumeId=volume_id2)
self.cancelResourceCleanUp(clean_vol2)
self.get_volume_waiter().wait_delete(volume_id2)
@testtools.skipUnless(CONF.aws.run_incompatible_tests,
"Openstack can't delete volume with snapshots")
def test_delete_volume_with_snapshots(self):
kwargs = {
'Size': 1,
'AvailabilityZone': CONF.aws.aws_zone
}
data = self.client.create_volume(*[], **kwargs)
volume_id = data['VolumeId']
clean_vol = self.addResourceCleanUp(self.client.delete_volume,
VolumeId=volume_id)
self.get_volume_waiter().wait_available(volume_id)
desc = 'test snapshot'
kwargs = {
'VolumeId': volume_id,
'Description': desc
}
data = self.client.create_snapshot(*[], **kwargs)
snapshot_id = data['SnapshotId']
res_clean = self.addResourceCleanUp(self.client.delete_snapshot,
SnapshotId=snapshot_id)
self.get_snapshot_waiter().wait_available(snapshot_id,
final_set=('completed'))
data = self.client.delete_volume(VolumeId=volume_id)
self.cancelResourceCleanUp(clean_vol)
self.get_volume_waiter().wait_delete(volume_id)
data = self.client.delete_snapshot(SnapshotId=snapshot_id)
self.cancelResourceCleanUp(res_clean)
self.get_snapshot_waiter().wait_delete(snapshot_id)
| vishnu-kumar/ec2-api | ec2api/tests/functional/api/test_snapshots.py | Python | apache-2.0 | 10,696 |
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
eventlet.monkey_patch()
import contextlib
import sys
from oslo.config import cfg
from keystone.openstack.common import log as logging
from keystone.openstack.common import rpc
from keystone.openstack.common.rpc import impl_zmq
CONF = cfg.CONF
CONF.register_opts(rpc.rpc_opts)
CONF.register_opts(impl_zmq.zmq_opts)
def main():
CONF(sys.argv[1:], project='oslo')
logging.setup("oslo")
with contextlib.closing(impl_zmq.ZmqProxy(CONF)) as reactor:
reactor.consume_in_thread()
reactor.wait()
| derekchiang/keystone | keystone/openstack/common/rpc/zmq_receiver.py | Python | apache-2.0 | 1,154 |
"""The Hunter Douglas PowerView integration."""
import asyncio
from datetime import timedelta
import logging
from aiopvapi.helpers.aiorequest import AioRequest
from aiopvapi.helpers.constants import ATTR_ID
from aiopvapi.helpers.tools import base64_to_unicode
from aiopvapi.rooms import Rooms
from aiopvapi.scenes import Scenes
from aiopvapi.shades import Shades
from aiopvapi.userdata import UserData
import async_timeout
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_HOST
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import (
COORDINATOR,
DEVICE_FIRMWARE,
DEVICE_INFO,
DEVICE_MAC_ADDRESS,
DEVICE_MODEL,
DEVICE_NAME,
DEVICE_REVISION,
DEVICE_SERIAL_NUMBER,
DOMAIN,
FIRMWARE_BUILD,
FIRMWARE_IN_USERDATA,
FIRMWARE_SUB_REVISION,
HUB_EXCEPTIONS,
HUB_NAME,
LEGACY_DEVICE_BUILD,
LEGACY_DEVICE_MODEL,
LEGACY_DEVICE_REVISION,
LEGACY_DEVICE_SUB_REVISION,
MAC_ADDRESS_IN_USERDATA,
MAINPROCESSOR_IN_USERDATA_FIRMWARE,
MODEL_IN_MAINPROCESSOR,
PV_API,
PV_ROOM_DATA,
PV_SCENE_DATA,
PV_SHADE_DATA,
PV_SHADES,
REVISION_IN_MAINPROCESSOR,
ROOM_DATA,
SCENE_DATA,
SERIAL_NUMBER_IN_USERDATA,
SHADE_DATA,
USER_DATA,
)
PARALLEL_UPDATES = 1
CONFIG_SCHEMA = cv.deprecated(DOMAIN)
PLATFORMS = ["cover", "scene", "sensor"]
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass: HomeAssistant, hass_config: dict):
"""Set up the Hunter Douglas PowerView component."""
hass.data.setdefault(DOMAIN, {})
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up Hunter Douglas PowerView from a config entry."""
config = entry.data
hub_address = config.get(CONF_HOST)
websession = async_get_clientsession(hass)
pv_request = AioRequest(hub_address, loop=hass.loop, websession=websession)
try:
async with async_timeout.timeout(10):
device_info = await async_get_device_info(pv_request)
async with async_timeout.timeout(10):
rooms = Rooms(pv_request)
room_data = _async_map_data_by_id((await rooms.get_resources())[ROOM_DATA])
async with async_timeout.timeout(10):
scenes = Scenes(pv_request)
scene_data = _async_map_data_by_id(
(await scenes.get_resources())[SCENE_DATA]
)
async with async_timeout.timeout(10):
shades = Shades(pv_request)
shade_data = _async_map_data_by_id(
(await shades.get_resources())[SHADE_DATA]
)
except HUB_EXCEPTIONS as err:
_LOGGER.error("Connection error to PowerView hub: %s", hub_address)
raise ConfigEntryNotReady from err
if not device_info:
_LOGGER.error("Unable to initialize PowerView hub: %s", hub_address)
raise ConfigEntryNotReady
async def async_update_data():
"""Fetch data from shade endpoint."""
async with async_timeout.timeout(10):
shade_entries = await shades.get_resources()
if not shade_entries:
raise UpdateFailed("Failed to fetch new shade data.")
return _async_map_data_by_id(shade_entries[SHADE_DATA])
coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name="powerview hub",
update_method=async_update_data,
update_interval=timedelta(seconds=60),
)
hass.data[DOMAIN][entry.entry_id] = {
PV_API: pv_request,
PV_ROOM_DATA: room_data,
PV_SCENE_DATA: scene_data,
PV_SHADES: shades,
PV_SHADE_DATA: shade_data,
COORDINATOR: coordinator,
DEVICE_INFO: device_info,
}
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
async def async_get_device_info(pv_request):
"""Determine device info."""
userdata = UserData(pv_request)
resources = await userdata.get_resources()
userdata_data = resources[USER_DATA]
if FIRMWARE_IN_USERDATA in userdata_data:
main_processor_info = userdata_data[FIRMWARE_IN_USERDATA][
MAINPROCESSOR_IN_USERDATA_FIRMWARE
]
else:
# Legacy devices
main_processor_info = {
REVISION_IN_MAINPROCESSOR: LEGACY_DEVICE_REVISION,
FIRMWARE_SUB_REVISION: LEGACY_DEVICE_SUB_REVISION,
FIRMWARE_BUILD: LEGACY_DEVICE_BUILD,
MODEL_IN_MAINPROCESSOR: LEGACY_DEVICE_MODEL,
}
return {
DEVICE_NAME: base64_to_unicode(userdata_data[HUB_NAME]),
DEVICE_MAC_ADDRESS: userdata_data[MAC_ADDRESS_IN_USERDATA],
DEVICE_SERIAL_NUMBER: userdata_data[SERIAL_NUMBER_IN_USERDATA],
DEVICE_REVISION: main_processor_info[REVISION_IN_MAINPROCESSOR],
DEVICE_FIRMWARE: main_processor_info,
DEVICE_MODEL: main_processor_info[MODEL_IN_MAINPROCESSOR],
}
@callback
def _async_map_data_by_id(data):
"""Return a dict with the key being the id for a list of entries."""
return {entry[ATTR_ID]: entry for entry in data}
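def _example_map_by_id():
    # Illustrative sketch only, not part of the original integration: shows the
    # shape produced by _async_map_data_by_id. The sample payloads assume the
    # hub returns dicts carrying an ATTR_ID key, as the real shade/room data does.
    sample = [{ATTR_ID: 1, "name": "Left shade"}, {ATTR_ID: 2, "name": "Right shade"}]
    return _async_map_data_by_id(sample)  # -> {1: {...}, 2: {...}}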
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
| turbokongen/home-assistant | homeassistant/components/hunterdouglas_powerview/__init__.py | Python | apache-2.0 | 5,873 |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import itertools
import json
import re
from itertools import imap
from operator import itemgetter
from django.utils.translation import ugettext as _
from desktop.lib import thrift_util
from desktop.conf import DEFAULT_USER
from hadoop import cluster
from TCLIService import TCLIService
from TCLIService.ttypes import TOpenSessionReq, TGetTablesReq, TFetchResultsReq,\
TStatusCode, TGetResultSetMetadataReq, TGetColumnsReq, TTypeId,\
TExecuteStatementReq, TGetOperationStatusReq, TFetchOrientation,\
TCloseSessionReq, TGetSchemasReq, TGetLogReq, TCancelOperationReq,\
TCloseOperationReq, TFetchResultsResp, TRowSet, TProtocolVersion
from beeswax import conf as beeswax_conf
from beeswax import hive_site
from beeswax.hive_site import hiveserver2_use_ssl
from beeswax.models import Session, HiveServerQueryHandle, HiveServerQueryHistory
from beeswax.server.dbms import Table, NoSuchObjectException, DataTable,\
QueryServerException
LOG = logging.getLogger(__name__)
IMPALA_RESULTSET_CACHE_SIZE = 'impala.resultset.cache.size'
DEFAULT_USER = DEFAULT_USER.get()
class HiveServerTable(Table):
"""
We get the table details from a DESCRIBE FORMATTED.
"""
def __init__(self, table_results, table_schema, desc_results, desc_schema):
if beeswax_conf.THRIFT_VERSION.get() >= 7:
if not table_results.columns:
raise NoSuchObjectException()
self.table = table_results.columns
else: # Deprecated. To remove in Hue 4.
if not table_results.rows:
raise NoSuchObjectException()
self.table = table_results.rows and table_results.rows[0] or ''
self.table_schema = table_schema
self.desc_results = desc_results
self.desc_schema = desc_schema
self.describe = HiveServerTTableSchema(self.desc_results, self.desc_schema).cols()
@property
def name(self):
return HiveServerTRow(self.table, self.table_schema).col('TABLE_NAME')
@property
def is_view(self):
return HiveServerTRow(self.table, self.table_schema).col('TABLE_TYPE') == 'VIEW'
@property
def partition_keys(self):
try:
return [PartitionKeyCompatible(row['col_name'], row['data_type'], row['comment']) for row in self._get_partition_column()]
except:
LOG.exception('failed to get partition keys')
return []
@property
def path_location(self):
try:
rows = self.describe
rows = [row for row in rows if row['col_name'].startswith('Location:')]
if rows:
return rows[0]['data_type']
except:
LOG.exception('failed to get path location')
return None
@property
def cols(self):
rows = self.describe
col_row_index = 2
try:
end_cols_index = map(itemgetter('col_name'), rows[col_row_index:]).index('')
return rows[col_row_index:][:end_cols_index] + self._get_partition_column()
except ValueError: # DESCRIBE on columns and nested columns does not contain add'l rows beyond cols
return rows[col_row_index:]
except:
# Impala does not have it
return rows
def _get_partition_column(self):
rows = self.describe
try:
col_row_index = map(itemgetter('col_name'), rows).index('# Partition Information') + 3
end_cols_index = map(itemgetter('col_name'), rows[col_row_index:]).index('')
return rows[col_row_index:][:end_cols_index]
except:
# Impala does not have it
return []
@property
def comment(self):
return HiveServerTRow(self.table, self.table_schema).col('REMARKS')
@property
def properties(self):
rows = self.describe
col_row_index = 2
end_cols_index = map(itemgetter('col_name'), rows[col_row_index:]).index('')
return [{
'col_name': prop['col_name'].strip() if prop['col_name'] else prop['col_name'],
'data_type': prop['data_type'].strip() if prop['data_type'] else prop['data_type'],
'comment': prop['comment'].strip() if prop['comment'] else prop['comment']
} for prop in rows[col_row_index + end_cols_index + 1:]
]
@property
def stats(self):
rows = self.properties
col_row_index = map(itemgetter('col_name'), rows).index('Table Parameters:') + 1
end_cols_index = map(itemgetter('data_type'), rows[col_row_index:]).index(None)
return rows[col_row_index:][:end_cols_index]
@property
def has_complex(self):
has_complex = False
complex_types = ["struct", "array", "map", "uniontype"]
patterns = [re.compile(typ) for typ in complex_types]
for column in self.cols:
if isinstance(column, dict) and 'data_type' in column:
column_type = column['data_type']
else: # Col object
column_type = column.type
if column_type and any(p.match(column_type.lower()) for p in patterns):
has_complex = True
break
return has_complex
class HiveServerTRowSet2:
def __init__(self, row_set, schema):
self.row_set = row_set
self.rows = row_set.rows
self.schema = schema
self.startRowOffset = row_set.startRowOffset
def is_empty(self):
return not self.row_set.columns or not HiveServerTColumnValue2(self.row_set.columns[0]).val
def cols(self, col_names):
cols_rows = []
rs = HiveServerTRow2(self.row_set.columns, self.schema)
cols = [rs.full_col(name) for name in col_names]
for cols_row in itertools.izip(*cols):
cols_rows.append(dict(itertools.izip(col_names, cols_row)))
return cols_rows
def __iter__(self):
return self
def next(self):
if self.row_set.columns:
return HiveServerTRow2(self.row_set.columns, self.schema)
else:
raise StopIteration
class HiveServerTRow2:
def __init__(self, cols, schema):
self.cols = cols
self.schema = schema
def col(self, colName):
pos = self._get_col_position(colName)
return HiveServerTColumnValue2(self.cols[pos]).val[0] # Return only first element
def full_col(self, colName):
pos = self._get_col_position(colName)
return HiveServerTColumnValue2(self.cols[pos]).val # Return the full column and its values
def _get_col_position(self, column_name):
return filter(lambda (i, col): col.columnName == column_name, enumerate(self.schema.columns))[0][0]
def fields(self):
try:
return [HiveServerTColumnValue2(field).val.pop(0) for field in self.cols]
except IndexError:
raise StopIteration
class HiveServerTColumnValue2:
def __init__(self, tcolumn_value):
self.column_value = tcolumn_value
@property
def val(self):
# Could directly get index from schema but would need to cache the schema
if self.column_value.stringVal:
return self._get_val(self.column_value.stringVal)
elif self.column_value.i16Val is not None:
return self._get_val(self.column_value.i16Val)
elif self.column_value.i32Val is not None:
return self._get_val(self.column_value.i32Val)
elif self.column_value.i64Val is not None:
return self._get_val(self.column_value.i64Val)
elif self.column_value.doubleVal is not None:
return self._get_val(self.column_value.doubleVal)
elif self.column_value.boolVal is not None:
return self._get_val(self.column_value.boolVal)
elif self.column_value.byteVal is not None:
return self._get_val(self.column_value.byteVal)
elif self.column_value.binaryVal is not None:
return self._get_val(self.column_value.binaryVal)
@classmethod
def _get_val(cls, column):
column.values = cls.set_nulls(column.values, column.nulls)
    column.nulls = ''  # Clear the null bitmap so the column is not re-marked as null on the next call
return column.values
@classmethod
def mark_nulls(cls, values, bytestring):
mask = bytearray(bytestring)
for n in mask:
yield n & 0x01
yield n & 0x02
yield n & 0x04
yield n & 0x08
yield n & 0x10
yield n & 0x20
yield n & 0x40
yield n & 0x80
@classmethod
def set_nulls(cls, values, bytestring):
if bytestring == '' or re.match('^(\x00)+$', bytestring): # HS2 has just \x00 or '', Impala can have \x00\x00...
return values
else:
_values = [None if is_null else value for value, is_null in itertools.izip(values, cls.mark_nulls(values, bytestring))]
if len(values) != len(_values): # HS2 can have just \x00\x01 instead of \x00\x01\x00...
_values.extend(values[len(_values):])
return _values
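def _example_null_bitmask():
  # Illustrative sketch only, not part of the original module: HiveServer2 marks
  # NULL cells with a little-endian bitmask, one bit per row. 0x0a is 0b00001010,
  # so rows 1 and 3 (0-based) are NULL.
  values = HiveServerTColumnValue2.set_nulls(['a', 'b', 'c', 'd'], '\x0a')
  assert values == ['a', None, 'c', None]
  return values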
class HiveServerDataTable(DataTable):
def __init__(self, results, schema, operation_handle, query_server):
self.schema = schema and schema.schema
self.row_set = HiveServerTRowSet(results.results, schema)
self.operation_handle = operation_handle
if query_server['server_name'] == 'impala':
self.has_more = results.hasMoreRows
else:
self.has_more = not self.row_set.is_empty() # Should be results.hasMoreRows but always True in HS2
self.startRowOffset = self.row_set.startRowOffset # Always 0 in HS2
@property
def ready(self):
return True
def cols(self):
if self.schema:
return [HiveServerTColumnDesc(col) for col in self.schema.columns]
else:
return []
def rows(self):
for row in self.row_set:
yield row.fields()
class HiveServerTTableSchema:
def __init__(self, columns, schema):
self.columns = columns
self.schema = schema
def cols(self):
try:
return HiveServerTRowSet(self.columns, self.schema).cols(('col_name', 'data_type', 'comment'))
except:
# Impala API is different
cols = HiveServerTRowSet(self.columns, self.schema).cols(('name', 'type', 'comment'))
for col in cols:
col['col_name'] = col.pop('name')
col['data_type'] = col.pop('type')
return cols
def col(self, colName):
pos = self._get_col_position(colName)
return HiveServerTColumnDesc(self.columns[pos]).val
def _get_col_position(self, column_name):
return filter(lambda (i, col): col.columnName == column_name, enumerate(self.schema.columns))[0][0]
if beeswax_conf.THRIFT_VERSION.get() >= 7:
HiveServerTRow = HiveServerTRow2
HiveServerTRowSet = HiveServerTRowSet2
else:
# Deprecated. To remove in Hue 4.
class HiveServerTRow:
def __init__(self, row, schema):
self.row = row
self.schema = schema
def col(self, colName):
pos = self._get_col_position(colName)
return HiveServerTColumnValue(self.row.colVals[pos]).val
def _get_col_position(self, column_name):
return filter(lambda (i, col): col.columnName == column_name, enumerate(self.schema.columns))[0][0]
def fields(self):
return [HiveServerTColumnValue(field).val for field in self.row.colVals]
class HiveServerTRowSet:
def __init__(self, row_set, schema):
self.row_set = row_set
self.rows = row_set.rows
self.schema = schema
self.startRowOffset = row_set.startRowOffset
def is_empty(self):
return len(self.rows) == 0
def cols(self, col_names):
cols_rows = []
for row in self.rows:
row = HiveServerTRow(row, self.schema)
cols = {}
for col_name in col_names:
cols[col_name] = row.col(col_name)
cols_rows.append(cols)
return cols_rows
def __iter__(self):
return self
def next(self):
if self.rows:
return HiveServerTRow(self.rows.pop(0), self.schema)
else:
raise StopIteration
class HiveServerTColumnValue:
def __init__(self, tcolumn_value):
self.column_value = tcolumn_value
@property
def val(self):
if self.column_value.boolVal is not None:
return self.column_value.boolVal.value
elif self.column_value.byteVal is not None:
return self.column_value.byteVal.value
elif self.column_value.i16Val is not None:
return self.column_value.i16Val.value
elif self.column_value.i32Val is not None:
return self.column_value.i32Val.value
elif self.column_value.i64Val is not None:
return self.column_value.i64Val.value
elif self.column_value.doubleVal is not None:
return self.column_value.doubleVal.value
elif self.column_value.stringVal is not None:
return self.column_value.stringVal.value
class HiveServerTColumnDesc:
def __init__(self, column):
self.column = column
@property
def name(self):
return self.column.columnName
@property
def comment(self):
return self.column.comment
@property
def type(self):
return self.get_type(self.column.typeDesc)
@classmethod
def get_type(self, typeDesc):
for ttype in typeDesc.types:
if ttype.primitiveEntry is not None:
return TTypeId._VALUES_TO_NAMES[ttype.primitiveEntry.type]
elif ttype.mapEntry is not None:
return ttype.mapEntry
elif ttype.unionEntry is not None:
return ttype.unionEntry
elif ttype.arrayEntry is not None:
return ttype.arrayEntry
elif ttype.structEntry is not None:
return ttype.structEntry
elif ttype.userDefinedTypeEntry is not None:
return ttype.userDefinedTypeEntry
class HiveServerClient:
HS2_MECHANISMS = {
'KERBEROS': 'GSSAPI',
'NONE': 'PLAIN',
'NOSASL': 'NOSASL',
'LDAP': 'PLAIN',
'PAM': 'PLAIN'
}
def __init__(self, query_server, user):
self.query_server = query_server
self.user = user
use_sasl, mechanism, kerberos_principal_short_name, impersonation_enabled, auth_username, auth_password = self.get_security()
LOG.info('use_sasl=%s, mechanism=%s, kerberos_principal_short_name=%s, impersonation_enabled=%s, auth_username=%s' % (
use_sasl, mechanism, kerberos_principal_short_name, impersonation_enabled, auth_username))
self.use_sasl = use_sasl
self.kerberos_principal_short_name = kerberos_principal_short_name
self.impersonation_enabled = impersonation_enabled
if self.query_server['server_name'] == 'impala':
from impala import conf as impala_conf
ssl_enabled = impala_conf.SSL.ENABLED.get()
ca_certs = impala_conf.SSL.CACERTS.get()
keyfile = impala_conf.SSL.KEY.get()
certfile = impala_conf.SSL.CERT.get()
validate = impala_conf.SSL.VALIDATE.get()
timeout = impala_conf.SERVER_CONN_TIMEOUT.get()
else:
ssl_enabled = hiveserver2_use_ssl()
ca_certs = beeswax_conf.SSL.CACERTS.get()
keyfile = beeswax_conf.SSL.KEY.get()
certfile = beeswax_conf.SSL.CERT.get()
validate = beeswax_conf.SSL.VALIDATE.get()
timeout = beeswax_conf.SERVER_CONN_TIMEOUT.get()
if auth_username:
username = auth_username
password = auth_password
else:
username = user.username
password = None
self._client = thrift_util.get_client(TCLIService.Client,
query_server['server_host'],
query_server['server_port'],
service_name=query_server['server_name'],
kerberos_principal=kerberos_principal_short_name,
use_sasl=use_sasl,
mechanism=mechanism,
username=username,
password=password,
timeout_seconds=timeout,
use_ssl=ssl_enabled,
ca_certs=ca_certs,
keyfile=keyfile,
certfile=certfile,
validate=validate,
transport_mode=query_server.get('transport_mode', 'socket'),
http_url=query_server.get('http_url', '')
)
def get_security(self):
principal = self.query_server['principal']
impersonation_enabled = False
auth_username = self.query_server['auth_username'] # Pass-through LDAP/PAM authentication
auth_password = self.query_server['auth_password']
if principal:
kerberos_principal_short_name = principal.split('/', 1)[0]
else:
kerberos_principal_short_name = None
if self.query_server['server_name'] == 'impala':
if auth_password: # Force LDAP/PAM.. auth if auth_password is provided
use_sasl = True
mechanism = HiveServerClient.HS2_MECHANISMS['NONE']
else:
cluster_conf = cluster.get_cluster_conf_for_job_submission()
use_sasl = cluster_conf is not None and cluster_conf.SECURITY_ENABLED.get()
mechanism = HiveServerClient.HS2_MECHANISMS['KERBEROS']
impersonation_enabled = self.query_server['impersonation_enabled']
else:
hive_mechanism = hive_site.get_hiveserver2_authentication()
if hive_mechanism not in HiveServerClient.HS2_MECHANISMS:
raise Exception(_('%s server authentication not supported. Valid are %s.') % (hive_mechanism, HiveServerClient.HS2_MECHANISMS.keys()))
use_sasl = hive_mechanism in ('KERBEROS', 'NONE', 'LDAP', 'PAM')
mechanism = HiveServerClient.HS2_MECHANISMS[hive_mechanism]
impersonation_enabled = hive_site.hiveserver2_impersonation_enabled()
return use_sasl, mechanism, kerberos_principal_short_name, impersonation_enabled, auth_username, auth_password
def open_session(self, user):
kwargs = {
'client_protocol': beeswax_conf.THRIFT_VERSION.get() - 1,
      'username': user.username, # With SASL or LDAP, the username comes from the authentication mechanism, since it depends on it.
'configuration': {},
}
if self.impersonation_enabled:
kwargs.update({'username': DEFAULT_USER})
if self.query_server['server_name'] == 'impala': # Only when Impala accepts it
kwargs['configuration'].update({'impala.doas.user': user.username})
if self.query_server['server_name'] == 'beeswax': # All the time
kwargs['configuration'].update({'hive.server2.proxy.user': user.username})
if self.query_server['server_name'] == 'sparksql': # All the time
kwargs['configuration'].update({'hive.server2.proxy.user': user.username})
req = TOpenSessionReq(**kwargs)
res = self._client.OpenSession(req)
if res.status is not None and res.status.statusCode not in (TStatusCode.SUCCESS_STATUS,):
if hasattr(res.status, 'errorMessage') and res.status.errorMessage:
message = res.status.errorMessage
else:
message = ''
raise QueryServerException(Exception('Bad status for request %s:\n%s' % (req, res)), message=message)
sessionId = res.sessionHandle.sessionId
LOG.info('Opening session %s' % sessionId)
encoded_status, encoded_guid = HiveServerQueryHandle(secret=sessionId.secret, guid=sessionId.guid).get()
properties = json.dumps(res.configuration)
return Session.objects.create(owner=user,
application=self.query_server['server_name'],
status_code=res.status.statusCode,
secret=encoded_status,
guid=encoded_guid,
server_protocol_version=res.serverProtocolVersion,
properties=properties)
def call(self, fn, req, status=TStatusCode.SUCCESS_STATUS):
session = Session.objects.get_session(self.user, self.query_server['server_name'])
if session is None:
session = self.open_session(self.user)
if hasattr(req, 'sessionHandle') and req.sessionHandle is None:
req.sessionHandle = session.get_handle()
res = fn(req)
# Not supported currently in HS2 and Impala: TStatusCode.INVALID_HANDLE_STATUS
if res.status.statusCode == TStatusCode.ERROR_STATUS and \
re.search('Invalid SessionHandle|Invalid session|Client session expired', res.status.errorMessage or '', re.I):
LOG.info('Retrying with a new session because for %s of %s' % (self.user, res))
session = self.open_session(self.user)
req.sessionHandle = session.get_handle()
# Get back the name of the function to call
res = getattr(self._client, fn.attr)(req)
if status is not None and res.status.statusCode not in (
TStatusCode.SUCCESS_STATUS, TStatusCode.SUCCESS_WITH_INFO_STATUS, TStatusCode.STILL_EXECUTING_STATUS):
if hasattr(res.status, 'errorMessage') and res.status.errorMessage:
message = res.status.errorMessage
else:
message = ''
raise QueryServerException(Exception('Bad status for request %s:\n%s' % (req, res)), message=message)
else:
return res
def close_session(self, sessionHandle):
req = TCloseSessionReq(sessionHandle=sessionHandle)
return self._client.CloseSession(req)
def get_databases(self):
# GetCatalogs() is not implemented in HS2
req = TGetSchemasReq()
res = self.call(self._client.GetSchemas, req)
results, schema = self.fetch_result(res.operationHandle, orientation=TFetchOrientation.FETCH_NEXT, max_rows=5000)
self.close_operation(res.operationHandle)
col = 'TABLE_SCHEM'
return HiveServerTRowSet(results.results, schema.schema).cols((col,))
def get_database(self, database):
if self.query_server['server_name'] == 'impala':
raise NotImplementedError(_("Impala has not implemented the 'DESCRIBE DATABASE' command: %(issue_ref)s") % {
'issue_ref': "https://issues.cloudera.org/browse/IMPALA-2196"
})
query = 'DESCRIBE DATABASE EXTENDED `%s`' % (database)
(desc_results, desc_schema), operation_handle = self.execute_statement(query, max_rows=5000, orientation=TFetchOrientation.FETCH_NEXT)
self.close_operation(operation_handle)
cols = ('db_name', 'comment', 'location')
if len(HiveServerTRowSet(desc_results.results, desc_schema.schema).cols(cols)) != 1:
raise ValueError(_("%(query)s returned more than 1 row") % {'query': query})
return HiveServerTRowSet(desc_results.results, desc_schema.schema).cols(cols)[0] # Should only contain one row
def get_tables_meta(self, database, table_names):
req = TGetTablesReq(schemaName=database, tableName=table_names)
res = self.call(self._client.GetTables, req)
results, schema = self.fetch_result(res.operationHandle, orientation=TFetchOrientation.FETCH_NEXT, max_rows=5000)
self.close_operation(res.operationHandle)
cols = ('TABLE_NAME', 'TABLE_TYPE', 'REMARKS')
return HiveServerTRowSet(results.results, schema.schema).cols(cols)
def get_tables(self, database, table_names):
req = TGetTablesReq(schemaName=database, tableName=table_names)
res = self.call(self._client.GetTables, req)
results, schema = self.fetch_result(res.operationHandle, orientation=TFetchOrientation.FETCH_NEXT, max_rows=5000)
self.close_operation(res.operationHandle)
return HiveServerTRowSet(results.results, schema.schema).cols(('TABLE_NAME',))
def get_table(self, database, table_name, partition_spec=None):
req = TGetTablesReq(schemaName=database, tableName=table_name)
res = self.call(self._client.GetTables, req)
table_results, table_schema = self.fetch_result(res.operationHandle, orientation=TFetchOrientation.FETCH_NEXT)
self.close_operation(res.operationHandle)
if partition_spec:
query = 'DESCRIBE FORMATTED `%s`.`%s` PARTITION(%s)' % (database, table_name, partition_spec)
else:
query = 'DESCRIBE FORMATTED `%s`.`%s`' % (database, table_name)
(desc_results, desc_schema), operation_handle = self.execute_statement(query, max_rows=5000, orientation=TFetchOrientation.FETCH_NEXT)
self.close_operation(operation_handle)
return HiveServerTable(table_results.results, table_schema.schema, desc_results.results, desc_schema.schema)
def execute_query(self, query, max_rows=1000):
configuration = self._get_query_configuration(query)
return self.execute_query_statement(statement=query.query['query'], max_rows=max_rows, configuration=configuration)
def execute_query_statement(self, statement, max_rows=1000, configuration={}, orientation=TFetchOrientation.FETCH_FIRST):
(results, schema), operation_handle = self.execute_statement(statement=statement, max_rows=max_rows, configuration=configuration, orientation=orientation)
return HiveServerDataTable(results, schema, operation_handle, self.query_server)
def execute_async_query(self, query, statement=0):
if statement == 0:
# Impala just has settings currently
if self.query_server['server_name'] == 'beeswax':
for resource in query.get_configuration_statements():
self.execute_statement(resource.strip())
configuration = {}
if self.query_server['server_name'] == 'impala' and self.query_server['querycache_rows'] > 0:
configuration[IMPALA_RESULTSET_CACHE_SIZE] = str(self.query_server['querycache_rows'])
# The query can override the default configuration
configuration.update(self._get_query_configuration(query))
query_statement = query.get_query_statement(statement)
return self.execute_async_statement(statement=query_statement, confOverlay=configuration)
def execute_statement(self, statement, max_rows=1000, configuration={}, orientation=TFetchOrientation.FETCH_NEXT):
if self.query_server['server_name'] == 'impala' and self.query_server['QUERY_TIMEOUT_S'] > 0:
configuration['QUERY_TIMEOUT_S'] = str(self.query_server['QUERY_TIMEOUT_S'])
req = TExecuteStatementReq(statement=statement.encode('utf-8'), confOverlay=configuration)
res = self.call(self._client.ExecuteStatement, req)
return self.fetch_result(res.operationHandle, max_rows=max_rows, orientation=orientation), res.operationHandle
def execute_async_statement(self, statement, confOverlay):
if self.query_server['server_name'] == 'impala' and self.query_server['QUERY_TIMEOUT_S'] > 0:
confOverlay['QUERY_TIMEOUT_S'] = str(self.query_server['QUERY_TIMEOUT_S'])
req = TExecuteStatementReq(statement=statement.encode('utf-8'), confOverlay=confOverlay, runAsync=True)
res = self.call(self._client.ExecuteStatement, req)
return HiveServerQueryHandle(secret=res.operationHandle.operationId.secret,
guid=res.operationHandle.operationId.guid,
operation_type=res.operationHandle.operationType,
has_result_set=res.operationHandle.hasResultSet,
modified_row_count=res.operationHandle.modifiedRowCount)
def fetch_data(self, operation_handle, orientation=TFetchOrientation.FETCH_NEXT, max_rows=1000):
    # Fetch until the result is empty, due to a HS2 bug, instead of looking at hasMoreRows
results, schema = self.fetch_result(operation_handle, orientation, max_rows)
return HiveServerDataTable(results, schema, operation_handle, self.query_server)
def cancel_operation(self, operation_handle):
req = TCancelOperationReq(operationHandle=operation_handle)
return self.call(self._client.CancelOperation, req)
def close_operation(self, operation_handle):
req = TCloseOperationReq(operationHandle=operation_handle)
return self.call(self._client.CloseOperation, req)
def get_columns(self, database, table):
req = TGetColumnsReq(schemaName=database, tableName=table)
res = self.call(self._client.GetColumns, req)
res, schema = self.fetch_result(res.operationHandle, orientation=TFetchOrientation.FETCH_NEXT)
self.close_operation(res.operationHandle)
return res, schema
def fetch_result(self, operation_handle, orientation=TFetchOrientation.FETCH_FIRST, max_rows=1000):
if operation_handle.hasResultSet:
fetch_req = TFetchResultsReq(operationHandle=operation_handle, orientation=orientation, maxRows=max_rows)
res = self.call(self._client.FetchResults, fetch_req)
else:
res = TFetchResultsResp(results=TRowSet(startRowOffset=0, rows=[], columns=[]))
    if operation_handle.hasResultSet: # Fetch the result set metadata whenever there is a result set; callers rely on it for every fetch
meta_req = TGetResultSetMetadataReq(operationHandle=operation_handle)
schema = self.call(self._client.GetResultSetMetadata, meta_req)
else:
schema = None
return res, schema
def fetch_log(self, operation_handle, orientation=TFetchOrientation.FETCH_NEXT, max_rows=1000):
req = TFetchResultsReq(operationHandle=operation_handle, orientation=orientation, maxRows=max_rows, fetchType=1)
res = self.call(self._client.FetchResults, req)
if beeswax_conf.THRIFT_VERSION.get() >= 7:
lines = res.results.columns[0].stringVal.values
else:
lines = imap(lambda r: r.colVals[0].stringVal.value, res.results.rows)
return '\n'.join(lines)
def get_operation_status(self, operation_handle):
req = TGetOperationStatusReq(operationHandle=operation_handle)
return self.call(self._client.GetOperationStatus, req)
def explain(self, query):
query_statement = query.get_query_statement(0)
configuration = self._get_query_configuration(query)
return self.execute_query_statement(statement='EXPLAIN %s' % query_statement, configuration=configuration, orientation=TFetchOrientation.FETCH_NEXT)
def get_log(self, operation_handle):
try:
req = TGetLogReq(operationHandle=operation_handle)
res = self.call(self._client.GetLog, req)
return res.log
except:
LOG.exception('server does not support GetLog')
return 'Server does not support GetLog()'
def get_partitions(self, database, table_name, partition_spec=None, max_parts=None, reverse_sort=True):
table = self.get_table(database, table_name)
if max_parts is None or max_parts <= 0:
max_rows = 10000
else:
max_rows = 1000 if max_parts <= 250 else max_parts
query = 'SHOW PARTITIONS `%s`.`%s`' % (database, table_name)
if partition_spec:
query += ' PARTITION(%s)' % partition_spec
partition_table = self.execute_query_statement(query, max_rows=max_rows)
partitions = [PartitionValueCompatible(partition, table) for partition in partition_table.rows()]
if reverse_sort:
partitions.reverse()
return partitions[:max_parts]
def _get_query_configuration(self, query):
return dict([(setting['key'], setting['value']) for setting in query.settings])
class HiveServerTableCompatible(HiveServerTable):
"""Same API as Beeswax"""
def __init__(self, hive_table):
self.table = hive_table.table
self.table_schema = hive_table.table_schema
self.desc_results = hive_table.desc_results
self.desc_schema = hive_table.desc_schema
self.describe = HiveServerTTableSchema(self.desc_results, self.desc_schema).cols()
@property
def cols(self):
return [
type('Col', (object,), {
'name': col.get('col_name', '').strip() if col.get('col_name') else '',
'type': col.get('data_type', '').strip() if col.get('data_type') else '',
'comment': col.get('comment', '').strip() if col.get('comment') else ''
}) for col in HiveServerTable.cols.fget(self)
]
class ResultCompatible:
def __init__(self, data_table):
self.data_table = data_table
self.rows = data_table.rows
self.has_more = data_table.has_more
self.start_row = data_table.startRowOffset
self.ready = True
@property
def columns(self):
return self.cols()
def cols(self):
return [col.name for col in self.data_table.cols()]
class PartitionKeyCompatible:
def __init__(self, name, type, comment):
self.name = name
self.type = type
self.comment = comment
def __eq__(self, other):
return isinstance(other, PartitionKeyCompatible) and \
self.name == other.name and \
self.type == other.type and \
self.comment == other.comment
def __repr__(self):
return 'PartitionKey(name:%s, type:%s, comment:%s)' % (self.name, self.type, self.comment)
class PartitionValueCompatible:
def __init__(self, partition_row, table, properties=None):
if properties is None:
properties = {}
# Parses: ['datehour=2013022516'] or ['month=2011-07/dt=2011-07-01/hr=12']
partition = partition_row[0]
parts = partition.split('/')
self.partition_spec = ','.join(["%s='%s'" % (pv[0], pv[1]) for pv in [part.split('=') for part in parts]])
self.values = [pv[1] for pv in [part.split('=') for part in parts]]
self.sd = type('Sd', (object,), properties,)
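def _example_partition_parsing():
  # Illustrative sketch only, not part of the original module: shows how a
  # 'SHOW PARTITIONS' row such as ['month=2011-07/dt=2011-07-01/hr=12'] becomes
  # a partition spec and its bare values. The table argument is unused by the
  # constructor, so None is enough for this sketch.
  partition = PartitionValueCompatible(['month=2011-07/dt=2011-07-01/hr=12'], None)
  assert partition.partition_spec == "month='2011-07',dt='2011-07-01',hr='12'"
  assert partition.values == ['2011-07', '2011-07-01', '12']
  return partition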
class ExplainCompatible:
def __init__(self, data_table):
self.textual = '\n'.join([line[0] for line in data_table.rows()])
class ResultMetaCompatible:
def __init__(self):
self.in_tablename = True
class HiveServerClientCompatible(object):
"""Same API as Beeswax"""
def __init__(self, client):
self._client = client
self.user = client.user
self.query_server = client.query_server
def query(self, query, statement=0):
return self._client.execute_async_query(query, statement)
def get_state(self, handle):
operationHandle = handle.get_rpc_handle()
res = self._client.get_operation_status(operationHandle)
return HiveServerQueryHistory.STATE_MAP[res.operationState]
def get_operation_status(self, handle):
operationHandle = handle.get_rpc_handle()
return self._client.get_operation_status(operationHandle)
def use(self, query):
data = self._client.execute_query(query)
self._client.close_operation(data.operation_handle)
return data
def explain(self, query):
data_table = self._client.explain(query)
data = ExplainCompatible(data_table)
self._client.close_operation(data_table.operation_handle)
return data
def fetch(self, handle, start_over=False, max_rows=None):
operationHandle = handle.get_rpc_handle()
if max_rows is None:
max_rows = 1000
if start_over and not (self.query_server['server_name'] == 'impala' and self.query_server['querycache_rows'] == 0): # Backward compatibility for impala
orientation = TFetchOrientation.FETCH_FIRST
else:
orientation = TFetchOrientation.FETCH_NEXT
data_table = self._client.fetch_data(operationHandle, orientation=orientation, max_rows=max_rows)
return ResultCompatible(data_table)
def cancel_operation(self, handle):
operationHandle = handle.get_rpc_handle()
return self._client.cancel_operation(operationHandle)
def close(self, handle):
return self.close_operation(handle)
def close_operation(self, handle):
operationHandle = handle.get_rpc_handle()
return self._client.close_operation(operationHandle)
def close_session(self, session):
operationHandle = session.get_handle()
return self._client.close_session(operationHandle)
def dump_config(self):
return 'Does not exist in HS2'
def get_log(self, handle, start_over=True):
operationHandle = handle.get_rpc_handle()
if beeswax_conf.USE_GET_LOG_API.get() or self.query_server['server_name'] == 'impala':
return self._client.get_log(operationHandle)
else:
if start_over:
orientation = TFetchOrientation.FETCH_FIRST
else:
orientation = TFetchOrientation.FETCH_NEXT
return self._client.fetch_log(operationHandle, orientation=orientation, max_rows=-1)
def get_databases(self):
col = 'TABLE_SCHEM'
return [table[col] for table in self._client.get_databases()]
def get_database(self, database):
return self._client.get_database(database)
def get_tables_meta(self, database, table_names):
tables = self._client.get_tables_meta(database, table_names)
massaged_tables = []
for table in tables:
massaged_tables.append({
'name': table['TABLE_NAME'],
'comment': table['REMARKS'],
'type': table['TABLE_TYPE'].capitalize()}
)
return massaged_tables
def get_tables(self, database, table_names):
tables = [table['TABLE_NAME'] for table in self._client.get_tables(database, table_names)]
tables.sort()
return tables
def get_table(self, database, table_name, partition_spec=None):
table = self._client.get_table(database, table_name, partition_spec)
return HiveServerTableCompatible(table)
def get_columns(self, database, table):
return self._client.get_columns(database, table)
def get_default_configuration(self, *args, **kwargs):
return {}
def get_results_metadata(self, handle):
# We just need to mock
return ResultMetaCompatible()
def create_database(self, name, description): raise NotImplementedError()
def alter_table(self, dbname, tbl_name, new_tbl): raise NotImplementedError()
def open_session(self, user):
return self._client.open_session(user)
def add_partition(self, new_part): raise NotImplementedError()
def get_partition(self, *args, **kwargs): raise NotImplementedError()
def get_partitions(self, database, table_name, partition_spec, max_parts, reverse_sort=True):
return self._client.get_partitions(database, table_name, partition_spec, max_parts, reverse_sort)
def alter_partition(self, db_name, tbl_name, new_part): raise NotImplementedError()
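def _example_client_wiring(query_server, user):
  # Illustrative sketch only, not part of the original module: the Beeswax
  # compatibility facade wraps the raw HiveServer2 client roughly like this.
  # `query_server` (a config dict) and `user` (a Django user) are assumed to be
  # supplied by the caller, e.g. from the dbms layer's query server config.
  client = HiveServerClientCompatible(HiveServerClient(query_server, user))
  return client.get_databases()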
| ahmed-mahran/hue | apps/beeswax/src/beeswax/server/hive_server2_lib.py | Python | apache-2.0 | 38,037 |
from matplotlib.testing.decorators import cleanup
from unittest import TestCase
from nose_parameterized import parameterized
import os
import gzip
from pandas import read_csv
from pyfolio.utils import (to_utc, to_series)
from pyfolio.tears import (create_full_tear_sheet,
create_simple_tear_sheet,
create_returns_tear_sheet,
create_position_tear_sheet,
create_txn_tear_sheet,
create_round_trip_tear_sheet,
create_interesting_times_tear_sheet,)
class PositionsTestCase(TestCase):
__location__ = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
test_returns = read_csv(
gzip.open(
__location__ + '/test_data/test_returns.csv.gz'),
index_col=0, parse_dates=True)
test_returns = to_series(to_utc(test_returns))
test_txn = to_utc(read_csv(
gzip.open(
__location__ + '/test_data/test_txn.csv.gz'),
index_col=0, parse_dates=True))
test_pos = to_utc(read_csv(
gzip.open(__location__ + '/test_data/test_pos.csv.gz'),
index_col=0, parse_dates=True))
@parameterized.expand([({},),
({'slippage': 1},),
({'live_start_date': test_returns.index[-20]},),
({'round_trips': True},),
({'hide_positions': True},),
({'cone_std': 1},),
({'bootstrap': True},),
])
@cleanup
def test_create_full_tear_sheet_breakdown(self, kwargs):
create_full_tear_sheet(self.test_returns,
positions=self.test_pos,
transactions=self.test_txn,
benchmark_rets=self.test_returns,
**kwargs
)
@parameterized.expand([({},),
({'slippage': 1},),
({'live_start_date': test_returns.index[-20]},),
])
@cleanup
def test_create_simple_tear_sheet_breakdown(self, kwargs):
create_simple_tear_sheet(self.test_returns,
positions=self.test_pos,
transactions=self.test_txn,
**kwargs
)
@parameterized.expand([({},),
({'live_start_date':
test_returns.index[-20]},),
({'cone_std': 1},),
({'bootstrap': True},),
])
@cleanup
def test_create_returns_tear_sheet_breakdown(self, kwargs):
create_returns_tear_sheet(self.test_returns,
benchmark_rets=self.test_returns,
**kwargs
)
@parameterized.expand([({},),
({'hide_positions': True},),
({'show_and_plot_top_pos': 0},),
({'show_and_plot_top_pos': 1},),
])
@cleanup
def test_create_position_tear_sheet_breakdown(self, kwargs):
create_position_tear_sheet(self.test_returns,
self.test_pos,
**kwargs
)
@parameterized.expand([({},),
({'unadjusted_returns': test_returns},),
])
@cleanup
def test_create_txn_tear_sheet_breakdown(self, kwargs):
create_txn_tear_sheet(self.test_returns,
self.test_pos,
self.test_txn,
**kwargs
)
@parameterized.expand([({},),
({'sector_mappings': {}},),
])
@cleanup
def test_create_round_trip_tear_sheet_breakdown(self, kwargs):
create_round_trip_tear_sheet(self.test_returns,
self.test_pos,
self.test_txn,
**kwargs
)
@parameterized.expand([({},),
({'legend_loc': 1},),
])
@cleanup
def test_create_interesting_times_tear_sheet_breakdown(self,
kwargs):
create_interesting_times_tear_sheet(self.test_returns,
self.test_returns,
**kwargs
)
| quantopian/pyfolio | pyfolio/tests/test_tears.py | Python | apache-2.0 | 4,911 |
# Copyright (c) 2011-2016 Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import fcntl
import os
import json
class NullResource(object):
""" Implments the lock interface for spawn. """
def __init__(self, *args, **kwargs):
self.owned = False
def remove(self):
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, trace):
pass
def acquire(self, info):
pass
class LockFile(object):
""" Manages locking and unlocking an open file handle
can also be used as a context manager
"""
def __init__(self, fd, lock_operation=fcntl.LOCK_EX,
unlock_operation=fcntl.LOCK_UN):
self.fd = fd
self.file_name = None
if type(fd) != int:
self.fd = self.open(fd)
self.file_name = fd
self.lock_operation = lock_operation
self.unlock_operation = unlock_operation
def __enter__(self):
self.lock(self.lock_operation)
return self
def __exit__(self, exc_type, exc_value, trace):
self.unlock(self.unlock_operation)
return False
def lock(self, operation=fcntl.LOCK_EX):
fcntl.flock(self.fd, operation)
def unlock(self, operation=fcntl.LOCK_UN):
fcntl.flock(self.fd, operation)
def write(self, data):
os.lseek(self.fd, 0, os.SEEK_SET)
os.ftruncate(self.fd, 0)
os.write(self.fd, data)
os.fsync(self.fd)
def read(self):
size = os.lseek(self.fd, 0, os.SEEK_END)
os.lseek(self.fd, 0, os.SEEK_SET)
return os.read(self.fd, size)
def close(self):
try:
os.close(self.fd)
        except (TypeError, OSError):
pass
self.fd = None
def unlink(self):
self.close()
try:
os.unlink(self.file_name)
except OSError, e:
pass
def _createdir(self, file_name):
try:
dir = os.path.dirname(file_name)
os.makedirs(dir)
except OSError, e:
# ignore if already exists
if e.errno != errno.EEXIST:
raise
def open(self, file_name):
for i in range(0, 2):
try:
# Attempt to create the file
return os.open(file_name, os.O_RDWR | os.O_CREAT)
except OSError, e:
# No such file or directory
if e.errno == errno.ENOENT:
# create the dir and try again
self._createdir(file_name)
continue
# Unknown error
raise
raise RuntimeError("failed to create '%s'" % file_name)
class JsonLockFile(LockFile):
""" Manages a lock file that contains json """
def update(self, info):
data = self.read()
data.update(info)
self.write(data)
def get(self, key, default=None):
try:
data = self.read()
return data[key]
except KeyError:
return default
def write(self, data):
super(JsonLockFile, self).write(json.dumps(data))
def read(self):
try:
return json.loads(super(JsonLockFile, self).read())
except ValueError, e:
return {}
class ResourceFile(JsonLockFile):
""" Manages ownership of a resource file,
can also be used as a context manager
"""
def __init__(self, file_name):
self.file_name = file_name
self.owned = False
self.fd = None
def __enter__(self):
self.fd = self.open(self.file_name)
super(ResourceFile, self).lock()
return self
def __exit__(self, exc_type, exc_value, trace):
super(ResourceFile, self).unlock()
self.close()
return False
def used(self):
""" Returns true if the resource file is in use by someone """
info = self.read()
# If pid is alive, the volume is owned by someone else
if 'pid' in info and self.alive(info['pid']):
return info
return False
def alive(self, pid):
try:
os.kill(pid, 0)
return True
except OSError, e:
return False
def acquire(self, info):
""" Acquire ownership of the file by writing our pid information """
self.update(info)
if 'pid' in info:
# We own the resource
self.owned = True
def remove(self):
if self.owned:
self.unlink()
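# A minimal usage sketch of the resource locking above, assuming a writable
# path under /tmp; the path and the `demo` name are illustrative only.
if __name__ == '__main__':
    demo = ResourceFile('/tmp/lunr_lock_demo/resource.json')
    with demo as resource:
        # used() returns the stored info while another live process owns the
        # file, or False when the resource is free to claim.
        if not resource.used():
            # Record our pid; owned becomes True so remove() will unlink.
            resource.acquire({'pid': os.getpid()})
    # Drop the claim and delete the lock file once finished.
    demo.remove()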
| rackerlabs/lunr | lunr/common/lock.py | Python | apache-2.0 | 5,070 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from libcloud.utils.py3 import httplib
from libcloud.compute.drivers.hostvirtual import HostVirtualNodeDriver
from libcloud.compute.types import NodeState
from libcloud.compute.base import NodeAuthPassword
from libcloud.test import MockHttp
from libcloud.test.file_fixtures import ComputeFileFixtures
from libcloud.test.secrets import HOSTVIRTUAL_PARAMS
class HostVirtualTest(unittest.TestCase):
def setUp(self):
HostVirtualNodeDriver.connectionCls.conn_class = HostVirtualMockHttp
self.driver = HostVirtualNodeDriver(*HOSTVIRTUAL_PARAMS)
def test_list_nodes(self):
nodes = self.driver.list_nodes()
self.assertEqual(len(nodes), 4)
self.assertEqual(len(nodes[0].public_ips), 1)
self.assertEqual(len(nodes[1].public_ips), 1)
self.assertEqual(len(nodes[0].private_ips), 0)
self.assertEqual(len(nodes[1].private_ips), 0)
self.assertTrue("208.111.39.118" in nodes[1].public_ips)
self.assertTrue("208.111.45.250" in nodes[0].public_ips)
self.assertEqual(nodes[3].state, NodeState.RUNNING)
self.assertEqual(nodes[1].state, NodeState.TERMINATED)
def test_list_sizes(self):
sizes = self.driver.list_sizes()
self.assertEqual(len(sizes), 14)
self.assertEqual(sizes[0].id, "31")
self.assertEqual(sizes[4].id, "71")
self.assertEqual(sizes[2].ram, "512MB")
self.assertEqual(sizes[2].disk, "20GB")
self.assertEqual(sizes[3].bandwidth, "600GB")
self.assertEqual(sizes[1].price, "15.00")
def test_list_images(self):
images = self.driver.list_images()
self.assertEqual(len(images), 8)
self.assertEqual(images[0].id, "1739")
self.assertEqual(images[0].name, "Gentoo 2012 (0619) i386")
def test_list_locations(self):
locations = self.driver.list_locations()
self.assertEqual(locations[0].id, "3")
self.assertEqual(locations[0].name, "SJC - San Jose, CA")
self.assertEqual(locations[1].id, "13")
self.assertEqual(locations[1].name, "IAD - Reston, VA")
def test_reboot_node(self):
node = self.driver.list_nodes()[0]
self.assertTrue(self.driver.reboot_node(node))
def test_ex_get_node(self):
node = self.driver.ex_get_node(node_id="62291")
self.assertEqual(node.id, "62291")
self.assertEqual(node.name, "server1.vr-cluster.org")
self.assertEqual(node.state, NodeState.TERMINATED)
self.assertTrue("208.111.45.250" in node.public_ips)
def test_ex_list_packages(self):
pkgs = self.driver.ex_list_packages()
self.assertEqual(len(pkgs), 3)
self.assertEqual(pkgs[1]["mbpkgid"], "176018")
self.assertEqual(pkgs[2]["package_status"], "Suspended")
def test_ex_order_package(self):
sizes = self.driver.list_sizes()
pkg = self.driver.ex_order_package(sizes[0])
self.assertEqual(pkg["id"], "62291")
def test_ex_cancel_package(self):
node = self.driver.list_nodes()[0]
result = self.driver.ex_cancel_package(node)
self.assertEqual(result["status"], "success")
def test_ex_unlink_package(self):
node = self.driver.list_nodes()[0]
result = self.driver.ex_unlink_package(node)
self.assertEqual(result["status"], "success")
def test_ex_stop_node(self):
node = self.driver.list_nodes()[0]
self.assertTrue(self.driver.ex_stop_node(node))
def test_ex_start_node(self):
node = self.driver.list_nodes()[0]
self.assertTrue(self.driver.ex_start_node(node))
def test_destroy_node(self):
node = self.driver.list_nodes()[0]
self.assertTrue(self.driver.destroy_node(node))
def test_ex_delete_node(self):
node = self.driver.list_nodes()[0]
self.assertTrue(self.driver.ex_delete_node(node))
def test_create_node(self):
auth = NodeAuthPassword("vr!@#hosted#@!")
size = self.driver.list_sizes()[0]
image = self.driver.list_images()[0]
node = self.driver.create_node(
name="test.com", image=image, size=size, auth=auth
)
self.assertEqual("62291", node.id)
self.assertEqual("server1.vr-cluster.org", node.name)
def test_ex_provision_node(self):
node = self.driver.list_nodes()[0]
auth = NodeAuthPassword("vr!@#hosted#@!")
self.assertTrue(self.driver.ex_provision_node(node=node, auth=auth))
def test_create_node_in_location(self):
auth = NodeAuthPassword("vr!@#hosted#@!")
size = self.driver.list_sizes()[0]
image = self.driver.list_images()[0]
location = self.driver.list_locations()[1]
node = self.driver.create_node(
name="test.com", image=image, size=size, auth=auth, location=location
)
self.assertEqual("62291", node.id)
self.assertEqual("server1.vr-cluster.org", node.name)
class HostVirtualMockHttp(MockHttp):
fixtures = ComputeFileFixtures("hostvirtual")
def _cloud_servers(self, method, url, body, headers):
body = self.fixtures.load("list_nodes.json")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _cloud_server(self, method, url, body, headers):
body = self.fixtures.load("get_node.json")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _cloud_packages(self, method, url, body, headers):
body = self.fixtures.load("list_packages.json")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _cloud_sizes(self, method, url, body, headers):
body = self.fixtures.load("list_sizes.json")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _cloud_images(self, method, url, body, headers):
body = self.fixtures.load("list_images.json")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _cloud_locations(self, method, url, body, headers):
body = self.fixtures.load("list_locations.json")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _cloud_server_delete(self, method, url, body, headers):
body = self.fixtures.load("cancel_package.json")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _cloud_server_reboot(self, method, url, body, headers):
body = self.fixtures.load("node_reboot.json")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _cloud_server_shutdown(self, method, url, body, headers):
body = self.fixtures.load("node_stop.json")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _cloud_server_start(self, method, url, body, headers):
body = self.fixtures.load("node_start.json")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _cloud_server_build(self, method, url, body, headers):
body = self.fixtures.load("order_package.json")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _cloud_buy(self, method, url, body, headers):
body = self.fixtures.load("order_package.json")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _cloud_cancel(self, method, url, body, headers):
body = self.fixtures.load("cancel_package.json")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _cloud_unlink(self, method, url, body, headers):
body = self.fixtures.load("unlink_package.json")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if __name__ == "__main__":
sys.exit(unittest.main())
# vim:autoindent tabstop=4 shiftwidth=4 expandtab softtabstop=4 filetype=python
| apache/libcloud | libcloud/test/compute/test_hostvirtual.py | Python | apache-2.0 | 8,559 |
# Copyright 2021 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
PYTHON_VERSION_COMPATIBILITY = 'PY2+3'
DEPS = [
'cq',
'properties',
'step',
]
def RunSteps(api):
api.step('show properties', [])
api.step.active_result.presentation.logs['result'] = [
'mode: %s' % (api.cq.run_mode,),
]
def GenTests(api):
yield api.test('dry') + api.cq(run_mode=api.cq.DRY_RUN)
yield api.test('quick-dry') + api.cq(run_mode=api.cq.QUICK_DRY_RUN)
yield api.test('full') + api.cq(run_mode=api.cq.FULL_RUN)
yield api.test('legacy-full') + api.properties(**{
'$recipe_engine/cq': {'dry_run': False},
})
yield api.test('legacy-dry') + api.properties(**{
'$recipe_engine/cq': {'dry_run': True},
})
| luci/recipes-py | recipe_modules/cq/tests/mode_of_run.py | Python | apache-2.0 | 825 |
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import config
from tempest import exceptions
from tempest import test
import time
CONF = config.CONF
class AttachInterfacesTestJSON(base.BaseV2ComputeTest):
@classmethod
def resource_setup(cls):
if not CONF.service_available.neutron:
raise cls.skipException("Neutron is required")
if not CONF.compute_feature_enabled.interface_attach:
raise cls.skipException("Interface attachment is not available.")
# This test class requires network and subnet
cls.set_network_resources(network=True, subnet=True)
super(AttachInterfacesTestJSON, cls).resource_setup()
cls.client = cls.os.interfaces_client
def _check_interface(self, iface, port_id=None, network_id=None,
fixed_ip=None):
self.assertIn('port_state', iface)
if port_id:
self.assertEqual(iface['port_id'], port_id)
if network_id:
self.assertEqual(iface['net_id'], network_id)
if fixed_ip:
self.assertEqual(iface['fixed_ips'][0]['ip_address'], fixed_ip)
def _create_server_get_interfaces(self):
resp, server = self.create_test_server(wait_until='ACTIVE')
resp, ifs = self.client.list_interfaces(server['id'])
self.assertEqual(200, resp.status)
resp, body = self.client.wait_for_interface_status(
server['id'], ifs[0]['port_id'], 'ACTIVE')
ifs[0]['port_state'] = body['port_state']
return server, ifs
def _test_create_interface(self, server):
resp, iface = self.client.create_interface(server['id'])
self.assertEqual(200, resp.status)
resp, iface = self.client.wait_for_interface_status(
server['id'], iface['port_id'], 'ACTIVE')
self._check_interface(iface)
return iface
def _test_create_interface_by_network_id(self, server, ifs):
network_id = ifs[0]['net_id']
resp, iface = self.client.create_interface(server['id'],
network_id=network_id)
self.assertEqual(200, resp.status)
resp, iface = self.client.wait_for_interface_status(
server['id'], iface['port_id'], 'ACTIVE')
self._check_interface(iface, network_id=network_id)
return iface
def _test_show_interface(self, server, ifs):
iface = ifs[0]
resp, _iface = self.client.show_interface(server['id'],
iface['port_id'])
self.assertEqual(200, resp.status)
self.assertEqual(iface, _iface)
def _test_delete_interface(self, server, ifs):
# NOTE(danms): delete not the first or last, but one in the middle
iface = ifs[1]
resp, _ = self.client.delete_interface(server['id'], iface['port_id'])
self.assertEqual(202, resp.status)
_ifs = self.client.list_interfaces(server['id'])[1]
start = int(time.time())
while len(ifs) == len(_ifs):
time.sleep(self.build_interval)
_ifs = self.client.list_interfaces(server['id'])[1]
timed_out = int(time.time()) - start >= self.build_timeout
if len(ifs) == len(_ifs) and timed_out:
message = ('Failed to delete interface within '
'the required time: %s sec.' % self.build_timeout)
raise exceptions.TimeoutException(message)
self.assertNotIn(iface['port_id'], [i['port_id'] for i in _ifs])
return _ifs
def _compare_iface_list(self, list1, list2):
# NOTE(danms): port_state will likely have changed, so just
# confirm the port_ids are the same at least
list1 = [x['port_id'] for x in list1]
list2 = [x['port_id'] for x in list2]
self.assertEqual(sorted(list1), sorted(list2))
@test.attr(type='smoke')
@test.services('network')
def test_create_list_show_delete_interfaces(self):
server, ifs = self._create_server_get_interfaces()
interface_count = len(ifs)
self.assertTrue(interface_count > 0)
self._check_interface(ifs[0])
iface = self._test_create_interface(server)
ifs.append(iface)
iface = self._test_create_interface_by_network_id(server, ifs)
ifs.append(iface)
resp, _ifs = self.client.list_interfaces(server['id'])
self._compare_iface_list(ifs, _ifs)
self._test_show_interface(server, ifs)
_ifs = self._test_delete_interface(server, ifs)
self.assertEqual(len(ifs) - 1, len(_ifs))
@test.attr(type='smoke')
@test.services('network')
def test_add_remove_fixed_ip(self):
# Add and Remove the fixed IP to server.
server, ifs = self._create_server_get_interfaces()
interface_count = len(ifs)
self.assertTrue(interface_count > 0)
self._check_interface(ifs[0])
network_id = ifs[0]['net_id']
resp, body = self.client.add_fixed_ip(server['id'],
network_id)
self.assertEqual(202, resp.status)
# Remove the fixed IP from server.
server_resp, server_detail = self.os.servers_client.get_server(
server['id'])
# Get the Fixed IP from server.
fixed_ip = None
for ip_set in server_detail['addresses']:
for ip in server_detail['addresses'][ip_set]:
if ip['OS-EXT-IPS:type'] == 'fixed':
fixed_ip = ip['addr']
break
if fixed_ip is not None:
break
resp, body = self.client.remove_fixed_ip(server['id'],
fixed_ip)
self.assertEqual(202, resp.status)
class AttachInterfacesTestXML(AttachInterfacesTestJSON):
_interface = 'xml'
| queria/my-tempest | tempest/api/compute/servers/test_attach_interfaces.py | Python | apache-2.0 | 6,514 |
from django.conf import settings
from django.utils.module_loading import import_string
from .tracing import DjangoTracing
from .tracing import initialize_global_tracer
try:
# Django >= 1.10
from django.utils.deprecation import MiddlewareMixin
except ImportError:
# Not required for Django <= 1.9, see:
# https://docs.djangoproject.com/en/1.10/topics/http/middleware/#upgrading-pre-django-1-10-style-middleware
MiddlewareMixin = object
class OpenTracingMiddleware(MiddlewareMixin):
'''
__init__() is only called once, no arguments, when the Web server
responds to the first request
'''
def __init__(self, get_response=None):
'''
TODO: ANSWER Qs
- Is it better to place all tracing info in the settings file,
or to require a tracing.py file with configurations?
- Also, better to have try/catch with empty tracer or just fail
fast if there's no tracer specified
'''
self._init_tracing()
self._tracing = settings.OPENTRACING_TRACING
self.get_response = get_response
def _init_tracing(self):
if getattr(settings, 'OPENTRACING_TRACER', None) is not None:
# Backwards compatibility.
tracing = settings.OPENTRACING_TRACER
elif getattr(settings, 'OPENTRACING_TRACING', None) is not None:
tracing = settings.OPENTRACING_TRACING
elif getattr(settings, 'OPENTRACING_TRACER_CALLABLE',
None) is not None:
tracer_callable = settings.OPENTRACING_TRACER_CALLABLE
tracer_parameters = getattr(settings,
'OPENTRACING_TRACER_PARAMETERS',
{})
if not callable(tracer_callable):
tracer_callable = import_string(tracer_callable)
tracer = tracer_callable(**tracer_parameters)
tracing = DjangoTracing(tracer)
else:
# Rely on the global Tracer.
tracing = DjangoTracing()
# trace_all defaults to True when used as middleware.
tracing._trace_all = getattr(settings, 'OPENTRACING_TRACE_ALL', True)
# set the start_span_cb hook, if any.
tracing._start_span_cb = getattr(settings, 'OPENTRACING_START_SPAN_CB',
None)
# Normalize the tracing field in settings, including the old field.
settings.OPENTRACING_TRACING = tracing
settings.OPENTRACING_TRACER = tracing
# Potentially set the global Tracer (unless we rely on it already).
if getattr(settings, 'OPENTRACING_SET_GLOBAL_TRACER', False):
initialize_global_tracer(tracing)
def process_view(self, request, view_func, view_args, view_kwargs):
# determine whether this middleware should be applied
# NOTE: if tracing is on but not tracing all requests, then the tracing
# occurs through decorator functions rather than middleware
if not self._tracing._trace_all:
return None
if hasattr(settings, 'OPENTRACING_TRACED_ATTRIBUTES'):
traced_attributes = getattr(settings,
'OPENTRACING_TRACED_ATTRIBUTES')
else:
traced_attributes = []
self._tracing._apply_tracing(request, view_func, traced_attributes)
def process_exception(self, request, exception):
self._tracing._finish_tracing(request, error=exception)
def process_response(self, request, response):
self._tracing._finish_tracing(request, response=response)
return response
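# A hedged settings sketch: _init_tracing() above resolves its DjangoTracing
# from an explicit OPENTRACING_TRACER / OPENTRACING_TRACING object, then an
# OPENTRACING_TRACER_CALLABLE (plus optional OPENTRACING_TRACER_PARAMETERS),
# and finally the global tracer. A settings.py fragment for the callable path
# could look like the lines below; the dotted path and parameter values are
# illustrative assumptions only.
#
# OPENTRACING_TRACE_ALL = True
# OPENTRACING_TRACED_ATTRIBUTES = ['path', 'method']
# OPENTRACING_TRACER_CALLABLE = 'myproject.tracing.make_tracer'
# OPENTRACING_TRACER_PARAMETERS = {'service_name': 'my-django-service'}
# OPENTRACING_SET_GLOBAL_TRACER = True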
| kawamon/hue | desktop/core/ext-py/django_opentracing-1.1.0/django_opentracing/middleware.py | Python | apache-2.0 | 3,646 |
config = {
"interfaces": {
"google.bigtable.v2.Bigtable": {
"retry_codes": {
"idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"],
"non_idempotent": [],
},
"retry_params": {
"default": {
"initial_retry_delay_millis": 100,
"retry_delay_multiplier": 1.3,
"max_retry_delay_millis": 60000,
"initial_rpc_timeout_millis": 20000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 20000,
"total_timeout_millis": 600000,
},
"streaming": {
"initial_retry_delay_millis": 100,
"retry_delay_multiplier": 1.3,
"max_retry_delay_millis": 60000,
"initial_rpc_timeout_millis": 20000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 20000,
"total_timeout_millis": 3600000,
},
},
"methods": {
"ReadRows": {
"timeout_millis": 3600000,
"retry_codes_name": "idempotent",
"retry_params_name": "streaming",
},
"SampleRowKeys": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
"MutateRow": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
"MutateRows": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
"CheckAndMutateRow": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default",
},
"ReadModifyWriteRow": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default",
},
},
}
}
}
| dhermes/google-cloud-python | bigtable/google/cloud/bigtable_v2/gapic/bigtable_client_config.py | Python | apache-2.0 | 2,407 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-09-06 01:35
from __future__ import unicode_literals
from django.db import migrations, models
def load_settings(apps, schema_editor):
Setting = apps.get_model("climate_data", "Setting")
Setting(
name="receiving_data",
value="0"
).save()
class Migration(migrations.Migration):
dependencies = [
('climate_data', '0013_setting'),
]
operations = [
migrations.RunPython(load_settings)
]
| qubs/data-centre | climate_data/migrations/0014_auto_20160906_0135.py | Python | apache-2.0 | 506 |
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import logging
import uuid
import six
from stevedore import named
from oslo.config import cfg
from oslo.messaging import serializer as msg_serializer
from oslo.utils import timeutils
_notifier_opts = [
cfg.MultiStrOpt('notification_driver',
default=[],
help='Driver or drivers to handle sending notifications.'),
cfg.ListOpt('notification_topics',
default=['notifications', ],
deprecated_name='topics',
deprecated_group='rpc_notifier2',
help='AMQP topic used for OpenStack notifications.'),
]
_LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class _Driver(object):
def __init__(self, conf, topics, transport):
self.conf = conf
self.topics = topics
self.transport = transport
@abc.abstractmethod
def notify(self, ctxt, msg, priority, retry):
pass
class Notifier(object):
"""Send notification messages.
The Notifier class is used for sending notification messages over a
messaging transport or other means.
Notification messages follow the following format::
{'message_id': six.text_type(uuid.uuid4()),
'publisher_id': 'compute.host1',
'timestamp': timeutils.utcnow(),
'priority': 'WARN',
'event_type': 'compute.create_instance',
'payload': {'instance_id': 12, ... }}
A Notifier object can be instantiated with a transport object and a
publisher ID:
notifier = messaging.Notifier(get_transport(CONF), 'compute')
and notifications are sent via drivers chosen with the notification_driver
config option and on the topics chosen with the notification_topics config
option.
Alternatively, a Notifier object can be instantiated with a specific
driver or topic::
notifier = notifier.Notifier(RPC_TRANSPORT,
'compute.host',
driver='messaging',
topic='notifications')
Notifier objects are relatively expensive to instantiate (mostly the cost
of loading notification drivers), so it is possible to specialize a given
Notifier object with a different publisher id using the prepare() method::
notifier = notifier.prepare(publisher_id='compute')
notifier.info(ctxt, event_type, payload)
"""
def __init__(self, transport, publisher_id=None,
driver=None, topic=None,
serializer=None, retry=None):
"""Construct a Notifier object.
:param transport: the transport to use for sending messages
:type transport: oslo.messaging.Transport
:param publisher_id: field in notifications sent, for example
'compute.host1'
:type publisher_id: str
:param driver: a driver to lookup from oslo.messaging.notify.drivers
:type driver: str
:param topic: the topic which to send messages on
:type topic: str
:param serializer: an optional entity serializer
:type serializer: Serializer
        :param retry: a connection retry configuration
None or -1 means to retry forever
0 means no retry
N means N retries
:type retry: int
"""
transport.conf.register_opts(_notifier_opts)
self.transport = transport
self.publisher_id = publisher_id
self.retry = retry
self._driver_names = ([driver] if driver is not None
else transport.conf.notification_driver)
self._topics = ([topic] if topic is not None
else transport.conf.notification_topics)
self._serializer = serializer or msg_serializer.NoOpSerializer()
self._driver_mgr = named.NamedExtensionManager(
'oslo.messaging.notify.drivers',
names=self._driver_names,
invoke_on_load=True,
invoke_args=[transport.conf],
invoke_kwds={
'topics': self._topics,
'transport': self.transport,
}
)
_marker = object()
def prepare(self, publisher_id=_marker, retry=_marker):
"""Return a specialized Notifier instance.
Returns a new Notifier instance with the supplied publisher_id. Allows
sending notifications from multiple publisher_ids without the overhead
of notification driver loading.
:param publisher_id: field in notifications sent, for example
'compute.host1'
:type publisher_id: str
        :param retry: a connection retry configuration
None or -1 means to retry forever
0 means no retry
N means N retries
:type retry: int
"""
return _SubNotifier._prepare(self, publisher_id, retry=retry)
def _notify(self, ctxt, event_type, payload, priority, publisher_id=None,
retry=None):
payload = self._serializer.serialize_entity(ctxt, payload)
ctxt = self._serializer.serialize_context(ctxt)
msg = dict(message_id=six.text_type(uuid.uuid4()),
publisher_id=publisher_id or self.publisher_id,
event_type=event_type,
priority=priority,
payload=payload,
timestamp=six.text_type(timeutils.utcnow()))
def do_notify(ext):
try:
ext.obj.notify(ctxt, msg, priority, retry or self.retry)
except Exception as e:
_LOG.exception("Problem '%(e)s' attempting to send to "
"notification system. Payload=%(payload)s",
dict(e=e, payload=payload))
if self._driver_mgr.extensions:
self._driver_mgr.map(do_notify)
def audit(self, ctxt, event_type, payload):
"""Send a notification at audit level.
:param ctxt: a request context dict
:type ctxt: dict
:param event_type: describes the event, for example
'compute.create_instance'
:type event_type: str
:param payload: the notification payload
:type payload: dict
:raises: MessageDeliveryFailure
"""
self._notify(ctxt, event_type, payload, 'AUDIT')
def debug(self, ctxt, event_type, payload):
"""Send a notification at debug level.
:param ctxt: a request context dict
:type ctxt: dict
:param event_type: describes the event, for example
'compute.create_instance'
:type event_type: str
:param payload: the notification payload
:type payload: dict
:raises: MessageDeliveryFailure
"""
self._notify(ctxt, event_type, payload, 'DEBUG')
def info(self, ctxt, event_type, payload):
"""Send a notification at info level.
:param ctxt: a request context dict
:type ctxt: dict
:param event_type: describes the event, for example
'compute.create_instance'
:type event_type: str
:param payload: the notification payload
:type payload: dict
:raises: MessageDeliveryFailure
"""
self._notify(ctxt, event_type, payload, 'INFO')
def warn(self, ctxt, event_type, payload):
"""Send a notification at warning level.
:param ctxt: a request context dict
:type ctxt: dict
:param event_type: describes the event, for example
'compute.create_instance'
:type event_type: str
:param payload: the notification payload
:type payload: dict
:raises: MessageDeliveryFailure
"""
self._notify(ctxt, event_type, payload, 'WARN')
warning = warn
def error(self, ctxt, event_type, payload):
"""Send a notification at error level.
:param ctxt: a request context dict
:type ctxt: dict
:param event_type: describes the event, for example
'compute.create_instance'
:type event_type: str
:param payload: the notification payload
:type payload: dict
:raises: MessageDeliveryFailure
"""
self._notify(ctxt, event_type, payload, 'ERROR')
def critical(self, ctxt, event_type, payload):
"""Send a notification at critical level.
:param ctxt: a request context dict
:type ctxt: dict
:param event_type: describes the event, for example
'compute.create_instance'
:type event_type: str
:param payload: the notification payload
:type payload: dict
:raises: MessageDeliveryFailure
"""
self._notify(ctxt, event_type, payload, 'CRITICAL')
def sample(self, ctxt, event_type, payload):
"""Send a notification at sample level.
Sample notifications are for high-frequency events
that typically contain small payloads. eg: "CPU = 70%"
Not all drivers support the sample level
(log, for example) so these could be dropped.
:param ctxt: a request context dict
:type ctxt: dict
:param event_type: describes the event, for example
'compute.create_instance'
:type event_type: str
:param payload: the notification payload
:type payload: dict
:raises: MessageDeliveryFailure
"""
self._notify(ctxt, event_type, payload, 'SAMPLE')
class _SubNotifier(Notifier):
_marker = Notifier._marker
def __init__(self, base, publisher_id, retry):
self._base = base
self.transport = base.transport
self.publisher_id = publisher_id
self.retry = retry
self._serializer = self._base._serializer
self._driver_mgr = self._base._driver_mgr
def _notify(self, ctxt, event_type, payload, priority):
super(_SubNotifier, self)._notify(ctxt, event_type, payload, priority)
@classmethod
def _prepare(cls, base, publisher_id=_marker, retry=_marker):
if publisher_id is cls._marker:
publisher_id = base.publisher_id
if retry is cls._marker:
retry = base.retry
return cls(base, publisher_id, retry=retry)
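# A hedged usage sketch tying together the pieces described in the Notifier
# docstring; the transport construction and the payload values shown are
# illustrative assumptions only.
#
# from oslo import messaging
# transport = messaging.get_transport(cfg.CONF)
# notifier = Notifier(transport, publisher_id='compute.host1',
#                     driver='messaging', topic='notifications')
# compute = notifier.prepare(publisher_id='compute')
# # retry semantics: None or -1 retries forever, 0 never, N retries N times.
# compute.error({}, 'compute.create_instance', {'instance_id': 12})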
| eayunstack/oslo.messaging | oslo/messaging/notify/notifier.py | Python | apache-2.0 | 11,163 |
#!/usr/bin/env python
# Copyright (c) 2015 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""pylint error checking."""
from __future__ import print_function
import json
import os
import re
import sys
from pylint import lint
from pylint.reporters import text
from six.moves import cStringIO as StringIO
# enabled checks
# http://pylint-messages.wikidot.com/all-codes
ENABLED_CODES = (
# refactor category
"R0801", "R0911", "R0912", "R0913", "R0914", "R0915",
# warning category
"W0612", "W0613", "W0703",
# convention category
"C1001")
KNOWN_PYLINT_EXCEPTIONS_FILE = "tools/pylint_exceptions"
class LintOutput(object):
_cached_filename = None
_cached_content = None
def __init__(self, filename, lineno, line_content, code, message,
lintoutput):
self.filename = filename
self.lineno = lineno
self.line_content = line_content
self.code = code
self.message = message
self.lintoutput = lintoutput
@classmethod
def get_duplicate_code_location(cls, remaining_lines):
module, lineno = remaining_lines.pop(0)[2:].split(":")
filename = module.replace(".", os.sep) + ".py"
return filename, int(lineno)
@classmethod
def get_line_content(cls, filename, lineno):
if cls._cached_filename != filename:
with open(filename) as f:
cls._cached_content = list(f.readlines())
cls._cached_filename = filename
# find first non-empty line
lineno -= 1
while True:
line_content = cls._cached_content[lineno].rstrip()
            lineno += 1
if line_content:
return line_content
@classmethod
def from_line(cls, line, remaining_lines):
m = re.search(r"(\S+):(\d+): \[(\S+)(, \S*)?] (.*)", line)
if not m:
return None
matched = m.groups()
filename, lineno, code, message = (matched[0], int(matched[1]),
matched[2], matched[-1])
# duplicate code output needs special handling
if "duplicate-code" in code:
filename, lineno = cls.get_duplicate_code_location(remaining_lines)
# fixes incorrectly reported file path
line = line.replace(matched[0], filename)
line_content = cls.get_line_content(filename, lineno)
return cls(filename, lineno, line_content, code, message,
line.rstrip())
@classmethod
def from_msg_to_dict(cls, msg):
"""From the output of pylint msg, to a dict, where each key
is a unique error identifier, value is a list of LintOutput
"""
result = {}
lines = msg.splitlines()
while lines:
line = lines.pop(0)
obj = cls.from_line(line, lines)
if not obj:
continue
key = obj.key()
if key not in result:
result[key] = []
result[key].append(obj)
return result
def key(self):
return self.message, self.line_content.strip()
def json(self):
return json.dumps(self.__dict__)
def review_str(self):
return ("File %(filename)s\nLine %(lineno)d:%(line_content)s\n"
"%(code)s: %(message)s" % self.__dict__)
class ErrorKeys(object):
@classmethod
def print_json(cls, errors, output=sys.stdout):
print("# automatically generated by tools/lintstack.py", file=output)
for i in sorted(errors.keys()):
print(json.dumps(i), file=output)
@classmethod
def from_file(cls, filename):
keys = set()
for line in open(filename):
if line and line[0] != "#":
d = json.loads(line)
keys.add(tuple(d))
return keys
def run_pylint():
buff = StringIO()
reporter = text.ParseableTextReporter(output=buff)
args = ["-rn", "--disable=all", "--enable=" + ",".join(ENABLED_CODES),"murano"]
lint.Run(args, reporter=reporter, exit=False)
val = buff.getvalue()
buff.close()
return val
def generate_error_keys(msg=None):
print("Generating", KNOWN_PYLINT_EXCEPTIONS_FILE)
if msg is None:
msg = run_pylint()
errors = LintOutput.from_msg_to_dict(msg)
with open(KNOWN_PYLINT_EXCEPTIONS_FILE, "w") as f:
ErrorKeys.print_json(errors, output=f)
def validate(newmsg=None):
print("Loading", KNOWN_PYLINT_EXCEPTIONS_FILE)
known = ErrorKeys.from_file(KNOWN_PYLINT_EXCEPTIONS_FILE)
if newmsg is None:
print("Running pylint. Be patient...")
newmsg = run_pylint()
errors = LintOutput.from_msg_to_dict(newmsg)
print()
print("Unique errors reported by pylint: was %d, now %d."
% (len(known), len(errors)))
passed = True
for err_key, err_list in errors.items():
for err in err_list:
if err_key not in known:
print()
print(err.lintoutput)
print(err.review_str())
passed = False
if passed:
print("Congrats! pylint check passed.")
redundant = known - set(errors.keys())
if redundant:
print("Extra credit: some known pylint exceptions disappeared.")
for i in sorted(redundant):
print(json.dumps(i))
print("Consider regenerating the exception file if you will.")
else:
print()
print("Please fix the errors above. If you believe they are false "
"positives, run 'tools/lintstack.py generate' to overwrite.")
sys.exit(1)
def usage():
print("""Usage: tools/lintstack.py [generate|validate]
To generate pylint_exceptions file: tools/lintstack.py generate
To validate the current commit: tools/lintstack.py
""")
def main():
option = "validate"
if len(sys.argv) > 1:
option = sys.argv[1]
if option == "generate":
generate_error_keys()
elif option == "validate":
validate()
else:
usage()
if __name__ == "__main__":
main()
| chenyujie/hybrid-murano | tools/lintstack.py | Python | apache-2.0 | 6,700 |
# Copyright 2013 Josh Durgin
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from lxml import etree
import webob
from cinder.api import extensions
from cinder.api.v1 import volumes
from cinder import context
from cinder import db
from cinder import exception
from cinder import flags
from cinder import test
from cinder.tests.api import fakes
from cinder.tests.api.v2 import stubs
from cinder.tests.image import fake as fake_image
from cinder.volume import api as volume_api
FLAGS = flags.FLAGS
NS = '{http://docs.openstack.org/volume/api/v1}'
TEST_SNAPSHOT_UUID = '00000000-0000-0000-0000-000000000001'
def stub_snapshot_get(self, context, snapshot_id):
if snapshot_id != TEST_SNAPSHOT_UUID:
raise exception.NotFound
return {'id': snapshot_id,
'volume_id': 12,
'status': 'available',
'volume_size': 100,
'created_at': None,
'display_name': 'Default name',
'display_description': 'Default description', }
class VolumeApiTest(test.TestCase):
def setUp(self):
super(VolumeApiTest, self).setUp()
self.ext_mgr = extensions.ExtensionManager()
self.ext_mgr.extensions = {}
fake_image.stub_out_image_service(self.stubs)
self.controller = volumes.VolumeController(self.ext_mgr)
self.stubs.Set(db, 'volume_get_all', stubs.stub_volume_get_all)
self.stubs.Set(db, 'volume_get_all_by_project',
stubs.stub_volume_get_all_by_project)
self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
self.stubs.Set(volume_api.API, 'delete', stubs.stub_volume_delete)
def test_volume_create(self):
self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
vol = {"size": 100,
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "zone1:host1"}
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v1/volumes')
res_dict = self.controller.create(req, body)
expected = {'volume': {'status': 'fakestatus',
'display_description': 'Volume Test Desc',
'availability_zone': 'zone1:host1',
'display_name': 'Volume Test Name',
'attachments': [{'device': '/',
'server_id': 'fakeuuid',
'id': '1',
'volume_id': '1'}],
'bootable': 'false',
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
'metadata': {},
'id': '1',
'created_at': datetime.datetime(1, 1, 1,
1, 1, 1),
'size': 100}}
self.assertEqual(res_dict, expected)
def test_volume_create_with_type(self):
vol_type = FLAGS.default_volume_type
db.volume_type_create(context.get_admin_context(),
dict(name=vol_type, extra_specs={}))
db_vol_type = db.volume_type_get_by_name(context.get_admin_context(),
vol_type)
vol = {"size": 100,
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "zone1:host1",
"volume_type": db_vol_type['name'], }
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v1/volumes')
res_dict = self.controller.create(req, body)
self.assertEquals(res_dict['volume']['volume_type'],
db_vol_type['name'])
def test_volume_creation_fails_with_bad_size(self):
vol = {"size": '',
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "zone1:host1"}
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v1/volumes')
self.assertRaises(exception.InvalidInput,
self.controller.create,
req,
body)
def test_volume_create_with_image_id(self):
self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
self.ext_mgr.extensions = {'os-image-create': 'fake'}
test_id = "c905cedb-7281-47e4-8a62-f26bc5fc4c77"
vol = {"size": '1',
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "nova",
"imageRef": test_id}
expected = {'volume': {'status': 'fakestatus',
'display_description': 'Volume Test Desc',
'availability_zone': 'nova',
'display_name': 'Volume Test Name',
'attachments': [{'device': '/',
'server_id': 'fakeuuid',
'id': '1',
'volume_id': '1'}],
'bootable': 'false',
'volume_type': 'vol_type_name',
'image_id': test_id,
'snapshot_id': None,
'source_volid': None,
'metadata': {},
'id': '1',
'created_at': datetime.datetime(1, 1, 1,
1, 1, 1),
'size': '1'}}
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v1/volumes')
res_dict = self.controller.create(req, body)
self.assertEqual(res_dict, expected)
def test_volume_create_with_image_id_and_snapshot_id(self):
self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
self.stubs.Set(volume_api.API, "get_snapshot", stub_snapshot_get)
self.ext_mgr.extensions = {'os-image-create': 'fake'}
vol = {"size": '1',
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "cinder",
"imageRef": 'c905cedb-7281-47e4-8a62-f26bc5fc4c77',
"source_volid": None,
"snapshot_id": TEST_SNAPSHOT_UUID}
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v1/volumes')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
req,
body)
def test_volume_create_with_image_id_is_integer(self):
self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
self.ext_mgr.extensions = {'os-image-create': 'fake'}
vol = {"size": '1',
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "cinder",
"imageRef": 1234}
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v1/volumes')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
req,
body)
def test_volume_create_with_image_id_not_uuid_format(self):
self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
self.ext_mgr.extensions = {'os-image-create': 'fake'}
vol = {"size": '1',
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "cinder",
"imageRef": '12345'}
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v1/volumes')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
req,
body)
def test_volume_update(self):
self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update)
updates = {
"display_name": "Updated Test Name",
}
body = {"volume": updates}
req = fakes.HTTPRequest.blank('/v1/volumes/1')
res_dict = self.controller.update(req, '1', body)
expected = {'volume': {
'status': 'fakestatus',
'display_description': 'displaydesc',
'availability_zone': 'fakeaz',
'display_name': 'Updated Test Name',
'attachments': [{
'id': '1',
'volume_id': '1',
'server_id': 'fakeuuid',
'device': '/',
}],
'bootable': 'false',
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
'metadata': {},
'id': '1',
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'size': 1,
}}
self.assertEquals(res_dict, expected)
def test_volume_update_metadata(self):
self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update)
updates = {
"metadata": {"qos_max_iops": 2000}
}
body = {"volume": updates}
req = fakes.HTTPRequest.blank('/v1/volumes/1')
res_dict = self.controller.update(req, '1', body)
expected = {'volume': {
'status': 'fakestatus',
'display_description': 'displaydesc',
'availability_zone': 'fakeaz',
'display_name': 'displayname',
'attachments': [{
'id': '1',
'volume_id': '1',
'server_id': 'fakeuuid',
'device': '/',
}],
'bootable': 'false',
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
'metadata': {"qos_max_iops": 2000},
'id': '1',
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'size': 1,
}}
self.assertEquals(res_dict, expected)
def test_update_empty_body(self):
body = {}
req = fakes.HTTPRequest.blank('/v1/volumes/1')
self.assertRaises(webob.exc.HTTPUnprocessableEntity,
self.controller.update,
req, '1', body)
def test_update_invalid_body(self):
body = {'display_name': 'missing top level volume key'}
req = fakes.HTTPRequest.blank('/v1/volumes/1')
self.assertRaises(webob.exc.HTTPUnprocessableEntity,
self.controller.update,
req, '1', body)
def test_update_not_found(self):
self.stubs.Set(volume_api.API, "get", stubs.stub_volume_get_notfound)
updates = {
"display_name": "Updated Test Name",
}
body = {"volume": updates}
req = fakes.HTTPRequest.blank('/v1/volumes/1')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update,
req, '1', body)
def test_volume_list(self):
self.stubs.Set(volume_api.API, 'get_all',
stubs.stub_volume_get_all_by_project)
req = fakes.HTTPRequest.blank('/v1/volumes')
res_dict = self.controller.index(req)
expected = {'volumes': [{'status': 'fakestatus',
'display_description': 'displaydesc',
'availability_zone': 'fakeaz',
'display_name': 'displayname',
'attachments': [{'device': '/',
'server_id': 'fakeuuid',
'id': '1',
'volume_id': '1'}],
'bootable': 'false',
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
'metadata': {},
'id': '1',
'created_at': datetime.datetime(1, 1, 1,
1, 1, 1),
'size': 1}]}
self.assertEqual(res_dict, expected)
def test_volume_list_detail(self):
self.stubs.Set(volume_api.API, 'get_all',
stubs.stub_volume_get_all_by_project)
req = fakes.HTTPRequest.blank('/v1/volumes/detail')
res_dict = self.controller.index(req)
expected = {'volumes': [{'status': 'fakestatus',
'display_description': 'displaydesc',
'availability_zone': 'fakeaz',
'display_name': 'displayname',
'attachments': [{'device': '/',
'server_id': 'fakeuuid',
'id': '1',
'volume_id': '1'}],
'bootable': 'false',
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
'metadata': {},
'id': '1',
'created_at': datetime.datetime(1, 1, 1,
1, 1, 1),
'size': 1}]}
self.assertEqual(res_dict, expected)
def test_volume_list_by_name(self):
def stub_volume_get_all_by_project(context, project_id, marker, limit,
sort_key, sort_dir):
return [
stubs.stub_volume(1, display_name='vol1'),
stubs.stub_volume(2, display_name='vol2'),
stubs.stub_volume(3, display_name='vol3'),
]
self.stubs.Set(db, 'volume_get_all_by_project',
stub_volume_get_all_by_project)
# no display_name filter
req = fakes.HTTPRequest.blank('/v1/volumes')
resp = self.controller.index(req)
self.assertEqual(len(resp['volumes']), 3)
# filter on display_name
req = fakes.HTTPRequest.blank('/v1/volumes?display_name=vol2')
resp = self.controller.index(req)
self.assertEqual(len(resp['volumes']), 1)
self.assertEqual(resp['volumes'][0]['display_name'], 'vol2')
# filter no match
req = fakes.HTTPRequest.blank('/v1/volumes?display_name=vol4')
resp = self.controller.index(req)
self.assertEqual(len(resp['volumes']), 0)
def test_volume_list_by_status(self):
def stub_volume_get_all_by_project(context, project_id, marker, limit,
sort_key, sort_dir):
return [
stubs.stub_volume(1, display_name='vol1', status='available'),
stubs.stub_volume(2, display_name='vol2', status='available'),
stubs.stub_volume(3, display_name='vol3', status='in-use'),
]
self.stubs.Set(db, 'volume_get_all_by_project',
stub_volume_get_all_by_project)
# no status filter
req = fakes.HTTPRequest.blank('/v1/volumes')
resp = self.controller.index(req)
self.assertEqual(len(resp['volumes']), 3)
# single match
req = fakes.HTTPRequest.blank('/v1/volumes?status=in-use')
resp = self.controller.index(req)
self.assertEqual(len(resp['volumes']), 1)
self.assertEqual(resp['volumes'][0]['status'], 'in-use')
# multiple match
req = fakes.HTTPRequest.blank('/v1/volumes?status=available')
resp = self.controller.index(req)
self.assertEqual(len(resp['volumes']), 2)
for volume in resp['volumes']:
self.assertEqual(volume['status'], 'available')
# multiple filters
req = fakes.HTTPRequest.blank('/v1/volumes?status=available&'
'display_name=vol1')
resp = self.controller.index(req)
self.assertEqual(len(resp['volumes']), 1)
self.assertEqual(resp['volumes'][0]['display_name'], 'vol1')
self.assertEqual(resp['volumes'][0]['status'], 'available')
# no match
req = fakes.HTTPRequest.blank('/v1/volumes?status=in-use&'
'display_name=vol1')
resp = self.controller.index(req)
self.assertEqual(len(resp['volumes']), 0)
def test_volume_show(self):
req = fakes.HTTPRequest.blank('/v1/volumes/1')
res_dict = self.controller.show(req, '1')
expected = {'volume': {'status': 'fakestatus',
'display_description': 'displaydesc',
'availability_zone': 'fakeaz',
'display_name': 'displayname',
'attachments': [{'device': '/',
'server_id': 'fakeuuid',
'id': '1',
'volume_id': '1'}],
'bootable': 'false',
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
'metadata': {},
'id': '1',
'created_at': datetime.datetime(1, 1, 1,
1, 1, 1),
'size': 1}}
self.assertEqual(res_dict, expected)
def test_volume_show_no_attachments(self):
def stub_volume_get(self, context, volume_id):
return stubs.stub_volume(volume_id, attach_status='detached')
self.stubs.Set(volume_api.API, 'get', stub_volume_get)
req = fakes.HTTPRequest.blank('/v1/volumes/1')
res_dict = self.controller.show(req, '1')
expected = {'volume': {'status': 'fakestatus',
'display_description': 'displaydesc',
'availability_zone': 'fakeaz',
'display_name': 'displayname',
'attachments': [],
'bootable': 'false',
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
'metadata': {},
'id': '1',
'created_at': datetime.datetime(1, 1, 1,
1, 1, 1),
'size': 1}}
self.assertEqual(res_dict, expected)
def test_volume_show_bootable(self):
def stub_volume_get(self, context, volume_id):
return (stubs.stub_volume(volume_id,
volume_glance_metadata=dict(foo='bar')))
self.stubs.Set(volume_api.API, 'get', stub_volume_get)
req = fakes.HTTPRequest.blank('/v1/volumes/1')
res_dict = self.controller.show(req, '1')
expected = {'volume': {'status': 'fakestatus',
'display_description': 'displaydesc',
'availability_zone': 'fakeaz',
'display_name': 'displayname',
'attachments': [{'device': '/',
'server_id': 'fakeuuid',
'id': '1',
'volume_id': '1'}],
'bootable': 'true',
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
'metadata': {},
'id': '1',
'created_at': datetime.datetime(1, 1, 1,
1, 1, 1),
'size': 1}}
self.assertEqual(res_dict, expected)
def test_volume_show_no_volume(self):
self.stubs.Set(volume_api.API, "get", stubs.stub_volume_get_notfound)
req = fakes.HTTPRequest.blank('/v1/volumes/1')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show,
req,
1)
def test_volume_delete(self):
req = fakes.HTTPRequest.blank('/v1/volumes/1')
resp = self.controller.delete(req, 1)
self.assertEqual(resp.status_int, 202)
def test_volume_delete_no_volume(self):
self.stubs.Set(volume_api.API, "get", stubs.stub_volume_get_notfound)
req = fakes.HTTPRequest.blank('/v1/volumes/1')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete,
req,
1)
def test_admin_list_volumes_limited_to_project(self):
req = fakes.HTTPRequest.blank('/v1/fake/volumes',
use_admin_context=True)
res = self.controller.index(req)
self.assertTrue('volumes' in res)
self.assertEqual(1, len(res['volumes']))
def test_admin_list_volumes_all_tenants(self):
req = fakes.HTTPRequest.blank('/v1/fake/volumes?all_tenants=1',
use_admin_context=True)
res = self.controller.index(req)
self.assertTrue('volumes' in res)
self.assertEqual(3, len(res['volumes']))
def test_all_tenants_non_admin_gets_all_tenants(self):
req = fakes.HTTPRequest.blank('/v1/fake/volumes?all_tenants=1')
res = self.controller.index(req)
self.assertTrue('volumes' in res)
self.assertEqual(1, len(res['volumes']))
def test_non_admin_get_by_project(self):
req = fakes.HTTPRequest.blank('/v1/fake/volumes')
res = self.controller.index(req)
self.assertTrue('volumes' in res)
self.assertEqual(1, len(res['volumes']))
class VolumeSerializerTest(test.TestCase):
def _verify_volume_attachment(self, attach, tree):
for attr in ('id', 'volume_id', 'server_id', 'device'):
self.assertEqual(str(attach[attr]), tree.get(attr))
def _verify_volume(self, vol, tree):
self.assertEqual(tree.tag, NS + 'volume')
for attr in ('id', 'status', 'size', 'availability_zone', 'created_at',
'display_name', 'display_description', 'volume_type',
'snapshot_id'):
self.assertEqual(str(vol[attr]), tree.get(attr))
for child in tree:
print child.tag
self.assertTrue(child.tag in (NS + 'attachments', NS + 'metadata'))
if child.tag == 'attachments':
self.assertEqual(1, len(child))
self.assertEqual('attachment', child[0].tag)
self._verify_volume_attachment(vol['attachments'][0], child[0])
elif child.tag == 'metadata':
not_seen = set(vol['metadata'].keys())
for gr_child in child:
self.assertTrue(gr_child.get("key") in not_seen)
self.assertEqual(str(vol['metadata'][gr_child.get("key")]),
gr_child.text)
not_seen.remove(gr_child.get('key'))
self.assertEqual(0, len(not_seen))
def test_volume_show_create_serializer(self):
serializer = volumes.VolumeTemplate()
raw_volume = dict(
id='vol_id',
status='vol_status',
size=1024,
availability_zone='vol_availability',
created_at=datetime.datetime.now(),
attachments=[dict(id='vol_id',
volume_id='vol_id',
server_id='instance_uuid',
device='/foo')],
display_name='vol_name',
display_description='vol_desc',
volume_type='vol_type',
snapshot_id='snap_id',
source_volid='source_volid',
metadata=dict(foo='bar',
baz='quux', ), )
text = serializer.serialize(dict(volume=raw_volume))
print text
tree = etree.fromstring(text)
self._verify_volume(raw_volume, tree)
def test_volume_index_detail_serializer(self):
serializer = volumes.VolumesTemplate()
raw_volumes = [dict(id='vol1_id',
status='vol1_status',
size=1024,
availability_zone='vol1_availability',
created_at=datetime.datetime.now(),
attachments=[dict(id='vol1_id',
volume_id='vol1_id',
server_id='instance_uuid',
device='/foo1')],
display_name='vol1_name',
display_description='vol1_desc',
volume_type='vol1_type',
snapshot_id='snap1_id',
source_volid=None,
metadata=dict(foo='vol1_foo',
bar='vol1_bar', ), ),
dict(id='vol2_id',
status='vol2_status',
size=1024,
availability_zone='vol2_availability',
created_at=datetime.datetime.now(),
attachments=[dict(id='vol2_id',
volume_id='vol2_id',
server_id='instance_uuid',
device='/foo2')],
display_name='vol2_name',
display_description='vol2_desc',
volume_type='vol2_type',
snapshot_id='snap2_id',
source_volid=None,
metadata=dict(foo='vol2_foo',
bar='vol2_bar', ), )]
text = serializer.serialize(dict(volumes=raw_volumes))
print text
tree = etree.fromstring(text)
self.assertEqual(NS + 'volumes', tree.tag)
self.assertEqual(len(raw_volumes), len(tree))
for idx, child in enumerate(tree):
self._verify_volume(raw_volumes[idx], child)
class TestVolumeCreateRequestXMLDeserializer(test.TestCase):
def setUp(self):
super(TestVolumeCreateRequestXMLDeserializer, self).setUp()
self.deserializer = volumes.CreateDeserializer()
def test_minimal_volume(self):
self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
size="1"></volume>"""
request = self.deserializer.deserialize(self_request)
expected = {"volume": {"size": "1", }, }
self.assertEquals(request['body'], expected)
def test_display_name(self):
self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
size="1"
display_name="Volume-xml"></volume>"""
request = self.deserializer.deserialize(self_request)
expected = {
"volume": {
"size": "1",
"display_name": "Volume-xml",
},
}
self.assertEquals(request['body'], expected)
def test_display_description(self):
self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
size="1"
display_name="Volume-xml"
display_description="description"></volume>"""
request = self.deserializer.deserialize(self_request)
expected = {
"volume": {
"size": "1",
"display_name": "Volume-xml",
"display_description": "description",
},
}
self.assertEquals(request['body'], expected)
def test_volume_type(self):
self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
size="1"
display_name="Volume-xml"
display_description="description"
volume_type="289da7f8-6440-407c-9fb4-7db01ec49164"></volume>"""
request = self.deserializer.deserialize(self_request)
        expected = {
            "volume": {
                "size": "1",
                "display_name": "Volume-xml",
                "display_description": "description",
                "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164",
            },
        }
self.assertEquals(request['body'], expected)
def test_availability_zone(self):
self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
size="1"
display_name="Volume-xml"
display_description="description"
volume_type="289da7f8-6440-407c-9fb4-7db01ec49164"
availability_zone="us-east1"></volume>"""
request = self.deserializer.deserialize(self_request)
expected = {
"volume": {
"size": "1",
"display_name": "Volume-xml",
"display_description": "description",
"volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164",
"availability_zone": "us-east1",
},
}
self.assertEquals(request['body'], expected)
def test_metadata(self):
self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
display_name="Volume-xml"
size="1">
<metadata><meta key="Type">work</meta></metadata></volume>"""
request = self.deserializer.deserialize(self_request)
expected = {
"volume": {
"display_name": "Volume-xml",
"size": "1",
"metadata": {
"Type": "work",
},
},
}
self.assertEquals(request['body'], expected)
def test_full_volume(self):
self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
size="1"
display_name="Volume-xml"
display_description="description"
volume_type="289da7f8-6440-407c-9fb4-7db01ec49164"
availability_zone="us-east1">
<metadata><meta key="Type">work</meta></metadata></volume>"""
request = self.deserializer.deserialize(self_request)
expected = {
"volume": {
"size": "1",
"display_name": "Volume-xml",
"display_description": "description",
"volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164",
"availability_zone": "us-east1",
"metadata": {
"Type": "work",
},
},
}
self.assertEquals(request['body'], expected)
class VolumesUnprocessableEntityTestCase(test.TestCase):
"""
    Tests of places where we throw 422 Unprocessable Entity.
"""
def setUp(self):
super(VolumesUnprocessableEntityTestCase, self).setUp()
self.ext_mgr = extensions.ExtensionManager()
self.ext_mgr.extensions = {}
self.controller = volumes.VolumeController(self.ext_mgr)
def _unprocessable_volume_create(self, body):
req = fakes.HTTPRequest.blank('/v2/fake/volumes')
req.method = 'POST'
self.assertRaises(webob.exc.HTTPUnprocessableEntity,
self.controller.create, req, body)
def test_create_no_body(self):
self._unprocessable_volume_create(body=None)
def test_create_missing_volume(self):
body = {'foo': {'a': 'b'}}
self._unprocessable_volume_create(body=body)
def test_create_malformed_entity(self):
body = {'volume': 'string'}
self._unprocessable_volume_create(body=body)
| tomasdubec/openstack-cinder | cinder/tests/api/v1/test_volumes.py | Python | apache-2.0 | 33,659 |
#
# actions.py: routines that actually run the svn client.
#
# Subversion is a tool for revision control.
# See http://subversion.tigris.org for more information.
#
# ====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
######################################################################
import os, shutil, re, sys, errno
import difflib, pprint
import xml.parsers.expat
from xml.dom.minidom import parseString
import svntest
from svntest import main, verify, tree, wc
from svntest import Failure
def no_sleep_for_timestamps():
os.environ['SVN_I_LOVE_CORRUPTED_WORKING_COPIES_SO_DISABLE_SLEEP_FOR_TIMESTAMPS'] = 'yes'
def do_sleep_for_timestamps():
os.environ['SVN_I_LOVE_CORRUPTED_WORKING_COPIES_SO_DISABLE_SLEEP_FOR_TIMESTAMPS'] = 'no'
def no_relocate_validation():
os.environ['SVN_I_LOVE_CORRUPTED_WORKING_COPIES_SO_DISABLE_RELOCATE_VALIDATION'] = 'yes'
def do_relocate_validation():
os.environ['SVN_I_LOVE_CORRUPTED_WORKING_COPIES_SO_DISABLE_RELOCATE_VALIDATION'] = 'no'
def setup_pristine_greek_repository():
"""Create the pristine repository and 'svn import' the greek tree"""
# these directories don't exist out of the box, so we may have to create them
if not os.path.exists(main.general_wc_dir):
os.makedirs(main.general_wc_dir)
if not os.path.exists(main.general_repo_dir):
os.makedirs(main.general_repo_dir) # this also creates all the intermediate dirs
# If there's no pristine repos, create one.
if not os.path.exists(main.pristine_greek_repos_dir):
main.create_repos(main.pristine_greek_repos_dir)
  # if this is dav, give ourselves access rights to import the greek tree.
if main.is_ra_type_dav():
authz_file = os.path.join(main.work_dir, "authz")
main.file_write(authz_file, "[/]\n* = rw\n")
# dump the greek tree to disk.
main.greek_state.write_to_disk(main.greek_dump_dir)
# import the greek tree, using l:foo/p:bar
### todo: svn should not be prompting for auth info when using
### repositories with no auth/auth requirements
exit_code, output, errput = main.run_svn(None, 'import', '-m',
'Log message for revision 1.',
main.greek_dump_dir,
main.pristine_greek_repos_url)
# check for any errors from the import
if len(errput):
display_lines("Errors during initial 'svn import':",
'STDERR', None, errput)
sys.exit(1)
# verify the printed output of 'svn import'.
lastline = output.pop().strip()
match = re.search("(Committed|Imported) revision [0-9]+.", lastline)
if not match:
print("ERROR: import did not succeed, while creating greek repos.")
print("The final line from 'svn import' was:")
print(lastline)
sys.exit(1)
output_tree = wc.State.from_commit(output)
expected_output_tree = main.greek_state.copy(main.greek_dump_dir)
expected_output_tree.tweak(verb='Adding',
contents=None)
try:
expected_output_tree.compare_and_display('output', output_tree)
except tree.SVNTreeUnequal:
verify.display_trees("ERROR: output of import command is unexpected.",
"OUTPUT TREE",
expected_output_tree.old_tree(),
output_tree.old_tree())
sys.exit(1)
# Finally, disallow any changes to the "pristine" repos.
error_msg = "Don't modify the pristine repository"
create_failing_hook(main.pristine_greek_repos_dir, 'start-commit', error_msg)
create_failing_hook(main.pristine_greek_repos_dir, 'pre-lock', error_msg)
create_failing_hook(main.pristine_greek_repos_dir, 'pre-revprop-change', error_msg)
######################################################################
def guarantee_empty_repository(path):
"""Guarantee that a local svn repository exists at PATH, containing
nothing."""
if path == main.pristine_greek_repos_dir:
print("ERROR: attempt to overwrite the pristine repos! Aborting.")
sys.exit(1)
# create an empty repository at PATH.
main.safe_rmtree(path)
main.create_repos(path)
# Used by every test, so that they can run independently of one
# another. Every time this routine is called, it recursively copies
# the `pristine repos' to a new location.
# Note: make sure setup_pristine_greek_repository was called once before
# using this function.
def guarantee_greek_repository(path):
"""Guarantee that a local svn repository exists at PATH, containing
nothing but the greek-tree at revision 1."""
if path == main.pristine_greek_repos_dir:
print("ERROR: attempt to overwrite the pristine repos! Aborting.")
sys.exit(1)
# copy the pristine repository to PATH.
main.safe_rmtree(path)
if main.copy_repos(main.pristine_greek_repos_dir, path, 1):
print("ERROR: copying repository failed.")
sys.exit(1)
# make the repos world-writeable, for mod_dav_svn's sake.
main.chmod_tree(path, 0666, 0666)
def run_and_verify_atomic_ra_revprop_change(message,
expected_stdout,
expected_stderr,
expected_exit,
url, revision, propname,
old_propval, propval,
want_error):
"""Run atomic-ra-revprop-change helper and check its output and exit code.
Transforms OLD_PROPVAL and PROPVAL into a skel.
For HTTP, the default HTTP library is used."""
KEY_OLD_PROPVAL = "old_value_p"
KEY_NEW_PROPVAL = "value"
def skel_make_atom(word):
return "%d %s" % (len(word), word)
def make_proplist_skel_part(nick, val):
if val is None:
return ""
else:
return "%s %s" % (skel_make_atom(nick), skel_make_atom(val))
skel = "( %s %s )" % (make_proplist_skel_part(KEY_OLD_PROPVAL, old_propval),
make_proplist_skel_part(KEY_NEW_PROPVAL, propval))
exit_code, out, err = main.run_atomic_ra_revprop_change(url, revision,
propname, skel,
want_error)
verify.verify_outputs("Unexpected output", out, err,
expected_stdout, expected_stderr)
verify.verify_exit_code(message, exit_code, expected_exit)
return exit_code, out, err
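# For reference, a sketch of the skel string built above (property values are
# hypothetical): with old_propval='foo' and propval='bar' the constructed skel
# is
#
#   ( 11 old_value_p 3 foo 5 value 3 bar )
#
# i.e. each atom is prefixed with its length, and a None old_propval simply
# leaves its part of the proplist empty.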
def run_and_verify_svnlook(message, expected_stdout,
expected_stderr, *varargs):
"""Like run_and_verify_svnlook2, but the expected exit code is
assumed to be 0 if no output is expected on stderr, and 1 otherwise."""
expected_exit = 0
if expected_stderr is not None and expected_stderr != []:
expected_exit = 1
return run_and_verify_svnlook2(message, expected_stdout, expected_stderr,
expected_exit, *varargs)
def run_and_verify_svnlook2(message, expected_stdout, expected_stderr,
expected_exit, *varargs):
"""Run svnlook command and check its output and exit code."""
exit_code, out, err = main.run_svnlook(*varargs)
verify.verify_outputs("Unexpected output", out, err,
expected_stdout, expected_stderr)
verify.verify_exit_code(message, exit_code, expected_exit)
return exit_code, out, err
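# A rough usage sketch (repository path and expected output are hypothetical):
# a test might check the youngest revision of a repository with
#
#   run_and_verify_svnlook("svnlook youngest failed", ['2\n'], [],
#                          'youngest', repo_dir)
#
# where the empty expected-stderr list makes the expected exit code 0.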
def run_and_verify_svnadmin(message, expected_stdout,
expected_stderr, *varargs):
"""Like run_and_verify_svnadmin2, but the expected exit code is
assumed to be 0 if no output is expected on stderr, and 1 otherwise."""
expected_exit = 0
if expected_stderr is not None and expected_stderr != []:
expected_exit = 1
return run_and_verify_svnadmin2(message, expected_stdout, expected_stderr,
expected_exit, *varargs)
def run_and_verify_svnadmin2(message, expected_stdout, expected_stderr,
expected_exit, *varargs):
"""Run svnadmin command and check its output and exit code."""
exit_code, out, err = main.run_svnadmin(*varargs)
verify.verify_outputs("Unexpected output", out, err,
expected_stdout, expected_stderr)
verify.verify_exit_code(message, exit_code, expected_exit)
return exit_code, out, err
def run_and_verify_svnversion(message, wc_dir, trail_url,
expected_stdout, expected_stderr, *varargs):
"""like run_and_verify_svnversion2, but the expected exit code is
assumed to be 0 if no output is expected on stderr, and 1 otherwise."""
expected_exit = 0
if expected_stderr is not None and expected_stderr != []:
expected_exit = 1
return run_and_verify_svnversion2(message, wc_dir, trail_url,
expected_stdout, expected_stderr,
expected_exit, *varargs)
def run_and_verify_svnversion2(message, wc_dir, trail_url,
expected_stdout, expected_stderr,
expected_exit, *varargs):
"""Run svnversion command and check its output and exit code."""
if trail_url is None:
exit_code, out, err = main.run_svnversion(wc_dir, *varargs)
else:
exit_code, out, err = main.run_svnversion(wc_dir, trail_url, *varargs)
verify.verify_outputs("Unexpected output", out, err,
expected_stdout, expected_stderr)
verify.verify_exit_code(message, exit_code, expected_exit)
return exit_code, out, err
def run_and_verify_svn(message, expected_stdout, expected_stderr, *varargs):
"""like run_and_verify_svn2, but the expected exit code is assumed to
be 0 if no output is expected on stderr, and 1 otherwise."""
expected_exit = 0
if expected_stderr is not None:
if isinstance(expected_stderr, verify.ExpectedOutput):
if not expected_stderr.matches([]):
expected_exit = 1
elif expected_stderr != []:
expected_exit = 1
return run_and_verify_svn2(message, expected_stdout, expected_stderr,
expected_exit, *varargs)
def run_and_verify_svn2(message, expected_stdout, expected_stderr,
expected_exit, *varargs):
"""Invoke main.run_svn() with *VARARGS. Return exit code as int; stdout,
stderr as lists of lines (including line terminators). For both
EXPECTED_STDOUT and EXPECTED_STDERR, create an appropriate instance of
verify.ExpectedOutput (if necessary):
- If it is an array of strings, create a vanilla ExpectedOutput.
- If it is a single string, create a RegexOutput that must match every
line (for stdout) or any line (for stderr) of the expected output.
- If it is already an instance of ExpectedOutput
(e.g. UnorderedOutput), leave it alone.
...and invoke compare_and_display_lines() on MESSAGE, a label based
on the name of the stream being compared (e.g. STDOUT), the
ExpectedOutput instance, and the actual output.
If EXPECTED_STDOUT is None, do not check stdout.
EXPECTED_STDERR may not be None.
If output checks pass, the expected and actual codes are compared.
If a comparison fails, a Failure will be raised."""
if expected_stderr is None:
raise verify.SVNIncorrectDatatype("expected_stderr must not be None")
want_err = None
if isinstance(expected_stderr, verify.ExpectedOutput):
if not expected_stderr.matches([]):
want_err = True
elif expected_stderr != []:
want_err = True
exit_code, out, err = main.run_svn(want_err, *varargs)
verify.verify_outputs(message, out, err, expected_stdout, expected_stderr)
verify.verify_exit_code(message, exit_code, expected_exit)
return exit_code, out, err
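# Illustrative sketch of the conventions documented above (targets and
# patterns are hypothetical):
#
#   run_and_verify_svn("mkdir failed",
#                      ["A         newdir\n"],  # list -> vanilla ExpectedOutput
#                      [],                      # no stderr -> expected exit 0
#                      'mkdir', 'newdir')
#
#   run_and_verify_svn("expected failure", [],
#                      ".*not a working copy.*",  # string -> RegexOutput (stderr)
#                      'status', 'no-such-dir')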
def run_and_verify_load(repo_dir, dump_file_content,
bypass_prop_validation = False):
"Runs 'svnadmin load' and reports any errors."
if not isinstance(dump_file_content, list):
raise TypeError("dump_file_content argument should have list type")
expected_stderr = []
if bypass_prop_validation:
exit_code, output, errput = main.run_command_stdin(
main.svnadmin_binary, expected_stderr, 0, 1, dump_file_content,
'load', '--force-uuid', '--quiet', '--bypass-prop-validation', repo_dir)
else:
exit_code, output, errput = main.run_command_stdin(
main.svnadmin_binary, expected_stderr, 0, 1, dump_file_content,
'load', '--force-uuid', '--quiet', repo_dir)
verify.verify_outputs("Unexpected stderr output", None, errput,
None, expected_stderr)
def run_and_verify_dump(repo_dir, deltas=False):
"Runs 'svnadmin dump' and reports any errors, returning the dump content."
if deltas:
exit_code, output, errput = main.run_svnadmin('dump', '--deltas',
repo_dir)
else:
exit_code, output, errput = main.run_svnadmin('dump', repo_dir)
verify.verify_outputs("Missing expected output(s)", output, errput,
verify.AnyOutput, verify.AnyOutput)
return output
def run_and_verify_svnrdump(dumpfile_content, expected_stdout,
expected_stderr, expected_exit, *varargs):
"""Runs 'svnrdump dump|load' depending on dumpfile_content and
reports any errors."""
exit_code, output, err = main.run_svnrdump(dumpfile_content, *varargs)
# Since main.run_svnrdump() uses binary mode, normalize the stderr
# line endings on Windows ourselves.
if sys.platform == 'win32':
err = map(lambda x : x.replace('\r\n', '\n'), err)
for index, line in enumerate(err[:]):
if re.search("warning: W200007", line):
del err[index]
verify.verify_outputs("Unexpected output", output, err,
expected_stdout, expected_stderr)
verify.verify_exit_code("Unexpected return code", exit_code, expected_exit)
return output
def run_and_verify_svnmucc(message, expected_stdout, expected_stderr,
*varargs):
"""Run svnmucc command and check its output"""
expected_exit = 0
if expected_stderr is not None and expected_stderr != []:
expected_exit = 1
return run_and_verify_svnmucc2(message, expected_stdout, expected_stderr,
expected_exit, *varargs)
def run_and_verify_svnmucc2(message, expected_stdout, expected_stderr,
expected_exit, *varargs):
"""Run svnmucc command and check its output and exit code."""
exit_code, out, err = main.run_svnmucc(*varargs)
verify.verify_outputs("Unexpected output", out, err,
expected_stdout, expected_stderr)
verify.verify_exit_code(message, exit_code, expected_exit)
return exit_code, out, err
def load_repo(sbox, dumpfile_path = None, dump_str = None,
bypass_prop_validation = False):
"Loads the dumpfile into sbox"
if not dump_str:
dump_str = open(dumpfile_path, "rb").read()
# Create a virgin repos and working copy
main.safe_rmtree(sbox.repo_dir, 1)
main.safe_rmtree(sbox.wc_dir, 1)
main.create_repos(sbox.repo_dir)
# Load the mergetracking dumpfile into the repos, and check it out the repo
run_and_verify_load(sbox.repo_dir, dump_str.splitlines(True),
bypass_prop_validation)
run_and_verify_svn(None, None, [], "co", sbox.repo_url, sbox.wc_dir)
return dump_str
def expected_noop_update_output(rev):
"""Return an ExpectedOutput object describing what we'd expect to
see from an update to revision REV that was effectively a no-op (no
server changes transmitted)."""
return verify.createExpectedOutput("Updating '.*':|At revision %d."
% (rev),
"no-op update")
######################################################################
# Subversion Actions
#
# These are all routines that invoke 'svn' in particular ways, and
# then verify the results by comparing expected trees with actual
# trees.
#
def run_and_verify_checkout2(do_remove,
URL, wc_dir_name, output_tree, disk_tree,
singleton_handler_a = None,
a_baton = None,
singleton_handler_b = None,
b_baton = None,
*args):
"""Checkout the URL into a new directory WC_DIR_NAME. *ARGS are any
extra optional args to the checkout subcommand.
The subcommand output will be verified against OUTPUT_TREE,
and the working copy itself will be verified against DISK_TREE.
For the latter comparison, SINGLETON_HANDLER_A and
SINGLETON_HANDLER_B will be passed to tree.compare_trees -- see that
function's doc string for more details. Return if successful, raise
on failure.
WC_DIR_NAME is deleted if DO_REMOVE is True.
"""
if isinstance(output_tree, wc.State):
output_tree = output_tree.old_tree()
if isinstance(disk_tree, wc.State):
disk_tree = disk_tree.old_tree()
# Remove dir if it's already there, unless this is a forced checkout.
# In that case assume we want to test a forced checkout's toleration
# of obstructing paths.
if do_remove:
main.safe_rmtree(wc_dir_name)
# Checkout and make a tree of the output, using l:foo/p:bar
### todo: svn should not be prompting for auth info when using
### repositories with no auth/auth requirements
exit_code, output, errput = main.run_svn(None, 'co',
URL, wc_dir_name, *args)
actual = tree.build_tree_from_checkout(output)
# Verify actual output against expected output.
try:
tree.compare_trees("output", actual, output_tree)
except tree.SVNTreeUnequal:
print("ACTUAL OUTPUT TREE:")
tree.dump_tree_script(actual, wc_dir_name + os.sep)
raise
# Create a tree by scanning the working copy
actual = tree.build_tree_from_wc(wc_dir_name)
# Verify expected disk against actual disk.
try:
tree.compare_trees("disk", actual, disk_tree,
singleton_handler_a, a_baton,
singleton_handler_b, b_baton)
except tree.SVNTreeUnequal:
print("ACTUAL DISK TREE:")
tree.dump_tree_script(actual, wc_dir_name + os.sep)
raise
def run_and_verify_checkout(URL, wc_dir_name, output_tree, disk_tree,
singleton_handler_a = None,
a_baton = None,
singleton_handler_b = None,
b_baton = None,
*args):
"""Same as run_and_verify_checkout2(), but without the DO_REMOVE arg.
WC_DIR_NAME is deleted if present unless the '--force' option is passed
in *ARGS."""
# Remove dir if it's already there, unless this is a forced checkout.
# In that case assume we want to test a forced checkout's toleration
# of obstructing paths.
return run_and_verify_checkout2(('--force' not in args),
URL, wc_dir_name, output_tree, disk_tree,
singleton_handler_a,
a_baton,
singleton_handler_b,
b_baton,
*args)
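# A rough usage sketch (sandbox variables hypothetical): callers normally build
# the expectations as wc.State objects and let the wrapper convert them:
#
#   run_and_verify_checkout(sbox.repo_url, sbox.wc_dir,
#                           expected_output,  # wc.State of checkout output
#                           expected_disk)    # wc.State of the resulting tree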
def run_and_verify_export(URL, export_dir_name, output_tree, disk_tree,
*args):
"""Export the URL into a new directory WC_DIR_NAME.
The subcommand output will be verified against OUTPUT_TREE,
and the exported copy itself will be verified against DISK_TREE.
Return if successful, raise on failure.
"""
assert isinstance(output_tree, wc.State)
assert isinstance(disk_tree, wc.State)
disk_tree = disk_tree.old_tree()
output_tree = output_tree.old_tree()
# Export and make a tree of the output, using l:foo/p:bar
### todo: svn should not be prompting for auth info when using
### repositories with no auth/auth requirements
exit_code, output, errput = main.run_svn(None, 'export',
URL, export_dir_name, *args)
actual = tree.build_tree_from_checkout(output)
# Verify actual output against expected output.
try:
tree.compare_trees("output", actual, output_tree)
except tree.SVNTreeUnequal:
print("ACTUAL OUTPUT TREE:")
tree.dump_tree_script(actual, export_dir_name + os.sep)
raise
# Create a tree by scanning the working copy. Don't ignore
# the .svn directories so that we generate an error if they
# happen to show up.
actual = tree.build_tree_from_wc(export_dir_name, ignore_svn=False)
# Verify expected disk against actual disk.
try:
tree.compare_trees("disk", actual, disk_tree)
except tree.SVNTreeUnequal:
print("ACTUAL DISK TREE:")
tree.dump_tree_script(actual, export_dir_name + os.sep)
raise
# run_and_verify_log_xml
class LogEntry:
def __init__(self, revision, changed_paths=None, revprops=None):
self.revision = revision
if changed_paths == None:
self.changed_paths = {}
else:
self.changed_paths = changed_paths
if revprops == None:
self.revprops = {}
else:
self.revprops = revprops
def assert_changed_paths(self, changed_paths):
"""Assert that changed_paths is the same as this entry's changed_paths
Raises svntest.Failure if not.
"""
raise Failure('NOT IMPLEMENTED')
def assert_revprops(self, revprops):
"""Assert that the dict revprops is the same as this entry's revprops.
Raises svntest.Failure if not.
"""
if self.revprops != revprops:
raise Failure('\n' + '\n'.join(difflib.ndiff(
pprint.pformat(revprops).splitlines(),
pprint.pformat(self.revprops).splitlines())))
class LogParser:
def parse(self, data):
"""Return a list of LogEntrys parsed from the sequence of strings data.
This is the only method of interest to callers.
"""
try:
for i in data:
self.parser.Parse(i)
self.parser.Parse('', True)
except xml.parsers.expat.ExpatError, e:
raise verify.SVNUnexpectedStdout('%s\n%s\n' % (e, ''.join(data),))
return self.entries
def __init__(self):
# for expat
self.parser = xml.parsers.expat.ParserCreate()
self.parser.StartElementHandler = self.handle_start_element
self.parser.EndElementHandler = self.handle_end_element
self.parser.CharacterDataHandler = self.handle_character_data
# Ignore some things.
self.ignore_elements('log', 'paths', 'path', 'revprops')
self.ignore_tags('logentry_end', 'author_start', 'date_start', 'msg_start')
# internal state
self.cdata = []
self.property = None
# the result
self.entries = []
def ignore(self, *args, **kwargs):
del self.cdata[:]
def ignore_tags(self, *args):
for tag in args:
setattr(self, tag, self.ignore)
def ignore_elements(self, *args):
for element in args:
self.ignore_tags(element + '_start', element + '_end')
# expat handlers
def handle_start_element(self, name, attrs):
getattr(self, name + '_start')(attrs)
def handle_end_element(self, name):
getattr(self, name + '_end')()
def handle_character_data(self, data):
self.cdata.append(data)
# element handler utilities
def use_cdata(self):
result = ''.join(self.cdata).strip()
del self.cdata[:]
return result
def svn_prop(self, name):
self.entries[-1].revprops['svn:' + name] = self.use_cdata()
# element handlers
def logentry_start(self, attrs):
self.entries.append(LogEntry(int(attrs['revision'])))
def author_end(self):
self.svn_prop('author')
def msg_end(self):
self.svn_prop('log')
def date_end(self):
# svn:date could be anything, so just note its presence.
self.cdata[:] = ['']
self.svn_prop('date')
def property_start(self, attrs):
self.property = attrs['name']
def property_end(self):
self.entries[-1].revprops[self.property] = self.use_cdata()
def run_and_verify_log_xml(message=None, expected_paths=None,
expected_revprops=None, expected_stdout=None,
expected_stderr=None, args=[]):
"""Call run_and_verify_svn with log --xml and args (optional) as command
arguments, and pass along message, expected_stdout, and expected_stderr.
If message is None, pass the svn log command as message.
expected_paths checking is not yet implemented.
expected_revprops is an optional list of dicts, compared to each
revision's revprops. The list must be in the same order the log entries
come in. Any svn:date revprops in the dicts must be '' in order to
match, as the actual dates could be anything.
expected_paths and expected_revprops are ignored if expected_stdout or
expected_stderr is specified.
"""
if message == None:
message = ' '.join(args)
# We'll parse the output unless the caller specifies expected_stderr or
# expected_stdout for run_and_verify_svn.
parse = True
if expected_stderr == None:
expected_stderr = []
else:
parse = False
if expected_stdout != None:
parse = False
log_args = list(args)
if expected_paths != None:
log_args.append('-v')
(exit_code, stdout, stderr) = run_and_verify_svn(
message, expected_stdout, expected_stderr,
'log', '--xml', *log_args)
if not parse:
return
entries = LogParser().parse(stdout)
for index in range(len(entries)):
entry = entries[index]
if expected_revprops != None:
entry.assert_revprops(expected_revprops[index])
if expected_paths != None:
entry.assert_changed_paths(expected_paths[index])
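# A rough usage sketch (revision data hypothetical): verify the revprops of the
# two newest revisions, remembering that any svn:date value must be '' to
# match:
#
#   run_and_verify_log_xml(
#     expected_revprops=[{'svn:author': 'jrandom', 'svn:date': '',
#                         'svn:log': 'second log msg'},
#                        {'svn:author': 'jrandom', 'svn:date': '',
#                         'svn:log': 'first log msg'}],
#     args=['-l2', sbox.repo_url])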
def verify_update(actual_output,
actual_mergeinfo_output,
actual_elision_output,
wc_dir_name,
output_tree,
mergeinfo_output_tree,
elision_output_tree,
disk_tree,
status_tree,
singleton_handler_a=None,
a_baton=None,
singleton_handler_b=None,
b_baton=None,
check_props=False):
"""Verify update of WC_DIR_NAME.
The subcommand output (found in ACTUAL_OUTPUT, ACTUAL_MERGEINFO_OUTPUT,
and ACTUAL_ELISION_OUTPUT) will be verified against OUTPUT_TREE,
MERGEINFO_OUTPUT_TREE, and ELISION_OUTPUT_TREE respectively (if any of
these is provided, they may be None in which case a comparison is not
done). The working copy itself will be verified against DISK_TREE (if
provided), and the working copy's 'svn status' output will be verified
against STATUS_TREE (if provided). (This is a good way to check that
revision numbers were bumped.)
Return if successful, raise on failure.
For the comparison with DISK_TREE, pass SINGLETON_HANDLER_A and
SINGLETON_HANDLER_B to tree.compare_trees -- see that function's doc
string for more details. If CHECK_PROPS is set, then disk
comparison will examine props."""
if isinstance(actual_output, wc.State):
actual_output = actual_output.old_tree()
if isinstance(actual_mergeinfo_output, wc.State):
actual_mergeinfo_output = actual_mergeinfo_output.old_tree()
if isinstance(actual_elision_output, wc.State):
actual_elision_output = actual_elision_output.old_tree()
if isinstance(output_tree, wc.State):
output_tree = output_tree.old_tree()
if isinstance(mergeinfo_output_tree, wc.State):
mergeinfo_output_tree = mergeinfo_output_tree.old_tree()
if isinstance(elision_output_tree, wc.State):
elision_output_tree = elision_output_tree.old_tree()
if isinstance(disk_tree, wc.State):
disk_tree = disk_tree.old_tree()
if isinstance(status_tree, wc.State):
status_tree = status_tree.old_tree()
# Verify actual output against expected output.
if output_tree:
try:
tree.compare_trees("output", actual_output, output_tree)
except tree.SVNTreeUnequal:
print("ACTUAL OUTPUT TREE:")
tree.dump_tree_script(actual_output, wc_dir_name + os.sep)
raise
# Verify actual mergeinfo recording output against expected output.
if mergeinfo_output_tree:
try:
tree.compare_trees("mergeinfo_output", actual_mergeinfo_output,
mergeinfo_output_tree)
except tree.SVNTreeUnequal:
print("ACTUAL MERGEINFO OUTPUT TREE:")
tree.dump_tree_script(actual_mergeinfo_output,
wc_dir_name + os.sep)
raise
# Verify actual mergeinfo elision output against expected output.
if elision_output_tree:
try:
tree.compare_trees("elision_output", actual_elision_output,
elision_output_tree)
except tree.SVNTreeUnequal:
print("ACTUAL ELISION OUTPUT TREE:")
tree.dump_tree_script(actual_elision_output,
wc_dir_name + os.sep)
raise
# Create a tree by scanning the working copy, and verify it
if disk_tree:
actual_disk = tree.build_tree_from_wc(wc_dir_name, check_props)
try:
tree.compare_trees("disk", actual_disk, disk_tree,
singleton_handler_a, a_baton,
singleton_handler_b, b_baton)
except tree.SVNTreeUnequal:
print("EXPECTED DISK TREE:")
tree.dump_tree_script(disk_tree)
print("ACTUAL DISK TREE:")
tree.dump_tree_script(actual_disk)
raise
# Verify via 'status' command too, if possible.
if status_tree:
run_and_verify_status(wc_dir_name, status_tree)
def verify_disk(wc_dir_name, disk_tree, check_props=False):
"""Verify WC_DIR_NAME against DISK_TREE. If CHECK_PROPS is set,
  the comparison will examine props. Returns if successful, raises on
failure."""
verify_update(None, None, None, wc_dir_name, None, None, None, disk_tree,
None, check_props=check_props)
def run_and_verify_update(wc_dir_name,
output_tree, disk_tree, status_tree,
error_re_string = None,
singleton_handler_a = None,
a_baton = None,
singleton_handler_b = None,
b_baton = None,
check_props = False,
*args):
"""Update WC_DIR_NAME. *ARGS are any extra optional args to the
update subcommand. NOTE: If *ARGS is specified at all, explicit
target paths must be passed in *ARGS as well (or a default `.' will
be chosen by the 'svn' binary). This allows the caller to update
many items in a single working copy dir, but still verify the entire
working copy dir.
If ERROR_RE_STRING, the update must exit with error, and the error
message must match regular expression ERROR_RE_STRING.
Else if ERROR_RE_STRING is None, then:
If OUTPUT_TREE is not None, the subcommand output will be verified
against OUTPUT_TREE. If DISK_TREE is not None, the working copy
itself will be verified against DISK_TREE. If STATUS_TREE is not
None, the 'svn status' output will be verified against STATUS_TREE.
(This is a good way to check that revision numbers were bumped.)
For the DISK_TREE verification, SINGLETON_HANDLER_A and
SINGLETON_HANDLER_B will be passed to tree.compare_trees -- see that
function's doc string for more details.
If CHECK_PROPS is set, then disk comparison will examine props.
Return if successful, raise on failure."""
# Update and make a tree of the output.
if len(args):
exit_code, output, errput = main.run_svn(error_re_string, 'up', *args)
else:
exit_code, output, errput = main.run_svn(error_re_string,
'up', wc_dir_name,
*args)
if error_re_string:
rm = re.compile(error_re_string)
for line in errput:
match = rm.search(line)
if match:
return
raise main.SVNUnmatchedError
actual = wc.State.from_checkout(output)
verify_update(actual, None, None, wc_dir_name,
output_tree, None, None, disk_tree, status_tree,
singleton_handler_a, a_baton,
singleton_handler_b, b_baton,
check_props)
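# A rough usage sketch (trees and paths hypothetical): updating the whole
# working copy is the common case,
#
#   run_and_verify_update(wc_dir, expected_output, expected_disk,
#                         expected_status)
#
# while updating a single target still verifies the whole working copy, but
# the target must then be passed explicitly through *args:
#
#   run_and_verify_update(wc_dir, expected_output, expected_disk,
#                         expected_status, None, None, None, None, None, False,
#                         '-r', '1', A_path)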
def run_and_parse_info(*args):
"""Run 'svn info ARGS' and parse its output into a list of dicts,
one dict per reported node."""
# the returned array
all_infos = []
# per-target variables
iter_info = {}
prev_key = None
lock_comment_lines = 0
lock_comments = []
exit_code, output, errput = main.run_svn(None, 'info', *args)
for line in output:
line = line[:-1] # trim '\n'
if lock_comment_lines > 0:
# mop up any lock comment lines
lock_comments.append(line)
lock_comment_lines = lock_comment_lines - 1
if lock_comment_lines == 0:
iter_info[prev_key] = lock_comments
elif len(line) == 0:
# separator line between items
all_infos.append(iter_info)
iter_info = {}
prev_key = None
lock_comment_lines = 0
lock_comments = []
elif line[0].isspace():
# continuation line (for tree conflicts)
iter_info[prev_key] += line[1:]
else:
# normal line
key, value = line.split(':', 1)
if re.search(' \(\d+ lines?\)$', key):
# numbered continuation lines
match = re.match('^(.*) \((\d+) lines?\)$', key)
key = match.group(1)
lock_comment_lines = int(match.group(2))
elif len(value) > 1:
# normal normal line
iter_info[key] = value[1:]
else:
### originally added for "Tree conflict:\n" lines;
### tree-conflicts output format has changed since then
# continuation lines are implicit (prefixed by whitespace)
iter_info[key] = ''
prev_key = key
return all_infos
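# A sketch of the structure returned above (field values are hypothetical):
# run_and_parse_info(wc_dir) yields one dict per reported node, keyed by the
# text before the colon on each 'svn info' line, e.g.
#
#   [{'Path': 'wc', 'URL': 'file:///tmp/repos', 'Revision': '2',
#     'Node Kind': 'directory', 'Schedule': 'normal'}]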
def run_and_verify_info(expected_infos, *args):
"""Run 'svn info' with the arguments in *ARGS and verify the results
against expected_infos. The latter should be a list of dicts, one dict
per reported node, in the order in which the 'Path' fields of the output
will appear after sorting them as Python strings. (The dicts in
EXPECTED_INFOS, however, need not have a 'Path' key.)
In the dicts, each key is the before-the-colon part of the 'svn info' output,
and each value is either None (meaning that the key should *not* appear in
the 'svn info' output) or a regex matching the output value. Output lines
not matching a key in the dict are ignored.
Return if successful, raise on failure."""
actual_infos = run_and_parse_info(*args)
actual_infos.sort(key=lambda info: info['Path'])
try:
# zip() won't complain, so check this manually
if len(actual_infos) != len(expected_infos):
raise verify.SVNUnexpectedStdout(
"Expected %d infos, found %d infos"
% (len(expected_infos), len(actual_infos)))
for actual, expected in zip(actual_infos, expected_infos):
# compare dicts
for key, value in expected.items():
assert ':' not in key # caller passed impossible expectations?
if value is None and key in actual:
raise main.SVNLineUnequal("Found unexpected key '%s' with value '%s'"
% (key, actual[key]))
if value is not None and key not in actual:
raise main.SVNLineUnequal("Expected key '%s' (with value '%s') "
"not found" % (key, value))
if value is not None and not re.match(value, actual[key]):
raise verify.SVNUnexpectedStdout("Values of key '%s' don't match:\n"
" Expected: '%s' (regex)\n"
" Found: '%s' (string)\n"
% (key, value, actual[key]))
except:
sys.stderr.write("Bad 'svn info' output:\n"
" Received: %s\n"
" Expected: %s\n"
% (actual_infos, expected_infos))
raise
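# A rough usage sketch (values hypothetical): each expected value is treated as
# a regex, and a None value asserts that the key must not appear at all:
#
#   run_and_verify_info([{'Revision': '2',
#                         'Node Kind': 'directory',
#                         'Lock Token': None}],
#                       sbox.wc_dir)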
def run_and_verify_merge(dir, rev1, rev2, url1, url2,
output_tree,
mergeinfo_output_tree,
elision_output_tree,
disk_tree, status_tree, skip_tree,
error_re_string = None,
singleton_handler_a = None,
a_baton = None,
singleton_handler_b = None,
b_baton = None,
check_props = False,
dry_run = True,
*args):
"""Run 'svn merge URL1@REV1 URL2@REV2 DIR' if URL2 is not None
(for a three-way merge between URLs and WC).
If URL2 is None, run 'svn merge -rREV1:REV2 URL1 DIR'. If both REV1
and REV2 are None, leave off the '-r' argument.
If ERROR_RE_STRING, the merge must exit with error, and the error
message must match regular expression ERROR_RE_STRING.
Else if ERROR_RE_STRING is None, then:
The subcommand output will be verified against OUTPUT_TREE. Output
related to mergeinfo notifications will be verified against
MERGEINFO_OUTPUT_TREE if that is not None. Output related to mergeinfo
elision will be verified against ELISION_OUTPUT_TREE if that is not None.
The working copy itself will be verified against DISK_TREE. If optional
STATUS_TREE is given, then 'svn status' output will be compared. The
'skipped' merge output will be compared to SKIP_TREE.
For the DISK_TREE verification, SINGLETON_HANDLER_A and
SINGLETON_HANDLER_B will be passed to tree.compare_trees -- see that
function's doc string for more details.
If CHECK_PROPS is set, then disk comparison will examine props.
If DRY_RUN is set then a --dry-run merge will be carried out first and
the output compared with that of the full merge.
Return if successful, raise on failure.
*ARGS are any extra optional args to the merge subcommand.
NOTE: If *ARGS is specified at all, an explicit target path must be passed
in *ARGS as well. This allows the caller to merge into single items inside
the working copy, but still verify the entire working copy dir. """
merge_command = [ "merge" ]
if url2:
merge_command.extend((url1 + "@" + str(rev1), url2 + "@" + str(rev2)))
else:
if not (rev1 is None and rev2 is None):
merge_command.append("-r" + str(rev1) + ":" + str(rev2))
merge_command.append(url1)
if len(args) == 0:
merge_command.append(dir)
merge_command = tuple(merge_command)
if dry_run:
pre_disk = tree.build_tree_from_wc(dir)
dry_run_command = merge_command + ('--dry-run',)
dry_run_command = dry_run_command + args
exit_code, out_dry, err_dry = main.run_svn(error_re_string,
*dry_run_command)
post_disk = tree.build_tree_from_wc(dir)
try:
tree.compare_trees("disk", post_disk, pre_disk)
except tree.SVNTreeError:
print("=============================================================")
print("Dry-run merge altered working copy")
print("=============================================================")
raise
# Update and make a tree of the output.
merge_command = merge_command + args
exit_code, out, err = main.run_svn(error_re_string, *merge_command)
if error_re_string:
if not error_re_string.startswith(".*"):
error_re_string = ".*(" + error_re_string + ")"
expected_err = verify.RegexOutput(error_re_string, match_all=False)
verify.verify_outputs(None, None, err, None, expected_err)
return
elif err:
raise verify.SVNUnexpectedStderr(err)
# Split the output into that related to application of the actual diff
# and that related to the recording of mergeinfo describing the merge.
merge_diff_out = []
mergeinfo_notification_out = []
mergeinfo_elision_out = []
mergeinfo_notifications = False
elision_notifications = False
for line in out:
if line.startswith('--- Recording'):
mergeinfo_notifications = True
elision_notifications = False
elif line.startswith('--- Eliding'):
mergeinfo_notifications = False
elision_notifications = True
elif line.startswith('--- Merging') or \
line.startswith('--- Reverse-merging') or \
line.startswith('Summary of conflicts') or \
line.startswith('Skipped missing target'):
mergeinfo_notifications = False
elision_notifications = False
if mergeinfo_notifications:
mergeinfo_notification_out.append(line)
elif elision_notifications:
mergeinfo_elision_out.append(line)
else:
merge_diff_out.append(line)
if dry_run and merge_diff_out != out_dry:
# Due to the way ra_serf works, it's possible that the dry-run and
# real merge operations did the same thing, but the output came in
# a different order. Let's see if maybe that's the case by comparing
# the outputs as unordered sets rather than as lists.
#
# This now happens for other RA layers with modern APR because the
# hash order now varies.
#
# The different orders of the real and dry-run merges may cause
# the "Merging rX through rY into" lines to be duplicated a
# different number of times in the two outputs. The list-set
# conversion removes duplicates so these differences are ignored.
# It also removes "U some/path" duplicate lines. Perhaps we
# should avoid that?
out_copy = set(merge_diff_out[:])
out_dry_copy = set(out_dry[:])
if out_copy != out_dry_copy:
print("=============================================================")
print("Merge outputs differ")
print("The dry-run merge output:")
for x in out_dry:
sys.stdout.write(x)
print("The full merge output:")
for x in out:
sys.stdout.write(x)
print("=============================================================")
raise main.SVNUnmatchedError
def missing_skip(a, b):
print("=============================================================")
print("Merge failed to skip: " + a.path)
print("=============================================================")
raise Failure
def extra_skip(a, b):
print("=============================================================")
print("Merge unexpectedly skipped: " + a.path)
print("=============================================================")
raise Failure
myskiptree = tree.build_tree_from_skipped(out)
if isinstance(skip_tree, wc.State):
skip_tree = skip_tree.old_tree()
try:
tree.compare_trees("skip", myskiptree, skip_tree,
extra_skip, None, missing_skip, None)
except tree.SVNTreeUnequal:
print("ACTUAL SKIP TREE:")
tree.dump_tree_script(myskiptree, dir + os.sep)
raise
actual_diff = svntest.wc.State.from_checkout(merge_diff_out, False)
actual_mergeinfo = svntest.wc.State.from_checkout(mergeinfo_notification_out,
False)
actual_elision = svntest.wc.State.from_checkout(mergeinfo_elision_out,
False)
verify_update(actual_diff, actual_mergeinfo, actual_elision, dir,
output_tree, mergeinfo_output_tree, elision_output_tree,
disk_tree, status_tree,
singleton_handler_a, a_baton,
singleton_handler_b, b_baton,
check_props)
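# A rough usage sketch (paths, revisions and trees hypothetical): passing None
# for url2 selects the '-rREV1:REV2 URL1' form, so
#
#   run_and_verify_merge(A_COPY_path, 2, 5, A_url, None,
#                        expected_output, expected_mergeinfo_output,
#                        expected_elision_output, expected_disk,
#                        expected_status, expected_skip)
#
# runs 'svn merge -r2:5 A_url A_COPY_path', preceded by a --dry-run pass since
# dry_run defaults to True.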
def run_and_verify_patch(dir, patch_path,
output_tree, disk_tree, status_tree, skip_tree,
error_re_string=None,
check_props=False,
dry_run=True,
*args):
"""Run 'svn patch patch_path DIR'.
If ERROR_RE_STRING, 'svn patch' must exit with error, and the error
message must match regular expression ERROR_RE_STRING.
Else if ERROR_RE_STRING is None, then:
The subcommand output will be verified against OUTPUT_TREE, and the
working copy itself will be verified against DISK_TREE. If optional
STATUS_TREE is given, then 'svn status' output will be compared.
The 'skipped' merge output will be compared to SKIP_TREE.
If CHECK_PROPS is set, then disk comparison will examine props.
If DRY_RUN is set then a --dry-run patch will be carried out first and
the output compared with that of the full patch application.
Returns if successful, raises on failure."""
patch_command = [ "patch" ]
patch_command.append(patch_path)
patch_command.append(dir)
patch_command = tuple(patch_command)
if dry_run:
pre_disk = tree.build_tree_from_wc(dir)
dry_run_command = patch_command + ('--dry-run',)
dry_run_command = dry_run_command + args
exit_code, out_dry, err_dry = main.run_svn(error_re_string,
*dry_run_command)
post_disk = tree.build_tree_from_wc(dir)
try:
tree.compare_trees("disk", post_disk, pre_disk)
except tree.SVNTreeError:
print("=============================================================")
print("'svn patch --dry-run' altered working copy")
print("=============================================================")
raise
# Update and make a tree of the output.
patch_command = patch_command + args
exit_code, out, err = main.run_svn(True, *patch_command)
if error_re_string:
rm = re.compile(error_re_string)
match = None
for line in err:
match = rm.search(line)
if match:
break
if not match:
raise main.SVNUnmatchedError
elif err:
print("UNEXPECTED STDERR:")
for x in err:
sys.stdout.write(x)
raise verify.SVNUnexpectedStderr
if dry_run and out != out_dry:
# APR hash order means the output order can vary, assume everything is OK
# if only the order changes.
out_dry_expected = svntest.verify.UnorderedOutput(out)
verify.compare_and_display_lines('dry-run patch output not as expected',
'', out_dry_expected, out_dry)
def missing_skip(a, b):
print("=============================================================")
print("'svn patch' failed to skip: " + a.path)
print("=============================================================")
raise Failure
def extra_skip(a, b):
print("=============================================================")
print("'svn patch' unexpectedly skipped: " + a.path)
print("=============================================================")
raise Failure
myskiptree = tree.build_tree_from_skipped(out)
if isinstance(skip_tree, wc.State):
skip_tree = skip_tree.old_tree()
tree.compare_trees("skip", myskiptree, skip_tree,
extra_skip, None, missing_skip, None)
mytree = tree.build_tree_from_checkout(out, 0)
# when the expected output is a list, we want a line-by-line
# comparison to happen instead of a tree comparison
if (isinstance(output_tree, list)
or isinstance(output_tree, verify.UnorderedOutput)):
verify.verify_outputs(None, out, err, output_tree, error_re_string)
output_tree = None
verify_update(mytree, None, None, dir,
output_tree, None, None, disk_tree, status_tree,
check_props=check_props)
def run_and_verify_mergeinfo(error_re_string = None,
expected_output = [],
*args):
"""Run 'svn mergeinfo ARGS', and compare the result against
EXPECTED_OUTPUT, a list of string representations of revisions
expected in the output. Raise an exception if an unexpected
output is encountered."""
mergeinfo_command = ["mergeinfo"]
mergeinfo_command.extend(args)
exit_code, out, err = main.run_svn(error_re_string, *mergeinfo_command)
if error_re_string:
if not error_re_string.startswith(".*"):
error_re_string = ".*(" + error_re_string + ")"
expected_err = verify.RegexOutput(error_re_string, match_all=False)
verify.verify_outputs(None, None, err, None, expected_err)
return
out = sorted([_f for _f in [x.rstrip()[1:] for x in out] if _f])
expected_output.sort()
extra_out = []
if out != expected_output:
exp_hash = dict.fromkeys(expected_output)
for rev in out:
if rev in exp_hash:
del(exp_hash[rev])
else:
extra_out.append(rev)
extra_exp = list(exp_hash.keys())
raise Exception("Unexpected 'svn mergeinfo' output:\n"
" expected but not found: %s\n"
" found but not expected: %s"
% (', '.join([str(x) for x in extra_exp]),
', '.join([str(x) for x in extra_out])))
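# A rough usage sketch (revisions and paths hypothetical): the expected output
# lists the revisions without the one-character marker stripped from each
# output line above:
#
#   run_and_verify_mergeinfo(None, ['4', '5'], A_url, A_COPY_path)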
def run_and_verify_switch(wc_dir_name,
wc_target,
switch_url,
output_tree, disk_tree, status_tree,
error_re_string = None,
singleton_handler_a = None,
a_baton = None,
singleton_handler_b = None,
b_baton = None,
check_props = False,
*args):
"""Switch WC_TARGET (in working copy dir WC_DIR_NAME) to SWITCH_URL.
If ERROR_RE_STRING, the switch must exit with error, and the error
message must match regular expression ERROR_RE_STRING.
Else if ERROR_RE_STRING is None, then:
The subcommand output will be verified against OUTPUT_TREE, and the
working copy itself will be verified against DISK_TREE. If optional
STATUS_TREE is given, then 'svn status' output will be
compared. (This is a good way to check that revision numbers were
bumped.)
For the DISK_TREE verification, SINGLETON_HANDLER_A and
SINGLETON_HANDLER_B will be passed to tree.compare_trees -- see that
function's doc string for more details.
If CHECK_PROPS is set, then disk comparison will examine props.
Return if successful, raise on failure."""
# Update and make a tree of the output.
exit_code, output, errput = main.run_svn(error_re_string, 'switch',
switch_url, wc_target, *args)
if error_re_string:
if not error_re_string.startswith(".*"):
error_re_string = ".*(" + error_re_string + ")"
expected_err = verify.RegexOutput(error_re_string, match_all=False)
verify.verify_outputs(None, None, errput, None, expected_err)
return
elif errput:
    raise verify.SVNUnexpectedStderr(errput)
actual = wc.State.from_checkout(output)
verify_update(actual, None, None, wc_dir_name,
output_tree, None, None, disk_tree, status_tree,
singleton_handler_a, a_baton,
singleton_handler_b, b_baton,
check_props)
def process_output_for_commit(output):
"""Helper for run_and_verify_commit(), also used in the factory."""
# Remove the final output line, and verify that the commit succeeded.
lastline = ""
rest = []
def external_removal(line):
return line.startswith('Removing external') \
or line.startswith('Removed external')
if len(output):
lastline = output.pop().strip()
while len(output) and external_removal(lastline):
rest.append(lastline)
lastline = output.pop().strip()
cm = re.compile("(Committed|Imported) revision [0-9]+.")
match = cm.search(lastline)
if not match:
print("ERROR: commit did not succeed.")
print("The final line from 'svn ci' was:")
print(lastline)
raise main.SVNCommitFailure
# The new 'final' line in the output is either a regular line that
# mentions {Adding, Deleting, Sending, ...}, or it could be a line
  # that says "Transmitting file data ...". In the latter case, we
# want to remove the line from the output; it should be ignored when
# building a tree.
if len(output):
lastline = output.pop()
tm = re.compile("Transmitting file data.+")
match = tm.search(lastline)
if not match:
# whoops, it was important output, put it back.
output.append(lastline)
if len(rest):
output.extend(rest)
return output
def run_and_verify_commit(wc_dir_name, output_tree, status_tree,
error_re_string = None,
*args):
"""Commit and verify results within working copy WC_DIR_NAME,
sending ARGS to the commit subcommand.
The subcommand output will be verified against OUTPUT_TREE. If
optional STATUS_TREE is given, then 'svn status' output will
be compared. (This is a good way to check that revision numbers
were bumped.)
If ERROR_RE_STRING is None, the commit must not exit with error. If
ERROR_RE_STRING is a string, the commit must exit with error, and
the error message must match regular expression ERROR_RE_STRING.
Return if successful, raise on failure."""
if isinstance(output_tree, wc.State):
output_tree = output_tree.old_tree()
if isinstance(status_tree, wc.State):
status_tree = status_tree.old_tree()
# Commit.
if '-m' not in args and '-F' not in args:
args = list(args) + ['-m', 'log msg']
exit_code, output, errput = main.run_svn(error_re_string, 'ci',
*args)
if error_re_string:
if not error_re_string.startswith(".*"):
error_re_string = ".*(" + error_re_string + ")"
expected_err = verify.RegexOutput(error_re_string, match_all=False)
verify.verify_outputs(None, None, errput, None, expected_err)
return
# Else not expecting error:
# Convert the output into a tree.
output = process_output_for_commit(output)
actual = tree.build_tree_from_commit(output)
# Verify actual output against expected output.
try:
tree.compare_trees("output", actual, output_tree)
except tree.SVNTreeError:
verify.display_trees("Output of commit is unexpected",
"OUTPUT TREE", output_tree, actual)
print("ACTUAL OUTPUT TREE:")
tree.dump_tree_script(actual, wc_dir_name + os.sep)
raise
# Verify via 'status' command too, if possible.
if status_tree:
run_and_verify_status(wc_dir_name, status_tree)
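# A rough usage sketch (trees hypothetical): the usual calling pattern builds
# the expected commit output and resulting status as wc.State objects:
#
#   run_and_verify_commit(wc_dir, expected_output, expected_status,
#                         None,    # no error expected
#                         wc_dir)  # commit target passed via *args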
# This function always passes '-q' to the status command, which
# suppresses the printing of any unversioned or nonexistent items.
def run_and_verify_status(wc_dir_name, output_tree,
singleton_handler_a = None,
a_baton = None,
singleton_handler_b = None,
b_baton = None):
"""Run 'status' on WC_DIR_NAME and compare it with the
expected OUTPUT_TREE. SINGLETON_HANDLER_A and SINGLETON_HANDLER_B will
be passed to tree.compare_trees - see that function's doc string for
more details.
Returns on success, raises on failure."""
if isinstance(output_tree, wc.State):
output_state = output_tree
output_tree = output_tree.old_tree()
else:
output_state = None
exit_code, output, errput = main.run_svn(None, 'status', '-v', '-u', '-q',
wc_dir_name)
actual = tree.build_tree_from_status(output)
# Verify actual output against expected output.
try:
tree.compare_trees("status", actual, output_tree,
singleton_handler_a, a_baton,
singleton_handler_b, b_baton)
except tree.SVNTreeError:
verify.display_trees(None, 'STATUS OUTPUT TREE', output_tree, actual)
print("ACTUAL STATUS TREE:")
tree.dump_tree_script(actual, wc_dir_name + os.sep)
raise
# if we have an output State, and we can/are-allowed to create an
# entries-based State, then compare the two.
if output_state:
entries_state = wc.State.from_entries(wc_dir_name)
if entries_state:
tweaked = output_state.copy()
tweaked.tweak_for_entries_compare()
try:
tweaked.compare_and_display('entries', entries_state)
except tree.SVNTreeUnequal:
### do something more
raise
# A variant of previous func, but doesn't pass '-q'. This allows us
# to verify unversioned or nonexistent items in the list.
def run_and_verify_unquiet_status(wc_dir_name, status_tree):
"""Run 'status' on WC_DIR_NAME and compare it with the
expected STATUS_TREE.
Returns on success, raises on failure."""
if isinstance(status_tree, wc.State):
status_tree = status_tree.old_tree()
exit_code, output, errput = main.run_svn(None, 'status', '-v',
'-u', wc_dir_name)
actual = tree.build_tree_from_status(output)
# Verify actual output against expected output.
try:
tree.compare_trees("UNQUIET STATUS", actual, status_tree)
except tree.SVNTreeError:
print("ACTUAL UNQUIET STATUS TREE:")
tree.dump_tree_script(actual, wc_dir_name + os.sep)
raise
def run_and_verify_status_xml(expected_entries = [],
*args):
""" Run 'status --xml' with arguments *ARGS. If successful the output
is parsed into an XML document and will be verified by comparing against
EXPECTED_ENTRIES.
"""
exit_code, output, errput = run_and_verify_svn(None, None, [],
'status', '--xml', *args)
if len(errput) > 0:
raise Failure
doc = parseString(''.join(output))
entries = doc.getElementsByTagName('entry')
def getText(nodelist):
rc = []
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc.append(node.data)
return ''.join(rc)
actual_entries = {}
for entry in entries:
wcstatus = entry.getElementsByTagName('wc-status')[0]
commit = entry.getElementsByTagName('commit')
author = entry.getElementsByTagName('author')
rstatus = entry.getElementsByTagName('repos-status')
actual_entry = {'wcprops' : wcstatus.getAttribute('props'),
'wcitem' : wcstatus.getAttribute('item'),
}
if wcstatus.hasAttribute('revision'):
actual_entry['wcrev'] = wcstatus.getAttribute('revision')
if (commit):
actual_entry['crev'] = commit[0].getAttribute('revision')
if (author):
actual_entry['author'] = getText(author[0].childNodes)
if (rstatus):
actual_entry['rprops'] = rstatus[0].getAttribute('props')
actual_entry['ritem'] = rstatus[0].getAttribute('item')
actual_entries[entry.getAttribute('path')] = actual_entry
if expected_entries != actual_entries:
raise Failure('\n' + '\n'.join(difflib.ndiff(
pprint.pformat(expected_entries).splitlines(),
pprint.pformat(actual_entries).splitlines())))
def run_and_verify_diff_summarize_xml(error_re_string = [],
expected_prefix = None,
expected_paths = [],
expected_items = [],
expected_props = [],
expected_kinds = [],
*args):
"""Run 'diff --summarize --xml' with the arguments *ARGS, which should
  contain all arguments for the command beyond the 'diff --summarize --xml'
  part itself. EXPECTED_PREFIX is a "common" path prefix
expected to be at the beginning of each summarized path. If
EXPECTED_PREFIX is None, then EXPECTED_PATHS will need to be exactly
as 'svn diff --summarize --xml' will output. If ERROR_RE_STRING, the
command must exit with error, and the error message must match regular
expression ERROR_RE_STRING.
Else if ERROR_RE_STRING is None, the subcommand output will be parsed
into an XML document and will then be verified by comparing the parsed
output to the contents in the EXPECTED_PATHS, EXPECTED_ITEMS,
EXPECTED_PROPS and EXPECTED_KINDS. Returns on success, raises
on failure."""
exit_code, output, errput = run_and_verify_svn(None, None, error_re_string,
'diff', '--summarize',
'--xml', *args)
# Return if errors are present since they were expected
if len(errput) > 0:
return
doc = parseString(''.join(output))
paths = doc.getElementsByTagName("path")
items = expected_items
kinds = expected_kinds
for path in paths:
modified_path = path.childNodes[0].data
if (expected_prefix is not None
and modified_path.find(expected_prefix) == 0):
modified_path = modified_path.replace(expected_prefix, '')[1:].strip()
# Workaround single-object diff
if len(modified_path) == 0:
modified_path = path.childNodes[0].data.split(os.sep)[-1]
# From here on, we use '/' as path separator.
if os.sep != "/":
modified_path = modified_path.replace(os.sep, "/")
if modified_path not in expected_paths:
print("ERROR: %s not expected in the changed paths." % modified_path)
raise Failure
index = expected_paths.index(modified_path)
expected_item = items[index]
expected_kind = kinds[index]
expected_prop = expected_props[index]
actual_item = path.getAttribute('item')
actual_kind = path.getAttribute('kind')
actual_prop = path.getAttribute('props')
if expected_item != actual_item:
print("ERROR: expected: %s actual: %s" % (expected_item, actual_item))
raise Failure
if expected_kind != actual_kind:
print("ERROR: expected: %s actual: %s" % (expected_kind, actual_kind))
raise Failure
if expected_prop != actual_prop:
print("ERROR: expected: %s actual: %s" % (expected_prop, actual_prop))
raise Failure
def run_and_verify_diff_summarize(output_tree, *args):
"""Run 'diff --summarize' with the arguments *ARGS.
The subcommand output will be verified against OUTPUT_TREE. Returns
on success, raises on failure.
"""
if isinstance(output_tree, wc.State):
output_tree = output_tree.old_tree()
exit_code, output, errput = main.run_svn(None, 'diff', '--summarize',
*args)
actual = tree.build_tree_from_diff_summarize(output)
# Verify actual output against expected output.
try:
tree.compare_trees("output", actual, output_tree)
except tree.SVNTreeError:
verify.display_trees(None, 'DIFF OUTPUT TREE', output_tree, actual)
print("ACTUAL DIFF OUTPUT TREE:")
tree.dump_tree_script(actual)
raise
def run_and_validate_lock(path, username):
"""`svn lock' the given path and validate the contents of the lock.
Use the given username. This is important because locks are
user specific."""
comment = "Locking path:%s." % path
# lock the path
run_and_verify_svn(None, ".*locked by user", [], 'lock',
'--username', username,
'-m', comment, path)
# Run info and check that we get the lock fields.
exit_code, output, err = run_and_verify_svn(None, None, [],
'info','-R',
path)
### TODO: Leverage RegexOuput([...], match_all=True) here.
# prepare the regexs to compare against
token_re = re.compile(".*?Lock Token: opaquelocktoken:.*?", re.DOTALL)
author_re = re.compile(".*?Lock Owner: %s\n.*?" % username, re.DOTALL)
created_re = re.compile(".*?Lock Created:.*?", re.DOTALL)
comment_re = re.compile(".*?%s\n.*?" % re.escape(comment), re.DOTALL)
# join all output lines into one
output = "".join(output)
# Fail even if one regex does not match
if ( not (token_re.match(output) and
author_re.match(output) and
created_re.match(output) and
comment_re.match(output))):
raise Failure
def _run_and_verify_resolve(cmd, expected_paths, *args):
"""Run "svn CMD" (where CMD is 'resolve' or 'resolved') with arguments
ARGS, and verify that it resolves the paths in EXPECTED_PATHS and no others.
If no ARGS are specified, use the elements of EXPECTED_PATHS as the
arguments."""
# TODO: verify that the status of PATHS changes accordingly.
if len(args) == 0:
args = expected_paths
expected_output = verify.UnorderedOutput([
"Resolved conflicted state of '" + path + "'\n" for path in
expected_paths])
run_and_verify_svn(None, expected_output, [],
cmd, *args)
def run_and_verify_resolve(expected_paths, *args):
"""Run "svn resolve" with arguments ARGS, and verify that it resolves the
paths in EXPECTED_PATHS and no others. If no ARGS are specified, use the
elements of EXPECTED_PATHS as the arguments."""
_run_and_verify_resolve('resolve', expected_paths, *args)
def run_and_verify_resolved(expected_paths, *args):
"""Run "svn resolved" with arguments ARGS, and verify that it resolves the
paths in EXPECTED_PATHS and no others. If no ARGS are specified, use the
elements of EXPECTED_PATHS as the arguments."""
_run_and_verify_resolve('resolved', expected_paths, *args)
def run_and_verify_revert(expected_paths, *args):
"""Run "svn revert" with arguments ARGS, and verify that it reverts
the paths in EXPECTED_PATHS and no others. If no ARGS are
specified, use the elements of EXPECTED_PATHS as the arguments."""
if len(args) == 0:
args = expected_paths
expected_output = verify.UnorderedOutput([
"Reverted '" + path + "'\n" for path in
expected_paths])
run_and_verify_svn(None, expected_output, [],
"revert", *args)
######################################################################
# Other general utilities
# This allows a test to *quickly* bootstrap itself.
def make_repo_and_wc(sbox, create_wc = True, read_only = False):
"""Create a fresh 'Greek Tree' repository and check out a WC from it.
If READ_ONLY is False, a dedicated repository will be created, at the path
SBOX.repo_dir. If READ_ONLY is True, the pristine repository will be used.
In either case, SBOX.repo_url is assumed to point to the repository that
will be used.
If create_wc is True, a dedicated working copy will be checked out from
the repository, at the path SBOX.wc_dir.
Returns on success, raises on failure."""
# Create (or copy afresh) a new repos with a greek tree in it.
if not read_only:
guarantee_greek_repository(sbox.repo_dir)
if create_wc:
# Generate the expected output tree.
expected_output = main.greek_state.copy()
expected_output.wc_dir = sbox.wc_dir
expected_output.tweak(status='A ', contents=None)
# Generate an expected wc tree.
expected_wc = main.greek_state
# Do a checkout, and verify the resulting output and disk contents.
run_and_verify_checkout(sbox.repo_url,
sbox.wc_dir,
expected_output,
expected_wc)
else:
# just make sure the parent folder of our working copy is created
try:
os.mkdir(main.general_wc_dir)
except OSError, err:
if err.errno != errno.EEXIST:
raise
# Duplicate a working copy or other dir.
def duplicate_dir(wc_name, wc_copy_name):
"""Copy the working copy WC_NAME to WC_COPY_NAME. Overwrite any
existing tree at that location."""
main.safe_rmtree(wc_copy_name)
shutil.copytree(wc_name, wc_copy_name)
def get_virginal_state(wc_dir, rev):
"Return a virginal greek tree state for a WC and repos at revision REV."
rev = str(rev) ### maybe switch rev to an integer?
# copy the greek tree, shift it to the new wc_dir, insert a root elem,
# then tweak all values
state = main.greek_state.copy()
state.wc_dir = wc_dir
state.desc[''] = wc.StateItem()
state.tweak(contents=None, status=' ', wc_rev=rev)
return state
# Cheap administrative directory locking
def lock_admin_dir(wc_dir, recursive=False):
"Lock a SVN administrative directory"
db, root_path, relpath = wc.open_wc_db(wc_dir)
svntest.main.run_wc_lock_tester(recursive, wc_dir)
def set_incomplete(wc_dir, revision):
"Make wc_dir incomplete at revision"
svntest.main.run_wc_incomplete_tester(wc_dir, revision)
def get_wc_uuid(wc_dir):
"Return the UUID of the working copy at WC_DIR."
return run_and_parse_info(wc_dir)[0]['Repository UUID']
def get_wc_base_rev(wc_dir):
"Return the BASE revision of the working copy at WC_DIR."
return run_and_parse_info(wc_dir)[0]['Revision']
def hook_failure_message(hook_name):
"""Return the error message that the client prints for failure of the
specified hook HOOK_NAME. The wording changed with Subversion 1.5."""
if svntest.main.options.server_minor_version < 5:
return "'%s' hook failed with error output:\n" % hook_name
else:
if hook_name in ["start-commit", "pre-commit"]:
action = "Commit"
elif hook_name == "pre-revprop-change":
action = "Revprop change"
elif hook_name == "pre-lock":
action = "Lock"
elif hook_name == "pre-unlock":
action = "Unlock"
else:
action = None
if action is None:
message = "%s hook failed (exit code 1)" % (hook_name,)
else:
message = "%s blocked by %s hook (exit code 1)" % (action, hook_name)
return message + " with output:\n"
def create_failing_hook(repo_dir, hook_name, text):
"""Create a HOOK_NAME hook in the repository at REPO_DIR that prints
TEXT to stderr and exits with an error."""
hook_path = os.path.join(repo_dir, 'hooks', hook_name)
# Embed the text carefully: it might include characters like "%" and "'".
main.create_python_hook_script(hook_path, 'import sys\n'
'sys.stderr.write(' + repr(text) + ')\n'
'sys.exit(1)\n')
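# Hedged usage sketch (the hook name and message are illustrative, not taken
# from a real test): a test could make every commit to its repository fail
# with
#
#   create_failing_hook(sbox.repo_dir, 'pre-commit', "Commits are disabled.\n")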
def enable_revprop_changes(repo_dir):
"""Enable revprop changes in the repository at REPO_DIR by creating a
pre-revprop-change hook script and (if appropriate) making it executable."""
hook_path = main.get_pre_revprop_change_hook_path(repo_dir)
main.create_python_hook_script(hook_path, 'import sys; sys.exit(0)')
def disable_revprop_changes(repo_dir):
"""Disable revprop changes in the repository at REPO_DIR by creating a
pre-revprop-change hook script that prints "pre-revprop-change" followed
by its arguments, and returns an error."""
hook_path = main.get_pre_revprop_change_hook_path(repo_dir)
main.create_python_hook_script(hook_path,
'import sys\n'
'sys.stderr.write("pre-revprop-change %s" % " ".join(sys.argv[1:6]))\n'
'sys.exit(1)\n')
def create_failing_post_commit_hook(repo_dir):
"""Create a post-commit hook script in the repository at REPO_DIR that always
reports an error."""
hook_path = main.get_post_commit_hook_path(repo_dir)
main.create_python_hook_script(hook_path, 'import sys\n'
'sys.stderr.write("Post-commit hook failed")\n'
'sys.exit(1)')
# set_prop can be used for properties with NULL characters which are not
# handled correctly when passed to subprocess.Popen() and values like "*"
# which are not handled correctly on Windows.
def set_prop(name, value, path, expected_re_string=None):
"""Set a property with specified value"""
if value and (value[0] == '-' or '\x00' in value or sys.platform == 'win32'):
from tempfile import mkstemp
(fd, value_file_path) = mkstemp()
value_file = open(value_file_path, 'wb')
value_file.write(value)
value_file.flush()
value_file.close()
exit_code, out, err = main.run_svn(expected_re_string, 'propset',
'-F', value_file_path, name, path)
os.close(fd)
os.remove(value_file_path)
else:
exit_code, out, err = main.run_svn(expected_re_string, 'propset',
name, value, path)
if expected_re_string:
if not expected_re_string.startswith(".*"):
expected_re_string = ".*(" + expected_re_string + ")"
expected_err = verify.RegexOutput(expected_re_string, match_all=False)
verify.verify_outputs(None, None, err, None, expected_err)
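# Hedged usage sketch (values are illustrative; 'sbox' is assumed to be a
# test's sandbox): a value starting with '-' would be mistaken for an option
# on the command line, so set_prop() routes it through a temporary file and
# 'propset -F':
#
#   set_prop('svn:ignore', '-build-output', os.path.join(sbox.wc_dir, 'A'))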
def check_prop(name, path, exp_out, revprop=None):
"""Verify that property NAME on PATH has a value of EXP_OUT.
If REVPROP is not None, then it is a revision number and
a revision property is sought."""
if revprop is not None:
revprop_options = ['--revprop', '-r', revprop]
else:
revprop_options = []
# Not using run_svn because binary_mode must be set
exit_code, out, err = main.run_command(main.svn_binary, None, 1, 'pg',
'--strict', name, path,
'--config-dir',
main.default_config_dir,
'--username', main.wc_author,
'--password', main.wc_passwd,
*revprop_options)
if out != exp_out:
print("svn pg --strict %s output does not match expected." % name)
print("Expected standard output: %s\n" % exp_out)
print("Actual standard output: %s\n" % out)
raise Failure
def fill_file_with_lines(wc_path, line_nbr, line_descrip=None,
append=True):
"""Change the file at WC_PATH (adding some lines), and return its
new contents. LINE_NBR is the line number at which the new contents are
assumed to start, i.e. the number given to the first generated line.
LINE_DESCRIP is something like 'This is line' (the default) or
'Conflicting line'."""
if line_descrip is None:
line_descrip = "This is line"
# Generate the new contents for the file.
contents = ""
for n in range(line_nbr, line_nbr + 3):
contents = contents + line_descrip + " " + repr(n) + " in '" + \
os.path.basename(wc_path) + "'.\n"
# Write the new contents to the file.
if append:
main.file_append(wc_path, contents)
else:
main.file_write(wc_path, contents)
return contents
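# Worked example (illustration only): fill_file_with_lines('/tmp/wc/mu', 1)
# appends, and returns, the three lines
#
#   This is line 1 in 'mu'.
#   This is line 2 in 'mu'.
#   This is line 3 in 'mu'.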
def inject_conflict_into_wc(sbox, state_path, file_path,
expected_disk, expected_status, merged_rev):
"""Create a conflict at FILE_PATH by replacing its contents,
committing the change, backdating it to its previous revision,
changing its contents again, then updating it to merge in the
previous change."""
wc_dir = sbox.wc_dir
# Make a change to the file.
contents = fill_file_with_lines(file_path, 1, "This is line", append=False)
# Commit the changed file, first taking note of the current revision.
prev_rev = expected_status.desc[state_path].wc_rev
expected_output = wc.State(wc_dir, {
state_path : wc.StateItem(verb='Sending'),
})
if expected_status:
expected_status.tweak(state_path, wc_rev=merged_rev)
run_and_verify_commit(wc_dir, expected_output, expected_status,
None, file_path)
# Backdate the file.
exit_code, output, errput = main.run_svn(None, "up", "-r", str(prev_rev),
file_path)
if expected_status:
expected_status.tweak(state_path, wc_rev=prev_rev)
# Make a conflicting change to the file, and backdate the file.
conflicting_contents = fill_file_with_lines(file_path, 1, "Conflicting line",
append=False)
# Merge the previous change into the file to produce a conflict.
if expected_disk:
expected_disk.tweak(state_path, contents="")
expected_output = wc.State(wc_dir, {
state_path : wc.StateItem(status='C '),
})
inject_conflict_into_expected_state(state_path,
expected_disk, expected_status,
conflicting_contents, contents,
merged_rev)
exit_code, output, errput = main.run_svn(None, "up", "-r", str(merged_rev),
file_path)
if expected_status:
expected_status.tweak(state_path, wc_rev=merged_rev)
def inject_conflict_into_expected_state(state_path,
expected_disk, expected_status,
wc_text, merged_text, merged_rev):
"""Update the EXPECTED_DISK and EXPECTED_STATUS trees for the
conflict at STATE_PATH (ignored if None). WC_TEXT, MERGED_TEXT, and
MERGED_REV are used to determine the contents of the conflict (the
text parameters should be newline-terminated)."""
if expected_disk:
conflict_marker = make_conflict_marker_text(wc_text, merged_text,
merged_rev)
existing_text = expected_disk.desc[state_path].contents or ""
expected_disk.tweak(state_path, contents=existing_text + conflict_marker)
if expected_status:
expected_status.tweak(state_path, status='C ')
def make_conflict_marker_text(wc_text, merged_text, merged_rev):
"""Return the conflict marker text described by WC_TEXT (the current
text in the working copy, MERGED_TEXT (the conflicting text merged
in), and MERGED_REV (the revision from whence the conflicting text
came)."""
return "<<<<<<< .working\n" + wc_text + "=======\n" + \
merged_text + ">>>>>>> .merge-right.r" + str(merged_rev) + "\n"
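# Worked example (illustration only): make_conflict_marker_text('local\n',
# 'incoming\n', 4) returns the marker text
#
#   <<<<<<< .working
#   local
#   =======
#   incoming
#   >>>>>>> .merge-right.r4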
def build_greek_tree_conflicts(sbox):
"""Create a working copy that has tree-conflict markings.
After this function has been called, sbox.wc_dir is a working
copy that has specific tree-conflict markings.
In particular, this does two conflicting sets of edits and performs an
update so that tree conflicts appear.
Note that this function calls sbox.build() because it needs a clean sbox.
So, there is no need to call sbox.build() before this.
The conflicts are the result of an 'update' on the following changes:
             Incoming    Local
A/D/G/pi     text-mod    del
A/D/G/rho    del         text-mod
A/D/G/tau    del         del
This function is useful for testing that tree-conflicts are handled
properly once they have appeared, e.g. that commits are blocked, that the
info output is correct, etc.
See also the tree-conflicts tests using deep_trees in various other
.py files, and tree_conflict_tests.py.
"""
sbox.build()
wc_dir = sbox.wc_dir
j = os.path.join
G = j(wc_dir, 'A', 'D', 'G')
pi = j(G, 'pi')
rho = j(G, 'rho')
tau = j(G, 'tau')
# Make incoming changes and "store them away" with a commit.
main.file_append(pi, "Incoming edit.\n")
main.run_svn(None, 'del', rho)
main.run_svn(None, 'del', tau)
expected_output = wc.State(wc_dir, {
'A/D/G/pi' : Item(verb='Sending'),
'A/D/G/rho' : Item(verb='Deleting'),
'A/D/G/tau' : Item(verb='Deleting'),
})
expected_status = get_virginal_state(wc_dir, 1)
expected_status.tweak('A/D/G/pi', wc_rev='2')
expected_status.remove('A/D/G/rho', 'A/D/G/tau')
run_and_verify_commit(wc_dir, expected_output, expected_status, None,
'-m', 'Incoming changes.', wc_dir )
# Update back to the pristine state ("time-warp").
expected_output = wc.State(wc_dir, {
'A/D/G/pi' : Item(status='U '),
'A/D/G/rho' : Item(status='A '),
'A/D/G/tau' : Item(status='A '),
})
expected_disk = main.greek_state
expected_status = get_virginal_state(wc_dir, 1)
run_and_verify_update(wc_dir, expected_output, expected_disk,
expected_status, None, None, None, None, None, False,
'-r', '1', wc_dir)
# Make local changes
main.run_svn(None, 'del', pi)
main.file_append(rho, "Local edit.\n")
main.run_svn(None, 'del', tau)
# Update, receiving the incoming changes on top of the local changes,
# causing tree conflicts. Don't check for any particular result: that is
# the job of other tests.
run_and_verify_svn(None, verify.AnyOutput, [], 'update', wc_dir)
def make_deep_trees(base):
"""Helper function for deep trees conflicts. Create a set of trees,
each in its own "container" dir. Any conflicts can be tested separately
in each container.
"""
j = os.path.join
# Create the container dirs.
F = j(base, 'F')
D = j(base, 'D')
DF = j(base, 'DF')
DD = j(base, 'DD')
DDF = j(base, 'DDF')
DDD = j(base, 'DDD')
os.makedirs(F)
os.makedirs(j(D, 'D1'))
os.makedirs(j(DF, 'D1'))
os.makedirs(j(DD, 'D1', 'D2'))
os.makedirs(j(DDF, 'D1', 'D2'))
os.makedirs(j(DDD, 'D1', 'D2', 'D3'))
# Create their files.
alpha = j(F, 'alpha')
beta = j(DF, 'D1', 'beta')
gamma = j(DDF, 'D1', 'D2', 'gamma')
main.file_append(alpha, "This is the file 'alpha'.\n")
main.file_append(beta, "This is the file 'beta'.\n")
main.file_append(gamma, "This is the file 'gamma'.\n")
def add_deep_trees(sbox, base_dir_name):
"""Prepare a "deep_trees" within a given directory.
The directory <sbox.wc_dir>/<base_dir_name> is created and a deep_tree
is created within. The items are only added, a commit has to be
called separately, if needed.
<base_dir_name> will thus be a container for the set of containers
mentioned in make_deep_trees().
"""
j = os.path.join
base = j(sbox.wc_dir, base_dir_name)
make_deep_trees(base)
main.run_svn(None, 'add', base)
Item = wc.StateItem
# initial deep trees state
deep_trees_virginal_state = wc.State('', {
'F' : Item(),
'F/alpha' : Item("This is the file 'alpha'.\n"),
'D' : Item(),
'D/D1' : Item(),
'DF' : Item(),
'DF/D1' : Item(),
'DF/D1/beta' : Item("This is the file 'beta'.\n"),
'DD' : Item(),
'DD/D1' : Item(),
'DD/D1/D2' : Item(),
'DDF' : Item(),
'DDF/D1' : Item(),
'DDF/D1/D2' : Item(),
'DDF/D1/D2/gamma' : Item("This is the file 'gamma'.\n"),
'DDD' : Item(),
'DDD/D1' : Item(),
'DDD/D1/D2' : Item(),
'DDD/D1/D2/D3' : Item(),
})
# Many actions on deep trees and their resulting states...
def deep_trees_leaf_edit(base):
"""Helper function for deep trees test cases. Append text to files,
create new files in empty directories, and change leaf node properties."""
j = os.path.join
F = j(base, 'F', 'alpha')
DF = j(base, 'DF', 'D1', 'beta')
DDF = j(base, 'DDF', 'D1', 'D2', 'gamma')
main.file_append(F, "More text for file alpha.\n")
main.file_append(DF, "More text for file beta.\n")
main.file_append(DDF, "More text for file gamma.\n")
run_and_verify_svn(None, verify.AnyOutput, [],
'propset', 'prop1', '1', F, DF, DDF)
D = j(base, 'D', 'D1')
DD = j(base, 'DD', 'D1', 'D2')
DDD = j(base, 'DDD', 'D1', 'D2', 'D3')
run_and_verify_svn(None, verify.AnyOutput, [],
'propset', 'prop1', '1', D, DD, DDD)
D = j(base, 'D', 'D1', 'delta')
DD = j(base, 'DD', 'D1', 'D2', 'epsilon')
DDD = j(base, 'DDD', 'D1', 'D2', 'D3', 'zeta')
main.file_append(D, "This is the file 'delta'.\n")
main.file_append(DD, "This is the file 'epsilon'.\n")
main.file_append(DDD, "This is the file 'zeta'.\n")
run_and_verify_svn(None, verify.AnyOutput, [],
'add', D, DD, DDD)
# deep trees state after a call to deep_trees_leaf_edit
deep_trees_after_leaf_edit = wc.State('', {
'F' : Item(),
'F/alpha' : Item("This is the file 'alpha'.\nMore text for file alpha.\n"),
'D' : Item(),
'D/D1' : Item(),
'D/D1/delta' : Item("This is the file 'delta'.\n"),
'DF' : Item(),
'DF/D1' : Item(),
'DF/D1/beta' : Item("This is the file 'beta'.\nMore text for file beta.\n"),
'DD' : Item(),
'DD/D1' : Item(),
'DD/D1/D2' : Item(),
'DD/D1/D2/epsilon' : Item("This is the file 'epsilon'.\n"),
'DDF' : Item(),
'DDF/D1' : Item(),
'DDF/D1/D2' : Item(),
'DDF/D1/D2/gamma' : Item("This is the file 'gamma'.\nMore text for file gamma.\n"),
'DDD' : Item(),
'DDD/D1' : Item(),
'DDD/D1/D2' : Item(),
'DDD/D1/D2/D3' : Item(),
'DDD/D1/D2/D3/zeta' : Item("This is the file 'zeta'.\n"),
})
def deep_trees_leaf_del(base):
"""Helper function for deep trees test cases. Delete files and empty
dirs."""
j = os.path.join
F = j(base, 'F', 'alpha')
D = j(base, 'D', 'D1')
DF = j(base, 'DF', 'D1', 'beta')
DD = j(base, 'DD', 'D1', 'D2')
DDF = j(base, 'DDF', 'D1', 'D2', 'gamma')
DDD = j(base, 'DDD', 'D1', 'D2', 'D3')
main.run_svn(None, 'rm', F, D, DF, DD, DDF, DDD)
# deep trees state after a call to deep_trees_leaf_del
deep_trees_after_leaf_del = wc.State('', {
'F' : Item(),
'D' : Item(),
'DF' : Item(),
'DF/D1' : Item(),
'DD' : Item(),
'DD/D1' : Item(),
'DDF' : Item(),
'DDF/D1' : Item(),
'DDF/D1/D2' : Item(),
'DDD' : Item(),
'DDD/D1' : Item(),
'DDD/D1/D2' : Item(),
})
# deep trees state after a call to deep_trees_leaf_del with no commit
def deep_trees_after_leaf_del_no_ci(wc_dir):
if svntest.main.wc_is_singledb(wc_dir):
return deep_trees_after_leaf_del
else:
return deep_trees_empty_dirs
def deep_trees_tree_del(base):
"""Helper function for deep trees test cases. Delete top-level dirs."""
j = os.path.join
F = j(base, 'F', 'alpha')
D = j(base, 'D', 'D1')
DF = j(base, 'DF', 'D1')
DD = j(base, 'DD', 'D1')
DDF = j(base, 'DDF', 'D1')
DDD = j(base, 'DDD', 'D1')
main.run_svn(None, 'rm', F, D, DF, DD, DDF, DDD)
def deep_trees_rmtree(base):
"""Helper function for deep trees test cases. Delete top-level dirs
with rmtree instead of svn del."""
j = os.path.join
F = j(base, 'F', 'alpha')
D = j(base, 'D', 'D1')
DF = j(base, 'DF', 'D1')
DD = j(base, 'DD', 'D1')
DDF = j(base, 'DDF', 'D1')
DDD = j(base, 'DDD', 'D1')
os.unlink(F)
main.safe_rmtree(D)
main.safe_rmtree(DF)
main.safe_rmtree(DD)
main.safe_rmtree(DDF)
main.safe_rmtree(DDD)
# deep trees state after a call to deep_trees_tree_del
deep_trees_after_tree_del = wc.State('', {
'F' : Item(),
'D' : Item(),
'DF' : Item(),
'DD' : Item(),
'DDF' : Item(),
'DDD' : Item(),
})
# deep trees state without any files
deep_trees_empty_dirs = wc.State('', {
'F' : Item(),
'D' : Item(),
'D/D1' : Item(),
'DF' : Item(),
'DF/D1' : Item(),
'DD' : Item(),
'DD/D1' : Item(),
'DD/D1/D2' : Item(),
'DDF' : Item(),
'DDF/D1' : Item(),
'DDF/D1/D2' : Item(),
'DDD' : Item(),
'DDD/D1' : Item(),
'DDD/D1/D2' : Item(),
'DDD/D1/D2/D3' : Item(),
})
# deep trees state after a call to deep_trees_tree_del with no commit
def deep_trees_after_tree_del_no_ci(wc_dir):
if svntest.main.wc_is_singledb(wc_dir):
return deep_trees_after_tree_del
else:
return deep_trees_empty_dirs
def deep_trees_tree_del_repos(base):
"""Helper function for deep trees test cases. Delete top-level dirs,
directly in the repository."""
j = '/'.join
F = j([base, 'F', 'alpha'])
D = j([base, 'D', 'D1'])
DF = j([base, 'DF', 'D1'])
DD = j([base, 'DD', 'D1'])
DDF = j([base, 'DDF', 'D1'])
DDD = j([base, 'DDD', 'D1'])
main.run_svn(None, 'mkdir', '-m', '', F, D, DF, DD, DDF, DDD)
# Expected merge/update/switch output.
deep_trees_conflict_output = wc.State('', {
'F/alpha' : Item(status=' ', treeconflict='C'),
'D/D1' : Item(status=' ', treeconflict='C'),
'DF/D1' : Item(status=' ', treeconflict='C'),
'DD/D1' : Item(status=' ', treeconflict='C'),
'DDF/D1' : Item(status=' ', treeconflict='C'),
'DDD/D1' : Item(status=' ', treeconflict='C'),
})
deep_trees_conflict_output_skipped = wc.State('', {
'D/D1' : Item(verb='Skipped'),
'F/alpha' : Item(verb='Skipped'),
'DD/D1' : Item(verb='Skipped'),
'DF/D1' : Item(verb='Skipped'),
'DDD/D1' : Item(verb='Skipped'),
'DDF/D1' : Item(verb='Skipped'),
})
# Expected status output after merge/update/switch.
deep_trees_status_local_tree_del = wc.State('', {
'' : Item(status=' ', wc_rev=3),
'D' : Item(status=' ', wc_rev=3),
'D/D1' : Item(status='D ', wc_rev=2, treeconflict='C'),
'DD' : Item(status=' ', wc_rev=3),
'DD/D1' : Item(status='D ', wc_rev=2, treeconflict='C'),
'DD/D1/D2' : Item(status='D ', wc_rev=2),
'DDD' : Item(status=' ', wc_rev=3),
'DDD/D1' : Item(status='D ', wc_rev=2, treeconflict='C'),
'DDD/D1/D2' : Item(status='D ', wc_rev=2),
'DDD/D1/D2/D3' : Item(status='D ', wc_rev=2),
'DDF' : Item(status=' ', wc_rev=3),
'DDF/D1' : Item(status='D ', wc_rev=2, treeconflict='C'),
'DDF/D1/D2' : Item(status='D ', wc_rev=2),
'DDF/D1/D2/gamma' : Item(status='D ', wc_rev=2),
'DF' : Item(status=' ', wc_rev=3),
'DF/D1' : Item(status='D ', wc_rev=2, treeconflict='C'),
'DF/D1/beta' : Item(status='D ', wc_rev=2),
'F' : Item(status=' ', wc_rev=3),
'F/alpha' : Item(status='D ', wc_rev=2, treeconflict='C'),
})
deep_trees_status_local_leaf_edit = wc.State('', {
'' : Item(status=' ', wc_rev=3),
'D' : Item(status=' ', wc_rev=3),
'D/D1' : Item(status=' M', wc_rev=2, treeconflict='C'),
'D/D1/delta' : Item(status='A ', wc_rev=0),
'DD' : Item(status=' ', wc_rev=3),
'DD/D1' : Item(status=' ', wc_rev=2, treeconflict='C'),
'DD/D1/D2' : Item(status=' M', wc_rev=2),
'DD/D1/D2/epsilon' : Item(status='A ', wc_rev=0),
'DDD' : Item(status=' ', wc_rev=3),
'DDD/D1' : Item(status=' ', wc_rev=2, treeconflict='C'),
'DDD/D1/D2' : Item(status=' ', wc_rev=2),
'DDD/D1/D2/D3' : Item(status=' M', wc_rev=2),
'DDD/D1/D2/D3/zeta' : Item(status='A ', wc_rev=0),
'DDF' : Item(status=' ', wc_rev=3),
'DDF/D1' : Item(status=' ', wc_rev=2, treeconflict='C'),
'DDF/D1/D2' : Item(status=' ', wc_rev=2),
'DDF/D1/D2/gamma' : Item(status='MM', wc_rev=2),
'DF' : Item(status=' ', wc_rev=3),
'DF/D1' : Item(status=' ', wc_rev=2, treeconflict='C'),
'DF/D1/beta' : Item(status='MM', wc_rev=2),
'F' : Item(status=' ', wc_rev=3),
'F/alpha' : Item(status='MM', wc_rev=2, treeconflict='C'),
})
class DeepTreesTestCase:
"""Describes one tree-conflicts test case.
See deep_trees_run_tests_scheme_for_update(), ..._switch(), ..._merge().
The name field is the subdirectory name in which the test should be run.
The local_action and incoming_action are the functions to run
to construct the local changes and incoming changes, respectively.
See deep_trees_leaf_edit, deep_trees_tree_del, etc.
The expected_* and error_re_string arguments are described in functions
run_and_verify_[update|switch|merge]
except expected_info, which is a dict that has path keys with values
that are dicts as passed to run_and_verify_info():
expected_info = {
'F/alpha' : {
'Revision' : '3',
'Tree conflict' :
'^local delete, incoming edit upon update'
+ ' Source left: .file.*/F/alpha@2'
+ ' Source right: .file.*/F/alpha@3$',
},
'DF/D1' : {
'Tree conflict' :
'^local delete, incoming edit upon update'
+ ' Source left: .dir.*/DF/D1@2'
+ ' Source right: .dir.*/DF/D1@3$',
},
...
}
Note: expected_skip is only used in merge, i.e. using
deep_trees_run_tests_scheme_for_merge.
"""
def __init__(self, name, local_action, incoming_action,
expected_output = None, expected_disk = None,
expected_status = None, expected_skip = None,
error_re_string = None,
commit_block_string = ".*remains in conflict.*",
expected_info = None):
self.name = name
self.local_action = local_action
self.incoming_action = incoming_action
self.expected_output = expected_output
self.expected_disk = expected_disk
self.expected_status = expected_status
self.expected_skip = expected_skip
self.error_re_string = error_re_string
self.commit_block_string = commit_block_string
self.expected_info = expected_info
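# Hedged illustration (the case below is invented for this sketch, not copied
# from a real test file): an entry of a "greater_scheme" list pairs a local
# action with an incoming action, plus the states expected afterwards, e.g.
#
#   example_case = DeepTreesTestCase(
#       'local_leaf_del_incoming_leaf_edit',
#       deep_trees_leaf_del,            # local action
#       deep_trees_leaf_edit,           # incoming action
#       expected_output=deep_trees_conflict_output)
#   # deep_trees_run_tests_scheme_for_update(sbox, [example_case])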
def deep_trees_run_tests_scheme_for_update(sbox, greater_scheme):
"""
Runs a given list of tests for conflicts occurring at an update operation.
To save time, this function runs a number of different test cases using
just a single repository, performing one commit for all test cases
instead of one for each test case.
1) Each test case is initialized in a separate subdir. Each subdir
again contains one set of "deep_trees", being separate container
dirs for different depths of trees (F, D, DF, DD, DDF, DDD).
2) A commit is performed across all test cases and depths.
(our initial state, -r2)
3) In each test case subdir (e.g. "local_tree_del_incoming_leaf_edit"),
its *incoming* action is performed (e.g. "deep_trees_leaf_edit"), in
each of the different depth trees (F, D, DF, ... DDD).
4) A commit is performed across all test cases and depths:
our "incoming" state is "stored away in the repository for now",
-r3.
5) All test case dirs and contained deep_trees are time-warped
(updated) back to -r2, the initial state containing deep_trees.
6) In each test case subdir (e.g. "local_tree_del_incoming_leaf_edit"),
its *local* action is performed (e.g. "deep_trees_leaf_del"), in
each of the different depth trees (F, D, DF, ... DDD).
7) An update to -r3 is performed across all test cases and depths.
This causes tree-conflicts between the "local" state in the working
copy and the "incoming" state from the repository, -r3.
8) A commit is performed in each separate container, to verify
that each tree-conflict indeed blocks a commit.
The sbox parameter is just the sbox passed to a test function. No need
to call sbox.build(), since it is called (once) within this function.
The "table" greater_scheme models all of the different test cases
that should be run using a single repository.
greater_scheme is a list of DeepTreesTestCase items, which define complete
test setups, so that they can be performed as described above.
"""
j = os.path.join
if not sbox.is_built():
sbox.build()
wc_dir = sbox.wc_dir
# 1) create directories
for test_case in greater_scheme:
try:
add_deep_trees(sbox, test_case.name)
except:
print("ERROR IN: Tests scheme for update: "
+ "while setting up deep trees in '%s'" % test_case.name)
raise
# 2) commit initial state
main.run_svn(None, 'commit', '-m', 'initial state', wc_dir)
# 3) apply incoming changes
for test_case in greater_scheme:
try:
test_case.incoming_action(j(sbox.wc_dir, test_case.name))
except:
print("ERROR IN: Tests scheme for update: "
+ "while performing incoming action in '%s'" % test_case.name)
raise
# 4) commit incoming changes
main.run_svn(None, 'commit', '-m', 'incoming changes', wc_dir)
# 5) time-warp back to -r2
main.run_svn(None, 'update', '-r2', wc_dir)
# 6) apply local changes
for test_case in greater_scheme:
try:
test_case.local_action(j(wc_dir, test_case.name))
except:
print("ERROR IN: Tests scheme for update: "
+ "while performing local action in '%s'" % test_case.name)
raise
# 7) update to -r3, conflicting with incoming changes.
# A lot of different things are expected.
# Do separate update operations for each test case.
for test_case in greater_scheme:
try:
base = j(wc_dir, test_case.name)
x_out = test_case.expected_output
if x_out != None:
x_out = x_out.copy()
x_out.wc_dir = base
x_disk = test_case.expected_disk
x_status = test_case.expected_status
if x_status != None:
x_status = x_status.copy()
x_status.wc_dir = base
run_and_verify_update(base, x_out, x_disk, None,
error_re_string = test_case.error_re_string)
if x_status:
run_and_verify_unquiet_status(base, x_status)
x_info = test_case.expected_info or {}
for path in x_info:
run_and_verify_info([x_info[path]], j(base, path))
except:
print("ERROR IN: Tests scheme for update: "
+ "while verifying in '%s'" % test_case.name)
raise
# 8) Verify that commit fails.
for test_case in greater_scheme:
try:
base = j(wc_dir, test_case.name)
x_status = test_case.expected_status
if x_status != None:
x_status = x_status.copy()
x_status.wc_dir = base
run_and_verify_commit(base, None, x_status,
test_case.commit_block_string,
base)
except:
print("ERROR IN: Tests scheme for update: "
+ "while checking commit-blocking in '%s'" % test_case.name)
raise
def deep_trees_skipping_on_update(sbox, test_case, skip_paths,
chdir_skip_paths):
"""
Create tree conflicts, then update again, expecting the existing tree
conflicts to be skipped.
SKIP_PATHS is a list of paths, relative to the "base dir", for which
"update" on the "base dir" should report as skipped.
CHDIR_SKIP_PATHS is a list of (target-path, skipped-path) pairs for which
an update of "target-path" (relative to the "base dir") should result in
"skipped-path" (relative to "target-path") being reported as skipped.
"""
"""FURTHER_ACTION is a function that will make a further modification to
each target, this being the modification that we expect to be skipped. The
function takes the "base dir" (the WC path to the test case directory) as
its only argument."""
further_action = deep_trees_tree_del_repos
j = os.path.join
wc_dir = sbox.wc_dir
base = j(wc_dir, test_case.name)
# Initialize: generate conflicts. (We do not check anything here.)
setup_case = DeepTreesTestCase(test_case.name,
test_case.local_action,
test_case.incoming_action,
None,
None,
None)
deep_trees_run_tests_scheme_for_update(sbox, [setup_case])
# Make a further change to each target in the repository so there is a new
# revision to update to. (This is r4.)
further_action(sbox.repo_url + '/' + test_case.name)
# Update whole working copy, expecting the nodes still in conflict to be
# skipped.
x_out = test_case.expected_output
if x_out != None:
x_out = x_out.copy()
x_out.wc_dir = base
x_disk = test_case.expected_disk
x_status = test_case.expected_status
if x_status != None:
x_status = x_status.copy()
x_status.wc_dir = base
# Account for nodes that were updated by further_action
x_status.tweak('', 'D', 'F', 'DD', 'DF', 'DDD', 'DDF', wc_rev=4)
run_and_verify_update(base, x_out, x_disk, None,
error_re_string = test_case.error_re_string)
run_and_verify_unquiet_status(base, x_status)
# Try to update each in-conflict subtree. Expect a 'Skipped' output for
# each, and the WC status to be unchanged.
for path in skip_paths:
run_and_verify_update(j(base, path),
wc.State(base, {path : Item(verb='Skipped')}),
None, None)
run_and_verify_unquiet_status(base, x_status)
# Try to update each in-conflict subtree. Expect a 'Skipped' output for
# each, and the WC status to be unchanged.
# This time, cd to the subdir before updating it.
was_cwd = os.getcwd()
for path, skipped in chdir_skip_paths:
if isinstance(skipped, list):
expected_skip = {}
for p in skipped:
expected_skip[p] = Item(verb='Skipped')
else:
expected_skip = {skipped : Item(verb='Skipped')}
p = j(base, path)
run_and_verify_update(p,
wc.State(p, expected_skip),
None, None)
os.chdir(was_cwd)
run_and_verify_unquiet_status(base, x_status)
# Verify that commit still fails.
for path, skipped in chdir_skip_paths:
run_and_verify_commit(j(base, path), None, None,
test_case.commit_block_string,
base)
run_and_verify_unquiet_status(base, x_status)
def deep_trees_run_tests_scheme_for_switch(sbox, greater_scheme):
"""
Runs a given list of tests for conflicts occurring at a switch operation.
To save time, this function runs a number of different test cases using
just a single repository, performing one commit for all test cases
instead of one for each test case.
1) Each test case is initialized in a separate subdir. Each subdir
again contains two subdirs: one "local" and one "incoming" for
the switch operation. These contain a set of deep_trees each.
2) A commit is performed across all test cases and depths.
(our initial state, -r2)
3) In each test case subdir's incoming subdir, the
incoming actions are performed.
4) A commit is performed across all test cases and depths. (-r3)
5) In each test case subdir's local subdir, the local actions are
performed. They remain uncommitted in the working copy.
6) In each test case subdir's local dir, a switch is performed to its
corresponding incoming dir.
This causes conflicts between the "local" state in the working
copy and the "incoming" state from the incoming subdir (still -r3).
7) A commit is performed in each separate container, to verify
that each tree-conflict indeed blocks a commit.
The sbox parameter is just the sbox passed to a test function. No need
to call sbox.build(), since it is called (once) within this function.
The "table" greater_scheme models all of the different test cases
that should be run using a single repository.
greater_scheme is a list of DeepTreesTestCase items, which define complete
test setups, so that they can be performed as described above.
"""
j = os.path.join
if not sbox.is_built():
sbox.build()
wc_dir = sbox.wc_dir
# 1) Create directories.
for test_case in greater_scheme:
try:
base = j(sbox.wc_dir, test_case.name)
os.makedirs(base)
make_deep_trees(j(base, "local"))
make_deep_trees(j(base, "incoming"))
main.run_svn(None, 'add', base)
except:
print("ERROR IN: Tests scheme for switch: "
+ "while setting up deep trees in '%s'" % test_case.name)
raise
# 2) Commit initial state (-r2).
main.run_svn(None, 'commit', '-m', 'initial state', wc_dir)
# 3) Apply incoming changes
for test_case in greater_scheme:
try:
test_case.incoming_action(j(sbox.wc_dir, test_case.name, "incoming"))
except:
print("ERROR IN: Tests scheme for switch: "
+ "while performing incoming action in '%s'" % test_case.name)
raise
# 4) Commit all changes (-r3).
main.run_svn(None, 'commit', '-m', 'incoming changes', wc_dir)
# 5) Apply local changes in their according subdirs.
for test_case in greater_scheme:
try:
test_case.local_action(j(sbox.wc_dir, test_case.name, "local"))
except:
print("ERROR IN: Tests scheme for switch: "
+ "while performing local action in '%s'" % test_case.name)
raise
# 6) switch the local dir to the incoming url, conflicting with incoming
# changes. A lot of different things are expected.
# Do separate switch operations for each test case.
for test_case in greater_scheme:
try:
local = j(wc_dir, test_case.name, "local")
incoming = sbox.repo_url + "/" + test_case.name + "/incoming"
x_out = test_case.expected_output
if x_out != None:
x_out = x_out.copy()
x_out.wc_dir = local
x_disk = test_case.expected_disk
x_status = test_case.expected_status
if x_status != None:
x_status = x_status.copy()
x_status.wc_dir = local
run_and_verify_switch(local, local, incoming, x_out, x_disk, None,
test_case.error_re_string, None, None, None,
None, False, '--ignore-ancestry')
run_and_verify_unquiet_status(local, x_status)
x_info = test_case.expected_info or {}
for path in x_info:
run_and_verify_info([x_info[path]], j(local, path))
except:
print("ERROR IN: Tests scheme for switch: "
+ "while verifying in '%s'" % test_case.name)
raise
# 7) Verify that commit fails.
for test_case in greater_scheme:
try:
local = j(wc_dir, test_case.name, 'local')
x_status = test_case.expected_status
if x_status != None:
x_status = x_status.copy()
x_status.wc_dir = local
run_and_verify_commit(local, None, x_status,
test_case.commit_block_string,
local)
except:
print("ERROR IN: Tests scheme for switch: "
+ "while checking commit-blocking in '%s'" % test_case.name)
raise
def deep_trees_run_tests_scheme_for_merge(sbox, greater_scheme,
do_commit_local_changes,
do_commit_conflicts=True,
ignore_ancestry=False):
"""
Runs a given list of tests for conflicts occurring at a merge operation.
To save time, this function runs a number of different test cases using
just a single repository, performing one commit for all test cases
instead of one for each test case.
1) Each test case is initialized in a separate subdir. Each subdir
initially contains another subdir, called "incoming", which
contains a set of deep_trees.
2) A commit is performed across all test cases and depths.
(a pre-initial state)
3) In each test case subdir, the "incoming" subdir is copied to "local",
via the `svn copy' command. Each test case's subdir now has two sub-
dirs: "local" and "incoming", initial states for the merge operation.
4) An update is performed across all test cases and depths, so that the
copies made in 3) are pulled into the wc.
5) In each test case's "incoming" subdir, the incoming action is
performed.
6) A commit is performed across all test cases and depths, to commit
the incoming changes.
If do_commit_local_changes is True, this becomes step 7 (swap steps).
7) In each test case's "local" subdir, the local_action is performed.
If do_commit_local_changes is True, this becomes step 6 (swap steps).
Then, in effect, the local changes are committed as well.
8) In each test case subdir, the "incoming" subdir is merged into the
"local" subdir. If ignore_ancestry is True, then the merge is done
with the --ignore-ancestry option, so mergeinfo is neither considered
nor recorded. This causes conflicts between the "local" state in the
working copy and the "incoming" state from the incoming subdir.
9) If do_commit_conflicts is True, then a commit is performed in each
separate container, to verify that each tree-conflict indeed blocks
a commit.
The sbox parameter is just the sbox passed to a test function. No need
to call sbox.build(), since it is called (once) within this function.
The "table" greater_scheme models all of the different test cases
that should be run using a single repository.
greater_scheme is a list of DeepTreesTestCase items, which define complete
test setups, so that they can be performed as described above.
"""
j = os.path.join
if not sbox.is_built():
sbox.build()
wc_dir = sbox.wc_dir
# 1) Create directories.
for test_case in greater_scheme:
try:
base = j(sbox.wc_dir, test_case.name)
os.makedirs(base)
make_deep_trees(j(base, "incoming"))
main.run_svn(None, 'add', base)
except:
print("ERROR IN: Tests scheme for merge: "
+ "while setting up deep trees in '%s'" % test_case.name)
raise
# 2) Commit pre-initial state (-r2).
main.run_svn(None, 'commit', '-m', 'pre-initial state', wc_dir)
# 3) Copy "incoming" to "local".
for test_case in greater_scheme:
try:
base_url = sbox.repo_url + "/" + test_case.name
incoming_url = base_url + "/incoming"
local_url = base_url + "/local"
main.run_svn(None, 'cp', incoming_url, local_url, '-m',
'copy incoming to local')
except:
print("ERROR IN: Tests scheme for merge: "
+ "while copying deep trees in '%s'" % test_case.name)
raise
# 4) Update to load all of the "/local" subdirs into the working copies.
try:
main.run_svn(None, 'up', sbox.wc_dir)
except:
print("ERROR IN: Tests scheme for merge: "
+ "while updating local subdirs")
raise
# 5) Perform incoming actions
for test_case in greater_scheme:
try:
test_case.incoming_action(j(sbox.wc_dir, test_case.name, "incoming"))
except:
print("ERROR IN: Tests scheme for merge: "
+ "while performing incoming action in '%s'" % test_case.name)
raise
# 6) or 7) Commit all incoming actions
if not do_commit_local_changes:
try:
main.run_svn(None, 'ci', '-m', 'Committing incoming actions',
sbox.wc_dir)
except:
print("ERROR IN: Tests scheme for merge: "
+ "while committing incoming actions")
raise
# 7) or 6) Perform all local actions.
for test_case in greater_scheme:
try:
test_case.local_action(j(sbox.wc_dir, test_case.name, "local"))
except:
print("ERROR IN: Tests scheme for merge: "
+ "while performing local action in '%s'" % test_case.name)
raise
# 6) or 7) Commit all incoming actions
if do_commit_local_changes:
try:
main.run_svn(None, 'ci', '-m', 'Committing incoming and local actions',
sbox.wc_dir)
except:
print("ERROR IN: Tests scheme for merge: "
+ "while committing incoming and local actions")
raise
# 8) Merge all "incoming" subdirs to their respective "local" subdirs.
# This creates conflicts between the local changes in the "local" wc
# subdirs and the incoming states committed in the "incoming" subdirs.
for test_case in greater_scheme:
try:
local = j(sbox.wc_dir, test_case.name, "local")
incoming = sbox.repo_url + "/" + test_case.name + "/incoming"
x_out = test_case.expected_output
if x_out != None:
x_out = x_out.copy()
x_out.wc_dir = local
x_disk = test_case.expected_disk
x_status = test_case.expected_status
if x_status != None:
x_status = x_status.copy()
x_status.wc_dir = local
x_skip = test_case.expected_skip
if x_skip != None:
x_skip = x_skip.copy()
x_skip.wc_dir = local
varargs = (local,)
if ignore_ancestry:
varargs = varargs + ('--ignore-ancestry',)
run_and_verify_merge(local, None, None, incoming, None,
x_out, None, None, x_disk, None, x_skip,
test_case.error_re_string,
None, None, None, None,
False, False, *varargs)
run_and_verify_unquiet_status(local, x_status)
except:
print("ERROR IN: Tests scheme for merge: "
+ "while verifying in '%s'" % test_case.name)
raise
# 9) Verify that commit fails.
if do_commit_conflicts:
for test_case in greater_scheme:
try:
local = j(wc_dir, test_case.name, 'local')
x_status = test_case.expected_status
if x_status != None:
x_status = x_status.copy()
x_status.wc_dir = local
run_and_verify_commit(local, None, x_status,
test_case.commit_block_string,
local)
except:
print("ERROR IN: Tests scheme for merge: "
+ "while checking commit-blocking in '%s'" % test_case.name)
raise
| wbond/subversion | subversion/tests/cmdline/svntest/actions.py | Python | apache-2.0 | 113,603 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""TFRecord sources and sinks."""
from __future__ import absolute_import
import logging
import struct
from apache_beam import coders
from apache_beam.io import filebasedsource
from apache_beam.io import fileio
from apache_beam.io.iobase import Read
from apache_beam.io.iobase import Write
from apache_beam.transforms import PTransform
import crcmod
__all__ = ['ReadFromTFRecord', 'WriteToTFRecord']
def _default_crc32c_fn(value):
"""Calculates crc32c by either snappy or crcmod based on installation."""
if not _default_crc32c_fn.fn:
try:
import snappy # pylint: disable=import-error
_default_crc32c_fn.fn = snappy._crc32c # pylint: disable=protected-access
except ImportError:
logging.warning('Couldn\'t find python-snappy so the implementation of '
'_TFRecordUtil._masked_crc32c is not as fast as it could '
'be.')
_default_crc32c_fn.fn = crcmod.predefined.mkPredefinedCrcFun('crc-32c')
return _default_crc32c_fn.fn(value)
_default_crc32c_fn.fn = None
class _TFRecordUtil(object):
"""Provides basic TFRecord encoding/decoding with consistency checks.
For detailed TFRecord format description see:
https://www.tensorflow.org/versions/master/api_docs/python/python_io.html#tfrecords-format-details
Note that masks and lengths are represented in little-endian byte order.
"""
@classmethod
def _masked_crc32c(cls, value, crc32c_fn=_default_crc32c_fn):
"""Compute a masked crc32c checksum for a value.
Args:
value: A string for which we compute the crc.
crc32c_fn: A function that can compute a crc32c.
This is a performance hook that also helps with testing. Callers are
not expected to make use of it directly.
Returns:
Masked crc32c checksum.
"""
crc = crc32c_fn(value)
return (((crc >> 15) | (crc << 17)) + 0xa282ead8) & 0xffffffff
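# Worked example of the mask step (illustration only): for crc == 0 the
# expression above evaluates to ((0 >> 15) | (0 << 17)) + 0xa282ead8,
# i.e. 0xa282ead8.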
@staticmethod
def encoded_num_bytes(record):
"""Return the number of bytes consumed by a record in its encoded form."""
# 16 = 8 (Length) + 4 (crc of length) + 4 (crc of data)
return len(record) + 16
@classmethod
def write_record(cls, file_handle, value):
"""Encode a value as a TFRecord.
Args:
file_handle: The file to write to.
value: A string content of the record.
"""
encoded_length = struct.pack('<Q', len(value))
file_handle.write('{}{}{}{}'.format(
encoded_length,
struct.pack('<I', cls._masked_crc32c(encoded_length)), #
value,
struct.pack('<I', cls._masked_crc32c(value))))
@classmethod
def read_record(cls, file_handle):
"""Read a record from a TFRecords file.
Args:
file_handle: The file to read from.
Returns:
None if EOF is reached; the payload of the record otherwise.
Raises:
ValueError: If file appears to not be a valid TFRecords file.
"""
buf_length_expected = 12
buf = file_handle.read(buf_length_expected)
if not buf:
return None # EOF Reached.
# Validate all length related payloads.
if len(buf) != buf_length_expected:
raise ValueError('Not a valid TFRecord. Fewer than %d bytes: %s' %
(buf_length_expected, buf.encode('hex')))
length, length_mask_expected = struct.unpack('<QI', buf)
length_mask_actual = cls._masked_crc32c(buf[:8])
if length_mask_actual != length_mask_expected:
raise ValueError('Not a valid TFRecord. Mismatch of length mask: %s' %
buf.encode('hex'))
# Validate all data related payloads.
buf_length_expected = length + 4
buf = file_handle.read(buf_length_expected)
if len(buf) != buf_length_expected:
raise ValueError('Not a valid TFRecord. Fewer than %d bytes: %s' %
(buf_length_expected, buf.encode('hex')))
data, data_mask_expected = struct.unpack('<%dsI' % length, buf)
data_mask_actual = cls._masked_crc32c(data)
if data_mask_actual != data_mask_expected:
raise ValueError('Not a valid TFRecord. Mismatch of data mask: %s' %
buf.encode('hex'))
# All validation checks passed.
return data
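# Hedged round-trip sketch (illustration only, not executed by this module);
# an in-memory buffer stands in for a real TFRecords file:
#
#   import io
#   buf = io.BytesIO()
#   _TFRecordUtil.write_record(buf, 'hello world')
#   buf.seek(0)
#   assert _TFRecordUtil.read_record(buf) == 'hello world'
#   assert _TFRecordUtil.read_record(buf) is None  # EOF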
class _TFRecordSource(filebasedsource.FileBasedSource):
"""A File source for reading files of TFRecords.
For detailed TFRecords format description see:
https://www.tensorflow.org/versions/master/api_docs/python/python_io.html#tfrecords-format-details
"""
def __init__(self,
file_pattern,
coder,
compression_type,
validate):
"""Initialize a TFRecordSource. See ReadFromTFRecord for details."""
super(_TFRecordSource, self).__init__(
file_pattern=file_pattern,
compression_type=compression_type,
splittable=False,
validate=validate)
self._coder = coder
def read_records(self, file_name, offset_range_tracker):
if offset_range_tracker.start_position():
raise ValueError('Start position not 0:%s' %
offset_range_tracker.start_position())
current_offset = offset_range_tracker.start_position()
with self.open_file(file_name) as file_handle:
while True:
if not offset_range_tracker.try_claim(current_offset):
raise RuntimeError('Unable to claim position: %s' % current_offset)
record = _TFRecordUtil.read_record(file_handle)
if record is None:
return # Reached EOF
else:
current_offset += _TFRecordUtil.encoded_num_bytes(record)
yield self._coder.decode(record)
class ReadFromTFRecord(PTransform):
"""Transform for reading TFRecord sources."""
def __init__(self,
file_pattern,
coder=coders.BytesCoder(),
compression_type=fileio.CompressionTypes.AUTO,
validate=True,
**kwargs):
"""Initialize a ReadFromTFRecord transform.
Args:
file_pattern: A file glob pattern to read TFRecords from.
coder: Coder used to decode each record.
compression_type: Used to handle compressed input files. Default value
is CompressionTypes.AUTO, in which case the file_path's extension will
be used to detect the compression.
validate: Boolean flag to verify that the files exist during the pipeline
creation time.
**kwargs: optional args dictionary. These are passed through to parent
constructor.
Returns:
A ReadFromTFRecord transform object.
"""
super(ReadFromTFRecord, self).__init__(**kwargs)
self._args = (file_pattern, coder, compression_type, validate)
def expand(self, pvalue):
return pvalue.pipeline | Read(_TFRecordSource(*self._args))
class _TFRecordSink(fileio.FileSink):
"""Sink for writing TFRecords files.
For detailed TFRecord format description see:
https://www.tensorflow.org/versions/master/api_docs/python/python_io.html#tfrecords-format-details
"""
def __init__(self, file_path_prefix, coder, file_name_suffix, num_shards,
shard_name_template, compression_type):
"""Initialize a TFRecordSink. See WriteToTFRecord for details."""
super(_TFRecordSink, self).__init__(
file_path_prefix=file_path_prefix,
coder=coder,
file_name_suffix=file_name_suffix,
num_shards=num_shards,
shard_name_template=shard_name_template,
mime_type='application/octet-stream',
compression_type=compression_type)
def write_encoded_record(self, file_handle, value):
_TFRecordUtil.write_record(file_handle, value)
class WriteToTFRecord(PTransform):
"""Transform for writing to TFRecord sinks."""
def __init__(self,
file_path_prefix,
coder=coders.BytesCoder(),
file_name_suffix='',
num_shards=0,
shard_name_template=fileio.DEFAULT_SHARD_NAME_TEMPLATE,
compression_type=fileio.CompressionTypes.AUTO,
**kwargs):
"""Initialize WriteToTFRecord transform.
Args:
file_path_prefix: The file path to write to. The files written will begin
with this prefix, followed by a shard identifier (see num_shards), and
end in a common extension, if given by file_name_suffix.
coder: Coder used to encode each record.
file_name_suffix: Suffix for the files written.
num_shards: The number of files (shards) used for output. If not set, the
default value will be used.
shard_name_template: A template string containing placeholders for
the shard number and shard count. Currently, '' and
'-SSSSS-of-NNNNN' are the only patterns allowed.
When constructing a filename for a particular shard number, the
upper-case letters 'S' and 'N' are replaced with the 0-padded shard
number and shard count respectively. This argument can be '' in which
case it behaves as if num_shards was set to 1 and only one file will be
generated. The default pattern is '-SSSSS-of-NNNNN'.
compression_type: Used to handle compressed output files. Typical value
is CompressionTypes.AUTO, in which case the file_path's extension will
be used to detect the compression.
**kwargs: Optional args dictionary. These are passed through to parent
constructor.
Returns:
A WriteToTFRecord transform object.
"""
super(WriteToTFRecord, self).__init__(**kwargs)
self._args = (file_path_prefix, coder, file_name_suffix, num_shards,
shard_name_template, compression_type)
def expand(self, pcoll):
return pcoll | Write(_TFRecordSink(*self._args))
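# Hedged pipeline sketch (illustration only; the paths are placeholders):
#
#   import apache_beam as beam
#   p = beam.Pipeline()
#   (p | 'read' >> ReadFromTFRecord('/tmp/input.tfrecord*')
#      | 'write' >> WriteToTFRecord('/tmp/output/records'))
#   p.run()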
| chamikaramj/incubator-beam | sdks/python/apache_beam/io/tfrecordio.py | Python | apache-2.0 | 10,445 |
################################################################################
# Copyright (c) 2015-2019 Skymind, Inc.
#
# This program and the accompanying materials are made available under the
# terms of the Apache License, Version 2.0 which is available at
# https://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# SPDX-License-Identifier: Apache-2.0
################################################################################
from .progressbar import ProgressBar
import requests
import math
import os
import hashlib
def download(url, file_name):
r = requests.get(url, stream=True)
file_size = int(r.headers['Content-length'])
'''
if py3:
file_size = int(u.getheader("Content-Length")[0])
else:
file_size = int(u.info().getheaders("Content-Length")[0])
'''
file_exists = False
if os.path.isfile(file_name):
local_file_size = os.path.getsize(file_name)
if local_file_size == file_size:
sha1_file = file_name + '.sha1'
if os.path.isfile(sha1_file):
print('sha1 found')
with open(sha1_file) as f:
expected_sha1 = f.read()
BLOCKSIZE = 65536
sha1 = hashlib.sha1()
with open(file_name, 'rb') as f:
buff = f.read(BLOCKSIZE)
while len(buff) > 0:
sha1.update(buff)
buff = f.read(BLOCKSIZE)
if expected_sha1.strip() == sha1.hexdigest():
file_exists = True
else:
print("File corrupt. Downloading again.")
os.remove(file_name)
else:
file_exists = True
else:
print("File corrupt. Downloading again.")
os.remove(file_name)
if not file_exists:
factor = int(math.floor(math.log(file_size) / math.log(1024)))
display_file_size = str(file_size / 1024 ** factor) + \
['B', 'KB', 'MB', 'GB', 'TB', 'PB'][factor]
print("Source: " + url)
print("Destination " + file_name)
print("Size: " + display_file_size)
file_size_dl = 0
block_sz = 8192
f = open(file_name, 'wb')
pbar = ProgressBar(file_size)
for chunk in r.iter_content(chunk_size=block_sz):
if not chunk:
continue
chunk_size = len(chunk)
file_size_dl += chunk_size
f.write(chunk)
pbar.update(chunk_size)
# status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
# status = status + chr(8)*(len(status)+1)
# print(status)
f.close()
else:
print("File already exists - " + file_name)
return True
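# --- Illustrative usage sketch (added commentary, not part of the original) ---
# The URL and destination path below are placeholders, not values used by
# pydl4j itself.
#
#   download('https://example.com/archive.tar.gz', '/tmp/archive.tar.gz')
#
# If '/tmp/archive.tar.gz.sha1' exists next to a previously completed download,
# the checksum branch above decides whether the file is downloaded again.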
| RobAltena/deeplearning4j | pydl4j/pydl4j/downloader.py | Python | apache-2.0 | 3,179 |
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
# COM interop utility module
import sys
import nt
from iptest.assert_util import *
from iptest.file_util import *
from iptest.process_util import *
if is_cli:
import clr
from System import Type
from System import Activator
from System import Exception as System_dot_Exception
remove_ironpython_dlls(testpath.public_testdir)
load_iron_python_dll()
import IronPython
load_iron_python_test()
import IronPythonTest
#--For asserts in IP/DLR assemblies----------------------------------------
from System.Diagnostics import Debug, DefaultTraceListener
class MyTraceListener(DefaultTraceListener):
def Fail(self, msg, detailMsg=''):
print "ASSERT FAILED:", msg
if detailMsg!='':
print " ", detailMsg
sys.exit(1)
if is_snap:
Debug.Listeners.Clear()
Debug.Listeners.Add(MyTraceListener())
is_pywin32 = False
if sys.platform=="win32":
try:
import win32com.client
is_pywin32 = True
if sys.prefix not in nt.environ["Path"]:
nt.environ["Path"] += ";" + sys.prefix
except:
pass
#------------------------------------------------------------------------------
#--GLOBALS
windir = get_environ_variable("windir")
agentsvr_path = path_combine(windir, r"msagent\agentsvr.exe")
scriptpw_path = path_combine(windir, r"system32\scriptpw.dll")
STRING_VALUES = [ "", "a", "ab", "abc", "aa",
"a" * 100000,
"1", "1.0", "1L", "object", "str", "object()",
" ", "_", "abc ", " abc", " abc ", "ab c", "ab c",
"\ta", "a\t", "\n", "\t", "\na", "a\n"]
STRING_VALUES = [unicode(x) for x in STRING_VALUES] + STRING_VALUES
def aFunc(): pass
class KNew(object): pass
class KOld: pass
NON_NUMBER_VALUES = [ object,
KNew, KOld,
Exception,
object(), KNew(), KOld(),
aFunc, str, eval, type,
[], [3.14], ["abc"],
(), (3,), (u"xyz",),
xrange(5),
{}, {'a':1},
__builtins__,
]
FPN_VALUES = [ -1.23, -1.0, -0.123, -0.0, 0.123, 1.0, 1.23,
0.0000001, 3.14159265, 1E10, 1.0E10 ]
UINT_VALUES = [ 0, 1, 2, 7, 10, 32]
INT_VALUES = [ -x for x in UINT_VALUES ] + UINT_VALUES
LONG_VALUES = [long(x) for x in INT_VALUES]
COMPLEX_VALUES = [ 3j]
#--Subclasses of Python/.NET types
class Py_Str(str): pass
if is_cli:
class Py_System_String(System.String): pass
class Py_Float(float): pass
class Py_Double(float): pass
if is_cli:
class Py_System_Double(System.Double): pass
class Py_UShort(int): pass
class Py_ULong(long): pass
class Py_ULongLong(long): pass
class Py_Short(int): pass
class Py_Long(int): pass
if is_cli:
class Py_System_Int32(System.Int32): pass
class Py_LongLong(long): pass
#-------Helpers----------------
def shallow_copy(in_list):
'''
We do not necessarily have access to the copy module.
'''
return [x for x in in_list]
def pos_num_helper(clr_type):
return [
clr_type.MinValue,
clr_type.MinValue + 1,
clr_type.MinValue + 2,
clr_type.MinValue + 10,
clr_type.MaxValue/2,
clr_type.MaxValue - 10,
clr_type.MaxValue - 2,
clr_type.MaxValue - 1,
clr_type.MaxValue,
]
def overflow_num_helper(clr_type):
return [
clr_type.MinValue - 1,
clr_type.MinValue - 2,
clr_type.MinValue - 3,
clr_type.MinValue - 10,
clr_type.MaxValue + 10,
clr_type.MaxValue + 3,
clr_type.MaxValue + 2,
clr_type.MaxValue + 1,
]
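# Illustrative note (added commentary, not part of the original): with the
# CPython stand-in System classes defined near the bottom of this module
# (Byte.MinValue == 0, Byte.MaxValue == 255), these helpers evaluate to e.g.
#   pos_num_helper(System.Byte)      == [0, 1, 2, 10, 127, 245, 253, 254, 255]
#   overflow_num_helper(System.Byte) == [-1, -2, -3, -10, 265, 258, 257, 256]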
def valueErrorTrigger(in_type):
ret_val = {}
############################################################
    #Is there anything in Python that cannot be evaluated as a bool?
ret_val["VARIANT_BOOL"] = [ ]
############################################################
ret_val["BYTE"] = shallow_copy(NON_NUMBER_VALUES)
ret_val["BYTE"] += COMPLEX_VALUES
if sys.platform=="win32":
ret_val["BYTE"] += FPN_VALUES #Merlin 323751
ret_val["BYTE"] = [x for x in ret_val["BYTE"] if type(x) not in [unicode, str]] #INCOMPAT BUG - should be ValueError
ret_val["BYTE"] = [x for x in ret_val["BYTE"] if not isinstance(x, KOld)] #INCOMPAT BUG - should be AttributeError
############################################################
ret_val["BSTR"] = shallow_copy(NON_NUMBER_VALUES)
ret_val["BSTR"] += COMPLEX_VALUES
if sys.platform=="win32":
ret_val["BSTR"] = [] #INCOMPAT BUG
#strip out string values
ret_val["BSTR"] = [x for x in ret_val["BSTR"] if type(x) is not str and type(x) is not KNew and type(x) is not KOld and type(x) is not object]
############################################################
ret_val["CHAR"] = shallow_copy(NON_NUMBER_VALUES)
ret_val["CHAR"] += COMPLEX_VALUES
if sys.platform=="win32":
ret_val["CHAR"] += FPN_VALUES #Merlin 323751
############################################################
ret_val["FLOAT"] = shallow_copy(NON_NUMBER_VALUES)
ret_val["FLOAT"] += COMPLEX_VALUES
if sys.platform=="win32":
ret_val["FLOAT"] += UINT_VALUES + INT_VALUES #COMPAT BUG
############################################################
ret_val["DOUBLE"] = shallow_copy(ret_val["FLOAT"])
############################################################
ret_val["USHORT"] = shallow_copy(NON_NUMBER_VALUES)
ret_val["USHORT"] += COMPLEX_VALUES
if sys.platform=="win32":
ret_val["USHORT"] += FPN_VALUES #Merlin 323751
############################################################
ret_val["ULONG"] = shallow_copy(ret_val["USHORT"])
############################################################
ret_val["ULONGLONG"] = shallow_copy(ret_val["ULONG"])
############################################################
ret_val["SHORT"] = shallow_copy(NON_NUMBER_VALUES)
ret_val["SHORT"] += COMPLEX_VALUES
if sys.platform=="win32":
ret_val["SHORT"] += FPN_VALUES #Merlin 323751
############################################################
ret_val["LONG"] = shallow_copy(ret_val["SHORT"])
############################################################
ret_val["LONGLONG"] = shallow_copy(ret_val["LONG"])
############################################################
return ret_val[in_type]
def typeErrorTrigger(in_type):
ret_val = {}
############################################################
    #Is there anything in Python that cannot be evaluated as a bool?
ret_val["VARIANT_BOOL"] = [ ]
############################################################
ret_val["BYTE"] = []
############################################################
ret_val["BSTR"] = []
#strip out string values
ret_val["BSTR"] = [x for x in ret_val["BSTR"] if type(x) is not str]
############################################################
ret_val["CHAR"] = []
############################################################
ret_val["FLOAT"] = []
############################################################
ret_val["DOUBLE"] = []
############################################################
ret_val["USHORT"] = []
############################################################
ret_val["ULONG"] = []
############################################################
ret_val["ULONGLONG"] = []
############################################################
ret_val["SHORT"] = []
############################################################
ret_val["LONG"] = []
############################################################
ret_val["LONGLONG"] = []
############################################################
return ret_val[in_type]
def overflowErrorTrigger(in_type):
ret_val = {}
############################################################
ret_val["VARIANT_BOOL"] = []
############################################################
ret_val["BYTE"] = []
ret_val["BYTE"] += overflow_num_helper(System.Byte)
############################################################
#Doesn't seem possible to create a value (w/o 1st overflowing
#in Python) to pass to the COM method which will overflow.
ret_val["BSTR"] = [] #["0123456789" * 1234567890]
############################################################
ret_val["CHAR"] = []
ret_val["CHAR"] += overflow_num_helper(System.SByte)
############################################################
ret_val["FLOAT"] = []
ret_val["FLOAT"] += overflow_num_helper(System.Double)
#Shouldn't be possible to overflow a double.
ret_val["DOUBLE"] = []
############################################################
ret_val["USHORT"] = []
ret_val["USHORT"] += overflow_num_helper(System.UInt16)
ret_val["ULONG"] = []
ret_val["ULONG"] += overflow_num_helper(System.UInt32)
ret_val["ULONGLONG"] = []
# Dev10 475426
#ret_val["ULONGLONG"] += overflow_num_helper(System.UInt64)
ret_val["SHORT"] = []
ret_val["SHORT"] += overflow_num_helper(System.Int16)
ret_val["LONG"] = []
# Dev10 475426
#ret_val["LONG"] += overflow_num_helper(System.Int32)
ret_val["LONGLONG"] = []
# Dev10 475426
#ret_val["LONGLONG"] += overflow_num_helper(System.Int64)
############################################################
return ret_val[in_type]
def pythonToCOM(in_type):
'''
Given a COM type (in string format), this helper function returns a list of
    lists where each sublist contains 1-N elements. The elements within a
    sublist are of different types (all compatible with in_type), but are
    equivalent to one another.
'''
ret_val = {}
############################################################
temp_funcs = [int, bool, System.Boolean] # long, Dev10 475426
temp_values = [ 0, 1, True, False]
ret_val["VARIANT_BOOL"] = [ [y(x) for y in temp_funcs] for x in temp_values]
############################################################
temp_funcs = [System.Byte]
temp_values = pos_num_helper(System.Byte)
ret_val["BYTE"] = [ [y(x) for y in temp_funcs] for x in temp_values]
############################################################
temp_funcs = [ str, unicode, # Py_Str, Py_System_String,
System.String ]
temp_values = shallow_copy(STRING_VALUES)
ret_val["BSTR"] = [ [y(x) for y in temp_funcs] for x in temp_values]
############################################################
temp_funcs = [System.SByte]
temp_values = pos_num_helper(System.SByte)
ret_val["CHAR"] = [ [y(x) for y in temp_funcs] for x in temp_values]
############################################################
temp_funcs = [ float, # Py_Float,
System.Single]
ret_val["FLOAT"] = [ [y(x) for y in temp_funcs] for x in FPN_VALUES]
############################################################
temp_funcs = [ float, System.Double] # Py_Double, Py_System_Double,
temp_values = [-1.0e+308, 1.0e308] + FPN_VALUES
ret_val["DOUBLE"] = [ [y(x) for y in temp_funcs] for x in temp_values]
ret_val["DOUBLE"] += ret_val["FLOAT"]
############################################################
temp_funcs = [int, System.UInt16] # Py_UShort,
temp_values = pos_num_helper(System.UInt16)
ret_val["USHORT"] = [ [y(x) for y in temp_funcs] for x in temp_values]
############################################################
temp_funcs = [int, System.UInt32] # Py_ULong,
temp_values = pos_num_helper(System.UInt32) + pos_num_helper(System.UInt16)
ret_val["ULONG"] = [ [y(x) for y in temp_funcs] for x in temp_values]
ret_val["ULONG"] += ret_val["USHORT"]
############################################################
temp_funcs = [int, long, System.UInt64] # Py_ULongLong,
temp_values = pos_num_helper(System.UInt64) + pos_num_helper(System.UInt32) + pos_num_helper(System.UInt16)
ret_val["ULONGLONG"] = [ [y(x) for y in temp_funcs] for x in temp_values]
ret_val["ULONGLONG"] += ret_val["ULONG"]
############################################################
temp_funcs = [int, System.Int16] # Py_Short,
temp_values = pos_num_helper(System.Int16)
ret_val["SHORT"] = [ [y(x) for y in temp_funcs] for x in temp_values]
############################################################
temp_funcs = [int, System.Int32] # Py_Long, Dev10 475426
temp_values = pos_num_helper(System.Int32) + pos_num_helper(System.Int16)
ret_val["LONG"] = [ [y(x) for y in temp_funcs] for x in temp_values]
ret_val["LONG"] += ret_val["SHORT"]
############################################################
temp_funcs = [int, long, System.Int64] # Py_LongLong, Dev10 475426
temp_values = pos_num_helper(System.Int64) + pos_num_helper(System.Int32) + pos_num_helper(System.Int16)
ret_val["LONGLONG"] = [ [y(x) for y in temp_funcs] for x in temp_values]
ret_val["LONGLONG"] += ret_val["LONG"]
############################################################
return ret_val[in_type]
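# --- Illustrative usage sketch (added commentary, not part of the original) ---
# A hypothetical consumer of the equivalence groups returned above; com_method
# is a placeholder, not something defined in this module.
#
#   for equivalent_values in pythonToCOM("BYTE"):
#       results = [com_method(v) for v in equivalent_values]
#       # every value in one group is expected to yield the same COM result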
#------------------------------------------------------------------------------
#--Override a couple of definitions from assert_util
from iptest import assert_util
DEBUG = 1
def assert_helper(in_dict):
#add the keys if they're not there
if not in_dict.has_key("runonly"): in_dict["runonly"] = True
if not in_dict.has_key("skip"): in_dict["skip"] = False
#determine whether this test will be run or not
run = in_dict["runonly"] and not in_dict["skip"]
#strip out the keys
for x in ["runonly", "skip"]: in_dict.pop(x)
if not run:
if in_dict.has_key("bugid"):
print "...skipped an assert due to bug", str(in_dict["bugid"])
elif DEBUG:
print "...skipped an assert on", sys.platform
if in_dict.has_key("bugid"): in_dict.pop("bugid")
return run
def Assert(*args, **kwargs):
if assert_helper(kwargs): assert_util.Assert(*args, **kwargs)
def AreEqual(*args, **kwargs):
if assert_helper(kwargs): assert_util.AreEqual(*args, **kwargs)
def AssertError(*args, **kwargs):
try:
if assert_helper(kwargs): assert_util.AssertError(*args, **kwargs)
except Exception, e:
print "AssertError(" + str(args) + ", " + str(kwargs) + ") failed!"
raise e
def AssertErrorWithMessage(*args, **kwargs):
try:
if assert_helper(kwargs): assert_util.AssertErrorWithMessage(*args, **kwargs)
except Exception, e:
print "AssertErrorWithMessage(" + str(args) + ", " + str(kwargs) + ") failed!"
raise e
def AssertErrorWithPartialMessage(*args, **kwargs):
try:
if assert_helper(kwargs): assert_util.AssertErrorWithPartialMessage(*args, **kwargs)
except Exception, e:
print "AssertErrorWithPartialMessage(" + str(args) + ", " + str(kwargs) + ") failed!"
raise e
def AlmostEqual(*args, **kwargs):
if assert_helper(kwargs): assert_util.AlmostEqual(*args, **kwargs)
#------------------------------------------------------------------------------
#--HELPERS
def TryLoadExcelInteropAssembly():
try:
clr.AddReferenceByName('Microsoft.Office.Interop.Excel, Version=12.0.0.0, Culture=neutral, PublicKeyToken=71e9bce111e9429c')
except:
try:
clr.AddReferenceByName('Microsoft.Office.Interop.Excel, Version=11.0.0.0, Culture=neutral, PublicKeyToken=71e9bce111e9429c')
except:
pass
#------------------------------------------------------------------------------
def TryLoadWordInteropAssembly():
try:
clr.AddReferenceByName('Microsoft.Office.Interop.Word, Version=12.0.0.0, Culture=neutral, PublicKeyToken=71e9bce111e9429c')
except:
try:
clr.AddReferenceByName('Microsoft.Office.Interop.Word, Version=11.0.0.0, Culture=neutral, PublicKeyToken=71e9bce111e9429c')
except:
pass
#------------------------------------------------------------------------------
def IsExcelInstalled():
from Microsoft.Win32 import Registry
from System.IO import File
excel = None
#Office 11 or 12 are both OK for this test. Office 12 is preferred.
excel = Registry.LocalMachine.OpenSubKey("Software\\Microsoft\\Office\\12.0\\Excel\\InstallRoot")
if excel==None:
excel = Registry.LocalMachine.OpenSubKey("Software\\Microsoft\\Office\\11.0\\Excel\\InstallRoot")
#sanity check
if excel==None:
return False
#make sure it's really installed on disk
excel_path = excel.GetValue("Path") + "excel.exe"
return File.Exists(excel_path)
#------------------------------------------------------------------------------
def IsWordInstalled():
from Microsoft.Win32 import Registry
from System.IO import File
word = None
#Office 11 or 12 are both OK for this test. Office 12 is preferred.
word = Registry.LocalMachine.OpenSubKey("Software\\Microsoft\\Office\\12.0\\Word\\InstallRoot")
if word==None:
word= Registry.LocalMachine.OpenSubKey("Software\\Microsoft\\Office\\11.0\\Word\\InstallRoot")
#sanity check
if word==None:
return False
#make sure it's really installed on disk
word_path = word.GetValue("Path") + "winword.exe"
return File.Exists(word_path)
#------------------------------------------------------------------------------
def CreateExcelApplication():
#TODO: why is there use of the GUID here?
#import clr
#typelib = clr.LoadTypeLibrary(System.Guid("00020813-0000-0000-C000-000000000046"))
#return typelib.Excel.Application()
import System
type = System.Type.GetTypeFromProgID("Excel.Application")
return System.Activator.CreateInstance(type)
#------------------------------------------------------------------------------
def CreateWordApplication():
import System
#import clr
#typelib = clr.LoadTypeLibrary(System.Guid("00020905-0000-0000-C000-000000000046"))
#return typelib.Word.Application()
type = System.Type.GetTypeFromProgID("Word.Application")
return System.Activator.CreateInstance(type)
#------------------------------------------------------------------------------
def CreateAgentServer():
import clr
from System import Guid
typelib = clr.LoadTypeLibrary(Guid("A7B93C73-7B81-11D0-AC5F-00C04FD97575"))
return typelib.AgentServerObjects.AgentServer()
#------------------------------------------------------------------------------
def CreateDlrComServer():
com_type_name = "DlrComLibrary.DlrComServer"
if is_cli:
com_obj = getRCWFromProgID(com_type_name)
else:
com_obj = win32com.client.Dispatch(com_type_name)
return com_obj
#------------------------------------------------------------------------------
def getTypeFromProgID(prog_id):
'''
Returns the Type object for prog_id.
'''
return Type.GetTypeFromProgID(prog_id)
#------------------------------------------------------------------------------
def getRCWFromProgID(prog_id):
'''
Returns an instance of prog_id.
'''
if is_cli:
return Activator.CreateInstance(getTypeFromProgID(prog_id))
else:
return win32com.client.Dispatch(prog_id)
#------------------------------------------------------------------------------
def genPeverifyInteropAsm(file):
#if this isn't a test run that will invoke peverify there's no point in
#continuing
if not is_peverify_run:
return
else:
mod_name = file.rsplit("\\", 1)[1].split(".py")[0]
print "Generating interop assemblies for the", mod_name, "test module which are needed in %TEMP% by peverify..."
from System.IO import Path
tempDir = Path.GetTempPath()
cwd = nt.getcwd()
#maps COM interop test module names to a list of DLLs
module_dll_dict = {
"excel" : [],
"msagent" : [agentsvr_path],
"scriptpw" : [scriptpw_path],
"word" : [],
}
dlrcomlib_list = [ "dlrcomserver", "paramsinretval", "method", "obj", "prop", ]
if is_cli32:
temp_name = testpath.rowan_root + "\\Test\\DlrComLibrary\\Debug\\DlrComLibrary.dll"
else:
temp_name = testpath.rowan_root + "\\Test\\DlrComLibrary\\x64\\Release\\DlrComLibrary.dll"
for mod_name in dlrcomlib_list: module_dll_dict[mod_name] = [ temp_name ]
if not file_exists_in_path("tlbimp.exe"):
print "ERROR: tlbimp.exe is not in the path!"
sys.exit(1)
try:
if not module_dll_dict.has_key(mod_name):
print "WARNING: cannot determine which interop assemblies to install!"
print " This may affect peverify runs adversely."
print
return
else:
nt.chdir(tempDir)
for com_dll in module_dll_dict[mod_name]:
if not file_exists(com_dll):
print "\tERROR: %s does not exist!" % (com_dll)
continue
print "\trunning tlbimp on", com_dll
run_tlbimp(com_dll)
finally:
nt.chdir(cwd)
#------------------------------------------------------------------------------
#--Fake parts of System for compat tests
if sys.platform=="win32":
class System:
class Byte(int):
MinValue = 0
MaxValue = 255
class SByte(int):
MinValue = -128
MaxValue = 127
class Int16(int):
MinValue = -32768
MaxValue = 32767
class UInt16(int):
MinValue = 0
MaxValue = 65535
class Int32(int):
MinValue = -2147483648
MaxValue = 2147483647
class UInt32(long):
MinValue = 0
MaxValue = 4294967295
class Int64(long):
MinValue = -9223372036854775808L
MaxValue = 9223372036854775807L
class UInt64(long):
MinValue = 0L
MaxValue = 18446744073709551615
class Single(float):
MinValue = -3.40282e+038
MaxValue = 3.40282e+038
class Double(float):
MinValue = -1.79769313486e+308
MaxValue = 1.79769313486e+308
class String(str):
pass
class Boolean(int):
pass
#------------------------------------------------------------------------------
def run_com_test(name, file):
run_test(name)
genPeverifyInteropAsm(file)
| slozier/ironpython2 | Src/IronPython/Lib/iptest/cominterop_util.py | Python | apache-2.0 | 24,101 |
import re
# Python 2/3 compatibility hackery
try:
unicode
except NameError:
unicode = str
def compile_url(url):
clean_url = unicode(url).lstrip(u'/')
return re.compile(clean_url)
def compile_urls(urls):
return [compile_url(expr) for expr in urls]
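# Illustrative note (added commentary, not part of the original): the leading
# slash is stripped before compiling, so both calls below match the same path.
#   compile_url('/admin/').match('admin/login')  # returns a match
#   compile_url('admin/').match('admin/login')   # returns a match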
| ghickman/incuna-auth | incuna_auth/middleware/utils.py | Python | bsd-2-clause | 272 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from math import log
import argh
import numpy as np
from chemreac import ReactionDiffusion
from chemreac.integrate import run
from chemreac.util.plotting import plot_solver_linear_error
def efield_cb(x, logx=False):
"""
Returns a flat efield (-1)
"""
return -np.ones_like(x)
def y0_flat_cb(x, logx=False, use_log2=False):
xc = x[:-1] + np.diff(x)/2
if logx:
expb = (lambda arg: 2**arg) if use_log2 else np.exp
x, xc = map(expb, (x, xc))
return 17 - 11*(xc-x[0])/(x[-1]-x[0])
def y0_cylindrical_cb(x, logx=False, use_log2=False):
xc = x[:-1] + np.diff(x)/2
if logx:
expb = (lambda arg: 2**arg) if use_log2 else np.exp
x, xc = map(expb, (x, xc))
return 17 - np.log((xc-x[0])/(x[-1]-x[0]))
def y0_spherical_cb(x, logx=False, use_log2=False):
xc = x[:-1] + np.diff(x)/2
if logx:
expb = (lambda arg: 2**arg) if use_log2 else np.exp
x, xc = map(expb, (x, xc))
return 3 + 0.1/((xc-x[0])/(x[-1]-x[0]))
def integrate_rd(D=2e-3, t0=3., tend=7., x0=0.0, xend=1.0, mu=None, N=32,
nt=25, geom='f', logt=False, logy=False, logx=False,
random=False, nstencil=3, lrefl=False, rrefl=False,
num_jacobian=False, method='bdf', plot=False,
atol=1e-6, rtol=1e-6, efield=False, random_seed=42,
verbose=False, use_log2=False):
if random_seed:
np.random.seed(random_seed)
n = 1
mu = float(mu or x0)
tout = np.linspace(t0, tend, nt)
assert geom in 'fcs'
# Setup the grid
logb = (lambda arg: log(arg)/log(2)) if use_log2 else log
_x0 = logb(x0) if logx else x0
_xend = logb(xend) if logx else xend
x = np.linspace(_x0, _xend, N+1)
if random:
x += (np.random.random(N+1)-0.5)*(_xend-_x0)/(N+2)
mob = 0.3
# Initial conditions
y0 = {
'f': y0_flat_cb,
'c': y0_cylindrical_cb,
's': y0_spherical_cb
}[geom](x, logx)
# Setup the system
stoich_active = []
stoich_prod = []
k = []
assert not lrefl
assert not rrefl
rd = ReactionDiffusion(
n, stoich_active, stoich_prod, k, N,
D=[D],
z_chg=[1],
mobility=[mob],
x=x,
geom=geom,
logy=logy,
logt=logt,
logx=logx,
nstencil=nstencil,
lrefl=lrefl,
rrefl=rrefl,
use_log2=use_log2
)
if efield:
if geom != 'f':
raise ValueError("Only analytic sol. for flat drift implemented.")
rd.efield = efield_cb(rd.xcenters, logx)
# Analytic reference values
t = tout.copy().reshape((nt, 1))
Cref = np.repeat(y0[np.newaxis, :, np.newaxis], nt, axis=0)
if efield:
Cref += t.reshape((nt, 1, 1))*mob
# Run the integration
integr = run(rd, y0, tout, atol=atol, rtol=rtol,
with_jacobian=(not num_jacobian), method=method)
Cout, info = integr.Cout, integr.info
if verbose:
print(info)
def lin_err(i=slice(None), j=slice(None)):
return integr.Cout[i, :, j] - Cref[i, :, j]
rmsd = np.sum(lin_err()**2 / N, axis=1)**0.5
ave_rmsd_over_atol = np.average(rmsd, axis=0)/info['atol']
# Plot results
if plot:
import matplotlib.pyplot as plt
def _plot(y, c, ttl=None, apply_exp_on_y=False):
plt.plot(rd.xcenters, rd.expb(y) if apply_exp_on_y else y, c=c)
if N < 100:
plt.vlines(rd.x, 0, np.ones_like(rd.x)*max(y), linewidth=.1,
colors='gray')
plt.xlabel('x / m')
plt.ylabel('C / M')
if ttl:
plt.title(ttl)
for i in range(nt):
c = 1-tout[i]/tend
c = (1.0-c, .5-c/2, .5-c/2) # over time: dark red -> light red
plt.subplot(4, 1, 1)
_plot(Cout[i, :, 0], c, 'Simulation (N={})'.format(rd.N),
apply_exp_on_y=logy)
plt.subplot(4, 1, 2)
_plot(Cref[i, :, 0], c, 'Analytic', apply_exp_on_y=logy)
ax_err = plt.subplot(4, 1, 3)
plot_solver_linear_error(integr, Cref, ax_err, ti=i,
bi=slice(None),
color=c, fill=(i == 0))
plt.title('Linear rel error / Log abs. tol. (={})'.format(
info['atol']))
plt.subplot(4, 1, 4)
tspan = [tout[0], tout[-1]]
plt.plot(tout, rmsd[:, 0] / info['atol'], 'r')
plt.plot(tspan, [ave_rmsd_over_atol[0]]*2, 'r--')
plt.xlabel('Time / s')
plt.ylabel(r'$\sqrt{\langle E^2 \rangle} / atol$')
plt.tight_layout()
plt.show()
return tout, Cout, info, ave_rmsd_over_atol, rd
if __name__ == '__main__':
argh.dispatch_command(integrate_rd, output_file=None)
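# --- Illustrative invocation sketch (added commentary, not part of original) --
# argh exposes the keyword arguments of integrate_rd as command-line options,
# so a run might look like the following (option values are arbitrary):
#   python steady_state.py --N 64 --geom f --efield --plot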
| bjodah/chemreac | examples/steady_state.py | Python | bsd-2-clause | 4,970 |
# Generated by Django 3.1.4 on 2020-12-15 15:58
from django.db import migrations
def copy_labels(apps, schema_editor):
Trek = apps.get_model('trekking', 'Trek')
Label = apps.get_model('common', 'Label')
for trek in Trek.objects.all():
for label in trek.labels.all():
label2, created = Label.objects.get_or_create(name=label.name, defaults={'advice': label.advice, 'filter': label.filter_rando})
trek.labels2.add(label2)
class Migration(migrations.Migration):
dependencies = [
('trekking', '0023_trek_labels2'),
]
operations = [
migrations.RunPython(copy_labels),
]
| makinacorpus/Geotrek | geotrek/trekking/migrations/0024_copy_labels.py | Python | bsd-2-clause | 649 |
#!/usr/bin/env python3
import canmatrix.formats
from canmatrix.join import join_frame_by_signal_start_bit
files = ["../test/db_B.dbc", "../test/db_A.dbc"]
target = join_frame_by_signal_start_bit(files)
#
# export the new (target)-Matrix for example as .dbc:
#
canmatrix.formats.dumpp(target, "target.dbc")
canmatrix.formats.dumpp(target, "target.xlsx")
| ebroecker/canmatrix | examples/exampleJoin.py | Python | bsd-2-clause | 357 |
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^admin/', include(admin.site.urls)),
url(r'', include('project.core.urls', namespace='core')),
)
| pombredanne/django-boilerplate-1 | project/urls.py | Python | bsd-2-clause | 248 |
"""Auxiliary functions."""
import cPickle as pickle
import os
import sys
import gzip
import urllib
import numpy
import theano
import theano.tensor as T
import theano.sandbox.cuda
from athenet.utils import BIN_DIR, DATA_DIR
def load_data_from_pickle(filename):
"""Load data from pickle file.
:param filename: File with pickled data, may be gzipped.
:return: Data loaded from file.
"""
try:
f = gzip.open(filename, 'rb')
data = pickle.load(f)
except:
f = open(filename, 'rb')
data = pickle.load(f)
f.close()
return data
def save_data_to_pickle(data, filename):
"""Saves data to gzipped pickle file.
:param data: Data to be saved.
:param filename: Name of file to save data.
"""
with gzip.open(filename, 'wb') as f:
pickle.dump(data, f)
def load_data(filename, url=None):
"""Load data from file, download file if it doesn't exist.
:param filename: File with pickled data, may be gzipped.
:param url: Url for downloading file.
:return: Unpickled data.
"""
if not os.path.isfile(filename):
if not url:
return None
download_file(filename, url)
data = load_data_from_pickle(filename)
return data
def download_file(filename, url):
"""Download file from given url.
:param filename: Name of a file to be downloaded.
:param url: Url for downloading file.
"""
directory = os.path.dirname(filename)
if not os.path.exists(directory):
os.makedirs(directory)
print 'Downloading ' + os.path.basename(filename) + '...',
sys.stdout.flush()
urllib.urlretrieve(url, filename)
print 'Done'
def get_data_path(name):
"""Return absolute path to the data file.
:param name: Name of the file.
:return: Full path to the file.
"""
return os.path.join(DATA_DIR, name)
def get_bin_path(name):
"""Return absolute path to the binary data file.
:param name: Name of the file.
:return: Full path to the file.
"""
return os.path.join(BIN_DIR, name)
def zero_fraction(network):
"""Returns fraction of zeros in weights of Network.
Biases are not considered.
:param network: Network for which we count fraction of zeros.
:return: Fraction of zeros.
"""
params = [layer.W for layer in network.weighted_layers]
n_non_zero = 0
n_fields = 0
for param in params:
n_fields += numpy.size(param)
n_non_zero += numpy.count_nonzero(param)
n_zero = n_fields - n_non_zero
return (1.0 * n_zero) / (1.0 * n_fields)
def count_zeros_in_layer(layer):
return layer.W.size - numpy.count_nonzero(layer.W)
def count_zeros(network):
"""
    Return the number of zero-valued weights in each layer of the network.
    Biases are not considered.
    :param network: Network for which we count zeros.
    :return: List with the number of zero-valued weights for each layer.
"""
return numpy.array([count_zeros_in_layer(layer)
for layer in network.weighted_layers])
len_prev = 0
def overwrite(text='', length=None):
"""Write text in a current line, overwriting previously written text.
Previously written text also needs to be written using this function for
it to work properly. Otherwise optional argument length can be given to
specify length of a previous text.
:param string text: Text to be written.
:param integer length: Length of a previous text.
"""
global len_prev
if length is None:
length = len_prev
print '\r' + ' '*length,
print '\r' + text,
len_prev = len(text)
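# Illustrative usage (added commentary, not part of the original): successive
# calls rewrite the same console line, e.g.
#   overwrite('epoch 1/10 ...')
#   overwrite('epoch 2/10 ...')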
def cudnn_available():
"""Check if cuDNN is available.
:return: True, if cuDNN is available, False otherwise.
"""
try:
return theano.sandbox.cuda.dnn_available()
except:
return False
def reshape_for_padding(layer_input, image_shape, batch_size, padding,
value=0.0):
"""Returns padded tensor.
:param theano.tensor4 layer_input: input in shape
(batch_size, number of channels,
height, width)
:param tuple of integers image_shape: shape of input images in format
(height, width, number of channels)
:param integer batch_size: size of input batch size
:param pair of integers padding: padding to be applied to layer_input
:param float value: value of new fields
:returns: padded layer_input
:rtype: theano.tensor4
"""
if padding == (0, 0):
return layer_input
h, w, n_channels = image_shape
pad_h, pad_w = padding
h_in = h + 2*pad_h
w_in = w + 2*pad_w
extra_pixels = T.alloc(numpy.array(value, dtype=theano.config.floatX),
batch_size, n_channels, h_in, w_in)
extra_pixels = T.set_subtensor(
extra_pixels[:, :, pad_h:pad_h+h, pad_w:pad_w+w], layer_input)
return extra_pixels
def convolution(layer_input, w_shared, stride, n_groups, image_shape,
padding, batch_size, filter_shape):
"""Returns result of applying convolution to layer_input.
:param theano.tensor4 layer_input: input of convolution in format
(batch_size, number of channels,
height, width)
:param theano.tensor4 w_shared: weights in format
(number of output channels,
number of input channels,
height, width)
:param pair of integers stride: stride of convolution
:param integer n_groups: number of groups in convolution
:param image_shape: shape of single image in layer_input in format
(height, width, number of channels)
:type image_shape: tuple of 3 integers
:param pair of integers padding: padding of convolution
:param integer batch_size: size of batch of layer_input
:param filter_shape: shape of single filter in format
(height, width, number of output channels)
:type filter_shape: tuple of 3 integers
"""
n_channels = image_shape[2]
n_filters = filter_shape[2]
n_group_channels = n_channels / n_groups
n_group_filters = n_filters / n_groups
h, w = image_shape[0:2]
pad_h, pad_w = padding
group_image_shape = (batch_size, n_group_channels,
h + 2*pad_h, w + 2*pad_w)
h, w = filter_shape[0:2]
group_filter_shape = (n_group_filters, n_group_channels, h, w)
conv_outputs = [T.nnet.conv.conv2d(
input=layer_input[:, i*n_group_channels:(i+1)*n_group_channels,
:, :],
filters=w_shared[i*n_group_filters:(i+1)*n_group_filters,
:, :, :],
filter_shape=group_filter_shape,
image_shape=group_image_shape,
subsample=stride
) for i in xrange(n_groups)]
return T.concatenate(conv_outputs, axis=1)
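# --- Illustrative call sketch (added commentary, not part of the original) ---
# The shapes below are arbitrary examples consistent with the documented
# contracts; layer_input and w_shared are assumed to be theano tensor4 values,
# with layer_input already padded via reshape_for_padding using the same
# padding that is passed here.
#
#   out = convolution(layer_input, w_shared, stride=(1, 1), n_groups=1,
#                     image_shape=(28, 28, 3), padding=(2, 2),
#                     batch_size=16, filter_shape=(5, 5, 8))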
| heurezjusz/Athena | athenet/utils/misc.py | Python | bsd-2-clause | 7,019 |
# encoding: utf-8
from django.db.utils import IntegrityError, DatabaseError
try:
from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding unique constraint on 'PollAnswerUser', fields ['poll_answer', 'user']
db.create_unique('pybb_pollansweruser', ['poll_answer_id', 'user_id'])
def backwards(self, orm):
# Removing unique constraint on 'PollAnswerUser', fields ['poll_answer', 'user']
db.delete_unique('pybb_pollansweruser', ['poll_answer_id', 'user_id'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'pybb.attachment': {
'Meta': {'object_name': 'Attachment'},
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attachments'", 'to': "orm['pybb.Post']"}),
'size': ('django.db.models.fields.IntegerField', [], {})
},
'pybb.category': {
'Meta': {'ordering': "['position']", 'object_name': 'Category'},
'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'position': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'pybb.forum': {
'Meta': {'ordering': "['position']", 'object_name': 'Forum'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'forums'", 'to': "orm['pybb.Category']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'headline': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderators': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['%s.%s']"% (User._meta.app_label, User._meta.object_name), 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'position': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'post_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'readed_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'readed_forums'", 'symmetrical': 'False', 'through': "orm['pybb.ForumReadTracker']", 'to': "orm['%s.%s']"% (User._meta.app_label, User._meta.object_name)}),
'topic_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'pybb.forumreadtracker': {
'Meta': {'object_name': 'ForumReadTracker'},
'forum': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pybb.Forum']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'time_stamp': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s.%s']"% (User._meta.app_label, User._meta.object_name)})
},
'pybb.pollanswer': {
'Meta': {'object_name': 'PollAnswer'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'topic': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'poll_answers'", 'to': "orm['pybb.Topic']"})
},
'pybb.pollansweruser': {
'Meta': {'unique_together': "(('poll_answer', 'user'),)", 'object_name': 'PollAnswerUser'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'poll_answer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'users'", 'to': "orm['pybb.PollAnswer']"}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'poll_answers'", 'to': "orm['%s.%s']"% (User._meta.app_label, User._meta.object_name)})
},
'pybb.post': {
'Meta': {'ordering': "['created']", 'object_name': 'Post'},
'body': ('django.db.models.fields.TextField', [], {}),
'body_html': ('django.db.models.fields.TextField', [], {}),
'body_text': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'on_moderation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'topic': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'posts'", 'to': "orm['pybb.Topic']"}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'posts'", 'to': "orm['%s.%s']"% (User._meta.app_label, User._meta.object_name)}),
'user_ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15', 'blank': 'True'})
},
'pybb.profile': {
'Meta': {'object_name': 'Profile'},
'autosubscribe': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'avatar': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'ru-RU'", 'max_length': '10', 'blank': 'True'}),
'post_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'show_signatures': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'signature': ('django.db.models.fields.TextField', [], {'max_length': '1024', 'blank': 'True'}),
'signature_html': ('django.db.models.fields.TextField', [], {'max_length': '1054', 'blank': 'True'}),
'time_zone': ('django.db.models.fields.FloatField', [], {'default': '3.0'}),
'user': ('annoying.fields.AutoOneToOneField', [], {'related_name': "'pybb_profile'", 'unique': 'True', 'to': "orm['%s.%s']"% (User._meta.app_label, User._meta.object_name)})
},
'pybb.topic': {
'Meta': {'ordering': "['-created']", 'object_name': 'Topic'},
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'forum': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'topics'", 'to': "orm['pybb.Forum']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'on_moderation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'poll_question': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'poll_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'post_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'readed_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'readed_topics'", 'symmetrical': 'False', 'through': "orm['pybb.TopicReadTracker']", 'to': "orm['%s.%s']"% (User._meta.app_label, User._meta.object_name)}),
'sticky': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subscribers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'subscriptions'", 'blank': 'True', 'to': "orm['%s.%s']"% (User._meta.app_label, User._meta.object_name)}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s.%s']"% (User._meta.app_label, User._meta.object_name)}),
'views': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'pybb.topicreadtracker': {
'Meta': {'object_name': 'TopicReadTracker'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'time_stamp': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'topic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pybb.Topic']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s.%s']"% (User._meta.app_label, User._meta.object_name)})
}
}
complete_apps = ['pybb'] | zekone/dj_pybb | pybb/migrations/0023_auto__add_unique_pollansweruser_poll_answer_user.py | Python | bsd-2-clause | 12,958 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "opentrain.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| hasadna/OpenTrain | webserver/opentrain/manage.py | Python | bsd-3-clause | 252 |
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import glob
import json
import os
import pipes
import shutil
import subprocess
import sys
script_dir = os.path.dirname(os.path.realpath(__file__))
chrome_src = os.path.abspath(os.path.join(script_dir, os.pardir))
SRC_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, os.path.join(chrome_src, 'tools', 'gyp', 'pylib'))
json_data_file = os.path.join(script_dir, 'win_toolchain.json')
import gyp
# Use MSVS2013 as the default toolchain.
CURRENT_DEFAULT_TOOLCHAIN_VERSION = '2013'
def SetEnvironmentAndGetRuntimeDllDirs():
"""Sets up os.environ to use the depot_tools VS toolchain with gyp, and
returns the location of the VS runtime DLLs so they can be copied into
the output directory after gyp generation.
"""
vs_runtime_dll_dirs = None
depot_tools_win_toolchain = \
bool(int(os.environ.get('DEPOT_TOOLS_WIN_TOOLCHAIN', '1')))
# When running on a non-Windows host, only do this if the SDK has explicitly
# been downloaded before (in which case json_data_file will exist).
if ((sys.platform in ('win32', 'cygwin') or os.path.exists(json_data_file))
and depot_tools_win_toolchain):
if ShouldUpdateToolchain():
Update()
with open(json_data_file, 'r') as tempf:
toolchain_data = json.load(tempf)
toolchain = toolchain_data['path']
version = toolchain_data['version']
win_sdk = toolchain_data.get('win_sdk')
if not win_sdk:
win_sdk = toolchain_data['win8sdk']
wdk = toolchain_data['wdk']
# TODO(scottmg): The order unfortunately matters in these. They should be
# split into separate keys for x86 and x64. (See CopyVsRuntimeDlls call
# below). http://crbug.com/345992
vs_runtime_dll_dirs = toolchain_data['runtime_dirs']
os.environ['GYP_MSVS_OVERRIDE_PATH'] = toolchain
os.environ['GYP_MSVS_VERSION'] = version
# We need to make sure windows_sdk_path is set to the automated
    # toolchain values in GYP_DEFINES, but don't want to override any
    # other values there.
gyp_defines_dict = gyp.NameValueListToDict(gyp.ShlexEnv('GYP_DEFINES'))
gyp_defines_dict['windows_sdk_path'] = win_sdk
os.environ['GYP_DEFINES'] = ' '.join('%s=%s' % (k, pipes.quote(str(v)))
for k, v in gyp_defines_dict.iteritems())
os.environ['WINDOWSSDKDIR'] = win_sdk
os.environ['WDK_DIR'] = wdk
# Include the VS runtime in the PATH in case it's not machine-installed.
runtime_path = os.path.pathsep.join(vs_runtime_dll_dirs)
os.environ['PATH'] = runtime_path + os.path.pathsep + os.environ['PATH']
elif sys.platform == 'win32' and not depot_tools_win_toolchain:
if not 'GYP_MSVS_OVERRIDE_PATH' in os.environ:
os.environ['GYP_MSVS_OVERRIDE_PATH'] = DetectVisualStudioPath()
if not 'GYP_MSVS_VERSION' in os.environ:
os.environ['GYP_MSVS_VERSION'] = GetVisualStudioVersion()
return vs_runtime_dll_dirs
def _RegistryGetValueUsingWinReg(key, value):
"""Use the _winreg module to obtain the value of a registry key.
Args:
key: The registry key.
value: The particular registry value to read.
Return:
contents of the registry key's value, or None on failure. Throws
ImportError if _winreg is unavailable.
"""
import _winreg
try:
root, subkey = key.split('\\', 1)
assert root == 'HKLM' # Only need HKLM for now.
with _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, subkey) as hkey:
return _winreg.QueryValueEx(hkey, value)[0]
except WindowsError:
return None
def _RegistryGetValue(key, value):
try:
return _RegistryGetValueUsingWinReg(key, value)
except ImportError:
raise Exception('The python library _winreg not found.')
def GetVisualStudioVersion():
"""Return GYP_MSVS_VERSION of Visual Studio.
"""
return os.environ.get('GYP_MSVS_VERSION', CURRENT_DEFAULT_TOOLCHAIN_VERSION)
def DetectVisualStudioPath():
"""Return path to the GYP_MSVS_VERSION of Visual Studio.
"""
# Note that this code is used from
# build/toolchain/win/setup_toolchain.py as well.
version_as_year = GetVisualStudioVersion()
year_to_version = {
'2013': '12.0',
'2015': '14.0',
}
if version_as_year not in year_to_version:
raise Exception(('Visual Studio version %s (from GYP_MSVS_VERSION)'
' not supported. Supported versions are: %s') % (
version_as_year, ', '.join(year_to_version.keys())))
version = year_to_version[version_as_year]
keys = [r'HKLM\Software\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\%s' % version]
for key in keys:
path = _RegistryGetValue(key, 'InstallDir')
if not path:
continue
path = os.path.normpath(os.path.join(path, '..', '..'))
return path
raise Exception(('Visual Studio Version %s (from GYP_MSVS_VERSION)'
' not found.') % (version_as_year))
def _VersionNumber():
"""Gets the standard version number ('120', '140', etc.) based on
GYP_MSVS_VERSION."""
vs_version = GetVisualStudioVersion()
if vs_version == '2013':
return '120'
elif vs_version == '2015':
return '140'
else:
raise ValueError('Unexpected GYP_MSVS_VERSION')
def _CopyRuntimeImpl(target, source, verbose=True):
"""Copy |source| to |target| if it doesn't already exist or if it
needs to be updated.
"""
if (os.path.isdir(os.path.dirname(target)) and
(not os.path.isfile(target) or
os.stat(target).st_mtime != os.stat(source).st_mtime)):
if verbose:
print 'Copying %s to %s...' % (source, target)
if os.path.exists(target):
os.unlink(target)
shutil.copy2(source, target)
def _CopyRuntime2013(target_dir, source_dir, dll_pattern):
"""Copy both the msvcr and msvcp runtime DLLs, only if the target doesn't
exist, but the target directory does exist."""
for file_part in ('p', 'r'):
dll = dll_pattern % file_part
target = os.path.join(target_dir, dll)
source = os.path.join(source_dir, dll)
_CopyRuntimeImpl(target, source)
def _CopyRuntime2015(target_dir, source_dir, dll_pattern, suffix):
"""Copy both the msvcp and vccorlib runtime DLLs, only if the target doesn't
exist, but the target directory does exist."""
for file_part in ('msvcp', 'vccorlib', 'vcruntime'):
dll = dll_pattern % file_part
target = os.path.join(target_dir, dll)
source = os.path.join(source_dir, dll)
_CopyRuntimeImpl(target, source)
ucrt_src_dir = os.path.join(source_dir, 'api-ms-win-*.dll')
print 'Copying %s to %s...' % (ucrt_src_dir, target_dir)
for ucrt_src_file in glob.glob(ucrt_src_dir):
file_part = os.path.basename(ucrt_src_file)
ucrt_dst_file = os.path.join(target_dir, file_part)
_CopyRuntimeImpl(ucrt_dst_file, ucrt_src_file, False)
_CopyRuntimeImpl(os.path.join(target_dir, 'ucrtbase' + suffix),
os.path.join(source_dir, 'ucrtbase' + suffix))
def _CopyRuntime(target_dir, source_dir, target_cpu, debug):
"""Copy the VS runtime DLLs, only if the target doesn't exist, but the target
directory does exist. Handles VS 2013 and VS 2015."""
suffix = "d.dll" if debug else ".dll"
if GetVisualStudioVersion() == '2015':
_CopyRuntime2015(target_dir, source_dir, '%s140' + suffix, suffix)
else:
_CopyRuntime2013(target_dir, source_dir, 'msvc%s120' + suffix)
# Copy the PGO runtime library to the release directories.
if not debug and os.environ.get('GYP_MSVS_OVERRIDE_PATH'):
pgo_x86_runtime_dir = os.path.join(os.environ.get('GYP_MSVS_OVERRIDE_PATH'),
'VC', 'bin')
pgo_x64_runtime_dir = os.path.join(pgo_x86_runtime_dir, 'amd64')
pgo_runtime_dll = 'pgort' + _VersionNumber() + '.dll'
if target_cpu == "x86":
source_x86 = os.path.join(pgo_x86_runtime_dir, pgo_runtime_dll)
if os.path.exists(source_x86):
_CopyRuntimeImpl(os.path.join(target_dir, pgo_runtime_dll), source_x86)
elif target_cpu == "x64":
source_x64 = os.path.join(pgo_x64_runtime_dir, pgo_runtime_dll)
if os.path.exists(source_x64):
_CopyRuntimeImpl(os.path.join(target_dir, pgo_runtime_dll),
source_x64)
else:
raise NotImplementedError("Unexpected target_cpu value:" + target_cpu)
def CopyVsRuntimeDlls(output_dir, runtime_dirs):
"""Copies the VS runtime DLLs from the given |runtime_dirs| to the output
directory so that even if not system-installed, built binaries are likely to
be able to run.
This needs to be run after gyp has been run so that the expected target
output directories are already created.
This is used for the GYP build and gclient runhooks.
"""
x86, x64 = runtime_dirs
out_debug = os.path.join(output_dir, 'Debug')
out_debug_nacl64 = os.path.join(output_dir, 'Debug', 'x64')
out_release = os.path.join(output_dir, 'Release')
out_release_nacl64 = os.path.join(output_dir, 'Release', 'x64')
out_debug_x64 = os.path.join(output_dir, 'Debug_x64')
out_release_x64 = os.path.join(output_dir, 'Release_x64')
if os.path.exists(out_debug) and not os.path.exists(out_debug_nacl64):
os.makedirs(out_debug_nacl64)
if os.path.exists(out_release) and not os.path.exists(out_release_nacl64):
os.makedirs(out_release_nacl64)
_CopyRuntime(out_debug, x86, "x86", debug=True)
_CopyRuntime(out_release, x86, "x86", debug=False)
_CopyRuntime(out_debug_x64, x64, "x64", debug=True)
_CopyRuntime(out_release_x64, x64, "x64", debug=False)
_CopyRuntime(out_debug_nacl64, x64, "x64", debug=True)
_CopyRuntime(out_release_nacl64, x64, "x64", debug=False)
def CopyDlls(target_dir, configuration, target_cpu):
"""Copy the VS runtime DLLs into the requested directory as needed.
configuration is one of 'Debug' or 'Release'.
target_cpu is one of 'x86' or 'x64'.
The debug configuration gets both the debug and release DLLs; the
release config only the latter.
This is used for the GN build.
"""
vs_runtime_dll_dirs = SetEnvironmentAndGetRuntimeDllDirs()
if not vs_runtime_dll_dirs:
return
x64_runtime, x86_runtime = vs_runtime_dll_dirs
runtime_dir = x64_runtime if target_cpu == 'x64' else x86_runtime
_CopyRuntime(target_dir, runtime_dir, target_cpu, debug=False)
if configuration == 'Debug':
_CopyRuntime(target_dir, runtime_dir, target_cpu, debug=True)
def _GetDesiredVsToolchainHashes():
"""Load a list of SHA1s corresponding to the toolchains that we want installed
to build with."""
if GetVisualStudioVersion() == '2015':
# Update 1 with hot fixes.
return ['b349b3cc596d5f7e13d649532ddd7e8db39db0cb']
else:
# Default to VS2013.
return ['4087e065abebdca6dbd0caca2910c6718d2ec67f']
def ShouldUpdateToolchain():
"""Check if the toolchain should be upgraded."""
if not os.path.exists(json_data_file):
return True
with open(json_data_file, 'r') as tempf:
toolchain_data = json.load(tempf)
version = toolchain_data['version']
env_version = GetVisualStudioVersion()
# If there's a mismatch between the version set in the environment and the one
# in the json file then the toolchain should be updated.
return version != env_version
def Update(force=False):
"""Requests an update of the toolchain to the specific hashes we have at
this revision. The update outputs a .json of the various configuration
information required to pass to gyp which we use in |GetToolchainDir()|.
"""
if force != False and force != '--force':
print >>sys.stderr, 'Unknown parameter "%s"' % force
return 1
if force == '--force' or os.path.exists(json_data_file):
force = True
depot_tools_win_toolchain = \
bool(int(os.environ.get('DEPOT_TOOLS_WIN_TOOLCHAIN', '1')))
if ((sys.platform in ('win32', 'cygwin') or force) and
depot_tools_win_toolchain):
import find_depot_tools
depot_tools_path = find_depot_tools.add_depot_tools_to_path()
get_toolchain_args = [
sys.executable,
os.path.join(depot_tools_path,
'win_toolchain',
'get_toolchain_if_necessary.py'),
'--output-json', json_data_file,
] + _GetDesiredVsToolchainHashes()
if force:
get_toolchain_args.append('--force')
subprocess.check_call(get_toolchain_args)
return 0
def GetToolchainDir():
"""Gets location information about the current toolchain (must have been
previously updated by 'update'). This is used for the GN build."""
runtime_dll_dirs = SetEnvironmentAndGetRuntimeDllDirs()
# If WINDOWSSDKDIR is not set, search the default SDK path and set it.
if not 'WINDOWSSDKDIR' in os.environ:
default_sdk_path = 'C:\\Program Files (x86)\\Windows Kits\\10'
if os.path.isdir(default_sdk_path):
os.environ['WINDOWSSDKDIR'] = default_sdk_path
print '''vs_path = "%s"
sdk_path = "%s"
vs_version = "%s"
wdk_dir = "%s"
runtime_dirs = "%s"
''' % (
os.environ['GYP_MSVS_OVERRIDE_PATH'],
os.environ['WINDOWSSDKDIR'],
GetVisualStudioVersion(),
os.environ.get('WDK_DIR', ''),
os.path.pathsep.join(runtime_dll_dirs or ['None']))
def main():
commands = {
'update': Update,
'get_toolchain_dir': GetToolchainDir,
'copy_dlls': CopyDlls,
}
if len(sys.argv) < 2 or sys.argv[1] not in commands:
print >>sys.stderr, 'Expected one of: %s' % ', '.join(commands)
return 1
return commands[sys.argv[1]](*sys.argv[2:])
if __name__ == '__main__':
sys.exit(main())
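# --- Illustrative invocations (added commentary, not part of the original) ---
# The sub-commands map onto the functions registered in `commands` above:
#   python vs_toolchain.py update
#   python vs_toolchain.py get_toolchain_dir
#   python vs_toolchain.py copy_dlls <target_dir> Release x64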
| ds-hwang/chromium-crosswalk | build/vs_toolchain.py | Python | bsd-3-clause | 13,647 |
# @Float(label="Diameter of the circle ROI (pixel)", value=7) circle_diam
from ij.plugin.frame import RoiManager
from ij.gui import OvalRoi
rm = RoiManager.getInstance()
new_rois = []
for roi in rm.getRoisAsArray():
assert roi.getTypeAsString() == 'Point', "ROI needs to be a point"
x_center = roi.getContainedPoints()[0].x - (circle_diam / 2) + 0.5
y_center = roi.getContainedPoints()[0].y - (circle_diam / 2) + 0.5
new_roi = OvalRoi(x_center, y_center, circle_diam, circle_diam)
new_rois.append(new_roi)
rm.reset()
for new_roi in new_rois:
rm.addRoi(new_roi)
print("Done")
| hadim/fiji_tools | src/main/resources/script_templates/Hadim_Scripts/ROI/Circle_ROI_Builder.py | Python | bsd-3-clause | 587 |
#!/usr/bin/python
# Copyright 2008-2010 WebDriver committers
# Copyright 2008-2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import tempfile
import time
import shutil
import unittest
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoSuchFrameException
def not_available_on_remote(func):
def testMethod(self):
print self.driver
if type(self.driver) == 'remote':
return lambda x: None
else:
return func(self)
return testMethod
class CorrectEventFiringTests(unittest.TestCase):
def testShouldFireClickEventWhenClicking(self):
self._loadPage("javascriptPage")
self._clickOnElementWhichRecordsEvents()
self._assertEventFired("click")
def testShouldFireMouseDownEventWhenClicking(self):
self._loadPage("javascriptPage")
self._clickOnElementWhichRecordsEvents()
self._assertEventFired("mousedown")
def testShouldFireMouseUpEventWhenClicking(self):
self._loadPage("javascriptPage")
self._clickOnElementWhichRecordsEvents()
self._assertEventFired("mouseup")
def testShouldIssueMouseDownEvents(self):
self._loadPage("javascriptPage")
self.driver.find_element_by_id("mousedown").click()
result = self.driver.find_element_by_id("result").text
self.assertEqual(result, "mouse down")
def testShouldIssueClickEvents(self):
self._loadPage("javascriptPage")
self.driver.find_element_by_id("mouseclick").click()
result = self.driver.find_element_by_id("result").text
self.assertEqual(result, "mouse click")
def testShouldIssueMouseUpEvents(self):
self._loadPage("javascriptPage")
self.driver.find_element_by_id("mouseup").click()
result = self.driver.find_element_by_id("result").text
self.assertEqual(result, "mouse up")
def testMouseEventsShouldBubbleUpToContainingElements(self):
self._loadPage("javascriptPage")
self.driver.find_element_by_id("child").click()
result = self.driver.find_element_by_id("result").text
self.assertEqual(result, "mouse down")
def testShouldEmitOnChangeEventsWhenSelectingElements(self):
self._loadPage("javascriptPage")
# Intentionally not looking up the select tag. See selenium r7937 for details.
allOptions = self.driver.find_elements_by_xpath("//select[@id='selector']//option")
initialTextValue = self.driver.find_element_by_id("result").text
foo = allOptions[0]
bar = allOptions[1]
foo.select()
self.assertEqual(self.driver.find_element_by_id("result").text, initialTextValue)
bar.select()
self.assertEqual(self.driver.find_element_by_id("result").text, "bar")
def testShouldEmitOnChangeEventsWhenChangingTheStateOfACheckbox(self):
self._loadPage("javascriptPage")
checkbox = self.driver.find_element_by_id("checkbox")
checkbox.select()
self.assertEqual(self.driver.find_element_by_id("result").text, "checkbox thing")
def testShouldEmitClickEventWhenClickingOnATextInputElement(self):
self._loadPage("javascriptPage")
clicker = self.driver.find_element_by_id("clickField")
clicker.click()
self.assertEqual(clicker.value, "Clicked")
def testClearingAnElementShouldCauseTheOnChangeHandlerToFire(self):
self._loadPage("javascriptPage")
element = self.driver.find_element_by_id("clearMe")
element.clear()
result = self.driver.find_element_by_id("result")
self.assertEqual(result.text, "Cleared");
# TODO Currently Failing and needs fixing
#def testSendingKeysToAnotherElementShouldCauseTheBlurEventToFire(self):
# self._loadPage("javascriptPage")
# element = self.driver.find_element_by_id("theworks")
# element.send_keys("foo")
# element2 = self.driver.find_element_by_id("changeable")
# element2.send_keys("bar")
# self._assertEventFired("blur")
# TODO Currently Failing and needs fixing
#def testSendingKeysToAnElementShouldCauseTheFocusEventToFire(self):
# self._loadPage("javascriptPage")
# element = self.driver.find_element_by_id("theworks")
# element.send_keys("foo")
# self._assertEventFired("focus")
def _clickOnElementWhichRecordsEvents(self):
self.driver.find_element_by_id("plainButton").click()
def _assertEventFired(self, eventName):
result = self.driver.find_element_by_id("result")
text = result.text
self.assertTrue(eventName in text, "No " + eventName + " fired: " + text)
def _pageURL(self, name):
return "http://localhost:%d/%s.html" % (self.webserver.port, name)
def _loadSimplePage(self):
self._loadPage("simpleTest")
def _loadPage(self, name):
self.driver.get(self._pageURL(name))
| gx1997/chrome-loongson | third_party/webdriver/python/test/selenium/webdriver/common/correct_event_firing_tests.py | Python | bsd-3-clause | 5,540 |
""" This module attempts to make it easy to create VTK-Python
unittests. The module uses unittest for the test interface. For more
documentation on what unittests are and how to use them, please read
these:
http://www.python.org/doc/current/lib/module-unittest.html
http://www.diveintopython.org/roman_divein.html
This VTK-Python test module supports image based tests with multiple
images per test suite and multiple images per individual test as well.
It also prints information appropriate for Dart
(http://public.kitware.com/Dart/).
This module defines several useful classes and functions to make
writing tests easy. The most important of these are:
class vtkTest:
Subclass this for your tests. It also has a few useful internal
functions that can be used to do some simple blackbox testing.
compareImage(renwin, img_fname, threshold=10):
Compares renwin with image and generates image if it does not
exist. The threshold determines how closely the images must match.
The function also handles multiple images and finds the best
matching image.
compareImageWithSavedImage(src_img, img_fname, threshold=10):
Compares given source image (in the form of a vtkImageData) with
saved image and generates the image if it does not exist. The
threshold determines how closely the images must match. The
function also handles multiple images and finds the best matching
image.
getAbsImagePath(img_basename):
Returns the full path to the image given the basic image name.
main(cases):
Does the testing given a list of tuples containing test classes and
the starting string of the functions used for testing.
interact():
Interacts with the user if necessary. The behavior of this is
rather trivial and works best when using Tkinter. It does not do
anything by default and stops to interact with the user when given
the appropriate command line arguments.
isInteractive():
If interact() is not good enough, use this to find if the mode is
interactive or not and do whatever is necessary to generate an
interactive view.
Examples:
The best way to learn on how to use this module is to look at a few
examples. The end of this file contains a trivial example. Please
also look at the following examples:
Rendering/Testing/Python/TestTkRenderWidget.py,
Rendering/Testing/Python/TestTkRenderWindowInteractor.py
Created: September, 2002
Prabhu Ramachandran <[email protected]>
"""
import sys, os, time
import os.path
import unittest, getopt
import vtk
import BlackBox
# location of the VTK data files. Set via command line args or
# environment variable.
VTK_DATA_ROOT = ""
# location of the VTK baseline images. Set via command line args or
# environment variable.
VTK_BASELINE_ROOT = ""
# location of the VTK difference images for failed tests. Set via
# command line args or environment variable.
VTK_TEMP_DIR = ""
# Verbosity of the test messages (used by unittest)
_VERBOSE = 0
# Determines if it is necessary to interact with the user. If zero
# don't interact; if 1, interact. Set via command line args
_INTERACT = 0
# This will be set to 1 when the image test will not be performed.
# This option is used internally by the script and set via command
# line arguments.
_NO_IMAGE = 0
class vtkTest(unittest.TestCase):
"""A simple default VTK test class that defines a few useful
blackbox tests that can be readily used. Derive your test cases
from this class and use the following if you'd like to.
Note: Unittest instantiates this class (or your subclass) each
time it tests a method. So if you do not want that to happen when
generating VTK pipelines you should create the pipeline in the
class definition as done below for _blackbox.
"""
_blackbox = BlackBox.Tester(debug=0)
# Due to what seems to be a bug in python some objects leak.
# Avoid the exit-with-error in vtkDebugLeaks.
dl = vtk.vtkDebugLeaks()
dl.SetExitError(0)
dl = None
def _testParse(self, obj):
"""Does a blackbox test by attempting to parse the class for
its various methods using vtkMethodParser. This is a useful
test because it gets all the methods of the vtkObject, parses
them and sorts them into different classes of objects."""
self._blackbox.testParse(obj)
def _testGetSet(self, obj, excluded_methods=[]):
"""Checks the Get/Set method pairs by setting the value using
the current state and making sure that it equals the value it
was originally. This effectively calls _testParse
internally. """
self._blackbox.testGetSet(obj, excluded_methods)
def _testBoolean(self, obj, excluded_methods=[]):
"""Checks the Boolean methods by setting the value on and off
        and making sure that the GetMethod returns the set value.
This effectively calls _testParse internally. """
self._blackbox.testBoolean(obj, excluded_methods)
def interact():
"""Interacts with the user if necessary. """
global _INTERACT
if _INTERACT:
raw_input("\nPress Enter/Return to continue with the testing. --> ")
def isInteractive():
"""Returns if the currently chosen mode is interactive or not
based on command line options."""
return _INTERACT
def getAbsImagePath(img_basename):
"""Returns the full path to the image given the basic image
name."""
global VTK_BASELINE_ROOT
return os.path.join(VTK_BASELINE_ROOT, img_basename)
def _getTempImagePath(img_fname):
x = os.path.join(VTK_TEMP_DIR, os.path.split(img_fname)[1])
return os.path.abspath(x)
def compareImageWithSavedImage(src_img, img_fname, threshold=10):
"""Compares a source image (src_img, which is a vtkImageData) with
the saved image file whose name is given in the second argument.
If the image file does not exist the image is generated and
stored. If not the source image is compared to that of the
figure. This function also handles multiple images and finds the
best matching image.
"""
global _NO_IMAGE
if _NO_IMAGE:
return
f_base, f_ext = os.path.splitext(img_fname)
if not os.path.isfile(img_fname):
# generate the image
pngw = vtk.vtkPNGWriter()
pngw.SetFileName(_getTempImagePath(img_fname))
pngw.SetInputData(src_img)
pngw.Write()
return
pngr = vtk.vtkPNGReader()
pngr.SetFileName(img_fname)
idiff = vtk.vtkImageDifference()
idiff.SetInputData(src_img)
idiff.SetImageConnection(pngr.GetOutputPort())
min_err = idiff.GetThresholdedError()
img_err = min_err
best_img = img_fname
err_index = 0
count = 0
if min_err > threshold:
count = 1
test_failed = 1
err_index = -1
while 1: # keep trying images till we get the best match.
new_fname = f_base + "_%d.png"%count
if not os.path.exists(new_fname):
# no other image exists.
break
# since file exists check if it matches.
pngr.SetFileName(new_fname)
pngr.Update()
idiff.Update()
alt_err = idiff.GetThresholdedError()
if alt_err < threshold:
# matched,
err_index = count
test_failed = 0
min_err = alt_err
img_err = alt_err
best_img = new_fname
break
else:
if alt_err < min_err:
# image is a better match.
err_index = count
min_err = alt_err
img_err = alt_err
best_img = new_fname
count = count + 1
# closes while loop.
if test_failed:
_handleFailedImage(idiff, pngr, best_img)
# Print for Dart.
_printDartImageError(img_err, err_index, f_base)
msg = "Failed image test: %f\n"%idiff.GetThresholdedError()
raise AssertionError, msg
# output the image error even if a test passed
_printDartImageSuccess(img_err, err_index)
def compareImage(renwin, img_fname, threshold=10):
"""Compares renwin's (a vtkRenderWindow) contents with the image
file whose name is given in the second argument. If the image
file does not exist the image is generated and stored. If not the
image in the render window is compared to that of the figure.
This function also handles multiple images and finds the best
matching image. """
global _NO_IMAGE
if _NO_IMAGE:
return
w2if = vtk.vtkWindowToImageFilter()
w2if.ReadFrontBufferOff()
w2if.SetInput(renwin)
return compareImageWithSavedImage(w2if.GetOutput(), img_fname, threshold)
def _printDartImageError(img_err, err_index, img_base):
"""Prints the XML data necessary for Dart."""
img_base = _getTempImagePath(img_base)
print "Failed image test with error: %f"%img_err
print "<DartMeasurement name=\"ImageError\" type=\"numeric/double\">",
print "%f </DartMeasurement>"%img_err
if err_index <= 0:
print "<DartMeasurement name=\"BaselineImage\" type=\"text/string\">Standard</DartMeasurement>",
else:
print "<DartMeasurement name=\"BaselineImage\" type=\"numeric/integer\">",
print "%d </DartMeasurement>"%err_index
print "<DartMeasurementFile name=\"TestImage\" type=\"image/png\">",
print "%s </DartMeasurementFile>"%(img_base + '.png')
print "<DartMeasurementFile name=\"DifferenceImage\" type=\"image/png\">",
print "%s </DartMeasurementFile>"%(img_base + '.diff.png')
print "<DartMeasurementFile name=\"ValidImage\" type=\"image/png\">",
print "%s </DartMeasurementFile>"%(img_base + '.valid.png')
def _printDartImageSuccess(img_err, err_index):
"Prints XML data for Dart when image test succeeded."
print "<DartMeasurement name=\"ImageError\" type=\"numeric/double\">",
print "%f </DartMeasurement>"%img_err
if err_index <= 0:
print "<DartMeasurement name=\"BaselineImage\" type=\"text/string\">Standard</DartMeasurement>",
else:
print "<DartMeasurement name=\"BaselineImage\" type=\"numeric/integer\">",
print "%d </DartMeasurement>"%err_index
def _handleFailedImage(idiff, pngr, img_fname):
"""Writes all the necessary images when an image comparison
failed."""
f_base, f_ext = os.path.splitext(img_fname)
# write the difference image gamma adjusted for the dashboard.
gamma = vtk.vtkImageShiftScale()
gamma.SetInputConnection(idiff.GetOutputPort())
gamma.SetShift(0)
gamma.SetScale(10)
pngw = vtk.vtkPNGWriter()
pngw.SetFileName(_getTempImagePath(f_base + ".diff.png"))
pngw.SetInputConnection(gamma.GetOutputPort())
pngw.Write()
# Write out the image that was generated. Write it out as full so that
# it may be used as a baseline image if the tester deems it valid.
pngw.SetInputConnection(idiff.GetInputConnection(0,0))
pngw.SetFileName(_getTempImagePath(f_base + ".png"))
pngw.Write()
# write out the valid image that matched.
pngw.SetInputConnection(idiff.GetInputConnection(1,0))
pngw.SetFileName(_getTempImagePath(f_base + ".valid.png"))
pngw.Write()
def main(cases):
""" Pass a list of tuples containing test classes and the starting
string of the functions used for testing.
Example:
main ([(vtkTestClass, 'test'), (vtkTestClass1, 'test')])
"""
processCmdLine()
timer = vtk.vtkTimerLog()
s_time = timer.GetCPUTime()
s_wall_time = time.time()
# run the tests
result = test(cases)
tot_time = timer.GetCPUTime() - s_time
tot_wall_time = float(time.time() - s_wall_time)
# output measurements for Dart
print "<DartMeasurement name=\"WallTime\" type=\"numeric/double\">",
print " %f </DartMeasurement>"%tot_wall_time
print "<DartMeasurement name=\"CPUTime\" type=\"numeric/double\">",
print " %f </DartMeasurement>"%tot_time
# Delete these to eliminate debug leaks warnings.
del cases, timer
if result.wasSuccessful():
sys.exit(0)
else:
sys.exit(1)
def test(cases):
""" Pass a list of tuples containing test classes and the
functions used for testing.
It returns a unittest._TextTestResult object.
Example:
test = test_suite([(vtkTestClass, 'test'),
(vtkTestClass1, 'test')])
"""
# Make the test suites from the arguments.
suites = []
for case in cases:
suites.append(unittest.makeSuite(case[0], case[1]))
test_suite = unittest.TestSuite(suites)
# Now run the tests.
runner = unittest.TextTestRunner(verbosity=_VERBOSE)
result = runner.run(test_suite)
return result
def usage():
msg="""Usage:\nTestScript.py [options]\nWhere options are:\n
-D /path/to/VTKData
--data-dir /path/to/VTKData
Directory containing VTK Data use for tests. If this option
is not set via the command line the environment variable
VTK_DATA_ROOT is used. If the environment variable is not
set the value defaults to '../../../../VTKData'.
-B /path/to/valid/image_dir/
--baseline-root /path/to/valid/image_dir/
This is a path to the directory containing the valid images
for comparison. If this option is not set via the command
line the environment variable VTK_BASELINE_ROOT is used. If
the environment variable is not set the value defaults to
the same value set for -D (--data-dir).
-T /path/to/valid/temporary_dir/
--temp-dir /path/to/valid/temporary_dir/
This is a path to the directory where the image differences
are written. If this option is not set via the command line
the environment variable VTK_TEMP_DIR is used. If the
environment variable is not set the value defaults to
'../../../Testing/Temporary'.
-v level
--verbose level
Sets the verbosity of the test runner. Valid values are 0,
1, and 2 in increasing order of verbosity.
-I
--interact
Interacts with the user when chosen. If this is not chosen
the test will run and exit as soon as it is finished. When
enabled, the behavior of this is rather trivial and works
best when the test uses Tkinter.
-n
--no-image
Does not do any image comparisons. This is useful if you
want to run the test and not worry about test images or
image failures etc.
-h
--help
Prints this message.
"""
return msg
def parseCmdLine():
arguments = sys.argv[1:]
options = "B:D:T:v:hnI"
long_options = ['baseline-root=', 'data-dir=', 'temp-dir=',
'verbose=', 'help', 'no-image', 'interact']
try:
opts, args = getopt.getopt(arguments, options, long_options)
except getopt.error, msg:
print usage()
print '-'*70
print msg
sys.exit (1)
return opts, args
def processCmdLine():
opts, args = parseCmdLine()
global VTK_DATA_ROOT, VTK_BASELINE_ROOT, VTK_TEMP_DIR
global _VERBOSE, _NO_IMAGE, _INTERACT
# setup defaults
try:
VTK_DATA_ROOT = os.environ['VTK_DATA_ROOT']
except KeyError:
VTK_DATA_ROOT = os.path.normpath("../../../../VTKData")
try:
VTK_BASELINE_ROOT = os.environ['VTK_BASELINE_ROOT']
except KeyError:
pass
try:
VTK_TEMP_DIR = os.environ['VTK_TEMP_DIR']
except KeyError:
VTK_TEMP_DIR = os.path.normpath("../../../Testing/Temporary")
for o, a in opts:
if o in ('-D', '--data-dir'):
VTK_DATA_ROOT = os.path.abspath(a)
if o in ('-B', '--baseline-root'):
VTK_BASELINE_ROOT = os.path.abspath(a)
if o in ('-T', '--temp-dir'):
VTK_TEMP_DIR = os.path.abspath(a)
if o in ('-n', '--no-image'):
_NO_IMAGE = 1
if o in ('-I', '--interact'):
_INTERACT = 1
if o in ('-v', '--verbose'):
try:
_VERBOSE = int(a)
except:
msg="Verbosity should be an integer. 0, 1, 2 are valid."
print msg
sys.exit(1)
if o in ('-h', '--help'):
print usage()
sys.exit()
if not VTK_BASELINE_ROOT: # default value.
VTK_BASELINE_ROOT = VTK_DATA_ROOT
if __name__ == "__main__":
######################################################################
# A Trivial test case to illustrate how this module works.
class SampleTest(vtkTest):
obj = vtk.vtkActor()
def testParse(self):
"Test if class is parseable"
self._testParse(self.obj)
def testGetSet(self):
"Testing Get/Set methods"
self._testGetSet(self.obj)
def testBoolean(self):
"Testing Boolean methods"
self._testBoolean(self.obj)
# Test with the above trivial sample test.
main( [ (SampleTest, 'test') ] )
| cjh1/vtkmodular | Wrapping/Python/vtk/test/Testing.py | Python | bsd-3-clause | 17,259 |
#!/usr/bin/env python
from distutils.core import setup
DISTNAME = 'tract_querier'
DESCRIPTION = \
'WMQL: Query language for automatic tract extraction from '\
'full-brain tractographies with '\
'a registered template on top of them'
LONG_DESCRIPTION = open('README.md').read()
MAINTAINER = 'Demian Wassermann'
MAINTAINER_EMAIL = '[email protected]'
URL = 'http://demianw.github.io/tract_querier'
LICENSE = open('license.rst').read()
DOWNLOAD_URL = 'https://github.com/demianw/tract_querier'
VERSION = '0.1'
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
config.set_options(quiet=True)
config.add_subpackage('tract_querier')
return config
if __name__ == "__main__":
setup(
name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
requires=[
'numpy(>=1.6)',
'nibabel(>=1.3)'
],
classifiers=[
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS'
],
scripts=[
'scripts/tract_querier',
'scripts/tract_math'
],
**(configuration().todict())
)
| oesteban/tract_querier | setup.py | Python | bsd-3-clause | 1,691 |
if __name__ == '__main__':
import instance_occlsegm_lib
dataset = instance_occlsegm_lib.datasets.apc.arc2017.JskARC2017DatasetV1(
'train')
instance_occlsegm_lib.datasets.view_class_seg_dataset(dataset)
| start-jsk/jsk_apc | demos/instance_occlsegm/tests/datasets_tests/apc_tests/arc2017_tests/jsk_tests/check_jsk_arc2017_v1_dataset.py | Python | bsd-3-clause | 222 |
#!/usr/bin/env vpython3
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import itertools
import logging
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from third_party import schema
import metrics_utils
# We have to disable monitoring before importing gclient.
metrics_utils.COLLECT_METRICS = False
import gclient
import gclient_eval
import gclient_utils
class GClientEvalTest(unittest.TestCase):
def test_str(self):
self.assertEqual('foo', gclient_eval._gclient_eval('"foo"'))
def test_tuple(self):
self.assertEqual(('a', 'b'), gclient_eval._gclient_eval('("a", "b")'))
def test_list(self):
self.assertEqual(['a', 'b'], gclient_eval._gclient_eval('["a", "b"]'))
def test_dict(self):
self.assertEqual({'a': 'b'}, gclient_eval._gclient_eval('{"a": "b"}'))
def test_name_safe(self):
self.assertEqual(True, gclient_eval._gclient_eval('True'))
def test_name_unsafe(self):
with self.assertRaises(ValueError) as cm:
gclient_eval._gclient_eval('UnsafeName')
self.assertIn('invalid name \'UnsafeName\'', str(cm.exception))
def test_invalid_call(self):
with self.assertRaises(ValueError) as cm:
gclient_eval._gclient_eval('Foo("bar")')
self.assertIn('Str and Var are the only allowed functions',
str(cm.exception))
def test_expands_vars(self):
self.assertEqual(
'foo',
gclient_eval._gclient_eval('Var("bar")', vars_dict={'bar': 'foo'}))
self.assertEqual(
'baz',
gclient_eval._gclient_eval(
'Var("bar")',
vars_dict={'bar': gclient_eval.ConstantString('baz')}))
def test_expands_vars_with_braces(self):
self.assertEqual(
'foo',
gclient_eval._gclient_eval('"{bar}"', vars_dict={'bar': 'foo'}))
self.assertEqual(
'baz',
gclient_eval._gclient_eval(
'"{bar}"',
vars_dict={'bar': gclient_eval.ConstantString('baz')}))
def test_invalid_var(self):
with self.assertRaises(KeyError) as cm:
gclient_eval._gclient_eval('"{bar}"', vars_dict={})
self.assertIn('bar was used as a variable, but was not declared',
str(cm.exception))
def test_plus(self):
self.assertEqual('foo', gclient_eval._gclient_eval('"f" + "o" + "o"'))
def test_format(self):
self.assertEqual('foo', gclient_eval._gclient_eval('"%s" % "foo"'))
def test_not_expression(self):
with self.assertRaises(SyntaxError) as cm:
gclient_eval._gclient_eval('def foo():\n pass')
self.assertIn('invalid syntax', str(cm.exception))
def test_not_whitelisted(self):
with self.assertRaises(ValueError) as cm:
gclient_eval._gclient_eval('[x for x in [1, 2, 3]]')
self.assertIn(
'unexpected AST node: <_ast.ListComp object', str(cm.exception))
def test_dict_ordered(self):
for test_case in itertools.permutations(range(4)):
input_data = ['{'] + ['"%s": "%s",' % (n, n) for n in test_case] + ['}']
expected = [(str(n), str(n)) for n in test_case]
result = gclient_eval._gclient_eval(''.join(input_data))
self.assertEqual(expected, list(result.items()))
class ExecTest(unittest.TestCase):
def test_multiple_assignment(self):
with self.assertRaises(ValueError) as cm:
gclient_eval.Exec('a, b, c = "a", "b", "c"')
self.assertIn(
'invalid assignment: target should be a name', str(cm.exception))
def test_override(self):
with self.assertRaises(ValueError) as cm:
gclient_eval.Exec('a = "a"\na = "x"')
self.assertIn(
'invalid assignment: overrides var \'a\'', str(cm.exception))
def test_schema_wrong_type(self):
with self.assertRaises(gclient_utils.Error):
gclient_eval.Exec('include_rules = {}')
def test_recursedeps_list(self):
local_scope = gclient_eval.Exec(
'recursedeps = [["src/third_party/angle", "DEPS.chromium"]]')
self.assertEqual(
{'recursedeps': [['src/third_party/angle', 'DEPS.chromium']]},
local_scope)
def test_var(self):
local_scope = gclient_eval.Exec('\n'.join([
'vars = {',
' "foo": "bar",',
' "baz": Str("quux")',
'}',
'deps = {',
' "a_dep": "a" + Var("foo") + "b" + Var("baz"),',
'}',
]))
Str = gclient_eval.ConstantString
self.assertEqual({
'vars': {'foo': 'bar', 'baz': Str('quux')},
'deps': {'a_dep': 'abarbquux'},
}, local_scope)
def test_braces_var(self):
local_scope = gclient_eval.Exec('\n'.join([
'vars = {',
' "foo": "bar",',
' "baz": Str("quux")',
'}',
'deps = {',
' "a_dep": "a{foo}b{baz}",',
'}',
]))
Str = gclient_eval.ConstantString
self.assertEqual({
'vars': {'foo': 'bar',
'baz': Str('quux')},
'deps': {'a_dep': 'abarbquux'},
}, local_scope)
def test_empty_deps(self):
local_scope = gclient_eval.Exec('deps = {}')
self.assertEqual({'deps': {}}, local_scope)
def test_overrides_vars(self):
local_scope = gclient_eval.Exec('\n'.join([
'vars = {',
' "foo": "bar",',
' "quux": Str("quuz")',
'}',
'deps = {',
' "a_dep": "a{foo}b",',
' "b_dep": "c{quux}d",',
'}',
]), vars_override={'foo': 'baz', 'quux': 'corge'})
Str = gclient_eval.ConstantString
self.assertEqual({
'vars': {'foo': 'bar', 'quux': Str('quuz')},
'deps': {'a_dep': 'abazb', 'b_dep': 'ccorged'},
}, local_scope)
def test_doesnt_override_undeclared_vars(self):
with self.assertRaises(KeyError) as cm:
gclient_eval.Exec('\n'.join([
'vars = {',
' "foo": "bar",',
'}',
'deps = {',
' "a_dep": "a{baz}b",',
'}',
]), vars_override={'baz': 'lalala'})
self.assertIn('baz was used as a variable, but was not declared',
str(cm.exception))
def test_doesnt_allow_duplicate_deps(self):
with self.assertRaises(ValueError) as cm:
gclient_eval.Parse('\n'.join([
'deps = {',
' "a_dep": {',
' "url": "a_url@a_rev",',
' "condition": "foo",',
' },',
' "a_dep": {',
' "url": "a_url@another_rev",',
' "condition": "not foo",',
' }',
'}',
]), '<unknown>')
self.assertIn('duplicate key in dictionary: a_dep', str(cm.exception))
class UpdateConditionTest(unittest.TestCase):
def test_both_present(self):
info = {'condition': 'foo'}
gclient_eval.UpdateCondition(info, 'and', 'bar')
self.assertEqual(info, {'condition': '(foo) and (bar)'})
info = {'condition': 'foo'}
gclient_eval.UpdateCondition(info, 'or', 'bar')
self.assertEqual(info, {'condition': '(foo) or (bar)'})
def test_one_present_and(self):
# If one of info's condition or new_condition is present, and |op| == 'and'
# then the the result must be the present condition.
info = {'condition': 'foo'}
gclient_eval.UpdateCondition(info, 'and', None)
self.assertEqual(info, {'condition': 'foo'})
info = {}
gclient_eval.UpdateCondition(info, 'and', 'bar')
self.assertEqual(info, {'condition': 'bar'})
def test_both_absent_and(self):
# Nothing happens
info = {}
gclient_eval.UpdateCondition(info, 'and', None)
self.assertEqual(info, {})
def test_or(self):
# If one of info's condition and new_condition is not present, then there
# shouldn't be a condition. An absent value is treated as implicitly True.
info = {'condition': 'foo'}
gclient_eval.UpdateCondition(info, 'or', None)
self.assertEqual(info, {})
info = {}
gclient_eval.UpdateCondition(info, 'or', 'bar')
self.assertEqual(info, {})
info = {}
gclient_eval.UpdateCondition(info, 'or', None)
self.assertEqual(info, {})
class EvaluateConditionTest(unittest.TestCase):
def test_true(self):
self.assertTrue(gclient_eval.EvaluateCondition('True', {}))
def test_variable(self):
self.assertFalse(gclient_eval.EvaluateCondition('foo', {'foo': 'False'}))
def test_variable_cyclic_reference(self):
with self.assertRaises(ValueError) as cm:
self.assertTrue(gclient_eval.EvaluateCondition('bar', {'bar': 'bar'}))
self.assertIn(
'invalid cyclic reference to \'bar\' (inside \'bar\')',
str(cm.exception))
def test_operators(self):
self.assertFalse(gclient_eval.EvaluateCondition(
'a and not (b or c)', {'a': 'True', 'b': 'False', 'c': 'True'}))
def test_expansion(self):
self.assertTrue(gclient_eval.EvaluateCondition(
'a or b', {'a': 'b and c', 'b': 'not c', 'c': 'False'}))
def test_string_equality(self):
self.assertTrue(gclient_eval.EvaluateCondition(
'foo == "baz"', {'foo': '"baz"'}))
self.assertFalse(gclient_eval.EvaluateCondition(
'foo == "bar"', {'foo': '"baz"'}))
def test_string_inequality(self):
self.assertTrue(gclient_eval.EvaluateCondition(
'foo != "bar"', {'foo': '"baz"'}))
self.assertFalse(gclient_eval.EvaluateCondition(
'foo != "baz"', {'foo': '"baz"'}))
def test_triple_or(self):
self.assertTrue(gclient_eval.EvaluateCondition(
'a or b or c', {'a': 'False', 'b': 'False', 'c': 'True'}))
self.assertFalse(gclient_eval.EvaluateCondition(
'a or b or c', {'a': 'False', 'b': 'False', 'c': 'False'}))
def test_triple_and(self):
self.assertTrue(gclient_eval.EvaluateCondition(
'a and b and c', {'a': 'True', 'b': 'True', 'c': 'True'}))
self.assertFalse(gclient_eval.EvaluateCondition(
'a and b and c', {'a': 'True', 'b': 'True', 'c': 'False'}))
def test_triple_and_and_or(self):
self.assertTrue(gclient_eval.EvaluateCondition(
'a and b and c or d or e',
{'a': 'False', 'b': 'False', 'c': 'False', 'd': 'False', 'e': 'True'}))
self.assertFalse(gclient_eval.EvaluateCondition(
'a and b and c or d or e',
{'a': 'True', 'b': 'True', 'c': 'False', 'd': 'False', 'e': 'False'}))
def test_string_bool(self):
self.assertFalse(gclient_eval.EvaluateCondition(
'false_str_var and true_var',
{'false_str_var': 'False', 'true_var': True}))
def test_string_bool_typo(self):
with self.assertRaises(ValueError) as cm:
gclient_eval.EvaluateCondition(
'false_var_str and true_var',
{'false_str_var': 'False', 'true_var': True})
self.assertIn(
'invalid "and" operand \'false_var_str\' '
'(inside \'false_var_str and true_var\')',
str(cm.exception))
def test_non_bool_in_or(self):
with self.assertRaises(ValueError) as cm:
gclient_eval.EvaluateCondition(
'string_var or true_var',
{'string_var': 'Kittens', 'true_var': True})
self.assertIn(
'invalid "or" operand \'Kittens\' '
'(inside \'string_var or true_var\')',
str(cm.exception))
def test_non_bool_in_and(self):
with self.assertRaises(ValueError) as cm:
gclient_eval.EvaluateCondition(
'string_var and true_var',
{'string_var': 'Kittens', 'true_var': True})
self.assertIn(
'invalid "and" operand \'Kittens\' '
'(inside \'string_var and true_var\')',
str(cm.exception))
def test_tuple_presence(self):
self.assertTrue(gclient_eval.EvaluateCondition(
'foo in ("bar", "baz")', {'foo': 'bar'}))
self.assertFalse(gclient_eval.EvaluateCondition(
'foo in ("bar", "baz")', {'foo': 'not_bar'}))
def test_unsupported_tuple_operation(self):
with self.assertRaises(ValueError) as cm:
gclient_eval.EvaluateCondition('foo == ("bar", "baz")', {'foo': 'bar'})
self.assertIn('unexpected AST node', str(cm.exception))
with self.assertRaises(ValueError) as cm:
gclient_eval.EvaluateCondition('(foo,) == "bar"', {'foo': 'bar'})
self.assertIn('unexpected AST node', str(cm.exception))
def test_str_in_condition(self):
Str = gclient_eval.ConstantString
self.assertTrue(gclient_eval.EvaluateCondition(
's_var == "foo"',
{'s_var': Str("foo")}))
self.assertFalse(gclient_eval.EvaluateCondition(
's_var in ("baz", "quux")',
{'s_var': Str("foo")}))
class VarTest(unittest.TestCase):
def assert_adds_var(self, before, after):
local_scope = gclient_eval.Exec('\n'.join(before))
gclient_eval.AddVar(local_scope, 'baz', 'lemur')
results = gclient_eval.RenderDEPSFile(local_scope)
self.assertEqual(results, '\n'.join(after))
def test_adds_var(self):
before = [
'vars = {',
' "foo": "bar",',
'}',
]
after = [
'vars = {',
' "baz": "lemur",',
' "foo": "bar",',
'}',
]
self.assert_adds_var(before, after)
def test_adds_var_twice(self):
local_scope = gclient_eval.Exec('\n'.join([
'vars = {',
' "foo": "bar",',
'}',
]))
gclient_eval.AddVar(local_scope, 'baz', 'lemur')
gclient_eval.AddVar(local_scope, 'v8_revision', 'deadbeef')
result = gclient_eval.RenderDEPSFile(local_scope)
self.assertEqual(result, '\n'.join([
'vars = {',
' "v8_revision": "deadbeef",',
' "baz": "lemur",',
' "foo": "bar",',
'}',
]))
def test_gets_and_sets_var(self):
local_scope = gclient_eval.Exec('\n'.join([
'vars = {',
' "foo": "bar",',
' "quux": Str("quuz")',
'}',
]))
self.assertEqual(gclient_eval.GetVar(local_scope, 'foo'),
"bar")
self.assertEqual(gclient_eval.GetVar(local_scope, 'quux'),
"quuz")
gclient_eval.SetVar(local_scope, 'foo', 'baz')
gclient_eval.SetVar(local_scope, 'quux', 'corge')
result = gclient_eval.RenderDEPSFile(local_scope)
self.assertEqual(result, '\n'.join([
'vars = {',
' "foo": "baz",',
' "quux": Str("corge")',
'}',
]))
def test_gets_and_sets_var_non_string(self):
local_scope = gclient_eval.Exec('\n'.join([
'vars = {',
' "foo": True,',
'}',
]))
result = gclient_eval.GetVar(local_scope, 'foo')
self.assertEqual(result, True)
gclient_eval.SetVar(local_scope, 'foo', 'False')
result = gclient_eval.RenderDEPSFile(local_scope)
self.assertEqual(result, '\n'.join([
'vars = {',
' "foo": False,',
'}',
]))
def test_add_preserves_formatting(self):
before = [
'# Copyright stuff',
'# some initial comments',
'',
'vars = { ',
' # Some comments.',
' "foo": "bar",',
'',
' # More comments.',
' # Even more comments.',
' "v8_revision": ',
' "deadbeef",',
' # Someone formatted this wrong',
'}',
]
after = [
'# Copyright stuff',
'# some initial comments',
'',
'vars = { ',
' "baz": "lemur",',
' # Some comments.',
' "foo": "bar",',
'',
' # More comments.',
' # Even more comments.',
' "v8_revision": ',
' "deadbeef",',
' # Someone formatted this wrong',
'}',
]
self.assert_adds_var(before, after)
def test_set_preserves_formatting(self):
local_scope = gclient_eval.Exec('\n'.join([
'vars = {',
' # Comment with trailing space ',
' "foo": \'bar\',',
'}',
]))
gclient_eval.SetVar(local_scope, 'foo', 'baz')
result = gclient_eval.RenderDEPSFile(local_scope)
self.assertEqual(result, '\n'.join([
'vars = {',
' # Comment with trailing space ',
' "foo": \'baz\',',
'}',
]))
class CipdTest(unittest.TestCase):
def test_gets_and_sets_cipd(self):
local_scope = gclient_eval.Exec('\n'.join([
'deps = {',
' "src/cipd/package": {',
' "packages": [',
' {',
' "package": "some/cipd/package",',
' "version": "deadbeef",',
' },',
' {',
' "package": "another/cipd/package",',
' "version": "version:5678",',
' },',
' ],',
' "condition": "checkout_android",',
' "dep_type": "cipd",',
' },',
'}',
]))
self.assertEqual(
gclient_eval.GetCIPD(
local_scope, 'src/cipd/package', 'some/cipd/package'),
'deadbeef')
self.assertEqual(
gclient_eval.GetCIPD(
local_scope, 'src/cipd/package', 'another/cipd/package'),
'version:5678')
gclient_eval.SetCIPD(
local_scope, 'src/cipd/package', 'another/cipd/package', 'version:6789')
gclient_eval.SetCIPD(
local_scope, 'src/cipd/package', 'some/cipd/package', 'foobar')
result = gclient_eval.RenderDEPSFile(local_scope)
self.assertEqual(result, '\n'.join([
'deps = {',
' "src/cipd/package": {',
' "packages": [',
' {',
' "package": "some/cipd/package",',
' "version": "foobar",',
' },',
' {',
' "package": "another/cipd/package",',
' "version": "version:6789",',
' },',
' ],',
' "condition": "checkout_android",',
' "dep_type": "cipd",',
' },',
'}',
]))
def test_gets_and_sets_cipd_vars(self):
local_scope = gclient_eval.Exec('\n'.join([
'vars = {',
' "cipd-rev": "git_revision:deadbeef",',
' "another-cipd-rev": "version:1.0.3",',
'}',
'deps = {',
' "src/cipd/package": {',
' "packages": [',
' {',
' "package": "some/cipd/package",',
' "version": Var("cipd-rev"),',
' },',
' {',
' "package": "another/cipd/package",',
' "version": "{another-cipd-rev}",',
' },',
' ],',
' "condition": "checkout_android",',
' "dep_type": "cipd",',
' },',
'}',
]))
self.assertEqual(
gclient_eval.GetCIPD(
local_scope, 'src/cipd/package', 'some/cipd/package'),
'git_revision:deadbeef')
self.assertEqual(
gclient_eval.GetCIPD(
local_scope, 'src/cipd/package', 'another/cipd/package'),
'version:1.0.3')
gclient_eval.SetCIPD(
local_scope, 'src/cipd/package', 'another/cipd/package',
'version:1.1.0')
gclient_eval.SetCIPD(
local_scope, 'src/cipd/package', 'some/cipd/package',
'git_revision:foobar')
result = gclient_eval.RenderDEPSFile(local_scope)
self.assertEqual(result, '\n'.join([
'vars = {',
' "cipd-rev": "git_revision:foobar",',
' "another-cipd-rev": "version:1.1.0",',
'}',
'deps = {',
' "src/cipd/package": {',
' "packages": [',
' {',
' "package": "some/cipd/package",',
' "version": Var("cipd-rev"),',
' },',
' {',
' "package": "another/cipd/package",',
' "version": "{another-cipd-rev}",',
' },',
' ],',
' "condition": "checkout_android",',
' "dep_type": "cipd",',
' },',
'}',
]))
def test_preserves_escaped_vars(self):
local_scope = gclient_eval.Exec('\n'.join([
'deps = {',
' "src/cipd/package": {',
' "packages": [',
' {',
' "package": "package/${{platform}}",',
' "version": "version:abcd",',
' },',
' ],',
' "dep_type": "cipd",',
' },',
'}',
]))
gclient_eval.SetCIPD(
local_scope, 'src/cipd/package', 'package/${platform}', 'version:dcba')
result = gclient_eval.RenderDEPSFile(local_scope)
self.assertEqual(result, '\n'.join([
'deps = {',
' "src/cipd/package": {',
' "packages": [',
' {',
' "package": "package/${{platform}}",',
' "version": "version:dcba",',
' },',
' ],',
' "dep_type": "cipd",',
' },',
'}',
]))
class RevisionTest(unittest.TestCase):
def assert_gets_and_sets_revision(self, before, after, rev_before='deadbeef'):
local_scope = gclient_eval.Exec('\n'.join(before))
result = gclient_eval.GetRevision(local_scope, 'src/dep')
self.assertEqual(result, rev_before)
gclient_eval.SetRevision(local_scope, 'src/dep', 'deadfeed')
self.assertEqual('\n'.join(after), gclient_eval.RenderDEPSFile(local_scope))
def test_revision(self):
before = [
'deps = {',
' "src/dep": "https://example.com/dep.git@deadbeef",',
'}',
]
after = [
'deps = {',
' "src/dep": "https://example.com/dep.git@deadfeed",',
'}',
]
self.assert_gets_and_sets_revision(before, after)
def test_revision_new_line(self):
before = [
'deps = {',
' "src/dep": "https://example.com/dep.git@"',
' + "deadbeef",',
'}',
]
after = [
'deps = {',
' "src/dep": "https://example.com/dep.git@"',
' + "deadfeed",',
'}',
]
self.assert_gets_and_sets_revision(before, after)
def test_revision_windows_local_path(self):
before = [
'deps = {',
' "src/dep": "file:///C:\\\\path.git@deadbeef",',
'}',
]
after = [
'deps = {',
' "src/dep": "file:///C:\\\\path.git@deadfeed",',
'}',
]
self.assert_gets_and_sets_revision(before, after)
def test_revision_multiline_strings(self):
deps = [
'deps = {',
' "src/dep": "https://example.com/dep.git@"',
' "deadbeef",',
'}',
]
with self.assertRaises(ValueError) as e:
local_scope = gclient_eval.Exec('\n'.join(deps))
gclient_eval.SetRevision(local_scope, 'src/dep', 'deadfeed')
self.assertEqual(
'Can\'t update value for src/dep. Multiline strings and implicitly '
'concatenated strings are not supported.\n'
'Consider reformatting the DEPS file.',
str(e.exception))
def test_revision_implicitly_concatenated_strings(self):
deps = [
'deps = {',
' "src/dep": "https://example.com" + "/dep.git@" "deadbeef",',
'}',
]
with self.assertRaises(ValueError) as e:
local_scope = gclient_eval.Exec('\n'.join(deps))
gclient_eval.SetRevision(local_scope, 'src/dep', 'deadfeed')
self.assertEqual(
'Can\'t update value for src/dep. Multiline strings and implicitly '
'concatenated strings are not supported.\n'
'Consider reformatting the DEPS file.',
str(e.exception))
def test_revision_inside_dict(self):
before = [
'deps = {',
' "src/dep": {',
' "url": "https://example.com/dep.git@deadbeef",',
' "condition": "some_condition",',
' },',
'}',
]
after = [
'deps = {',
' "src/dep": {',
' "url": "https://example.com/dep.git@deadfeed",',
' "condition": "some_condition",',
' },',
'}',
]
self.assert_gets_and_sets_revision(before, after)
def test_follows_var_braces(self):
before = [
'vars = {',
' "dep_revision": "deadbeef",',
'}',
'deps = {',
' "src/dep": "https://example.com/dep.git@{dep_revision}",',
'}',
]
after = [
'vars = {',
' "dep_revision": "deadfeed",',
'}',
'deps = {',
' "src/dep": "https://example.com/dep.git@{dep_revision}",',
'}',
]
self.assert_gets_and_sets_revision(before, after)
def test_follows_var_braces_newline(self):
before = [
'vars = {',
' "dep_revision": "deadbeef",',
'}',
'deps = {',
' "src/dep": "https://example.com/dep.git"',
' + "@{dep_revision}",',
'}',
]
after = [
'vars = {',
' "dep_revision": "deadfeed",',
'}',
'deps = {',
' "src/dep": "https://example.com/dep.git"',
' + "@{dep_revision}",',
'}',
]
self.assert_gets_and_sets_revision(before, after)
def test_follows_var_function(self):
before = [
'vars = {',
' "dep_revision": "deadbeef",',
'}',
'deps = {',
' "src/dep": "https://example.com/dep.git@" + Var("dep_revision"),',
'}',
]
after = [
'vars = {',
' "dep_revision": "deadfeed",',
'}',
'deps = {',
' "src/dep": "https://example.com/dep.git@" + Var("dep_revision"),',
'}',
]
self.assert_gets_and_sets_revision(before, after)
def test_pins_revision(self):
before = [
'deps = {',
' "src/dep": "https://example.com/dep.git",',
'}',
]
after = [
'deps = {',
' "src/dep": "https://example.com/dep.git@deadfeed",',
'}',
]
self.assert_gets_and_sets_revision(before, after, rev_before=None)
def test_preserves_variables(self):
before = [
'vars = {',
' "src_root": "src"',
'}',
'deps = {',
' "{src_root}/dep": "https://example.com/dep.git@deadbeef",',
'}',
]
after = [
'vars = {',
' "src_root": "src"',
'}',
'deps = {',
' "{src_root}/dep": "https://example.com/dep.git@deadfeed",',
'}',
]
self.assert_gets_and_sets_revision(before, after)
def test_preserves_formatting(self):
before = [
'vars = {',
' # Some comment on deadbeef ',
' "dep_revision": "deadbeef",',
'}',
'deps = {',
' "src/dep": {',
' "url": "https://example.com/dep.git@" + Var("dep_revision"),',
'',
' "condition": "some_condition",',
' },',
'}',
]
after = [
'vars = {',
' # Some comment on deadbeef ',
' "dep_revision": "deadfeed",',
'}',
'deps = {',
' "src/dep": {',
' "url": "https://example.com/dep.git@" + Var("dep_revision"),',
'',
' "condition": "some_condition",',
' },',
'}',
]
self.assert_gets_and_sets_revision(before, after)
class ParseTest(unittest.TestCase):
def callParse(self, vars_override=None):
return gclient_eval.Parse('\n'.join([
'vars = {',
' "foo": "bar",',
'}',
'deps = {',
' "a_dep": "a{foo}b",',
'}',
]), '<unknown>', vars_override)
def test_supports_vars_inside_vars(self):
deps_file = '\n'.join([
'vars = {',
' "foo": "bar",',
' "baz": "\\"{foo}\\" == \\"bar\\"",',
'}',
'deps = {',
' "src/baz": {',
' "url": "baz_url",',
' "condition": "baz",',
' },',
'}',
])
local_scope = gclient_eval.Parse(deps_file, '<unknown>', None)
self.assertEqual({
'vars': {'foo': 'bar',
'baz': '"bar" == "bar"'},
'deps': {'src/baz': {'url': 'baz_url',
'dep_type': 'git',
'condition': 'baz'}},
}, local_scope)
def test_has_builtin_vars(self):
builtin_vars = {'builtin_var': 'foo'}
deps_file = '\n'.join([
'deps = {',
' "a_dep": "a{builtin_var}b",',
'}',
])
local_scope = gclient_eval.Parse(deps_file, '<unknown>', None, builtin_vars)
self.assertEqual({
'deps': {'a_dep': {'url': 'afoob',
'dep_type': 'git'}},
}, local_scope)
def test_declaring_builtin_var_has_no_effect(self):
builtin_vars = {'builtin_var': 'foo'}
deps_file = '\n'.join([
'vars = {',
' "builtin_var": "bar",',
'}',
'deps = {',
' "a_dep": "a{builtin_var}b",',
'}',
])
local_scope = gclient_eval.Parse(deps_file, '<unknown>', None, builtin_vars)
self.assertEqual({
'vars': {'builtin_var': 'bar'},
'deps': {'a_dep': {'url': 'afoob',
'dep_type': 'git'}},
}, local_scope)
def test_override_builtin_var(self):
builtin_vars = {'builtin_var': 'foo'}
vars_override = {'builtin_var': 'override'}
deps_file = '\n'.join([
'deps = {',
' "a_dep": "a{builtin_var}b",',
'}',
])
local_scope = gclient_eval.Parse(
deps_file, '<unknown>', vars_override, builtin_vars)
self.assertEqual({
'deps': {'a_dep': {'url': 'aoverrideb',
'dep_type': 'git'}},
}, local_scope, str(local_scope))
def test_expands_vars(self):
local_scope = self.callParse()
self.assertEqual({
'vars': {'foo': 'bar'},
'deps': {'a_dep': {'url': 'abarb',
'dep_type': 'git'}},
}, local_scope)
def test_overrides_vars(self):
local_scope = self.callParse(vars_override={'foo': 'baz'})
self.assertEqual({
'vars': {'foo': 'bar'},
'deps': {'a_dep': {'url': 'abazb',
'dep_type': 'git'}},
}, local_scope)
def test_no_extra_vars(self):
deps_file = '\n'.join([
'vars = {',
' "foo": "bar",',
'}',
'deps = {',
' "a_dep": "a{baz}b",',
'}',
])
with self.assertRaises(KeyError) as cm:
gclient_eval.Parse(deps_file, '<unknown>', {'baz': 'lalala'})
self.assertIn('baz was used as a variable, but was not declared',
str(cm.exception))
def test_standardizes_deps_string_dep(self):
local_scope = gclient_eval.Parse('\n'.join([
'deps = {',
' "a_dep": "a_url@a_rev",',
'}',
]), '<unknown>')
self.assertEqual({
'deps': {'a_dep': {'url': 'a_url@a_rev',
'dep_type': 'git'}},
}, local_scope)
def test_standardizes_deps_dict_dep(self):
local_scope = gclient_eval.Parse('\n'.join([
'deps = {',
' "a_dep": {',
' "url": "a_url@a_rev",',
' "condition": "checkout_android",',
' },',
'}',
]), '<unknown>')
self.assertEqual({
'deps': {'a_dep': {'url': 'a_url@a_rev',
'dep_type': 'git',
'condition': 'checkout_android'}},
}, local_scope)
def test_ignores_none_in_deps_os(self):
local_scope = gclient_eval.Parse('\n'.join([
'deps = {',
' "a_dep": "a_url@a_rev",',
'}',
'deps_os = {',
' "mac": {',
' "a_dep": None,',
' },',
'}',
]), '<unknown>')
self.assertEqual({
'deps': {'a_dep': {'url': 'a_url@a_rev',
'dep_type': 'git'}},
}, local_scope)
def test_merges_deps_os_extra_dep(self):
local_scope = gclient_eval.Parse('\n'.join([
'deps = {',
' "a_dep": "a_url@a_rev",',
'}',
'deps_os = {',
' "mac": {',
' "b_dep": "b_url@b_rev"',
' },',
'}',
]), '<unknown>')
self.assertEqual({
'deps': {'a_dep': {'url': 'a_url@a_rev',
'dep_type': 'git'},
'b_dep': {'url': 'b_url@b_rev',
'dep_type': 'git',
'condition': 'checkout_mac'}},
}, local_scope)
def test_merges_deps_os_existing_dep_with_no_condition(self):
local_scope = gclient_eval.Parse('\n'.join([
'deps = {',
' "a_dep": "a_url@a_rev",',
'}',
'deps_os = {',
' "mac": {',
' "a_dep": "a_url@a_rev"',
' },',
'}',
]), '<unknown>')
self.assertEqual({
'deps': {'a_dep': {'url': 'a_url@a_rev',
'dep_type': 'git'}},
}, local_scope)
def test_merges_deps_os_existing_dep_with_condition(self):
local_scope = gclient_eval.Parse('\n'.join([
'deps = {',
' "a_dep": {',
' "url": "a_url@a_rev",',
' "condition": "some_condition",',
' },',
'}',
'deps_os = {',
' "mac": {',
' "a_dep": "a_url@a_rev"',
' },',
'}',
]), '<unknown>')
self.assertEqual({
'deps': {
'a_dep': {'url': 'a_url@a_rev',
'dep_type': 'git',
'condition': '(checkout_mac) or (some_condition)'},
},
}, local_scope)
def test_merges_deps_os_multiple_os(self):
local_scope = gclient_eval.Parse('\n'.join([
'deps_os = {',
' "win": {'
' "a_dep": "a_url@a_rev"',
' },',
' "mac": {',
' "a_dep": "a_url@a_rev"',
' },',
'}',
]), '<unknown>')
self.assertEqual({
'deps': {
'a_dep': {'url': 'a_url@a_rev',
'dep_type': 'git',
'condition': '(checkout_mac) or (checkout_win)'},
},
}, local_scope)
def test_fails_to_merge_same_dep_with_different_revisions(self):
with self.assertRaises(gclient_eval.gclient_utils.Error) as cm:
gclient_eval.Parse('\n'.join([
'deps = {',
' "a_dep": {',
' "url": "a_url@a_rev",',
' "condition": "some_condition",',
' },',
'}',
'deps_os = {',
' "mac": {',
' "a_dep": "a_url@b_rev"',
' },',
'}',
]), '<unknown>')
self.assertIn('conflicts with existing deps', str(cm.exception))
def test_merges_hooks_os(self):
local_scope = gclient_eval.Parse('\n'.join([
'hooks = [',
' {',
' "action": ["a", "action"],',
' },',
']',
'hooks_os = {',
' "mac": [',
' {',
' "action": ["b", "action"]',
' },',
' ]',
'}',
]), '<unknown>')
self.assertEqual({
"hooks": [{"action": ["a", "action"]},
{"action": ["b", "action"], "condition": "checkout_mac"}],
}, local_scope)
if __name__ == '__main__':
level = logging.DEBUG if '-v' in sys.argv else logging.FATAL
logging.basicConfig(
level=level,
format='%(asctime).19s %(levelname)s %(filename)s:'
'%(lineno)s %(message)s')
unittest.main()
| CoherentLabs/depot_tools | tests/gclient_eval_unittest.py | Python | bsd-3-clause | 35,099 |
"""
Problem Statement
-----------------
Assume that there is a large data set of mostly unique samples where a hidden
binary variable is dependent on the number of similar samples that exist in the
set (i.e. a sample is called positive if it has many neighbors) and that our
goal is to label all samples in this set. It is easy to see that, given sparse
enough data, if a clustering method relies on the same sample property on which
the ground truth similarity space is defined, it will naturally separate the
samples into two groups -- those found in clusters and containing mostly
positives, and those found outside clusters and containing mostly negatives.
There would exist only one possible perfect clustering -- the one with a
single, entirely homogeneous cluster C that covers all positives present in the
data set. If one were to obtain such clustering, one could correctly label all
positive samples in one step with the simple rule, *all positive samples belong
to cluster C*. Under an imperfect clustering, on the other hand, the presence
of the given sample in a cluster of size two or more implies the sample is only
likely to be positive, with the confidence of the positive call monotonously
increasing with the size of the cluster.
In other words, our expectation from a good clustering is that it will help us
minimize the amount of work labeling samples.
The application that inspired the design of this metric was mining for positive
spam examples in large data sets of short user-generated content. Given large
enough data sets, spam content naturally forms clusters either because creative
rewriting of every single individual spam message is too expensive for spammers
to employ, or because, even if human or algorithmic rewriting is applied, one
can still find features that link individual spam messages to their creator or
to the product or service being promoted in the spam campaign. The finding was
consistent with what is reported in the literature [104]_.
Algorithm
---------
Given a clustering, we order the clusters from the largest one to the smallest
one. We then plot a cumulative step function where the width of the bin under a
given "step" is proportional to cluster size, and the height of the bin is
proportional to the expected number of positive samples seen so far [103]_. If a
sample is in a cluster of size one, we assume it is likely to be negative and
is therefore checked on an individual basis (the specific setting of cluster
size at which the expectation changes is our 'threshold' parameter). The
result of this assumption is that the expected contribution from unclustered
samples is equal to their actual contribution (we assume individual checking
always gives a correct answer). After two-way normalization, a perfect
clustering (i.e. where a single perfectly homogeneous cluster covers the entire
set of positives) will have an AUL score of 1.0. A failure to cluster at all
will result in an AUL of 0.5. A perverse clustering, i.e. one where many
negative samples fall into clusters whose size is above our threshold, or where
many positive samples remain unclustered (fall into clusters whose size does
not exceed the threshold), will result in an AUL somewhere between 0.0 and 0.5.
A special treatment is necessary for cases where clusters are tied by size. If
one were to treat tied clusters as a single group, one would obtain AUL of 1.0
when no clusters at all are present, which is against our desiderata. On the
other hand, if one were to treat tied clusters entirely separately, one would
obtain different results depending on the properties of the sorting algorithm,
also an undesirable situation. Always placing "heavy" clusters (i.e. those
containing more positives) towards the beginning or towards the end of the tied
group will result in, respectively, overestimating or underestimating the true
AUL. The solution here is to average the positive counts among all clusters in a
tied group, and then walk through them one by one, with the stepwise cumulative
function asymptotically approaching a diagonal from the group's bottom left
corner to the top right one. This way, a complete absence of clustering (i.e.
all clusters are of size one) will always result in AUL of 0.5.
The resulting AUL measure has some similarity with the Gini coefficient of
inequality [105]_ except we plot the corresponding curve in the opposite
direction (from "richest" to "poorest"), and do not subtract 0.5 from the
resulting score.
.. [103] We take the expected number of positives and not the actual number seen
so far as the vertical scale in order to penalize non-homogeneous
clusters. Otherwise the y=1.0 ceiling would be reached early in the
process even in very bad cases, for example when there is only one giant
non-homogeneous cluster.
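Example
-------
A minimal usage sketch (the clusters below are made-up data used purely for
illustration; a class label of 1 marks a positive sample and 0 a negative
one)::
    curve = LiftCurve.from_clusters([[1, 1, 1], [1, 0], [0], [0], [0]])
    score = curve.aul_score(threshold=1)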
References
----------
.. [104] `Whissell, J. S., & Clarke, C. L. (2011, September). Clustering for
semi-supervised spam filtering. In Proceedings of the 8th Annual
Collaboration, Electronic messaging, Anti-Abuse and Spam Conference
(pp. 125-134). ACM.
<https://doi.org/10.1145/2030376.2030391>`_
.. [105] `Wikipedia entry for Gini coefficient of inequality
<https://en.wikipedia.org/wiki/Gini_coefficient>`_
"""
import warnings
import numpy as np
from itertools import izip, chain
from operator import itemgetter
from sklearn.metrics.ranking import auc, roc_curve
from pymaptools.iter import aggregate_tuples
from pymaptools.containers import labels_to_clusters
def num2bool(num):
"""True if zero or positive real, False otherwise
When binarizing class labels, this lets us be consistent with Scikit-Learn
where binary labels can be {0, 1} with 0 being negative or {-1, 1} with -1
being negative.
"""
return num > 0
class LiftCurve(object):
"""Lift Curve for cluster-size correlated classification
"""
def __init__(self, score_groups):
self._score_groups = list(score_groups)
@classmethod
def from_counts(cls, counts_true, counts_pred):
"""Instantiates class from arrays of true and predicted counts
Parameters
----------
counts_true : array, shape = [n_clusters]
Count of positives in cluster
counts_pred : array, shape = [n_clusters]
Predicted number of positives in each cluster
"""
# convert input to a series of tuples
count_groups = izip(counts_pred, counts_true)
# sort tuples by predicted count in descending order
count_groups = sorted(count_groups, key=itemgetter(0), reverse=True)
# group tuples by predicted count so as to handle ties correctly
return cls(aggregate_tuples(count_groups))
@classmethod
def from_clusters(cls, clusters, is_class_pos=num2bool):
"""Instantiates class from clusters of class-coded points
Parameters
----------
clusters : collections.Iterable
List of lists of class labels
is_class_pos: label_true -> Bool
Boolean predicate used to binarize true (class) labels
"""
# take all non-empty clusters, score them by size and by number of
# ground truth positives
data = ((len(cluster), sum(is_class_pos(class_label) for class_label in cluster))
for cluster in clusters if cluster)
scores_pred, scores_true = zip(*data) or ([], [])
return cls.from_counts(scores_true, scores_pred)
@classmethod
def from_labels(cls, labels_true, labels_pred, is_class_pos=num2bool):
"""Instantiates class from arrays of classes and cluster sizes
Parameters
----------
labels_true : array, shape = [n_samples]
Class labels. If binary, 'is_class_pos' is optional
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
is_class_pos: label_true -> Bool
Boolean predicate used to binarize true (class) labels
"""
clusters = labels_to_clusters(labels_true, labels_pred)
return cls.from_clusters(clusters, is_class_pos=is_class_pos)
def aul_score(self, threshold=1, plot=False):
"""Calculate AUL score
Parameters
----------
threshold : int, optional (default=1)
            only predicted scores above this number are considered accurate
plot : bool, optional (default=False)
whether to return X and Y data series for plotting
"""
total_any = 0
total_true = 0
assumed_vertical = 0
aul = 0.0
if plot:
xs, ys = [], []
bin_height = 0.0
bin_right_edge = 0.0
        # iterate over the groups of clusters that share the same predicted
        # score (grouped and sorted by from_counts) and accumulate the AUL
        # metric
for pred_score, true_scores in self._score_groups:
# number of clusters
num_true_scores = len(true_scores)
# sum total of positives in all clusters of given size
group_height = sum(true_scores)
total_true += group_height
# cluster size x number of clusters of given size
group_width = pred_score * num_true_scores
total_any += group_width
if pred_score > threshold:
# penalize non-homogeneous clusters simply by assuming that they
# are homogeneous, in which case their expected vertical
# contribution should be equal to their horizontal contribution.
height_incr = group_width
else:
                # clusters at or below the threshold (singletons under the
                # default, which are homogeneous by definition) contribute
                # their actual count of true positives.
height_incr = group_height
assumed_vertical += height_incr
if plot:
avg_true_score = group_height / float(num_true_scores)
for _ in true_scores:
bin_height += avg_true_score
aul += bin_height * pred_score
if plot:
xs.append(bin_right_edge)
bin_right_edge += pred_score
xs.append(bin_right_edge)
ys.append(bin_height)
ys.append(bin_height)
else:
# if not tasked with generating plots, use a geometric method
# instead of looping
aul += (total_true * group_width -
((num_true_scores - 1) * pred_score * group_height) / 2.0)
if total_true > total_any:
warnings.warn(
"Number of positives found (%d) exceeds total count of %d"
% (total_true, total_any)
)
rect_area = assumed_vertical * total_any
        # special case: normalization bounds the AUL by the area of the
        # bounding rectangle, so when that area (the denominator below) is
        # zero, the AUL score is defined to be zero as well.
aul = 0.0 if rect_area == 0 else aul / rect_area
if plot:
xs = np.array(xs, dtype=float) / total_any
ys = np.array(ys, dtype=float) / assumed_vertical
return aul, xs, ys
else:
return aul
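    # Hedged worked example (not part of the original class): with the default
    # threshold=1, clusters = [[1, 1], [0]] produce the groups (pred_score=2,
    # true_scores=[2]) and (pred_score=1, true_scores=[0]).  The first group is
    # assumed homogeneous (height_incr = group_width = 2), the second adds
    # nothing, so aul = 4 + 2 = 6 over a bounding rectangle of 2 * 3 = 6, an
    # AUL of 1.0.  All-singleton clusterings instead drift toward the 0.5
    # baseline described in the module docstring as the number of points grows.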
def plot(self, threshold=1, fill=True, marker=None, save_to=None): # pragma: no cover
"""Create a graphical representation of Lift Curve
Requires Matplotlib
Parameters
----------
        threshold : int, optional (default=1)
            only predicted scores above this number are considered accurate
        fill : bool, optional (default=True)
            whether to shade the area under the lift curve
        marker : str, optional (default=None)
            marker style to draw at each bend, if any
save_to : str, optional (default=None)
If specified, save the plot to path instead of displaying
"""
from matplotlib import pyplot as plt
score, xs, ys = self.aul_score(threshold=threshold, plot=True)
fig, ax = plt.subplots()
ax.plot(xs, ys, marker=marker, linestyle='-')
if fill:
ax.fill([0.0] + list(xs) + [1.0], [0.0] + list(ys) + [0.0], 'b', alpha=0.2)
ax.plot([0.0, 1.0], [0.0, 1.0], linestyle='--', color='grey')
ax.plot([0.0, 1.0], [1.0, 1.0], linestyle='--', color='grey')
ax.plot([1.0, 1.0], [0.0, 1.0], linestyle='--', color='grey')
ax.set_xlim(xmin=0.0, xmax=1.03)
ax.set_ylim(ymin=0.0, ymax=1.04)
ax.set_xlabel("portion total")
ax.set_ylabel("portion expected positive")
ax.set_title("Lift Curve (AUL=%.3f)" % score)
if save_to is None:
fig.show()
else:
fig.savefig(save_to)
plt.close(fig)
def aul_score_from_clusters(clusters):
"""Calculate AUL score given clusters of class-coded points
Parameters
----------
clusters : collections.Iterable
List of clusters where each point is binary-coded according to true
class.
Returns
-------
aul : float
"""
return LiftCurve.from_clusters(clusters).aul_score()
def aul_score_from_labels(y_true, labels_pred):
"""AUL score given array of classes and array of cluster sizes
Parameters
----------
y_true : array, shape = [n_samples]
True binary labels in range {0, 1}
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
aul : float
"""
return LiftCurve.from_labels(y_true, labels_pred).aul_score()
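# Illustrative sketch (not part of the original module): the convenience
# wrappers above make the worked example from LiftCurve.aul_score a one-liner.
# The cluster layout below is hypothetical; one pure positive cluster ranked
# ahead of a singleton negative reaches the 1.0 ceiling.
def _aul_usage_sketch():
    # expected to evaluate to 1.0 for this perfectly separated toy clustering
    return aul_score_from_clusters([[1, 1], [0]])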
class RocCurve(object):
"""Receiver Operating Characteristic (ROC)
::
>>> c = RocCurve.from_labels([0, 0, 1, 1],
... [0.1, 0.4, 0.35, 0.8])
>>> c.auc_score()
0.75
>>> c.max_informedness()
0.5
"""
def __init__(self, fprs, tprs, thresholds=None, pos_label=None,
sample_weight=None):
self.fprs = fprs
self.tprs = tprs
self.thresholds = thresholds
self.pos_label = pos_label
self.sample_weight = sample_weight
def plot(self, fill=True, marker=None, save_to=None): # pragma: no cover
"""Plot the ROC curve
"""
from matplotlib import pyplot as plt
score = self.auc_score()
xs, ys = self.fprs, self.tprs
fig, ax = plt.subplots()
ax.plot(xs, ys, marker=marker, linestyle='-')
if fill:
ax.fill([0.0] + list(xs) + [1.0], [0.0] + list(ys) + [0.0], 'b', alpha=0.2)
ax.plot([0.0, 1.0], [0.0, 1.0], linestyle='--', color='grey')
ax.plot([0.0, 1.0], [1.0, 1.0], linestyle='--', color='grey')
ax.plot([1.0, 1.0], [0.0, 1.0], linestyle='--', color='grey')
ax.set_xlim(xmin=0.0, xmax=1.03)
ax.set_ylim(ymin=0.0, ymax=1.04)
ax.set_ylabel('TPR')
ax.set_xlabel('FPR')
ax.set_title("ROC Curve (AUC=%.3f)" % score)
if save_to is None:
fig.show()
else:
fig.savefig(save_to)
plt.close(fig)
@classmethod
def from_scores(cls, scores_neg, scores_pos):
"""Instantiate given scores of two ground truth classes
The score arrays don't have to be the same length.
"""
scores_pos = ((1, x) for x in scores_pos if not np.isnan(x))
scores_neg = ((0, x) for x in scores_neg if not np.isnan(x))
all_scores = zip(*chain(scores_neg, scores_pos)) or ([], [])
return cls.from_labels(*all_scores)
@classmethod
def from_labels(cls, labels_true, y_score, is_class_pos=num2bool):
"""Instantiate assuming binary labeling of {0, 1}
        Parameters
        ----------
        labels_true : array, shape = [n_samples]
Class labels. If binary, 'is_class_pos' is optional
y_score : array, shape = [n_samples]
Predicted scores
is_class_pos: label_true -> Bool
Boolean predicate used to binarize true (class) labels
"""
        # binarize the true labels with the supplied predicate
y_true = map(is_class_pos, labels_true)
# calculate axes
fprs, tprs, thresholds = roc_curve(
y_true, y_score, pos_label=True)
return cls(fprs, tprs, thresholds=thresholds)
@classmethod
def from_clusters(cls, clusters, is_class_pos=num2bool):
"""Instantiates class from clusters of class-coded points
Parameters
----------
clusters : collections.Iterable
List of lists of class labels
is_class_pos: label_true -> Bool
Boolean predicate used to binarize true (class) labels
"""
y_true = []
y_score = []
for cluster in clusters:
pred_cluster = len(cluster)
for point in cluster:
true_cluster = is_class_pos(point)
y_true.append(true_cluster)
y_score.append(pred_cluster)
return cls.from_labels(y_true, y_score)
def auc_score(self):
"""Replacement for Scikit-Learn's method
If number of Y classes is other than two, a warning will be triggered
but no exception thrown (the return value will be a NaN). Also, we
don't reorder arrays during ROC calculation since they are assumed to be
in order.
"""
return auc(self.fprs, self.tprs, reorder=False)
def optimal_cutoff(self, scoring_method):
"""Optimal cutoff point on ROC curve under scoring method
The scoring method must take two arguments: fpr and tpr.
"""
max_index = np.NINF
opt_pair = (np.nan, np.nan)
for pair in izip(self.fprs, self.tprs):
index = scoring_method(*pair)
if index > max_index:
opt_pair = pair
max_index = index
return opt_pair, max_index
@staticmethod
def _informedness(fpr, tpr):
return tpr - fpr
def max_informedness(self):
"""Maximum value of Informedness (TPR minus FPR) on a ROC curve
A diagram of what this measure looks like is shown in [101]_. Note a
correspondence between the definitions of this measure and that of
Kolmogorov-Smirnov's supremum statistic.
References
----------
.. [101] `Wikipedia entry for Youden's J statistic
<https://en.wikipedia.org/wiki/Youden%27s_J_statistic>`_
"""
return self.optimal_cutoff(self._informedness)[1]
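    # Hedged note (not part of the original class): for the doctest data in the
    # class docstring ([0, 0, 1, 1] vs. [0.1, 0.4, 0.35, 0.8]) informedness
    # J = TPR - FPR peaks at the operating point (fpr, tpr) = (0.0, 0.5), so
    # optimal_cutoff(self._informedness) returns ((0.0, 0.5), 0.5) and
    # max_informedness() returns 0.5, matching the doctest above.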
def roc_auc_score(y_true, y_score, sample_weight=None):
"""AUC score for a ROC curve
Replaces Scikit Learn implementation (given binary ``y_true``).
"""
return RocCurve.from_labels(y_true, y_score).auc_score()
def dist_auc(scores0, scores1):
"""AUC score for two distributions, with NaN correction
Note: arithmetic mean appears to be appropriate here, as other means don't
result in total of 1.0 when sides are switched.
"""
scores0_len = len(scores0)
scores1_len = len(scores1)
scores0p = [x for x in scores0 if not np.isnan(x)]
scores1p = [x for x in scores1 if not np.isnan(x)]
scores0n_len = scores0_len - len(scores0p)
scores1n_len = scores1_len - len(scores1p)
# ``nan_pairs`` are pairs for which it is impossible to define order, due
# to at least one of the members of each being a NaN. ``def_pairs`` are
# pairs for which order can be established.
all_pairs = 2 * scores0_len * scores1_len
nan_pairs = scores0n_len * scores1_len + scores1n_len * scores0_len
def_pairs = all_pairs - nan_pairs
# the final score is the average of the score for the defined portion and
# of random-chance AUC (0.5), weighted according to the number of pairs in
# each group.
auc_score = RocCurve.from_scores(scores0p, scores1p).auc_score()
return np.average([auc_score, 0.5], weights=[def_pairs, nan_pairs])
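# Illustrative sketch (not part of the original module): the score arrays below
# are hypothetical.  With no NaNs, dist_auc reduces to the plain ROC AUC of the
# two score distributions (1.0 here, since they are fully separable); adding a
# NaN removes some pairs from the defined set and pulls the result toward the
# chance level of 0.5 in proportion to the number of undefined pairs.
def _dist_auc_usage_sketch():
    clean = dist_auc([0.1, 0.2, 0.3], [0.4, 0.5, 0.6])
    damped = dist_auc([0.1, 0.2, float('nan')], [0.4, 0.5, 0.6])
    return clean, damped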
| escherba/lsh-hdc | lsh_hdc/ranking.py | Python | bsd-3-clause | 19,946 |
#from https://github.com/serge-sans-paille/pythran/issues/1229
#runas import numpy as np; x = np.arange(3., 10.); empirical(x, 3., .5)
import numpy as np
#pythran export empirical(float[:], float, float)
def empirical(ds, alpha, x):
sds = np.sort(ds)
ds_to_the_alpha = sds**alpha
fractions = ds_to_the_alpha #/ sum (ds_to_the_alpha)
thresholds = np.cumsum(fractions)
thresholds /= thresholds[-1]
i = find_first (thresholds, lambda u: x < u)
return i
#pthran export find_first(float[:], bool (float))
def find_first (seq, pred):
for i,x in enumerate (seq):
print(i, x, pred(x))
if pred(x):
return i
return None
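# Hedged worked example (mirrors the #runas line at the top of this file): for
# ds = np.arange(3., 10.), alpha = 3. and x = 0.5 the normalised cumulative
# thresholds come out roughly as [0.013, 0.045, 0.107, 0.214, 0.384, 0.638,
# 1.0], so the first threshold exceeding 0.5 sits at index 5 and
# empirical(ds, 3., 0.5) returns 5.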
| serge-sans-paille/pythran | pythran/tests/cases/empirical.py | Python | bsd-3-clause | 678 |
import numpy as np
from nose.tools import raises
from skimage.filter import median_filter
def test_00_00_zeros():
'''The median filter on an array of all zeros should be zero'''
result = median_filter(np.zeros((10, 10)), 3, np.ones((10, 10), bool))
assert np.all(result == 0)
def test_00_01_all_masked():
'''Test a completely masked image
Regression test of IMG-1029'''
result = median_filter(np.zeros((10, 10)), 3, np.zeros((10, 10), bool))
assert (np.all(result == 0))
def test_00_02_all_but_one_masked():
mask = np.zeros((10, 10), bool)
mask[5, 5] = True
median_filter(np.zeros((10, 10)), 3, mask)
def test_01_01_mask():
'''The median filter, masking a single value'''
img = np.zeros((10, 10))
img[5, 5] = 1
mask = np.ones((10, 10), bool)
mask[5, 5] = False
result = median_filter(img, 3, mask)
assert (np.all(result[mask] == 0))
np.testing.assert_equal(result[5, 5], 1)
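# Hedged note (not part of the original tests): in this module the boolean mask
# marks the *valid* pixels (True means the pixel participates in the median
# window), which is why the test above expects the masked-out spike at (5, 5)
# to be ignored when filtering its neighbours while its own location keeps the
# original value.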
def test_02_01_median():
'''A median filter larger than the image = median of image'''
np.random.seed(0)
img = np.random.uniform(size=(9, 9))
result = median_filter(img, 20, np.ones((9, 9), bool))
np.testing.assert_equal(result[0, 0], np.median(img))
assert (np.all(result == np.median(img)))
def test_02_02_median_bigger():
'''Use an image of more than 255 values to test approximation'''
np.random.seed(0)
img = np.random.uniform(size=(20, 20))
result = median_filter(img, 40, np.ones((20, 20), bool))
sorted = np.ravel(img)
sorted.sort()
min_acceptable = sorted[198]
max_acceptable = sorted[202]
assert (np.all(result >= min_acceptable))
assert (np.all(result <= max_acceptable))
def test_03_01_shape():
'''Make sure the median filter is the expected octagonal shape'''
radius = 5
a_2 = int(radius / 2.414213)
i, j = np.mgrid[-10:11, -10:11]
octagon = np.ones((21, 21), bool)
#
# constrain the octagon mask to be the points that are on
# the correct side of the 8 edges
#
octagon[i < -radius] = False
octagon[i > radius] = False
octagon[j < -radius] = False
octagon[j > radius] = False
octagon[i + j < -radius - a_2] = False
octagon[j - i > radius + a_2] = False
octagon[i + j > radius + a_2] = False
octagon[i - j > radius + a_2] = False
np.random.seed(0)
img = np.random.uniform(size=(21, 21))
result = median_filter(img, radius, np.ones((21, 21), bool))
sorted = img[octagon]
sorted.sort()
    min_acceptable = sorted[len(sorted) // 2 - 1]
    max_acceptable = sorted[len(sorted) // 2 + 1]
assert (result[10, 10] >= min_acceptable)
assert (result[10, 10] <= max_acceptable)
def test_04_01_half_masked():
'''Make sure that the median filter can handle large masked areas.'''
img = np.ones((20, 20))
mask = np.ones((20, 20), bool)
mask[10:, :] = False
img[~ mask] = 2
img[1, 1] = 0 # to prevent short circuit for uniform data.
result = median_filter(img, 5, mask)
# in partial coverage areas, the result should be only
# from the masked pixels
assert (np.all(result[:14, :] == 1))
# in zero coverage areas, the result should be the lowest
# value in the valid area
assert (np.all(result[15:, :] == np.min(img[mask])))
def test_default_values():
img = (np.random.random((20, 20)) * 255).astype(np.uint8)
mask = np.ones((20, 20), dtype=np.uint8)
result1 = median_filter(img, radius=2, mask=mask, percent=50)
result2 = median_filter(img)
np.testing.assert_array_equal(result1, result2)
@raises(ValueError)
def test_insufficient_size():
img = (np.random.random((20, 20)) * 255).astype(np.uint8)
median_filter(img, radius=1)
@raises(TypeError)
def test_wrong_shape():
img = np.empty((10, 10, 3))
median_filter(img)
if __name__ == "__main__":
np.testing.run_module_suite()
| chintak/scikit-image | skimage/filter/tests/test_ctmf.py | Python | bsd-3-clause | 3,895 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'BatchLocationStatus.non_response'
db.add_column(u'survey_batchlocationstatus', 'non_response',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'BatchLocationStatus.non_response'
db.delete_column(u'survey_batchlocationstatus', 'non_response')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'locations.location': {
'Meta': {'object_name': 'Location'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'parent_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'parent_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'point': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['locations.Point']", 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['locations.Location']"}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locations'", 'null': 'True', 'to': u"orm['locations.LocationType']"})
},
u'locations.locationtype': {
'Meta': {'object_name': 'LocationType'},
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'primary_key': 'True'})
},
u'locations.point': {
'Meta': {'object_name': 'Point'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'max_digits': '13', 'decimal_places': '10'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'max_digits': '13', 'decimal_places': '10'})
},
'survey.answerrule': {
'Meta': {'object_name': 'AnswerRule'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'batch_rule'", 'null': 'True', 'to': "orm['survey.Batch']"}),
'condition': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'next_question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parent_question_rules'", 'null': 'True', 'to': "orm['survey.Question']"}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'rule'", 'null': 'True', 'to': "orm['survey.Question']"}),
'validate_with_max_value': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'}),
'validate_with_min_value': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'}),
'validate_with_option': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answer_rule'", 'null': 'True', 'to': "orm['survey.QuestionOption']"}),
'validate_with_question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Question']", 'null': 'True'}),
'validate_with_value': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'})
},
'survey.backend': {
'Meta': {'object_name': 'Backend'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'})
},
'survey.batch': {
'Meta': {'unique_together': "(('survey', 'name'),)", 'object_name': 'Batch'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'batch'", 'null': 'True', 'to': "orm['survey.Survey']"})
},
'survey.batchlocationstatus': {
'Meta': {'object_name': 'BatchLocationStatus'},
'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'open_locations'", 'null': 'True', 'to': "orm['survey.Batch']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'open_batches'", 'null': 'True', 'to': u"orm['locations.Location']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'non_response': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'survey.batchquestionorder': {
'Meta': {'object_name': 'BatchQuestionOrder'},
'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'batch_question_order'", 'to': "orm['survey.Batch']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'question_batch_order'", 'to': "orm['survey.Question']"})
},
'survey.formula': {
'Meta': {'object_name': 'Formula'},
'count': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'as_count'", 'null': 'True', 'to': "orm['survey.Question']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'denominator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'as_denominator'", 'null': 'True', 'to': "orm['survey.Question']"}),
'denominator_options': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'denominator_options'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['survey.QuestionOption']"}),
'groups': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'as_group'", 'null': 'True', 'to': "orm['survey.HouseholdMemberGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'formula'", 'null': 'True', 'to': "orm['survey.Indicator']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'numerator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'as_numerator'", 'null': 'True', 'to': "orm['survey.Question']"}),
'numerator_options': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'numerator_options'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['survey.QuestionOption']"})
},
'survey.groupcondition': {
'Meta': {'unique_together': "(('value', 'attribute', 'condition'),)", 'object_name': 'GroupCondition'},
'attribute': ('django.db.models.fields.CharField', [], {'default': "'AGE'", 'max_length': '20'}),
'condition': ('django.db.models.fields.CharField', [], {'default': "'EQUALS'", 'max_length': '20'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'conditions'", 'symmetrical': 'False', 'to': "orm['survey.HouseholdMemberGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'survey.household': {
'Meta': {'object_name': 'Household'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household_code': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'households'", 'null': 'True', 'to': "orm['survey.Investigator']"}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['locations.Location']", 'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'random_sample_number': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'survey_household'", 'null': 'True', 'to': "orm['survey.Survey']"}),
'uid': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
'survey.householdbatchcompletion': {
'Meta': {'object_name': 'HouseholdBatchCompletion'},
'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'batch_completion_households'", 'null': 'True', 'to': "orm['survey.Batch']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'batch_completion_batches'", 'null': 'True', 'to': "orm['survey.Household']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Investigator']", 'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
'survey.householdhead': {
'Meta': {'object_name': 'HouseholdHead', '_ormbases': ['survey.HouseholdMember']},
u'householdmember_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['survey.HouseholdMember']", 'unique': 'True', 'primary_key': 'True'}),
'level_of_education': ('django.db.models.fields.CharField', [], {'default': "'Primary'", 'max_length': '100', 'null': 'True'}),
'occupation': ('django.db.models.fields.CharField', [], {'default': "'16'", 'max_length': '100'}),
'resident_since_month': ('django.db.models.fields.PositiveIntegerField', [], {'default': '5'}),
'resident_since_year': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1984'})
},
'survey.householdmember': {
'Meta': {'object_name': 'HouseholdMember'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'household_member'", 'to': "orm['survey.Household']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'male': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'surname': ('django.db.models.fields.CharField', [], {'max_length': '25'})
},
'survey.householdmemberbatchcompletion': {
'Meta': {'object_name': 'HouseholdMemberBatchCompletion'},
'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'completed_households'", 'null': 'True', 'to': "orm['survey.Batch']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'completed_batches'", 'null': 'True', 'to': "orm['survey.Household']"}),
'householdmember': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'completed_member_batches'", 'null': 'True', 'to': "orm['survey.HouseholdMember']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'completed_batches'", 'null': 'True', 'to': "orm['survey.Investigator']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
'survey.householdmembergroup': {
'Meta': {'object_name': 'HouseholdMemberGroup'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'unique': 'True', 'max_length': '5'})
},
'survey.indicator': {
'Meta': {'object_name': 'Indicator'},
'batch': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Batch']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'measure': ('django.db.models.fields.CharField', [], {'default': "'Percentage'", 'max_length': '255'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'module': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'indicator'", 'to': "orm['survey.QuestionModule']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'survey.investigator': {
'Meta': {'object_name': 'Investigator'},
'age': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'backend': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Backend']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_blocked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'English'", 'max_length': '100', 'null': 'True'}),
'level_of_education': ('django.db.models.fields.CharField', [], {'default': "'Primary'", 'max_length': '100', 'null': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['locations.Location']", 'null': 'True'}),
'male': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mobile_number': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'weights': ('django.db.models.fields.FloatField', [], {'default': '0'})
},
'survey.locationautocomplete': {
'Meta': {'object_name': 'LocationAutoComplete'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['locations.Location']", 'null': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'survey.locationcode': {
'Meta': {'object_name': 'LocationCode'},
'code': ('django.db.models.fields.CharField', [], {'default': '0', 'max_length': '10'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'code'", 'to': u"orm['locations.Location']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
'survey.locationtypedetails': {
'Meta': {'object_name': 'LocationTypeDetails'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'details'", 'null': 'True', 'to': u"orm['locations.Location']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'has_code': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'length_of_code': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'location_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'details'", 'to': u"orm['locations.LocationType']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'survey.locationweight': {
'Meta': {'object_name': 'LocationWeight'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'weight'", 'to': u"orm['locations.Location']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'selection_probability': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'location_weight'", 'to': "orm['survey.Survey']"})
},
'survey.multichoiceanswer': {
'Meta': {'object_name': 'MultiChoiceAnswer'},
'answer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.QuestionOption']", 'null': 'True'}),
'batch': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Batch']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'multichoiceanswer'", 'null': 'True', 'to': "orm['survey.Household']"}),
'householdmember': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'multichoiceanswer'", 'null': 'True', 'to': "orm['survey.HouseholdMember']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'multichoiceanswer'", 'null': 'True', 'to': "orm['survey.Investigator']"}),
'is_old': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Question']", 'null': 'True'}),
'rule_applied': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.AnswerRule']", 'null': 'True'})
},
'survey.numericalanswer': {
'Meta': {'object_name': 'NumericalAnswer'},
'answer': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '5', 'null': 'True'}),
'batch': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Batch']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'numericalanswer'", 'null': 'True', 'to': "orm['survey.Household']"}),
'householdmember': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'numericalanswer'", 'null': 'True', 'to': "orm['survey.HouseholdMember']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'numericalanswer'", 'null': 'True', 'to': "orm['survey.Investigator']"}),
'is_old': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Question']", 'null': 'True'}),
'rule_applied': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.AnswerRule']", 'null': 'True'})
},
'survey.question': {
'Meta': {'object_name': 'Question'},
'answer_type': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'batches': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'questions'", 'null': 'True', 'to': "orm['survey.Batch']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'question_group'", 'null': 'True', 'to': "orm['survey.HouseholdMemberGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'module': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'module_question'", 'null': 'True', 'to': "orm['survey.QuestionModule']"}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'children'", 'null': 'True', 'to': "orm['survey.Question']"}),
'subquestion': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
'survey.questionmodule': {
'Meta': {'object_name': 'QuestionModule'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'survey.questionoption': {
'Meta': {'object_name': 'QuestionOption'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'null': 'True', 'to': "orm['survey.Question']"}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
'survey.randomhouseholdselection': {
'Meta': {'object_name': 'RandomHouseHoldSelection'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile_number': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'no_of_households': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'selected_households': ('django.db.models.fields.CharField', [], {'max_length': '510'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'random_household'", 'null': 'True', 'to': "orm['survey.Survey']"})
},
'survey.survey': {
'Meta': {'object_name': 'Survey'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'has_sampling': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'sample_size': ('django.db.models.fields.PositiveIntegerField', [], {'default': '10', 'max_length': '2'}),
'type': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'survey.textanswer': {
'Meta': {'object_name': 'TextAnswer'},
'answer': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'batch': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Batch']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'textanswer'", 'null': 'True', 'to': "orm['survey.Household']"}),
'householdmember': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'textanswer'", 'null': 'True', 'to': "orm['survey.HouseholdMember']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'textanswer'", 'null': 'True', 'to': "orm['survey.Investigator']"}),
'is_old': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Question']", 'null': 'True'}),
'rule_applied': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.AnswerRule']", 'null': 'True'})
},
'survey.unknowndobattribute': {
'Meta': {'object_name': 'UnknownDOBAttribute'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household_member': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'unknown_dob_attribute'", 'to': "orm['survey.HouseholdMember']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '15'})
},
'survey.uploaderrorlog': {
'Meta': {'object_name': 'UploadErrorLog'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'error': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'row_number': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'survey.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile_number': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'userprofile'", 'unique': 'True', 'to': u"orm['auth.User']"})
}
}
complete_apps = ['survey'] | antsmc2/mics | survey/migrations/0119_auto__add_field_batchlocationstatus_non_response.py | Python | bsd-3-clause | 36,447 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# moldynplot.StateProbFigureManager.py
#
# Copyright (C) 2015-2017 Karl T Debiec
# All rights reserved.
#
# This software may be modified and distributed under the terms of the
# BSD license. See the LICENSE file for details.
"""
Generates one or more state probability figures to specifications in a
YAML file.
"""
################################### MODULES ###################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
if __name__ == "__main__":
__package__ = str("moldynplot")
import moldynplot
from .myplotspec.FigureManager import FigureManager
from .myplotspec.manage_defaults_presets import manage_defaults_presets
from .myplotspec.manage_kwargs import manage_kwargs
################################### CLASSES ###################################
class StateProbFigureManager(FigureManager):
"""
Class to manage the generation of probability distribution figures
"""
defaults = """
draw_figure:
subplot_kw:
autoscale_on: False
axisbg: none
multi_tick_params:
bottom: off
left: on
right: off
top: off
shared_legend: True
shared_legend_kw:
spines: False
handle_kw:
ls: none
marker: s
mec: black
legend_kw:
borderaxespad: 0.0
frameon: False
handletextpad: 0.0
loc: 9
numpoints: 1
draw_subplot:
tick_params:
bottom: off
direction: out
left: on
right: off
top: off
title_kw:
verticalalignment: bottom
grid: True
grid_kw:
axis: y
b: True
color: [0.7,0.7,0.7]
linestyle: '-'
xticklabels: []
yticks: [0.0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0]
draw_dataset:
bar_kw:
align: center
width: 0.6
ecolor: black
zorder: 3
error_kw:
zorder: 4
handle_kw:
ls: none
marker: s
mec: black
"""
available_presets = """
pbound:
help: Probability of bound state
draw_subplot:
ylabel: $P_{bound}$
yticklabels: [0.0,"",0.2,"",0.4,"",0.6,"",0.8,"",1.0]
presentation_wide:
class: target
inherits: presentation_wide
draw_figure:
bottom: 1.70
left: 4.60
right: 4.60
sub_width: 3.20
sub_height: 3.20
top: 1.00
wspace: 0.20
shared_legend: True
shared_legend_kw:
left: 4.60
sub_width: 10.00
sub_height: 1.50
bottom: 0.00
legend_kw:
columnspacing: 1
labelspacing: 0.8
legend_fp: 20r
loc: 9
ncol: 4
draw_dataset:
bar_kw:
error_kw:
capsize: 4
capthick: 2
elinewidth: 2
linewidth: 2
edgecolor: [0,0,0]
handle_kw:
ms: 20
mew: 2.0
manuscript:
class: target
inherits: manuscript
draw_figure:
left: 0.40
sub_width: 1.50
wspace: 0.10
right: 0.10
top: 0.35
sub_height: 1.50
bottom: 0.45
shared_legend: True
shared_legend_kw:
left: 0.40
sub_width: 1.50
sub_height: 0.40
bottom: 0.00
handle_kw:
mew: 0.5
ms: 5
legend_kw:
columnspacing: 0.5
labelspacing: 0.5
ncol: 4
draw_dataset:
bar_kw:
error_kw:
capsize: 2
capthick: 0.5
elinewidth: 0.5
linewidth: 0.5
edgecolor: [0,0,0]
handle_kw:
markeredgewidth: 0.5
markersize: 5
"""
@manage_defaults_presets()
@manage_kwargs()
def draw_dataset(self, subplot, experiment=None, x=None, label="",
handles=None, draw_bar=True, draw_plot=False, verbose=1, debug=0,
**kwargs):
"""
Draws a dataset.
Arguments:
subplot (Axes): Axes on which to draw
x (float): X coordinate of bar
label (str, optional): Dataset label
color (str, list, ndarray, float, optional): Dataset color
bar_kw (dict, optional): Additional keyword arguments passed
              to subplot.bar()
handles (OrderedDict, optional): Nascent OrderedDict of
[labels]: handles on subplot
kwargs (dict): Additional keyword arguments
"""
from .myplotspec import get_colors, multi_get_copy
from .dataset import H5Dataset
# Handle missing input gracefully
handle_kw = multi_get_copy("handle_kw", kwargs, {})
if experiment is not None:
subplot.axhspan(experiment[0], experiment[1], lw=2,
color=[0.6, 0.6, 0.6])
if kwargs.get("draw_experiment_handle", True):
handles["Experiment"] = \
subplot.plot([-10, -10], [-10, -10], mfc=[0.6, 0.6, 0.6],
**handle_kw)[0]
return
if "infile" not in kwargs:
if "P unbound" in kwargs and "P unbound se" in kwargs:
y = 1.0 - kwargs.pop("P unbound")
yerr = kwargs.pop("P unbound se") * 1.96
elif "y" in kwargs and "y se" in kwargs:
y = kwargs.pop("y")
yerr = kwargs.pop("y se") * 1.96
elif "P unbound" in kwargs and not draw_bar and draw_plot:
y = 1.0 - kwargs.pop("P unbound")
else:
return
else:
dataset = H5Dataset(default_address="assign/stateprobs",
default_key="pbound", **kwargs)
y = 1.0 - dataset.datasets["pbound"]["P unbound"][0]
yerr = dataset.datasets["pbound"]["P unbound se"][0] * 1.96
# Configure plot settings
# Plot
if draw_bar:
bar_kw = multi_get_copy("bar_kw", kwargs, {})
get_colors(bar_kw, kwargs)
barplot = subplot.bar(x, y, yerr=yerr, **bar_kw)
handle_kw = multi_get_copy("handle_kw", kwargs, {})
handle_kw["mfc"] = barplot.patches[0].get_facecolor()
handle = subplot.plot([-10, -10], [-10, -10], **handle_kw)[0]
if draw_plot:
plot_kw = multi_get_copy("plot_kw", kwargs, {})
get_colors(plot_kw)
subplot.plot(x, y, **plot_kw)
if handles is not None and label is not None:
handles[label] = handle
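    # Hedged usage note (not part of the original class): draw_dataset accepts
    # three kinds of input, mirroring the branches above: an experiment=(lo, hi)
    # tuple drawn as a shaded reference band, explicit "P unbound"/"P unbound
    # se" (or y/y se) keywords drawn as a bar with a 1.96*se (95%) error bar, or
    # an infile pointing at an HDF5 file whose assign/stateprobs address and
    # pbound key supply the same quantities.  A hypothetical call of the second
    # form:
    #     self.draw_dataset(subplot, x=1, label="TI",
    #                       **{"P unbound": 0.25, "P unbound se": 0.02})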
#################################### MAIN #####################################
if __name__ == "__main__":
StateProbFigureManager().main()
| KarlTDebiec/myplotspec_sim | moldynplot/StateProbFigureManager.py | Python | bsd-3-clause | 7,284 |
from .. import fixtures
import unittest
import arbor as arb
"""
tests for (dynamically loaded) catalogues
"""
class recipe(arb.recipe):
def __init__(self):
arb.recipe.__init__(self)
self.tree = arb.segment_tree()
self.tree.append(arb.mnpos, (0, 0, 0, 10), (1, 0, 0, 10), 1)
self.props = arb.neuron_cable_properties()
try:
self.cat = arb.default_catalogue()
self.props.register(self.cat)
except:
print("Catalogue not found. Are you running from build directory?")
raise
d = arb.decor()
d.paint('(all)', arb.density('pas'))
d.set_property(Vm=0.0)
self.cell = arb.cable_cell(self.tree, arb.label_dict(), d)
def global_properties(self, _):
return self.props
def num_cells(self):
return 1
def cell_kind(self, gid):
return arb.cell_kind.cable
def cell_description(self, gid):
return self.cell
class TestCatalogues(unittest.TestCase):
def test_nonexistent(self):
with self.assertRaises(FileNotFoundError):
arb.load_catalogue("_NO_EXIST_.so")
@fixtures.dummy_catalogue
def test_shared_catalogue(self, dummy_catalogue):
cat = dummy_catalogue
nms = [m for m in cat]
self.assertEqual(nms, ['dummy'], "Expected equal names.")
for nm in nms:
prm = list(cat[nm].parameters.keys())
self.assertEqual(prm, ['gImbar'], "Expected equal parameters on mechanism '{}'.".format(nm))
def test_simulation(self):
rcp = recipe()
ctx = arb.context()
dom = arb.partition_load_balance(rcp, ctx)
sim = arb.simulation(rcp, dom, ctx)
sim.run(tfinal=30)
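    # Hedged note (not part of the original tests): this exercises the minimal
    # arbor pipeline end to end, i.e. build a recipe, create a context,
    # distribute the cells with partition_load_balance, construct the
    # simulation and advance it with run(tfinal=30), 30 ms of biological time.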
def test_empty(self):
def len(cat):
return sum(1 for _ in cat)
def hash_(cat):
return hash(" ".join(sorted(cat)))
cat = arb.catalogue()
ref = arb.default_catalogue()
other = arb.default_catalogue()
# Test empty constructor
self.assertEqual(0, len(cat), "Expected no mechanisms in `arbor.catalogue()`.")
# Test empty extend
other.extend(cat, "")
self.assertEqual(hash_(ref), hash_(other), "Extending cat with empty should not change cat.")
self.assertEqual(0, len(cat), "Extending cat with empty should not change empty.")
other.extend(cat, "prefix/")
self.assertEqual(hash_(ref), hash_(other), "Extending cat with prefixed empty should not change cat.")
self.assertEqual(0, len(cat), "Extending cat with prefixed empty should not change empty.")
cat.extend(other, "")
self.assertEqual(hash_(other), hash_(cat), "Extending empty with cat should turn empty into cat.")
cat = arb.catalogue()
cat.extend(other, "prefix/")
self.assertNotEqual(hash_(other), hash_(cat), "Extending empty with prefixed cat should not yield cat")
| halfflat/nestmc-proto | python/test/unit/test_catalogues.py | Python | bsd-3-clause | 2,948 |
# Author: Travis Oliphant
# 1999 -- 2002
from __future__ import division, print_function, absolute_import
import warnings
import threading
from . import sigtools
from scipy._lib.six import callable
from scipy._lib._version import NumpyVersion
from scipy import linalg
from scipy.fftpack import (fft, ifft, ifftshift, fft2, ifft2, fftn,
ifftn, fftfreq)
from numpy.fft import rfftn, irfftn
from numpy import (allclose, angle, arange, argsort, array, asarray,
atleast_1d, atleast_2d, cast, dot, exp, expand_dims,
iscomplexobj, isscalar, mean, ndarray, newaxis, ones, pi,
poly, polyadd, polyder, polydiv, polymul, polysub, polyval,
prod, product, r_, ravel, real_if_close, reshape,
roots, sort, sum, take, transpose, unique, where, zeros,
zeros_like)
import numpy as np
from scipy.special import factorial
from .windows import get_window
from ._arraytools import axis_slice, axis_reverse, odd_ext, even_ext, const_ext
__all__ = ['correlate', 'fftconvolve', 'convolve', 'convolve2d', 'correlate2d',
'order_filter', 'medfilt', 'medfilt2d', 'wiener', 'lfilter',
'lfiltic', 'sosfilt', 'deconvolve', 'hilbert', 'hilbert2',
'cmplx_sort', 'unique_roots', 'invres', 'invresz', 'residue',
'residuez', 'resample', 'detrend', 'lfilter_zi', 'sosfilt_zi',
'filtfilt', 'decimate', 'vectorstrength']
_modedict = {'valid': 0, 'same': 1, 'full': 2}
_boundarydict = {'fill': 0, 'pad': 0, 'wrap': 2, 'circular': 2, 'symm': 1,
'symmetric': 1, 'reflect': 4}
_rfft_mt_safe = (NumpyVersion(np.__version__) >= '1.9.0.dev-e24486e')
_rfft_lock = threading.Lock()
def _valfrommode(mode):
try:
val = _modedict[mode]
except KeyError:
if mode not in [0, 1, 2]:
raise ValueError("Acceptable mode flags are 'valid' (0),"
" 'same' (1), or 'full' (2).")
val = mode
return val
def _bvalfromboundary(boundary):
try:
val = _boundarydict[boundary] << 2
except KeyError:
        if boundary not in [0, 1, 2]:
raise ValueError("Acceptable boundary flags are 'fill', 'wrap'"
" (or 'circular'), \n and 'symm'"
" (or 'symmetric').")
val = boundary << 2
return val
def _check_valid_mode_shapes(shape1, shape2):
for d1, d2 in zip(shape1, shape2):
if not d1 >= d2:
raise ValueError(
"in1 should have at least as many items as in2 in "
"every dimension for 'valid' mode.")
def correlate(in1, in2, mode='full'):
"""
Cross-correlate two N-dimensional arrays.
Cross-correlate `in1` and `in2`, with the output size determined by the
`mode` argument.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`;
if sizes of `in1` and `in2` are not equal then `in1` has to be the
larger array.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear cross-correlation
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
Returns
-------
correlate : array
An N-dimensional array containing a subset of the discrete linear
cross-correlation of `in1` with `in2`.
Notes
-----
The correlation z of two d-dimensional arrays x and y is defined as:
z[...,k,...] = sum[..., i_l, ...]
x[..., i_l,...] * conj(y[..., i_l + k,...])
Examples
--------
Implement a matched filter using cross-correlation, to recover a signal
that has passed through a noisy channel.
>>> from scipy import signal
>>> sig = np.repeat([0., 1., 1., 0., 1., 0., 0., 1.], 128)
>>> sig_noise = sig + np.random.randn(len(sig))
>>> corr = signal.correlate(sig_noise, np.ones(128), mode='same') / 128
>>> import matplotlib.pyplot as plt
>>> clock = np.arange(64, len(sig), 128)
>>> fig, (ax_orig, ax_noise, ax_corr) = plt.subplots(3, 1, sharex=True)
>>> ax_orig.plot(sig)
>>> ax_orig.plot(clock, sig[clock], 'ro')
>>> ax_orig.set_title('Original signal')
>>> ax_noise.plot(sig_noise)
>>> ax_noise.set_title('Signal with noise')
>>> ax_corr.plot(corr)
>>> ax_corr.plot(clock, corr[clock], 'ro')
>>> ax_corr.axhline(0.5, ls=':')
>>> ax_corr.set_title('Cross-correlated with rectangular pulse')
>>> ax_orig.margins(0, 0.1)
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
# Don't use _valfrommode, since correlate should not accept numeric modes
try:
val = _modedict[mode]
except KeyError:
raise ValueError("Acceptable mode flags are 'valid',"
" 'same', or 'full'.")
if in1.ndim == in2.ndim == 0:
return in1 * in2
elif not in1.ndim == in2.ndim:
raise ValueError("in1 and in2 should have the same dimensionality")
if mode == 'valid':
_check_valid_mode_shapes(in1.shape, in2.shape)
ps = [i - j + 1 for i, j in zip(in1.shape, in2.shape)]
out = np.empty(ps, in1.dtype)
z = sigtools._correlateND(in1, in2, out, val)
else:
# _correlateND is far slower when in2.size > in1.size, so swap them
# and then undo the effect afterward
swapped_inputs = (mode == 'full') and (in2.size > in1.size)
if swapped_inputs:
in1, in2 = in2, in1
ps = [i + j - 1 for i, j in zip(in1.shape, in2.shape)]
# zero pad input
in1zpadded = np.zeros(ps, in1.dtype)
sc = [slice(0, i) for i in in1.shape]
in1zpadded[sc] = in1.copy()
if mode == 'full':
out = np.empty(ps, in1.dtype)
elif mode == 'same':
out = np.empty(in1.shape, in1.dtype)
z = sigtools._correlateND(in1zpadded, in2, out, val)
# Reverse and conjugate to undo the effect of swapping inputs
if swapped_inputs:
slice_obj = [slice(None, None, -1)] * len(z.shape)
z = z[slice_obj].conj()
return z
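# Editor's illustrative sketch (not part of the upstream scipy source; the
# `_demo_*` helper below is an addition for exposition). For real 1-D inputs,
# `correlate(x, y)` equals `convolve(x, y[::-1])`, and the peak of the 'full'
# cross-correlation recovers where a template starts inside a longer signal.
def _demo_correlate_lag():
    x = zeros(10)
    template = array([1.0, 2.0, 3.0])
    x[4:7] = template                      # embed the template at offset 4
    corr = correlate(x, template, mode='full')
    offset = int(np.argmax(corr)) - (len(template) - 1)
    same_as_convolve = allclose(corr, convolve(x, template[::-1], mode='full'))
    return offset, same_as_convolve        # expected (4, True)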
def _centered(arr, newsize):
# Return the center newsize portion of the array.
newsize = asarray(newsize)
currsize = array(arr.shape)
startind = (currsize - newsize) // 2
endind = startind + newsize
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
def _next_regular(target):
"""
Find the next regular number greater than or equal to target.
Regular numbers are composites of the prime factors 2, 3, and 5.
Also known as 5-smooth numbers or Hamming numbers, these are the optimal
size for inputs to FFTPACK.
Target must be a positive integer.
"""
if target <= 6:
return target
# Quickly check if it's already a power of 2
if not (target & (target-1)):
return target
match = float('inf') # Anything found will be smaller
p5 = 1
while p5 < target:
p35 = p5
while p35 < target:
# Ceiling integer division, avoiding conversion to float
# (quotient = ceil(target / p35))
quotient = -(-target // p35)
# Quickly find next power of 2 >= quotient
try:
p2 = 2**((quotient - 1).bit_length())
except AttributeError:
# Fallback for Python <2.7
p2 = 2**(len(bin(quotient - 1)) - 2)
N = p2 * p35
if N == target:
return N
elif N < match:
match = N
p35 *= 3
if p35 == target:
return p35
if p35 < match:
match = p35
p5 *= 5
if p5 == target:
return p5
if p5 < match:
match = p5
return match
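# Editor's illustrative sketch (not part of the upstream scipy source; the
# `_demo_*` helper is an addition). _next_regular rounds up to the nearest
# 5-smooth (Hamming) number, which fftconvolve uses to choose fast FFT lengths.
def _demo_next_regular():
    # 18 = 2 * 3**2 is the smallest regular number >= 17,
    # 100 = 2**2 * 5**2 is the smallest regular number >= 97,
    # and powers of two are returned unchanged.
    return [_next_regular(n) for n in (17, 97, 128)]   # expected [18, 100, 128]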
def fftconvolve(in1, in2, mode="full"):
"""Convolve two N-dimensional arrays using FFT.
Convolve `in1` and `in2` using the fast Fourier transform method, with
the output size determined by the `mode` argument.
This is generally much faster than `convolve` for large arrays (n > ~500),
but can be slower when only a few output values are needed, and can only
output float arrays (int or object array inputs will be cast to float).
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`;
if sizes of `in1` and `in2` are not equal then `in1` has to be the
larger array.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
Returns
-------
out : array
An N-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
Examples
--------
Autocorrelation of white noise is an impulse. (This is at least 100 times
as fast as `convolve`.)
>>> from scipy import signal
>>> sig = np.random.randn(1000)
>>> autocorr = signal.fftconvolve(sig, sig[::-1], mode='full')
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_mag) = plt.subplots(2, 1)
>>> ax_orig.plot(sig)
>>> ax_orig.set_title('White noise')
>>> ax_mag.plot(np.arange(-len(sig)+1,len(sig)), autocorr)
>>> ax_mag.set_title('Autocorrelation')
>>> fig.show()
Gaussian blur implemented using FFT convolution. Notice the dark borders
around the image, due to the zero-padding beyond its boundaries.
The `convolve2d` function allows for other types of image boundaries,
but is far slower.
>>> from scipy import misc
>>> lena = misc.lena()
>>> kernel = np.outer(signal.gaussian(70, 8), signal.gaussian(70, 8))
>>> blurred = signal.fftconvolve(lena, kernel, mode='same')
>>> fig, (ax_orig, ax_kernel, ax_blurred) = plt.subplots(1, 3)
>>> ax_orig.imshow(lena, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_kernel.imshow(kernel, cmap='gray')
>>> ax_kernel.set_title('Gaussian kernel')
>>> ax_kernel.set_axis_off()
>>> ax_blurred.imshow(blurred, cmap='gray')
>>> ax_blurred.set_title('Blurred')
>>> ax_blurred.set_axis_off()
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
if in1.ndim == in2.ndim == 0: # scalar inputs
return in1 * in2
elif not in1.ndim == in2.ndim:
raise ValueError("in1 and in2 should have the same dimensionality")
elif in1.size == 0 or in2.size == 0: # empty arrays
return array([])
s1 = array(in1.shape)
s2 = array(in2.shape)
complex_result = (np.issubdtype(in1.dtype, np.complex) or
np.issubdtype(in2.dtype, np.complex))
shape = s1 + s2 - 1
if mode == "valid":
_check_valid_mode_shapes(s1, s2)
# Speed up FFT by padding to optimal size for FFTPACK
fshape = [_next_regular(int(d)) for d in shape]
fslice = tuple([slice(0, int(sz)) for sz in shape])
# Pre-1.9 NumPy FFT routines are not threadsafe. For older NumPys, make
# sure we only call rfftn/irfftn from one thread at a time.
if not complex_result and (_rfft_mt_safe or _rfft_lock.acquire(False)):
try:
ret = irfftn(rfftn(in1, fshape) *
rfftn(in2, fshape), fshape)[fslice].copy()
finally:
if not _rfft_mt_safe:
_rfft_lock.release()
else:
# If we're here, it's either because we need a complex result, or we
# failed to acquire _rfft_lock (meaning rfftn isn't threadsafe and
# is already in use by another thread). In either case, use the
# (threadsafe but slower) SciPy complex-FFT routines instead.
ret = ifftn(fftn(in1, fshape) * fftn(in2, fshape))[fslice].copy()
if not complex_result:
ret = ret.real
if mode == "full":
return ret
elif mode == "same":
return _centered(ret, s1)
elif mode == "valid":
return _centered(ret, s1 - s2 + 1)
else:
raise ValueError("Acceptable mode flags are 'valid',"
" 'same', or 'full'.")
def convolve(in1, in2, mode='full'):
"""
Convolve two N-dimensional arrays.
Convolve `in1` and `in2`, with the output size determined by the
`mode` argument.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`;
if sizes of `in1` and `in2` are not equal then `in1` has to be the
larger array.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
Returns
-------
convolve : array
An N-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
See also
--------
numpy.polymul : performs polynomial multiplication (same operation, but
also accepts poly1d objects)
"""
volume = asarray(in1)
kernel = asarray(in2)
if volume.ndim == kernel.ndim == 0:
return volume * kernel
slice_obj = [slice(None, None, -1)] * len(kernel.shape)
if np.iscomplexobj(kernel):
return correlate(volume, kernel[slice_obj].conj(), mode)
else:
return correlate(volume, kernel[slice_obj], mode)
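# Editor's illustrative sketch (not part of the upstream scipy source; the
# `_demo_*` helper is an addition). Convolving coefficient sequences is
# polynomial multiplication, which is why the docstring points at numpy.polymul.
def _demo_convolve_is_polymul():
    # (1 + 2x) * (3 + 4x) = 3 + 10x + 8x**2, with coefficients low-to-high.
    return convolve([1, 2], [3, 4], mode='full')        # expected [3, 10, 8]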
def order_filter(a, domain, rank):
"""
Perform an order filter on an N-dimensional array.
Perform an order filter on the array in. The domain argument acts as a
mask centered over each pixel. The non-zero elements of domain are
used to select elements surrounding each input pixel which are placed
in a list. The list is sorted, and the output for that pixel is the
element corresponding to rank in the sorted list.
Parameters
----------
a : ndarray
The N-dimensional input array.
domain : array_like
A mask array with the same number of dimensions as `in`.
Each dimension should have an odd number of elements.
rank : int
A non-negative integer which selects the element from the
sorted list (0 corresponds to the smallest element, 1 is the
next smallest element, etc.).
Returns
-------
out : ndarray
The results of the order filter in an array with the same
shape as `in`.
Examples
--------
>>> from scipy import signal
>>> x = np.arange(25).reshape(5, 5)
>>> domain = np.identity(3)
>>> x
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]])
>>> signal.order_filter(x, domain, 0)
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 2., 0.],
[ 0., 5., 6., 7., 0.],
[ 0., 10., 11., 12., 0.],
[ 0., 0., 0., 0., 0.]])
>>> signal.order_filter(x, domain, 2)
array([[ 6., 7., 8., 9., 4.],
[ 11., 12., 13., 14., 9.],
[ 16., 17., 18., 19., 14.],
[ 21., 22., 23., 24., 19.],
[ 20., 21., 22., 23., 24.]])
"""
domain = asarray(domain)
size = domain.shape
for k in range(len(size)):
if (size[k] % 2) != 1:
raise ValueError("Each dimension of domain argument "
" should have an odd number of elements.")
return sigtools._order_filterND(a, domain, rank)
def medfilt(volume, kernel_size=None):
"""
Perform a median filter on an N-dimensional array.
Apply a median filter to the input array using a local window-size
given by `kernel_size`.
Parameters
----------
volume : array_like
An N-dimensional input array.
kernel_size : array_like, optional
A scalar or an N-length list giving the size of the median filter
window in each dimension. Elements of `kernel_size` should be odd.
If `kernel_size` is a scalar, then this scalar is used as the size in
each dimension. Default size is 3 for each dimension.
Returns
-------
out : ndarray
An array the same size as input containing the median filtered
result.
"""
volume = atleast_1d(volume)
if kernel_size is None:
kernel_size = [3] * len(volume.shape)
kernel_size = asarray(kernel_size)
if kernel_size.shape == ():
kernel_size = np.repeat(kernel_size.item(), volume.ndim)
for k in range(len(volume.shape)):
if (kernel_size[k] % 2) != 1:
raise ValueError("Each element of kernel_size should be odd.")
domain = ones(kernel_size)
numels = product(kernel_size, axis=0)
order = numels // 2
return sigtools._order_filterND(volume, domain, order)
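# Editor's illustrative sketch (not part of the upstream scipy source; the
# `_demo_*` helper is an addition). A length-3 median filter removes an
# isolated spike while leaving the surrounding flat signal untouched.
def _demo_medfilt_spike():
    x = np.array([1.0, 1.0, 1.0, 50.0, 1.0, 1.0, 1.0])
    return medfilt(x, kernel_size=3)   # the 50.0 outlier is replaced by 1.0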
def wiener(im, mysize=None, noise=None):
"""
Perform a Wiener filter on an N-dimensional array.
Apply a Wiener filter to the N-dimensional array `im`.
Parameters
----------
im : ndarray
An N-dimensional array.
mysize : int or arraylike, optional
A scalar or an N-length list giving the size of the Wiener filter
window in each dimension. Elements of mysize should be odd.
If mysize is a scalar, then this scalar is used as the size
in each dimension.
noise : float, optional
The noise-power to use. If None, then noise is estimated as the
average of the local variance of the input.
Returns
-------
out : ndarray
Wiener filtered result with the same shape as `im`.
"""
im = asarray(im)
if mysize is None:
mysize = [3] * len(im.shape)
mysize = asarray(mysize)
if mysize.shape == ():
mysize = np.repeat(mysize.item(), im.ndim)
# Estimate the local mean
lMean = correlate(im, ones(mysize), 'same') / product(mysize, axis=0)
# Estimate the local variance
lVar = (correlate(im ** 2, ones(mysize), 'same') / product(mysize, axis=0)
- lMean ** 2)
# Estimate the noise power if needed.
if noise is None:
noise = mean(ravel(lVar), axis=0)
res = (im - lMean)
res *= (1 - noise / lVar)
res += lMean
out = where(lVar < noise, lMean, res)
return out
def convolve2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
"""
Convolve two 2-dimensional arrays.
Convolve `in1` and `in2` with output size determined by `mode`, and
boundary conditions determined by `boundary` and `fillvalue`.
Parameters
----------
in1, in2 : array_like
Two-dimensional input arrays to be convolved.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
boundary : str {'fill', 'wrap', 'symm'}, optional
A flag indicating how to handle boundaries:
``fill``
pad input arrays with fillvalue. (default)
``wrap``
circular boundary conditions.
``symm``
symmetrical boundary conditions.
fillvalue : scalar, optional
Value to fill pad input arrays with. Default is 0.
Returns
-------
out : ndarray
A 2-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
Examples
--------
Compute the gradient of an image by 2D convolution with a complex Scharr
operator. (Horizontal operator is real, vertical is imaginary.) Use
symmetric boundary condition to avoid creating edges at the image
boundaries.
>>> from scipy import signal
>>> from scipy import misc
>>> lena = misc.lena()
>>> scharr = np.array([[ -3-3j, 0-10j, +3 -3j],
... [-10+0j, 0+ 0j, +10 +0j],
... [ -3+3j, 0+10j, +3 +3j]]) # Gx + j*Gy
>>> grad = signal.convolve2d(lena, scharr, boundary='symm', mode='same')
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_mag, ax_ang) = plt.subplots(1, 3)
>>> ax_orig.imshow(lena, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_mag.imshow(np.absolute(grad), cmap='gray')
>>> ax_mag.set_title('Gradient magnitude')
>>> ax_mag.set_axis_off()
>>> ax_ang.imshow(np.angle(grad), cmap='hsv') # hsv is cyclic, like angles
>>> ax_ang.set_title('Gradient orientation')
>>> ax_ang.set_axis_off()
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
if mode == 'valid':
_check_valid_mode_shapes(in1.shape, in2.shape)
val = _valfrommode(mode)
bval = _bvalfromboundary(boundary)
with warnings.catch_warnings():
warnings.simplefilter('ignore', np.ComplexWarning)
# FIXME: some cast generates a warning here
out = sigtools._convolve2d(in1, in2, 1, val, bval, fillvalue)
return out
def correlate2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
"""
Cross-correlate two 2-dimensional arrays.
Cross correlate `in1` and `in2` with output size determined by `mode`, and
boundary conditions determined by `boundary` and `fillvalue`.
Parameters
----------
in1, in2 : array_like
Two-dimensional input arrays to be convolved.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear cross-correlation
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
boundary : str {'fill', 'wrap', 'symm'}, optional
A flag indicating how to handle boundaries:
``fill``
pad input arrays with fillvalue. (default)
``wrap``
circular boundary conditions.
``symm``
symmetrical boundary conditions.
fillvalue : scalar, optional
Value to fill pad input arrays with. Default is 0.
Returns
-------
correlate2d : ndarray
A 2-dimensional array containing a subset of the discrete linear
cross-correlation of `in1` with `in2`.
Examples
--------
Use 2D cross-correlation to find the location of a template in a noisy
image:
>>> from scipy import signal
>>> from scipy import misc
>>> lena = misc.lena() - misc.lena().mean()
>>> template = np.copy(lena[235:295, 310:370]) # right eye
>>> template -= template.mean()
>>> lena = lena + np.random.randn(*lena.shape) * 50 # add noise
>>> corr = signal.correlate2d(lena, template, boundary='symm', mode='same')
>>> y, x = np.unravel_index(np.argmax(corr), corr.shape) # find the match
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_template, ax_corr) = plt.subplots(1, 3)
>>> ax_orig.imshow(lena, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_template.imshow(template, cmap='gray')
>>> ax_template.set_title('Template')
>>> ax_template.set_axis_off()
>>> ax_corr.imshow(corr, cmap='gray')
>>> ax_corr.set_title('Cross-correlation')
>>> ax_corr.set_axis_off()
>>> ax_orig.plot(x, y, 'ro')
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
if mode == 'valid':
_check_valid_mode_shapes(in1.shape, in2.shape)
val = _valfrommode(mode)
bval = _bvalfromboundary(boundary)
with warnings.catch_warnings():
warnings.simplefilter('ignore', np.ComplexWarning)
# FIXME: some cast generates a warning here
out = sigtools._convolve2d(in1, in2, 0, val, bval, fillvalue)
return out
def medfilt2d(input, kernel_size=3):
"""
Median filter a 2-dimensional array.
Apply a median filter to the `input` array using a local window-size
given by `kernel_size` (must be odd).
Parameters
----------
input : array_like
A 2-dimensional input array.
kernel_size : array_like, optional
A scalar or a list of length 2, giving the size of the
median filter window in each dimension. Elements of
`kernel_size` should be odd. If `kernel_size` is a scalar,
then this scalar is used as the size in each dimension.
Default is a kernel of size (3, 3).
Returns
-------
out : ndarray
An array the same size as input containing the median filtered
result.
"""
image = asarray(input)
if kernel_size is None:
kernel_size = [3] * 2
kernel_size = asarray(kernel_size)
if kernel_size.shape == ():
kernel_size = np.repeat(kernel_size.item(), 2)
for size in kernel_size:
if (size % 2) != 1:
raise ValueError("Each element of kernel_size should be odd.")
return sigtools._medfilt2d(image, kernel_size)
def lfilter(b, a, x, axis=-1, zi=None):
"""
Filter data along one-dimension with an IIR or FIR filter.
Filter a data sequence, `x`, using a digital filter. This works for many
fundamental data types (including Object type). The filter is a direct
form II transposed implementation of the standard difference equation
(see Notes).
Parameters
----------
b : array_like
The numerator coefficient vector in a 1-D sequence.
a : array_like
The denominator coefficient vector in a 1-D sequence. If ``a[0]``
is not 1, then both `a` and `b` are normalized by ``a[0]``.
x : array_like
An N-dimensional input array.
axis : int
The axis of the input data array along which to apply the
linear filter. The filter is applied to each subarray along
this axis. Default is -1.
zi : array_like, optional
Initial conditions for the filter delays. It is a vector
(or array of vectors for an N-dimensional input) of length
``max(len(a),len(b))-1``. If `zi` is None or is not given then
initial rest is assumed. See `lfiltic` for more information.
Returns
-------
y : array
The output of the digital filter.
zf : array, optional
If `zi` is None, this is not returned, otherwise, `zf` holds the
final filter delay values.
Notes
-----
The filter function is implemented as a direct II transposed structure.
This means that the filter implements::
a[0]*y[n] = b[0]*x[n] + b[1]*x[n-1] + ... + b[nb]*x[n-nb]
- a[1]*y[n-1] - ... - a[na]*y[n-na]
using the following difference equations::
y[m] = b[0]*x[m] + z[0,m-1]
z[0,m] = b[1]*x[m] + z[1,m-1] - a[1]*y[m]
...
z[n-3,m] = b[n-2]*x[m] + z[n-2,m-1] - a[n-2]*y[m]
z[n-2,m] = b[n-1]*x[m] - a[n-1]*y[m]
where m is the output sample number and n=max(len(a),len(b)) is the
model order.
The rational transfer function describing this filter in the
z-transform domain is::
-1 -nb
b[0] + b[1]z + ... + b[nb] z
Y(z) = ---------------------------------- X(z)
-1 -na
a[0] + a[1]z + ... + a[na] z
"""
if isscalar(a):
a = [a]
if zi is None:
return sigtools._linear_filter(b, a, x, axis)
else:
return sigtools._linear_filter(b, a, x, axis, zi)
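# Editor's illustrative sketch (not part of the upstream scipy source; the
# `_demo_*` helper is an addition). With b = [1] and a = [1, -0.5] the
# difference equation is y[n] = x[n] + 0.5*y[n-1], so the impulse response is
# the geometric sequence 0.5**n.
def _demo_lfilter_one_pole():
    impulse = np.zeros(5)
    impulse[0] = 1.0
    # expected array([1., 0.5, 0.25, 0.125, 0.0625])
    return lfilter([1.0], [1.0, -0.5], impulse)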
def lfiltic(b, a, y, x=None):
"""
Construct initial conditions for lfilter.
Given a linear filter (b, a) and initial conditions on the output `y`
and the input `x`, return the initial conditions on the state vector zi
which is used by `lfilter` to generate the output given the input.
Parameters
----------
b : array_like
Linear filter term.
a : array_like
Linear filter term.
y : array_like
Initial conditions.
If ``N=len(a) - 1``, then ``y = {y[-1], y[-2], ..., y[-N]}``.
If `y` is too short, it is padded with zeros.
x : array_like, optional
Initial conditions.
If ``M=len(b) - 1``, then ``x = {x[-1], x[-2], ..., x[-M]}``.
If `x` is not given, its initial conditions are assumed zero.
If `x` is too short, it is padded with zeros.
Returns
-------
zi : ndarray
The state vector ``zi``.
``zi = {z_0[-1], z_1[-1], ..., z_K-1[-1]}``, where ``K = max(M,N)``.
See Also
--------
lfilter
"""
N = np.size(a) - 1
M = np.size(b) - 1
K = max(M, N)
y = asarray(y)
if y.dtype.kind in 'bui':
# ensure calculations are floating point
y = y.astype(np.float64)
zi = zeros(K, y.dtype)
if x is None:
x = zeros(M, y.dtype)
else:
x = asarray(x)
L = np.size(x)
if L < M:
x = r_[x, zeros(M - L)]
L = np.size(y)
if L < N:
y = r_[y, zeros(N - L)]
for m in range(M):
zi[m] = sum(b[m + 1:] * x[:M - m], axis=0)
for m in range(N):
zi[m] -= sum(a[m + 1:] * y[:N - m], axis=0)
return zi
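# Editor's illustrative sketch (not part of the upstream scipy source; the
# `_demo_*` helper is an addition). For the one-pole filter
# y[n] = x[n] + 0.5*y[n-1] (b = [1], a = [1, -0.5]), a previous output of
# y[-1] = 2 corresponds to the single internal state 0.5 * 2 = 1.
def _demo_lfiltic_one_pole():
    return lfiltic([1.0], [1.0, -0.5], y=[2.0])   # expected array([ 1.])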
def deconvolve(signal, divisor):
"""Deconvolves ``divisor`` out of ``signal``.
Returns the quotient and remainder such that
``signal = convolve(divisor, quotient) + remainder``
Parameters
----------
signal : array_like
Signal data, typically a recorded signal
divisor : array_like
Divisor data, typically an impulse response or filter that was
applied to the original signal
Returns
-------
quotient : ndarray
Quotient, typically the recovered original signal
remainder : ndarray
Remainder
Examples
--------
Deconvolve a signal that's been filtered:
>>> from scipy import signal
>>> original = [0, 1, 0, 0, 1, 1, 0, 0]
>>> impulse_response = [2, 1]
>>> recorded = signal.convolve(impulse_response, original)
>>> recorded
array([0, 2, 1, 0, 2, 3, 1, 0, 0])
>>> recovered, remainder = signal.deconvolve(recorded, impulse_response)
>>> recovered
array([ 0., 1., 0., 0., 1., 1., 0., 0.])
See also
--------
numpy.polydiv : performs polynomial division (same operation, but
also accepts poly1d objects)
"""
num = atleast_1d(signal)
den = atleast_1d(divisor)
N = len(num)
D = len(den)
if D > N:
quot = []
rem = num
else:
input = ones(N - D + 1, float)
input[1:] = 0
quot = lfilter(num, den, input)
rem = num - convolve(den, quot, mode='full')
return quot, rem
def hilbert(x, N=None, axis=-1):
"""
Compute the analytic signal, using the Hilbert transform.
The transformation is done along the last axis by default.
Parameters
----------
x : array_like
Signal data. Must be real.
N : int, optional
Number of Fourier components. Default: ``x.shape[axis]``
axis : int, optional
Axis along which to do the transformation. Default: -1.
Returns
-------
xa : ndarray
Analytic signal of `x`, of each 1-D array along `axis`
Notes
-----
The analytic signal ``x_a(t)`` of signal ``x(t)`` is:
.. math:: x_a = F^{-1}(F(x) 2U) = x + i y
where `F` is the Fourier transform, `U` the unit step function,
and `y` the Hilbert transform of `x`. [1]_
In other words, the negative half of the frequency spectrum is zeroed
out, turning the real-valued signal into a complex signal. The Hilbert
transformed signal can be obtained from ``np.imag(hilbert(x))``, and the
original signal from ``np.real(hilbert(x))``.
References
----------
.. [1] Wikipedia, "Analytic signal".
http://en.wikipedia.org/wiki/Analytic_signal
"""
x = asarray(x)
if iscomplexobj(x):
raise ValueError("x must be real.")
if N is None:
N = x.shape[axis]
if N <= 0:
raise ValueError("N must be positive.")
Xf = fft(x, N, axis=axis)
h = zeros(N)
if N % 2 == 0:
h[0] = h[N // 2] = 1
h[1:N // 2] = 2
else:
h[0] = 1
h[1:(N + 1) // 2] = 2
if len(x.shape) > 1:
ind = [newaxis] * x.ndim
ind[axis] = slice(None)
h = h[ind]
x = ifft(Xf * h, axis=axis)
return x
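# Editor's illustrative sketch (not part of the upstream scipy source; the
# `_demo_*` helper is an addition). The magnitude of the analytic signal gives
# the envelope of an amplitude-modulated tone; here every component fits an
# integer number of cycles, so the recovered envelope matches the true one up
# to numerical error.
def _demo_hilbert_envelope():
    t = np.linspace(0.0, 1.0, 400, endpoint=False)
    envelope = 1.0 + 0.5 * np.cos(2 * pi * 2 * t)   # slow 2 Hz modulation
    sig = envelope * np.cos(2 * pi * 50 * t)        # 50 Hz carrier
    return np.abs(hilbert(sig)), envelope           # the two arrays nearly agree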
def hilbert2(x, N=None):
"""
Compute the '2-D' analytic signal of `x`
Parameters
----------
x : array_like
2-D signal data.
N : int or tuple of two ints, optional
Number of Fourier components. Default is ``x.shape``
Returns
-------
xa : ndarray
Analytic signal of `x` taken along axes (0,1).
References
----------
.. [1] Wikipedia, "Analytic signal",
http://en.wikipedia.org/wiki/Analytic_signal
"""
x = atleast_2d(x)
if len(x.shape) > 2:
raise ValueError("x must be 2-D.")
if iscomplexobj(x):
raise ValueError("x must be real.")
if N is None:
N = x.shape
elif isinstance(N, int):
if N <= 0:
raise ValueError("N must be positive.")
N = (N, N)
elif len(N) != 2 or np.any(np.asarray(N) <= 0):
raise ValueError("When given as a tuple, N must hold exactly "
"two positive integers")
Xf = fft2(x, N, axes=(0, 1))
h1 = zeros(N[0], 'd')
h2 = zeros(N[1], 'd')
    for h, N1 in zip((h1, h2), N):
        if N1 % 2 == 0:
            h[0] = h[N1 // 2] = 1
            h[1:N1 // 2] = 2
        else:
            h[0] = 1
            h[1:(N1 + 1) // 2] = 2
h = h1[:, newaxis] * h2[newaxis, :]
k = len(x.shape)
while k > 2:
h = h[:, newaxis]
k -= 1
x = ifft2(Xf * h, axes=(0, 1))
return x
def cmplx_sort(p):
"""Sort roots based on magnitude.
Parameters
----------
p : array_like
The roots to sort, as a 1-D array.
Returns
-------
p_sorted : ndarray
Sorted roots.
indx : ndarray
Array of indices needed to sort the input `p`.
"""
p = asarray(p)
if iscomplexobj(p):
indx = argsort(abs(p))
else:
indx = argsort(p)
return take(p, indx, 0), indx
def unique_roots(p, tol=1e-3, rtype='min'):
"""
Determine unique roots and their multiplicities from a list of roots.
Parameters
----------
p : array_like
The list of roots.
tol : float, optional
The tolerance for two roots to be considered equal. Default is 1e-3.
rtype : {'max', 'min, 'avg'}, optional
How to determine the returned root if multiple roots are within
`tol` of each other.
- 'max': pick the maximum of those roots.
- 'min': pick the minimum of those roots.
- 'avg': take the average of those roots.
Returns
-------
pout : ndarray
The list of unique roots, sorted from low to high.
mult : ndarray
The multiplicity of each root.
Notes
-----
This utility function is not specific to roots but can be used for any
sequence of values for which uniqueness and multiplicity has to be
determined. For a more general routine, see `numpy.unique`.
Examples
--------
>>> from scipy import signal
>>> vals = [0, 1.3, 1.31, 2.8, 1.25, 2.2, 10.3]
>>> uniq, mult = signal.unique_roots(vals, tol=2e-2, rtype='avg')
Check which roots have multiplicity larger than 1:
>>> uniq[mult > 1]
array([ 1.305])
"""
if rtype in ['max', 'maximum']:
comproot = np.max
elif rtype in ['min', 'minimum']:
comproot = np.min
elif rtype in ['avg', 'mean']:
comproot = np.mean
else:
raise ValueError("`rtype` must be one of "
"{'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}")
p = asarray(p) * 1.0
tol = abs(tol)
p, indx = cmplx_sort(p)
pout = []
mult = []
indx = -1
curp = p[0] + 5 * tol
sameroots = []
for k in range(len(p)):
tr = p[k]
if abs(tr - curp) < tol:
sameroots.append(tr)
curp = comproot(sameroots)
pout[indx] = curp
mult[indx] += 1
else:
pout.append(tr)
curp = tr
sameroots = [tr]
indx += 1
mult.append(1)
return array(pout), array(mult)
def invres(r, p, k, tol=1e-3, rtype='avg'):
"""
Compute b(s) and a(s) from partial fraction expansion.
If ``M = len(b)`` and ``N = len(a)``::
b(s) b[0] x**(M-1) + b[1] x**(M-2) + ... + b[M-1]
H(s) = ------ = ----------------------------------------------
a(s) a[0] x**(N-1) + a[1] x**(N-2) + ... + a[N-1]
r[0] r[1] r[-1]
= -------- + -------- + ... + --------- + k(s)
(s-p[0]) (s-p[1]) (s-p[-1])
If there are any repeated roots (closer than tol), then the partial
fraction expansion has terms like::
r[i] r[i+1] r[i+n-1]
-------- + ----------- + ... + -----------
(s-p[i]) (s-p[i])**2 (s-p[i])**n
Parameters
----------
r : ndarray
Residues.
p : ndarray
Poles.
k : ndarray
Coefficients of the direct polynomial term.
tol : float, optional
The tolerance for two roots to be considered equal. Default is 1e-3.
rtype : {'max', 'min, 'avg'}, optional
How to determine the returned root if multiple roots are within
`tol` of each other.
'max': pick the maximum of those roots.
'min': pick the minimum of those roots.
'avg': take the average of those roots.
See Also
--------
residue, unique_roots
"""
extra = k
p, indx = cmplx_sort(p)
r = take(r, indx, 0)
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for k in range(len(pout)):
p.extend([pout[k]] * mult[k])
a = atleast_1d(poly(p))
if len(extra) > 0:
b = polymul(extra, a)
else:
b = [0]
indx = 0
for k in range(len(pout)):
temp = []
for l in range(len(pout)):
if l != k:
temp.extend([pout[l]] * mult[l])
for m in range(mult[k]):
t2 = temp[:]
t2.extend([pout[k]] * (mult[k] - m - 1))
b = polyadd(b, r[indx] * poly(t2))
indx += 1
b = real_if_close(b)
while allclose(b[0], 0, rtol=1e-14) and (b.shape[-1] > 1):
b = b[1:]
return b, a
def residue(b, a, tol=1e-3, rtype='avg'):
"""
Compute partial-fraction expansion of b(s) / a(s).
If ``M = len(b)`` and ``N = len(a)``, then the partial-fraction
expansion H(s) is defined as::
b(s) b[0] s**(M-1) + b[1] s**(M-2) + ... + b[M-1]
H(s) = ------ = ----------------------------------------------
a(s) a[0] s**(N-1) + a[1] s**(N-2) + ... + a[N-1]
r[0] r[1] r[-1]
= -------- + -------- + ... + --------- + k(s)
(s-p[0]) (s-p[1]) (s-p[-1])
If there are any repeated roots (closer together than `tol`), then H(s)
has terms like::
r[i] r[i+1] r[i+n-1]
-------- + ----------- + ... + -----------
(s-p[i]) (s-p[i])**2 (s-p[i])**n
Returns
-------
r : ndarray
Residues.
p : ndarray
Poles.
k : ndarray
Coefficients of the direct polynomial term.
See Also
--------
invres, numpy.poly, unique_roots
"""
b, a = map(asarray, (b, a))
rscale = a[0]
k, b = polydiv(b, a)
p = roots(a)
r = p * 0.0
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for n in range(len(pout)):
p.extend([pout[n]] * mult[n])
p = asarray(p)
# Compute the residue from the general formula
indx = 0
for n in range(len(pout)):
bn = b.copy()
pn = []
for l in range(len(pout)):
if l != n:
pn.extend([pout[l]] * mult[l])
an = atleast_1d(poly(pn))
# bn(s) / an(s) is (s-po[n])**Nn * b(s) / a(s) where Nn is
# multiplicity of pole at po[n]
sig = mult[n]
for m in range(sig, 0, -1):
if sig > m:
# compute next derivative of bn(s) / an(s)
term1 = polymul(polyder(bn, 1), an)
term2 = polymul(bn, polyder(an, 1))
bn = polysub(term1, term2)
an = polymul(an, an)
r[indx + m - 1] = (polyval(bn, pout[n]) / polyval(an, pout[n])
/ factorial(sig - m))
indx += sig
return r / rscale, p, k
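# Editor's illustrative sketch (not part of the upstream scipy source; the
# `_demo_*` helper is an addition). For a monic denominator, `invres` rebuilds
# the coefficients that `residue` expanded. Here
# H(s) = (s + 2) / ((s + 1)(s + 3)) = 0.5/(s + 1) + 0.5/(s + 3).
def _demo_residue_roundtrip():
    b, a = [1.0, 2.0], [1.0, 4.0, 3.0]
    r, p, k = residue(b, a)
    b2, a2 = invres(r, p, k)
    return allclose(b, b2) and allclose(a, a2)   # expected True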
def residuez(b, a, tol=1e-3, rtype='avg'):
"""
Compute partial-fraction expansion of b(z) / a(z).
If ``M = len(b)`` and ``N = len(a)``::
b(z) b[0] + b[1] z**(-1) + ... + b[M-1] z**(-M+1)
H(z) = ------ = ----------------------------------------------
a(z) a[0] + a[1] z**(-1) + ... + a[N-1] z**(-N+1)
r[0] r[-1]
= --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ...
(1-p[0]z**(-1)) (1-p[-1]z**(-1))
If there are any repeated roots (closer than tol), then the partial
fraction expansion has terms like::
r[i] r[i+1] r[i+n-1]
-------------- + ------------------ + ... + ------------------
(1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n
See also
--------
invresz, unique_roots
"""
b, a = map(asarray, (b, a))
gain = a[0]
brev, arev = b[::-1], a[::-1]
krev, brev = polydiv(brev, arev)
if krev == []:
k = []
else:
k = krev[::-1]
b = brev[::-1]
p = roots(a)
r = p * 0.0
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for n in range(len(pout)):
p.extend([pout[n]] * mult[n])
p = asarray(p)
# Compute the residue from the general formula (for discrete-time)
# the polynomial is in z**(-1) and the multiplication is by terms
# like this (1-p[i] z**(-1))**mult[i]. After differentiation,
# we must divide by (-p[i])**(m-k) as well as (m-k)!
indx = 0
for n in range(len(pout)):
bn = brev.copy()
pn = []
for l in range(len(pout)):
if l != n:
pn.extend([pout[l]] * mult[l])
an = atleast_1d(poly(pn))[::-1]
# bn(z) / an(z) is (1-po[n] z**(-1))**Nn * b(z) / a(z) where Nn is
# multiplicity of pole at po[n] and b(z) and a(z) are polynomials.
sig = mult[n]
for m in range(sig, 0, -1):
if sig > m:
# compute next derivative of bn(s) / an(s)
term1 = polymul(polyder(bn, 1), an)
term2 = polymul(bn, polyder(an, 1))
bn = polysub(term1, term2)
an = polymul(an, an)
r[indx + m - 1] = (polyval(bn, 1.0 / pout[n]) /
polyval(an, 1.0 / pout[n]) /
factorial(sig - m) / (-pout[n]) ** (sig - m))
indx += sig
return r / gain, p, k
def invresz(r, p, k, tol=1e-3, rtype='avg'):
"""
Compute b(z) and a(z) from partial fraction expansion.
If ``M = len(b)`` and ``N = len(a)``::
b(z) b[0] + b[1] z**(-1) + ... + b[M-1] z**(-M+1)
H(z) = ------ = ----------------------------------------------
a(z) a[0] + a[1] z**(-1) + ... + a[N-1] z**(-N+1)
r[0] r[-1]
= --------------- + ... + ---------------- + k[0] + k[1]z**(-1)...
(1-p[0]z**(-1)) (1-p[-1]z**(-1))
If there are any repeated roots (closer than tol), then the partial
fraction expansion has terms like::
r[i] r[i+1] r[i+n-1]
-------------- + ------------------ + ... + ------------------
(1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n
See Also
--------
residuez, unique_roots, invres
"""
extra = asarray(k)
p, indx = cmplx_sort(p)
r = take(r, indx, 0)
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for k in range(len(pout)):
p.extend([pout[k]] * mult[k])
a = atleast_1d(poly(p))
if len(extra) > 0:
b = polymul(extra, a)
else:
b = [0]
indx = 0
brev = asarray(b)[::-1]
for k in range(len(pout)):
temp = []
# Construct polynomial which does not include any of this root
for l in range(len(pout)):
if l != k:
temp.extend([pout[l]] * mult[l])
for m in range(mult[k]):
t2 = temp[:]
t2.extend([pout[k]] * (mult[k] - m - 1))
brev = polyadd(brev, (r[indx] * poly(t2))[::-1])
indx += 1
b = real_if_close(brev[::-1])
return b, a
def resample(x, num, t=None, axis=0, window=None):
"""
Resample `x` to `num` samples using Fourier method along the given axis.
The resampled signal starts at the same value as `x` but is sampled
with a spacing of ``len(x) / num * (spacing of x)``. Because a
Fourier method is used, the signal is assumed to be periodic.
Parameters
----------
x : array_like
The data to be resampled.
num : int
The number of samples in the resampled signal.
t : array_like, optional
If `t` is given, it is assumed to be the sample positions
associated with the signal data in `x`.
axis : int, optional
The axis of `x` that is resampled. Default is 0.
window : array_like, callable, string, float, or tuple, optional
Specifies the window applied to the signal in the Fourier
domain. See below for details.
Returns
-------
resampled_x or (resampled_x, resampled_t)
Either the resampled array, or, if `t` was given, a tuple
containing the resampled array and the corresponding resampled
positions.
Notes
-----
    The argument `window` controls a Fourier-domain window that tapers
    the Fourier spectrum before zero-padding, to alleviate ringing in
    the resampled values for signals that were not meant to be
    interpreted as band-limited.
If `window` is a function, then it is called with a vector of inputs
indicating the frequency bins (i.e. fftfreq(x.shape[axis]) ).
If `window` is an array of the same length as `x.shape[axis]` it is
assumed to be the window to be applied directly in the Fourier
domain (with dc and low-frequency first).
For any other type of `window`, the function `scipy.signal.get_window`
is called to generate the window.
The first sample of the returned vector is the same as the first
sample of the input vector. The spacing between samples is changed
from ``dx`` to ``dx * len(x) / num``.
If `t` is not None, then it represents the old sample positions,
and the new sample positions will be returned as well as the new
samples.
As noted, `resample` uses FFT transformations, which can be very
slow if the number of input samples is large and prime, see
`scipy.fftpack.fft`.
"""
x = asarray(x)
X = fft(x, axis=axis)
Nx = x.shape[axis]
if window is not None:
if callable(window):
W = window(fftfreq(Nx))
elif isinstance(window, ndarray) and window.shape == (Nx,):
W = window
else:
W = ifftshift(get_window(window, Nx))
newshape = [1] * x.ndim
newshape[axis] = len(W)
W.shape = newshape
X = X * W
sl = [slice(None)] * len(x.shape)
newshape = list(x.shape)
newshape[axis] = num
N = int(np.minimum(num, Nx))
Y = zeros(newshape, 'D')
sl[axis] = slice(0, (N + 1) // 2)
Y[sl] = X[sl]
sl[axis] = slice(-(N - 1) // 2, None)
Y[sl] = X[sl]
y = ifft(Y, axis=axis) * (float(num) / float(Nx))
if x.dtype.char not in ['F', 'D']:
y = y.real
if t is None:
return y
else:
new_t = arange(0, num) * (t[1] - t[0]) * Nx / float(num) + t[0]
return y, new_t
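# Editor's illustrative sketch (not part of the upstream scipy source; the
# `_demo_*` helper is an addition). Fourier resampling is exact for a periodic,
# band-limited signal: a 3-cycle sine sampled 100 times, resampled to 50
# samples, matches the same sine evaluated on the coarser grid.
def _demo_resample_sine():
    t100 = np.linspace(0, 1, 100, endpoint=False)
    t50 = np.linspace(0, 1, 50, endpoint=False)
    x = np.sin(2 * pi * 3 * t100)
    y = resample(x, 50)
    return allclose(y, np.sin(2 * pi * 3 * t50), atol=1e-10)   # expected True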
def vectorstrength(events, period):
'''
Determine the vector strength of the events corresponding to the given
period.
The vector strength is a measure of phase synchrony, how well the
timing of the events is synchronized to a single period of a periodic
signal.
If multiple periods are used, calculate the vector strength of each.
This is called the "resonating vector strength".
Parameters
----------
events : 1D array_like
An array of time points containing the timing of the events.
period : float or array_like
The period of the signal that the events should synchronize to.
The period is in the same units as `events`. It can also be an array
of periods, in which case the outputs are arrays of the same length.
Returns
-------
strength : float or 1D array
The strength of the synchronization. 1.0 is perfect synchronization
and 0.0 is no synchronization. If `period` is an array, this is also
an array with each element containing the vector strength at the
corresponding period.
phase : float or array
The phase that the events are most strongly synchronized to in radians.
If `period` is an array, this is also an array with each element
containing the phase for the corresponding period.
References
----------
van Hemmen, JL, Longtin, A, and Vollmayr, AN. Testing resonating vector
strength: Auditory system, electric fish, and noise.
Chaos 21, 047508 (2011);
doi: 10.1063/1.3670512
van Hemmen, JL. Vector strength after Goldberg, Brown, and von Mises:
biological and mathematical perspectives. Biol Cybern.
2013 Aug;107(4):385-96. doi: 10.1007/s00422-013-0561-7.
van Hemmen, JL and Vollmayr, AN. Resonating vector strength: what happens
when we vary the "probing" frequency while keeping the spike times
fixed. Biol Cybern. 2013 Aug;107(4):491-94.
doi: 10.1007/s00422-013-0560-8
'''
events = asarray(events)
period = asarray(period)
if events.ndim > 1:
raise ValueError('events cannot have dimensions more than 1')
if period.ndim > 1:
raise ValueError('period cannot have dimensions more than 1')
# we need to know later if period was originally a scalar
scalarperiod = not period.ndim
events = atleast_2d(events)
period = atleast_2d(period)
if (period <= 0).any():
raise ValueError('periods must be positive')
# this converts the times to vectors
vectors = exp(dot(2j*pi/period.T, events))
# the vector strength is just the magnitude of the mean of the vectors
# the vector phase is the angle of the mean of the vectors
vectormean = mean(vectors, axis=1)
strength = abs(vectormean)
phase = angle(vectormean)
# if the original period was a scalar, return scalars
if scalarperiod:
strength = strength[0]
phase = phase[0]
return strength, phase
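# Editor's illustrative sketch (not part of the upstream scipy source; the
# `_demo_*` helper is an addition). Events that all land at the same phase of
# the period give a vector strength of 1, while events spread uniformly around
# the cycle give a strength of (numerically) 0.
def _demo_vectorstrength():
    locked = vectorstrength([0.0, 1.0, 2.0, 3.0], 1.0)[0]     # expected ~1.0
    spread = vectorstrength([0.0, 0.25, 0.5, 0.75], 1.0)[0]   # expected ~0.0
    return locked, spread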
def detrend(data, axis=-1, type='linear', bp=0):
"""
Remove linear trend along axis from data.
Parameters
----------
data : array_like
The input data.
axis : int, optional
The axis along which to detrend the data. By default this is the
last axis (-1).
type : {'linear', 'constant'}, optional
The type of detrending. If ``type == 'linear'`` (default),
the result of a linear least-squares fit to `data` is subtracted
from `data`.
If ``type == 'constant'``, only the mean of `data` is subtracted.
bp : array_like of ints, optional
A sequence of break points. If given, an individual linear fit is
performed for each part of `data` between two break points.
Break points are specified as indices into `data`.
Returns
-------
ret : ndarray
The detrended input data.
Examples
--------
>>> from scipy import signal
>>> randgen = np.random.RandomState(9)
    >>> npoints = 1000
>>> noise = randgen.randn(npoints)
>>> x = 3 + 2*np.linspace(0, 1, npoints) + noise
>>> (signal.detrend(x) - noise).max() < 0.01
True
"""
if type not in ['linear', 'l', 'constant', 'c']:
raise ValueError("Trend type must be 'linear' or 'constant'.")
data = asarray(data)
dtype = data.dtype.char
if dtype not in 'dfDF':
dtype = 'd'
if type in ['constant', 'c']:
ret = data - expand_dims(mean(data, axis), axis)
return ret
else:
dshape = data.shape
N = dshape[axis]
bp = sort(unique(r_[0, bp, N]))
if np.any(bp > N):
raise ValueError("Breakpoints must be less than length "
"of data along given axis.")
Nreg = len(bp) - 1
# Restructure data so that axis is along first dimension and
# all other dimensions are collapsed into second dimension
rnk = len(dshape)
if axis < 0:
axis = axis + rnk
newdims = r_[axis, 0:axis, axis + 1:rnk]
newdata = reshape(transpose(data, tuple(newdims)),
(N, prod(dshape, axis=0) // N))
newdata = newdata.copy() # make sure we have a copy
if newdata.dtype.char not in 'dfDF':
newdata = newdata.astype(dtype)
# Find leastsq fit and remove it for each piece
for m in range(Nreg):
Npts = bp[m + 1] - bp[m]
A = ones((Npts, 2), dtype)
A[:, 0] = cast[dtype](arange(1, Npts + 1) * 1.0 / Npts)
sl = slice(bp[m], bp[m + 1])
coef, resids, rank, s = linalg.lstsq(A, newdata[sl])
newdata[sl] = newdata[sl] - dot(A, coef)
# Put data back in original shape.
tdshape = take(dshape, newdims, 0)
ret = reshape(newdata, tuple(tdshape))
vals = list(range(1, rnk))
olddims = vals[:axis] + [0] + vals[axis:]
ret = transpose(ret, tuple(olddims))
return ret
def lfilter_zi(b, a):
"""
Compute an initial state `zi` for the lfilter function that corresponds
to the steady state of the step response.
A typical use of this function is to set the initial state so that the
output of the filter starts at the same value as the first element of
the signal to be filtered.
Parameters
----------
b, a : array_like (1-D)
The IIR filter coefficients. See `lfilter` for more
information.
Returns
-------
zi : 1-D ndarray
The initial state for the filter.
Notes
-----
A linear filter with order m has a state space representation (A, B, C, D),
for which the output y of the filter can be expressed as::
z(n+1) = A*z(n) + B*x(n)
y(n) = C*z(n) + D*x(n)
where z(n) is a vector of length m, A has shape (m, m), B has shape
(m, 1), C has shape (1, m) and D has shape (1, 1) (assuming x(n) is
a scalar). lfilter_zi solves::
zi = A*zi + B
In other words, it finds the initial condition for which the response
to an input of all ones is a constant.
Given the filter coefficients `a` and `b`, the state space matrices
for the transposed direct form II implementation of the linear filter,
which is the implementation used by scipy.signal.lfilter, are::
A = scipy.linalg.companion(a).T
B = b[1:] - a[1:]*b[0]
assuming `a[0]` is 1.0; if `a[0]` is not 1, `a` and `b` are first
divided by a[0].
Examples
--------
The following code creates a lowpass Butterworth filter. Then it
applies that filter to an array whose values are all 1.0; the
output is also all 1.0, as expected for a lowpass filter. If the
`zi` argument of `lfilter` had not been given, the output would have
shown the transient signal.
>>> from numpy import array, ones
>>> from scipy.signal import lfilter, lfilter_zi, butter
>>> b, a = butter(5, 0.25)
>>> zi = lfilter_zi(b, a)
>>> y, zo = lfilter(b, a, ones(10), zi=zi)
>>> y
array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])
Another example:
>>> x = array([0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0])
>>> y, zf = lfilter(b, a, x, zi=zi*x[0])
>>> y
array([ 0.5 , 0.5 , 0.5 , 0.49836039, 0.48610528,
0.44399389, 0.35505241])
Note that the `zi` argument to `lfilter` was computed using
`lfilter_zi` and scaled by `x[0]`. Then the output `y` has no
transient until the input drops from 0.5 to 0.0.
"""
# FIXME: Can this function be replaced with an appropriate
# use of lfiltic? For example, when b,a = butter(N,Wn),
# lfiltic(b, a, y=numpy.ones_like(a), x=numpy.ones_like(b)).
#
# We could use scipy.signal.normalize, but it uses warnings in
# cases where a ValueError is more appropriate, and it allows
# b to be 2D.
b = np.atleast_1d(b)
if b.ndim != 1:
raise ValueError("Numerator b must be 1-D.")
a = np.atleast_1d(a)
if a.ndim != 1:
raise ValueError("Denominator a must be 1-D.")
while len(a) > 1 and a[0] == 0.0:
a = a[1:]
if a.size < 1:
raise ValueError("There must be at least one nonzero `a` coefficient.")
if a[0] != 1.0:
# Normalize the coefficients so a[0] == 1.
b = b / a[0]
a = a / a[0]
n = max(len(a), len(b))
# Pad a or b with zeros so they are the same length.
if len(a) < n:
a = np.r_[a, np.zeros(n - len(a))]
elif len(b) < n:
b = np.r_[b, np.zeros(n - len(b))]
IminusA = np.eye(n - 1) - linalg.companion(a).T
B = b[1:] - a[1:] * b[0]
# Solve zi = A*zi + B
zi = np.linalg.solve(IminusA, B)
# For future reference: we could also use the following
# explicit formulas to solve the linear system:
#
# zi = np.zeros(n - 1)
# zi[0] = B.sum() / IminusA[:,0].sum()
# asum = 1.0
# csum = 0.0
# for k in range(1,n-1):
# asum += a[k]
# csum += b[k] - a[k]*b[0]
# zi[k] = asum*zi[0] - csum
return zi
def sosfilt_zi(sos):
"""
Compute an initial state `zi` for the sosfilt function that corresponds
to the steady state of the step response.
A typical use of this function is to set the initial state so that the
output of the filter starts at the same value as the first element of
the signal to be filtered.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
zi : ndarray
Initial conditions suitable for use with ``sosfilt``, shape
``(n_sections, 2)``.
See Also
--------
sosfilt, zpk2sos
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
Filter a rectangular pulse that begins at time 0, with and without
the use of the `zi` argument of `scipy.signal.sosfilt`.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> sos = signal.butter(9, 0.125, output='sos')
>>> zi = signal.sosfilt_zi(sos)
>>> x = (np.arange(250) < 100).astype(int)
>>> f1 = signal.sosfilt(sos, x)
>>> f2, zo = signal.sosfilt(sos, x, zi=zi)
>>> plt.plot(x, 'k--', label='x')
>>> plt.plot(f1, 'b', alpha=0.5, linewidth=2, label='filtered')
>>> plt.plot(f2, 'g', alpha=0.25, linewidth=4, label='filtered with zi')
>>> plt.legend(loc='best')
>>> plt.show()
"""
sos = np.asarray(sos)
if sos.ndim != 2 or sos.shape[1] != 6:
raise ValueError('sos must be shape (n_sections, 6)')
n_sections = sos.shape[0]
zi = np.empty((n_sections, 2))
scale = 1.0
for section in range(n_sections):
b = sos[section, :3]
a = sos[section, 3:]
zi[section] = scale * lfilter_zi(b, a)
# If H(z) = B(z)/A(z) is this section's transfer function, then
# b.sum()/a.sum() is H(1), the gain at omega=0. That's the steady
# state value of this section's step response.
scale *= b.sum() / a.sum()
return zi
def _filtfilt_gust(b, a, x, axis=-1, irlen=None):
"""Forward-backward IIR filter that uses Gustafsson's method.
Apply the IIR filter defined by `(b,a)` to `x` twice, first forward
then backward, using Gustafsson's initial conditions [1]_.
Let ``y_fb`` be the result of filtering first forward and then backward,
and let ``y_bf`` be the result of filtering first backward then forward.
Gustafsson's method is to compute initial conditions for the forward
pass and the backward pass such that ``y_fb == y_bf``.
Parameters
----------
b : scalar or 1-D ndarray
Numerator coefficients of the filter.
a : scalar or 1-D ndarray
Denominator coefficients of the filter.
x : ndarray
Data to be filtered.
axis : int, optional
Axis of `x` to be filtered. Default is -1.
irlen : int or None, optional
The length of the nonnegligible part of the impulse response.
If `irlen` is None, or if the length of the signal is less than
``2 * irlen``, then no part of the impulse response is ignored.
Returns
-------
y : ndarray
The filtered data.
x0 : ndarray
Initial condition for the forward filter.
x1 : ndarray
Initial condition for the backward filter.
Notes
-----
Typically the return values `x0` and `x1` are not needed by the
caller. The intended use of these return values is in unit tests.
References
----------
    .. [1] F. Gustafsson. Determining the initial states in forward-backward
filtering. Transactions on Signal Processing, 46(4):988-992, 1996.
"""
# In the comments, "Gustafsson's paper" and [1] refer to the
# paper referenced in the docstring.
b = np.atleast_1d(b)
a = np.atleast_1d(a)
order = max(len(b), len(a)) - 1
if order == 0:
# The filter is just scalar multiplication, with no state.
scale = (b[0] / a[0])**2
y = scale * x
return y, np.array([]), np.array([])
    if axis != -1 and axis != x.ndim - 1:
# Move the axis containing the data to the end.
x = np.swapaxes(x, axis, x.ndim - 1)
# n is the number of samples in the data to be filtered.
n = x.shape[-1]
if irlen is None or n <= 2*irlen:
m = n
else:
m = irlen
# Create Obs, the observability matrix (called O in the paper).
# This matrix can be interpreted as the operator that propagates
# an arbitrary initial state to the output, assuming the input is
# zero.
# In Gustafsson's paper, the forward and backward filters are not
# necessarily the same, so he has both O_f and O_b. We use the same
# filter in both directions, so we only need O. The same comment
# applies to S below.
Obs = np.zeros((m, order))
zi = np.zeros(order)
zi[0] = 1
Obs[:, 0] = lfilter(b, a, np.zeros(m), zi=zi)[0]
for k in range(1, order):
Obs[k:, k] = Obs[:-k, 0]
# Obsr is O^R (Gustafsson's notation for row-reversed O)
Obsr = Obs[::-1]
# Create S. S is the matrix that applies the filter to the reversed
# propagated initial conditions. That is,
# out = S.dot(zi)
# is the same as
# tmp, _ = lfilter(b, a, zeros(), zi=zi) # Propagate ICs.
# out = lfilter(b, a, tmp[::-1]) # Reverse and filter.
# Equations (5) & (6) of [1]
S = lfilter(b, a, Obs[::-1], axis=0)
# Sr is S^R (row-reversed S)
Sr = S[::-1]
# M is [(S^R - O), (O^R - S)]
if m == n:
M = np.hstack((Sr - Obs, Obsr - S))
else:
# Matrix described in section IV of [1].
M = np.zeros((2*m, 2*order))
M[:m, :order] = Sr - Obs
M[m:, order:] = Obsr - S
# Naive forward-backward and backward-forward filters.
# These have large transients because the filters use zero initial
# conditions.
y_f = lfilter(b, a, x)
y_fb = lfilter(b, a, y_f[..., ::-1])[..., ::-1]
y_b = lfilter(b, a, x[..., ::-1])[..., ::-1]
y_bf = lfilter(b, a, y_b)
delta_y_bf_fb = y_bf - y_fb
if m == n:
delta = delta_y_bf_fb
else:
start_m = delta_y_bf_fb[..., :m]
end_m = delta_y_bf_fb[..., -m:]
delta = np.concatenate((start_m, end_m), axis=-1)
# ic_opt holds the "optimal" initial conditions.
# The following code computes the result shown in the formula
# of the paper between equations (6) and (7).
if delta.ndim == 1:
ic_opt = linalg.lstsq(M, delta)[0]
else:
# Reshape delta so it can be used as an array of multiple
# right-hand-sides in linalg.lstsq.
delta2d = delta.reshape(-1, delta.shape[-1]).T
ic_opt0 = linalg.lstsq(M, delta2d)[0].T
ic_opt = ic_opt0.reshape(delta.shape[:-1] + (M.shape[-1],))
# Now compute the filtered signal using equation (7) of [1].
# First, form [S^R, O^R] and call it W.
if m == n:
W = np.hstack((Sr, Obsr))
else:
W = np.zeros((2*m, 2*order))
W[:m, :order] = Sr
W[m:, order:] = Obsr
# Equation (7) of [1] says
# Y_fb^opt = Y_fb^0 + W * [x_0^opt; x_{N-1}^opt]
# `wic` is (almost) the product on the right.
# W has shape (m, 2*order), and ic_opt has shape (..., 2*order),
# so we can't use W.dot(ic_opt). Instead, we dot ic_opt with W.T,
# so wic has shape (..., m).
wic = ic_opt.dot(W.T)
# `wic` is "almost" the product of W and the optimal ICs in equation
# (7)--if we're using a truncated impulse response (m < n), `wic`
# contains only the adjustments required for the ends of the signal.
# Here we form y_opt, taking this into account if necessary.
y_opt = y_fb
if m == n:
y_opt += wic
else:
y_opt[..., :m] += wic[..., :m]
y_opt[..., -m:] += wic[..., -m:]
x0 = ic_opt[..., :order]
x1 = ic_opt[..., -order:]
    if axis != -1 and axis != x.ndim - 1:
# Restore the data axis to its original position.
x0 = np.swapaxes(x0, axis, x.ndim - 1)
x1 = np.swapaxes(x1, axis, x.ndim - 1)
y_opt = np.swapaxes(y_opt, axis, x.ndim - 1)
return y_opt, x0, x1
def filtfilt(b, a, x, axis=-1, padtype='odd', padlen=None, method='pad',
irlen=None):
"""
A forward-backward filter.
This function applies a linear filter twice, once forward and once
backwards. The combined filter has linear phase.
The function provides options for handling the edges of the signal.
When `method` is "pad", the function pads the data along the given axis
in one of three ways: odd, even or constant. The odd and even extensions
have the corresponding symmetry about the end point of the data. The
constant extension extends the data with the values at the end points. On
both the forward and backward passes, the initial condition of the
filter is found by using `lfilter_zi` and scaling it by the end point of
the extended data.
When `method` is "gust", Gustafsson's method [1]_ is used. Initial
conditions are chosen for the forward and backward passes so that the
forward-backward filter gives the same result as the backward-forward
filter.
Parameters
----------
b : (N,) array_like
The numerator coefficient vector of the filter.
a : (N,) array_like
The denominator coefficient vector of the filter. If ``a[0]``
is not 1, then both `a` and `b` are normalized by ``a[0]``.
x : array_like
The array of data to be filtered.
axis : int, optional
The axis of `x` to which the filter is applied.
Default is -1.
padtype : str or None, optional
Must be 'odd', 'even', 'constant', or None. This determines the
type of extension to use for the padded signal to which the filter
is applied. If `padtype` is None, no padding is used. The default
is 'odd'.
padlen : int or None, optional
The number of elements by which to extend `x` at both ends of
`axis` before applying the filter. This value must be less than
``x.shape[axis] - 1``. ``padlen=0`` implies no padding.
The default value is ``3 * max(len(a), len(b))``.
method : str, optional
Determines the method for handling the edges of the signal, either
"pad" or "gust". When `method` is "pad", the signal is padded; the
type of padding is determined by `padtype` and `padlen`, and `irlen`
is ignored. When `method` is "gust", Gustafsson's method is used,
and `padtype` and `padlen` are ignored.
irlen : int or None, optional
When `method` is "gust", `irlen` specifies the length of the
impulse response of the filter. If `irlen` is None, no part
of the impulse response is ignored. For a long signal, specifying
`irlen` can significantly improve the performance of the filter.
Returns
-------
y : ndarray
The filtered output, an array of type numpy.float64 with the same
shape as `x`.
See Also
--------
lfilter_zi, lfilter
Notes
-----
    The option to use Gustafsson's method was added in scipy version 0.16.0.
References
----------
    .. [1] F. Gustafsson, "Determining the initial states in forward-backward
filtering", Transactions on Signal Processing, Vol. 46, pp. 988-992,
1996.
Examples
--------
The examples will use several functions from `scipy.signal`.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
First we create a one second signal that is the sum of two pure sine
waves, with frequencies 5 Hz and 250 Hz, sampled at 2000 Hz.
>>> t = np.linspace(0, 1.0, 2001)
>>> xlow = np.sin(2 * np.pi * 5 * t)
>>> xhigh = np.sin(2 * np.pi * 250 * t)
>>> x = xlow + xhigh
Now create a lowpass Butterworth filter with a cutoff of 0.125 times
the Nyquist rate, or 125 Hz, and apply it to ``x`` with `filtfilt`.
The result should be approximately ``xlow``, with no phase shift.
>>> b, a = signal.butter(8, 0.125)
>>> y = signal.filtfilt(b, a, x, padlen=150)
>>> np.abs(y - xlow).max()
9.1086182074789912e-06
We get a fairly clean result for this artificial example because
the odd extension is exact, and with the moderately long padding,
the filter's transients have dissipated by the time the actual data
is reached. In general, transient effects at the edges are
unavoidable.
The following example demonstrates the option ``method="gust"``.
First, create a filter.
>>> b, a = signal.ellip(4, 0.01, 120, 0.125) # Filter to be applied.
>>> np.random.seed(123456)
`sig` is a random input signal to be filtered.
>>> n = 60
>>> sig = np.random.randn(n)**3 + 3*np.random.randn(n).cumsum()
Apply `filtfilt` to `sig`, once using the Gustafsson method, and
once using padding, and plot the results for comparison.
>>> fgust = signal.filtfilt(b, a, sig, method="gust")
>>> fpad = signal.filtfilt(b, a, sig, padlen=50)
>>> plt.plot(sig, 'k-', label='input')
>>> plt.plot(fgust, 'b-', linewidth=4, label='gust')
>>> plt.plot(fpad, 'c-', linewidth=1.5, label='pad')
>>> plt.legend(loc='best')
>>> plt.show()
The `irlen` argument can be used to improve the performance
of Gustafsson's method.
Estimate the impulse response length of the filter.
>>> z, p, k = signal.tf2zpk(b, a)
>>> eps = 1e-9
>>> r = np.max(np.abs(p))
>>> approx_impulse_len = int(np.ceil(np.log(eps) / np.log(r)))
>>> approx_impulse_len
137
Apply the filter to a longer signal, with and without the `irlen`
argument. The difference between `y1` and `y2` is small. For long
signals, using `irlen` gives a significant performance improvement.
>>> x = np.random.randn(5000)
>>> y1 = signal.filtfilt(b, a, x, method='gust')
>>> y2 = signal.filtfilt(b, a, x, method='gust', irlen=approx_impulse_len)
>>> print(np.max(np.abs(y1 - y2)))
1.80056858312e-10
"""
b = np.atleast_1d(b)
a = np.atleast_1d(a)
x = np.asarray(x)
if method not in ["pad", "gust"]:
raise ValueError("method must be 'pad' or 'gust'.")
if method == "gust":
y, z1, z2 = _filtfilt_gust(b, a, x, axis=axis, irlen=irlen)
return y
# `method` is "pad"...
ntaps = max(len(a), len(b))
if padtype not in ['even', 'odd', 'constant', None]:
raise ValueError(("Unknown value '%s' given to padtype. padtype "
"must be 'even', 'odd', 'constant', or None.") %
padtype)
if padtype is None:
padlen = 0
if padlen is None:
# Original padding; preserved for backwards compatibility.
edge = ntaps * 3
else:
edge = padlen
# x's 'axis' dimension must be bigger than edge.
if x.shape[axis] <= edge:
raise ValueError("The length of the input vector x must be at least "
"padlen, which is %d." % edge)
if padtype is not None and edge > 0:
# Make an extension of length `edge` at each
# end of the input array.
if padtype == 'even':
ext = even_ext(x, edge, axis=axis)
elif padtype == 'odd':
ext = odd_ext(x, edge, axis=axis)
else:
ext = const_ext(x, edge, axis=axis)
else:
ext = x
# Get the steady state of the filter's step response.
zi = lfilter_zi(b, a)
# Reshape zi and create x0 so that zi*x0 broadcasts
# to the correct value for the 'zi' keyword argument
# to lfilter.
zi_shape = [1] * x.ndim
zi_shape[axis] = zi.size
zi = np.reshape(zi, zi_shape)
x0 = axis_slice(ext, stop=1, axis=axis)
# Forward filter.
(y, zf) = lfilter(b, a, ext, axis=axis, zi=zi * x0)
# Backward filter.
# Create y0 so zi*y0 broadcasts appropriately.
y0 = axis_slice(y, start=-1, axis=axis)
(y, zf) = lfilter(b, a, axis_reverse(y, axis=axis), axis=axis, zi=zi * y0)
# Reverse y.
y = axis_reverse(y, axis=axis)
if edge > 0:
# Slice the actual signal from the extended signal.
y = axis_slice(y, start=edge, stop=-edge, axis=axis)
return y
def sosfilt(sos, x, axis=-1, zi=None):
"""
Filter data along one dimension using cascaded second-order sections
Filter a data sequence, `x`, using a digital IIR filter defined by
`sos`. This is implemented by performing `lfilter` for each
second-order section. See `lfilter` for details.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. Each row corresponds to a second-order
section, with the first three columns providing the numerator
coefficients and the last three providing the denominator
coefficients.
x : array_like
An N-dimensional input array.
    axis : int, optional
The axis of the input data array along which to apply the
linear filter. The filter is applied to each subarray along
this axis. Default is -1.
zi : array_like, optional
        Initial conditions for the cascaded filter delays. It is an (at
        least 2-D) array of shape ``(n_sections, ..., 2, ...)``, where
        ``..., 2, ...`` denotes the shape of `x`, but with ``x.shape[axis]``
        replaced by 2. If `zi` is None or is not given, initial rest
(i.e. all zeros) is assumed.
Note that these initial conditions are *not* the same as the initial
conditions given by `lfiltic` or `lfilter_zi`.
Returns
-------
y : ndarray
The output of the digital filter.
zf : ndarray, optional
If `zi` is None, this is not returned, otherwise, `zf` holds the
final filter delay values.
See Also
--------
zpk2sos, sos2zpk, sosfilt_zi
Notes
-----
The filter function is implemented as a series of second-order filters
with direct-form II transposed structure. It is designed to minimize
numerical precision errors for high-order filters.
.. versionadded:: 0.16.0
Examples
--------
Plot a 13th-order filter's impulse response using both `lfilter` and
`sosfilt`, showing the instability that results from trying to do a
13th-order filter in a single stage (the numerical error pushes some poles
outside of the unit circle):
>>> import matplotlib.pyplot as plt
>>> from scipy import signal
>>> b, a = signal.ellip(13, 0.009, 80, 0.05, output='ba')
>>> sos = signal.ellip(13, 0.009, 80, 0.05, output='sos')
>>> x = np.zeros(700)
>>> x[0] = 1.
>>> y_tf = signal.lfilter(b, a, x)
>>> y_sos = signal.sosfilt(sos, x)
>>> plt.plot(y_tf, 'r', label='TF')
>>> plt.plot(y_sos, 'k', label='SOS')
>>> plt.legend(loc='best')
>>> plt.show()
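    The ``zi`` argument can carry the filter state across chunks of a long
    signal. The following sketch (illustrative only; it reuses the filter
    designed above and assumes initial rest) shows that filtering a signal
    in two chunks with a carried state matches filtering it all at once:
    >>> x = np.random.randn(100)
    >>> zi = np.zeros((sos.shape[0], 2))
    >>> y1, zi = signal.sosfilt(sos, x[:50], zi=zi)
    >>> y2, zi = signal.sosfilt(sos, x[50:], zi=zi)
    >>> np.allclose(np.concatenate([y1, y2]), signal.sosfilt(sos, x))
    True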
"""
x = np.asarray(x)
sos = atleast_2d(sos)
if sos.ndim != 2:
raise ValueError('sos array must be 2D')
n_sections, m = sos.shape
if m != 6:
raise ValueError('sos array must be shape (n_sections, 6)')
use_zi = zi is not None
if use_zi:
zi = np.asarray(zi)
x_zi_shape = list(x.shape)
x_zi_shape[axis] = 2
x_zi_shape = tuple([n_sections] + x_zi_shape)
if zi.shape != x_zi_shape:
raise ValueError('Invalid zi shape. With axis=%r, an input with '
'shape %r, and an sos array with %d sections, zi '
'must have shape %r.' %
(axis, x.shape, n_sections, x_zi_shape))
zf = zeros_like(zi)
for section in range(n_sections):
if use_zi:
x, zf[section] = lfilter(sos[section, :3], sos[section, 3:],
x, axis, zi=zi[section])
else:
x = lfilter(sos[section, :3], sos[section, 3:], x, axis)
out = (x, zf) if use_zi else x
return out
from scipy.signal.filter_design import cheby1
from scipy.signal.fir_filter_design import firwin
def decimate(x, q, n=None, ftype='iir', axis=-1):
"""
Downsample the signal by using a filter.
    By default, an order 8 Chebyshev type I filter is used. An order-30 FIR
    filter with a Hamming window is used if `ftype` is 'fir'.
Parameters
----------
x : ndarray
The signal to be downsampled, as an N-dimensional array.
q : int
The downsampling factor.
n : int, optional
The order of the filter (1 less than the length for 'fir').
ftype : str {'iir', 'fir'}, optional
The type of the lowpass filter.
axis : int, optional
The axis along which to decimate.
Returns
-------
y : ndarray
The down-sampled signal.
See also
--------
resample
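    Examples
    --------
    A minimal sketch (illustrative only): downsample 1000 samples of a slow
    sinusoid by a factor of 4 using the default IIR filter.
    >>> from scipy import signal
    >>> t = np.linspace(0, 1, 1000, endpoint=False)
    >>> x = np.sin(2 * np.pi * 5 * t)
    >>> y = signal.decimate(x, 4)
    >>> y.shape
    (250,)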
"""
if not isinstance(q, int):
raise TypeError("q must be an integer")
if n is None:
if ftype == 'fir':
n = 30
else:
n = 8
if ftype == 'fir':
b = firwin(n + 1, 1. / q, window='hamming')
a = 1.
else:
b, a = cheby1(n, 0.05, 0.8 / q)
y = lfilter(b, a, x, axis=axis)
sl = [slice(None)] * y.ndim
sl[axis] = slice(None, None, q)
return y[sl]
| witcxc/scipy | scipy/signal/signaltools.py | Python | bsd-3-clause | 81,684 |
import inspect
import cytoolz
import toolz
from types import BuiltinFunctionType
from cytoolz import curry, identity, keyfilter, valfilter, merge_with
@curry
def isfrommod(modname, func):
mod = getattr(func, '__module__', '') or ''
return modname in mod
def test_class_sigs():
""" Test that all ``cdef class`` extension types in ``cytoolz`` have
correctly embedded the function signature as done in ``toolz``.
"""
# only consider items created in both `toolz` and `cytoolz`
toolz_dict = valfilter(isfrommod('toolz'), toolz.__dict__)
cytoolz_dict = valfilter(isfrommod('cytoolz'), cytoolz.__dict__)
# only test `cdef class` extensions from `cytoolz`
cytoolz_dict = valfilter(lambda x: not isinstance(x, BuiltinFunctionType),
cytoolz_dict)
# full API coverage should be tested elsewhere
toolz_dict = keyfilter(lambda x: x in cytoolz_dict, toolz_dict)
cytoolz_dict = keyfilter(lambda x: x in toolz_dict, cytoolz_dict)
d = merge_with(identity, toolz_dict, cytoolz_dict)
for key, (toolz_func, cytoolz_func) in d.items():
try:
# function
toolz_spec = inspect.getargspec(toolz_func)
except TypeError:
try:
# curried or partial object
toolz_spec = inspect.getargspec(toolz_func.func)
except (TypeError, AttributeError):
# class
toolz_spec = inspect.getargspec(toolz_func.__init__)
toolz_sig = toolz_func.__name__ + inspect.formatargspec(*toolz_spec)
if toolz_sig not in cytoolz_func.__doc__:
message = ('cytoolz.%s does not have correct function signature.'
'\n\nExpected: %s'
'\n\nDocstring in cytoolz is:\n%s'
% (key, toolz_sig, cytoolz_func.__doc__))
assert False, message
skip_sigs = ['identity']
aliases = {'comp': 'compose'}
def test_sig_at_beginning():
""" Test that the function signature is at the beginning of the docstring
and is followed by exactly one blank line.
"""
cytoolz_dict = valfilter(isfrommod('cytoolz'), cytoolz.__dict__)
cytoolz_dict = keyfilter(lambda x: x not in skip_sigs, cytoolz_dict)
for key, val in cytoolz_dict.items():
doclines = val.__doc__.splitlines()
assert len(doclines) > 2, (
'cytoolz.%s docstring too short:\n\n%s' % (key, val.__doc__))
sig = '%s(' % aliases.get(key, key)
assert sig in doclines[0], (
'cytoolz.%s docstring missing signature at beginning:\n\n%s'
% (key, val.__doc__))
assert not doclines[1], (
'cytoolz.%s docstring missing blank line after signature:\n\n%s'
% (key, val.__doc__))
assert doclines[2], (
'cytoolz.%s docstring too many blank lines after signature:\n\n%s'
% (key, val.__doc__))
| ljwolf/cytoolz | cytoolz/tests/test_embedded_sigs.py | Python | bsd-3-clause | 2,950 |
"""
==================================================
Explicit feature map approximation for RBF kernels
==================================================
.. currentmodule:: sklearn.kernel_approximation
This example shows how to use :class:`RBFSampler` to approximate the feature
map of an RBF kernel for classification with an SVM on the digits dataset.
Results using a linear SVM in the original space, a linear SVM using the
approximate mapping, and a kernelized SVM are compared. Timings and accuracy
for varying numbers of Monte Carlo samples used by the approximate mapping
are shown.
Sampling more dimensions clearly leads to better classification results, but
comes at a greater cost. This means there is a tradeoff between runtime and
accuracy, given by the parameter n_components. Note that solving the linear
SVM and also the approximate kernel SVM could be greatly accelerated by using
stochastic gradient descent via :class:`sklearn.linear_model.SGDClassifier`.
This is not easily possible for the case of the kernelized SVM.
The second plot visualizes the decision surfaces of the RBF kernel SVM and
of the linear SVM with the approximate kernel map.
The plot shows the decision surfaces of the classifiers projected onto
the first two principal components of the data. This visualization should
be taken with a grain of salt since it is just an interesting slice through
the decision surface in 64 dimensions. In particular, note that
a datapoint (represented as a dot) is not necessarily classified
into the region it appears to lie in, since it does not lie on the plane
spanned by the first two principal components.
The usage of :class:`RBFSampler` is described in detail in
:ref:`kernel_approximation`.
"""
print __doc__
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# modified Andreas Mueller
# License: Simplified BSD
# Standard scientific Python imports
import pylab as pl
import numpy as np
from time import time
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, pipeline
from sklearn.kernel_approximation import RBFSampler
from sklearn.decomposition import PCA
# The digits dataset
digits = datasets.load_digits(n_class=9)
# To apply a classifier to this data, we need to flatten the images, to
# turn the data into a (samples, features) matrix:
n_samples = len(digits.data)
data = digits.data / 16.
data -= data.mean(axis=0)
# We learn the digits on the first half of the digits
data_train, targets_train = data[:n_samples / 2], digits.target[:n_samples / 2]
# Now predict the value of the digit on the second half:
data_test, targets_test = data[n_samples / 2:], digits.target[n_samples / 2:]
#data_test = scaler.transform(data_test)
# Create a classifier: a support vector classifier
kernel_svm = svm.SVC(gamma=.2)
linear_svm = svm.LinearSVC()
# create pipeline from kernel approximation
# and linear svm
feature_map = RBFSampler(gamma=.2, random_state=1)
approx_kernel_svm = pipeline.Pipeline([("feature_map", feature_map),
("svm", svm.LinearSVC())])
# fit and predict using linear and kernel svm:
kernel_svm_time = time()
kernel_svm.fit(data_train, targets_train)
kernel_svm_score = kernel_svm.score(data_test, targets_test)
kernel_svm_time = time() - kernel_svm_time
linear_svm_time = time()
linear_svm.fit(data_train, targets_train)
linear_svm_score = linear_svm.score(data_test, targets_test)
linear_svm_time = time() - linear_svm_time
sample_sizes = 50 * np.arange(1, 10)
approx_kernel_scores = []
approx_kernel_times = []
for D in sample_sizes:
approx_kernel_svm.set_params(feature_map__n_components=D)
approx_kernel_timing = time()
approx_kernel_svm.fit(data_train, targets_train)
approx_kernel_times.append(time() - approx_kernel_timing)
score = approx_kernel_svm.score(data_test, targets_test)
approx_kernel_scores.append(score)
# plot the results:
accuracy = pl.subplot(211)
# second subplot for timings
timescale = pl.subplot(212)
accuracy.plot(sample_sizes, approx_kernel_scores, label="approx. kernel")
timescale.plot(sample_sizes, approx_kernel_times, '--',
label='approx. kernel')
# horizontal lines for exact rbf and linear kernels:
accuracy.plot([sample_sizes[0], sample_sizes[-1]], [linear_svm_score,
linear_svm_score], label="linear svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]], [linear_svm_time,
linear_svm_time], '--', label='linear svm')
accuracy.plot([sample_sizes[0], sample_sizes[-1]], [kernel_svm_score,
kernel_svm_score], label="rbf svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]], [kernel_svm_time,
kernel_svm_time], '--', label='rbf svm')
# vertical line for dataset dimensionality = 64
accuracy.plot([64, 64], [0.7, 1], label="n_features")
# legends and labels
accuracy.set_title("Classification accuracy")
timescale.set_title("Training times")
accuracy.set_xlim(sample_sizes[0], sample_sizes[-1])
accuracy.set_xticks(())
accuracy.set_ylim(np.min(approx_kernel_scores), 1)
timescale.set_xlabel("Sampling steps = transformed feature dimension")
accuracy.set_ylabel("Classification accuracy")
timescale.set_ylabel("Training time in seconds")
accuracy.legend(loc='best')
timescale.legend(loc='best')
# visualize the decision surface, projected down to the first
# two principal components of the dataset
pca = PCA(n_components=8).fit(data_train)
X = pca.transform(data_train)
# Generate grid along first two principal components
multiples = np.arange(-2, 2, 0.1)
# steps along first component
first = multiples[:, np.newaxis] * pca.components_[0, :]
# steps along second component
second = multiples[:, np.newaxis] * pca.components_[1, :]
# combine
grid = first[np.newaxis, :, :] + second[:, np.newaxis, :]
flat_grid = grid.reshape(-1, data.shape[1])
# title for the plots
titles = ['SVC with rbf kernel',
'SVC (linear kernel) with rbf feature map\n n_components=100']
pl.figure(figsize=(12, 5))
# predict and plot
for i, clf in enumerate((kernel_svm, approx_kernel_svm)):
    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max] x [y_min, y_max].
pl.subplot(1, 2, i + 1)
Z = clf.predict(flat_grid)
# Put the result into a color plot
Z = Z.reshape(grid.shape[:-1])
pl.contourf(multiples, multiples, Z, cmap=pl.cm.Paired)
pl.axis('off')
# Plot also the training points
pl.scatter(X[:, 0], X[:, 1], c=targets_train, cmap=pl.cm.Paired)
pl.title(titles[i])
pl.show()
| sgenoud/scikit-learn | examples/plot_kernel_approximation.py | Python | bsd-3-clause | 6,494 |
"""django-cms-redirects"""
VERSION = (1, 0, 6)
__version__ = "1.0.6"
| vovanbo/djangocms-redirects | cms_redirects/__init__.py | Python | bsd-3-clause | 69 |
#!/usr/bin/env python
import gtk
import Editor
def main(filenames=[]):
"""
start the editor, with a new empty document
or load all *filenames* as tabs
    returns the list of tab objects
"""
Editor.register_stock_icons()
editor = Editor.EditorWindow()
tabs = map(editor.load_document, filenames)
if len(filenames) == 0:
editor.welcome()
return tabs
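# Illustrative usage of main() (the file names below are hypothetical):
#   tabs = main(["session1.odml", "session2.odml"])  # open two documents
#   tabs = main()                                     # empty editor, shows welcome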
def run():
"""
handle all initialisation and start main() and gtk.main()
"""
try: # this works only on linux
from ctypes import cdll
libc = cdll.LoadLibrary("libc.so.6")
libc.prctl(15, 'odMLEditor', 0, 0, 0)
except:
pass
from optparse import OptionParser
parser = OptionParser()
(options, args) = parser.parse_args()
main(filenames=args)
gtk.main()
if __name__=="__main__":
run()
| carloscanova/python-odml | odml/gui/__main__.py | Python | bsd-3-clause | 845 |
# Merge stimuli information from spike2 mat file into Kwik file
import h5py as h5
import tables
import os
import numpy as np
import argparse
import glob
try: import simplejson as json
except ImportError: import json
from klusta_pipeline.dataio import load_recordings, save_info, load_digmark, load_stim_info
from klusta_pipeline.utils import get_import_list, validate_merge, realign
def get_args():
parser = argparse.ArgumentParser(description='Compile Spike2 epoch .mat files into KlustaKwik KWD file.')
parser.add_argument('path', default = './', nargs='?',
help='directory containing all of the mat files to compile')
parser.add_argument('dest', default = './', nargs='?',
help='destination directory for kwd and other files')
return parser.parse_args()
def get_rec_samples(kwd_file,index):
with h5.File(kwd_file, 'r') as kwd:
return kwd['/recordings/{}/data'.format(index)].shape[0]
def merge_recording_info(klu_path,mat_path):
batch = klu_path.split('__')[-1]
with open(os.path.join(klu_path,batch+'_info.json')) as f:
info = json.load(f)
assert 'recordings' not in info
import_list = get_import_list(mat_path,info['exports'])
for item in import_list:
assert os.path.exists(item), item
mat_data = validate_merge(import_list,info['omit'])
fs = info['params']['fs']
chans = set(mat_data[0]['chans'])
for d2 in mat_data[1:]:
chans = chans.intersection(d2['chans'])
chans = list(chans)
for i,m in zip(info['exports'],mat_data):
i['chans'] = chans
rec_list = []
for import_file in import_list:
recordings = load_recordings(import_file,chans)
for r in recordings:
rec = realign(r,chans,fs,'spline')
del rec['data']
rec_list.append(rec)
    info['recordings'] = [{k: v for k, v in rec.items() if k != 'data'} for rec in rec_list]
save_info(klu_path,info)
return info
def merge(spike2mat_folder, kwik_folder):
info_json = glob.glob(os.path.join(kwik_folder,'*_info.json'))[0]
with open(info_json, 'r') as f:
info = json.load(f)
kwik_data_file = os.path.join(kwik_folder,info['name']+'.kwik')
    kwd_raw_file = os.path.join(kwik_folder, info['name'] + '.raw.kwd')
with tables.open_file(kwik_data_file, 'r+') as kkfile:
digmark_timesamples = []
digmark_recording = []
digmark_codes = []
stimulus_timesamples = []
stimulus_recording = []
stimulus_codes = []
stimulus_names = []
spike_recording_obj = kkfile.get_node('/channel_groups/0/spikes','recording')
spike_time_samples_obj = kkfile.get_node('/channel_groups/0/spikes','time_samples')
spike_recording = spike_recording_obj.read()
spike_time_samples = spike_time_samples_obj.read()
try:
assert 'recordings' in info
except AssertionError:
info = merge_recording_info(kwik_folder,spike2mat_folder)
order = np.sort([str(ii) for ii in range(len(info['recordings']))])
print order
print len(spike_recording)
is_done = np.zeros(spike_recording.shape,np.bool_)
for rr,rid_str in enumerate(order):
# rr: index of for-loop
# rid: recording id
# rid_str: string form of recording id
rid = int(rid_str)
rec = info['recordings'][rid]
n_samps = get_rec_samples(kwd_raw_file,rid)
#is_done = np.vectorize(lambda x: x not in done)
todo = ~is_done & (spike_time_samples >= n_samps)
print "rec {}: {} spikes done".format(rid,is_done.sum())
print "setting {} spikes to next cluster".format(todo.sum())
if todo.sum()>0:
spike_recording[todo] = int(order[rr+1])
spike_time_samples[todo] -= n_samps
is_done = is_done | ~todo
print is_done.sum()
t0 = rec['start_time']
fs = rec['fs']
dur = float(n_samps) / fs
s2mat = os.path.split(rec['file_origin'])[-1]
s2mat = os.path.join(spike2mat_folder, s2mat)
codes, times = load_digmark(s2mat)
rec_mask = (times >= t0) * (times < (t0+dur))
codes = codes[rec_mask]
times = times[rec_mask] - t0
time_samples = (times * fs).round().astype(np.uint64)
recording = rid * np.ones(codes.shape,np.uint16)
digmark_timesamples.append(time_samples)
digmark_recording.append(recording)
digmark_codes.append(codes)
codes, times, names = load_stim_info(s2mat)
rec_mask = (times >= t0) * (times < (t0+dur))
codes = codes[rec_mask]
names = names[rec_mask]
times = times[rec_mask] - t0
time_samples = (times * fs).round().astype(np.uint64)
recording = rid * np.ones(codes.shape,np.uint16)
stimulus_timesamples.append(time_samples)
stimulus_recording.append(recording)
stimulus_codes.append(codes)
stimulus_names.append(names)
digmark_timesamples = np.concatenate(digmark_timesamples)
digmark_recording = np.concatenate(digmark_recording)
digmark_codes = np.concatenate(digmark_codes)
stimulus_timesamples = np.concatenate(stimulus_timesamples)
stimulus_recording = np.concatenate(stimulus_recording)
stimulus_codes = np.concatenate(stimulus_codes)
stimulus_names = np.concatenate(stimulus_names)
print digmark_timesamples.dtype
print digmark_recording.dtype
print digmark_codes.dtype
print stimulus_timesamples.dtype
print stimulus_recording.dtype
print stimulus_codes.dtype
print stimulus_names.dtype
kkfile.create_group("/", "event_types", "event_types")
kkfile.create_group("/event_types", "DigMark")
kkfile.create_earray("/event_types/DigMark", 'time_samples', obj=digmark_timesamples)
kkfile.create_earray("/event_types/DigMark", 'recording', obj=digmark_recording)
kkfile.create_earray("/event_types/DigMark", 'codes', obj=digmark_codes)
kkfile.create_group("/event_types", "Stimulus")
kkfile.create_earray("/event_types/Stimulus", 'time_samples', obj=stimulus_timesamples)
kkfile.create_earray("/event_types/Stimulus", 'recording', obj=stimulus_recording)
kkfile.create_earray("/event_types/Stimulus", 'codes', obj=stimulus_codes)
kkfile.create_earray("/event_types/Stimulus", 'text', obj=stimulus_names)
spike_recording_obj[:] = spike_recording
spike_time_samples_obj[:] = spike_time_samples
def main():
args = get_args()
spike2mat_folder = os.path.abspath(args.path)
kwik_folder = os.path.abspath(args.dest)
merge(spike2mat_folder, kwik_folder)
if __name__ == '__main__':
main()
| gentnerlab/klusta-pipeline | klusta_pipeline/merge_stim_kwik.py | Python | bsd-3-clause | 7,008 |
"""
Module for abstract serializer/unserializer base classes.
"""
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.db import models
from django.utils.encoding import smart_str, smart_unicode
class SerializationError(Exception):
"""Something bad happened during serialization."""
pass
class DeserializationError(Exception):
"""Something bad happened during deserialization."""
pass
class Serializer(object):
"""
Abstract serializer base class.
"""
# Indicates if the implemented serializer is only available for
# internal Django use.
internal_use_only = False
def serialize(self, queryset, **options):
"""
Serialize a queryset.
"""
self.options = options
self.stream = options.get("stream", StringIO())
self.selected_fields = options.get("fields")
self.start_serialization()
for obj in queryset:
self.start_object(obj)
for field in obj._meta.local_fields:
if field.serialize:
if field.rel is None:
if self.selected_fields is None or field.attname in self.selected_fields:
self.handle_field(obj, field)
else:
if self.selected_fields is None or field.attname[:-3] in self.selected_fields:
self.handle_fk_field(obj, field)
for field in obj._meta.many_to_many:
if field.serialize:
if self.selected_fields is None or field.attname in self.selected_fields:
self.handle_m2m_field(obj, field)
self.end_object(obj)
self.end_serialization()
return self.getvalue()
def get_string_value(self, obj, field):
"""
Convert a field's value to a string.
"""
if isinstance(field, models.DateTimeField):
value = getattr(obj, field.name).strftime("%Y-%m-%d %H:%M:%S")
else:
value = field.flatten_data(follow=None, obj=obj).get(field.name, "")
return smart_unicode(value)
def start_serialization(self):
"""
Called when serializing of the queryset starts.
"""
raise NotImplementedError
def end_serialization(self):
"""
Called when serializing of the queryset ends.
"""
pass
def start_object(self, obj):
"""
Called when serializing of an object starts.
"""
raise NotImplementedError
def end_object(self, obj):
"""
Called when serializing of an object ends.
"""
pass
def handle_field(self, obj, field):
"""
Called to handle each individual (non-relational) field on an object.
"""
raise NotImplementedError
def handle_fk_field(self, obj, field):
"""
Called to handle a ForeignKey field.
"""
raise NotImplementedError
def handle_m2m_field(self, obj, field):
"""
Called to handle a ManyToManyField.
"""
raise NotImplementedError
def getvalue(self):
"""
Return the fully serialized queryset (or None if the output stream is
not seekable).
"""
if callable(getattr(self.stream, 'getvalue', None)):
return self.stream.getvalue()
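# A minimal concrete Serializer is sketched below for illustration only (it is
# not part of Django): it fills in the abstract hooks above to emit one
# "app_label.model pk: field=value, ..." line per object. Field values are
# converted with get_string_value; many-to-many data is skipped in this sketch.
class _ExampleLineSerializer(Serializer):
    internal_use_only = True
    def start_serialization(self):
        self._current = None
    def start_object(self, obj):
        self._current = []
    def handle_field(self, obj, field):
        self._current.append("%s=%s" % (field.name,
                                        self.get_string_value(obj, field)))
    def handle_fk_field(self, obj, field):
        self._current.append("%s=%s" % (field.attname,
                                        getattr(obj, field.attname)))
    def handle_m2m_field(self, obj, field):
        pass
    def end_object(self, obj):
        self.stream.write("%s.%s %s: %s\n" % (obj._meta.app_label,
                                              obj._meta.object_name.lower(),
                                              obj._meta.pk.value_to_string(obj),
                                              ", ".join(self._current)))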
class Deserializer(object):
"""
Abstract base deserializer class.
"""
def __init__(self, stream_or_string, **options):
"""
Init this serializer given a stream or a string
"""
self.options = options
if isinstance(stream_or_string, basestring):
self.stream = StringIO(stream_or_string)
else:
self.stream = stream_or_string
# hack to make sure that the models have all been loaded before
# deserialization starts (otherwise subclass calls to get_model()
# and friends might fail...)
models.get_apps()
def __iter__(self):
return self
def next(self):
"""Iteration iterface -- return the next item in the stream"""
raise NotImplementedError
class DeserializedObject(object):
"""
A deserialized model.
Basically a container for holding the pre-saved deserialized data along
with the many-to-many data saved with the object.
Call ``save()`` to save the object (with the many-to-many data) to the
database; call ``save(save_m2m=False)`` to save just the object fields
(and not touch the many-to-many stuff.)
"""
def __init__(self, obj, m2m_data=None):
self.object = obj
self.m2m_data = m2m_data
def __repr__(self):
return "<DeserializedObject: %s>" % smart_str(self.object)
def save(self, save_m2m=True):
# Call save on the Model baseclass directly. This bypasses any
# model-defined save. The save is also forced to be raw.
# This ensures that the data that is deserialized is literally
# what came from the file, not post-processed by pre_save/save
# methods.
models.Model.save_base(self.object, raw=True)
if self.m2m_data and save_m2m:
for accessor_name, object_list in self.m2m_data.items():
setattr(self.object, accessor_name, object_list)
# prevent a second (possibly accidental) call to save() from saving
# the m2m data twice.
self.m2m_data = None
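# Typical deserialization flow, sketched for illustration (``SomeDeserializer``
# is a hypothetical concrete subclass of Deserializer):
#     for deserialized in SomeDeserializer(stream):
#         deserialized.save()               # saves the object and its m2m data
#         # deserialized.save(save_m2m=False) would skip the m2m assignment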
| diofeher/django-nfa | django/core/serializers/base.py | Python | bsd-3-clause | 5,533 |